author    mlugg <mlugg@mlugg.co.uk>  2023-06-22 18:46:56 +0100
committer Andrew Kelley <andrew@ziglang.org>  2023-06-24 16:56:39 -0700
commit    f26dda21171e26f44aeec8c59a75bbb3331eeb2e
tree      c935248861ae2693b314f2c8bc78fe38d9961b6d /src/codegen
parent    447ca4e3fff021f471b748187b53f0a4744ad0bc
all: migrate code to new cast builtin syntax
Most of this migration was performed automatically with `zig fmt`. There were a few exceptions which I had to manually fix:

* `@alignCast` and `@addrSpaceCast` cannot be automatically rewritten
* `@truncate`'s fixup is incorrect for vectors
* Test cases are not formatted, and their error locations change
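For context, the rewrite pattern applied throughout the hunks below moves the destination type out of the cast builtin and into a wrapping `@as`: the new single-argument builtins infer their result type from context. A minimal sketch of the before/after forms (identifiers such as `wide` and `float_bits` are illustrative, not taken from this diff):

    // Illustrative only -- not code from this commit.
    fn demo(wide: u64, float_bits: u32) u16 {
        // old: const small = @intCast(u16, wide);
        const small = @as(u16, @intCast(wide));
        // old: const f = @bitCast(f32, float_bits);
        const f = @as(f32, @bitCast(float_bits));
        _ = f;
        return small;
    }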
Diffstat (limited to 'src/codegen')
-rw-r--r--  src/codegen/c.zig                102
-rw-r--r--  src/codegen/c/type.zig            20
-rw-r--r--  src/codegen/llvm.zig             264
-rw-r--r--  src/codegen/llvm/bindings.zig      2
-rw-r--r--  src/codegen/spirv.zig             50
-rw-r--r--  src/codegen/spirv/Assembler.zig   24
-rw-r--r--  src/codegen/spirv/Cache.zig      124
-rw-r--r--  src/codegen/spirv/Module.zig      14
-rw-r--r--  src/codegen/spirv/Section.zig     30
9 files changed, 315 insertions, 315 deletions
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 8afaae7cfa..317d77602f 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -326,7 +326,7 @@ pub const Function = struct {
.cty_idx = try f.typeToIndex(ty, .complete),
.alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod)),
});
- return .{ .new_local = @intCast(LocalIndex, f.locals.items.len - 1) };
+ return .{ .new_local = @as(LocalIndex, @intCast(f.locals.items.len - 1)) };
}
fn allocLocal(f: *Function, inst: Air.Inst.Index, ty: Type) !CValue {
@@ -644,7 +644,7 @@ pub const DeclGen = struct {
// Ensure complete type definition is visible before accessing fields.
_ = try dg.typeToIndex(base_ty, .complete);
const field_ty = switch (mod.intern_pool.indexToKey(base_ty.toIntern())) {
- .anon_struct_type, .struct_type, .union_type => base_ty.structFieldType(@intCast(usize, field.index), mod),
+ .anon_struct_type, .struct_type, .union_type => base_ty.structFieldType(@as(usize, @intCast(field.index)), mod),
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.One, .Many, .C => unreachable,
.Slice => switch (field.index) {
@@ -662,7 +662,7 @@ pub const DeclGen = struct {
try dg.renderCType(writer, ptr_cty);
try writer.writeByte(')');
}
- switch (fieldLocation(base_ty, ptr_ty, @intCast(u32, field.index), mod)) {
+ switch (fieldLocation(base_ty, ptr_ty, @as(u32, @intCast(field.index)), mod)) {
.begin => try dg.renderParentPtr(writer, field.base, location),
.field => |name| {
try writer.writeAll("&(");
@@ -740,11 +740,11 @@ pub const DeclGen = struct {
try dg.renderTypeForBuiltinFnName(writer, ty);
try writer.writeByte('(');
switch (bits) {
- 16 => try writer.print("{x}", .{@bitCast(f16, undefPattern(i16))}),
- 32 => try writer.print("{x}", .{@bitCast(f32, undefPattern(i32))}),
- 64 => try writer.print("{x}", .{@bitCast(f64, undefPattern(i64))}),
- 80 => try writer.print("{x}", .{@bitCast(f80, undefPattern(i80))}),
- 128 => try writer.print("{x}", .{@bitCast(f128, undefPattern(i128))}),
+ 16 => try writer.print("{x}", .{@as(f16, @bitCast(undefPattern(i16)))}),
+ 32 => try writer.print("{x}", .{@as(f32, @bitCast(undefPattern(i32)))}),
+ 64 => try writer.print("{x}", .{@as(f64, @bitCast(undefPattern(i64)))}),
+ 80 => try writer.print("{x}", .{@as(f80, @bitCast(undefPattern(i80)))}),
+ 128 => try writer.print("{x}", .{@as(f128, @bitCast(undefPattern(i128)))}),
else => unreachable,
}
try writer.writeAll(", ");
@@ -1041,11 +1041,11 @@ pub const DeclGen = struct {
};
switch (bits) {
- 16 => repr_val_big.set(@bitCast(u16, val.toFloat(f16, mod))),
- 32 => repr_val_big.set(@bitCast(u32, val.toFloat(f32, mod))),
- 64 => repr_val_big.set(@bitCast(u64, val.toFloat(f64, mod))),
- 80 => repr_val_big.set(@bitCast(u80, val.toFloat(f80, mod))),
- 128 => repr_val_big.set(@bitCast(u128, f128_val)),
+ 16 => repr_val_big.set(@as(u16, @bitCast(val.toFloat(f16, mod)))),
+ 32 => repr_val_big.set(@as(u32, @bitCast(val.toFloat(f32, mod)))),
+ 64 => repr_val_big.set(@as(u64, @bitCast(val.toFloat(f64, mod)))),
+ 80 => repr_val_big.set(@as(u80, @bitCast(val.toFloat(f80, mod)))),
+ 128 => repr_val_big.set(@as(u128, @bitCast(f128_val))),
else => unreachable,
}
@@ -1103,11 +1103,11 @@ pub const DeclGen = struct {
if (std.math.isNan(f128_val)) switch (bits) {
// We only actually need to pass the significand, but it will get
// properly masked anyway, so just pass the whole value.
- 16 => try writer.print("\"0x{x}\"", .{@bitCast(u16, val.toFloat(f16, mod))}),
- 32 => try writer.print("\"0x{x}\"", .{@bitCast(u32, val.toFloat(f32, mod))}),
- 64 => try writer.print("\"0x{x}\"", .{@bitCast(u64, val.toFloat(f64, mod))}),
- 80 => try writer.print("\"0x{x}\"", .{@bitCast(u80, val.toFloat(f80, mod))}),
- 128 => try writer.print("\"0x{x}\"", .{@bitCast(u128, f128_val)}),
+ 16 => try writer.print("\"0x{x}\"", .{@as(u16, @bitCast(val.toFloat(f16, mod)))}),
+ 32 => try writer.print("\"0x{x}\"", .{@as(u32, @bitCast(val.toFloat(f32, mod)))}),
+ 64 => try writer.print("\"0x{x}\"", .{@as(u64, @bitCast(val.toFloat(f64, mod)))}),
+ 80 => try writer.print("\"0x{x}\"", .{@as(u80, @bitCast(val.toFloat(f80, mod)))}),
+ 128 => try writer.print("\"0x{x}\"", .{@as(u128, @bitCast(f128_val))}),
else => unreachable,
};
try writer.writeAll(", ");
@@ -1225,11 +1225,11 @@ pub const DeclGen = struct {
var index: usize = 0;
while (index < ai.len) : (index += 1) {
const elem_val = try val.elemValue(mod, index);
- const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod));
+ const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @as(u8, @intCast(elem_val.toUnsignedInt(mod)));
try literal.writeChar(elem_val_u8);
}
if (ai.sentinel) |s| {
- const s_u8 = @intCast(u8, s.toUnsignedInt(mod));
+ const s_u8 = @as(u8, @intCast(s.toUnsignedInt(mod)));
if (s_u8 != 0) try literal.writeChar(s_u8);
}
try literal.end();
@@ -1239,7 +1239,7 @@ pub const DeclGen = struct {
while (index < ai.len) : (index += 1) {
if (index != 0) try writer.writeByte(',');
const elem_val = try val.elemValue(mod, index);
- const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod));
+ const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @as(u8, @intCast(elem_val.toUnsignedInt(mod)));
try writer.print("'\\x{x}'", .{elem_val_u8});
}
if (ai.sentinel) |s| {
@@ -1840,7 +1840,7 @@ pub const DeclGen = struct {
decl.ty,
.{ .decl = decl_index },
CQualifiers.init(.{ .@"const" = variable.is_const }),
- @intCast(u32, decl.alignment.toByteUnits(0)),
+ @as(u32, @intCast(decl.alignment.toByteUnits(0))),
.complete,
);
try fwd_decl_writer.writeAll(";\n");
@@ -1907,7 +1907,7 @@ pub const DeclGen = struct {
const mod = dg.module;
const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{
.signedness = .unsigned,
- .bits = @intCast(u16, ty.bitSize(mod)),
+ .bits = @as(u16, @intCast(ty.bitSize(mod))),
};
if (is_big) try writer.print(", {}", .{int_info.signedness == .signed});
@@ -2481,7 +2481,7 @@ fn genExports(o: *Object) !void {
if (mod.decl_exports.get(o.dg.decl_index.unwrap().?)) |exports| {
for (exports.items[1..], 1..) |@"export", i| {
try fwd_decl_writer.writeAll("zig_export(");
- try o.dg.renderFunctionSignature(fwd_decl_writer, o.dg.decl_index.unwrap().?, .forward, .{ .export_index = @intCast(u32, i) });
+ try o.dg.renderFunctionSignature(fwd_decl_writer, o.dg.decl_index.unwrap().?, .forward, .{ .export_index = @as(u32, @intCast(i)) });
try fwd_decl_writer.print(", {s}, {s});\n", .{
fmtStringLiteral(ip.stringToSlice(exports.items[0].opts.name), null),
fmtStringLiteral(ip.stringToSlice(@"export".opts.name), null),
@@ -2510,7 +2510,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
try o.dg.renderTypeAndName(w, enum_ty, .{ .identifier = "tag" }, Const, 0, .complete);
try w.writeAll(") {\n switch (tag) {\n");
for (enum_ty.enumFields(mod), 0..) |name_ip, index_usize| {
- const index = @intCast(u32, index_usize);
+ const index = @as(u32, @intCast(index_usize));
const name = mod.intern_pool.stringToSlice(name_ip);
const tag_val = try mod.enumValueFieldIndex(enum_ty, index);
@@ -2783,7 +2783,7 @@ fn genBodyResolveState(f: *Function, inst: Air.Inst.Index, leading_deaths: []con
// Remember how many locals there were before entering the body so that we can free any that
// were newly introduced. Any new locals must necessarily be logically free after the then
// branch is complete.
- const pre_locals_len = @intCast(LocalIndex, f.locals.items.len);
+ const pre_locals_len = @as(LocalIndex, @intCast(f.locals.items.len));
for (leading_deaths) |death| {
try die(f, inst, Air.indexToRef(death));
@@ -2804,7 +2804,7 @@ fn genBodyResolveState(f: *Function, inst: Air.Inst.Index, leading_deaths: []con
// them, unless they were used to store allocs.
for (pre_locals_len..f.locals.items.len) |local_i| {
- const local_index = @intCast(LocalIndex, local_i);
+ const local_index = @as(LocalIndex, @intCast(local_i));
if (f.allocs.contains(local_index)) {
continue;
}
@@ -3364,7 +3364,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
const bit_offset_val = try mod.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset);
- const field_ty = try mod.intType(.unsigned, @intCast(u16, src_ty.bitSize(mod)));
+ const field_ty = try mod.intType(.unsigned, @as(u16, @intCast(src_ty.bitSize(mod))));
try f.writeCValue(writer, local, .Other);
try v.elem(f, writer);
@@ -3667,7 +3667,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
var mask = try BigInt.Managed.initCapacity(stack.get(), BigInt.calcTwosCompLimbCount(host_bits));
defer mask.deinit();
- try mask.setTwosCompIntLimit(.max, .unsigned, @intCast(usize, src_bits));
+ try mask.setTwosCompIntLimit(.max, .unsigned, @as(usize, @intCast(src_bits)));
try mask.shiftLeft(&mask, ptr_info.packed_offset.bit_offset);
try mask.bitNotWrap(&mask, .unsigned, host_bits);
@@ -4096,7 +4096,7 @@ fn airCall(
const pl_op = f.air.instructions.items(.data)[inst].pl_op;
const extra = f.air.extraData(Air.Call, pl_op.payload);
- const args = @ptrCast([]const Air.Inst.Ref, f.air.extra[extra.end..][0..extra.data.args_len]);
+ const args = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[extra.end..][0..extra.data.args_len]));
const resolved_args = try gpa.alloc(CValue, args.len);
defer gpa.free(resolved_args);
@@ -4537,7 +4537,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca
wrap_cty = elem_cty.toSignedness(dest_info.signedness);
need_bitcasts = wrap_cty.?.tag() == .zig_i128;
bits -= 1;
- bits %= @intCast(u16, f.byteSize(elem_cty) * 8);
+ bits %= @as(u16, @intCast(f.byteSize(elem_cty) * 8));
bits += 1;
}
try writer.writeAll(" = ");
@@ -4711,7 +4711,7 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue {
var extra_index: usize = switch_br.end;
for (0..switch_br.data.cases_len) |case_i| {
const case = f.air.extraData(Air.SwitchBr.Case, extra_index);
- const items = @ptrCast([]const Air.Inst.Ref, f.air.extra[case.end..][0..case.data.items_len]);
+ const items = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[case.end..][0..case.data.items_len]));
const case_body = f.air.extra[case.end + items.len ..][0..case.data.body_len];
extra_index = case.end + case.data.items_len + case_body.len;
@@ -4771,13 +4771,13 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
const mod = f.object.dg.module;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const extra = f.air.extraData(Air.Asm, ty_pl.payload);
- const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0;
- const clobbers_len = @truncate(u31, extra.data.flags);
+ const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0;
+ const clobbers_len = @as(u31, @truncate(extra.data.flags));
const gpa = f.object.dg.gpa;
var extra_i: usize = extra.end;
- const outputs = @ptrCast([]const Air.Inst.Ref, f.air.extra[extra_i..][0..extra.data.outputs_len]);
+ const outputs = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[extra_i..][0..extra.data.outputs_len]));
extra_i += outputs.len;
- const inputs = @ptrCast([]const Air.Inst.Ref, f.air.extra[extra_i..][0..extra.data.inputs_len]);
+ const inputs = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[extra_i..][0..extra.data.inputs_len]));
extra_i += inputs.len;
const result = result: {
@@ -4794,7 +4794,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
break :local local;
} else .none;
- const locals_begin = @intCast(LocalIndex, f.locals.items.len);
+ const locals_begin = @as(LocalIndex, @intCast(f.locals.items.len));
const constraints_extra_begin = extra_i;
for (outputs) |output| {
const extra_bytes = mem.sliceAsBytes(f.air.extra[extra_i..]);
@@ -5402,7 +5402,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
inst_ty.intInfo(mod).signedness
else
.unsigned;
- const field_int_ty = try mod.intType(field_int_signedness, @intCast(u16, inst_ty.bitSize(mod)));
+ const field_int_ty = try mod.intType(field_int_signedness, @as(u16, @intCast(inst_ty.bitSize(mod))));
const temp_local = try f.allocLocal(inst, field_int_ty);
try f.writeCValue(writer, temp_local, .Other);
@@ -6033,7 +6033,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
try reap(f, inst, &.{ extra.ptr, extra.expected_value, extra.new_value });
const repr_ty = if (ty.isRuntimeFloat())
- mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable
+ mod.intType(.unsigned, @as(u16, @intCast(ty.abiSize(mod) * 8))) catch unreachable
else
ty;
@@ -6136,7 +6136,7 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
const operand_mat = try Materialize.start(f, inst, writer, ty, operand);
try reap(f, inst, &.{ pl_op.operand, extra.operand });
- const repr_bits = @intCast(u16, ty.abiSize(mod) * 8);
+ const repr_bits = @as(u16, @intCast(ty.abiSize(mod) * 8));
const is_float = ty.isRuntimeFloat();
const is_128 = repr_bits == 128;
const repr_ty = if (is_float) mod.intType(.unsigned, repr_bits) catch unreachable else ty;
@@ -6186,7 +6186,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const ty = ptr_ty.childType(mod);
const repr_ty = if (ty.isRuntimeFloat())
- mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable
+ mod.intType(.unsigned, @as(u16, @intCast(ty.abiSize(mod) * 8))) catch unreachable
else
ty;
@@ -6226,7 +6226,7 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
const repr_ty = if (ty.isRuntimeFloat())
- mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable
+ mod.intType(.unsigned, @as(u16, @intCast(ty.abiSize(mod) * 8))) catch unreachable
else
ty;
@@ -6574,7 +6574,7 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll("] = ");
const mask_elem = (try mask.elemValue(mod, index)).toSignedInt(mod);
- const src_val = try mod.intValue(Type.usize, @intCast(u64, mask_elem ^ mask_elem >> 63));
+ const src_val = try mod.intValue(Type.usize, @as(u64, @intCast(mask_elem ^ mask_elem >> 63)));
try f.writeCValue(writer, if (mask_elem >= 0) lhs else rhs, .Other);
try writer.writeByte('[');
@@ -6745,8 +6745,8 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
const ip = &mod.intern_pool;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const inst_ty = f.typeOfIndex(inst);
- const len = @intCast(usize, inst_ty.arrayLen(mod));
- const elements = @ptrCast([]const Air.Inst.Ref, f.air.extra[ty_pl.payload..][0..len]);
+ const len = @as(usize, @intCast(inst_ty.arrayLen(mod)));
+ const elements = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[ty_pl.payload..][0..len]));
const gpa = f.object.dg.gpa;
const resolved_elements = try gpa.alloc(CValue, elements.len);
defer gpa.free(resolved_elements);
@@ -7387,7 +7387,7 @@ fn fmtStringLiteral(str: []const u8, sentinel: ?u8) std.fmt.Formatter(formatStri
fn undefPattern(comptime IntType: type) IntType {
const int_info = @typeInfo(IntType).Int;
const UnsignedType = std.meta.Int(.unsigned, int_info.bits);
- return @bitCast(IntType, @as(UnsignedType, (1 << (int_info.bits | 1)) / 3));
+ return @as(IntType, @bitCast(@as(UnsignedType, (1 << (int_info.bits | 1)) / 3)));
}
const FormatIntLiteralContext = struct {
@@ -7438,7 +7438,7 @@ fn formatIntLiteral(
} else data.val.toBigInt(&int_buf, mod);
assert(int.fitsInTwosComp(data.int_info.signedness, data.int_info.bits));
- const c_bits = @intCast(usize, data.cty.byteSize(data.dg.ctypes.set, target) * 8);
+ const c_bits = @as(usize, @intCast(data.cty.byteSize(data.dg.ctypes.set, target) * 8));
var one_limbs: [BigInt.calcLimbLen(1)]BigIntLimb = undefined;
const one = BigInt.Mutable.init(&one_limbs, 1).toConst();
@@ -7471,7 +7471,7 @@ fn formatIntLiteral(
const array_data = data.cty.castTag(.array).?.data;
break :info .{
.cty = data.dg.indexToCType(array_data.elem_type),
- .count = @intCast(usize, array_data.len),
+ .count = @as(usize, @intCast(array_data.len)),
.endian = target.cpu.arch.endian(),
.homogeneous = true,
};
@@ -7527,7 +7527,7 @@ fn formatIntLiteral(
var c_limb_int_info = std.builtin.Type.Int{
.signedness = undefined,
- .bits = @intCast(u16, @divExact(c_bits, c_limb_info.count)),
+ .bits = @as(u16, @intCast(@divExact(c_bits, c_limb_info.count))),
};
var c_limb_cty: CType = undefined;
@@ -7727,7 +7727,7 @@ fn lowerFnRetTy(ret_ty: Type, mod: *Module) !Type {
fn lowersToArray(ty: Type, mod: *Module) bool {
return switch (ty.zigTypeTag(mod)) {
.Array, .Vector => return true,
- else => return ty.isAbiInt(mod) and toCIntBits(@intCast(u32, ty.bitSize(mod))) == null,
+ else => return ty.isAbiInt(mod) and toCIntBits(@as(u32, @intCast(ty.bitSize(mod)))) == null,
};
}
@@ -7735,7 +7735,7 @@ fn reap(f: *Function, inst: Air.Inst.Index, operands: []const Air.Inst.Ref) !voi
assert(operands.len <= Liveness.bpi - 1);
var tomb_bits = f.liveness.getTombBits(inst);
for (operands) |operand| {
- const dies = @truncate(u1, tomb_bits) != 0;
+ const dies = @as(u1, @truncate(tomb_bits)) != 0;
tomb_bits >>= 1;
if (!dies) continue;
try die(f, inst, operand);
diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig
index c8ce0be380..efff2e557c 100644
--- a/src/codegen/c/type.zig
+++ b/src/codegen/c/type.zig
@@ -138,7 +138,7 @@ pub const CType = extern union {
pub fn toIndex(self: Tag) Index {
assert(!self.hasPayload());
- return @intCast(Index, @intFromEnum(self));
+ return @as(Index, @intCast(@intFromEnum(self)));
}
pub fn Type(comptime self: Tag) type {
@@ -330,7 +330,7 @@ pub const CType = extern union {
store: *const Set,
pub fn hash(self: @This(), cty: CType) Map.Hash {
- return @truncate(Map.Hash, cty.hash(self.store.*));
+ return @as(Map.Hash, @truncate(cty.hash(self.store.*)));
}
pub fn eql(_: @This(), lhs: CType, rhs: CType, _: usize) bool {
return lhs.eql(rhs);
@@ -340,7 +340,7 @@ pub const CType = extern union {
map: Map = .{},
pub fn indexToCType(self: Set, index: Index) CType {
- if (index < Tag.no_payload_count) return initTag(@enumFromInt(Tag, index));
+ if (index < Tag.no_payload_count) return initTag(@as(Tag, @enumFromInt(index)));
return self.map.keys()[index - Tag.no_payload_count];
}
@@ -362,7 +362,7 @@ pub const CType = extern union {
return if (self.map.getIndexAdapted(
ty,
TypeAdapter32{ .kind = kind, .lookup = lookup, .convert = &convert },
- )) |idx| @intCast(Index, Tag.no_payload_count + idx) else null;
+ )) |idx| @as(Index, @intCast(Tag.no_payload_count + idx)) else null;
}
};
@@ -376,7 +376,7 @@ pub const CType = extern union {
pub fn cTypeToIndex(self: *Promoted, cty: CType) Allocator.Error!Index {
const t = cty.tag();
- if (@intFromEnum(t) < Tag.no_payload_count) return @intCast(Index, @intFromEnum(t));
+ if (@intFromEnum(t) < Tag.no_payload_count) return @as(Index, @intCast(@intFromEnum(t)));
const gop = try self.set.map.getOrPutContext(self.gpa(), cty, .{ .store = &self.set });
if (!gop.found_existing) gop.key_ptr.* = cty;
@@ -386,7 +386,7 @@ pub const CType = extern union {
assert(cty.eql(key.*));
assert(cty.hash(self.set) == key.hash(self.set));
}
- return @intCast(Index, Tag.no_payload_count + gop.index);
+ return @as(Index, @intCast(Tag.no_payload_count + gop.index));
}
pub fn typeToIndex(
@@ -424,7 +424,7 @@ pub const CType = extern union {
assert(adapter.eql(ty, cty.*));
assert(adapter.hash(ty) == cty.hash(self.set));
}
- return @intCast(Index, Tag.no_payload_count + gop.index);
+ return @as(Index, @intCast(Tag.no_payload_count + gop.index));
}
};
@@ -1388,7 +1388,7 @@ pub const CType = extern union {
.len = @divExact(abi_size, abi_align),
.elem_type = tagFromIntInfo(.{
.signedness = .unsigned,
- .bits = @intCast(u16, abi_align * 8),
+ .bits = @as(u16, @intCast(abi_align * 8)),
}).toIndex(),
} } };
self.value = .{ .cty = initPayload(&self.storage.seq) };
@@ -1492,7 +1492,7 @@ pub const CType = extern union {
if (mod.typeToStruct(ty)) |struct_obj| {
try self.initType(struct_obj.backing_int_ty, kind, lookup);
} else {
- const bits = @intCast(u16, ty.bitSize(mod));
+ const bits = @as(u16, @intCast(ty.bitSize(mod)));
const int_ty = try mod.intType(.unsigned, bits);
try self.initType(int_ty, kind, lookup);
}
@@ -2299,7 +2299,7 @@ pub const CType = extern union {
}
pub fn hash(self: @This(), ty: Type) u32 {
- return @truncate(u32, self.to64().hash(ty));
+ return @as(u32, @truncate(self.to64().hash(ty)));
}
};
};
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 75b8a19e7d..dd8abb1205 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -592,7 +592,7 @@ pub const Object = struct {
llvm_errors[0] = llvm_slice_ty.getUndef();
for (llvm_errors[1..], error_name_list[1..]) |*llvm_error, name_nts| {
const name = mod.intern_pool.stringToSlice(name_nts);
- const str_init = o.context.constString(name.ptr, @intCast(c_uint, name.len), .False);
+ const str_init = o.context.constString(name.ptr, @as(c_uint, @intCast(name.len)), .False);
const str_global = o.llvm_module.addGlobal(str_init.typeOf(), "");
str_global.setInitializer(str_init);
str_global.setLinkage(.Private);
@@ -607,7 +607,7 @@ pub const Object = struct {
llvm_error.* = llvm_slice_ty.constNamedStruct(&slice_fields, slice_fields.len);
}
- const error_name_table_init = llvm_slice_ty.constArray(llvm_errors.ptr, @intCast(c_uint, error_name_list.len));
+ const error_name_table_init = llvm_slice_ty.constArray(llvm_errors.ptr, @as(c_uint, @intCast(error_name_list.len)));
const error_name_table_global = o.llvm_module.addGlobal(error_name_table_init.typeOf(), "");
error_name_table_global.setInitializer(error_name_table_init);
@@ -1027,7 +1027,7 @@ pub const Object = struct {
llvm_arg_i += 1;
const param_llvm_ty = try o.lowerType(param_ty);
- const abi_size = @intCast(c_uint, param_ty.abiSize(mod));
+ const abi_size = @as(c_uint, @intCast(param_ty.abiSize(mod)));
const int_llvm_ty = o.context.intType(abi_size * 8);
const alignment = @max(
param_ty.abiAlignment(mod),
@@ -1053,7 +1053,7 @@ pub const Object = struct {
const ptr_info = param_ty.ptrInfo(mod);
if (math.cast(u5, it.zig_index - 1)) |i| {
- if (@truncate(u1, fn_info.noalias_bits >> i) != 0) {
+ if (@as(u1, @truncate(fn_info.noalias_bits >> i)) != 0) {
o.addArgAttr(llvm_func, llvm_arg_i, "noalias");
}
}
@@ -1083,9 +1083,9 @@ pub const Object = struct {
const param_llvm_ty = try o.lowerType(param_ty);
const param_alignment = param_ty.abiAlignment(mod);
const arg_ptr = buildAllocaInner(o.context, builder, llvm_func, false, param_llvm_ty, param_alignment, target);
- const llvm_ty = o.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False);
+ const llvm_ty = o.context.structType(field_types.ptr, @as(c_uint, @intCast(field_types.len)), .False);
for (field_types, 0..) |_, field_i_usize| {
- const field_i = @intCast(c_uint, field_i_usize);
+ const field_i = @as(c_uint, @intCast(field_i_usize));
const param = llvm_func.getParam(llvm_arg_i);
llvm_arg_i += 1;
const field_ptr = builder.buildStructGEP(llvm_ty, arg_ptr, field_i, "");
@@ -1289,11 +1289,11 @@ pub const Object = struct {
if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default);
if (self.di_map.get(decl)) |di_node| {
if (try decl.isFunction(mod)) {
- const di_func = @ptrCast(*llvm.DISubprogram, di_node);
+ const di_func = @as(*llvm.DISubprogram, @ptrCast(di_node));
const linkage_name = llvm.MDString.get(self.context, decl_name.ptr, decl_name.len);
di_func.replaceLinkageName(linkage_name);
} else {
- const di_global = @ptrCast(*llvm.DIGlobalVariable, di_node);
+ const di_global = @as(*llvm.DIGlobalVariable, @ptrCast(di_node));
const linkage_name = llvm.MDString.get(self.context, decl_name.ptr, decl_name.len);
di_global.replaceLinkageName(linkage_name);
}
@@ -1315,11 +1315,11 @@ pub const Object = struct {
if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.DLLExport);
if (self.di_map.get(decl)) |di_node| {
if (try decl.isFunction(mod)) {
- const di_func = @ptrCast(*llvm.DISubprogram, di_node);
+ const di_func = @as(*llvm.DISubprogram, @ptrCast(di_node));
const linkage_name = llvm.MDString.get(self.context, exp_name.ptr, exp_name.len);
di_func.replaceLinkageName(linkage_name);
} else {
- const di_global = @ptrCast(*llvm.DIGlobalVariable, di_node);
+ const di_global = @as(*llvm.DIGlobalVariable, @ptrCast(di_node));
const linkage_name = llvm.MDString.get(self.context, exp_name.ptr, exp_name.len);
di_global.replaceLinkageName(linkage_name);
}
@@ -1390,7 +1390,7 @@ pub const Object = struct {
const gop = try o.di_map.getOrPut(gpa, file);
errdefer assert(o.di_map.remove(file));
if (gop.found_existing) {
- return @ptrCast(*llvm.DIFile, gop.value_ptr.*);
+ return @as(*llvm.DIFile, @ptrCast(gop.value_ptr.*));
}
const dir_path_z = d: {
var buffer: [std.fs.MAX_PATH_BYTES]u8 = undefined;
@@ -1514,7 +1514,7 @@ pub const Object = struct {
if (@sizeOf(usize) == @sizeOf(u64)) {
enumerators[i] = dib.createEnumerator2(
field_name_z,
- @intCast(c_uint, bigint.limbs.len),
+ @as(c_uint, @intCast(bigint.limbs.len)),
bigint.limbs.ptr,
int_info.bits,
int_info.signedness == .unsigned,
@@ -1538,7 +1538,7 @@ pub const Object = struct {
ty.abiSize(mod) * 8,
ty.abiAlignment(mod) * 8,
enumerators.ptr,
- @intCast(c_int, enumerators.len),
+ @as(c_int, @intCast(enumerators.len)),
try o.lowerDebugType(int_ty, .full),
"",
);
@@ -1713,7 +1713,7 @@ pub const Object = struct {
ty.abiSize(mod) * 8,
ty.abiAlignment(mod) * 8,
try o.lowerDebugType(ty.childType(mod), .full),
- @intCast(i64, ty.arrayLen(mod)),
+ @as(i64, @intCast(ty.arrayLen(mod))),
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(array_di_ty));
@@ -2018,7 +2018,7 @@ pub const Object = struct {
0, // flags
null, // derived from
di_fields.items.ptr,
- @intCast(c_int, di_fields.items.len),
+ @as(c_int, @intCast(di_fields.items.len)),
0, // run time lang
null, // vtable holder
"", // unique id
@@ -2105,7 +2105,7 @@ pub const Object = struct {
0, // flags
null, // derived from
di_fields.items.ptr,
- @intCast(c_int, di_fields.items.len),
+ @as(c_int, @intCast(di_fields.items.len)),
0, // run time lang
null, // vtable holder
"", // unique id
@@ -2217,7 +2217,7 @@ pub const Object = struct {
ty.abiAlignment(mod) * 8, // align in bits
0, // flags
di_fields.items.ptr,
- @intCast(c_int, di_fields.items.len),
+ @as(c_int, @intCast(di_fields.items.len)),
0, // run time lang
"", // unique id
);
@@ -2330,7 +2330,7 @@ pub const Object = struct {
const fn_di_ty = dib.createSubroutineType(
param_di_types.items.ptr,
- @intCast(c_int, param_di_types.items.len),
+ @as(c_int, @intCast(param_di_types.items.len)),
0,
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
@@ -2487,7 +2487,7 @@ pub const Object = struct {
}
if (fn_info.alignment.toByteUnitsOptional()) |a| {
- llvm_fn.setAlignment(@intCast(c_uint, a));
+ llvm_fn.setAlignment(@as(c_uint, @intCast(a)));
}
// Function attributes that are independent of analysis results of the function body.
@@ -2710,7 +2710,7 @@ pub const Object = struct {
if (std.debug.runtime_safety) assert((try elem_ty.onePossibleValue(mod)) == null);
const elem_llvm_ty = try o.lowerType(elem_ty);
const total_len = t.arrayLen(mod) + @intFromBool(t.sentinel(mod) != null);
- return elem_llvm_ty.arrayType(@intCast(c_uint, total_len));
+ return elem_llvm_ty.arrayType(@as(c_uint, @intCast(total_len)));
},
.Vector => {
const elem_type = try o.lowerType(t.childType(mod));
@@ -2732,7 +2732,7 @@ pub const Object = struct {
};
const offset = child_ty.abiSize(mod) + 1;
const abi_size = t.abiSize(mod);
- const padding = @intCast(c_uint, abi_size - offset);
+ const padding = @as(c_uint, @intCast(abi_size - offset));
if (padding == 0) {
return o.context.structType(&fields_buf, 2, .False);
}
@@ -2761,7 +2761,7 @@ pub const Object = struct {
std.mem.alignForward(u64, error_size, payload_align) +
payload_size;
const abi_size = std.mem.alignForward(u64, payload_end, error_align);
- const padding = @intCast(c_uint, abi_size - payload_end);
+ const padding = @as(c_uint, @intCast(abi_size - payload_end));
if (padding == 0) {
return o.context.structType(&fields_buf, 2, .False);
}
@@ -2774,7 +2774,7 @@ pub const Object = struct {
std.mem.alignForward(u64, payload_size, error_align) +
error_size;
const abi_size = std.mem.alignForward(u64, error_end, payload_align);
- const padding = @intCast(c_uint, abi_size - error_end);
+ const padding = @as(c_uint, @intCast(abi_size - error_end));
if (padding == 0) {
return o.context.structType(&fields_buf, 2, .False);
}
@@ -2811,7 +2811,7 @@ pub const Object = struct {
const padding_len = offset - prev_offset;
if (padding_len > 0) {
- const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+ const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len)));
try llvm_field_types.append(gpa, llvm_array_ty);
}
const field_llvm_ty = try o.lowerType(field_ty.toType());
@@ -2824,14 +2824,14 @@ pub const Object = struct {
offset = std.mem.alignForward(u64, offset, big_align);
const padding_len = offset - prev_offset;
if (padding_len > 0) {
- const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+ const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len)));
try llvm_field_types.append(gpa, llvm_array_ty);
}
}
llvm_struct_ty.structSetBody(
llvm_field_types.items.ptr,
- @intCast(c_uint, llvm_field_types.items.len),
+ @as(c_uint, @intCast(llvm_field_types.items.len)),
.False,
);
@@ -2880,7 +2880,7 @@ pub const Object = struct {
const padding_len = offset - prev_offset;
if (padding_len > 0) {
- const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+ const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len)));
try llvm_field_types.append(gpa, llvm_array_ty);
}
const field_llvm_ty = try o.lowerType(field.ty);
@@ -2893,14 +2893,14 @@ pub const Object = struct {
offset = std.mem.alignForward(u64, offset, big_align);
const padding_len = offset - prev_offset;
if (padding_len > 0) {
- const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+ const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len)));
try llvm_field_types.append(gpa, llvm_array_ty);
}
}
llvm_struct_ty.structSetBody(
llvm_field_types.items.ptr,
- @intCast(c_uint, llvm_field_types.items.len),
+ @as(c_uint, @intCast(llvm_field_types.items.len)),
llvm.Bool.fromBool(any_underaligned_fields),
);
@@ -2914,7 +2914,7 @@ pub const Object = struct {
const union_obj = mod.typeToUnion(t).?;
if (union_obj.layout == .Packed) {
- const bitsize = @intCast(c_uint, t.bitSize(mod));
+ const bitsize = @as(c_uint, @intCast(t.bitSize(mod)));
const int_llvm_ty = o.context.intType(bitsize);
gop.value_ptr.* = int_llvm_ty;
return int_llvm_ty;
@@ -2939,9 +2939,9 @@ pub const Object = struct {
break :t llvm_aligned_field_ty;
}
const padding_len = if (layout.tag_size == 0)
- @intCast(c_uint, layout.abi_size - layout.most_aligned_field_size)
+ @as(c_uint, @intCast(layout.abi_size - layout.most_aligned_field_size))
else
- @intCast(c_uint, layout.payload_size - layout.most_aligned_field_size);
+ @as(c_uint, @intCast(layout.payload_size - layout.most_aligned_field_size));
const fields: [2]*llvm.Type = .{
llvm_aligned_field_ty,
o.context.intType(8).arrayType(padding_len),
@@ -3020,7 +3020,7 @@ pub const Object = struct {
},
.abi_sized_int => {
const param_ty = fn_info.param_types[it.zig_index - 1].toType();
- const abi_size = @intCast(c_uint, param_ty.abiSize(mod));
+ const abi_size = @as(c_uint, @intCast(param_ty.abiSize(mod)));
try llvm_params.append(o.context.intType(abi_size * 8));
},
.slice => {
@@ -3045,7 +3045,7 @@ pub const Object = struct {
.float_array => |count| {
const param_ty = fn_info.param_types[it.zig_index - 1].toType();
const float_ty = try o.lowerType(aarch64_c_abi.getFloatArrayType(param_ty, mod).?);
- const field_count = @intCast(c_uint, count);
+ const field_count = @as(c_uint, @intCast(count));
const arr_ty = float_ty.arrayType(field_count);
try llvm_params.append(arr_ty);
},
@@ -3059,7 +3059,7 @@ pub const Object = struct {
return llvm.functionType(
llvm_ret_ty,
llvm_params.items.ptr,
- @intCast(c_uint, llvm_params.items.len),
+ @as(c_uint, @intCast(llvm_params.items.len)),
llvm.Bool.fromBool(fn_info.is_var_args),
);
}
@@ -3219,7 +3219,7 @@ pub const Object = struct {
}
if (@sizeOf(usize) == @sizeOf(u64)) {
break :v llvm_type.constIntOfArbitraryPrecision(
- @intCast(c_uint, bigint.limbs.len),
+ @as(c_uint, @intCast(bigint.limbs.len)),
bigint.limbs.ptr,
);
}
@@ -3234,19 +3234,19 @@ pub const Object = struct {
const llvm_ty = try o.lowerType(tv.ty);
switch (tv.ty.floatBits(target)) {
16 => {
- const repr = @bitCast(u16, tv.val.toFloat(f16, mod));
+ const repr = @as(u16, @bitCast(tv.val.toFloat(f16, mod)));
const llvm_i16 = o.context.intType(16);
const int = llvm_i16.constInt(repr, .False);
return int.constBitCast(llvm_ty);
},
32 => {
- const repr = @bitCast(u32, tv.val.toFloat(f32, mod));
+ const repr = @as(u32, @bitCast(tv.val.toFloat(f32, mod)));
const llvm_i32 = o.context.intType(32);
const int = llvm_i32.constInt(repr, .False);
return int.constBitCast(llvm_ty);
},
64 => {
- const repr = @bitCast(u64, tv.val.toFloat(f64, mod));
+ const repr = @as(u64, @bitCast(tv.val.toFloat(f64, mod)));
const llvm_i64 = o.context.intType(64);
const int = llvm_i64.constInt(repr, .False);
return int.constBitCast(llvm_ty);
@@ -3265,7 +3265,7 @@ pub const Object = struct {
}
},
128 => {
- var buf: [2]u64 = @bitCast([2]u64, tv.val.toFloat(f128, mod));
+ var buf: [2]u64 = @as([2]u64, @bitCast(tv.val.toFloat(f128, mod)));
// LLVM seems to require that the lower half of the f128 be placed first
// in the buffer.
if (native_endian == .Big) {
@@ -3343,7 +3343,7 @@ pub const Object = struct {
.array_type => switch (aggregate.storage) {
.bytes => |bytes| return o.context.constString(
bytes.ptr,
- @intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)),
+ @as(c_uint, @intCast(tv.ty.arrayLenIncludingSentinel(mod))),
.True, // Don't null terminate. Bytes has the sentinel, if any.
),
.elems => |elem_vals| {
@@ -3358,21 +3358,21 @@ pub const Object = struct {
if (need_unnamed) {
return o.context.constStruct(
llvm_elems.ptr,
- @intCast(c_uint, llvm_elems.len),
+ @as(c_uint, @intCast(llvm_elems.len)),
.True,
);
} else {
const llvm_elem_ty = try o.lowerType(elem_ty);
return llvm_elem_ty.constArray(
llvm_elems.ptr,
- @intCast(c_uint, llvm_elems.len),
+ @as(c_uint, @intCast(llvm_elems.len)),
);
}
},
.repeated_elem => |val| {
const elem_ty = tv.ty.childType(mod);
const sentinel = tv.ty.sentinel(mod);
- const len = @intCast(usize, tv.ty.arrayLen(mod));
+ const len = @as(usize, @intCast(tv.ty.arrayLen(mod)));
const len_including_sent = len + @intFromBool(sentinel != null);
const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent);
defer gpa.free(llvm_elems);
@@ -3393,14 +3393,14 @@ pub const Object = struct {
if (need_unnamed) {
return o.context.constStruct(
llvm_elems.ptr,
- @intCast(c_uint, llvm_elems.len),
+ @as(c_uint, @intCast(llvm_elems.len)),
.True,
);
} else {
const llvm_elem_ty = try o.lowerType(elem_ty);
return llvm_elem_ty.constArray(
llvm_elems.ptr,
- @intCast(c_uint, llvm_elems.len),
+ @as(c_uint, @intCast(llvm_elems.len)),
);
}
},
@@ -3425,7 +3425,7 @@ pub const Object = struct {
}
return llvm.constVector(
llvm_elems.ptr,
- @intCast(c_uint, llvm_elems.len),
+ @as(c_uint, @intCast(llvm_elems.len)),
);
},
.anon_struct_type => |tuple| {
@@ -3450,7 +3450,7 @@ pub const Object = struct {
const padding_len = offset - prev_offset;
if (padding_len > 0) {
- const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+ const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len)));
// TODO make this and all other padding elsewhere in debug
// builds be 0xaa not undef.
llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
@@ -3472,7 +3472,7 @@ pub const Object = struct {
offset = std.mem.alignForward(u64, offset, big_align);
const padding_len = offset - prev_offset;
if (padding_len > 0) {
- const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+ const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len)));
llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
}
}
@@ -3480,14 +3480,14 @@ pub const Object = struct {
if (need_unnamed) {
return o.context.constStruct(
llvm_fields.items.ptr,
- @intCast(c_uint, llvm_fields.items.len),
+ @as(c_uint, @intCast(llvm_fields.items.len)),
.False,
);
} else {
const llvm_struct_ty = try o.lowerType(tv.ty);
return llvm_struct_ty.constNamedStruct(
llvm_fields.items.ptr,
- @intCast(c_uint, llvm_fields.items.len),
+ @as(c_uint, @intCast(llvm_fields.items.len)),
);
}
},
@@ -3498,7 +3498,7 @@ pub const Object = struct {
if (struct_obj.layout == .Packed) {
assert(struct_obj.haveLayout());
const big_bits = struct_obj.backing_int_ty.bitSize(mod);
- const int_llvm_ty = o.context.intType(@intCast(c_uint, big_bits));
+ const int_llvm_ty = o.context.intType(@as(c_uint, @intCast(big_bits)));
const fields = struct_obj.fields.values();
comptime assert(Type.packed_struct_layout_version == 2);
var running_int: *llvm.Value = int_llvm_ty.constNull();
@@ -3510,7 +3510,7 @@ pub const Object = struct {
.ty = field.ty,
.val = try tv.val.fieldValue(mod, i),
});
- const ty_bit_size = @intCast(u16, field.ty.bitSize(mod));
+ const ty_bit_size = @as(u16, @intCast(field.ty.bitSize(mod)));
const small_int_ty = o.context.intType(ty_bit_size);
const small_int_val = if (field.ty.isPtrAtRuntime(mod))
non_int_val.constPtrToInt(small_int_ty)
@@ -3547,7 +3547,7 @@ pub const Object = struct {
const padding_len = offset - prev_offset;
if (padding_len > 0) {
- const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+ const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len)));
// TODO make this and all other padding elsewhere in debug
// builds be 0xaa not undef.
llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
@@ -3569,7 +3569,7 @@ pub const Object = struct {
offset = std.mem.alignForward(u64, offset, big_align);
const padding_len = offset - prev_offset;
if (padding_len > 0) {
- const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+ const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len)));
llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
}
}
@@ -3577,13 +3577,13 @@ pub const Object = struct {
if (need_unnamed) {
return o.context.constStruct(
llvm_fields.items.ptr,
- @intCast(c_uint, llvm_fields.items.len),
+ @as(c_uint, @intCast(llvm_fields.items.len)),
.False,
);
} else {
return llvm_struct_ty.constNamedStruct(
llvm_fields.items.ptr,
- @intCast(c_uint, llvm_fields.items.len),
+ @as(c_uint, @intCast(llvm_fields.items.len)),
);
}
},
@@ -3616,7 +3616,7 @@ pub const Object = struct {
if (!field_ty.hasRuntimeBits(mod))
return llvm_union_ty.constNull();
const non_int_val = try lowerValue(o, .{ .ty = field_ty, .val = tag_and_val.val });
- const ty_bit_size = @intCast(u16, field_ty.bitSize(mod));
+ const ty_bit_size = @as(u16, @intCast(field_ty.bitSize(mod)));
const small_int_ty = o.context.intType(ty_bit_size);
const small_int_val = if (field_ty.isPtrAtRuntime(mod))
non_int_val.constPtrToInt(small_int_ty)
@@ -3632,7 +3632,7 @@ pub const Object = struct {
var need_unnamed: bool = layout.most_aligned_field != field_index;
const payload = p: {
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- const padding_len = @intCast(c_uint, layout.payload_size);
+ const padding_len = @as(c_uint, @intCast(layout.payload_size));
break :p o.context.intType(8).arrayType(padding_len).getUndef();
}
const field = try lowerValue(o, .{ .ty = field_ty, .val = tag_and_val.val });
@@ -3641,7 +3641,7 @@ pub const Object = struct {
if (field_size == layout.payload_size) {
break :p field;
}
- const padding_len = @intCast(c_uint, layout.payload_size - field_size);
+ const padding_len = @as(c_uint, @intCast(layout.payload_size - field_size));
const fields: [2]*llvm.Value = .{
field, o.context.intType(8).arrayType(padding_len).getUndef(),
};
@@ -3706,7 +3706,7 @@ pub const Object = struct {
}
if (@sizeOf(usize) == @sizeOf(u64)) {
break :v llvm_type.constIntOfArbitraryPrecision(
- @intCast(c_uint, bigint.limbs.len),
+ @as(c_uint, @intCast(bigint.limbs.len)),
bigint.limbs.ptr,
);
}
@@ -3799,7 +3799,7 @@ pub const Object = struct {
const parent_llvm_ptr = try o.lowerParentPtr(field_ptr.base.toValue(), byte_aligned);
const parent_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod);
- const field_index = @intCast(u32, field_ptr.index);
+ const field_index = @as(u32, @intCast(field_ptr.index));
const llvm_u32 = o.context.intType(32);
switch (parent_ty.zigTypeTag(mod)) {
.Union => {
@@ -3834,7 +3834,7 @@ pub const Object = struct {
var b: usize = 0;
for (parent_ty.structFields(mod).values()[0..field_index]) |field| {
if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
- b += @intCast(usize, field.ty.bitSize(mod));
+ b += @as(usize, @intCast(field.ty.bitSize(mod)));
}
break :b b;
};
@@ -3992,9 +3992,9 @@ pub const Object = struct {
) void {
const llvm_attr = o.context.createStringAttribute(
name.ptr,
- @intCast(c_uint, name.len),
+ @as(c_uint, @intCast(name.len)),
value.ptr,
- @intCast(c_uint, value.len),
+ @as(c_uint, @intCast(value.len)),
);
val.addAttributeAtIndex(index, llvm_attr);
}
@@ -4026,14 +4026,14 @@ pub const Object = struct {
.Enum => ty.intTagType(mod),
.Float => {
if (!is_rmw_xchg) return null;
- return o.context.intType(@intCast(c_uint, ty.abiSize(mod) * 8));
+ return o.context.intType(@as(c_uint, @intCast(ty.abiSize(mod) * 8)));
},
.Bool => return o.context.intType(8),
else => return null,
};
const bit_count = int_ty.intInfo(mod).bits;
if (!std.math.isPowerOfTwo(bit_count) or (bit_count % 8) != 0) {
- return o.context.intType(@intCast(c_uint, int_ty.abiSize(mod) * 8));
+ return o.context.intType(@as(c_uint, @intCast(int_ty.abiSize(mod) * 8)));
} else {
return null;
}
@@ -4051,7 +4051,7 @@ pub const Object = struct {
if (param_ty.isPtrAtRuntime(mod)) {
const ptr_info = param_ty.ptrInfo(mod);
if (math.cast(u5, param_index)) |i| {
- if (@truncate(u1, fn_info.noalias_bits >> i) != 0) {
+ if (@as(u1, @truncate(fn_info.noalias_bits >> i)) != 0) {
o.addArgAttr(llvm_fn, llvm_arg_i, "noalias");
}
}
@@ -4550,7 +4550,7 @@ pub const FuncGen = struct {
fn airCall(self: *FuncGen, inst: Air.Inst.Index, attr: llvm.CallAttr) !?*llvm.Value {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.Call, pl_op.payload);
- const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
+ const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]));
const o = self.dg.object;
const mod = o.module;
const callee_ty = self.typeOf(pl_op.operand);
@@ -4638,7 +4638,7 @@ pub const FuncGen = struct {
const arg = args[it.zig_index - 1];
const param_ty = self.typeOf(arg);
const llvm_arg = try self.resolveInst(arg);
- const abi_size = @intCast(c_uint, param_ty.abiSize(mod));
+ const abi_size = @as(c_uint, @intCast(param_ty.abiSize(mod)));
const int_llvm_ty = self.context.intType(abi_size * 8);
if (isByRef(param_ty, mod)) {
@@ -4683,10 +4683,10 @@ pub const FuncGen = struct {
break :p p;
};
- const llvm_ty = self.context.structType(llvm_types.ptr, @intCast(c_uint, llvm_types.len), .False);
+ const llvm_ty = self.context.structType(llvm_types.ptr, @as(c_uint, @intCast(llvm_types.len)), .False);
try llvm_args.ensureUnusedCapacity(it.llvm_types_len);
for (llvm_types, 0..) |field_ty, i_usize| {
- const i = @intCast(c_uint, i_usize);
+ const i = @as(c_uint, @intCast(i_usize));
const field_ptr = self.builder.buildStructGEP(llvm_ty, arg_ptr, i, "");
const load_inst = self.builder.buildLoad(field_ty, field_ptr, "");
load_inst.setAlignment(target.ptrBitWidth() / 8);
@@ -4742,7 +4742,7 @@ pub const FuncGen = struct {
try o.lowerType(zig_fn_ty),
llvm_fn,
llvm_args.items.ptr,
- @intCast(c_uint, llvm_args.items.len),
+ @as(c_uint, @intCast(llvm_args.items.len)),
toLlvmCallConv(fn_info.cc, target),
attr,
"",
@@ -4788,7 +4788,7 @@ pub const FuncGen = struct {
const llvm_arg_i = it.llvm_index - 2;
if (math.cast(u5, it.zig_index - 1)) |i| {
- if (@truncate(u1, fn_info.noalias_bits >> i) != 0) {
+ if (@as(u1, @truncate(fn_info.noalias_bits >> i)) != 0) {
o.addArgAttr(call, llvm_arg_i, "noalias");
}
}
@@ -5213,7 +5213,7 @@ pub const FuncGen = struct {
phi_node.addIncoming(
breaks.items(.val).ptr,
breaks.items(.bb).ptr,
- @intCast(c_uint, breaks.len),
+ @as(c_uint, @intCast(breaks.len)),
);
return phi_node;
}
@@ -5379,7 +5379,7 @@ pub const FuncGen = struct {
while (case_i < switch_br.data.cases_len) : (case_i += 1) {
const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
- const items = @ptrCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]);
+ const items = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[case.end..][0..case.data.items_len]));
const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
extra_index = case.end + case.data.items_len + case_body.len;
@@ -5479,7 +5479,7 @@ pub const FuncGen = struct {
}
}
- const operand_bits = @intCast(u16, operand_scalar_ty.bitSize(mod));
+ const operand_bits = @as(u16, @intCast(operand_scalar_ty.bitSize(mod)));
const rt_int_bits = compilerRtIntBits(operand_bits);
const rt_int_ty = self.context.intType(rt_int_bits);
var extended = e: {
@@ -5540,7 +5540,7 @@ pub const FuncGen = struct {
}
}
- const rt_int_bits = compilerRtIntBits(@intCast(u16, dest_scalar_ty.bitSize(mod)));
+ const rt_int_bits = compilerRtIntBits(@as(u16, @intCast(dest_scalar_ty.bitSize(mod))));
const ret_ty = self.context.intType(rt_int_bits);
const libc_ret_ty = if (rt_int_bits == 128 and (target.os.tag == .windows and target.cpu.arch == .x86_64)) b: {
// On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard
@@ -5806,12 +5806,12 @@ pub const FuncGen = struct {
const shifted_value = self.builder.buildLShr(containing_int, shift_amt, "");
const elem_llvm_ty = try o.lowerType(field_ty);
if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) {
- const elem_bits = @intCast(c_uint, field_ty.bitSize(mod));
+ const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod)));
const same_size_int = self.context.intType(elem_bits);
const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
return self.builder.buildBitCast(truncated_int, elem_llvm_ty, "");
} else if (field_ty.isPtrAtRuntime(mod)) {
- const elem_bits = @intCast(c_uint, field_ty.bitSize(mod));
+ const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod)));
const same_size_int = self.context.intType(elem_bits);
const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, "");
@@ -5828,12 +5828,12 @@ pub const FuncGen = struct {
const containing_int = struct_llvm_val;
const elem_llvm_ty = try o.lowerType(field_ty);
if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) {
- const elem_bits = @intCast(c_uint, field_ty.bitSize(mod));
+ const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod)));
const same_size_int = self.context.intType(elem_bits);
const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, "");
return self.builder.buildBitCast(truncated_int, elem_llvm_ty, "");
} else if (field_ty.isPtrAtRuntime(mod)) {
- const elem_bits = @intCast(c_uint, field_ty.bitSize(mod));
+ const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod)));
const same_size_int = self.context.intType(elem_bits);
const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, "");
return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, "");
@@ -5924,8 +5924,8 @@ pub const FuncGen = struct {
fn airDbgStmt(self: *FuncGen, inst: Air.Inst.Index) ?*llvm.Value {
const di_scope = self.di_scope orelse return null;
const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt;
- self.prev_dbg_line = @intCast(c_uint, self.base_line + dbg_stmt.line + 1);
- self.prev_dbg_column = @intCast(c_uint, dbg_stmt.column + 1);
+ self.prev_dbg_line = @as(c_uint, @intCast(self.base_line + dbg_stmt.line + 1));
+ self.prev_dbg_column = @as(c_uint, @intCast(dbg_stmt.column + 1));
const inlined_at = if (self.dbg_inlined.items.len > 0)
self.dbg_inlined.items[self.dbg_inlined.items.len - 1].loc
else
@@ -5949,7 +5949,7 @@ pub const FuncGen = struct {
const cur_debug_location = self.builder.getCurrentDebugLocation2();
try self.dbg_inlined.append(self.gpa, .{
- .loc = @ptrCast(*llvm.DILocation, cur_debug_location),
+ .loc = @as(*llvm.DILocation, @ptrCast(cur_debug_location)),
.scope = self.di_scope.?,
.base_line = self.base_line,
});
@@ -6107,13 +6107,13 @@ pub const FuncGen = struct {
const o = self.dg.object;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Asm, ty_pl.payload);
- const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0;
- const clobbers_len = @truncate(u31, extra.data.flags);
+ const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0;
+ const clobbers_len = @as(u31, @truncate(extra.data.flags));
var extra_i: usize = extra.end;
- const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]);
+ const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]));
extra_i += outputs.len;
- const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]);
+ const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]));
extra_i += inputs.len;
var llvm_constraints: std.ArrayListUnmanaged(u8) = .{};
@@ -6390,7 +6390,7 @@ pub const FuncGen = struct {
1 => llvm_ret_types[0],
else => self.context.structType(
llvm_ret_types.ptr,
- @intCast(c_uint, return_count),
+ @as(c_uint, @intCast(return_count)),
.False,
),
};
@@ -6398,7 +6398,7 @@ pub const FuncGen = struct {
const llvm_fn_ty = llvm.functionType(
ret_llvm_ty,
llvm_param_types.ptr,
- @intCast(c_uint, param_count),
+ @as(c_uint, @intCast(param_count)),
.False,
);
const asm_fn = llvm.getInlineAsm(
@@ -6416,7 +6416,7 @@ pub const FuncGen = struct {
llvm_fn_ty,
asm_fn,
llvm_param_values.ptr,
- @intCast(c_uint, param_count),
+ @as(c_uint, @intCast(param_count)),
.C,
.Auto,
"",
@@ -6433,7 +6433,7 @@ pub const FuncGen = struct {
if (llvm_ret_indirect[i]) continue;
const output_value = if (return_count > 1) b: {
- break :b self.builder.buildExtractValue(call, @intCast(c_uint, llvm_ret_i), "");
+ break :b self.builder.buildExtractValue(call, @as(c_uint, @intCast(llvm_ret_i)), "");
} else call;
if (output != .none) {
@@ -7315,7 +7315,7 @@ pub const FuncGen = struct {
result_vector: *llvm.Value,
vector_len: usize,
) !*llvm.Value {
- const args_len = @intCast(c_uint, args_vectors.len);
+ const args_len = @as(c_uint, @intCast(args_vectors.len));
const llvm_i32 = self.context.intType(32);
assert(args_len <= 3);
@@ -7345,7 +7345,7 @@ pub const FuncGen = struct {
const alias = o.llvm_module.getNamedGlobalAlias(fn_name.ptr, fn_name.len);
break :b if (alias) |a| a.getAliasee() else null;
} orelse b: {
- const params_len = @intCast(c_uint, param_types.len);
+ const params_len = @as(c_uint, @intCast(param_types.len));
const fn_type = llvm.functionType(return_type, param_types.ptr, params_len, .False);
const f = o.llvm_module.addFunction(fn_name, fn_type);
break :b f;
@@ -8319,8 +8319,8 @@ pub const FuncGen = struct {
return null;
const ordering = toLlvmAtomicOrdering(atomic_load.order);
const opt_abi_llvm_ty = o.getAtomicAbiType(elem_ty, false);
- const ptr_alignment = @intCast(u32, ptr_info.flags.alignment.toByteUnitsOptional() orelse
- ptr_info.child.toType().abiAlignment(mod));
+ const ptr_alignment = @as(u32, @intCast(ptr_info.flags.alignment.toByteUnitsOptional() orelse
+ ptr_info.child.toType().abiAlignment(mod)));
const ptr_volatile = llvm.Bool.fromBool(ptr_info.flags.is_volatile);
const elem_llvm_ty = try o.lowerType(elem_ty);
@@ -8696,10 +8696,10 @@ pub const FuncGen = struct {
const valid_block = self.context.appendBasicBlock(self.llvm_func, "Valid");
const invalid_block = self.context.appendBasicBlock(self.llvm_func, "Invalid");
const end_block = self.context.appendBasicBlock(self.llvm_func, "End");
- const switch_instr = self.builder.buildSwitch(operand, invalid_block, @intCast(c_uint, names.len));
+ const switch_instr = self.builder.buildSwitch(operand, invalid_block, @as(c_uint, @intCast(names.len)));
for (names) |name| {
- const err_int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(name).?);
+ const err_int = @as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(name).?));
const this_tag_int_value = try o.lowerValue(.{
.ty = Type.err_int,
.val = try mod.intValue(Type.err_int, err_int),
@@ -8779,10 +8779,10 @@ pub const FuncGen = struct {
const named_block = self.context.appendBasicBlock(fn_val, "Named");
const unnamed_block = self.context.appendBasicBlock(fn_val, "Unnamed");
const tag_int_value = fn_val.getParam(0);
- const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @intCast(c_uint, enum_type.names.len));
+ const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @as(c_uint, @intCast(enum_type.names.len)));
for (enum_type.names, 0..) |_, field_index_usize| {
- const field_index = @intCast(u32, field_index_usize);
+ const field_index = @as(u32, @intCast(field_index_usize));
const this_tag_int_value = int: {
break :int try o.lowerValue(.{
.ty = enum_ty,
@@ -8855,16 +8855,16 @@ pub const FuncGen = struct {
const bad_value_block = self.context.appendBasicBlock(fn_val, "BadValue");
const tag_int_value = fn_val.getParam(0);
- const switch_instr = self.builder.buildSwitch(tag_int_value, bad_value_block, @intCast(c_uint, enum_type.names.len));
+ const switch_instr = self.builder.buildSwitch(tag_int_value, bad_value_block, @as(c_uint, @intCast(enum_type.names.len)));
const array_ptr_indices = [_]*llvm.Value{
usize_llvm_ty.constNull(), usize_llvm_ty.constNull(),
};
for (enum_type.names, 0..) |name_ip, field_index_usize| {
- const field_index = @intCast(u32, field_index_usize);
+ const field_index = @as(u32, @intCast(field_index_usize));
const name = mod.intern_pool.stringToSlice(name_ip);
- const str_init = self.context.constString(name.ptr, @intCast(c_uint, name.len), .False);
+ const str_init = self.context.constString(name.ptr, @as(c_uint, @intCast(name.len)), .False);
const str_init_llvm_ty = str_init.typeOf();
const str_global = o.llvm_module.addGlobal(str_init_llvm_ty, "");
str_global.setInitializer(str_init);
@@ -8986,7 +8986,7 @@ pub const FuncGen = struct {
val.* = llvm_i32.getUndef();
} else {
const int = elem.toSignedInt(mod);
- const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int + a_len);
+ const unsigned = if (int >= 0) @as(u32, @intCast(int)) else @as(u32, @intCast(~int + a_len));
val.* = llvm_i32.constInt(unsigned, .False);
}
}
@@ -9150,8 +9150,8 @@ pub const FuncGen = struct {
const mod = o.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const result_ty = self.typeOfIndex(inst);
- const len = @intCast(usize, result_ty.arrayLen(mod));
- const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
+ const len = @as(usize, @intCast(result_ty.arrayLen(mod)));
+ const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len]));
const llvm_result_ty = try o.lowerType(result_ty);
switch (result_ty.zigTypeTag(mod)) {
@@ -9171,7 +9171,7 @@ pub const FuncGen = struct {
const struct_obj = mod.typeToStruct(result_ty).?;
assert(struct_obj.haveLayout());
const big_bits = struct_obj.backing_int_ty.bitSize(mod);
- const int_llvm_ty = self.context.intType(@intCast(c_uint, big_bits));
+ const int_llvm_ty = self.context.intType(@as(c_uint, @intCast(big_bits)));
const fields = struct_obj.fields.values();
comptime assert(Type.packed_struct_layout_version == 2);
var running_int: *llvm.Value = int_llvm_ty.constNull();
@@ -9181,7 +9181,7 @@ pub const FuncGen = struct {
if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const non_int_val = try self.resolveInst(elem);
- const ty_bit_size = @intCast(u16, field.ty.bitSize(mod));
+ const ty_bit_size = @as(u16, @intCast(field.ty.bitSize(mod)));
const small_int_ty = self.context.intType(ty_bit_size);
const small_int_val = if (field.ty.isPtrAtRuntime(mod))
self.builder.buildPtrToInt(non_int_val, small_int_ty, "")
@@ -9251,7 +9251,7 @@ pub const FuncGen = struct {
for (elements, 0..) |elem, i| {
const indices: [2]*llvm.Value = .{
llvm_usize.constNull(),
- llvm_usize.constInt(@intCast(c_uint, i), .False),
+ llvm_usize.constInt(@as(c_uint, @intCast(i)), .False),
};
const elem_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, "");
const llvm_elem = try self.resolveInst(elem);
@@ -9260,7 +9260,7 @@ pub const FuncGen = struct {
if (array_info.sentinel) |sent_val| {
const indices: [2]*llvm.Value = .{
llvm_usize.constNull(),
- llvm_usize.constInt(@intCast(c_uint, array_info.len), .False),
+ llvm_usize.constInt(@as(c_uint, @intCast(array_info.len)), .False),
};
const elem_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, "");
const llvm_elem = try self.resolveValue(.{
@@ -9289,10 +9289,10 @@ pub const FuncGen = struct {
if (union_obj.layout == .Packed) {
const big_bits = union_ty.bitSize(mod);
- const int_llvm_ty = self.context.intType(@intCast(c_uint, big_bits));
+ const int_llvm_ty = self.context.intType(@as(c_uint, @intCast(big_bits)));
const field = union_obj.fields.values()[extra.field_index];
const non_int_val = try self.resolveInst(extra.init);
- const ty_bit_size = @intCast(u16, field.ty.bitSize(mod));
+ const ty_bit_size = @as(u16, @intCast(field.ty.bitSize(mod)));
const small_int_ty = self.context.intType(ty_bit_size);
const small_int_val = if (field.ty.isPtrAtRuntime(mod))
self.builder.buildPtrToInt(non_int_val, small_int_ty, "")
@@ -9332,13 +9332,13 @@ pub const FuncGen = struct {
const llvm_union_ty = t: {
const payload = p: {
if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) {
- const padding_len = @intCast(c_uint, layout.payload_size);
+ const padding_len = @as(c_uint, @intCast(layout.payload_size));
break :p self.context.intType(8).arrayType(padding_len);
}
if (field_size == layout.payload_size) {
break :p field_llvm_ty;
}
- const padding_len = @intCast(c_uint, layout.payload_size - field_size);
+ const padding_len = @as(c_uint, @intCast(layout.payload_size - field_size));
const fields: [2]*llvm.Type = .{
field_llvm_ty, self.context.intType(8).arrayType(padding_len),
};
@@ -9766,8 +9766,8 @@ pub const FuncGen = struct {
const elem_ty = info.child.toType();
if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;
- const ptr_alignment = @intCast(u32, info.flags.alignment.toByteUnitsOptional() orelse
- elem_ty.abiAlignment(mod));
+ const ptr_alignment = @as(u32, @intCast(info.flags.alignment.toByteUnitsOptional() orelse
+ elem_ty.abiAlignment(mod)));
const ptr_volatile = llvm.Bool.fromBool(info.flags.is_volatile);
assert(info.flags.vector_index != .runtime);
@@ -9799,7 +9799,7 @@ pub const FuncGen = struct {
containing_int.setAlignment(ptr_alignment);
containing_int.setVolatile(ptr_volatile);
- const elem_bits = @intCast(c_uint, ptr_ty.childType(mod).bitSize(mod));
+ const elem_bits = @as(c_uint, @intCast(ptr_ty.childType(mod).bitSize(mod)));
const shift_amt = containing_int.typeOf().constInt(info.packed_offset.bit_offset, .False);
const shifted_value = self.builder.buildLShr(containing_int, shift_amt, "");
const elem_llvm_ty = try o.lowerType(elem_ty);
@@ -9872,7 +9872,7 @@ pub const FuncGen = struct {
assert(ordering == .NotAtomic);
containing_int.setAlignment(ptr_alignment);
containing_int.setVolatile(ptr_volatile);
- const elem_bits = @intCast(c_uint, ptr_ty.childType(mod).bitSize(mod));
+ const elem_bits = @as(c_uint, @intCast(ptr_ty.childType(mod).bitSize(mod)));
const containing_int_ty = containing_int.typeOf();
const shift_amt = containing_int_ty.constInt(info.packed_offset.bit_offset, .False);
// Convert to equally-sized integer type in order to perform the bit
@@ -9945,7 +9945,7 @@ pub const FuncGen = struct {
if (!target_util.hasValgrindSupport(target)) return default_value;
const usize_llvm_ty = fg.context.intType(target.ptrBitWidth());
- const usize_alignment = @intCast(c_uint, Type.usize.abiSize(mod));
+ const usize_alignment = @as(c_uint, @intCast(Type.usize.abiSize(mod)));
const array_llvm_ty = usize_llvm_ty.arrayType(6);
const array_ptr = fg.valgrind_client_request_array orelse a: {
@@ -9957,7 +9957,7 @@ pub const FuncGen = struct {
const zero = usize_llvm_ty.constInt(0, .False);
for (array_elements, 0..) |elem, i| {
const indexes = [_]*llvm.Value{
- zero, usize_llvm_ty.constInt(@intCast(c_uint, i), .False),
+ zero, usize_llvm_ty.constInt(@as(c_uint, @intCast(i)), .False),
};
const elem_ptr = fg.builder.buildInBoundsGEP(array_llvm_ty, array_ptr, &indexes, indexes.len, "");
const store_inst = fg.builder.buildStore(elem, elem_ptr);
@@ -10530,7 +10530,7 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type {
assert(classes[0] == .direct and classes[1] == .none);
const scalar_type = wasm_c_abi.scalarType(return_type, mod);
const abi_size = scalar_type.abiSize(mod);
- return o.context.intType(@intCast(c_uint, abi_size * 8));
+ return o.context.intType(@as(c_uint, @intCast(abi_size * 8)));
},
.aarch64, .aarch64_be => {
switch (aarch64_c_abi.classifyType(return_type, mod)) {
@@ -10539,7 +10539,7 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type {
.byval => return o.lowerType(return_type),
.integer => {
const bit_size = return_type.bitSize(mod);
- return o.context.intType(@intCast(c_uint, bit_size));
+ return o.context.intType(@as(c_uint, @intCast(bit_size)));
},
.double_integer => return o.context.intType(64).arrayType(2),
}
@@ -10560,7 +10560,7 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type {
.memory => return o.context.voidType(),
.integer => {
const bit_size = return_type.bitSize(mod);
- return o.context.intType(@intCast(c_uint, bit_size));
+ return o.context.intType(@as(c_uint, @intCast(bit_size)));
},
.double_integer => {
var llvm_types_buffer: [2]*llvm.Type = .{
@@ -10598,7 +10598,7 @@ fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type {
return o.lowerType(return_type);
} else {
const abi_size = return_type.abiSize(mod);
- return o.context.intType(@intCast(c_uint, abi_size * 8));
+ return o.context.intType(@as(c_uint, @intCast(abi_size * 8)));
}
},
.win_i128 => return o.context.intType(64).vectorType(2),
@@ -10656,7 +10656,7 @@ fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type
}
if (classes[0] == .integer and classes[1] == .none) {
const abi_size = return_type.abiSize(mod);
- return o.context.intType(@intCast(c_uint, abi_size * 8));
+ return o.context.intType(@as(c_uint, @intCast(abi_size * 8)));
}
return o.context.structType(&llvm_types_buffer, llvm_types_index, .False);
}
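
The ABI-lowering hunks above repeatedly narrow a 64-bit byte size into the c_uint bit width that LLVM's intType expects. A self-contained sketch of the pattern, assuming an in-range illustrative size:

const std = @import("std");

test "narrowing an ABI byte size to a c_uint bit width" {
    const abi_size: u64 = 4;
    // Old: @intCast(c_uint, abi_size * 8); the new form reads outside-in.
    const bit_width = @as(c_uint, @intCast(abi_size * 8));
    try std.testing.expectEqual(@as(c_uint, 32), bit_width);
}
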
@@ -11145,28 +11145,28 @@ const AnnotatedDITypePtr = enum(usize) {
fn initFwd(di_type: *llvm.DIType) AnnotatedDITypePtr {
const addr = @intFromPtr(di_type);
- assert(@truncate(u1, addr) == 0);
- return @enumFromInt(AnnotatedDITypePtr, addr | 1);
+ assert(@as(u1, @truncate(addr)) == 0);
+ return @as(AnnotatedDITypePtr, @enumFromInt(addr | 1));
}
fn initFull(di_type: *llvm.DIType) AnnotatedDITypePtr {
const addr = @intFromPtr(di_type);
- return @enumFromInt(AnnotatedDITypePtr, addr);
+ return @as(AnnotatedDITypePtr, @enumFromInt(addr));
}
fn init(di_type: *llvm.DIType, resolve: Object.DebugResolveStatus) AnnotatedDITypePtr {
const addr = @intFromPtr(di_type);
const bit = @intFromBool(resolve == .fwd);
- return @enumFromInt(AnnotatedDITypePtr, addr | bit);
+ return @as(AnnotatedDITypePtr, @enumFromInt(addr | bit));
}
fn toDIType(self: AnnotatedDITypePtr) *llvm.DIType {
const fixed_addr = @intFromEnum(self) & ~@as(usize, 1);
- return @ptrFromInt(*llvm.DIType, fixed_addr);
+ return @as(*llvm.DIType, @ptrFromInt(fixed_addr));
}
fn isFwdOnly(self: AnnotatedDITypePtr) bool {
- return @truncate(u1, @intFromEnum(self)) != 0;
+ return @as(u1, @truncate(@intFromEnum(self))) != 0;
}
};
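
AnnotatedDITypePtr above packs a one-bit "forward declaration" flag into the low bit of a pointer-sized enum. A minimal sketch of the same tagging scheme in the new syntax; Tagged and the test value are illustrative, not from this commit:

const std = @import("std");

test "low-bit pointer tagging with the new cast builtins" {
    const Tagged = enum(usize) { _ };
    var x: u32 = 42;
    // u32 alignment is at least 2, so the low address bit is free for a flag.
    const addr = @intFromPtr(&x);
    const tagged = @as(Tagged, @enumFromInt(addr | 1));
    try std.testing.expect(@as(u1, @truncate(@intFromEnum(tagged))) != 0);
    // Clear the flag bit to recover the original pointer.
    const untagged = @intFromEnum(tagged) & ~@as(usize, 1);
    try std.testing.expectEqual(@as(u32, 42), @as(*u32, @ptrFromInt(untagged)).*);
}
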
diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig
index a8249a870f..b093588e80 100644
--- a/src/codegen/llvm/bindings.zig
+++ b/src/codegen/llvm/bindings.zig
@@ -8,7 +8,7 @@ pub const Bool = enum(c_int) {
_,
pub fn fromBool(b: bool) Bool {
- return @enumFromInt(Bool, @intFromBool(b));
+ return @as(Bool, @enumFromInt(@intFromBool(b)));
}
pub fn toBool(b: Bool) bool {
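
The bindings change above is the same rewrite applied to the C-ABI boolean wrapper. A runnable sketch with an illustrative stand-in type, since the real declaration lives in bindings.zig:

const std = @import("std");

const Bool = enum(c_int) {
    False,
    True,
    _,

    fn fromBool(b: bool) Bool {
        // The result type now comes from @as rather than the builtin's first argument.
        return @as(Bool, @enumFromInt(@intFromBool(b)));
    }
};

test "fromBool round-trips" {
    try std.testing.expectEqual(Bool.True, Bool.fromBool(true));
    try std.testing.expectEqual(Bool.False, Bool.fromBool(false));
}
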
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index d81ca9a015..220909476f 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -466,7 +466,7 @@ pub const DeclGen = struct {
unused.* = undef;
}
- const word = @bitCast(Word, self.partial_word.buffer);
+ const word = @as(Word, @bitCast(self.partial_word.buffer));
const result_id = try self.dg.spv.constInt(self.u32_ty_ref, word);
try self.members.append(self.u32_ty_ref);
try self.initializers.append(result_id);
@@ -482,7 +482,7 @@ pub const DeclGen = struct {
}
fn addUndef(self: *@This(), amt: u64) !void {
- for (0..@intCast(usize, amt)) |_| {
+ for (0..@as(usize, @intCast(amt))) |_| {
try self.addByte(undef);
}
}
@@ -539,13 +539,13 @@ pub const DeclGen = struct {
const mod = self.dg.module;
const int_info = ty.intInfo(mod);
const int_bits = switch (int_info.signedness) {
- .signed => @bitCast(u64, val.toSignedInt(mod)),
+ .signed => @as(u64, @bitCast(val.toSignedInt(mod))),
.unsigned => val.toUnsignedInt(mod),
};
    // TODO: Swap endianness if the compiler is big endian.
const len = ty.abiSize(mod);
- try self.addBytes(std.mem.asBytes(&int_bits)[0..@intCast(usize, len)]);
+ try self.addBytes(std.mem.asBytes(&int_bits)[0..@as(usize, @intCast(len))]);
}
fn addFloat(self: *@This(), ty: Type, val: Value) !void {
@@ -557,15 +557,15 @@ pub const DeclGen = struct {
switch (ty.floatBits(target)) {
16 => {
const float_bits = val.toFloat(f16, mod);
- try self.addBytes(std.mem.asBytes(&float_bits)[0..@intCast(usize, len)]);
+ try self.addBytes(std.mem.asBytes(&float_bits)[0..@as(usize, @intCast(len))]);
},
32 => {
const float_bits = val.toFloat(f32, mod);
- try self.addBytes(std.mem.asBytes(&float_bits)[0..@intCast(usize, len)]);
+ try self.addBytes(std.mem.asBytes(&float_bits)[0..@as(usize, @intCast(len))]);
},
64 => {
const float_bits = val.toFloat(f64, mod);
- try self.addBytes(std.mem.asBytes(&float_bits)[0..@intCast(usize, len)]);
+ try self.addBytes(std.mem.asBytes(&float_bits)[0..@as(usize, @intCast(len))]);
},
else => unreachable,
}
@@ -664,7 +664,7 @@ pub const DeclGen = struct {
.int => try self.addInt(ty, val),
.err => |err| {
const int = try mod.getErrorValue(err.name);
- try self.addConstInt(u16, @intCast(u16, int));
+ try self.addConstInt(u16, @as(u16, @intCast(int)));
},
.error_union => |error_union| {
const payload_ty = ty.errorUnionPayload(mod);
@@ -755,10 +755,10 @@ pub const DeclGen = struct {
switch (aggregate.storage) {
.bytes => |bytes| try self.addBytes(bytes),
.elems, .repeated_elem => {
- for (0..@intCast(usize, array_type.len)) |i| {
+ for (0..@as(usize, @intCast(array_type.len))) |i| {
try self.lower(elem_ty, switch (aggregate.storage) {
.bytes => unreachable,
- .elems => |elem_vals| elem_vals[@intCast(usize, i)].toValue(),
+ .elems => |elem_vals| elem_vals[@as(usize, @intCast(i))].toValue(),
.repeated_elem => |elem_val| elem_val.toValue(),
});
}
@@ -1132,7 +1132,7 @@ pub const DeclGen = struct {
const payload_padding_len = layout.payload_size - active_field_size;
if (payload_padding_len != 0) {
- const payload_padding_ty_ref = try self.spv.arrayType(@intCast(u32, payload_padding_len), u8_ty_ref);
+ const payload_padding_ty_ref = try self.spv.arrayType(@as(u32, @intCast(payload_padding_len)), u8_ty_ref);
member_types.appendAssumeCapacity(payload_padding_ty_ref);
member_names.appendAssumeCapacity(try self.spv.resolveString("payload_padding"));
}
@@ -1259,7 +1259,7 @@ pub const DeclGen = struct {
return try self.spv.resolve(.{ .vector_type = .{
.component_type = try self.resolveType(ty.childType(mod), repr),
- .component_count = @intCast(u32, ty.vectorLen(mod)),
+ .component_count = @as(u32, @intCast(ty.vectorLen(mod))),
} });
},
.Struct => {
@@ -1588,7 +1588,7 @@ pub const DeclGen = struct {
init_val,
actual_storage_class,
final_storage_class == .Generic,
- @intCast(u32, decl.alignment.toByteUnits(0)),
+ @as(u32, @intCast(decl.alignment.toByteUnits(0))),
);
}
}
@@ -1856,7 +1856,7 @@ pub const DeclGen = struct {
}
fn maskStrangeInt(self: *DeclGen, ty_ref: CacheRef, value_id: IdRef, bits: u16) !IdRef {
- const mask_value = if (bits == 64) 0xFFFF_FFFF_FFFF_FFFF else (@as(u64, 1) << @intCast(u6, bits)) - 1;
+ const mask_value = if (bits == 64) 0xFFFF_FFFF_FFFF_FFFF else (@as(u64, 1) << @as(u6, @intCast(bits))) - 1;
const result_id = self.spv.allocId();
const mask_id = try self.spv.constInt(ty_ref, mask_value);
try self.func.body.emit(self.spv.gpa, .OpBitwiseAnd, .{
@@ -2063,7 +2063,7 @@ pub const DeclGen = struct {
self.func.body.writeOperand(spec.LiteralInteger, 0xFFFF_FFFF);
} else {
const int = elem.toSignedInt(mod);
- const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int + a_len);
+ const unsigned = if (int >= 0) @as(u32, @intCast(int)) else @as(u32, @intCast(~int + a_len));
self.func.body.writeOperand(spec.LiteralInteger, unsigned);
}
}
@@ -2689,7 +2689,7 @@ pub const DeclGen = struct {
// are not allowed to be created from a phi node, and throw an error for those.
const result_type_id = try self.resolveTypeId(ty);
- try self.func.body.emitRaw(self.spv.gpa, .OpPhi, 2 + @intCast(u16, incoming_blocks.items.len * 2)); // result type + result + variable/parent...
+ try self.func.body.emitRaw(self.spv.gpa, .OpPhi, 2 + @as(u16, @intCast(incoming_blocks.items.len * 2))); // result type + result + variable/parent...
self.func.body.writeOperand(spec.IdResultType, result_type_id);
self.func.body.writeOperand(spec.IdRef, result_id);
@@ -3105,7 +3105,7 @@ pub const DeclGen = struct {
while (case_i < num_cases) : (case_i += 1) {
            // SPIR-V needs a literal here, whose width depends on the case condition.
const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
- const items = @ptrCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]);
+ const items = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[case.end..][0..case.data.items_len]));
const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
extra_index = case.end + case.data.items_len + case_body.len;
@@ -3116,7 +3116,7 @@ pub const DeclGen = struct {
return self.todo("switch on runtime value???", .{});
};
const int_val = switch (cond_ty.zigTypeTag(mod)) {
- .Int => if (cond_ty.isSignedInt(mod)) @bitCast(u64, value.toSignedInt(mod)) else value.toUnsignedInt(mod),
+ .Int => if (cond_ty.isSignedInt(mod)) @as(u64, @bitCast(value.toSignedInt(mod))) else value.toUnsignedInt(mod),
.Enum => blk: {
                // TODO: figure out if cond_ty is correct (something with enum literals)
break :blk (try value.intFromEnum(cond_ty, mod)).toUnsignedInt(mod); // TODO: composite integer constants
@@ -3124,7 +3124,7 @@ pub const DeclGen = struct {
else => unreachable,
};
const int_lit: spec.LiteralContextDependentNumber = switch (cond_words) {
- 1 => .{ .uint32 = @intCast(u32, int_val) },
+ 1 => .{ .uint32 = @as(u32, @intCast(int_val)) },
2 => .{ .uint64 = int_val },
else => unreachable,
};
@@ -3139,7 +3139,7 @@ pub const DeclGen = struct {
var case_i: u32 = 0;
while (case_i < num_cases) : (case_i += 1) {
const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
- const items = @ptrCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]);
+ const items = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[case.end..][0..case.data.items_len]));
const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
extra_index = case.end + case.data.items_len + case_body.len;
@@ -3167,15 +3167,15 @@ pub const DeclGen = struct {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Asm, ty_pl.payload);
- const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0;
- const clobbers_len = @truncate(u31, extra.data.flags);
+ const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0;
+ const clobbers_len = @as(u31, @truncate(extra.data.flags));
if (!is_volatile and self.liveness.isUnused(inst)) return null;
var extra_i: usize = extra.end;
- const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]);
+ const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]));
extra_i += outputs.len;
- const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]);
+ const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]));
extra_i += inputs.len;
if (outputs.len > 1) {
@@ -3297,7 +3297,7 @@ pub const DeclGen = struct {
const mod = self.module;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.Call, pl_op.payload);
- const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
+ const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]));
const callee_ty = self.typeOf(pl_op.operand);
const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) {
.Fn => callee_ty,
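
maskStrangeInt above builds a low-bit mask for oddly sized integers, with the shift amount cast to u6 inline. A standalone sketch of the same expression (lowMask is an illustrative name, not a function from this commit):

const std = @import("std");

fn lowMask(bits: u16) u64 {
    // A 64-bit shift amount must be a u6, so 64 itself needs a special case.
    return if (bits == 64) std.math.maxInt(u64) else (@as(u64, 1) << @as(u6, @intCast(bits))) - 1;
}

test "lowMask" {
    try std.testing.expectEqual(@as(u64, 0xFF), lowMask(8));
    try std.testing.expectEqual(@as(u64, std.math.maxInt(u64)), lowMask(64));
}
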
diff --git a/src/codegen/spirv/Assembler.zig b/src/codegen/spirv/Assembler.zig
index 73a842ebe9..8f466668ea 100644
--- a/src/codegen/spirv/Assembler.zig
+++ b/src/codegen/spirv/Assembler.zig
@@ -293,7 +293,7 @@ fn processTypeInstruction(self: *Assembler) !AsmValue {
return self.fail(0, "{} is not a valid bit count for floats (expected 16, 32 or 64)", .{bits});
},
}
- break :blk try self.spv.resolve(.{ .float_type = .{ .bits = @intCast(u16, bits) } });
+ break :blk try self.spv.resolve(.{ .float_type = .{ .bits = @as(u16, @intCast(bits)) } });
},
.OpTypeVector => try self.spv.resolve(.{ .vector_type = .{
.component_type = try self.resolveTypeRef(operands[1].ref_id),
@@ -306,7 +306,7 @@ fn processTypeInstruction(self: *Assembler) !AsmValue {
},
.OpTypePointer => try self.spv.ptrType(
try self.resolveTypeRef(operands[2].ref_id),
- @enumFromInt(spec.StorageClass, operands[1].value),
+ @as(spec.StorageClass, @enumFromInt(operands[1].value)),
),
.OpTypeFunction => blk: {
const param_operands = operands[2..];
@@ -340,7 +340,7 @@ fn processGenericInstruction(self: *Assembler) !?AsmValue {
else => switch (self.inst.opcode) {
.OpEntryPoint => unreachable,
.OpExecutionMode, .OpExecutionModeId => &self.spv.sections.execution_modes,
- .OpVariable => switch (@enumFromInt(spec.StorageClass, operands[2].value)) {
+ .OpVariable => switch (@as(spec.StorageClass, @enumFromInt(operands[2].value))) {
.Function => &self.func.prologue,
else => {
// This is currently disabled because global variables are required to be
@@ -391,7 +391,7 @@ fn processGenericInstruction(self: *Assembler) !?AsmValue {
}
const actual_word_count = section.instructions.items.len - first_word;
- section.instructions.items[first_word] |= @as(u32, @intCast(u16, actual_word_count)) << 16 | @intFromEnum(self.inst.opcode);
+ section.instructions.items[first_word] |= @as(u32, @as(u16, @intCast(actual_word_count))) << 16 | @intFromEnum(self.inst.opcode);
if (maybe_result_id) |result| {
return AsmValue{ .value = result };
@@ -458,7 +458,7 @@ fn parseInstruction(self: *Assembler) !void {
if (!entry.found_existing) {
entry.value_ptr.* = .just_declared;
}
- break :blk @intCast(AsmValue.Ref, entry.index);
+ break :blk @as(AsmValue.Ref, @intCast(entry.index));
} else null;
const opcode_tok = self.currentToken();
@@ -613,7 +613,7 @@ fn parseRefId(self: *Assembler) !void {
entry.value_ptr.* = .unresolved_forward_reference;
}
- const index = @intCast(AsmValue.Ref, entry.index);
+ const index = @as(AsmValue.Ref, @intCast(entry.index));
try self.inst.operands.append(self.gpa, .{ .ref_id = index });
}
@@ -645,7 +645,7 @@ fn parseString(self: *Assembler) !void {
else
text[1..];
- const string_offset = @intCast(u32, self.inst.string_bytes.items.len);
+ const string_offset = @as(u32, @intCast(self.inst.string_bytes.items.len));
try self.inst.string_bytes.ensureUnusedCapacity(self.gpa, literal.len + 1);
self.inst.string_bytes.appendSliceAssumeCapacity(literal);
self.inst.string_bytes.appendAssumeCapacity(0);
@@ -693,18 +693,18 @@ fn parseContextDependentInt(self: *Assembler, signedness: std.builtin.Signedness
const int = std.fmt.parseInt(i128, text, 0) catch break :invalid;
const min = switch (signedness) {
.unsigned => 0,
- .signed => -(@as(i128, 1) << (@intCast(u7, width) - 1)),
+ .signed => -(@as(i128, 1) << (@as(u7, @intCast(width)) - 1)),
};
- const max = (@as(i128, 1) << (@intCast(u7, width) - @intFromBool(signedness == .signed))) - 1;
+ const max = (@as(i128, 1) << (@as(u7, @intCast(width)) - @intFromBool(signedness == .signed))) - 1;
if (int < min or int > max) {
break :invalid;
}
// Note, we store the sign-extended version here.
if (width <= @bitSizeOf(spec.Word)) {
- try self.inst.operands.append(self.gpa, .{ .literal32 = @truncate(u32, @bitCast(u128, int)) });
+ try self.inst.operands.append(self.gpa, .{ .literal32 = @as(u32, @truncate(@as(u128, @bitCast(int)))) });
} else {
- try self.inst.operands.append(self.gpa, .{ .literal64 = @truncate(u64, @bitCast(u128, int)) });
+ try self.inst.operands.append(self.gpa, .{ .literal64 = @as(u64, @truncate(@as(u128, @bitCast(int)))) });
}
return;
}
@@ -725,7 +725,7 @@ fn parseContextDependentFloat(self: *Assembler, comptime width: u16) !void {
return self.fail(tok.start, "'{s}' is not a valid {}-bit float literal", .{ text, width });
};
- const float_bits = @bitCast(Int, value);
+ const float_bits = @as(Int, @bitCast(value));
if (width <= @bitSizeOf(spec.Word)) {
try self.inst.operands.append(self.gpa, .{ .literal32 = float_bits });
} else {
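
parseContextDependentInt above stores a sign-extended literal into a 32- or 64-bit operand by reinterpreting the i128 as raw bits and truncating. A minimal sketch with an illustrative value:

const std = @import("std");

test "sign-extended literal into 32- and 64-bit operands" {
    const int: i128 = -2;
    // Reinterpret as raw bits first, then truncate to the operand width.
    const lit32 = @as(u32, @truncate(@as(u128, @bitCast(int))));
    const lit64 = @as(u64, @truncate(@as(u128, @bitCast(int))));
    try std.testing.expectEqual(@as(u32, 0xFFFF_FFFE), lit32);
    try std.testing.expectEqual(@as(u64, 0xFFFF_FFFF_FFFF_FFFE), lit64);
}
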
diff --git a/src/codegen/spirv/Cache.zig b/src/codegen/spirv/Cache.zig
index 7d7fc0fb0d..7a3b6f61f5 100644
--- a/src/codegen/spirv/Cache.zig
+++ b/src/codegen/spirv/Cache.zig
@@ -158,16 +158,16 @@ const Tag = enum {
high: u32,
fn encode(value: f64) Float64 {
- const bits = @bitCast(u64, value);
+ const bits = @as(u64, @bitCast(value));
return .{
- .low = @truncate(u32, bits),
- .high = @truncate(u32, bits >> 32),
+ .low = @as(u32, @truncate(bits)),
+ .high = @as(u32, @truncate(bits >> 32)),
};
}
fn decode(self: Float64) f64 {
const bits = @as(u64, self.low) | (@as(u64, self.high) << 32);
- return @bitCast(f64, bits);
+ return @as(f64, @bitCast(bits));
}
};
@@ -189,8 +189,8 @@ const Tag = enum {
fn encode(ty: Ref, value: u64) Int64 {
return .{
.ty = ty,
- .low = @truncate(u32, value),
- .high = @truncate(u32, value >> 32),
+ .low = @as(u32, @truncate(value)),
+ .high = @as(u32, @truncate(value >> 32)),
};
}
@@ -207,13 +207,13 @@ const Tag = enum {
fn encode(ty: Ref, value: i64) Int64 {
return .{
.ty = ty,
- .low = @truncate(u32, @bitCast(u64, value)),
- .high = @truncate(u32, @bitCast(u64, value) >> 32),
+ .low = @as(u32, @truncate(@as(u64, @bitCast(value)))),
+ .high = @as(u32, @truncate(@as(u64, @bitCast(value)) >> 32)),
};
}
fn decode(self: Int64) i64 {
- return @bitCast(i64, @as(u64, self.low) | (@as(u64, self.high) << 32));
+ return @as(i64, @bitCast(@as(u64, self.low) | (@as(u64, self.high) << 32)));
}
};
};
@@ -305,21 +305,21 @@ pub const Key = union(enum) {
/// Turns this value into the corresponding 32-bit literal, 2s complement signed.
fn toBits32(self: Int) u32 {
return switch (self.value) {
- .uint64 => |val| @intCast(u32, val),
- .int64 => |val| if (val < 0) @bitCast(u32, @intCast(i32, val)) else @intCast(u32, val),
+ .uint64 => |val| @as(u32, @intCast(val)),
+ .int64 => |val| if (val < 0) @as(u32, @bitCast(@as(i32, @intCast(val)))) else @as(u32, @intCast(val)),
};
}
fn toBits64(self: Int) u64 {
return switch (self.value) {
.uint64 => |val| val,
- .int64 => |val| @bitCast(u64, val),
+ .int64 => |val| @as(u64, @bitCast(val)),
};
}
fn to(self: Int, comptime T: type) T {
return switch (self.value) {
- inline else => |val| @intCast(T, val),
+ inline else => |val| @as(T, @intCast(val)),
};
}
};
@@ -357,9 +357,9 @@ pub const Key = union(enum) {
.float => |float| {
std.hash.autoHash(&hasher, float.ty);
switch (float.value) {
- .float16 => |value| std.hash.autoHash(&hasher, @bitCast(u16, value)),
- .float32 => |value| std.hash.autoHash(&hasher, @bitCast(u32, value)),
- .float64 => |value| std.hash.autoHash(&hasher, @bitCast(u64, value)),
+ .float16 => |value| std.hash.autoHash(&hasher, @as(u16, @bitCast(value))),
+ .float32 => |value| std.hash.autoHash(&hasher, @as(u32, @bitCast(value))),
+ .float64 => |value| std.hash.autoHash(&hasher, @as(u64, @bitCast(value))),
}
},
.function_type => |func| {
@@ -379,7 +379,7 @@ pub const Key = union(enum) {
},
inline else => |key| std.hash.autoHash(&hasher, key),
}
- return @truncate(u32, hasher.final());
+ return @as(u32, @truncate(hasher.final()));
}
fn eql(a: Key, b: Key) bool {
@@ -411,7 +411,7 @@ pub const Key = union(enum) {
pub fn eql(ctx: @This(), a: Key, b_void: void, b_index: usize) bool {
_ = b_void;
- return ctx.self.lookup(@enumFromInt(Ref, b_index)).eql(a);
+ return ctx.self.lookup(@as(Ref, @enumFromInt(b_index))).eql(a);
}
pub fn hash(ctx: @This(), a: Key) u32 {
@@ -445,7 +445,7 @@ pub fn materialize(self: *const Self, spv: *Module) !Section {
var section = Section{};
errdefer section.deinit(spv.gpa);
for (self.items.items(.result_id), 0..) |result_id, index| {
- try self.emit(spv, result_id, @enumFromInt(Ref, index), &section);
+ try self.emit(spv, result_id, @as(Ref, @enumFromInt(index)), &section);
}
return section;
}
@@ -534,7 +534,7 @@ fn emit(
}
for (struct_type.memberNames(), 0..) |member_name, i| {
if (self.getString(member_name)) |name| {
- try spv.memberDebugName(result_id, @intCast(u32, i), "{s}", .{name});
+ try spv.memberDebugName(result_id, @as(u32, @intCast(i)), "{s}", .{name});
}
}
// TODO: Decorations?
@@ -557,7 +557,7 @@ fn emit(
.float => |float| {
const ty_id = self.resultId(float.ty);
const lit: Lit = switch (float.value) {
- .float16 => |value| .{ .uint32 = @bitCast(u16, value) },
+ .float16 => |value| .{ .uint32 = @as(u16, @bitCast(value)) },
.float32 => |value| .{ .float32 = value },
.float64 => |value| .{ .float64 = value },
};
@@ -603,7 +603,7 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref {
const adapter: Key.Adapter = .{ .self = self };
const entry = try self.map.getOrPutAdapted(spv.gpa, key, adapter);
if (entry.found_existing) {
- return @enumFromInt(Ref, entry.index);
+ return @as(Ref, @enumFromInt(entry.index));
}
const result_id = spv.allocId();
const item: Item = switch (key) {
@@ -640,10 +640,10 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref {
},
.function_type => |function| blk: {
const extra = try self.addExtra(spv, Tag.FunctionType{
- .param_len = @intCast(u32, function.parameters.len),
+ .param_len = @as(u32, @intCast(function.parameters.len)),
.return_type = function.return_type,
});
- try self.extra.appendSlice(spv.gpa, @ptrCast([]const u32, function.parameters));
+ try self.extra.appendSlice(spv.gpa, @as([]const u32, @ptrCast(function.parameters)));
break :blk .{
.tag = .type_function,
.result_id = result_id,
@@ -678,12 +678,12 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref {
.struct_type => |struct_type| blk: {
const extra = try self.addExtra(spv, Tag.SimpleStructType{
.name = struct_type.name,
- .members_len = @intCast(u32, struct_type.member_types.len),
+ .members_len = @as(u32, @intCast(struct_type.member_types.len)),
});
- try self.extra.appendSlice(spv.gpa, @ptrCast([]const u32, struct_type.member_types));
+ try self.extra.appendSlice(spv.gpa, @as([]const u32, @ptrCast(struct_type.member_types)));
if (struct_type.member_names) |member_names| {
- try self.extra.appendSlice(spv.gpa, @ptrCast([]const u32, member_names));
+ try self.extra.appendSlice(spv.gpa, @as([]const u32, @ptrCast(member_names)));
break :blk Item{
.tag = .type_struct_simple_with_member_names,
.result_id = result_id,
@@ -721,7 +721,7 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref {
.result_id = result_id,
.data = try self.addExtra(spv, Tag.UInt32{
.ty = int.ty,
- .value = @intCast(u32, val),
+ .value = @as(u32, @intCast(val)),
}),
};
} else if (val >= std.math.minInt(i32) and val <= std.math.maxInt(i32)) {
@@ -730,20 +730,20 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref {
.result_id = result_id,
.data = try self.addExtra(spv, Tag.Int32{
.ty = int.ty,
- .value = @intCast(i32, val),
+ .value = @as(i32, @intCast(val)),
}),
};
} else if (val < 0) {
break :blk .{
.tag = .int_large,
.result_id = result_id,
- .data = try self.addExtra(spv, Tag.Int64.encode(int.ty, @intCast(i64, val))),
+ .data = try self.addExtra(spv, Tag.Int64.encode(int.ty, @as(i64, @intCast(val)))),
};
} else {
break :blk .{
.tag = .uint_large,
.result_id = result_id,
- .data = try self.addExtra(spv, Tag.UInt64.encode(int.ty, @intCast(u64, val))),
+ .data = try self.addExtra(spv, Tag.UInt64.encode(int.ty, @as(u64, @intCast(val)))),
};
}
},
@@ -753,12 +753,12 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref {
16 => .{
.tag = .float16,
.result_id = result_id,
- .data = @bitCast(u16, float.value.float16),
+ .data = @as(u16, @bitCast(float.value.float16)),
},
32 => .{
.tag = .float32,
.result_id = result_id,
- .data = @bitCast(u32, float.value.float32),
+ .data = @as(u32, @bitCast(float.value.float32)),
},
64 => .{
.tag = .float64,
@@ -788,7 +788,7 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref {
};
try self.items.append(spv.gpa, item);
- return @enumFromInt(Ref, entry.index);
+ return @as(Ref, @enumFromInt(entry.index));
}
/// Turn a Ref back into a Key.
@@ -797,20 +797,20 @@ pub fn lookup(self: *const Self, ref: Ref) Key {
const item = self.items.get(@intFromEnum(ref));
const data = item.data;
return switch (item.tag) {
- .type_simple => switch (@enumFromInt(Tag.SimpleType, data)) {
+ .type_simple => switch (@as(Tag.SimpleType, @enumFromInt(data))) {
.void => .void_type,
.bool => .bool_type,
},
.type_int_signed => .{ .int_type = .{
.signedness = .signed,
- .bits = @intCast(u16, data),
+ .bits = @as(u16, @intCast(data)),
} },
.type_int_unsigned => .{ .int_type = .{
.signedness = .unsigned,
- .bits = @intCast(u16, data),
+ .bits = @as(u16, @intCast(data)),
} },
.type_float => .{ .float_type = .{
- .bits = @intCast(u16, data),
+ .bits = @as(u16, @intCast(data)),
} },
.type_vector => .{ .vector_type = self.extraData(Tag.VectorType, data) },
.type_array => .{ .array_type = self.extraData(Tag.ArrayType, data) },
@@ -819,26 +819,26 @@ pub fn lookup(self: *const Self, ref: Ref) Key {
return .{
.function_type = .{
.return_type = payload.data.return_type,
- .parameters = @ptrCast([]const Ref, self.extra.items[payload.trail..][0..payload.data.param_len]),
+ .parameters = @as([]const Ref, @ptrCast(self.extra.items[payload.trail..][0..payload.data.param_len])),
},
};
},
.type_ptr_generic => .{
.ptr_type = .{
.storage_class = .Generic,
- .child_type = @enumFromInt(Ref, data),
+ .child_type = @as(Ref, @enumFromInt(data)),
},
},
.type_ptr_crosswgp => .{
.ptr_type = .{
.storage_class = .CrossWorkgroup,
- .child_type = @enumFromInt(Ref, data),
+ .child_type = @as(Ref, @enumFromInt(data)),
},
},
.type_ptr_function => .{
.ptr_type = .{
.storage_class = .Function,
- .child_type = @enumFromInt(Ref, data),
+ .child_type = @as(Ref, @enumFromInt(data)),
},
},
.type_ptr_simple => {
@@ -852,7 +852,7 @@ pub fn lookup(self: *const Self, ref: Ref) Key {
},
.type_struct_simple => {
const payload = self.extraDataTrail(Tag.SimpleStructType, data);
- const member_types = @ptrCast([]const Ref, self.extra.items[payload.trail..][0..payload.data.members_len]);
+ const member_types = @as([]const Ref, @ptrCast(self.extra.items[payload.trail..][0..payload.data.members_len]));
return .{
.struct_type = .{
.name = payload.data.name,
@@ -864,8 +864,8 @@ pub fn lookup(self: *const Self, ref: Ref) Key {
.type_struct_simple_with_member_names => {
const payload = self.extraDataTrail(Tag.SimpleStructType, data);
const trailing = self.extra.items[payload.trail..];
- const member_types = @ptrCast([]const Ref, trailing[0..payload.data.members_len]);
- const member_names = @ptrCast([]const String, trailing[payload.data.members_len..][0..payload.data.members_len]);
+ const member_types = @as([]const Ref, @ptrCast(trailing[0..payload.data.members_len]));
+ const member_names = @as([]const String, @ptrCast(trailing[payload.data.members_len..][0..payload.data.members_len]));
return .{
.struct_type = .{
.name = payload.data.name,
@@ -876,11 +876,11 @@ pub fn lookup(self: *const Self, ref: Ref) Key {
},
.float16 => .{ .float = .{
.ty = self.get(.{ .float_type = .{ .bits = 16 } }),
- .value = .{ .float16 = @bitCast(f16, @intCast(u16, data)) },
+ .value = .{ .float16 = @as(f16, @bitCast(@as(u16, @intCast(data)))) },
} },
.float32 => .{ .float = .{
.ty = self.get(.{ .float_type = .{ .bits = 32 } }),
- .value = .{ .float32 = @bitCast(f32, data) },
+ .value = .{ .float32 = @as(f32, @bitCast(data)) },
} },
.float64 => .{ .float = .{
.ty = self.get(.{ .float_type = .{ .bits = 64 } }),
@@ -923,17 +923,17 @@ pub fn lookup(self: *const Self, ref: Ref) Key {
} };
},
.undef => .{ .undef = .{
- .ty = @enumFromInt(Ref, data),
+ .ty = @as(Ref, @enumFromInt(data)),
} },
.null => .{ .null = .{
- .ty = @enumFromInt(Ref, data),
+ .ty = @as(Ref, @enumFromInt(data)),
} },
.bool_true => .{ .bool = .{
- .ty = @enumFromInt(Ref, data),
+ .ty = @as(Ref, @enumFromInt(data)),
.value = true,
} },
.bool_false => .{ .bool = .{
- .ty = @enumFromInt(Ref, data),
+ .ty = @as(Ref, @enumFromInt(data)),
.value = false,
} },
};
@@ -949,7 +949,7 @@ pub fn resultId(self: Self, ref: Ref) IdResult {
fn get(self: *const Self, key: Key) Ref {
const adapter: Key.Adapter = .{ .self = self };
const index = self.map.getIndexAdapted(key, adapter).?;
- return @enumFromInt(Ref, index);
+ return @as(Ref, @enumFromInt(index));
}
fn addExtra(self: *Self, spv: *Module, extra: anytype) !u32 {
@@ -959,12 +959,12 @@ fn addExtra(self: *Self, spv: *Module, extra: anytype) !u32 {
}
fn addExtraAssumeCapacity(self: *Self, extra: anytype) !u32 {
- const payload_offset = @intCast(u32, self.extra.items.len);
+ const payload_offset = @as(u32, @intCast(self.extra.items.len));
inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| {
const field_val = @field(extra, field.name);
const word = switch (field.type) {
u32 => field_val,
- i32 => @bitCast(u32, field_val),
+ i32 => @as(u32, @bitCast(field_val)),
Ref => @intFromEnum(field_val),
StorageClass => @intFromEnum(field_val),
String => @intFromEnum(field_val),
@@ -986,16 +986,16 @@ fn extraDataTrail(self: Self, comptime T: type, offset: u32) struct { data: T, t
const word = self.extra.items[offset + i];
@field(result, field.name) = switch (field.type) {
u32 => word,
- i32 => @bitCast(i32, word),
- Ref => @enumFromInt(Ref, word),
- StorageClass => @enumFromInt(StorageClass, word),
- String => @enumFromInt(String, word),
+ i32 => @as(i32, @bitCast(word)),
+ Ref => @as(Ref, @enumFromInt(word)),
+ StorageClass => @as(StorageClass, @enumFromInt(word)),
+ String => @as(String, @enumFromInt(word)),
else => @compileError("Invalid type: " ++ @typeName(field.type)),
};
}
return .{
.data = result,
- .trail = offset + @intCast(u32, fields.len),
+ .trail = offset + @as(u32, @intCast(fields.len)),
};
}
@@ -1017,7 +1017,7 @@ pub const String = enum(u32) {
_ = ctx;
var hasher = std.hash.Wyhash.init(0);
hasher.update(a);
- return @truncate(u32, hasher.final());
+ return @as(u32, @truncate(hasher.final()));
}
};
};
@@ -1032,10 +1032,10 @@ pub fn addString(self: *Self, spv: *Module, str: []const u8) !String {
try self.string_bytes.ensureUnusedCapacity(spv.gpa, 1 + str.len);
self.string_bytes.appendSliceAssumeCapacity(str);
self.string_bytes.appendAssumeCapacity(0);
- entry.value_ptr.* = @intCast(u32, offset);
+ entry.value_ptr.* = @as(u32, @intCast(offset));
}
- return @enumFromInt(String, entry.index);
+ return @as(String, @enumFromInt(entry.index));
}
pub fn getString(self: *const Self, ref: String) ?[]const u8 {
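
Tag.Float64 above splits an f64 across two 32-bit cache words and reassembles it on lookup. A self-contained sketch of the round trip (encode and decode are free functions here purely for illustration):

const std = @import("std");

fn encode(value: f64) [2]u32 {
    const bits = @as(u64, @bitCast(value));
    return .{ @as(u32, @truncate(bits)), @as(u32, @truncate(bits >> 32)) };
}

fn decode(words: [2]u32) f64 {
    const bits = @as(u64, words[0]) | (@as(u64, words[1]) << 32);
    return @as(f64, @bitCast(bits));
}

test "f64 round-trips through two words" {
    try std.testing.expectEqual(@as(f64, 1.5), decode(encode(1.5)));
}
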
diff --git a/src/codegen/spirv/Module.zig b/src/codegen/spirv/Module.zig
index 9d8cca9445..e61ac754ee 100644
--- a/src/codegen/spirv/Module.zig
+++ b/src/codegen/spirv/Module.zig
@@ -451,8 +451,8 @@ pub fn constInt(self: *Module, ty_ref: CacheRef, value: anytype) !IdRef {
return try self.resolveId(.{ .int = .{
.ty = ty_ref,
.value = switch (ty.signedness) {
- .signed => Value{ .int64 = @intCast(i64, value) },
- .unsigned => Value{ .uint64 = @intCast(u64, value) },
+ .signed => Value{ .int64 = @as(i64, @intCast(value)) },
+ .unsigned => Value{ .uint64 = @as(u64, @intCast(value)) },
},
} });
}
@@ -516,7 +516,7 @@ pub fn allocDecl(self: *Module, kind: DeclKind) !Decl.Index {
.begin_dep = undefined,
.end_dep = undefined,
});
- const index = @enumFromInt(Decl.Index, @intCast(u32, self.decls.items.len - 1));
+ const index = @as(Decl.Index, @enumFromInt(@as(u32, @intCast(self.decls.items.len - 1))));
switch (kind) {
.func => {},
// If the decl represents a global, also allocate a global node.
@@ -540,9 +540,9 @@ pub fn globalPtr(self: *Module, index: Decl.Index) ?*Global {
/// Declare ALL dependencies for a decl.
pub fn declareDeclDeps(self: *Module, decl_index: Decl.Index, deps: []const Decl.Index) !void {
- const begin_dep = @intCast(u32, self.decl_deps.items.len);
+ const begin_dep = @as(u32, @intCast(self.decl_deps.items.len));
try self.decl_deps.appendSlice(self.gpa, deps);
- const end_dep = @intCast(u32, self.decl_deps.items.len);
+ const end_dep = @as(u32, @intCast(self.decl_deps.items.len));
const decl = self.declPtr(decl_index);
decl.begin_dep = begin_dep;
@@ -550,13 +550,13 @@ pub fn declareDeclDeps(self: *Module, decl_index: Decl.Index, deps: []const Decl
}
pub fn beginGlobal(self: *Module) u32 {
- return @intCast(u32, self.globals.section.instructions.items.len);
+ return @as(u32, @intCast(self.globals.section.instructions.items.len));
}
pub fn endGlobal(self: *Module, global_index: Decl.Index, begin_inst: u32) void {
const global = self.globalPtr(global_index).?;
global.begin_inst = begin_inst;
- global.end_inst = @intCast(u32, self.globals.section.instructions.items.len);
+ global.end_inst = @as(u32, @intCast(self.globals.section.instructions.items.len));
}
pub fn declareEntryPoint(self: *Module, decl_index: Decl.Index, name: []const u8) !void {
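
declareDeclDeps above records a decl's dependencies as a [begin, end) range of u32 offsets into a shared list. A small sketch of that bookkeeping pattern, using a plain ArrayList in place of the module state:

const std = @import("std");

test "recording a dependency range as u32 offsets" {
    var deps = std.ArrayList(u32).init(std.testing.allocator);
    defer deps.deinit();

    const begin_dep = @as(u32, @intCast(deps.items.len));
    try deps.appendSlice(&.{ 1, 2, 3 });
    const end_dep = @as(u32, @intCast(deps.items.len));

    try std.testing.expectEqual(@as(u32, 0), begin_dep);
    try std.testing.expectEqual(@as(u32, 3), end_dep);
}
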
diff --git a/src/codegen/spirv/Section.zig b/src/codegen/spirv/Section.zig
index b35dc489e4..ae88dc7c8a 100644
--- a/src/codegen/spirv/Section.zig
+++ b/src/codegen/spirv/Section.zig
@@ -50,7 +50,7 @@ pub fn emitRaw(
) !void {
const word_count = 1 + operand_words;
try section.instructions.ensureUnusedCapacity(allocator, word_count);
- section.writeWord((@intCast(Word, word_count << 16)) | @intFromEnum(opcode));
+ section.writeWord((@as(Word, @intCast(word_count << 16))) | @intFromEnum(opcode));
}
pub fn emit(
@@ -61,7 +61,7 @@ pub fn emit(
) !void {
const word_count = instructionSize(opcode, operands);
try section.instructions.ensureUnusedCapacity(allocator, word_count);
- section.writeWord(@intCast(Word, word_count << 16) | @intFromEnum(opcode));
+ section.writeWord(@as(Word, @intCast(word_count << 16)) | @intFromEnum(opcode));
section.writeOperands(opcode.Operands(), operands);
}
@@ -94,8 +94,8 @@ pub fn writeWords(section: *Section, words: []const Word) void {
pub fn writeDoubleWord(section: *Section, dword: DoubleWord) void {
section.writeWords(&.{
- @truncate(Word, dword),
- @truncate(Word, dword >> @bitSizeOf(Word)),
+ @as(Word, @truncate(dword)),
+ @as(Word, @truncate(dword >> @bitSizeOf(Word))),
});
}
@@ -145,7 +145,7 @@ pub fn writeOperand(section: *Section, comptime Operand: type, operand: Operand)
},
.Struct => |info| {
if (info.layout == .Packed) {
- section.writeWord(@bitCast(Word, operand));
+ section.writeWord(@as(Word, @bitCast(operand)));
} else {
section.writeExtendedMask(Operand, operand);
}
@@ -166,7 +166,7 @@ fn writeString(section: *Section, str: []const u8) void {
var j: usize = 0;
while (j < @sizeOf(Word) and i + j < str.len) : (j += 1) {
- word |= @as(Word, str[i + j]) << @intCast(Log2Word, j * @bitSizeOf(u8));
+ word |= @as(Word, str[i + j]) << @as(Log2Word, @intCast(j * @bitSizeOf(u8)));
}
section.instructions.appendAssumeCapacity(word);
@@ -175,12 +175,12 @@ fn writeString(section: *Section, str: []const u8) void {
fn writeContextDependentNumber(section: *Section, operand: spec.LiteralContextDependentNumber) void {
switch (operand) {
- .int32 => |int| section.writeWord(@bitCast(Word, int)),
- .uint32 => |int| section.writeWord(@bitCast(Word, int)),
- .int64 => |int| section.writeDoubleWord(@bitCast(DoubleWord, int)),
- .uint64 => |int| section.writeDoubleWord(@bitCast(DoubleWord, int)),
- .float32 => |float| section.writeWord(@bitCast(Word, float)),
- .float64 => |float| section.writeDoubleWord(@bitCast(DoubleWord, float)),
+ .int32 => |int| section.writeWord(@as(Word, @bitCast(int))),
+ .uint32 => |int| section.writeWord(@as(Word, @bitCast(int))),
+ .int64 => |int| section.writeDoubleWord(@as(DoubleWord, @bitCast(int))),
+ .uint64 => |int| section.writeDoubleWord(@as(DoubleWord, @bitCast(int))),
+ .float32 => |float| section.writeWord(@as(Word, @bitCast(float))),
+ .float64 => |float| section.writeDoubleWord(@as(DoubleWord, @bitCast(float))),
}
}
@@ -189,10 +189,10 @@ fn writeExtendedMask(section: *Section, comptime Operand: type, operand: Operand
inline for (@typeInfo(Operand).Struct.fields, 0..) |field, bit| {
switch (@typeInfo(field.type)) {
.Optional => if (@field(operand, field.name) != null) {
- mask |= 1 << @intCast(u5, bit);
+ mask |= 1 << @as(u5, @intCast(bit));
},
.Bool => if (@field(operand, field.name)) {
- mask |= 1 << @intCast(u5, bit);
+ mask |= 1 << @as(u5, @intCast(bit));
},
else => unreachable,
}
@@ -392,7 +392,7 @@ test "SPIR-V Section emit() - extended mask" {
(@as(Word, 5) << 16) | @intFromEnum(Opcode.OpLoopMerge),
10,
20,
- @bitCast(Word, spec.LoopControl{ .Unroll = true, .DependencyLength = true }),
+ @as(Word, @bitCast(spec.LoopControl{ .Unroll = true, .DependencyLength = true })),
2,
}, section.instructions.items);
}
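
writeDoubleWord above emits the low word first and then the high word. A standalone sketch of the split, with Word and DoubleWord redeclared locally for illustration:

const std = @import("std");

test "splitting a double word into SPIR-V words" {
    const Word = u32;
    const DoubleWord = u64;
    const dword: DoubleWord = 0x1122_3344_5566_7788;
    const low = @as(Word, @truncate(dword));
    const high = @as(Word, @truncate(dword >> @bitSizeOf(Word)));
    try std.testing.expectEqual(@as(Word, 0x5566_7788), low);
    try std.testing.expectEqual(@as(Word, 0x1122_3344), high);
}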