author     mlugg <mlugg@mlugg.co.uk>            2023-06-22 18:46:56 +0100
committer  Andrew Kelley <andrew@ziglang.org>   2023-06-24 16:56:39 -0700
commit     f26dda21171e26f44aeec8c59a75bbb3331eeb2e (patch)
tree       c935248861ae2693b314f2c8bc78fe38d9961b6d /src/codegen/c.zig
parent     447ca4e3fff021f471b748187b53f0a4744ad0bc (diff)
all: migrate code to new cast builtin syntax
Most of this migration was performed automatically with `zig fmt`. There
were a few exceptions which I had to manually fix:

* `@alignCast` and `@addrSpaceCast` cannot be automatically rewritten
* `@truncate`'s fixup is incorrect for vectors
* Test cases are not formatted, and their error locations change
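For context, a minimal sketch of the rewrite pattern this commit applies,
assuming Zig 0.11's result-location-based cast builtins (the test and
values here are illustrative, not taken from this diff):

    const std = @import("std");

    test "cast builtin syntax migration" {
        const len: usize = 300;
        const flags: u32 = 0x8000_0001;

        // Old syntax (pre-migration): the destination type was the
        // builtin's first argument.
        //   const idx = @intCast(u8, len % 256);
        //   const one = @bitCast(f32, @as(u32, 0x3f800000));
        //   const top = @truncate(u1, flags >> 31);

        // New syntax: the cast builtins take only the operand and infer
        // the destination type from the result location, so `zig fmt`
        // wraps each rewritten call in `@as(T, ...)` to pin the old type.
        const idx = @as(u8, @intCast(len % 256));
        const one = @as(f32, @bitCast(@as(u32, 0x3f800000)));
        const top = @as(u1, @truncate(flags >> 31));

        try std.testing.expectEqual(@as(u8, 44), idx);
        try std.testing.expectEqual(@as(f32, 1.0), one);
        try std.testing.expectEqual(@as(u1, 1), top);
    }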
Diffstat (limited to 'src/codegen/c.zig')
-rw-r--r--  src/codegen/c.zig  102
1 file changed, 51 insertions(+), 51 deletions(-)
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 8afaae7cfa..317d77602f 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -326,7 +326,7 @@ pub const Function = struct {
.cty_idx = try f.typeToIndex(ty, .complete),
.alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod)),
});
- return .{ .new_local = @intCast(LocalIndex, f.locals.items.len - 1) };
+ return .{ .new_local = @as(LocalIndex, @intCast(f.locals.items.len - 1)) };
}
fn allocLocal(f: *Function, inst: Air.Inst.Index, ty: Type) !CValue {
@@ -644,7 +644,7 @@ pub const DeclGen = struct {
// Ensure complete type definition is visible before accessing fields.
_ = try dg.typeToIndex(base_ty, .complete);
const field_ty = switch (mod.intern_pool.indexToKey(base_ty.toIntern())) {
- .anon_struct_type, .struct_type, .union_type => base_ty.structFieldType(@intCast(usize, field.index), mod),
+ .anon_struct_type, .struct_type, .union_type => base_ty.structFieldType(@as(usize, @intCast(field.index)), mod),
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.One, .Many, .C => unreachable,
.Slice => switch (field.index) {
@@ -662,7 +662,7 @@ pub const DeclGen = struct {
try dg.renderCType(writer, ptr_cty);
try writer.writeByte(')');
}
- switch (fieldLocation(base_ty, ptr_ty, @intCast(u32, field.index), mod)) {
+ switch (fieldLocation(base_ty, ptr_ty, @as(u32, @intCast(field.index)), mod)) {
.begin => try dg.renderParentPtr(writer, field.base, location),
.field => |name| {
try writer.writeAll("&(");
@@ -740,11 +740,11 @@ pub const DeclGen = struct {
try dg.renderTypeForBuiltinFnName(writer, ty);
try writer.writeByte('(');
switch (bits) {
- 16 => try writer.print("{x}", .{@bitCast(f16, undefPattern(i16))}),
- 32 => try writer.print("{x}", .{@bitCast(f32, undefPattern(i32))}),
- 64 => try writer.print("{x}", .{@bitCast(f64, undefPattern(i64))}),
- 80 => try writer.print("{x}", .{@bitCast(f80, undefPattern(i80))}),
- 128 => try writer.print("{x}", .{@bitCast(f128, undefPattern(i128))}),
+ 16 => try writer.print("{x}", .{@as(f16, @bitCast(undefPattern(i16)))}),
+ 32 => try writer.print("{x}", .{@as(f32, @bitCast(undefPattern(i32)))}),
+ 64 => try writer.print("{x}", .{@as(f64, @bitCast(undefPattern(i64)))}),
+ 80 => try writer.print("{x}", .{@as(f80, @bitCast(undefPattern(i80)))}),
+ 128 => try writer.print("{x}", .{@as(f128, @bitCast(undefPattern(i128)))}),
else => unreachable,
}
try writer.writeAll(", ");
@@ -1041,11 +1041,11 @@ pub const DeclGen = struct {
};
switch (bits) {
- 16 => repr_val_big.set(@bitCast(u16, val.toFloat(f16, mod))),
- 32 => repr_val_big.set(@bitCast(u32, val.toFloat(f32, mod))),
- 64 => repr_val_big.set(@bitCast(u64, val.toFloat(f64, mod))),
- 80 => repr_val_big.set(@bitCast(u80, val.toFloat(f80, mod))),
- 128 => repr_val_big.set(@bitCast(u128, f128_val)),
+ 16 => repr_val_big.set(@as(u16, @bitCast(val.toFloat(f16, mod)))),
+ 32 => repr_val_big.set(@as(u32, @bitCast(val.toFloat(f32, mod)))),
+ 64 => repr_val_big.set(@as(u64, @bitCast(val.toFloat(f64, mod)))),
+ 80 => repr_val_big.set(@as(u80, @bitCast(val.toFloat(f80, mod)))),
+ 128 => repr_val_big.set(@as(u128, @bitCast(f128_val))),
else => unreachable,
}
@@ -1103,11 +1103,11 @@ pub const DeclGen = struct {
if (std.math.isNan(f128_val)) switch (bits) {
// We only actually need to pass the significand, but it will get
// properly masked anyway, so just pass the whole value.
- 16 => try writer.print("\"0x{x}\"", .{@bitCast(u16, val.toFloat(f16, mod))}),
- 32 => try writer.print("\"0x{x}\"", .{@bitCast(u32, val.toFloat(f32, mod))}),
- 64 => try writer.print("\"0x{x}\"", .{@bitCast(u64, val.toFloat(f64, mod))}),
- 80 => try writer.print("\"0x{x}\"", .{@bitCast(u80, val.toFloat(f80, mod))}),
- 128 => try writer.print("\"0x{x}\"", .{@bitCast(u128, f128_val)}),
+ 16 => try writer.print("\"0x{x}\"", .{@as(u16, @bitCast(val.toFloat(f16, mod)))}),
+ 32 => try writer.print("\"0x{x}\"", .{@as(u32, @bitCast(val.toFloat(f32, mod)))}),
+ 64 => try writer.print("\"0x{x}\"", .{@as(u64, @bitCast(val.toFloat(f64, mod)))}),
+ 80 => try writer.print("\"0x{x}\"", .{@as(u80, @bitCast(val.toFloat(f80, mod)))}),
+ 128 => try writer.print("\"0x{x}\"", .{@as(u128, @bitCast(f128_val))}),
else => unreachable,
};
try writer.writeAll(", ");
@@ -1225,11 +1225,11 @@ pub const DeclGen = struct {
var index: usize = 0;
while (index < ai.len) : (index += 1) {
const elem_val = try val.elemValue(mod, index);
- const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod));
+ const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @as(u8, @intCast(elem_val.toUnsignedInt(mod)));
try literal.writeChar(elem_val_u8);
}
if (ai.sentinel) |s| {
- const s_u8 = @intCast(u8, s.toUnsignedInt(mod));
+ const s_u8 = @as(u8, @intCast(s.toUnsignedInt(mod)));
if (s_u8 != 0) try literal.writeChar(s_u8);
}
try literal.end();
@@ -1239,7 +1239,7 @@ pub const DeclGen = struct {
while (index < ai.len) : (index += 1) {
if (index != 0) try writer.writeByte(',');
const elem_val = try val.elemValue(mod, index);
- const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod));
+ const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @as(u8, @intCast(elem_val.toUnsignedInt(mod)));
try writer.print("'\\x{x}'", .{elem_val_u8});
}
if (ai.sentinel) |s| {
@@ -1840,7 +1840,7 @@ pub const DeclGen = struct {
decl.ty,
.{ .decl = decl_index },
CQualifiers.init(.{ .@"const" = variable.is_const }),
- @intCast(u32, decl.alignment.toByteUnits(0)),
+ @as(u32, @intCast(decl.alignment.toByteUnits(0))),
.complete,
);
try fwd_decl_writer.writeAll(";\n");
@@ -1907,7 +1907,7 @@ pub const DeclGen = struct {
const mod = dg.module;
const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{
.signedness = .unsigned,
- .bits = @intCast(u16, ty.bitSize(mod)),
+ .bits = @as(u16, @intCast(ty.bitSize(mod))),
};
if (is_big) try writer.print(", {}", .{int_info.signedness == .signed});
@@ -2481,7 +2481,7 @@ fn genExports(o: *Object) !void {
if (mod.decl_exports.get(o.dg.decl_index.unwrap().?)) |exports| {
for (exports.items[1..], 1..) |@"export", i| {
try fwd_decl_writer.writeAll("zig_export(");
- try o.dg.renderFunctionSignature(fwd_decl_writer, o.dg.decl_index.unwrap().?, .forward, .{ .export_index = @intCast(u32, i) });
+ try o.dg.renderFunctionSignature(fwd_decl_writer, o.dg.decl_index.unwrap().?, .forward, .{ .export_index = @as(u32, @intCast(i)) });
try fwd_decl_writer.print(", {s}, {s});\n", .{
fmtStringLiteral(ip.stringToSlice(exports.items[0].opts.name), null),
fmtStringLiteral(ip.stringToSlice(@"export".opts.name), null),
@@ -2510,7 +2510,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
try o.dg.renderTypeAndName(w, enum_ty, .{ .identifier = "tag" }, Const, 0, .complete);
try w.writeAll(") {\n switch (tag) {\n");
for (enum_ty.enumFields(mod), 0..) |name_ip, index_usize| {
- const index = @intCast(u32, index_usize);
+ const index = @as(u32, @intCast(index_usize));
const name = mod.intern_pool.stringToSlice(name_ip);
const tag_val = try mod.enumValueFieldIndex(enum_ty, index);
@@ -2783,7 +2783,7 @@ fn genBodyResolveState(f: *Function, inst: Air.Inst.Index, leading_deaths: []con
// Remember how many locals there were before entering the body so that we can free any that
// were newly introduced. Any new locals must necessarily be logically free after the then
// branch is complete.
- const pre_locals_len = @intCast(LocalIndex, f.locals.items.len);
+ const pre_locals_len = @as(LocalIndex, @intCast(f.locals.items.len));
for (leading_deaths) |death| {
try die(f, inst, Air.indexToRef(death));
@@ -2804,7 +2804,7 @@ fn genBodyResolveState(f: *Function, inst: Air.Inst.Index, leading_deaths: []con
// them, unless they were used to store allocs.
for (pre_locals_len..f.locals.items.len) |local_i| {
- const local_index = @intCast(LocalIndex, local_i);
+ const local_index = @as(LocalIndex, @intCast(local_i));
if (f.allocs.contains(local_index)) {
continue;
}
@@ -3364,7 +3364,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
const bit_offset_val = try mod.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset);
- const field_ty = try mod.intType(.unsigned, @intCast(u16, src_ty.bitSize(mod)));
+ const field_ty = try mod.intType(.unsigned, @as(u16, @intCast(src_ty.bitSize(mod))));
try f.writeCValue(writer, local, .Other);
try v.elem(f, writer);
@@ -3667,7 +3667,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
var mask = try BigInt.Managed.initCapacity(stack.get(), BigInt.calcTwosCompLimbCount(host_bits));
defer mask.deinit();
- try mask.setTwosCompIntLimit(.max, .unsigned, @intCast(usize, src_bits));
+ try mask.setTwosCompIntLimit(.max, .unsigned, @as(usize, @intCast(src_bits)));
try mask.shiftLeft(&mask, ptr_info.packed_offset.bit_offset);
try mask.bitNotWrap(&mask, .unsigned, host_bits);
@@ -4096,7 +4096,7 @@ fn airCall(
const pl_op = f.air.instructions.items(.data)[inst].pl_op;
const extra = f.air.extraData(Air.Call, pl_op.payload);
- const args = @ptrCast([]const Air.Inst.Ref, f.air.extra[extra.end..][0..extra.data.args_len]);
+ const args = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[extra.end..][0..extra.data.args_len]));
const resolved_args = try gpa.alloc(CValue, args.len);
defer gpa.free(resolved_args);
@@ -4537,7 +4537,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca
wrap_cty = elem_cty.toSignedness(dest_info.signedness);
need_bitcasts = wrap_cty.?.tag() == .zig_i128;
bits -= 1;
- bits %= @intCast(u16, f.byteSize(elem_cty) * 8);
+ bits %= @as(u16, @intCast(f.byteSize(elem_cty) * 8));
bits += 1;
}
try writer.writeAll(" = ");
@@ -4711,7 +4711,7 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue {
var extra_index: usize = switch_br.end;
for (0..switch_br.data.cases_len) |case_i| {
const case = f.air.extraData(Air.SwitchBr.Case, extra_index);
- const items = @ptrCast([]const Air.Inst.Ref, f.air.extra[case.end..][0..case.data.items_len]);
+ const items = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[case.end..][0..case.data.items_len]));
const case_body = f.air.extra[case.end + items.len ..][0..case.data.body_len];
extra_index = case.end + case.data.items_len + case_body.len;
@@ -4771,13 +4771,13 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
const mod = f.object.dg.module;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const extra = f.air.extraData(Air.Asm, ty_pl.payload);
- const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0;
- const clobbers_len = @truncate(u31, extra.data.flags);
+ const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0;
+ const clobbers_len = @as(u31, @truncate(extra.data.flags));
const gpa = f.object.dg.gpa;
var extra_i: usize = extra.end;
- const outputs = @ptrCast([]const Air.Inst.Ref, f.air.extra[extra_i..][0..extra.data.outputs_len]);
+ const outputs = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[extra_i..][0..extra.data.outputs_len]));
extra_i += outputs.len;
- const inputs = @ptrCast([]const Air.Inst.Ref, f.air.extra[extra_i..][0..extra.data.inputs_len]);
+ const inputs = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[extra_i..][0..extra.data.inputs_len]));
extra_i += inputs.len;
const result = result: {
@@ -4794,7 +4794,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
break :local local;
} else .none;
- const locals_begin = @intCast(LocalIndex, f.locals.items.len);
+ const locals_begin = @as(LocalIndex, @intCast(f.locals.items.len));
const constraints_extra_begin = extra_i;
for (outputs) |output| {
const extra_bytes = mem.sliceAsBytes(f.air.extra[extra_i..]);
@@ -5402,7 +5402,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
inst_ty.intInfo(mod).signedness
else
.unsigned;
- const field_int_ty = try mod.intType(field_int_signedness, @intCast(u16, inst_ty.bitSize(mod)));
+ const field_int_ty = try mod.intType(field_int_signedness, @as(u16, @intCast(inst_ty.bitSize(mod))));
const temp_local = try f.allocLocal(inst, field_int_ty);
try f.writeCValue(writer, temp_local, .Other);
@@ -6033,7 +6033,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
try reap(f, inst, &.{ extra.ptr, extra.expected_value, extra.new_value });
const repr_ty = if (ty.isRuntimeFloat())
- mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable
+ mod.intType(.unsigned, @as(u16, @intCast(ty.abiSize(mod) * 8))) catch unreachable
else
ty;
@@ -6136,7 +6136,7 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
const operand_mat = try Materialize.start(f, inst, writer, ty, operand);
try reap(f, inst, &.{ pl_op.operand, extra.operand });
- const repr_bits = @intCast(u16, ty.abiSize(mod) * 8);
+ const repr_bits = @as(u16, @intCast(ty.abiSize(mod) * 8));
const is_float = ty.isRuntimeFloat();
const is_128 = repr_bits == 128;
const repr_ty = if (is_float) mod.intType(.unsigned, repr_bits) catch unreachable else ty;
@@ -6186,7 +6186,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const ty = ptr_ty.childType(mod);
const repr_ty = if (ty.isRuntimeFloat())
- mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable
+ mod.intType(.unsigned, @as(u16, @intCast(ty.abiSize(mod) * 8))) catch unreachable
else
ty;
@@ -6226,7 +6226,7 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
const repr_ty = if (ty.isRuntimeFloat())
- mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable
+ mod.intType(.unsigned, @as(u16, @intCast(ty.abiSize(mod) * 8))) catch unreachable
else
ty;
@@ -6574,7 +6574,7 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll("] = ");
const mask_elem = (try mask.elemValue(mod, index)).toSignedInt(mod);
- const src_val = try mod.intValue(Type.usize, @intCast(u64, mask_elem ^ mask_elem >> 63));
+ const src_val = try mod.intValue(Type.usize, @as(u64, @intCast(mask_elem ^ mask_elem >> 63)));
try f.writeCValue(writer, if (mask_elem >= 0) lhs else rhs, .Other);
try writer.writeByte('[');
@@ -6745,8 +6745,8 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
const ip = &mod.intern_pool;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const inst_ty = f.typeOfIndex(inst);
- const len = @intCast(usize, inst_ty.arrayLen(mod));
- const elements = @ptrCast([]const Air.Inst.Ref, f.air.extra[ty_pl.payload..][0..len]);
+ const len = @as(usize, @intCast(inst_ty.arrayLen(mod)));
+ const elements = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[ty_pl.payload..][0..len]));
const gpa = f.object.dg.gpa;
const resolved_elements = try gpa.alloc(CValue, elements.len);
defer gpa.free(resolved_elements);
@@ -7387,7 +7387,7 @@ fn fmtStringLiteral(str: []const u8, sentinel: ?u8) std.fmt.Formatter(formatStri
fn undefPattern(comptime IntType: type) IntType {
const int_info = @typeInfo(IntType).Int;
const UnsignedType = std.meta.Int(.unsigned, int_info.bits);
- return @bitCast(IntType, @as(UnsignedType, (1 << (int_info.bits | 1)) / 3));
+ return @as(IntType, @bitCast(@as(UnsignedType, (1 << (int_info.bits | 1)) / 3)));
}
const FormatIntLiteralContext = struct {
@@ -7438,7 +7438,7 @@ fn formatIntLiteral(
} else data.val.toBigInt(&int_buf, mod);
assert(int.fitsInTwosComp(data.int_info.signedness, data.int_info.bits));
- const c_bits = @intCast(usize, data.cty.byteSize(data.dg.ctypes.set, target) * 8);
+ const c_bits = @as(usize, @intCast(data.cty.byteSize(data.dg.ctypes.set, target) * 8));
var one_limbs: [BigInt.calcLimbLen(1)]BigIntLimb = undefined;
const one = BigInt.Mutable.init(&one_limbs, 1).toConst();
@@ -7471,7 +7471,7 @@ fn formatIntLiteral(
const array_data = data.cty.castTag(.array).?.data;
break :info .{
.cty = data.dg.indexToCType(array_data.elem_type),
- .count = @intCast(usize, array_data.len),
+ .count = @as(usize, @intCast(array_data.len)),
.endian = target.cpu.arch.endian(),
.homogeneous = true,
};
@@ -7527,7 +7527,7 @@ fn formatIntLiteral(
var c_limb_int_info = std.builtin.Type.Int{
.signedness = undefined,
- .bits = @intCast(u16, @divExact(c_bits, c_limb_info.count)),
+ .bits = @as(u16, @intCast(@divExact(c_bits, c_limb_info.count))),
};
var c_limb_cty: CType = undefined;
@@ -7727,7 +7727,7 @@ fn lowerFnRetTy(ret_ty: Type, mod: *Module) !Type {
fn lowersToArray(ty: Type, mod: *Module) bool {
return switch (ty.zigTypeTag(mod)) {
.Array, .Vector => return true,
- else => return ty.isAbiInt(mod) and toCIntBits(@intCast(u32, ty.bitSize(mod))) == null,
+ else => return ty.isAbiInt(mod) and toCIntBits(@as(u32, @intCast(ty.bitSize(mod)))) == null,
};
}
@@ -7735,7 +7735,7 @@ fn reap(f: *Function, inst: Air.Inst.Index, operands: []const Air.Inst.Ref) !voi
assert(operands.len <= Liveness.bpi - 1);
var tomb_bits = f.liveness.getTombBits(inst);
for (operands) |operand| {
- const dies = @truncate(u1, tomb_bits) != 0;
+ const dies = @as(u1, @truncate(tomb_bits)) != 0;
tomb_bits >>= 1;
if (!dies) continue;
try die(f, inst, operand);