author     Andrew Kelley <andrew@ziglang.org>  2025-07-03 18:30:07 -0700
committer  Andrew Kelley <andrew@ziglang.org>  2025-07-07 22:43:52 -0700
commit     30c2921eb87c3157d52edd7d8ee874209a0f7538 (patch)
tree       876d4864abe53e1b43afa87b6e0f61572179ff86  /src/arch/wasm/CodeGen.zig
parent     d09b99d043cc097de569fb32938a423342490a83 (diff)
compiler: update a bunch of format strings
Diffstat (limited to 'src/arch/wasm/CodeGen.zig')
-rw-r--r--  src/arch/wasm/CodeGen.zig | 42
1 file changed, 18 insertions(+), 24 deletions(-)
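
Context for the diff below: this commit tracks the std.fmt rework in recent Zig, in which "{}" no longer implicitly invokes a type's custom format method. Call sites that print a Type via ty.fmt(pt) therefore switch to the explicit "{f}" specifier, and plain enum operands are printed with "{s}". The sketch that follows is illustrative only; the Point type and the exact format-method signature are assumptions and may vary between Zig versions.

const std = @import("std");

// Hypothetical example type (not from this commit) with a custom formatter.
const Point = struct {
    x: i32,
    y: i32,

    // Under the reworked std.fmt, a method like this is invoked only by the
    // "{f}" specifier; the signature below assumes the *std.Io.Writer
    // interface and may differ in other Zig versions.
    pub fn format(p: Point, writer: *std.Io.Writer) std.Io.Writer.Error!void {
        try writer.print("({d}, {d})", .{ p.x, p.y });
    }
};

pub fn main() void {
    const p: Point = .{ .x = 3, .y = 4 };
    // Previously, std.debug.print("p = {}\n", .{p}) would call Point.format;
    // now "{f}" selects the custom formatter explicitly.
    std.debug.print("p = {f}\n", .{p});
}
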
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index e396f69af3..4a36eed402 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1463,7 +1463,7 @@ fn allocStack(cg: *CodeGen, ty: Type) !WValue {
}
const abi_size = std.math.cast(u32, ty.abiSize(zcu)) orelse {
- return cg.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
+ return cg.fail("Type {f} with ABI size of {d} exceeds stack frame size", .{
ty.fmt(pt), ty.abiSize(zcu),
});
};
@@ -1497,7 +1497,7 @@ fn allocStackPtr(cg: *CodeGen, inst: Air.Inst.Index) !WValue {
const abi_alignment = ptr_ty.ptrAlignment(zcu);
const abi_size = std.math.cast(u32, pointee_ty.abiSize(zcu)) orelse {
- return cg.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
+ return cg.fail("Type {f} with ABI size of {d} exceeds stack frame size", .{
pointee_ty.fmt(pt), pointee_ty.abiSize(zcu),
});
};
@@ -2046,7 +2046,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
try cg.genInst(inst);
if (std.debug.runtime_safety and cg.air_bookkeeping < old_bookkeeping_value + 1) {
- std.debug.panic("Missing call to `finishAir` in AIR instruction %{d} ('{}')", .{
+ std.debug.panic("Missing call to `finishAir` in AIR instruction %{d} ('{s}')", .{
inst,
cg.air.instructions.items(.tag)[@intFromEnum(inst)],
});
@@ -2404,10 +2404,7 @@ fn store(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErr
try cg.memcpy(lhs, rhs, .{ .imm32 = @as(u32, @intCast(ty.abiSize(zcu))) });
},
else => if (abi_size > 8) {
- return cg.fail("TODO: `store` for type `{}` with abisize `{d}`", .{
- ty.fmt(pt),
- abi_size,
- });
+ return cg.fail("TODO: `store` for type `{f}` with abisize `{d}`", .{ ty.fmt(pt), abi_size });
},
}
try cg.emitWValue(lhs);
@@ -2596,10 +2593,7 @@ fn binOp(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WV
if (ty.zigTypeTag(zcu) == .int) {
return cg.binOpBigInt(lhs, rhs, ty, op);
} else {
- return cg.fail(
- "TODO: Implement binary operation for type: {}",
- .{ty.fmt(pt)},
- );
+ return cg.fail("TODO: Implement binary operation for type: {f}", .{ty.fmt(pt)});
}
}
@@ -2817,7 +2811,7 @@ fn airAbs(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
switch (scalar_ty.zigTypeTag(zcu)) {
.int => if (ty.zigTypeTag(zcu) == .vector) {
- return cg.fail("TODO implement airAbs for {}", .{ty.fmt(pt)});
+ return cg.fail("TODO implement airAbs for {f}", .{ty.fmt(pt)});
} else {
const int_bits = ty.intInfo(zcu).bits;
const wasm_bits = toWasmBits(int_bits) orelse {
@@ -3244,7 +3238,7 @@ fn lowerConstant(cg: *CodeGen, val: Value, ty: Type) InnerError!WValue {
return .{ .imm32 = @intFromBool(!val.isNull(zcu)) };
},
.aggregate => switch (ip.indexToKey(ty.ip_index)) {
- .array_type => return cg.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(pt)}),
+ .array_type => return cg.fail("Wasm TODO: LowerConstant for {f}", .{ty.fmt(pt)}),
.vector_type => {
assert(determineSimdStoreStrategy(ty, zcu, cg.target) == .direct);
var buf: [16]u8 = undefined;
@@ -3332,7 +3326,7 @@ fn emitUndefined(cg: *CodeGen, ty: Type) InnerError!WValue {
},
else => unreachable,
},
- else => return cg.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag(zcu)}),
+ else => return cg.fail("Wasm TODO: emitUndefined for type: {s}\n", .{ty.zigTypeTag(zcu)}),
}
}
@@ -3608,7 +3602,7 @@ fn airNot(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
} else {
const int_info = operand_ty.intInfo(zcu);
const wasm_bits = toWasmBits(int_info.bits) orelse {
- return cg.fail("TODO: Implement binary NOT for {}", .{operand_ty.fmt(pt)});
+ return cg.fail("TODO: Implement binary NOT for {f}", .{operand_ty.fmt(pt)});
};
switch (wasm_bits) {
@@ -3874,7 +3868,7 @@ fn airStructFieldVal(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
},
else => result: {
const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, zcu)) orelse {
- return cg.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(pt)});
+ return cg.fail("Field type '{f}' too big to fit into stack frame", .{field_ty.fmt(pt)});
};
if (isByRef(field_ty, zcu, cg.target)) {
switch (operand) {
@@ -4360,7 +4354,7 @@ fn isNull(cg: *CodeGen, operand: WValue, optional_ty: Type, opcode: std.wasm.Opc
// a pointer to the stack value
if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
const offset = std.math.cast(u32, payload_ty.abiSize(zcu)) orelse {
- return cg.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(pt)});
+ return cg.fail("Optional type {f} too big to fit into stack frame", .{optional_ty.fmt(pt)});
};
try cg.addMemArg(.i32_load8_u, .{ .offset = operand.offset() + offset, .alignment = 1 });
}
@@ -4430,7 +4424,7 @@ fn airOptionalPayloadPtrSet(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void
}
const offset = std.math.cast(u32, payload_ty.abiSize(zcu)) orelse {
- return cg.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(pt)});
+ return cg.fail("Optional type {f} too big to fit into stack frame", .{opt_ty.fmt(pt)});
};
try cg.emitWValue(operand);
@@ -4462,7 +4456,7 @@ fn airWrapOptional(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :result cg.reuseOperand(ty_op.operand, operand);
}
const offset = std.math.cast(u32, payload_ty.abiSize(zcu)) orelse {
- return cg.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(pt)});
+ return cg.fail("Optional type {f} too big to fit into stack frame", .{op_ty.fmt(pt)});
};
// Create optional type, set the non-null bit, and store the operand inside the optional type
@@ -6196,7 +6190,7 @@ fn airMulWithOverflow(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
_ = try cg.load(overflow_ret, Type.i32, 0);
try cg.addLocal(.local_set, overflow_bit.local.value);
break :blk res;
- } else return cg.fail("TODO: @mulWithOverflow for {}", .{ty.fmt(pt)});
+ } else return cg.fail("TODO: @mulWithOverflow for {f}", .{ty.fmt(pt)});
var bin_op_local = try mul.toLocal(cg, ty);
defer bin_op_local.free(cg);
@@ -6749,7 +6743,7 @@ fn airMod(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const add = try cg.binOp(rem, rhs, ty, .add);
break :result try cg.binOp(add, rhs, ty, .rem);
}
- return cg.fail("TODO: @mod for {}", .{ty.fmt(pt)});
+ return cg.fail("TODO: @mod for {f}", .{ty.fmt(pt)});
};
return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
@@ -6767,7 +6761,7 @@ fn airSatMul(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const lhs = try cg.resolveInst(bin_op.lhs);
const rhs = try cg.resolveInst(bin_op.rhs);
const wasm_bits = toWasmBits(int_info.bits) orelse {
- return cg.fail("TODO: mul_sat for {}", .{ty.fmt(pt)});
+ return cg.fail("TODO: mul_sat for {f}", .{ty.fmt(pt)});
};
switch (wasm_bits) {
@@ -6804,7 +6798,7 @@ fn airSatMul(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
},
64 => {
if (!(int_info.bits == 64 and int_info.signedness == .signed)) {
- return cg.fail("TODO: mul_sat for {}", .{ty.fmt(pt)});
+ return cg.fail("TODO: mul_sat for {f}", .{ty.fmt(pt)});
}
const overflow_ret = try cg.allocStack(Type.i32);
_ = try cg.callIntrinsic(
@@ -6822,7 +6816,7 @@ fn airSatMul(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
},
128 => {
if (!(int_info.bits == 128 and int_info.signedness == .signed)) {
- return cg.fail("TODO: mul_sat for {}", .{ty.fmt(pt)});
+ return cg.fail("TODO: mul_sat for {f}", .{ty.fmt(pt)});
}
const overflow_ret = try cg.allocStack(Type.i32);
const ret = try cg.callIntrinsic(