path: root/src/Sema.zig
author     Jacob Young <jacobly0@users.noreply.github.com>  2025-05-30 00:22:45 -0400
committer  Jacob Young <jacobly0@users.noreply.github.com>  2025-05-31 18:54:28 -0400
commit     b483defc5a5c2f93eb8a445974ab831ae4e4b321 (patch)
tree       0fa6fa6721e8731b294f8aafed6ca8f02a0242ae /src/Sema.zig
parent     c1e9ef9eaabb2219a3762c5957b1c63ad20bf1ed (diff)
Legalize: implement scalarization of binary operations
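
Note: the scalarization named in the commit title lives in the compiler's Legalize pass; the src/Sema.zig hunks shown below (this page is limited to that file) are mostly mechanical cleanup rather than the scalarization logic itself. As a rough, hypothetical sketch of the general idea only, with invented names and not the compiler's actual Legalize code, scalarizing a vector binary operation means lowering one vector op into one scalar op per element:

const std = @import("std");

// Hypothetical illustration: a single vector `+` becomes one scalar `+` per element.
fn addScalarized(comptime len: usize, a: @Vector(len, u32), b: @Vector(len, u32)) @Vector(len, u32) {
    var out: [len]u32 = undefined;
    inline for (0..len) |i| {
        out[i] = a[i] + b[i]; // element-wise scalar add
    }
    return out; // arrays coerce to vectors of the same length and element type
}

pub fn main() void {
    const a: @Vector(4, u32) = .{ 1, 2, 3, 4 };
    const b: @Vector(4, u32) = .{ 10, 20, 30, 40 };
    const sum: [4]u32 = addScalarized(4, a, b);
    std.debug.print("{any}\n", .{sum});
}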
Diffstat (limited to 'src/Sema.zig')
-rw-r--r--  src/Sema.zig  754
1 file changed, 372 insertions, 382 deletions
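
Nearly every hunk below is the same mechanical change: `Type.bool`, `Type.usize`, `Type.fromInterned(...)` and similar are rewritten as decl literals (`.bool`, `.usize`, `.fromInterned(...)`) wherever the result type is already known, for example from an annotated constant or a parameter type. A minimal, self-contained sketch of that pattern using a made-up `Color` type (not the compiler's `Type`); it assumes a Zig version with decl literals (0.14 or later):

const std = @import("std");

const Color = struct {
    r: u8,
    g: u8,
    b: u8,

    // Decls of type `Color`, analogous to `Type.bool`, `Type.usize`, etc.
    pub const red: Color = .{ .r = 255, .g = 0, .b = 0 };
    pub const black: Color = .{ .r = 0, .g = 0, .b = 0 };

    // Analogous to `Type.fromInterned`: a function returning the type itself.
    pub fn gray(level: u8) Color {
        return .{ .r = level, .g = level, .b = level };
    }
};

fn paint(c: Color) void {
    std.debug.print("rgb({d}, {d}, {d})\n", .{ c.r, c.g, c.b });
}

pub fn main() void {
    paint(Color.red); // old spelling: qualified with the type name
    paint(.red); // decl literal: the parameter type supplies the result type
    const bg: Color = .black; // annotating the const gives `.black` a result type
    paint(bg);
    paint(.gray(128)); // decl literals also cover functions returning the type
}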
diff --git a/src/Sema.zig b/src/Sema.zig
index 3fa264be48..34c1bb4df7 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -1881,7 +1881,7 @@ fn analyzeBodyInner(
extra.data.else_body_len,
);
const uncasted_cond = try sema.resolveInst(extra.data.condition);
- const cond = try sema.coerce(block, Type.bool, uncasted_cond, cond_src);
+ const cond = try sema.coerce(block, .bool, uncasted_cond, cond_src);
const cond_val = try sema.resolveConstDefinedValue(
block,
cond_src,
@@ -2012,7 +2012,7 @@ fn resolveConstBool(
reason: ComptimeReason,
) !bool {
const air_inst = try sema.resolveInst(zir_ref);
- const wanted_type = Type.bool;
+ const wanted_type: Type = .bool;
const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src);
const val = try sema.resolveConstDefinedValue(block, src, coerced_inst, reason);
return val.toBool();
@@ -2037,7 +2037,7 @@ pub fn toConstString(
reason: ComptimeReason,
) ![]u8 {
const pt = sema.pt;
- const coerced_inst = try sema.coerce(block, Type.slice_const_u8, air_inst, src);
+ const coerced_inst = try sema.coerce(block, .slice_const_u8, air_inst, src);
const slice_val = try sema.resolveConstDefinedValue(block, src, coerced_inst, reason);
const arr_val = try sema.derefSliceAsArray(block, src, slice_val, reason);
return arr_val.toAllocatedBytes(arr_val.typeOf(pt.zcu), sema.arena, pt);
@@ -2051,7 +2051,7 @@ pub fn resolveConstStringIntern(
reason: ComptimeReason,
) !InternPool.NullTerminatedString {
const air_inst = try sema.resolveInst(zir_ref);
- const wanted_type = Type.slice_const_u8;
+ const wanted_type: Type = .slice_const_u8;
const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src);
const val = try sema.resolveConstDefinedValue(block, src, coerced_inst, reason);
return sema.sliceToIpString(block, src, val, reason);
@@ -2180,7 +2180,7 @@ fn analyzeAsType(
src: LazySrcLoc,
air_inst: Air.Inst.Ref,
) !Type {
- const wanted_type = Type.type;
+ const wanted_type: Type = .type;
const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src);
const val = try sema.resolveConstDefinedValue(block, src, coerced_inst, .{ .simple = .type });
return val.toType();
@@ -2641,7 +2641,7 @@ fn reparentOwnedErrorMsg(
msg.msg = msg_str;
}
-const align_ty = Type.u29;
+const align_ty: Type = .u29;
pub fn analyzeAsAlign(
sema: *Sema,
@@ -2819,7 +2819,7 @@ fn getCaptures(sema: *Sema, block: *Block, type_src: LazySrcLoc, extra_index: us
const pt = sema.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
- const parent_ty = Type.fromInterned(zcu.namespacePtr(block.namespace).owner_type);
+ const parent_ty: Type = .fromInterned(zcu.namespacePtr(block.namespace).owner_type);
const parent_captures: InternPool.CaptureValue.Slice = parent_ty.getCaptures(zcu);
const captures = try sema.arena.alloc(InternPool.CaptureValue, captures_len);
@@ -3777,7 +3777,7 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
const alloc = try sema.resolveInst(inst_data.operand);
const alloc_ty = sema.typeOf(alloc);
const ptr_info = alloc_ty.ptrInfo(zcu);
- const elem_ty = Type.fromInterned(ptr_info.child);
+ const elem_ty: Type = .fromInterned(ptr_info.child);
// If the alloc was created in a comptime scope, we already created a comptime alloc for it.
// However, if the final constructed value does not reference comptime-mutable memory, we wish
@@ -3848,7 +3848,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
const alloc_ty = resolved_alloc_ty orelse sema.typeOf(alloc);
const ptr_info = alloc_ty.ptrInfo(zcu);
- const elem_ty = Type.fromInterned(ptr_info.child);
+ const elem_ty: Type = .fromInterned(ptr_info.child);
const alloc_inst = alloc.toIndex() orelse return null;
const comptime_info = sema.maybe_comptime_allocs.fetchRemove(alloc_inst) orelse return null;
@@ -4024,9 +4024,9 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
// As this is a union field, we must store to the pointer now to set the tag.
// If the payload is OPV, there will not be a payload store, so we store that value.
// Otherwise, there will be a payload store to process later, so undef will suffice.
- const payload_ty = Type.fromInterned(union_obj.field_types.get(&zcu.intern_pool)[idx]);
+ const payload_ty: Type = .fromInterned(union_obj.field_types.get(&zcu.intern_pool)[idx]);
const payload_val = try sema.typeHasOnePossibleValue(payload_ty) orelse try pt.undefValue(payload_ty);
- const tag_val = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), idx);
+ const tag_val = try pt.enumValueFieldIndex(.fromInterned(union_obj.enum_tag_ty), idx);
const store_val = try pt.unionValue(maybe_union_ty, tag_val, payload_val);
try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(decl_parent_ptr), store_val, maybe_union_ty);
}
@@ -4050,7 +4050,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
const air_ptr_inst = store_inst.data.bin_op.lhs.toIndex().?;
const store_val = (try sema.resolveValue(store_inst.data.bin_op.rhs)).?;
const new_ptr = ptr_mapping.get(air_ptr_inst).?;
- try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(new_ptr), store_val, Type.fromInterned(zcu.intern_pool.typeOf(store_val.toIntern())));
+ try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(new_ptr), store_val, .fromInterned(zcu.intern_pool.typeOf(store_val.toIntern())));
},
else => unreachable,
}
@@ -4284,7 +4284,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
else => unreachable,
};
if (zcu.intern_pool.isFuncBody(val)) {
- const ty = Type.fromInterned(zcu.intern_pool.typeOf(val));
+ const ty: Type = .fromInterned(zcu.intern_pool.typeOf(val));
if (try ty.fnHasRuntimeBitsSema(pt)) {
try sema.addReferenceEntry(block, src, AnalUnit.wrap(.{ .func = val }));
try zcu.ensureFuncBodyAnalysisQueued(val);
@@ -4447,14 +4447,14 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const range_end = try sema.resolveInst(zir_arg_pair[1]);
break :l try sema.analyzeArithmetic(block, .sub, range_end, range_start, arg_src, arg_src, arg_src, true);
};
- const arg_len = try sema.coerce(block, Type.usize, arg_len_uncoerced, arg_src);
+ const arg_len = try sema.coerce(block, .usize, arg_len_uncoerced, arg_src);
if (len == .none) {
len = arg_len;
len_idx = i;
}
if (try sema.resolveDefinedValue(block, src, arg_len)) |arg_val| {
if (len_val) |v| {
- if (!(try sema.valuesEqual(arg_val, v, Type.usize))) {
+ if (!(try sema.valuesEqual(arg_val, v, .usize))) {
const msg = msg: {
const msg = try sema.errMsg(src, "non-matching for loop lengths", .{});
errdefer msg.destroy(gpa);
@@ -5343,7 +5343,7 @@ fn zirValidatePtrArrayInit(
// sentinel-terminated array, the sentinel will not have been populated by
// any ZIR instructions at comptime; we need to do that here.
if (array_ty.sentinel(zcu)) |sentinel_val| {
- const array_len_ref = try pt.intRef(Type.usize, array_len);
+ const array_len_ref = try pt.intRef(.usize, array_len);
const sentinel_ptr = try sema.elemPtrArray(block, init_src, init_src, array_ptr, init_src, array_len_ref, true, true);
const sentinel = Air.internedToRef(sentinel_val.toIntern());
try sema.storePtr2(block, init_src, sentinel_ptr, init_src, sentinel, init_src, .store);
@@ -5828,7 +5828,7 @@ fn zirInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
defer tracy.end();
const int = sema.code.instructions.items(.data)[@intFromEnum(inst)].int;
- return sema.pt.intRef(Type.comptime_int, int);
+ return sema.pt.intRef(.comptime_int, int);
}
fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -5846,7 +5846,7 @@ fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const limbs = try sema.arena.alloc(std.math.big.Limb, int.len);
@memcpy(mem.sliceAsBytes(limbs), limb_bytes);
- return Air.internedToRef((try sema.pt.intValue_big(Type.comptime_int, .{
+ return Air.internedToRef((try sema.pt.intValue_big(.comptime_int, .{
.limbs = limbs,
.positive = true,
})).toIntern());
@@ -5856,7 +5856,7 @@ fn zirFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
_ = block;
const number = sema.code.instructions.items(.data)[@intFromEnum(inst)].float;
return Air.internedToRef((try sema.pt.floatValue(
- Type.comptime_float,
+ .comptime_float,
number,
)).toIntern());
}
@@ -5866,7 +5866,7 @@ fn zirFloat128(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.Float128, inst_data.payload_index).data;
const number = extra.get();
- return Air.internedToRef((try sema.pt.floatValue(Type.comptime_float, number)).toIntern());
+ return Air.internedToRef((try sema.pt.floatValue(.comptime_float, number)).toIntern());
}
fn zirCompileError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
@@ -6641,7 +6641,7 @@ pub fn analyzeExport(
};
const exported_nav = ip.getNav(exported_nav_index);
- const export_ty = Type.fromInterned(exported_nav.typeOf(ip));
+ const export_ty: Type = .fromInterned(exported_nav.typeOf(ip));
if (!try sema.validateExternType(export_ty, .other)) {
return sema.failWithOwnedErrorMsg(block, msg: {
@@ -7005,7 +7005,7 @@ fn lookupInNamespace(
for (usingnamespaces.items) |sub_ns_nav| {
try sema.ensureNavResolved(block, src, sub_ns_nav, .fully);
- const sub_ns_ty = Type.fromInterned(ip.getNav(sub_ns_nav).status.fully_resolved.val);
+ const sub_ns_ty: Type = .fromInterned(ip.getNav(sub_ns_nav).status.fully_resolved.val);
const sub_ns = zcu.namespacePtr(sub_ns_ty.getNamespaceIndex(zcu));
try checked_namespaces.put(gpa, sub_ns, {});
}
@@ -7081,7 +7081,7 @@ pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref
const gpa = sema.gpa;
if (block.isComptime() or block.is_typeof) {
- const index_val = try pt.intValue_u64(Type.usize, sema.comptime_err_ret_trace.items.len);
+ const index_val = try pt.intValue_u64(.usize, sema.comptime_err_ret_trace.items.len);
return Air.internedToRef(index_val.toIntern());
}
@@ -7326,13 +7326,13 @@ fn checkCallArgumentCount(
) !Type {
const pt = sema.pt;
const zcu = pt.zcu;
- const func_ty = func_ty: {
+ const func_ty: Type = func_ty: {
switch (callee_ty.zigTypeTag(zcu)) {
.@"fn" => break :func_ty callee_ty,
.pointer => {
const ptr_info = callee_ty.ptrInfo(zcu);
if (ptr_info.flags.size == .one and Type.fromInterned(ptr_info.child).zigTypeTag(zcu) == .@"fn") {
- break :func_ty Type.fromInterned(ptr_info.child);
+ break :func_ty .fromInterned(ptr_info.child);
}
},
.optional => {
@@ -7405,13 +7405,13 @@ fn callBuiltin(
const pt = sema.pt;
const zcu = pt.zcu;
const callee_ty = sema.typeOf(builtin_fn);
- const func_ty = func_ty: {
+ const func_ty: Type = func_ty: {
switch (callee_ty.zigTypeTag(zcu)) {
.@"fn" => break :func_ty callee_ty,
.pointer => {
const ptr_info = callee_ty.ptrInfo(zcu);
if (ptr_info.flags.size == .one and Type.fromInterned(ptr_info.child).zigTypeTag(zcu) == .@"fn") {
- break :func_ty Type.fromInterned(ptr_info.child);
+ break :func_ty .fromInterned(ptr_info.child);
}
},
else => {},
@@ -7568,7 +7568,7 @@ const CallArgsInfo = union(enum) {
}
}
// Give the arg its result type
- const provide_param_ty = if (maybe_param_ty) |t| t else Type.generic_poison;
+ const provide_param_ty: Type = maybe_param_ty orelse .generic_poison;
sema.inst_map.putAssumeCapacity(zir_call.call_inst, Air.internedToRef(provide_param_ty.toIntern()));
// Resolve the arg!
const uncoerced_arg = try sema.resolveInlineBody(block, arg_body, zir_call.call_inst);
@@ -8353,7 +8353,7 @@ fn handleTailCall(sema: *Sema, block: *Block, call_src: LazySrcLoc, func_ty: Typ
@tagName(backend), @tagName(target.cpu.arch),
});
}
- const owner_func_ty = Type.fromInterned(zcu.funcInfo(sema.owner.unwrap().func).ty);
+ const owner_func_ty: Type = .fromInterned(zcu.funcInfo(sema.owner.unwrap().func).ty);
if (owner_func_ty.toIntern() != func_ty.toIntern()) {
return sema.fail(block, call_src, "unable to perform tail call: type of function being called '{}' does not match type of calling function '{}'", .{
func_ty.fmt(pt), owner_func_ty.fmt(pt),
@@ -8452,7 +8452,7 @@ fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const len_src = block.builtinCallArgSrc(inst_data.src_node, 0);
const elem_type_src = block.builtinCallArgSrc(inst_data.src_node, 1);
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
- const len: u32 = @intCast(try sema.resolveInt(block, len_src, extra.lhs, Type.u32, .{ .simple = .vector_length }));
+ const len: u32 = @intCast(try sema.resolveInt(block, len_src, extra.lhs, .u32, .{ .simple = .vector_length }));
const elem_type = try sema.resolveType(block, elem_type_src, extra.rhs);
try sema.checkVectorElemType(block, elem_type_src, elem_type);
const vector_type = try sema.pt.vectorType(.{
@@ -8470,7 +8470,7 @@ fn zirArrayType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const len_src = block.src(.{ .node_offset_array_type_len = inst_data.src_node });
const elem_src = block.src(.{ .node_offset_array_type_elem = inst_data.src_node });
- const len = try sema.resolveInt(block, len_src, extra.lhs, Type.usize, .{ .simple = .array_length });
+ const len = try sema.resolveInt(block, len_src, extra.lhs, .usize, .{ .simple = .array_length });
const elem_type = try sema.resolveType(block, elem_src, extra.rhs);
try sema.validateArrayElemType(block, elem_type, elem_src);
const array_ty = try sema.pt.arrayType(.{
@@ -8490,7 +8490,7 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil
const len_src = block.src(.{ .node_offset_array_type_len = inst_data.src_node });
const sentinel_src = block.src(.{ .node_offset_array_type_sentinel = inst_data.src_node });
const elem_src = block.src(.{ .node_offset_array_type_elem = inst_data.src_node });
- const len = try sema.resolveInt(block, len_src, extra.len, Type.usize, .{ .simple = .array_length });
+ const len = try sema.resolveInt(block, len_src, extra.len, .usize, .{ .simple = .array_length });
const elem_type = try sema.resolveType(block, elem_src, extra.elem_type);
try sema.validateArrayElemType(block, elem_type, elem_src);
const uncasted_sentinel = try sema.resolveInst(extra.sentinel);
@@ -8599,7 +8599,7 @@ fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
const src = block.nodeOffset(extra.node);
const operand_src = block.builtinCallArgSrc(extra.node, 0);
const uncasted_operand = try sema.resolveInst(extra.operand);
- const operand = try sema.coerce(block, Type.anyerror, uncasted_operand, operand_src);
+ const operand = try sema.coerce(block, .anyerror, uncasted_operand, operand_src);
const err_int_ty = try pt.errorIntType();
if (try sema.resolveValue(operand)) |val| {
@@ -9309,7 +9309,7 @@ fn zirFunc(
const ret_ty: Type = if (extra.data.ret_ty.is_generic)
.generic_poison
else switch (extra.data.ret_ty.body_len) {
- 0 => Type.void,
+ 0 => .void,
1 => blk: {
const ret_ty_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
extra_index += 1;
@@ -9319,7 +9319,7 @@ fn zirFunc(
const ret_ty_body = sema.code.bodySlice(extra_index, extra.data.ret_ty.body_len);
extra_index += ret_ty_body.len;
- const ret_ty_val = try sema.resolveGenericBody(block, ret_ty_src, ret_ty_body, inst, Type.type, .{ .simple = .function_ret_ty });
+ const ret_ty_val = try sema.resolveGenericBody(block, ret_ty_src, ret_ty_body, inst, .type, .{ .simple = .function_ret_ty });
break :blk ret_ty_val.toType();
},
};
@@ -9649,7 +9649,7 @@ fn funcCommon(
var comptime_bits: u32 = 0;
for (block.params.items(.ty), block.params.items(.is_comptime), 0..) |param_ty_ip, param_is_comptime, i| {
- const param_ty = Type.fromInterned(param_ty_ip);
+ const param_ty: Type = .fromInterned(param_ty_ip);
const is_noalias = blk: {
const index = std.math.cast(u5, i) orelse break :blk false;
break :blk @as(u1, @truncate(noalias_bits >> index)) != 0;
@@ -9870,7 +9870,7 @@ fn finishFunc(
const return_type: Type = if (opt_func_index == .none or ret_poison)
bare_return_type
else
- Type.fromInterned(ip.funcTypeReturnType(ip.typeOf(opt_func_index)));
+ .fromInterned(ip.funcTypeReturnType(ip.typeOf(opt_func_index)));
if (!return_type.isValidReturnType(zcu)) {
const opaque_str = if (return_type.zigTypeTag(zcu) == .@"opaque") "opaque " else "";
@@ -10130,14 +10130,14 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
if (try sema.resolveValue(operand)) |operand_val| ct: {
if (!is_vector) {
if (operand_val.isUndef(zcu)) {
- return Air.internedToRef((try pt.undefValue(Type.usize)).toIntern());
+ return .undef_usize;
}
const addr = try operand_val.getUnsignedIntSema(pt) orelse {
// Wasn't an integer pointer. This is a runtime operation.
break :ct;
};
return Air.internedToRef((try pt.intValue(
- Type.usize,
+ .usize,
addr,
)).toIntern());
}
@@ -10145,7 +10145,7 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
for (new_elems, 0..) |*new_elem, i| {
const ptr_val = try operand_val.elemValue(pt, i);
if (ptr_val.isUndef(zcu)) {
- new_elem.* = (try pt.undefValue(Type.usize)).toIntern();
+ new_elem.* = .undef_usize;
continue;
}
const addr = try ptr_val.getUnsignedIntSema(pt) orelse {
@@ -10153,7 +10153,7 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
break :ct;
};
new_elem.* = (try pt.intValue(
- Type.usize,
+ .usize,
addr,
)).toIntern();
}
@@ -10170,7 +10170,7 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
}
const new_elems = try sema.arena.alloc(Air.Inst.Ref, len);
for (new_elems, 0..) |*new_elem, i| {
- const idx_ref = try pt.intRef(Type.usize, i);
+ const idx_ref = try pt.intRef(.usize, i);
const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref);
new_elem.* = try block.addBitCast(.usize, old_elem);
}
@@ -10646,7 +10646,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const vec_len = operand_ty.vectorLen(zcu);
const new_elems = try sema.arena.alloc(Air.Inst.Ref, vec_len);
for (new_elems, 0..) |*new_elem, i| {
- const idx_ref = try pt.intRef(Type.usize, i);
+ const idx_ref = try pt.intRef(.usize, i);
const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref);
new_elem.* = try block.addTyOp(.fptrunc, dest_scalar_ty, old_elem);
}
@@ -10675,7 +10675,7 @@ fn zirElemValNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const array = try sema.resolveInst(extra.lhs);
const uncoerced_elem_index = try sema.resolveInst(extra.rhs);
- const elem_index = try sema.coerce(block, Type.usize, uncoerced_elem_index, elem_index_src);
+ const elem_index = try sema.coerce(block, .usize, uncoerced_elem_index, elem_index_src);
return sema.elemVal(block, src, array, elem_index, elem_index_src, true);
}
@@ -10685,7 +10685,7 @@ fn zirElemValImm(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].elem_val_imm;
const array = try sema.resolveInst(inst_data.operand);
- const elem_index = try sema.pt.intRef(Type.usize, inst_data.idx);
+ const elem_index = try sema.pt.intRef(.usize, inst_data.idx);
return sema.elemVal(block, LazySrcLoc.unneeded, array, elem_index, LazySrcLoc.unneeded, false);
}
@@ -10728,7 +10728,7 @@ fn zirElemPtrNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.lhs);
const uncoerced_elem_index = try sema.resolveInst(extra.rhs);
- const elem_index = try sema.coerce(block, Type.usize, uncoerced_elem_index, elem_index_src);
+ const elem_index = try sema.coerce(block, .usize, uncoerced_elem_index, elem_index_src);
return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src, false, true);
}
@@ -10742,7 +10742,7 @@ fn zirArrayInitElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compile
const src = block.nodeOffset(inst_data.src_node);
const extra = sema.code.extraData(Zir.Inst.ElemPtrImm, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.ptr);
- const elem_index = try pt.intRef(Type.usize, extra.index);
+ const elem_index = try pt.intRef(.usize, extra.index);
const array_ty = sema.typeOf(array_ptr).childType(zcu);
switch (array_ty.zigTypeTag(zcu)) {
.array, .vector => {},
@@ -11104,7 +11104,7 @@ const SwitchProngAnalysis = struct {
if (operand_ty.zigTypeTag(zcu) == .@"union") {
const field_index: u32 = @intCast(operand_ty.unionTagFieldIndex(item_val, zcu).?);
const union_obj = zcu.typeToUnion(operand_ty).?;
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_index]);
if (capture_byref) {
const ptr_field_ty = try pt.ptrTypeSema(.{
.child = field_ty.toIntern(),
@@ -11154,7 +11154,7 @@ const SwitchProngAnalysis = struct {
const first_item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, case_vals[0], undefined) catch unreachable;
const first_field_index: u32 = zcu.unionTagFieldIndex(union_obj, first_item_val).?;
- const first_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[first_field_index]);
+ const first_field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[first_field_index]);
const field_indices = try sema.arena.alloc(u32, case_vals.len);
for (case_vals, field_indices) |item, *field_idx| {
@@ -11165,7 +11165,7 @@ const SwitchProngAnalysis = struct {
// Fast path: if all the operands are the same type already, we don't need to hit
// PTR! This will also allow us to emit simpler code.
const same_types = for (field_indices[1..]) |field_idx| {
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]);
if (!field_ty.eql(first_field_ty, zcu)) break false;
} else true;
@@ -11173,7 +11173,7 @@ const SwitchProngAnalysis = struct {
// We need values to run PTR on, so make a bunch of undef constants.
const dummy_captures = try sema.arena.alloc(Air.Inst.Ref, case_vals.len);
for (dummy_captures, field_indices) |*dummy, field_idx| {
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]);
dummy.* = try pt.undefRef(field_ty);
}
@@ -11208,7 +11208,7 @@ const SwitchProngAnalysis = struct {
// We need values to run PTR on, so make a bunch of undef constants.
const dummy_captures = try sema.arena.alloc(Air.Inst.Ref, case_vals.len);
for (field_indices, dummy_captures) |field_idx, *dummy| {
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]);
const field_ptr_ty = try pt.ptrTypeSema(.{
.child = field_ty.toIntern(),
.flags = .{
@@ -11271,7 +11271,7 @@ const SwitchProngAnalysis = struct {
// If we can, try to avoid that using in-memory coercions.
const first_non_imc = in_mem: {
for (field_indices, 0..) |field_idx, i| {
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]);
if (.ok != try sema.coerceInMemoryAllowed(block, capture_ty, field_ty, false, zcu.getTarget(), LazySrcLoc.unneeded, LazySrcLoc.unneeded, null)) {
break :in_mem i;
}
@@ -11294,7 +11294,7 @@ const SwitchProngAnalysis = struct {
{
const next = first_non_imc + 1;
for (field_indices[next..], next..) |field_idx, i| {
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]);
if (.ok != try sema.coerceInMemoryAllowed(block, capture_ty, field_ty, false, zcu.getTarget(), LazySrcLoc.unneeded, LazySrcLoc.unneeded, null)) {
in_mem_coercible.unset(i);
}
@@ -11341,7 +11341,7 @@ const SwitchProngAnalysis = struct {
};
const field_idx = field_indices[idx];
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]);
const uncoerced = try coerce_block.addStructFieldVal(operand_val, field_idx, field_ty);
const coerced = try sema.coerce(&coerce_block, capture_ty, uncoerced, case_src);
_ = try coerce_block.addBr(capture_block_inst, coerced);
@@ -11365,7 +11365,7 @@ const SwitchProngAnalysis = struct {
const first_imc_item_idx = in_mem_coercible.findFirstSet().?;
const first_imc_field_idx = field_indices[first_imc_item_idx];
- const first_imc_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[first_imc_field_idx]);
+ const first_imc_field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[first_imc_field_idx]);
const uncoerced = try coerce_block.addStructFieldVal(operand_val, first_imc_field_idx, first_imc_field_ty);
const coerced = try coerce_block.addBitCast(capture_ty, uncoerced);
_ = try coerce_block.addBr(capture_block_inst, coerced);
@@ -13165,7 +13165,7 @@ fn analyzeSwitchRuntimeBlock(
for (seen_enum_fields, 0..) |seen_field, index| {
if (seen_field != null) continue;
const union_obj = zcu.typeToUnion(maybe_union_ty).?;
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[index]);
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[index]);
if (field_ty.zigTypeTag(zcu) != .noreturn) break true;
} else false
else
@@ -13490,7 +13490,7 @@ const RangeSetUnhandledIterator = struct {
inline .u64, .i64 => |val_int| {
const next_int = @addWithOverflow(val_int, 1);
if (next_int[1] == 0)
- return (try it.pt.intValue(Type.fromInterned(int.ty), next_int[0])).toIntern();
+ return (try it.pt.intValue(.fromInterned(int.ty), next_int[0])).toIntern();
},
.big_int => {},
.lazy_align, .lazy_size => unreachable,
@@ -13506,7 +13506,7 @@ const RangeSetUnhandledIterator = struct {
);
result_bigint.addScalar(val_bigint, 1);
- return (try it.pt.intValue_big(Type.fromInterned(int.ty), result_bigint.toConst())).toIntern();
+ return (try it.pt.intValue_big(.fromInterned(int.ty), result_bigint.toConst())).toIntern();
}
fn next(it: *RangeSetUnhandledIterator) !?InternPool.Index {
@@ -13636,7 +13636,7 @@ fn validateErrSetSwitch(
.{},
);
}
- return Type.anyerror;
+ return .anyerror;
},
else => |err_set_ty_index| else_validation: {
const error_names = ip.indexToKey(err_set_ty_index).error_set_type.names;
@@ -13839,7 +13839,7 @@ fn validateSwitchItemBool(
item_ref: Zir.Inst.Ref,
item_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
- const item = try sema.resolveSwitchItemVal(block, item_ref, Type.bool, item_src);
+ const item = try sema.resolveSwitchItemVal(block, item_ref, .bool, item_src);
if (Value.fromInterned(item.val).toBool()) {
true_count.* += 1;
} else {
@@ -14224,7 +14224,7 @@ fn zirShl(
return lhs;
}
if (air_tag != .shl_sat and scalar_ty.zigTypeTag(zcu) != .comptime_int) {
- const bit_value = try pt.intValue(Type.comptime_int, scalar_ty.intInfo(zcu).bits);
+ const bit_value = try pt.intValue(.comptime_int, scalar_ty.intInfo(zcu).bits);
if (rhs_ty.zigTypeTag(zcu) == .vector) {
var i: usize = 0;
while (i < rhs_ty.vectorLen(zcu)) : (i += 1) {
@@ -14351,8 +14351,7 @@ fn zirShl(
try block.addReduce(ov_bit, .Or)
else
ov_bit;
- const zero_ov = Air.internedToRef((try pt.intValue(Type.u1, 0)).toIntern());
- const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov);
+ const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, .zero_u1);
try sema.addSafetyCheck(block, src, no_ov, .shl_overflow);
return sema.tupleFieldValByIndex(block, op_ov, 0, op_ov_tuple_ty);
@@ -14406,7 +14405,7 @@ fn zirShr(
return lhs;
}
if (scalar_ty.zigTypeTag(zcu) != .comptime_int) {
- const bit_value = try pt.intValue(Type.comptime_int, scalar_ty.intInfo(zcu).bits);
+ const bit_value = try pt.intValue(.comptime_int, scalar_ty.intInfo(zcu).bits);
if (rhs_ty.zigTypeTag(zcu) == .vector) {
var i: usize = 0;
while (i < rhs_ty.vectorLen(zcu)) : (i += 1) {
@@ -14689,7 +14688,7 @@ fn analyzeTupleCat(
try sema.tupleFieldValByIndex(block, rhs, i, rhs_ty);
}
- return block.addAggregateInit(Type.fromInterned(tuple_ty), element_refs);
+ return block.addAggregateInit(.fromInterned(tuple_ty), element_refs);
}
fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -14716,7 +14715,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const rhs_src = block.src(.{ .node_offset_bin_rhs = inst_data.src_node });
const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, rhs_ty) orelse lhs_info: {
- if (lhs_is_tuple) break :lhs_info @as(Type.ArrayInfo, undefined);
+ if (lhs_is_tuple) break :lhs_info undefined;
return sema.fail(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(pt)});
};
const rhs_info = try sema.getArrayCatInfo(block, rhs_src, rhs, lhs_ty) orelse {
@@ -14892,7 +14891,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// lhs_dest_slice = dest[0..lhs.len]
const slice_ty_ref = Air.internedToRef(slice_ty.toIntern());
- const lhs_len_ref = try pt.intRef(Type.usize, lhs_len);
+ const lhs_len_ref = try pt.intRef(.usize, lhs_len);
const lhs_dest_slice = try block.addInst(.{
.tag = .slice,
.data = .{ .ty_pl = .{
@@ -14907,7 +14906,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
_ = try block.addBinOp(.memcpy, lhs_dest_slice, lhs);
// rhs_dest_slice = dest[lhs.len..][0..rhs.len]
- const rhs_len_ref = try pt.intRef(Type.usize, rhs_len);
+ const rhs_len_ref = try pt.intRef(.usize, rhs_len);
const rhs_dest_offset = try block.addInst(.{
.tag = .ptr_add,
.data = .{ .ty_pl = .{
@@ -14932,7 +14931,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
_ = try block.addBinOp(.memcpy, rhs_dest_slice, rhs);
if (res_sent_val) |sent_val| {
- const elem_index = try pt.intRef(Type.usize, result_len);
+ const elem_index = try pt.intRef(.usize, result_len);
const elem_ptr = try block.addPtrElemPtr(mutable_alloc, elem_index, elem_ptr_ty);
const init = Air.internedToRef((try pt.getCoerced(sent_val, lhs_info.elem_type)).toIntern());
try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store);
@@ -14943,7 +14942,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
var elem_i: u32 = 0;
while (elem_i < lhs_len) : (elem_i += 1) {
- const elem_index = try pt.intRef(Type.usize, elem_i);
+ const elem_index = try pt.intRef(.usize, elem_i);
const elem_ptr = try block.addPtrElemPtr(mutable_alloc, elem_index, elem_ptr_ty);
const operand_src = block.src(.{ .array_cat_lhs = .{
.array_cat_offset = inst_data.src_node,
@@ -14954,8 +14953,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
while (elem_i < result_len) : (elem_i += 1) {
const rhs_elem_i = elem_i - lhs_len;
- const elem_index = try pt.intRef(Type.usize, elem_i);
- const rhs_index = try pt.intRef(Type.usize, rhs_elem_i);
+ const elem_index = try pt.intRef(.usize, elem_i);
+ const rhs_index = try pt.intRef(.usize, rhs_elem_i);
const elem_ptr = try block.addPtrElemPtr(mutable_alloc, elem_index, elem_ptr_ty);
const operand_src = block.src(.{ .array_cat_rhs = .{
.array_cat_offset = inst_data.src_node,
@@ -14965,7 +14964,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try sema.storePtr2(block, src, elem_ptr, src, init, operand_src, .store);
}
if (res_sent_val) |sent_val| {
- const elem_index = try pt.intRef(Type.usize, result_len);
+ const elem_index = try pt.intRef(.usize, result_len);
const elem_ptr = try block.addPtrElemPtr(mutable_alloc, elem_index, elem_ptr_ty);
const init = Air.internedToRef((try pt.getCoerced(sent_val, lhs_info.elem_type)).toIntern());
try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store);
@@ -14978,7 +14977,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
{
var elem_i: u32 = 0;
while (elem_i < lhs_len) : (elem_i += 1) {
- const index = try pt.intRef(Type.usize, elem_i);
+ const index = try pt.intRef(.usize, elem_i);
const operand_src = block.src(.{ .array_cat_lhs = .{
.array_cat_offset = inst_data.src_node,
.elem_index = elem_i,
@@ -14988,7 +14987,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
while (elem_i < result_len) : (elem_i += 1) {
const rhs_elem_i = elem_i - lhs_len;
- const index = try pt.intRef(Type.usize, rhs_elem_i);
+ const index = try pt.intRef(.usize, rhs_elem_i);
const operand_src = block.src(.{ .array_cat_rhs = .{
.array_cat_offset = inst_data.src_node,
.elem_index = @intCast(rhs_elem_i),
@@ -15012,8 +15011,8 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins
switch (ptr_info.flags.size) {
.slice => {
const val = try sema.resolveConstDefinedValue(block, src, operand, .{ .simple = .slice_cat_operand });
- return Type.ArrayInfo{
- .elem_type = Type.fromInterned(ptr_info.child),
+ return .{
+ .elem_type = .fromInterned(ptr_info.child),
.sentinel = switch (ptr_info.sentinel) {
.none => null,
else => Value.fromInterned(ptr_info.sentinel),
@@ -15113,7 +15112,7 @@ fn analyzeTupleMul(
@memcpy(element_refs[tuple_len * i ..][0..tuple_len], element_refs[0..tuple_len]);
}
- return block.addAggregateInit(Type.fromInterned(tuple_ty), element_refs);
+ return block.addAggregateInit(.fromInterned(tuple_ty), element_refs);
}
fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -15166,7 +15165,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (lhs_ty.isTuple(zcu)) {
// In `**` rhs must be comptime-known, but lhs can be runtime-known
- const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize, .{ .simple = .array_mul_factor });
+ const factor = try sema.resolveInt(block, rhs_src, extra.rhs, .usize, .{ .simple = .array_mul_factor });
const factor_casted = try sema.usizeCast(block, rhs_src, factor);
return sema.analyzeTupleMul(block, inst_data.src_node, lhs, factor_casted);
}
@@ -15188,7 +15187,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
};
// In `**` rhs must be comptime-known, but lhs can be runtime-known
- const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize, .{ .simple = .array_mul_factor });
+ const factor = try sema.resolveInt(block, rhs_src, extra.rhs, .usize, .{ .simple = .array_mul_factor });
const result_len_u64 = std.math.mul(u64, lhs_info.len, factor) catch
return sema.fail(block, rhs_src, "operation results in overflow", .{});
@@ -15246,7 +15245,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// to get the same elem values.
const lhs_vals = try sema.arena.alloc(Air.Inst.Ref, lhs_len);
for (lhs_vals, 0..) |*lhs_val, idx| {
- const idx_ref = try pt.intRef(Type.usize, idx);
+ const idx_ref = try pt.intRef(.usize, idx);
lhs_val.* = try sema.elemVal(block, lhs_src, lhs, idx_ref, src, false);
}
@@ -15267,14 +15266,14 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
var elem_i: usize = 0;
while (elem_i < result_len) {
for (lhs_vals) |lhs_val| {
- const elem_index = try pt.intRef(Type.usize, elem_i);
+ const elem_index = try pt.intRef(.usize, elem_i);
const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
try sema.storePtr2(block, src, elem_ptr, src, lhs_val, lhs_src, .store);
elem_i += 1;
}
}
if (lhs_info.sentinel) |sent_val| {
- const elem_index = try pt.intRef(Type.usize, result_len);
+ const elem_index = try pt.intRef(.usize, result_len);
const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
const init = Air.internedToRef(sent_val.toIntern());
try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store);
@@ -16132,14 +16131,13 @@ fn zirOverflowArithmetic(
const maybe_rhs_val = try sema.resolveValue(rhs);
const tuple_ty = try sema.overflowArithmeticTupleType(dest_ty);
- const overflow_ty = Type.fromInterned(ip.indexToKey(tuple_ty.toIntern()).tuple_type.types.get(ip)[1]);
+ const overflow_ty: Type = .fromInterned(ip.indexToKey(tuple_ty.toIntern()).tuple_type.types.get(ip)[1]);
var result: struct {
inst: Air.Inst.Ref = .none,
wrapped: Value = Value.@"unreachable",
overflow_bit: Value,
} = result: {
- const zero_bit = try pt.intValue(Type.u1, 0);
switch (zir_tag) {
.add_with_overflow => {
// If either of the arguments is zero, `false` is returned and the other is stored
@@ -16147,12 +16145,12 @@ fn zirOverflowArithmetic(
// Otherwise, if either of the argument is undefined, undefined is returned.
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(zcu) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) {
- break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
+ break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = rhs };
}
}
if (maybe_rhs_val) |rhs_val| {
if (!rhs_val.isUndef(zcu) and (try rhs_val.compareAllWithZeroSema(.eq, pt))) {
- break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
+ break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = lhs };
}
}
if (maybe_lhs_val) |lhs_val| {
@@ -16173,7 +16171,7 @@ fn zirOverflowArithmetic(
if (rhs_val.isUndef(zcu)) {
break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
} else if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
- break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
+ break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = lhs };
} else if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(zcu)) {
break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
@@ -16192,9 +16190,9 @@ fn zirOverflowArithmetic(
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(zcu)) {
if (try lhs_val.compareAllWithZeroSema(.eq, pt)) {
- break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
+ break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = lhs };
} else if (try sema.compareAll(lhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
- break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
+ break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = rhs };
}
}
}
@@ -16202,9 +16200,9 @@ fn zirOverflowArithmetic(
if (maybe_rhs_val) |rhs_val| {
if (!rhs_val.isUndef(zcu)) {
if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
- break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
+ break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = rhs };
} else if (try sema.compareAll(rhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
- break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
+ break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = lhs };
}
}
}
@@ -16226,12 +16224,12 @@ fn zirOverflowArithmetic(
// Oterhwise if either of the arguments is undefined, both results are undefined.
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(zcu) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) {
- break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
+ break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = lhs };
}
}
if (maybe_rhs_val) |rhs_val| {
if (!rhs_val.isUndef(zcu) and (try rhs_val.compareAllWithZeroSema(.eq, pt))) {
- break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
+ break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = lhs };
}
}
if (maybe_lhs_val) |lhs_val| {
@@ -16309,10 +16307,10 @@ fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type {
const pt = sema.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
- const ov_ty = if (ty.zigTypeTag(zcu) == .vector) try pt.vectorType(.{
+ const ov_ty: Type = if (ty.zigTypeTag(zcu) == .vector) try pt.vectorType(.{
.len = ty.vectorLen(zcu),
.child = .u1_type,
- }) else Type.u1;
+ }) else .u1;
const types = [2]InternPool.Index{ ty.toIntern(), ov_ty.toIntern() };
const values = [2]InternPool.Index{ .none, .none };
@@ -16320,7 +16318,7 @@ fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type {
.types = &types,
.values = &values,
});
- return Type.fromInterned(tuple_ty);
+ return .fromInterned(tuple_ty);
}
fn analyzeArithmetic(
@@ -16380,7 +16378,7 @@ fn analyzeArithmetic(
const address = std.math.sub(u64, lhs_ptr.byte_offset, rhs_ptr.byte_offset) catch
return sema.fail(block, src, "operation results in overflow", .{});
const result = address / elem_size;
- return try pt.intRef(Type.usize, result);
+ return try pt.intRef(.usize, result);
} else {
break :runtime_src lhs_src;
}
@@ -16395,7 +16393,7 @@ fn analyzeArithmetic(
const lhs_int = try block.addBitCast(.usize, lhs);
const rhs_int = try block.addBitCast(.usize, rhs);
const address = try block.addBinOp(.sub_wrap, lhs_int, rhs_int);
- return try block.addBinOp(.div_exact, address, try pt.intRef(Type.usize, elem_size));
+ return try block.addBinOp(.div_exact, address, try pt.intRef(.usize, elem_size));
}
} else {
switch (lhs_ty.ptrSize(zcu)) {
@@ -16527,8 +16525,7 @@ fn analyzeArithmetic(
try block.addReduce(ov_bit, .Or)
else
ov_bit;
- const zero_ov = Air.internedToRef((try pt.intValue(Type.u1, 0)).toIntern());
- const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov);
+ const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, .zero_u1);
try sema.addSafetyCheck(block, src, no_ov, .integer_overflow);
return sema.tupleFieldValByIndex(block, op_ov, 0, op_ov_tuple_ty);
@@ -16550,7 +16547,7 @@ fn analyzePtrArithmetic(
) CompileError!Air.Inst.Ref {
// TODO if the operand is comptime-known to be negative, or is a negative int,
// coerce to isize instead of usize.
- const offset = try sema.coerce(block, Type.usize, uncasted_offset, offset_src);
+ const offset = try sema.coerce(block, .usize, uncasted_offset, offset_src);
const pt = sema.pt;
const zcu = pt.zcu;
const opt_ptr_val = try sema.resolveValue(ptr);
@@ -16736,8 +16733,8 @@ fn zirAsm(
const uncasted_arg = try sema.resolveInst(input.data.operand);
const uncasted_arg_ty = sema.typeOf(uncasted_arg);
switch (uncasted_arg_ty.zigTypeTag(zcu)) {
- .comptime_int => arg.* = try sema.coerce(block, Type.usize, uncasted_arg, src),
- .comptime_float => arg.* = try sema.coerce(block, Type.f64, uncasted_arg, src),
+ .comptime_int => arg.* = try sema.coerce(block, .usize, uncasted_arg, src),
+ .comptime_float => arg.* = try sema.coerce(block, .f64, uncasted_arg, src),
else => {
arg.* = uncasted_arg;
},
@@ -16860,9 +16857,7 @@ fn zirCmpEq(
const runtime_src: LazySrcLoc = src: {
if (try sema.resolveValue(lhs)) |lval| {
if (try sema.resolveValue(rhs)) |rval| {
- if (lval.isUndef(zcu) or rval.isUndef(zcu)) {
- return pt.undefRef(Type.bool);
- }
+ if (lval.isUndef(zcu) or rval.isUndef(zcu)) return .undef_bool;
const lkey = zcu.intern_pool.indexToKey(lval.toIntern());
const rkey = zcu.intern_pool.indexToKey(rval.toIntern());
return if ((lkey.err.name == rkey.err.name) == (op == .eq))
@@ -16916,7 +16911,7 @@ fn analyzeCmpUnionTag(
const coerced_union = try sema.coerce(block, union_tag_ty, un, un_src);
if (try sema.resolveValue(coerced_tag)) |enum_val| {
- if (enum_val.isUndef(zcu)) return pt.undefRef(Type.bool);
+ if (enum_val.isUndef(zcu)) return .undef_bool;
const field_ty = union_ty.unionFieldType(enum_val, zcu).?;
if (field_ty.zigTypeTag(zcu) == .noreturn) {
return .bool_false;
@@ -17027,8 +17022,8 @@ fn cmpSelf(
const maybe_lhs_val = try sema.resolveValue(casted_lhs);
const maybe_rhs_val = try sema.resolveValue(casted_rhs);
- if (maybe_lhs_val) |v| if (v.isUndef(zcu)) return pt.undefRef(Type.bool);
- if (maybe_rhs_val) |v| if (v.isUndef(zcu)) return pt.undefRef(Type.bool);
+ if (maybe_lhs_val) |v| if (v.isUndef(zcu)) return .undef_bool;
+ if (maybe_rhs_val) |v| if (v.isUndef(zcu)) return .undef_bool;
const runtime_src: LazySrcLoc = src: {
if (maybe_lhs_val) |lhs_val| {
@@ -17083,7 +17078,7 @@ fn runtimeBoolCmp(
) CompileError!Air.Inst.Ref {
if ((op == .neq) == rhs) {
try sema.requireRuntimeBlock(block, src, runtime_src);
- return block.addTyOp(.not, Type.bool, lhs);
+ return block.addTyOp(.not, .bool, lhs);
} else {
return lhs;
}
@@ -17107,7 +17102,7 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
.comptime_float,
.comptime_int,
.void,
- => return pt.intRef(Type.comptime_int, 0),
+ => return .zero,
.bool,
.int,
@@ -17148,7 +17143,7 @@ fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
.comptime_float,
.comptime_int,
.void,
- => return pt.intRef(Type.comptime_int, 0),
+ => return .zero,
.bool,
.int,
@@ -17167,7 +17162,7 @@ fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
=> {},
}
const bit_size = try operand_ty.bitSizeSema(pt);
- return pt.intRef(Type.comptime_int, bit_size);
+ return pt.intRef(.comptime_int, bit_size);
}
fn zirThis(
@@ -17285,7 +17280,7 @@ fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
assert(block.is_typeof);
// We need a dummy runtime instruction with the correct type.
- return block.addTy(.alloc, Type.fromInterned(capture_ty));
+ return block.addTy(.alloc, .fromInterned(capture_ty));
}
fn zirRetAddr(
@@ -17293,10 +17288,11 @@ fn zirRetAddr(
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
+ _ = sema;
_ = extended;
if (block.isComptime()) {
// TODO: we could give a meaningful lazy value here. #14938
- return sema.pt.intRef(Type.usize, 0);
+ return .zero_usize;
} else {
return block.addNoOp(.ret_addr);
}
@@ -17349,7 +17345,7 @@ fn zirBuiltinSrc(
} },
.byte_offset = 0,
} }),
- .len = (try pt.intValue(Type.usize, func_name_len)).toIntern(),
+ .len = (try pt.intValue(.usize, func_name_len)).toIntern(),
} });
};
@@ -17375,7 +17371,7 @@ fn zirBuiltinSrc(
} },
.byte_offset = 0,
} }),
- .len = (try pt.intValue(Type.usize, module_name.len)).toIntern(),
+ .len = (try pt.intValue(.usize, module_name.len)).toIntern(),
} });
};
@@ -17401,7 +17397,7 @@ fn zirBuiltinSrc(
} },
.byte_offset = 0,
} }),
- .len = (try pt.intValue(Type.usize, file_name.len)).toIntern(),
+ .len = (try pt.intValue(.usize, file_name.len)).toIntern(),
} });
};
@@ -17414,9 +17410,9 @@ fn zirBuiltinSrc(
// fn_name: [:0]const u8,
func_name_val,
// line: u32,
- (try pt.intValue(Type.u32, extra.line + 1)).toIntern(),
+ (try pt.intValue(.u32, extra.line + 1)).toIntern(),
// column: u32,
- (try pt.intValue(Type.u32, extra.column + 1)).toIntern(),
+ (try pt.intValue(.u32, extra.column + 1)).toIntern(),
};
return Air.internedToRef((try pt.intern(.{ .aggregate = .{
.ty = src_loc_ty.toIntern(),
@@ -17511,7 +17507,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try pt.intValue(Type.usize, param_vals.len)).toIntern(),
+ .len = (try pt.intValue(.usize, param_vals.len)).toIntern(),
} });
};
@@ -17564,7 +17560,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// signedness: Signedness,
(try pt.enumValueFieldIndex(signedness_ty, @intFromEnum(info.signedness))).toIntern(),
// bits: u16,
- (try pt.intValue(Type.u16, info.bits)).toIntern(),
+ (try pt.intValue(.u16, info.bits)).toIntern(),
};
return Air.internedToRef((try pt.internUnion(.{
.ty = type_info_ty.toIntern(),
@@ -17580,7 +17576,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const field_vals = .{
// bits: u16,
- (try pt.intValue(Type.u16, ty.bitSize(zcu))).toIntern(),
+ (try pt.intValue(.u16, ty.bitSize(zcu))).toIntern(),
};
return Air.internedToRef((try pt.internUnion(.{
.ty = type_info_ty.toIntern(),
@@ -17594,7 +17590,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.pointer => {
const info = ty.ptrInfo(zcu);
const alignment = if (info.flags.alignment.toByteUnits()) |alignment|
- try pt.intValue(Type.comptime_int, alignment)
+ try pt.intValue(.comptime_int, alignment)
else
try Type.fromInterned(info.child).lazyAbiAlignment(pt);
@@ -17638,7 +17634,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const info = ty.arrayInfo(zcu);
const field_values = .{
// len: comptime_int,
- (try pt.intValue(Type.comptime_int, info.len)).toIntern(),
+ (try pt.intValue(.comptime_int, info.len)).toIntern(),
// child: type,
info.elem_type.toIntern(),
// sentinel: ?*const anyopaque,
@@ -17659,7 +17655,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const info = ty.arrayInfo(zcu);
const field_values = .{
// len: comptime_int,
- (try pt.intValue(Type.comptime_int, info.len)).toIntern(),
+ (try pt.intValue(.comptime_int, info.len)).toIntern(),
// child: type,
info.elem_type.toIntern(),
};
@@ -17723,7 +17719,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try pt.intValue(Type.usize, error_name_len)).toIntern(),
+ .len = (try pt.intValue(.usize, error_name_len)).toIntern(),
} });
};
@@ -17770,7 +17766,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try pt.intValue(Type.usize, vals.len)).toIntern(),
+ .len = (try pt.intValue(.usize, vals.len)).toIntern(),
} });
} else .none;
const errors_val = try pt.intern(.{ .opt = .{
@@ -17819,7 +17815,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.comptime_int_type,
)
else
- (try pt.intValue(Type.comptime_int, tag_index)).toIntern();
+ (try pt.intValue(.comptime_int, tag_index)).toIntern();
// TODO: write something like getCoercedInts to avoid needing to dupe
const name_val = v: {
@@ -17844,7 +17840,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try pt.intValue(Type.usize, tag_name_len)).toIntern(),
+ .len = (try pt.intValue(.usize, tag_name_len)).toIntern(),
} });
};
@@ -17887,7 +17883,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try pt.intValue(Type.usize, enum_field_vals.len)).toIntern(),
+ .len = (try pt.intValue(.usize, enum_field_vals.len)).toIntern(),
} });
};
@@ -17949,7 +17945,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try pt.intValue(Type.usize, field_name_len)).toIntern(),
+ .len = (try pt.intValue(.usize, field_name_len)).toIntern(),
} });
};
@@ -17965,7 +17961,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// type: type,
field_ty,
// alignment: comptime_int,
- (try pt.intValue(Type.comptime_int, alignment.toByteUnits() orelse 0)).toIntern(),
+ (try pt.intValue(.comptime_int, alignment.toByteUnits() orelse 0)).toIntern(),
};
field_val.* = try pt.intern(.{ .aggregate = .{
.ty = union_field_ty.toIntern(),
@@ -18000,7 +17996,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try pt.intValue(Type.usize, union_field_vals.len)).toIntern(),
+ .len = (try pt.intValue(.usize, union_field_vals.len)).toIntern(),
} });
};
@@ -18070,7 +18066,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try pt.intValue(Type.usize, field_name_len)).toIntern(),
+ .len = (try pt.intValue(.usize, field_name_len)).toIntern(),
} });
};
@@ -18089,7 +18085,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// is_comptime: bool,
Value.makeBool(is_comptime).toIntern(),
// alignment: comptime_int,
- (try pt.intValue(Type.comptime_int, Type.fromInterned(field_ty).abiAlignment(zcu).toByteUnits() orelse 0)).toIntern(),
+ (try pt.intValue(.comptime_int, Type.fromInterned(field_ty).abiAlignment(zcu).toByteUnits() orelse 0)).toIntern(),
};
struct_field_val.* = try pt.intern(.{ .aggregate = .{
.ty = struct_field_ty.toIntern(),
@@ -18111,7 +18107,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
else
try ip.getOrPutStringFmt(gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls);
const field_name_len = field_name.length(ip);
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[field_index]);
const field_init = struct_type.fieldInit(ip, field_index);
const field_is_comptime = struct_type.fieldIsComptime(ip, field_index);
const name_val = v: {
@@ -18134,7 +18130,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try pt.intValue(Type.usize, field_name_len)).toIntern(),
+ .len = (try pt.intValue(.usize, field_name_len)).toIntern(),
} });
};
@@ -18159,7 +18155,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// is_comptime: bool,
Value.makeBool(field_is_comptime).toIntern(),
// alignment: comptime_int,
- (try pt.intValue(Type.comptime_int, alignment.toByteUnits() orelse 0)).toIntern(),
+ (try pt.intValue(.comptime_int, alignment.toByteUnits() orelse 0)).toIntern(),
};
field_val.* = try pt.intern(.{ .aggregate = .{
.ty = struct_field_ty.toIntern(),
@@ -18195,7 +18191,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try pt.intValue(Type.usize, struct_field_vals.len)).toIntern(),
+ .len = (try pt.intValue(.usize, struct_field_vals.len)).toIntern(),
} });
};
@@ -18304,7 +18300,7 @@ fn typeInfoDecls(
} },
.byte_offset = 0,
} }),
- .len = (try pt.intValue(Type.usize, decl_vals.items.len)).toIntern(),
+ .len = (try pt.intValue(.usize, decl_vals.items.len)).toIntern(),
} });
}
@@ -18354,7 +18350,7 @@ fn typeInfoNamespaceDecls(
.byte_offset = 0,
},
}),
- .len = (try pt.intValue(Type.usize, name_len)).toIntern(),
+ .len = (try pt.intValue(.usize, name_len)).toIntern(),
},
});
};
@@ -18373,7 +18369,7 @@ fn typeInfoNamespaceDecls(
continue;
}
try sema.ensureNavResolved(block, src, nav, .fully);
- const namespace_ty = Type.fromInterned(ip.getNav(nav).status.fully_resolved.val);
+ const namespace_ty: Type = .fromInterned(ip.getNav(nav).status.fully_resolved.val);
try sema.typeInfoNamespaceDecls(block, src, namespace_ty.getNamespaceIndex(zcu).toOptional(), declaration_ty, decl_vals, seen_namespaces);
}
}
@@ -18424,7 +18420,7 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi
const pt = sema.pt;
const zcu = pt.zcu;
switch (operand.zigTypeTag(zcu)) {
- .comptime_int => return Type.comptime_int,
+ .comptime_int => return .comptime_int,
.int => {
const bits = operand.bitSize(zcu);
const count = if (bits == 0)
@@ -18512,14 +18508,12 @@ fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const operand_src = block.src(.{ .node_offset_un_op = inst_data.src_node });
const uncasted_operand = try sema.resolveInst(inst_data.operand);
- const operand = try sema.coerce(block, Type.bool, uncasted_operand, operand_src);
+ const operand = try sema.coerce(block, .bool, uncasted_operand, operand_src);
if (try sema.resolveValue(operand)) |val| {
- return if (val.isUndef(zcu))
- pt.undefRef(Type.bool)
- else if (val.toBool()) .bool_false else .bool_true;
+ return if (val.isUndef(zcu)) .undef_bool else if (val.toBool()) .bool_false else .bool_true;
}
try sema.requireRuntimeBlock(block, src, null);
- return block.addTyOp(.not, Type.bool, operand);
+ return block.addTyOp(.not, .bool, operand);
}
fn zirBoolBr(
@@ -18544,7 +18538,7 @@ fn zirBoolBr(
const lhs_src = parent_block.src(.{ .node_offset_bin_lhs = inst_data.src_node });
const rhs_src = parent_block.src(.{ .node_offset_bin_rhs = inst_data.src_node });
- const lhs = try sema.coerce(parent_block, Type.bool, uncoerced_lhs, lhs_src);
+ const lhs = try sema.coerce(parent_block, .bool, uncoerced_lhs, lhs_src);
if (try sema.resolveDefinedValue(parent_block, lhs_src, lhs)) |lhs_val| {
if (is_bool_or and lhs_val.toBool()) {
@@ -18559,7 +18553,7 @@ fn zirBoolBr(
if (sema.typeOf(rhs_result).isNoReturn(zcu)) {
return rhs_result;
}
- return sema.coerce(parent_block, Type.bool, rhs_result, rhs_src);
+ return sema.coerce(parent_block, .bool, rhs_result, rhs_src);
}
const block_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
@@ -18596,7 +18590,7 @@ fn zirBoolBr(
const rhs_result = try sema.resolveInlineBody(rhs_block, body, inst);
const rhs_noret = sema.typeOf(rhs_result).isNoReturn(zcu);
const coerced_rhs_result = if (!rhs_noret) rhs: {
- const coerced_result = try sema.coerce(rhs_block, Type.bool, rhs_result, rhs_src);
+ const coerced_result = try sema.coerce(rhs_block, .bool, rhs_result, rhs_src);
_ = try rhs_block.addBr(block_inst, coerced_result);
break :rhs coerced_result;
} else rhs_result;
@@ -18797,7 +18791,7 @@ fn zirCondbr(
const else_body = sema.code.bodySlice(extra.end + then_body.len, extra.data.else_body_len);
const uncasted_cond = try sema.resolveInst(extra.data.condition);
- const cond = try sema.coerce(parent_block, Type.bool, uncasted_cond, cond_src);
+ const cond = try sema.coerce(parent_block, .bool, uncasted_cond, cond_src);
if (try sema.resolveDefinedValue(parent_block, cond_src, cond)) |cond_val| {
const body = if (cond_val.toBool()) then_body else else_body;
@@ -19502,7 +19496,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const abi_align: Alignment = if (inst_data.flags.has_align) blk: {
const ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_i]);
extra_i += 1;
- const coerced = try sema.coerce(block, Type.u32, try sema.resolveInst(ref), align_src);
+ const coerced = try sema.coerce(block, .u32, try sema.resolveInst(ref), align_src);
const val = try sema.resolveConstDefinedValue(block, align_src, coerced, .{ .simple = .@"align" });
// Check if this happens to be the lazy alignment of our element type, in
// which case we can make this 0 without resolving it.
@@ -19526,14 +19520,14 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const bit_offset: u16 = if (inst_data.flags.has_bit_range) blk: {
const ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_i]);
extra_i += 1;
- const bit_offset = try sema.resolveInt(block, bitoffset_src, ref, Type.u16, .{ .simple = .type });
+ const bit_offset = try sema.resolveInt(block, bitoffset_src, ref, .u16, .{ .simple = .type });
break :blk @intCast(bit_offset);
} else 0;
const host_size: u16 = if (inst_data.flags.has_bit_range) blk: {
const ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_i]);
extra_i += 1;
- const host_size = try sema.resolveInt(block, hostsize_src, ref, Type.u16, .{ .simple = .type });
+ const host_size = try sema.resolveInt(block, hostsize_src, ref, .u16, .{ .simple = .type });
break :blk @intCast(host_size);
} else 0;
@@ -19767,7 +19761,7 @@ fn unionInit(
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_src);
- const field_ty = Type.fromInterned(zcu.typeToUnion(union_ty).?.field_types.get(ip)[field_index]);
+ const field_ty: Type = .fromInterned(zcu.typeToUnion(union_ty).?.field_types.get(ip)[field_index]);
const init = try sema.coerce(block, field_ty, uncasted_init, init_src);
_ = union_ty_src;
return unionInitFromEnumTag(sema, block, init_src, union_ty, field_index, init);
@@ -19902,7 +19896,7 @@ fn zirStructInit(
const field_index = try sema.unionFieldIndex(block, resolved_ty, field_name, field_src);
const tag_ty = resolved_ty.unionTagTypeHypothetical(zcu);
const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index);
- const field_ty = Type.fromInterned(zcu.typeToUnion(resolved_ty).?.field_types.get(ip)[field_index]);
+ const field_ty: Type = .fromInterned(zcu.typeToUnion(resolved_ty).?.field_types.get(ip)[field_index]);
if (field_ty.zigTypeTag(zcu) == .noreturn) {
return sema.failWithOwnedErrorMsg(block, msg: {
@@ -19990,7 +19984,7 @@ fn finishStructInit(
.init_node_offset = init_src.offset.node_offset.x,
.elem_index = @intCast(i),
} });
- const field_ty = Type.fromInterned(tuple.types.get(ip)[i]);
+ const field_ty: Type = .fromInterned(tuple.types.get(ip)[i]);
field_inits[i] = try sema.coerce(block, field_ty, field_inits[i], field_src);
continue;
}
@@ -20018,7 +20012,7 @@ fn finishStructInit(
.init_node_offset = init_src.offset.node_offset.x,
.elem_index = @intCast(i),
} });
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[i]);
field_inits[i] = try sema.coerce(block, field_ty, field_inits[i], field_src);
continue;
}
@@ -20183,7 +20177,7 @@ fn structInitAnon(
const msg = try sema.errMsg(field_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
errdefer msg.destroy(sema.gpa);
- try sema.addDeclaredHereNote(msg, Type.fromInterned(field_ty.*));
+ try sema.addDeclaredHereNote(msg, .fromInterned(field_ty.*));
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
@@ -20317,7 +20311,7 @@ fn structInitAnon(
element_refs[i] = try sema.resolveInst(item.data.init);
}
- return block.addAggregateInit(Type.fromInterned(struct_ty), element_refs);
+ return block.addAggregateInit(.fromInterned(struct_ty), element_refs);
}
fn zirArrayInit(
@@ -20441,7 +20435,7 @@ fn zirArrayInit(
});
const elem_ptr_ty_ref = Air.internedToRef(elem_ptr_ty.toIntern());
- const index = try pt.intRef(Type.usize, i);
+ const index = try pt.intRef(.usize, i);
const elem_ptr = try block.addPtrElemPtrTypeRef(base_ptr, index, elem_ptr_ty_ref);
_ = try block.addBinOp(.store, elem_ptr, arg);
}
@@ -20455,7 +20449,7 @@ fn zirArrayInit(
const elem_ptr_ty_ref = Air.internedToRef(elem_ptr_ty.toIntern());
for (resolved_args, 0..) |arg, i| {
- const index = try pt.intRef(Type.usize, i);
+ const index = try pt.intRef(.usize, i);
const elem_ptr = try block.addPtrElemPtrTypeRef(base_ptr, index, elem_ptr_ty_ref);
_ = try block.addBinOp(.store, elem_ptr, arg);
}
@@ -20504,7 +20498,7 @@ fn arrayInitAnon(
const msg = try sema.errMsg(operand_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
errdefer msg.destroy(gpa);
- try sema.addDeclaredHereNote(msg, Type.fromInterned(types[i]));
+ try sema.addDeclaredHereNote(msg, .fromInterned(types[i]));
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
@@ -20561,7 +20555,7 @@ fn arrayInitAnon(
element_refs[i] = try sema.resolveInst(operand);
}
- return block.addAggregateInit(Type.fromInterned(tuple_ty), element_refs);
+ return block.addAggregateInit(.fromInterned(tuple_ty), element_refs);
}
fn addConstantMaybeRef(sema: *Sema, val: InternPool.Index, is_ref: bool) !Air.Inst.Ref {
@@ -20632,7 +20626,7 @@ fn fieldType(
.optional => {
// Struct/array init through optional requires the child type to not be a pointer.
// If the child of .optional is a pointer it'll error on the next loop.
- cur_ty = Type.fromInterned(ip.indexToKey(cur_ty.toIntern()).opt_type);
+ cur_ty = .fromInterned(ip.indexToKey(cur_ty.toIntern()).opt_type);
continue;
},
.error_union => {
@@ -20710,21 +20704,18 @@ fn zirIntFromBool(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const dest_ty: Type = if (is_vector) try pt.vectorType(.{ .child = .u1_type, .len = len }) else .u1;
if (try sema.resolveValue(operand)) |val| {
if (!is_vector) {
- if (val.isUndef(zcu)) return pt.undefRef(Type.u1);
- if (val.toBool()) return Air.internedToRef((try pt.intValue(Type.u1, 1)).toIntern());
- return Air.internedToRef((try pt.intValue(Type.u1, 0)).toIntern());
+ return if (val.isUndef(zcu)) .undef_u1 else if (val.toBool()) .one_u1 else .zero_u1;
}
if (val.isUndef(zcu)) return pt.undefRef(dest_ty);
const new_elems = try sema.arena.alloc(InternPool.Index, len);
for (new_elems, 0..) |*new_elem, i| {
const old_elem = try val.elemValue(pt, i);
- const new_val = if (old_elem.isUndef(zcu))
- try pt.undefValue(Type.u1)
+ new_elem.* = if (old_elem.isUndef(zcu))
+ .undef_u1
else if (old_elem.toBool())
- try pt.intValue(Type.u1, 1)
+ .one_u1
else
- try pt.intValue(Type.u1, 0);
- new_elem.* = new_val.toIntern();
+ .zero_u1;
}
return Air.internedToRef(try pt.intern(.{ .aggregate = .{
.ty = dest_ty.toIntern(),
@@ -20736,7 +20727,7 @@ fn zirIntFromBool(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
}
const new_elems = try sema.arena.alloc(Air.Inst.Ref, len);
for (new_elems, 0..) |*new_elem, i| {
- const idx_ref = try pt.intRef(Type.usize, i);
+ const idx_ref = try pt.intRef(.usize, i);
const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref);
new_elem.* = try block.addBitCast(.u1, old_elem);
}
@@ -20747,7 +20738,7 @@ fn zirErrorName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0);
const uncoerced_operand = try sema.resolveInst(inst_data.operand);
- const operand = try sema.coerce(block, Type.anyerror, uncoerced_operand, operand_src);
+ const operand = try sema.coerce(block, .anyerror, uncoerced_operand, operand_src);
if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| {
const err_name = sema.pt.zcu.intern_pool.indexToKey(val.toIntern()).err.name;
@@ -20993,12 +20984,12 @@ fn zirReify(
.float => {
const float = try sema.interpretBuiltinType(block, operand_src, .fromInterned(union_val.val), std.builtin.Type.Float);
- const ty = switch (float.bits) {
- 16 => Type.f16,
- 32 => Type.f32,
- 64 => Type.f64,
- 80 => Type.f80,
- 128 => Type.f128,
+ const ty: Type = switch (float.bits) {
+ 16 => .f16,
+ 32 => .f32,
+ 64 => .f64,
+ 80 => .f80,
+ 128 => .f128,
else => return sema.fail(block, src, "{}-bit float unsupported", .{float.bits}),
};
return Air.internedToRef(ty.toIntern());
@@ -21038,7 +21029,7 @@ fn zirReify(
try ip.getOrPutString(gpa, pt.tid, "sentinel_ptr", .no_embedded_nulls),
).?);
- if (!try sema.intFitsInType(alignment_val, Type.u32, null)) {
+ if (!try sema.intFitsInType(alignment_val, .u32, null)) {
return sema.fail(block, src, "alignment must fit in 'u32'", .{});
}
@@ -21174,7 +21165,7 @@ fn zirReify(
},
.error_set => {
const payload_val = Value.fromInterned(union_val.val).optionalValue(zcu) orelse
- return Air.internedToRef(Type.anyerror.toIntern());
+ return .anyerror_type;
const names_val = try sema.derefSliceAsArray(block, src, payload_val, .{ .simple = .error_set_contents });
@@ -21776,7 +21767,7 @@ fn reifyUnion(
errdefer if (!has_explicit_tag) ip.remove(pt.tid, enum_tag_ty); // remove generated tag type on error
for (field_types) |field_ty_ip| {
- const field_ty = Type.fromInterned(field_ty_ip);
+ const field_ty: Type = .fromInterned(field_ty_ip);
if (field_ty.zigTypeTag(zcu) == .@"opaque") {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(src, "opaque types have unknown size and therefore cannot be directly embedded in unions", .{});
@@ -22060,7 +22051,7 @@ fn reifyStruct(
}
if (any_aligned_fields) {
- if (!try sema.intFitsInType(field_alignment_val, Type.u32, null)) {
+ if (!try sema.intFitsInType(field_alignment_val, .u32, null)) {
return sema.fail(block, src, "alignment must fit in 'u32'", .{});
}
@@ -22149,7 +22140,7 @@ fn reifyStruct(
if (layout == .@"packed") {
var fields_bit_sum: u64 = 0;
for (0..struct_type.field_types.len) |field_idx| {
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_idx]);
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[field_idx]);
field_ty.resolveLayout(pt) catch |err| switch (err) {
error.AnalysisFail => {
const msg = sema.err orelse return err;
@@ -22325,7 +22316,7 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
if (block.wantSafety()) {
const len = dest_ty.vectorLen(zcu);
for (0..len) |i| {
- const idx_ref = try pt.intRef(Type.usize, i);
+ const idx_ref = try pt.intRef(.usize, i);
const elem_ref = try block.addBinOp(.array_elem_val, operand, idx_ref);
const ok = try block.addBinOp(if (block.float_mode == .optimized) .cmp_eq_optimized else .cmp_eq, elem_ref, Air.internedToRef((try pt.floatValue(operand_scalar_ty, 0.0)).toIntern()));
try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds);
@@ -22358,7 +22349,7 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
const len = dest_ty.vectorLen(zcu);
const new_elems = try sema.arena.alloc(Air.Inst.Ref, len);
for (new_elems, 0..) |*new_elem, i| {
- const idx_ref = try pt.intRef(Type.usize, i);
+ const idx_ref = try pt.intRef(.usize, i);
const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref);
const result = try block.addTyOp(if (block.float_mode == .optimized) .int_from_float_optimized else .int_from_float, dest_scalar_ty, old_elem);
if (block.wantSafety()) {
@@ -22408,7 +22399,7 @@ fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
const len = operand_ty.vectorLen(zcu);
const new_elems = try sema.arena.alloc(Air.Inst.Ref, len);
for (new_elems, 0..) |*new_elem, i| {
- const idx_ref = try pt.intRef(Type.usize, i);
+ const idx_ref = try pt.intRef(.usize, i);
const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref);
new_elem.* = try block.addTyOp(.float_from_int, dest_scalar_ty, old_elem);
}
@@ -22431,10 +22422,10 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, uncoerced_operand_ty, src, operand_src);
const is_vector = dest_ty.zigTypeTag(zcu) == .vector;
- const operand_ty = if (is_vector) operand_ty: {
+ const operand_ty: Type = if (is_vector) operand_ty: {
const len = dest_ty.vectorLen(zcu);
break :operand_ty try pt.vectorType(.{ .child = .usize_type, .len = len });
- } else Type.usize;
+ } else .usize;
const operand_coerced = try sema.coerce(block, operand_ty, operand_res, operand_src);
@@ -22495,7 +22486,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
if (ptr_align.compare(.gt, .@"1")) {
const align_bytes_minus_1 = ptr_align.toByteUnits().? - 1;
const align_mask = Air.internedToRef((try sema.splat(operand_ty, try pt.intValue(
- Type.usize,
+ .usize,
if (elem_ty.fnPtrMaskOrNull(zcu)) |mask|
align_bytes_minus_1 & mask
else
@@ -22516,7 +22507,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const len = dest_ty.vectorLen(zcu);
if (block.wantSafety() and (try elem_ty.hasRuntimeBitsSema(pt) or elem_ty.zigTypeTag(zcu) == .@"fn")) {
for (0..len) |i| {
- const idx_ref = try pt.intRef(Type.usize, i);
+ const idx_ref = try pt.intRef(.usize, i);
const elem_coerced = try block.addBinOp(.array_elem_val, operand_coerced, idx_ref);
if (!ptr_ty.isAllowzeroPtr(zcu)) {
const is_non_zero = try block.addBinOp(.cmp_neq, elem_coerced, .zero_usize);
@@ -22525,7 +22516,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
if (ptr_align.compare(.gt, .@"1")) {
const align_bytes_minus_1 = ptr_align.toByteUnits().? - 1;
const align_mask = Air.internedToRef((try pt.intValue(
- Type.usize,
+ .usize,
if (elem_ty.fnPtrMaskOrNull(zcu)) |mask|
align_bytes_minus_1 & mask
else
@@ -22540,7 +22531,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const new_elems = try sema.arena.alloc(Air.Inst.Ref, len);
for (new_elems, 0..) |*new_elem, i| {
- const idx_ref = try pt.intRef(Type.usize, i);
+ const idx_ref = try pt.intRef(.usize, i);
const old_elem = try block.addBinOp(.array_elem_val, operand_coerced, idx_ref);
new_elem.* = try block.addBitCast(ptr_ty, old_elem);
}
@@ -22918,12 +22909,12 @@ fn ptrCastFull(
}
check_child: {
- const src_child = if (dest_info.flags.size == .slice and src_info.flags.size == .one) blk: {
+ const src_child: Type = if (dest_info.flags.size == .slice and src_info.flags.size == .one) blk: {
// *[n]T -> []T
break :blk Type.fromInterned(src_info.child).childType(zcu);
- } else Type.fromInterned(src_info.child);
+ } else .fromInterned(src_info.child);
- const dest_child = Type.fromInterned(dest_info.child);
+ const dest_child: Type = .fromInterned(dest_info.child);
const imc_res = try sema.coerceInMemoryAllowed(
block,
@@ -22956,7 +22947,7 @@ fn ptrCastFull(
}
if (is_array_ptr_to_slice) {
// [*]nT -> []T
- const arr_ty = Type.fromInterned(src_info.child);
+ const arr_ty: Type = .fromInterned(src_info.child);
if (arr_ty.sentinel(zcu)) |src_sentinel| {
const coerced_sent = try zcu.intern_pool.getCoerced(sema.gpa, pt.tid, src_sentinel.toIntern(), dest_info.child);
if (dest_info.sentinel == coerced_sent) break :check_sent;
@@ -23158,7 +23149,7 @@ fn ptrCastFull(
if (dest_info.flags.size == .slice) {
// Because the operand is comptime-known and not `null`, the slice length has already been computed:
const len: Value = switch (dest_slice_len.?) {
- .undef => try pt.undefValue(.usize),
+ .undef => .undef_usize,
.constant => |n| try pt.intValue(.usize, n),
.equal_runtime_src_slice => unreachable,
.change_runtime_src_slice => unreachable,
@@ -23267,7 +23258,7 @@ fn ptrCastFull(
if (need_align_check) {
assert(operand_ptr_int != .none);
const align_mask = try pt.intRef(.usize, mask: {
- const target_ptr_mask: u64 = Type.fromInterned(dest_info.child).fnPtrMaskOrNull(zcu) orelse ~@as(u64, 0);
+ const target_ptr_mask = Type.fromInterned(dest_info.child).fnPtrMaskOrNull(zcu) orelse ~@as(u64, 0);
break :mask (dest_align.toByteUnits().? - 1) & target_ptr_mask;
});
const ptr_masked = try block.addBinOp(.bit_and, operand_ptr_int, align_mask);
@@ -23288,7 +23279,7 @@ fn ptrCastFull(
assert(need_operand_ptr);
const result_len: Air.Inst.Ref = switch (dest_slice_len.?) {
- .undef => try pt.undefRef(.usize),
+ .undef => .undef_usize,
.constant => |n| try pt.intRef(.usize, n),
.equal_runtime_src_slice => len: {
assert(need_operand_len);
@@ -23658,13 +23649,13 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
fn zirBitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const offset = try sema.bitOffsetOf(block, inst);
- return sema.pt.intRef(Type.comptime_int, offset);
+ return sema.pt.intRef(.comptime_int, offset);
}
fn zirOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const offset = try sema.bitOffsetOf(block, inst);
// TODO reminder to make this a compile error for packed structs
- return sema.pt.intRef(Type.comptime_int, offset / 8);
+ return sema.pt.intRef(.comptime_int, offset / 8);
}
fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u64 {
@@ -23705,7 +23696,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
if (i == field_index) {
return bit_sum;
}
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[i]);
bit_sum += field_ty.bitSize(zcu);
} else unreachable;
},
@@ -24620,10 +24611,10 @@ fn analyzeShuffle(
const expand_mask_values = try sema.arena.alloc(InternPool.Index, max_len);
for (@intCast(0)..@intCast(min_len)) |i| {
- expand_mask_values[i] = (try pt.intValue(Type.comptime_int, i)).toIntern();
+ expand_mask_values[i] = (try pt.intValue(.comptime_int, i)).toIntern();
}
for (@intCast(min_len)..@intCast(max_len)) |i| {
- expand_mask_values[i] = (try pt.intValue(Type.comptime_int, -1)).toIntern();
+ expand_mask_values[i] = .negative_one;
}
const expand_mask = try pt.intern(.{ .aggregate = .{
.ty = (try pt.vectorType(.{ .len = @intCast(max_len), .child = .comptime_int_type })).toIntern(),
@@ -25087,7 +25078,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
if (parent_ptr_info.flags.size != .one) {
return sema.fail(block, inst_src, "expected single pointer type, found '{}'", .{parent_ptr_ty.fmt(pt)});
}
- const parent_ty = Type.fromInterned(parent_ptr_info.child);
+ const parent_ty: Type = .fromInterned(parent_ptr_info.child);
switch (parent_ty.zigTypeTag(zcu)) {
.@"struct", .@"union" => {},
else => return sema.fail(block, inst_src, "expected pointer to struct or union type, found '{}'", .{parent_ptr_ty.fmt(pt)}),
@@ -25741,7 +25732,7 @@ fn zirMemcpy(
if (try sema.resolveDefinedValue(block, dest_src, dest_len)) |dest_len_val| {
len_val = dest_len_val;
if (try sema.resolveDefinedValue(block, src_src, src_len)) |src_len_val| {
- if (!(try sema.valuesEqual(dest_len_val, src_len_val, Type.usize))) {
+ if (!(try sema.valuesEqual(dest_len_val, src_len_val, .usize))) {
const msg = msg: {
const msg = try sema.errMsg(src, "non-matching copy lengths", .{});
errdefer msg.destroy(sema.gpa);
@@ -25952,7 +25943,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
const dest_elem_ty: Type = dest_elem_ty: {
const ptr_info = dest_ptr_ty.ptrInfo(zcu);
switch (ptr_info.flags.size) {
- .slice => break :dest_elem_ty Type.fromInterned(ptr_info.child),
+ .slice => break :dest_elem_ty .fromInterned(ptr_info.child),
.one => {
if (Type.fromInterned(ptr_info.child).zigTypeTag(zcu) == .array) {
break :dest_elem_ty Type.fromInterned(ptr_info.child).childType(zcu);
@@ -26118,7 +26109,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
extra_index += body.len;
if (extra.data.bits.ret_ty_is_generic) break :blk .generic_poison;
- const val = try sema.resolveGenericBody(block, ret_src, body, inst, Type.type, .{ .simple = .function_ret_ty });
+ const val = try sema.resolveGenericBody(block, ret_src, body, inst, .type, .{ .simple = .function_ret_ty });
const ty = val.toType();
break :blk ty;
} else if (extra.data.bits.has_ret_ty_ref) blk: {
@@ -26129,7 +26120,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const ret_ty_air_ref = try sema.resolveInst(ret_ty_ref);
const ret_ty_val = try sema.resolveConstDefinedValue(block, ret_src, ret_ty_air_ref, .{ .simple = .function_ret_ty });
break :blk ret_ty_val.toType();
- } else Type.void;
+ } else .void;
const noalias_bits: u32 = if (extra.data.bits.has_any_noalias) blk: {
const x = sema.code.extra[extra_index];
@@ -26223,7 +26214,7 @@ fn zirWasmMemorySize(
return sema.fail(block, builtin_src, "builtin @wasmMemorySize is available when targeting WebAssembly; targeted CPU architecture is {s}", .{@tagName(target.cpu.arch)});
}
- const index: u32 = @intCast(try sema.resolveInt(block, index_src, extra.operand, Type.u32, .{ .simple = .wasm_memory_index }));
+ const index: u32 = @intCast(try sema.resolveInt(block, index_src, extra.operand, .u32, .{ .simple = .wasm_memory_index }));
try sema.requireRuntimeBlock(block, builtin_src, null);
return block.addInst(.{
.tag = .wasm_memory_size,
@@ -26248,8 +26239,8 @@ fn zirWasmMemoryGrow(
return sema.fail(block, builtin_src, "builtin @wasmMemoryGrow is available when targeting WebAssembly; targeted CPU architecture is {s}", .{@tagName(target.cpu.arch)});
}
- const index: u32 = @intCast(try sema.resolveInt(block, index_src, extra.lhs, Type.u32, .{ .simple = .wasm_memory_index }));
- const delta = try sema.coerce(block, Type.usize, try sema.resolveInst(extra.rhs), delta_src);
+ const index: u32 = @intCast(try sema.resolveInt(block, index_src, extra.lhs, .u32, .{ .simple = .wasm_memory_index }));
+ const delta = try sema.coerce(block, .usize, try sema.resolveInst(extra.rhs), delta_src);
try sema.requireRuntimeBlock(block, builtin_src, null);
return block.addInst(.{
@@ -26484,7 +26475,7 @@ fn zirWorkItem(
},
}
- const dimension: u32 = @intCast(try sema.resolveInt(block, dimension_src, extra.operand, Type.u32, .{ .simple = .work_group_dim_index }));
+ const dimension: u32 = @intCast(try sema.resolveInt(block, dimension_src, extra.operand, .u32, .{ .simple = .work_group_dim_index }));
try sema.requireRuntimeBlock(block, builtin_src, null);
return block.addInst(.{
@@ -26552,7 +26543,7 @@ fn zirBuiltinValue(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
const inline_tag_val = try pt.enumValue(
callconv_tag_ty,
(try pt.intValue(
- Type.u8,
+ .u8,
@intFromEnum(std.builtin.CallingConvention.@"inline"),
)).toIntern(),
);
@@ -26760,7 +26751,7 @@ fn explainWhyTypeIsComptimeInner(
if (zcu.typeToStruct(ty)) |struct_type| {
for (0..struct_type.field_types.len) |i| {
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[i]);
const field_src: LazySrcLoc = .{
.base_node_inst = struct_type.zir_index,
.offset = .{ .container_field_type = @intCast(i) },
@@ -26780,7 +26771,7 @@ fn explainWhyTypeIsComptimeInner(
if (zcu.typeToUnion(ty)) |union_obj| {
for (0..union_obj.field_types.len) |i| {
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[i]);
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[i]);
const field_src: LazySrcLoc = .{
.base_node_inst = union_obj.zir_index,
.offset = .{ .container_field_type = @intCast(i) },
@@ -27171,7 +27162,7 @@ fn addSafetyCheckUnwrapError(
defer fail_block.instructions.deinit(gpa);
- const err = try fail_block.addTyOp(unwrap_err_tag, Type.anyerror, operand);
+ const err = try fail_block.addTyOp(unwrap_err_tag, .anyerror, operand);
try safetyPanicUnwrapError(sema, &fail_block, src, err);
try sema.addSafetyCheckExtra(parent_block, ok, &fail_block);
@@ -27344,7 +27335,7 @@ fn fieldVal(
switch (inner_ty.zigTypeTag(zcu)) {
.array => {
if (field_name.eqlSlice("len", ip)) {
- return Air.internedToRef((try pt.intValue(Type.usize, inner_ty.arrayLen(zcu))).toIntern());
+ return Air.internedToRef((try pt.intValue(.usize, inner_ty.arrayLen(zcu))).toIntern());
} else if (field_name.eqlSlice("ptr", ip) and is_pointer_to) {
const ptr_info = object_ty.ptrInfo(zcu);
const result_ty = try pt.ptrTypeSema(.{
@@ -27527,7 +27518,7 @@ fn fieldPtr(
switch (inner_ty.zigTypeTag(zcu)) {
.array => {
if (field_name.eqlSlice("len", ip)) {
- const int_val = try pt.intValue(Type.usize, inner_ty.arrayLen(zcu));
+ const int_val = try pt.intValue(.usize, inner_ty.arrayLen(zcu));
return uavRef(sema, int_val.toIntern());
} else if (field_name.eqlSlice("ptr", ip) and is_pointer_to) {
const ptr_info = object_ty.ptrInfo(zcu);
@@ -27769,12 +27760,12 @@ fn fieldCallBind(
if (zcu.typeToStruct(concrete_ty)) |struct_type| {
const field_index = struct_type.nameIndex(ip, field_name) orelse
break :find_field;
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[field_index]);
return sema.finishFieldCallBind(block, src, ptr_ty, field_ty, field_index, object_ptr);
} else if (concrete_ty.isTuple(zcu)) {
if (field_name.eqlSlice("len", ip)) {
- return .{ .direct = try pt.intRef(Type.usize, concrete_ty.structFieldCount(zcu)) };
+ return .{ .direct = try pt.intRef(.usize, concrete_ty.structFieldCount(zcu)) };
}
if (field_name.toUnsigned(ip)) |field_index| {
if (field_index >= concrete_ty.structFieldCount(zcu)) break :find_field;
@@ -27817,7 +27808,7 @@ fn fieldCallBind(
if (zcu.typeToFunc(decl_type)) |func_type| f: {
if (func_type.param_types.len == 0) break :f;
- const first_param_type = Type.fromInterned(func_type.param_types.get(ip)[0]);
+ const first_param_type: Type = .fromInterned(func_type.param_types.get(ip)[0]);
if (first_param_type.isGenericPoison() or
(first_param_type.zigTypeTag(zcu) == .pointer and
(first_param_type.ptrSize(zcu) == .one or
@@ -28003,7 +27994,7 @@ fn structFieldPtr(
if (struct_ty.isTuple(zcu)) {
if (field_name.eqlSlice("len", ip)) {
- const len_inst = try pt.intRef(Type.usize, struct_ty.structFieldCount(zcu));
+ const len_inst = try pt.intRef(.usize, struct_ty.structFieldCount(zcu));
return sema.analyzeRef(block, src, len_inst);
}
const field_index = try sema.tupleFieldIndex(block, struct_ty, field_name, field_name_src);
@@ -28134,7 +28125,7 @@ fn structFieldVal(
return Air.internedToRef(struct_type.field_inits.get(ip)[field_index]);
}
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[field_index]);
if (try sema.typeHasOnePossibleValue(field_ty)) |field_val|
return Air.internedToRef(field_val.toIntern());
@@ -28167,7 +28158,7 @@ fn tupleFieldVal(
const pt = sema.pt;
const zcu = pt.zcu;
if (field_name.eqlSlice("len", &zcu.intern_pool)) {
- return pt.intRef(Type.usize, tuple_ty.structFieldCount(zcu));
+ return pt.intRef(.usize, tuple_ty.structFieldCount(zcu));
}
const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_name_src);
return sema.tupleFieldValByIndex(block, tuple_byval, field_index, tuple_ty);
@@ -28220,7 +28211,7 @@ fn tupleFieldValByIndex(
return switch (zcu.intern_pool.indexToKey(tuple_val.toIntern())) {
.undef => pt.undefRef(field_ty),
.aggregate => |aggregate| Air.internedToRef(switch (aggregate.storage) {
- .bytes => |bytes| try pt.intValue(Type.u8, bytes.at(field_index, &zcu.intern_pool)),
+ .bytes => |bytes| try pt.intValue(.u8, bytes.at(field_index, &zcu.intern_pool)),
.elems => |elems| Value.fromInterned(elems[field_index]),
.repeated_elem => |elem| Value.fromInterned(elem),
}.toIntern()),
@@ -28253,7 +28244,7 @@ fn unionFieldPtr(
try union_ty.resolveFields(pt);
const union_obj = zcu.typeToUnion(union_ty).?;
const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src);
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_index]);
const ptr_field_ty = try pt.ptrTypeSema(.{
.child = field_ty.toIntern(),
.flags = .{
@@ -28295,8 +28286,8 @@ fn unionFieldPtr(
break :ct;
}
// Store to the union to initialize the tag.
- const field_tag = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
- const payload_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
+ const field_tag = try pt.enumValueFieldIndex(.fromInterned(union_obj.enum_tag_ty), enum_field_index);
+ const payload_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_index]);
const new_union_val = try pt.unionValue(union_ty, field_tag, try pt.undefValue(payload_ty));
try sema.storePtrVal(block, src, union_ptr_val, new_union_val, union_ty);
} else {
@@ -28306,7 +28297,7 @@ fn unionFieldPtr(
return sema.failWithUseOfUndef(block, src);
}
const un = ip.indexToKey(union_val.toIntern()).un;
- const field_tag = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
+ const field_tag = try pt.enumValueFieldIndex(.fromInterned(union_obj.enum_tag_ty), enum_field_index);
const tag_matches = un.tag == field_tag.toIntern();
if (!tag_matches) {
const msg = msg: {
@@ -28332,11 +28323,11 @@ fn unionFieldPtr(
if (!initializing and union_obj.flagsUnordered(ip).layout == .auto and block.wantSafety() and
union_ty.unionTagTypeSafety(zcu) != null and union_obj.field_types.len > 1)
{
- const wanted_tag_val = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
+ const wanted_tag_val = try pt.enumValueFieldIndex(.fromInterned(union_obj.enum_tag_ty), enum_field_index);
const wanted_tag = Air.internedToRef(wanted_tag_val.toIntern());
// TODO would it be better if get_union_tag supported pointers to unions?
const union_val = try block.addTyOp(.load, union_ty, union_ptr);
- const active_tag = try block.addTyOp(.get_union_tag, Type.fromInterned(union_obj.enum_tag_ty), union_val);
+ const active_tag = try block.addTyOp(.get_union_tag, .fromInterned(union_obj.enum_tag_ty), union_val);
try sema.addSafetyCheckInactiveUnionField(block, src, active_tag, wanted_tag);
}
if (field_ty.zigTypeTag(zcu) == .noreturn) {
@@ -28363,14 +28354,14 @@ fn unionFieldVal(
try union_ty.resolveFields(pt);
const union_obj = zcu.typeToUnion(union_ty).?;
const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src);
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_index]);
const enum_field_index: u32 = @intCast(Type.fromInterned(union_obj.enum_tag_ty).enumFieldIndex(field_name, zcu).?);
if (try sema.resolveValue(union_byval)) |union_val| {
if (union_val.isUndef(zcu)) return pt.undefRef(field_ty);
const un = ip.indexToKey(union_val.toIntern()).un;
- const field_tag = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
+ const field_tag = try pt.enumValueFieldIndex(.fromInterned(union_obj.enum_tag_ty), enum_field_index);
const tag_matches = un.tag == field_tag.toIntern();
switch (union_obj.flagsUnordered(ip).layout) {
.auto => {
@@ -28408,9 +28399,9 @@ fn unionFieldVal(
if (union_obj.flagsUnordered(ip).layout == .auto and block.wantSafety() and
union_ty.unionTagTypeSafety(zcu) != null and union_obj.field_types.len > 1)
{
- const wanted_tag_val = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
+ const wanted_tag_val = try pt.enumValueFieldIndex(.fromInterned(union_obj.enum_tag_ty), enum_field_index);
const wanted_tag = Air.internedToRef(wanted_tag_val.toIntern());
- const active_tag = try block.addTyOp(.get_union_tag, Type.fromInterned(union_obj.enum_tag_ty), union_byval);
+ const active_tag = try block.addTyOp(.get_union_tag, .fromInterned(union_obj.enum_tag_ty), union_byval);
try sema.addSafetyCheckInactiveUnionField(block, src, active_tag, wanted_tag);
}
if (field_ty.zigTypeTag(zcu) == .noreturn) {
@@ -28540,7 +28531,7 @@ fn elemVal(
// TODO in case of a vector of pointers, we need to detect whether the element
// index is a scalar or vector instead of unconditionally casting to usize.
- const elem_index = try sema.coerce(block, Type.usize, elem_index_uncasted, elem_index_src);
+ const elem_index = try sema.coerce(block, .usize, elem_index_uncasted, elem_index_src);
switch (indexable_ty.zigTypeTag(zcu)) {
.pointer => switch (indexable_ty.ptrSize(zcu)) {
@@ -28795,7 +28786,7 @@ fn elemValArray(
if (oob_safety and block.wantSafety()) {
// Runtime check is only needed if unable to comptime check.
if (maybe_index_val == null) {
- const len_inst = try pt.intRef(Type.usize, array_len);
+ const len_inst = try pt.intRef(.usize, array_len);
const cmp_op: Air.Inst.Tag = if (array_sent != null) .cmp_lte else .cmp_lt;
try sema.addSafetyCheckIndexOob(block, src, elem_index, len_inst, cmp_op);
}
@@ -28860,7 +28851,7 @@ fn elemPtrArray(
// Runtime check is only needed if unable to comptime check.
if (oob_safety and block.wantSafety() and offset == null) {
- const len_inst = try pt.intRef(Type.usize, array_len);
+ const len_inst = try pt.intRef(.usize, array_len);
const cmp_op: Air.Inst.Tag = if (array_sent) .cmp_lte else .cmp_lt;
try sema.addSafetyCheckIndexOob(block, src, elem_index, len_inst, cmp_op);
}
@@ -28917,9 +28908,9 @@ fn elemValSlice(
if (oob_safety and block.wantSafety()) {
const len_inst = if (maybe_slice_val) |slice_val|
- try pt.intRef(Type.usize, try slice_val.sliceLen(pt))
+ try pt.intRef(.usize, try slice_val.sliceLen(pt))
else
- try block.addTyOp(.slice_len, Type.usize, slice);
+ try block.addTyOp(.slice_len, .usize, slice);
const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt;
try sema.addSafetyCheckIndexOob(block, src, elem_index, len_inst, cmp_op);
}
@@ -28976,8 +28967,8 @@ fn elemPtrSlice(
const len_inst = len: {
if (maybe_undef_slice_val) |slice_val|
if (!slice_val.isUndef(zcu))
- break :len try pt.intRef(Type.usize, try slice_val.sliceLen(pt));
- break :len try block.addTyOp(.slice_len, Type.usize, slice);
+ break :len try pt.intRef(.usize, try slice_val.sliceLen(pt));
+ break :len try block.addTyOp(.slice_len, .usize, slice);
};
const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt;
try sema.addSafetyCheckIndexOob(block, src, elem_index, len_inst, cmp_op);
@@ -29142,7 +29133,7 @@ fn coerceExtra(
if (!inst_ty.isSinglePointer(zcu)) break :single_item;
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
const ptr_elem_ty = inst_ty.childType(zcu);
- const array_ty = Type.fromInterned(dest_info.child);
+ const array_ty: Type = .fromInterned(dest_info.child);
if (array_ty.zigTypeTag(zcu) != .array) break :single_item;
const array_elem_ty = array_ty.childType(zcu);
if (array_ty.arrayLen(zcu) != 1) break :single_item;
@@ -29164,7 +29155,7 @@ fn coerceExtra(
const array_elem_type = array_ty.childType(zcu);
const dest_is_mut = !dest_info.flags.is_const;
- const dst_elem_type = Type.fromInterned(dest_info.child);
+ const dst_elem_type: Type = .fromInterned(dest_info.child);
const elem_res = try sema.coerceInMemoryAllowed(block, dst_elem_type, array_elem_type, dest_is_mut, target, dest_ty_src, inst_src, maybe_inst_val);
switch (elem_res) {
.ok => {},
@@ -29225,7 +29216,7 @@ fn coerceExtra(
// could be null.
const src_elem_ty = inst_ty.childType(zcu);
const dest_is_mut = !dest_info.flags.is_const;
- const dst_elem_type = Type.fromInterned(dest_info.child);
+ const dst_elem_type: Type = .fromInterned(dest_info.child);
switch (try sema.coerceInMemoryAllowed(block, dst_elem_type, src_elem_ty, dest_is_mut, target, dest_ty_src, inst_src, maybe_inst_val)) {
.ok => {},
else => break :src_c_ptr,
@@ -29265,16 +29256,16 @@ fn coerceExtra(
.byte_offset = 0,
} })),
.comptime_int => {
- const addr = sema.coerceExtra(block, Type.usize, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) {
+ const addr = sema.coerceExtra(block, .usize, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) {
error.NotCoercible => break :pointer,
else => |e| return e,
};
return try sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src);
},
.int => {
- const ptr_size_ty = switch (inst_ty.intInfo(zcu).signedness) {
- .signed => Type.isize,
- .unsigned => Type.usize,
+ const ptr_size_ty: Type = switch (inst_ty.intInfo(zcu).signedness) {
+ .signed => .isize,
+ .unsigned => .usize,
};
const addr = sema.coerceExtra(block, ptr_size_ty, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) {
error.NotCoercible => {
@@ -29291,8 +29282,8 @@ fn coerceExtra(
const inst_info = inst_ty.ptrInfo(zcu);
switch (try sema.coerceInMemoryAllowed(
block,
- Type.fromInterned(dest_info.child),
- Type.fromInterned(inst_info.child),
+ .fromInterned(dest_info.child),
+ .fromInterned(inst_info.child),
!dest_info.flags.is_const,
target,
dest_ty_src,
@@ -29305,7 +29296,7 @@ fn coerceExtra(
if (inst_info.flags.size == .slice) {
assert(dest_info.sentinel == .none);
if (inst_info.sentinel == .none or
- inst_info.sentinel != (try pt.intValue(Type.fromInterned(inst_info.child), 0)).toIntern())
+ inst_info.sentinel != (try pt.intValue(.fromInterned(inst_info.child), 0)).toIntern())
break :p;
const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty);
@@ -29364,8 +29355,8 @@ fn coerceExtra(
switch (try sema.coerceInMemoryAllowed(
block,
- Type.fromInterned(dest_info.child),
- Type.fromInterned(inst_info.child),
+ .fromInterned(dest_info.child),
+ .fromInterned(inst_info.child),
!dest_info.flags.is_const,
target,
dest_ty_src,
@@ -29378,7 +29369,7 @@ fn coerceExtra(
if (dest_info.sentinel == .none or inst_info.sentinel == .none or
Air.internedToRef(dest_info.sentinel) !=
- try sema.coerceInMemory(Value.fromInterned(inst_info.sentinel), Type.fromInterned(dest_info.child)))
+ try sema.coerceInMemory(Value.fromInterned(inst_info.sentinel), .fromInterned(dest_info.child)))
break :p;
const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty);
@@ -30658,8 +30649,8 @@ fn coerceInMemoryAllowedPtrs(
} };
}
- const dest_child = Type.fromInterned(dest_info.child);
- const src_child = Type.fromInterned(src_info.child);
+ const dest_child: Type = .fromInterned(dest_info.child);
+ const src_child: Type = .fromInterned(src_info.child);
const child = try sema.coerceInMemoryAllowed(
block,
dest_child,
@@ -30731,7 +30722,7 @@ fn coerceInMemoryAllowedPtrs(
.none => Value.@"unreachable",
else => Value.fromInterned(dest_info.sentinel),
},
- .ty = Type.fromInterned(dest_info.child),
+ .ty = .fromInterned(dest_info.child),
} };
}
@@ -30794,8 +30785,8 @@ fn coerceVarArgParam(
const inst_bits = uncasted_ty.floatBits(target);
if (inst_bits >= double_bits) break :float inst;
switch (double_bits) {
- 32 => break :float try sema.coerce(block, Type.f32, inst, inst_src),
- 64 => break :float try sema.coerce(block, Type.f64, inst, inst_src),
+ 32 => break :float try sema.coerce(block, .f32, inst, inst_src),
+ 64 => break :float try sema.coerce(block, .f64, inst, inst_src),
else => unreachable,
}
},
@@ -30807,22 +30798,22 @@ fn coerceVarArgParam(
.signed => .int,
.unsigned => .uint,
})) break :int try sema.coerce(block, switch (uncasted_info.signedness) {
- .signed => Type.c_int,
- .unsigned => Type.c_uint,
+ .signed => .c_int,
+ .unsigned => .c_uint,
}, inst, inst_src);
if (uncasted_info.bits <= target.cTypeBitSize(switch (uncasted_info.signedness) {
.signed => .long,
.unsigned => .ulong,
})) break :int try sema.coerce(block, switch (uncasted_info.signedness) {
- .signed => Type.c_long,
- .unsigned => Type.c_ulong,
+ .signed => .c_long,
+ .unsigned => .c_ulong,
}, inst, inst_src);
if (uncasted_info.bits <= target.cTypeBitSize(switch (uncasted_info.signedness) {
.signed => .longlong,
.unsigned => .ulonglong,
})) break :int try sema.coerce(block, switch (uncasted_info.signedness) {
- .signed => Type.c_longlong,
- .unsigned => Type.c_ulonglong,
+ .signed => .c_longlong,
+ .unsigned => .c_ulonglong,
}, inst, inst_src);
break :int inst;
} else inst,
@@ -30889,7 +30880,7 @@ fn storePtr2(
while (i < field_count) : (i += 1) {
const elem_src = operand_src; // TODO better source location
const elem = try sema.tupleField(block, operand_src, uncasted_operand, elem_src, i);
- const elem_index = try pt.intRef(Type.usize, i);
+ const elem_index = try pt.intRef(.usize, i);
const elem_ptr = try sema.elemPtr(block, ptr_src, ptr, elem_index, elem_src, false, true);
try sema.storePtr2(block, src, elem_ptr, elem_src, elem, elem_src, .store);
}
@@ -31216,7 +31207,7 @@ fn coerceArrayPtrToSlice(
const slice_val = try pt.intern(.{ .slice = .{
.ty = dest_ty.toIntern(),
.ptr = slice_ptr.toIntern(),
- .len = (try pt.intValue(Type.usize, array_ty.arrayLen(zcu))).toIntern(),
+ .len = (try pt.intValue(.usize, array_ty.arrayLen(zcu))).toIntern(),
} });
return Air.internedToRef(slice_val);
}
@@ -31358,7 +31349,7 @@ fn coerceEnumToUnion(
};
const union_obj = zcu.typeToUnion(union_ty).?;
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_index]);
try field_ty.resolveFields(pt);
if (field_ty.zigTypeTag(zcu) == .noreturn) {
const msg = msg: {
@@ -31448,7 +31439,7 @@ fn coerceEnumToUnion(
for (0..union_obj.field_types.len) |field_index| {
const field_name = union_obj.loadTagType(ip).names.get(ip)[field_index];
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_index]);
if (!(try field_ty.hasRuntimeBitsSema(pt))) continue;
try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' has type '{}'", .{
field_name.fmt(ip),
@@ -31536,7 +31527,7 @@ fn coerceArrayLike(
var runtime_src: ?LazySrcLoc = null;
for (element_vals, element_refs, 0..) |*val, *ref, i| {
- const index_ref = Air.internedToRef((try pt.intValue(Type.usize, i)).toIntern());
+ const index_ref = Air.internedToRef((try pt.intValue(.usize, i)).toIntern());
const src = inst_src; // TODO better source location
const elem_src = inst_src; // TODO better source location
const elem_ref = try sema.elemValArray(block, src, inst_src, inst, elem_src, index_ref, true);
@@ -31668,7 +31659,7 @@ fn coerceTupleToArrayPtrs(
const zcu = pt.zcu;
const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src);
const ptr_info = ptr_array_ty.ptrInfo(zcu);
- const array_ty = Type.fromInterned(ptr_info.child);
+ const array_ty: Type = .fromInterned(ptr_info.child);
const array_inst = try sema.coerceTupleToArray(block, array_ty, array_ty_src, tuple, tuple_src);
if (ptr_info.flags.alignment != .none) {
return sema.fail(block, array_ty_src, "TODO: override the alignment of the array decl we create here", .{});
@@ -31721,14 +31712,14 @@ fn coerceTupleToTuple(
const field_index: u32 = @intCast(field_index_usize);
const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i);
- const coerced = try sema.coerce(block, Type.fromInterned(field_ty), elem_ref, field_src);
+ const coerced = try sema.coerce(block, .fromInterned(field_ty), elem_ref, field_src);
field_refs[field_index] = coerced;
if (default_val != .none) {
const init_val = (try sema.resolveValue(coerced)) orelse {
return sema.failWithNeededComptime(block, field_src, .{ .simple = .stored_to_comptime_field });
};
- if (!init_val.eql(Value.fromInterned(default_val), Type.fromInterned(field_ty), pt.zcu)) {
+ if (!init_val.eql(Value.fromInterned(default_val), .fromInterned(field_ty), pt.zcu)) {
return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i);
}
}
@@ -31885,7 +31876,7 @@ pub fn ensureNavResolved(sema: *Sema, block: *Block, src: LazySrcLoc, nav_index:
fn optRefValue(sema: *Sema, opt_val: ?Value) !Value {
const pt = sema.pt;
- const ptr_anyopaque_ty = try pt.singleConstPtrType(Type.anyopaque);
+ const ptr_anyopaque_ty = try pt.singleConstPtrType(.anyopaque);
return Value.fromInterned(try pt.intern(.{ .opt = .{
.ty = (try pt.optionalType(ptr_anyopaque_ty.toIntern())).toIntern(),
.val = if (opt_val) |val| (try pt.getCoerced(
@@ -32140,12 +32131,12 @@ fn analyzeSliceLen(
const zcu = pt.zcu;
if (try sema.resolveValue(slice_inst)) |slice_val| {
if (slice_val.isUndef(zcu)) {
- return pt.undefRef(Type.usize);
+ return .undef_usize;
}
- return pt.intRef(Type.usize, try slice_val.sliceLen(pt));
+ return pt.intRef(.usize, try slice_val.sliceLen(pt));
}
try sema.requireRuntimeBlock(block, src, null);
- return block.addTyOp(.slice_len, Type.usize, slice_inst);
+ return block.addTyOp(.slice_len, .usize, slice_inst);
}
fn analyzeIsNull(
@@ -32156,7 +32147,7 @@ fn analyzeIsNull(
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
- const result_ty = Type.bool;
+ const result_ty: Type = .bool;
if (try sema.resolveValue(operand)) |opt_val| {
if (opt_val.isUndef(zcu)) {
return pt.undefRef(result_ty);
@@ -32224,7 +32215,7 @@ fn analyzeIsNonErrComptimeOnly(
else => {},
}
} else if (operand == .undef) {
- return pt.undefRef(Type.bool);
+ return .undef_bool;
} else if (@intFromEnum(operand) < InternPool.static_len) {
// None of the ref tags can be errors.
return .bool_true;
@@ -32308,14 +32299,7 @@ fn analyzeIsNonErrComptimeOnly(
}
if (maybe_operand_val) |err_union| {
- if (err_union.isUndef(zcu)) {
- return pt.undefRef(Type.bool);
- }
- if (err_union.getErrorName(zcu) == .none) {
- return .bool_true;
- } else {
- return .bool_false;
- }
+ return if (err_union.isUndef(zcu)) .undef_bool else if (err_union.getErrorName(zcu) == .none) .bool_true else .bool_false;
}
return .none;
}
@@ -32412,8 +32396,8 @@ fn analyzeSlice(
);
const bounds_error_message = "slice of single-item pointer must have bounds [0..0], [0..1], or [1..1]";
- if (try sema.compareScalar(start_value, .neq, end_value, Type.comptime_int)) {
- if (try sema.compareScalar(start_value, .neq, Value.zero_comptime_int, Type.comptime_int)) {
+ if (try sema.compareScalar(start_value, .neq, end_value, .comptime_int)) {
+ if (try sema.compareScalar(start_value, .neq, Value.zero_comptime_int, .comptime_int)) {
const msg = msg: {
const msg = try sema.errMsg(start_src, bounds_error_message, .{});
errdefer msg.destroy(sema.gpa);
@@ -32429,7 +32413,7 @@ fn analyzeSlice(
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
- } else if (try sema.compareScalar(end_value, .neq, Value.one_comptime_int, Type.comptime_int)) {
+ } else if (try sema.compareScalar(end_value, .neq, Value.one_comptime_int, .comptime_int)) {
const msg = msg: {
const msg = try sema.errMsg(end_src, bounds_error_message, .{});
errdefer msg.destroy(sema.gpa);
@@ -32447,7 +32431,7 @@ fn analyzeSlice(
return sema.failWithOwnedErrorMsg(block, msg);
}
} else {
- if (try sema.compareScalar(end_value, .gt, Value.one_comptime_int, Type.comptime_int)) {
+ if (try sema.compareScalar(end_value, .gt, Value.one_comptime_int, .comptime_int)) {
return sema.fail(
block,
end_src,
@@ -32512,7 +32496,7 @@ fn analyzeSlice(
break :ptr try sema.coerceCompatiblePtrs(block, try pt.ptrTypeSema(manyptr_ty_key), ptr_or_slice, ptr_src);
} else ptr_or_slice;
- const start = try sema.coerce(block, Type.usize, uncasted_start, start_src);
+ const start = try sema.coerce(block, .usize, uncasted_start, start_src);
const new_ptr = try sema.analyzePtrArithmetic(block, src, ptr, start, .ptr_add, ptr_src, start_src);
const new_ptr_ty = sema.typeOf(new_ptr);
@@ -32523,20 +32507,20 @@ fn analyzeSlice(
var end_is_len = uncasted_end_opt == .none;
const end = e: {
if (array_ty.zigTypeTag(zcu) == .array) {
- const len_val = try pt.intValue(Type.usize, array_ty.arrayLen(zcu));
+ const len_val = try pt.intValue(.usize, array_ty.arrayLen(zcu));
if (!end_is_len) {
const end = if (by_length) end: {
- const len = try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
+ const len = try sema.coerce(block, .usize, uncasted_end_opt, end_src);
const uncasted_end = try sema.analyzeArithmetic(block, .add, start, len, src, start_src, end_src, false);
- break :end try sema.coerce(block, Type.usize, uncasted_end, end_src);
- } else try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
+ break :end try sema.coerce(block, .usize, uncasted_end, end_src);
+ } else try sema.coerce(block, .usize, uncasted_end_opt, end_src);
if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| {
const len_s_val = try pt.intValue(
- Type.usize,
+ .usize,
array_ty.arrayLenIncludingSentinel(zcu),
);
- if (!(try sema.compareAll(end_val, .lte, len_s_val, Type.usize))) {
+ if (!(try sema.compareAll(end_val, .lte, len_s_val, .usize))) {
const sentinel_label: []const u8 = if (array_ty.sentinel(zcu) != null)
" +1 (sentinel)"
else
@@ -32557,7 +32541,7 @@ fn analyzeSlice(
// end_is_len is only true if we are NOT using the sentinel
// length. For sentinel-length, we don't want the type to
// contain the sentinel.
- if (end_val.eql(len_val, Type.usize, zcu)) {
+ if (end_val.eql(len_val, .usize, zcu)) {
end_is_len = true;
}
}
@@ -32568,10 +32552,10 @@ fn analyzeSlice(
} else if (slice_ty.isSlice(zcu)) {
if (!end_is_len) {
const end = if (by_length) end: {
- const len = try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
+ const len = try sema.coerce(block, .usize, uncasted_end_opt, end_src);
const uncasted_end = try sema.analyzeArithmetic(block, .add, start, len, src, start_src, end_src, false);
- break :end try sema.coerce(block, Type.usize, uncasted_end, end_src);
- } else try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
+ break :end try sema.coerce(block, .usize, uncasted_end, end_src);
+ } else try sema.coerce(block, .usize, uncasted_end_opt, end_src);
if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| {
if (try sema.resolveValue(ptr_or_slice)) |slice_val| {
if (slice_val.isUndef(zcu)) {
@@ -32580,8 +32564,8 @@ fn analyzeSlice(
const has_sentinel = slice_ty.sentinel(zcu) != null;
const slice_len = try slice_val.sliceLen(pt);
const len_plus_sent = slice_len + @intFromBool(has_sentinel);
- const slice_len_val_with_sentinel = try pt.intValue(Type.usize, len_plus_sent);
- if (!(try sema.compareAll(end_val, .lte, slice_len_val_with_sentinel, Type.usize))) {
+ const slice_len_val_with_sentinel = try pt.intValue(.usize, len_plus_sent);
+ if (!(try sema.compareAll(end_val, .lte, slice_len_val_with_sentinel, .usize))) {
const sentinel_label: []const u8 = if (has_sentinel)
" +1 (sentinel)"
else
@@ -32602,8 +32586,8 @@ fn analyzeSlice(
// If the slice has a sentinel, we consider end_is_len
// is only true if it equals the length WITHOUT the
// sentinel, so we don't add a sentinel type.
- const slice_len_val = try pt.intValue(Type.usize, slice_len);
- if (end_val.eql(slice_len_val, Type.usize, zcu)) {
+ const slice_len_val = try pt.intValue(.usize, slice_len);
+ if (end_val.eql(slice_len_val, .usize, zcu)) {
end_is_len = true;
}
}
@@ -32614,10 +32598,10 @@ fn analyzeSlice(
}
if (!end_is_len) {
if (by_length) {
- const len = try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
+ const len = try sema.coerce(block, .usize, uncasted_end_opt, end_src);
const uncasted_end = try sema.analyzeArithmetic(block, .add, start, len, src, start_src, end_src, false);
- break :e try sema.coerce(block, Type.usize, uncasted_end, end_src);
- } else break :e try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
+ break :e try sema.coerce(block, .usize, uncasted_end, end_src);
+ } else break :e try sema.coerce(block, .usize, uncasted_end_opt, end_src);
}
return sema.analyzePtrArithmetic(block, src, ptr, start, .ptr_add, ptr_src, start_src);
};
@@ -32645,7 +32629,7 @@ fn analyzeSlice(
// requirement: start <= end
if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| {
if (try sema.resolveDefinedValue(block, start_src, start)) |start_val| {
- if (!by_length and !(try sema.compareAll(start_val, .lte, end_val, Type.usize))) {
+ if (!by_length and !(try sema.compareAll(start_val, .lte, end_val, .usize))) {
return sema.fail(
block,
start_src,
@@ -32715,7 +32699,7 @@ fn analyzeSlice(
try sema.addSafetyCheckCall(block, src, ok, .@"panic.startGreaterThanEnd", &.{ start, end });
}
const new_len = if (by_length)
- try sema.coerce(block, Type.usize, uncasted_end_opt, end_src)
+ try sema.coerce(block, .usize, uncasted_end_opt, end_src)
else
try sema.analyzeArithmetic(block, .sub, end, start, src, end_src, start_src, false);
const opt_new_len_val = try sema.resolveDefinedValue(block, src, new_len);
@@ -32753,9 +32737,9 @@ fn analyzeSlice(
bounds_check: {
const actual_len = if (array_ty.zigTypeTag(zcu) == .array)
- try pt.intRef(Type.usize, array_ty.arrayLenIncludingSentinel(zcu))
+ try pt.intRef(.usize, array_ty.arrayLenIncludingSentinel(zcu))
else if (slice_ty.isSlice(zcu)) l: {
- const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice);
+ const slice_len_inst = try block.addTyOp(.slice_len, .usize, ptr_or_slice);
break :l if (slice_ty.sentinel(zcu) == null)
slice_len_inst
else
@@ -32811,15 +32795,15 @@ fn analyzeSlice(
// requirement: end <= len
const opt_len_inst = if (array_ty.zigTypeTag(zcu) == .array)
- try pt.intRef(Type.usize, array_ty.arrayLenIncludingSentinel(zcu))
+ try pt.intRef(.usize, array_ty.arrayLenIncludingSentinel(zcu))
else if (slice_ty.isSlice(zcu)) blk: {
if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| {
// we don't need to add one for sentinels because the
// underlying value data includes the sentinel
- break :blk try pt.intRef(Type.usize, try slice_val.sliceLen(pt));
+ break :blk try pt.intRef(.usize, try slice_val.sliceLen(pt));
}
- const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice);
+ const slice_len_inst = try block.addTyOp(.slice_len, .usize, ptr_or_slice);
if (slice_ty.sentinel(zcu) == null) break :blk slice_len_inst;
// we have to add one because slice lengths don't include the sentinel
@@ -32935,8 +32919,8 @@ fn cmpNumeric(
}
// Any other comparison depends on both values, so the result is undef if either is undef.
- if (maybe_lhs_val) |v| if (v.isUndef(zcu)) return pt.undefRef(Type.bool);
- if (maybe_rhs_val) |v| if (v.isUndef(zcu)) return pt.undefRef(Type.bool);
+ if (maybe_lhs_val) |v| if (v.isUndef(zcu)) return .undef_bool;
+ if (maybe_rhs_val) |v| if (v.isUndef(zcu)) return .undef_bool;
const runtime_src: LazySrcLoc = if (maybe_lhs_val) |lhs_val| rs: {
if (maybe_rhs_val) |rhs_val| {
@@ -33646,7 +33630,7 @@ fn resolvePeerTypes(
candidate_srcs: PeerTypeCandidateSrc,
) !Type {
switch (instructions.len) {
- 0 => return Type.noreturn,
+ 0 => return .noreturn,
1 => return sema.typeOf(instructions[0]),
else => {},
}
@@ -33780,12 +33764,12 @@ fn resolvePeerTypesInner(
.nullable => {
for (peer_tys, 0..) |opt_ty, i| {
const ty = opt_ty orelse continue;
- if (!ty.eql(Type.null, zcu)) return .{ .conflict = .{
+ if (!ty.eql(.null, zcu)) return .{ .conflict = .{
.peer_idx_a = strat_reason,
.peer_idx_b = i,
} };
}
- return .{ .success = Type.null };
+ return .{ .success = .null };
},
.optional => {
@@ -34006,7 +33990,7 @@ fn resolvePeerTypesInner(
};
// Try peer -> cur, then cur -> peer
- ptr_info.child = ((try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) orelse {
+ ptr_info.child = ((try sema.resolvePairInMemoryCoercible(block, src, .fromInterned(ptr_info.child), .fromInterned(peer_info.child))) orelse {
return .{ .conflict = .{
.peer_idx_a = first_idx,
.peer_idx_b = i,
@@ -34153,8 +34137,8 @@ fn resolvePeerTypesInner(
};
// We abstract array handling slightly so that tuple pointers can work like array pointers
- const peer_pointee_array = sema.typeIsArrayLike(Type.fromInterned(peer_info.child));
- const cur_pointee_array = sema.typeIsArrayLike(Type.fromInterned(ptr_info.child));
+ const peer_pointee_array = sema.typeIsArrayLike(.fromInterned(peer_info.child));
+ const cur_pointee_array = sema.typeIsArrayLike(.fromInterned(ptr_info.child));
// This switch is just responsible for deciding the size and pointee (not including
// single-pointer array sentinel).
@@ -34162,7 +34146,7 @@ fn resolvePeerTypesInner(
switch (peer_info.flags.size) {
.one => switch (ptr_info.flags.size) {
.one => {
- if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) |pointee| {
+ if (try sema.resolvePairInMemoryCoercible(block, src, .fromInterned(ptr_info.child), .fromInterned(peer_info.child))) |pointee| {
ptr_info.child = pointee.toIntern();
break :good;
}
@@ -34204,7 +34188,7 @@ fn resolvePeerTypesInner(
.many => {
// Only works for *[n]T + [*]T -> [*]T
const arr = peer_pointee_array orelse return generic_err;
- if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), arr.elem_ty)) |pointee| {
+ if (try sema.resolvePairInMemoryCoercible(block, src, .fromInterned(ptr_info.child), arr.elem_ty)) |pointee| {
ptr_info.child = pointee.toIntern();
break :good;
}
@@ -34217,7 +34201,7 @@ fn resolvePeerTypesInner(
.slice => {
// Only works for *[n]T + []T -> []T
const arr = peer_pointee_array orelse return generic_err;
- if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), arr.elem_ty)) |pointee| {
+ if (try sema.resolvePairInMemoryCoercible(block, src, .fromInterned(ptr_info.child), arr.elem_ty)) |pointee| {
ptr_info.child = pointee.toIntern();
break :good;
}
@@ -34233,7 +34217,7 @@ fn resolvePeerTypesInner(
.one => {
// Only works for [*]T + *[n]T -> [*]T
const arr = cur_pointee_array orelse return generic_err;
- if (try sema.resolvePairInMemoryCoercible(block, src, arr.elem_ty, Type.fromInterned(peer_info.child))) |pointee| {
+ if (try sema.resolvePairInMemoryCoercible(block, src, arr.elem_ty, .fromInterned(peer_info.child))) |pointee| {
ptr_info.flags.size = .many;
ptr_info.child = pointee.toIntern();
break :good;
@@ -34247,7 +34231,7 @@ fn resolvePeerTypesInner(
return generic_err;
},
.many => {
- if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) |pointee| {
+ if (try sema.resolvePairInMemoryCoercible(block, src, .fromInterned(ptr_info.child), .fromInterned(peer_info.child))) |pointee| {
ptr_info.child = pointee.toIntern();
break :good;
}
@@ -34262,7 +34246,7 @@ fn resolvePeerTypesInner(
} };
}
// Okay, then works for [*]T + "[]T" -> [*]T
- if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) |pointee| {
+ if (try sema.resolvePairInMemoryCoercible(block, src, .fromInterned(ptr_info.child), .fromInterned(peer_info.child))) |pointee| {
ptr_info.flags.size = .many;
ptr_info.child = pointee.toIntern();
break :good;
@@ -34275,7 +34259,7 @@ fn resolvePeerTypesInner(
.one => {
// Only works for []T + *[n]T -> []T
const arr = cur_pointee_array orelse return generic_err;
- if (try sema.resolvePairInMemoryCoercible(block, src, arr.elem_ty, Type.fromInterned(peer_info.child))) |pointee| {
+ if (try sema.resolvePairInMemoryCoercible(block, src, arr.elem_ty, .fromInterned(peer_info.child))) |pointee| {
ptr_info.flags.size = .slice;
ptr_info.child = pointee.toIntern();
break :good;
@@ -34293,7 +34277,7 @@ fn resolvePeerTypesInner(
return generic_err;
},
.slice => {
- if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) |pointee| {
+ if (try sema.resolvePairInMemoryCoercible(block, src, .fromInterned(ptr_info.child), .fromInterned(peer_info.child))) |pointee| {
ptr_info.child = pointee.toIntern();
break :good;
}
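The pointer-size matrix above covers, among others, the *[N]T + []T -> []T case. A sketch of the user-facing behavior (my example, assuming the standard test runner):

    const std = @import("std");

    test "*[N]T and []const T peer-resolve to a slice" {
        const arr: [3]u8 = .{ 1, 2, 3 };
        var cond = false;
        _ = &cond;
        // Peers are []const u8 and *const [3]u8; the result is []const u8.
        const r = if (cond) @as([]const u8, arr[0..2]) else &arr;
        try std.testing.expect(@TypeOf(r) == []const u8);
        try std.testing.expectEqual(@as(usize, 3), r.len);
    }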
@@ -34479,7 +34463,7 @@ fn resolvePeerTypesInner(
} },
}
}
- return .{ .success = Type.comptime_int };
+ return .{ .success = .comptime_int };
},
.comptime_float => {
@@ -34493,7 +34477,7 @@ fn resolvePeerTypesInner(
} },
}
}
- return .{ .success = Type.comptime_float };
+ return .{ .success = .comptime_float };
},
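When every peer is a comptime-known numeric literal, resolution stays in the comptime type, as the two returns above show. A small sketch:

    const std = @import("std");

    test "all-comptime_int peers stay comptime_int" {
        const a = 1;
        const b = 2;
        // Both peers are comptime_int, so the result type is comptime_int.
        const c = if (a < b) a else b;
        comptime std.debug.assert(@TypeOf(c) == comptime_int);
        try std.testing.expect(c == 1);
    }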
.fixed_int => {
@@ -34601,11 +34585,11 @@ fn resolvePeerTypesInner(
// Recreate the type so we eliminate any c_longdouble
const bits = @max(cur_ty.floatBits(target), ty.floatBits(target));
opt_cur_ty = switch (bits) {
- 16 => Type.f16,
- 32 => Type.f32,
- 64 => Type.f64,
- 80 => Type.f80,
- 128 => Type.f128,
+ 16 => .f16,
+ 32 => .f32,
+ 64 => .f64,
+ 80 => .f80,
+ 128 => .f128,
else => unreachable,
};
} else {
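The bit-width selection above deliberately rebuilds the type from f16/f32/f64/f80/f128 so that c_longdouble never survives peer resolution; the user-visible effect is simply that the wider float wins. A hedged sketch:

    const std = @import("std");

    test "float peers resolve to the wider type" {
        var cond = true;
        _ = &cond;
        const x = if (cond) @as(f32, 1.5) else @as(f64, 2.5);
        try std.testing.expect(@TypeOf(x) == f64);
    }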
@@ -34716,7 +34700,7 @@ fn resolvePeerTypesInner(
break;
};
const uncoerced_field = Air.internedToRef(uncoerced_field_val.toIntern());
- const coerced_inst = sema.coerceExtra(block, Type.fromInterned(field_ty.*), uncoerced_field, src, .{ .report_err = false }) catch |err| switch (err) {
+ const coerced_inst = sema.coerceExtra(block, .fromInterned(field_ty.*), uncoerced_field, src, .{ .report_err = false }) catch |err| switch (err) {
            // It's possible for PTR to give false positives. Just give up on making this a comptime field; we'll get an error later anyway.
error.NotCoercible => {
comptime_val = null;
@@ -34729,7 +34713,7 @@ fn resolvePeerTypesInner(
comptime_val = coerced_val;
continue;
};
- if (!coerced_val.eql(existing, Type.fromInterned(field_ty.*), zcu)) {
+ if (!coerced_val.eql(existing, .fromInterned(field_ty.*), zcu)) {
comptime_val = null;
break;
}
@@ -34743,7 +34727,7 @@ fn resolvePeerTypesInner(
.values = field_vals,
});
- return .{ .success = Type.fromInterned(final_ty) };
+ return .{ .success = .fromInterned(final_ty) };
},
.exact => {
@@ -34813,7 +34797,7 @@ fn typeIsArrayLike(sema: *Sema, ty: Type) ?ArrayLike {
const field_count = ty.structFieldCount(zcu);
if (field_count == 0) return .{
.len = 0,
- .elem_ty = Type.noreturn,
+ .elem_ty = .noreturn,
};
if (!ty.isTuple(zcu)) return null;
const elem_ty = ty.fieldType(0, zcu);
@@ -34902,7 +34886,7 @@ pub fn resolveStructAlignment(
var alignment: Alignment = .@"1";
for (0..struct_type.field_types.len) |i| {
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[i]);
if (struct_type.fieldIsComptime(ip, i) or try field_ty.comptimeOnlySema(pt))
continue;
const field_align = try field_ty.structFieldAlignmentSema(
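Comptime and comptime-only fields are skipped above, so they contribute nothing to a struct's alignment (or, later, its size). A minimal sketch:

    const std = @import("std");

    test "comptime fields contribute no runtime size or alignment" {
        const S = struct { comptime flag: bool = true, x: u8 };
        try std.testing.expectEqual(@as(usize, 1), @sizeOf(S));
        try std.testing.expectEqual(@as(usize, 1), @alignOf(S));
    }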
@@ -34953,7 +34937,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void {
var big_align: Alignment = .@"1";
for (aligns, sizes, 0..) |*field_align, *field_size, i| {
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[i]);
if (struct_type.fieldIsComptime(ip, i) or try field_ty.comptimeOnlySema(pt)) {
struct_type.offsets.get(ip)[i] = 0;
field_size.* = 0;
@@ -35001,7 +34985,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void {
const runtime_order = struct_type.runtime_order.get(ip);
for (runtime_order, 0..) |*ro, i| {
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[i]);
if (struct_type.fieldIsComptime(ip, i) or try field_ty.comptimeOnlySema(pt)) {
ro.* = .omitted;
} else {
@@ -35095,7 +35079,7 @@ fn backingIntType(
const fields_bit_sum = blk: {
var accumulator: u64 = 0;
for (0..struct_type.field_types.len) |i| {
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[i]);
accumulator += try field_ty.bitSizeSema(pt);
}
break :blk accumulator;
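The fields_bit_sum computed above determines the default backing integer of a packed struct. A sketch, assuming the current @typeInfo field naming (.@"struct" with backing_integer):

    const std = @import("std");

    test "default backing integer of a packed struct" {
        const S = packed struct { a: u3, b: u5, c: u8 };
        // 3 + 5 + 8 = 16 bits, so the default backing integer is u16.
        try std.testing.expect(@typeInfo(S).@"struct".backing_integer.? == u16);
        try std.testing.expectEqual(@as(usize, 16), @bitSizeOf(S));
    }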
@@ -35234,7 +35218,7 @@ pub fn resolveUnionAlignment(
var max_align: Alignment = .@"1";
for (0..union_type.field_types.len) |field_index| {
- const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]);
+ const field_ty: Type = .fromInterned(union_type.field_types.get(ip)[field_index]);
if (!(try field_ty.hasRuntimeBitsSema(pt))) continue;
const explicit_align = union_type.fieldAlign(ip, field_index);
@@ -35282,7 +35266,7 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
var max_size: u64 = 0;
var max_align: Alignment = .@"1";
for (0..union_type.field_types.len) |field_index| {
- const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]);
+ const field_ty: Type = .fromInterned(union_type.field_types.get(ip)[field_index]);
if (try field_ty.comptimeOnlySema(pt) or field_ty.zigTypeTag(pt.zcu) == .noreturn) continue; // TODO: should this affect alignment?
@@ -35307,7 +35291,7 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
const has_runtime_tag = union_type.flagsUnordered(ip).runtime_tag.hasTag() and
try Type.fromInterned(union_type.enum_tag_ty).hasRuntimeBitsSema(pt);
const size, const alignment, const padding = if (has_runtime_tag) layout: {
- const enum_tag_type = Type.fromInterned(union_type.enum_tag_ty);
+ const enum_tag_type: Type = .fromInterned(union_type.enum_tag_ty);
const tag_align = try enum_tag_type.abiAlignmentSema(pt);
const tag_size = try enum_tag_type.abiSizeSema(pt);
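When a runtime tag is present, the layout above folds the tag's size and alignment into the union. A rough sketch (exact padding is target-dependent, so only a lower bound is asserted for the tagged case):

    const std = @import("std");

    test "a runtime tag adds to the union's size" {
        const Payload = extern union { a: u8, b: u32 };
        const Tagged = union(enum) { a: u8, b: u32 };
        try std.testing.expectEqual(@as(usize, 4), @sizeOf(Payload));
        // The runtime tag (plus padding) makes the tagged union strictly larger.
        try std.testing.expect(@sizeOf(Tagged) > @sizeOf(Payload));
    }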
@@ -35392,7 +35376,7 @@ pub fn resolveStructFully(sema: *Sema, ty: Type) SemaError!void {
// See also similar code for unions.
for (0..struct_type.field_types.len) |i| {
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[i]);
try field_ty.resolveFully(pt);
}
}
@@ -35421,7 +35405,7 @@ pub fn resolveUnionFully(sema: *Sema, ty: Type) SemaError!void {
union_obj.setStatus(ip, .fully_resolved_wip);
for (0..union_obj.field_types.len) |field_index| {
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_index]);
try field_ty.resolveFully(pt);
}
union_obj.setStatus(ip, .fully_resolved);
@@ -35553,7 +35537,7 @@ fn resolveInferredErrorSet(
// set. However, in the case of comptime/inline function calls with
// inferred error sets, each call gets an adhoc InferredErrorSet object, which
// has no corresponding function body.
- const ies_func_info = zcu.typeToFunc(Type.fromInterned(func.ty)).?;
+ const ies_func_info = zcu.typeToFunc(.fromInterned(func.ty)).?;
    // If the IES was declared by an inline function with a generic return type, the return_type should be generic_poison,
    // because an inline function does not create a new declaration and the IES has already been filled in by analyzeCall,
    // so we can simply skip this case here.
@@ -36008,7 +35992,7 @@ fn structFieldInits(
// In init bodies, the zir index of the struct itself is used
// to refer to the current field type.
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_i]);
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[field_i]);
const type_ref = Air.internedToRef(field_ty.toIntern());
try sema.inst_map.ensureSpaceForInstructions(sema.gpa, &.{zir_index});
sema.inst_map.putAssumeCapacity(zir_index, type_ref);
@@ -36135,7 +36119,7 @@ fn unionFields(
}
if (fields_len > 0) {
- const field_count_val = try pt.intValue(Type.comptime_int, fields_len - 1);
+ const field_count_val = try pt.intValue(.comptime_int, fields_len - 1);
if (!(try sema.intFitsInType(field_count_val, int_tag_ty, null))) {
const msg = msg: {
const msg = try sema.errMsg(tag_ty_src, "specified integer tag type cannot represent every field", .{});
@@ -36288,9 +36272,9 @@ fn unionFields(
}
const field_ty: Type = if (!has_type)
- Type.void
+ .void
else if (field_type_ref == .none)
- Type.noreturn
+ .noreturn
else
try sema.resolveType(&block_scope, type_src, field_type_ref);
@@ -36388,11 +36372,11 @@ fn unionFields(
for (tag_info.names.get(ip), 0..) |field_name, field_index| {
if (explicit_tags_seen[field_index]) continue;
- try sema.addFieldErrNote(Type.fromInterned(tag_ty), field_index, msg, "field '{}' missing, declared here", .{
+ try sema.addFieldErrNote(.fromInterned(tag_ty), field_index, msg, "field '{}' missing, declared here", .{
field_name.fmt(ip),
});
}
- try sema.addDeclaredHereNote(msg, Type.fromInterned(tag_ty));
+ try sema.addDeclaredHereNote(msg, .fromInterned(tag_ty));
break :msg msg;
};
return sema.failWithOwnedErrorMsg(&block_scope, msg);
@@ -36530,10 +36514,11 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.comptime_int_type,
.comptime_float_type,
.enum_literal_type,
+ .ptr_usize_type,
+ .ptr_const_comptime_int_type,
.manyptr_u8_type,
.manyptr_const_u8_type,
.manyptr_const_u8_sentinel_0_type,
- .single_const_pointer_to_comptime_int_type,
.slice_const_u8_type,
.slice_const_u8_sentinel_0_type,
.vector_8_i8_type,
@@ -36595,11 +36580,16 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.empty_tuple_type => Value.empty_tuple,
// values, not types
.undef,
+ .undef_bool,
+ .undef_usize,
+ .undef_u1,
.zero,
.zero_usize,
+ .zero_u1,
.zero_u8,
.one,
.one_usize,
+ .one_u1,
.one_u8,
.four_u8,
.negative_one,
@@ -36705,7 +36695,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.storage = .{ .elems = &.{} },
} }));
- if (try sema.typeHasOnePossibleValue(Type.fromInterned(seq_type.child))) |opv| {
+ if (try sema.typeHasOnePossibleValue(.fromInterned(seq_type.child))) |opv| {
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .repeated_elem = opv.toIntern() },
@@ -36740,7 +36730,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
field_val.* = struct_type.field_inits.get(ip)[i];
continue;
}
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[i]);
if (try sema.typeHasOnePossibleValue(field_ty)) |field_opv| {
field_val.* = field_opv.toIntern();
} else return null;
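typeHasOnePossibleValue is what lets aggregates whose every field has exactly one possible value occupy no runtime bits at all. A quick sketch:

    const std = @import("std");

    test "aggregates with one possible value are zero-sized" {
        const S = struct { a: u0, b: void, c: [0]u8 };
        try std.testing.expectEqual(@as(usize, 0), @sizeOf(S));
    }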
@@ -36773,13 +36763,13 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
try ty.resolveLayout(pt);
const union_obj = ip.loadUnionType(ty.toIntern());
- const tag_val = (try sema.typeHasOnePossibleValue(Type.fromInterned(union_obj.tagTypeUnordered(ip)))) orelse
+ const tag_val = (try sema.typeHasOnePossibleValue(.fromInterned(union_obj.tagTypeUnordered(ip)))) orelse
return null;
if (union_obj.field_types.len == 0) {
const only = try pt.intern(.{ .empty_enum_value = ty.toIntern() });
return Value.fromInterned(only);
}
- const only_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]);
+ const only_field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[0]);
const val_val = (try sema.typeHasOnePossibleValue(only_field_ty)) orelse
return null;
const only = try pt.internUnion(.{
@@ -36796,7 +36786,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.nonexhaustive => {
if (enum_type.tag_ty == .comptime_int_type) return null;
- if (try sema.typeHasOnePossibleValue(Type.fromInterned(enum_type.tag_ty))) |int_opv| {
+ if (try sema.typeHasOnePossibleValue(.fromInterned(enum_type.tag_ty))) |int_opv| {
const only = try pt.intern(.{ .enum_tag = .{
.ty = ty.toIntern(),
.int = int_opv.toIntern(),
@@ -36814,7 +36804,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
1 => try pt.intern(.{ .enum_tag = .{
.ty = ty.toIntern(),
.int = if (enum_type.values.len == 0)
- (try pt.intValue(Type.fromInterned(enum_type.tag_ty), 0)).toIntern()
+ (try pt.intValue(.fromInterned(enum_type.tag_ty), 0)).toIntern()
else
try ip.getCoercedInts(
zcu.gpa,
@@ -37041,7 +37031,7 @@ fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type {
if (ptr_type.flags.is_allowzero) return null;
    // optionals of zero-sized types behave like bools, not pointers
- const payload_ty = Type.fromInterned(opt_child);
+ const payload_ty: Type = .fromInterned(opt_child);
if ((try sema.typeHasOnePossibleValue(payload_ty)) != null) {
return null;
}
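This is the "optionals of zero-sized types behave like bools, not pointers" rule in user-visible terms: a non-allowzero optional pointer reuses the null address, while an optional of a zero-bit payload needs its own flag. A minimal sketch:

    const std = @import("std");

    test "optional pointers vs optionals of zero-bit payloads" {
        // The null pointer representation means no extra bit is needed here...
        try std.testing.expectEqual(@sizeOf(*u8), @sizeOf(?*u8));
        // ...while ?void must carry a bool-like flag of its own.
        try std.testing.expectEqual(@as(usize, 1), @sizeOf(?void));
    }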
@@ -37175,7 +37165,7 @@ fn intFromFloatScalar(
var big_int = try float128IntPartToBigInt(sema.arena, float);
defer big_int.deinit();
- const cti_result = try pt.intValue_big(Type.comptime_int, big_int.toConst());
+ const cti_result = try pt.intValue_big(.comptime_int, big_int.toConst());
if (!(try sema.intFitsInType(cti_result, int_ty, null))) {
return sema.fail(block, src, "float value '{}' cannot be stored in integer type '{}'", .{
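The check above rejects floats whose integer part cannot be represented in the destination type; in-range values keep their integer part (truncation toward zero). A sketch:

    const std = @import("std");

    test "@intFromFloat keeps the integer part when it fits" {
        const x: f32 = 42.9;
        try std.testing.expectEqual(@as(u8, 42), @as(u8, @intFromFloat(x)));
    }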
@@ -37278,8 +37268,8 @@ fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool {
assert(enum_type.tag_mode != .nonexhaustive);
// The `tagValueIndex` function call below relies on the type being the integer tag type.
// `getCoerced` assumes the value will fit the new type.
- if (!(try sema.intFitsInType(int, Type.fromInterned(enum_type.tag_ty), null))) return false;
- const int_coerced = try pt.getCoerced(int, Type.fromInterned(enum_type.tag_ty));
+ if (!(try sema.intFitsInType(int, .fromInterned(enum_type.tag_ty), null))) return false;
+ const int_coerced = try pt.getCoerced(int, .fromInterned(enum_type.tag_ty));
return enum_type.tagValueIndex(&zcu.intern_pool, int_coerced.toIntern()) != null;
}
@@ -37359,7 +37349,7 @@ fn compareVector(
const lhs_elem = try lhs.elemValue(pt, i);
const rhs_elem = try rhs.elemValue(pt, i);
if (lhs_elem.isUndef(zcu) or rhs_elem.isUndef(zcu)) {
- scalar.* = try pt.intern(.{ .undef = .bool_type });
+ scalar.* = .undef_bool;
} else {
const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(zcu));
scalar.* = Value.makeBool(res_bool).toIntern();
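compareVector folds comparisons elementwise, producing an undefined bool for any undefined element; for defined elements the result matches the language-level vector comparison. A sketch of the defined-element path:

    const std = @import("std");

    test "vector comparisons are elementwise" {
        const a: @Vector(4, i32) = .{ 1, 2, 3, 4 };
        const b: @Vector(4, i32) = .{ 4, 3, 2, 1 };
        const expected: @Vector(4, bool) = .{ true, true, false, false };
        try std.testing.expect(@reduce(.And, (a < b) == expected));
    }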
@@ -37826,7 +37816,7 @@ pub fn resolveDeclaredEnum(
.owner = .wrap(.{ .type = wip_ty.index }),
.func_index = .none,
.func_is_naked = false,
- .fn_ret_ty = Type.void,
+ .fn_ret_ty = .void,
.fn_ret_ty_ies = null,
.comptime_err_ret_trace = &comptime_err_ret_trace,
};
@@ -37999,7 +37989,7 @@ fn resolveDeclaredEnumInner(
break :overflow false;
} else overflow: {
assert(wip_ty.nextField(ip, field_name, .none) == null);
- last_tag_val = try pt.intValue(Type.comptime_int, field_i);
+ last_tag_val = try pt.intValue(.comptime_int, field_i);
if (!try sema.intFitsInType(last_tag_val.?, int_tag_ty, null)) break :overflow true;
last_tag_val = try pt.getCoerced(last_tag_val.?, int_tag_ty);
break :overflow false;
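The overflow logic here auto-increments omitted tag values and verifies that each fits the declared integer tag type; a follow-up field whose implied value exceeded the tag type's range would be rejected by this check. A minimal sketch of the observable behavior:

    const std = @import("std");

    test "omitted enum tag values auto-increment" {
        const E = enum(u8) { a = 250, b, c };
        try std.testing.expectEqual(@as(u8, 251), @intFromEnum(E.b));
        try std.testing.expectEqual(@as(u8, 252), @intFromEnum(E.c));
    }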