aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/Sema.zig949
-rw-r--r--src/TypedValue.zig5
-rw-r--r--src/type.zig291
-rw-r--r--src/value.zig624
4 files changed, 1063 insertions, 806 deletions
diff --git a/src/Sema.zig b/src/Sema.zig
index 13a3573112..cb34fd158f 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -2228,7 +2228,6 @@ fn zirEnumDecl(
enum_obj.tag_ty_inferred = true;
}
}
- const target = mod.getTarget();
try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
const any_values = for (sema.code.extra[body_end..][0..bit_bags_count]) |bag| {
@@ -2291,7 +2290,7 @@ fn zirEnumDecl(
});
} else if (any_values) {
const tag_val = if (last_tag_val) |val|
- try val.intAdd(Value.one, enum_obj.tag_ty, sema.arena, target)
+ try sema.intAdd(block, src, val, Value.one, enum_obj.tag_ty)
else
Value.zero;
last_tag_val = tag_val;
@@ -6054,7 +6053,7 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
if (int_val.isUndef()) {
return sema.failWithUseOfUndef(block, operand_src);
}
- if (!dest_ty.enumHasInt(int_val, sema.mod)) {
+ if (!(try sema.enumHasInt(block, src, dest_ty, int_val))) {
const msg = msg: {
const msg = try sema.errMsg(
block,
@@ -7082,7 +7081,7 @@ fn intCast(
// range to account for negative values.
const dest_range_val = if (wanted_info.signedness == .signed) range_val: {
const range_minus_one = try dest_max_val.shl(Value.one, unsigned_operand_ty, sema.arena, target);
- break :range_val try range_minus_one.intAdd(Value.one, unsigned_operand_ty, sema.arena, target);
+ break :range_val try sema.intAdd(block, operand_src, range_minus_one, Value.one, unsigned_operand_ty);
} else dest_max_val;
const dest_range = try sema.addConstant(unsigned_operand_ty, dest_range_val);
@@ -8203,8 +8202,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
// Validation above ensured these will succeed.
const first_tv = sema.resolveInstConst(&child_block, .unneeded, item_first) catch unreachable;
const last_tv = sema.resolveInstConst(&child_block, .unneeded, item_last) catch unreachable;
- if (Value.compare(operand_val, .gte, first_tv.val, operand_ty, sema.mod) and
- Value.compare(operand_val, .lte, last_tv.val, operand_ty, sema.mod))
+ if ((try sema.compare(block, src, operand_val, .gte, first_tv.val, operand_ty)) and
+ (try sema.compare(block, src, operand_val, .lte, last_tv.val, operand_ty)))
{
return sema.resolveBlockBody(block, src, &child_block, body, inst, merges);
}
@@ -8878,7 +8877,7 @@ fn zirShl(
if (rhs_val.isUndef()) {
return sema.addConstUndef(sema.typeOf(lhs));
}
- if (rhs_val.compareWithZero(.eq)) {
+ if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return lhs;
}
}
@@ -8895,7 +8894,7 @@ fn zirShl(
}
const int_info = scalar_ty.intInfo(target);
const truncated = try shifted.intTrunc(lhs_ty, sema.arena, int_info.signedness, int_info.bits, target);
- if (truncated.compare(.eq, shifted, lhs_ty, sema.mod)) {
+ if (try sema.compare(block, src, truncated, .eq, shifted, lhs_ty)) {
break :val shifted;
}
return sema.addConstUndef(lhs_ty);
@@ -8999,13 +8998,13 @@ fn zirShr(
return sema.addConstUndef(lhs_ty);
}
// If rhs is 0, return lhs without doing any calculations.
- if (rhs_val.compareWithZero(.eq)) {
+ if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return sema.addConstant(lhs_ty, lhs_val);
}
if (air_tag == .shr_exact) {
// Detect if any ones would be shifted out.
const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val, target);
- if (!truncated.compareWithZero(.eq)) {
+ if (!(try truncated.compareWithZeroAdvanced(.eq, sema.kit(block, src)))) {
return sema.addConstUndef(lhs_ty);
}
}
@@ -9015,7 +9014,7 @@ fn zirShr(
// Even if lhs is not comptime known, we can still deduce certain things based
// on rhs.
// If rhs is 0, return lhs without doing any calculations.
- if (rhs_val.compareWithZero(.eq)) {
+ if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return lhs;
}
break :rs lhs_src;
@@ -9578,12 +9577,12 @@ fn zirOverflowArithmetic(
// to the result, even if it is undefined..
// Otherwise, if either of the argument is undefined, undefined is returned.
if (maybe_lhs_val) |lhs_val| {
- if (!lhs_val.isUndef() and lhs_val.compareWithZero(.eq)) {
+ if (!lhs_val.isUndef() and (try lhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src)))) {
break :result .{ .overflowed = try sema.addBool(overflowed_ty, false), .wrapped = rhs };
}
}
if (maybe_rhs_val) |rhs_val| {
- if (!rhs_val.isUndef() and rhs_val.compareWithZero(.eq)) {
+ if (!rhs_val.isUndef() and (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src)))) {
break :result .{ .overflowed = try sema.addBool(overflowed_ty, false), .wrapped = lhs };
}
}
@@ -9593,7 +9592,7 @@ fn zirOverflowArithmetic(
break :result .{ .overflowed = try sema.addConstUndef(overflowed_ty), .wrapped = try sema.addConstUndef(dest_ty) };
}
- const result = try lhs_val.intAddWithOverflow(rhs_val, dest_ty, sema.arena, target);
+ const result = try sema.intAddWithOverflow(block, src, lhs_val, rhs_val, dest_ty);
const overflowed = try sema.addConstant(overflowed_ty, result.overflowed);
const wrapped = try sema.addConstant(dest_ty, result.wrapped_result);
break :result .{ .overflowed = overflowed, .wrapped = wrapped };
@@ -9606,14 +9605,14 @@ fn zirOverflowArithmetic(
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef()) {
break :result .{ .overflowed = try sema.addConstUndef(overflowed_ty), .wrapped = try sema.addConstUndef(dest_ty) };
- } else if (rhs_val.compareWithZero(.eq)) {
+ } else if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
break :result .{ .overflowed = try sema.addBool(overflowed_ty, false), .wrapped = lhs };
} else if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef()) {
break :result .{ .overflowed = try sema.addConstUndef(overflowed_ty), .wrapped = try sema.addConstUndef(dest_ty) };
}
- const result = try lhs_val.intSubWithOverflow(rhs_val, dest_ty, sema.arena, target);
+ const result = try sema.intSubWithOverflow(block, src, lhs_val, rhs_val, dest_ty);
const overflowed = try sema.addConstant(overflowed_ty, result.overflowed);
const wrapped = try sema.addConstant(dest_ty, result.wrapped_result);
break :result .{ .overflowed = overflowed, .wrapped = wrapped };
@@ -9626,9 +9625,9 @@ fn zirOverflowArithmetic(
// Otherwise, if either of the arguments is undefined, both results are undefined.
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef()) {
- if (lhs_val.compareWithZero(.eq)) {
+ if (try lhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
break :result .{ .overflowed = try sema.addBool(overflowed_ty, false), .wrapped = lhs };
- } else if (lhs_val.compare(.eq, Value.one, dest_ty, mod)) {
+ } else if (try sema.compare(block, src, lhs_val, .eq, Value.one, dest_ty)) {
break :result .{ .overflowed = try sema.addBool(overflowed_ty, false), .wrapped = rhs };
}
}
@@ -9636,9 +9635,9 @@ fn zirOverflowArithmetic(
if (maybe_rhs_val) |rhs_val| {
if (!rhs_val.isUndef()) {
- if (rhs_val.compareWithZero(.eq)) {
+ if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
break :result .{ .overflowed = try sema.addBool(overflowed_ty, false), .wrapped = rhs };
- } else if (rhs_val.compare(.eq, Value.one, dest_ty, mod)) {
+ } else if (try sema.compare(block, src, rhs_val, .eq, Value.one, dest_ty)) {
break :result .{ .overflowed = try sema.addBool(overflowed_ty, false), .wrapped = lhs };
}
}
@@ -9662,12 +9661,12 @@ fn zirOverflowArithmetic(
// If rhs is zero, the result is lhs (even if undefined) and no overflow occurred.
// Oterhwise if either of the arguments is undefined, both results are undefined.
if (maybe_lhs_val) |lhs_val| {
- if (!lhs_val.isUndef() and lhs_val.compareWithZero(.eq)) {
+ if (!lhs_val.isUndef() and (try lhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src)))) {
break :result .{ .overflowed = try sema.addBool(overflowed_ty, false), .wrapped = lhs };
}
}
if (maybe_rhs_val) |rhs_val| {
- if (!rhs_val.isUndef() and rhs_val.compareWithZero(.eq)) {
+ if (!rhs_val.isUndef() and (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src)))) {
break :result .{ .overflowed = try sema.addBool(overflowed_ty, false), .wrapped = lhs };
}
}
@@ -9815,7 +9814,7 @@ fn analyzeArithmetic(
// overflow (max_int), causing illegal behavior.
// For floats: either operand being undef makes the result undef.
if (maybe_lhs_val) |lhs_val| {
- if (!lhs_val.isUndef() and lhs_val.compareWithZero(.eq)) {
+ if (!lhs_val.isUndef() and (try lhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src)))) {
return casted_rhs;
}
}
@@ -9827,7 +9826,7 @@ fn analyzeArithmetic(
return sema.addConstUndef(resolved_type);
}
}
- if (rhs_val.compareWithZero(.eq)) {
+ if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return casted_lhs;
}
}
@@ -9841,15 +9840,15 @@ fn analyzeArithmetic(
}
if (maybe_rhs_val) |rhs_val| {
if (is_int) {
- const sum = try lhs_val.intAdd(rhs_val, resolved_type, sema.arena, target);
- if (!sum.intFitsInType(resolved_type, target)) {
+ const sum = try sema.intAdd(block, src, lhs_val, rhs_val, resolved_type);
+ if (!(try sema.intFitsInType(block, src, sum, resolved_type))) {
return sema.failWithIntegerOverflow(block, src, resolved_type, sum);
}
return sema.addConstant(resolved_type, sum);
} else {
return sema.addConstant(
resolved_type,
- try lhs_val.floatAdd(rhs_val, resolved_type, sema.arena, target),
+ try sema.floatAdd(lhs_val, rhs_val, resolved_type),
);
}
} else break :rs .{ .src = rhs_src, .air_tag = .add };
@@ -9860,7 +9859,7 @@ fn analyzeArithmetic(
// If either of the operands are zero, the other operand is returned.
// If either of the operands are undefined, the result is undefined.
if (maybe_lhs_val) |lhs_val| {
- if (!lhs_val.isUndef() and lhs_val.compareWithZero(.eq)) {
+ if (!lhs_val.isUndef() and (try lhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src)))) {
return casted_rhs;
}
}
@@ -9868,13 +9867,13 @@ fn analyzeArithmetic(
if (rhs_val.isUndef()) {
return sema.addConstUndef(resolved_type);
}
- if (rhs_val.compareWithZero(.eq)) {
+ if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return casted_lhs;
}
if (maybe_lhs_val) |lhs_val| {
return sema.addConstant(
resolved_type,
- try lhs_val.numberAddWrap(rhs_val, resolved_type, sema.arena, target),
+ try sema.numberAddWrap(block, src, lhs_val, rhs_val, resolved_type),
);
} else break :rs .{ .src = lhs_src, .air_tag = .addwrap };
} else break :rs .{ .src = rhs_src, .air_tag = .addwrap };
@@ -9884,7 +9883,7 @@ fn analyzeArithmetic(
// If either of the operands are zero, then the other operand is returned.
// If either of the operands are undefined, the result is undefined.
if (maybe_lhs_val) |lhs_val| {
- if (!lhs_val.isUndef() and lhs_val.compareWithZero(.eq)) {
+ if (!lhs_val.isUndef() and (try lhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src)))) {
return casted_rhs;
}
}
@@ -9892,12 +9891,12 @@ fn analyzeArithmetic(
if (rhs_val.isUndef()) {
return sema.addConstUndef(resolved_type);
}
- if (rhs_val.compareWithZero(.eq)) {
+ if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return casted_lhs;
}
if (maybe_lhs_val) |lhs_val| {
const val = if (scalar_tag == .ComptimeInt)
- try lhs_val.intAdd(rhs_val, resolved_type, sema.arena, target)
+ try sema.intAdd(block, src, lhs_val, rhs_val, resolved_type)
else
try lhs_val.intAddSat(rhs_val, resolved_type, sema.arena, target);
@@ -9921,7 +9920,7 @@ fn analyzeArithmetic(
return sema.addConstUndef(resolved_type);
}
}
- if (rhs_val.compareWithZero(.eq)) {
+ if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return casted_lhs;
}
}
@@ -9935,15 +9934,15 @@ fn analyzeArithmetic(
}
if (maybe_rhs_val) |rhs_val| {
if (is_int) {
- const diff = try lhs_val.intSub(rhs_val, resolved_type, sema.arena, target);
- if (!diff.intFitsInType(resolved_type, target)) {
+ const diff = try sema.intSub(block, src, lhs_val, rhs_val, resolved_type);
+ if (!(try sema.intFitsInType(block, src, diff, resolved_type))) {
return sema.failWithIntegerOverflow(block, src, resolved_type, diff);
}
return sema.addConstant(resolved_type, diff);
} else {
return sema.addConstant(
resolved_type,
- try lhs_val.floatSub(rhs_val, resolved_type, sema.arena, target),
+ try sema.floatSub(lhs_val, rhs_val, resolved_type),
);
}
} else break :rs .{ .src = rhs_src, .air_tag = .sub };
@@ -9957,7 +9956,7 @@ fn analyzeArithmetic(
if (rhs_val.isUndef()) {
return sema.addConstUndef(resolved_type);
}
- if (rhs_val.compareWithZero(.eq)) {
+ if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return casted_lhs;
}
}
@@ -9968,7 +9967,7 @@ fn analyzeArithmetic(
if (maybe_rhs_val) |rhs_val| {
return sema.addConstant(
resolved_type,
- try lhs_val.numberSubWrap(rhs_val, resolved_type, sema.arena, target),
+ try sema.numberSubWrap(block, src, lhs_val, rhs_val, resolved_type),
);
} else break :rs .{ .src = rhs_src, .air_tag = .subwrap };
} else break :rs .{ .src = lhs_src, .air_tag = .subwrap };
@@ -9981,7 +9980,7 @@ fn analyzeArithmetic(
if (rhs_val.isUndef()) {
return sema.addConstUndef(resolved_type);
}
- if (rhs_val.compareWithZero(.eq)) {
+ if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return casted_lhs;
}
}
@@ -9991,7 +9990,7 @@ fn analyzeArithmetic(
}
if (maybe_rhs_val) |rhs_val| {
const val = if (scalar_tag == .ComptimeInt)
- try lhs_val.intSub(rhs_val, resolved_type, sema.arena, target)
+ try sema.intSub(block, src, lhs_val, rhs_val, resolved_type)
else
try lhs_val.intSubSat(rhs_val, resolved_type, sema.arena, target);
@@ -10032,7 +10031,7 @@ fn analyzeArithmetic(
.Int, .ComptimeInt, .ComptimeFloat => {
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef()) {
- if (lhs_val.compareWithZero(.eq)) {
+ if (try lhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return sema.addConstant(resolved_type, Value.zero);
}
}
@@ -10041,7 +10040,7 @@ fn analyzeArithmetic(
if (rhs_val.isUndef()) {
return sema.failWithUseOfUndef(block, rhs_src);
}
- if (rhs_val.compareWithZero(.eq)) {
+ if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return sema.failWithDivideByZero(block, rhs_src);
}
}
@@ -10053,7 +10052,7 @@ fn analyzeArithmetic(
if (lhs_val.isUndef()) {
if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) {
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.compare(.neq, Value.negative_one, resolved_type, mod)) {
+ if (try sema.compare(block, src, rhs_val, .neq, Value.negative_one, resolved_type)) {
return sema.addConstUndef(resolved_type);
}
}
@@ -10111,7 +10110,7 @@ fn analyzeArithmetic(
// If the lhs is undefined, result is undefined.
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef()) {
- if (lhs_val.compareWithZero(.eq)) {
+ if (try lhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return sema.addConstant(resolved_type, Value.zero);
}
}
@@ -10120,7 +10119,7 @@ fn analyzeArithmetic(
if (rhs_val.isUndef()) {
return sema.failWithUseOfUndef(block, rhs_src);
}
- if (rhs_val.compareWithZero(.eq)) {
+ if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return sema.failWithDivideByZero(block, rhs_src);
}
}
@@ -10128,7 +10127,7 @@ fn analyzeArithmetic(
if (lhs_val.isUndef()) {
if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) {
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.compare(.neq, Value.negative_one, resolved_type, mod)) {
+ if (try sema.compare(block, src, rhs_val, .neq, Value.negative_one, resolved_type)) {
return sema.addConstUndef(resolved_type);
}
}
@@ -10174,7 +10173,7 @@ fn analyzeArithmetic(
// If the lhs is undefined, result is undefined.
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef()) {
- if (lhs_val.compareWithZero(.eq)) {
+ if (try lhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return sema.addConstant(resolved_type, Value.zero);
}
}
@@ -10183,7 +10182,7 @@ fn analyzeArithmetic(
if (rhs_val.isUndef()) {
return sema.failWithUseOfUndef(block, rhs_src);
}
- if (rhs_val.compareWithZero(.eq)) {
+ if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return sema.failWithDivideByZero(block, rhs_src);
}
}
@@ -10191,7 +10190,7 @@ fn analyzeArithmetic(
if (lhs_val.isUndef()) {
if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) {
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.compare(.neq, Value.negative_one, resolved_type, mod)) {
+ if (try sema.compare(block, src, rhs_val, .neq, Value.negative_one, resolved_type)) {
return sema.addConstUndef(resolved_type);
}
}
@@ -10236,7 +10235,7 @@ fn analyzeArithmetic(
if (lhs_val.isUndef()) {
return sema.failWithUseOfUndef(block, rhs_src);
} else {
- if (lhs_val.compareWithZero(.eq)) {
+ if (try lhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return sema.addConstant(resolved_type, Value.zero);
}
}
@@ -10245,7 +10244,7 @@ fn analyzeArithmetic(
if (rhs_val.isUndef()) {
return sema.failWithUseOfUndef(block, rhs_src);
}
- if (rhs_val.compareWithZero(.eq)) {
+ if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return sema.failWithDivideByZero(block, rhs_src);
}
}
@@ -10278,10 +10277,10 @@ fn analyzeArithmetic(
// For floats: either operand being undef makes the result undef.
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef()) {
- if (lhs_val.compareWithZero(.eq)) {
+ if (try lhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return sema.addConstant(resolved_type, Value.zero);
}
- if (lhs_val.compare(.eq, Value.one, resolved_type, mod)) {
+ if (try sema.compare(block, src, lhs_val, .eq, Value.one, resolved_type)) {
return casted_rhs;
}
}
@@ -10294,10 +10293,10 @@ fn analyzeArithmetic(
return sema.addConstUndef(resolved_type);
}
}
- if (rhs_val.compareWithZero(.eq)) {
+ if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return sema.addConstant(resolved_type, Value.zero);
}
- if (rhs_val.compare(.eq, Value.one, resolved_type, mod)) {
+ if (try sema.compare(block, src, rhs_val, .eq, Value.one, resolved_type)) {
return casted_lhs;
}
if (maybe_lhs_val) |lhs_val| {
@@ -10310,7 +10309,7 @@ fn analyzeArithmetic(
}
if (is_int) {
const product = try lhs_val.intMul(rhs_val, resolved_type, sema.arena, target);
- if (!product.intFitsInType(resolved_type, target)) {
+ if (!(try sema.intFitsInType(block, src, product, resolved_type))) {
return sema.failWithIntegerOverflow(block, src, resolved_type, product);
}
return sema.addConstant(resolved_type, product);
@@ -10330,10 +10329,10 @@ fn analyzeArithmetic(
// If either of the operands are undefined, result is undefined.
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef()) {
- if (lhs_val.compareWithZero(.eq)) {
+ if (try lhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return sema.addConstant(resolved_type, Value.zero);
}
- if (lhs_val.compare(.eq, Value.one, resolved_type, mod)) {
+ if (try sema.compare(block, src, lhs_val, .eq, Value.one, resolved_type)) {
return casted_rhs;
}
}
@@ -10342,10 +10341,10 @@ fn analyzeArithmetic(
if (rhs_val.isUndef()) {
return sema.addConstUndef(resolved_type);
}
- if (rhs_val.compareWithZero(.eq)) {
+ if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return sema.addConstant(resolved_type, Value.zero);
}
- if (rhs_val.compare(.eq, Value.one, resolved_type, mod)) {
+ if (try sema.compare(block, src, rhs_val, .eq, Value.one, resolved_type)) {
return casted_lhs;
}
if (maybe_lhs_val) |lhs_val| {
@@ -10366,10 +10365,10 @@ fn analyzeArithmetic(
// If either of the operands are undefined, result is undefined.
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef()) {
- if (lhs_val.compareWithZero(.eq)) {
+ if (try lhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return sema.addConstant(resolved_type, Value.zero);
}
- if (lhs_val.compare(.eq, Value.one, resolved_type, mod)) {
+ if (try sema.compare(block, src, lhs_val, .eq, Value.one, resolved_type)) {
return casted_rhs;
}
}
@@ -10378,10 +10377,10 @@ fn analyzeArithmetic(
if (rhs_val.isUndef()) {
return sema.addConstUndef(resolved_type);
}
- if (rhs_val.compareWithZero(.eq)) {
+ if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return sema.addConstant(resolved_type, Value.zero);
}
- if (rhs_val.compare(.eq, Value.one, resolved_type, mod)) {
+ if (try sema.compare(block, src, rhs_val, .eq, Value.one, resolved_type)) {
return casted_lhs;
}
if (maybe_lhs_val) |lhs_val| {
@@ -10417,7 +10416,7 @@ fn analyzeArithmetic(
if (lhs_val.isUndef()) {
return sema.failWithUseOfUndef(block, lhs_src);
}
- if (lhs_val.compareWithZero(.eq)) {
+ if (try lhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return sema.addConstant(resolved_type, Value.zero);
}
} else if (lhs_scalar_ty.isSignedInt()) {
@@ -10427,23 +10426,23 @@ fn analyzeArithmetic(
if (rhs_val.isUndef()) {
return sema.failWithUseOfUndef(block, rhs_src);
}
- if (rhs_val.compareWithZero(.eq)) {
+ if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return sema.failWithDivideByZero(block, rhs_src);
}
if (maybe_lhs_val) |lhs_val| {
const rem_result = try lhs_val.intRem(rhs_val, resolved_type, sema.arena, target);
// If this answer could possibly be different by doing `intMod`,
// we must emit a compile error. Otherwise, it's OK.
- if (rhs_val.compareWithZero(.lt) != lhs_val.compareWithZero(.lt) and
- !rem_result.compareWithZero(.eq))
+ if ((try rhs_val.compareWithZeroAdvanced(.lt, sema.kit(block, src))) != (try lhs_val.compareWithZeroAdvanced(.lt, sema.kit(block, src))) and
+ !(try rem_result.compareWithZeroAdvanced(.eq, sema.kit(block, src))))
{
- const bad_src = if (lhs_val.compareWithZero(.lt))
+ const bad_src = if (try lhs_val.compareWithZeroAdvanced(.lt, sema.kit(block, src)))
lhs_src
else
rhs_src;
return sema.failWithModRemNegative(block, bad_src, lhs_ty, rhs_ty);
}
- if (lhs_val.compareWithZero(.lt)) {
+ if (try lhs_val.compareWithZeroAdvanced(.lt, sema.kit(block, src))) {
// Negative
return sema.addConstant(resolved_type, Value.zero);
}
@@ -10461,14 +10460,14 @@ fn analyzeArithmetic(
if (rhs_val.isUndef()) {
return sema.failWithUseOfUndef(block, rhs_src);
}
- if (rhs_val.compareWithZero(.eq)) {
+ if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return sema.failWithDivideByZero(block, rhs_src);
}
- if (rhs_val.compareWithZero(.lt)) {
+ if (try rhs_val.compareWithZeroAdvanced(.lt, sema.kit(block, src))) {
return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
}
if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef() or lhs_val.compareWithZero(.lt)) {
+ if (lhs_val.isUndef() or (try lhs_val.compareWithZeroAdvanced(.lt, sema.kit(block, src)))) {
return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
}
return sema.addConstant(
@@ -10504,7 +10503,7 @@ fn analyzeArithmetic(
if (rhs_val.isUndef()) {
return sema.failWithUseOfUndef(block, rhs_src);
}
- if (rhs_val.compareWithZero(.eq)) {
+ if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return sema.failWithDivideByZero(block, rhs_src);
}
if (maybe_lhs_val) |lhs_val| {
@@ -10523,7 +10522,7 @@ fn analyzeArithmetic(
if (rhs_val.isUndef()) {
return sema.failWithUseOfUndef(block, rhs_src);
}
- if (rhs_val.compareWithZero(.eq)) {
+ if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return sema.failWithDivideByZero(block, rhs_src);
}
}
@@ -10561,7 +10560,7 @@ fn analyzeArithmetic(
if (rhs_val.isUndef()) {
return sema.failWithUseOfUndef(block, rhs_src);
}
- if (rhs_val.compareWithZero(.eq)) {
+ if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return sema.failWithDivideByZero(block, rhs_src);
}
if (maybe_lhs_val) |lhs_val| {
@@ -10580,7 +10579,7 @@ fn analyzeArithmetic(
if (rhs_val.isUndef()) {
return sema.failWithUseOfUndef(block, rhs_src);
}
- if (rhs_val.compareWithZero(.eq)) {
+ if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
return sema.failWithDivideByZero(block, rhs_src);
}
}
@@ -11095,11 +11094,11 @@ fn cmpSelf(
if (resolved_type.zigTypeTag() == .Vector) {
const result_ty = try Type.vector(sema.arena, resolved_type.vectorLen(), Type.@"bool");
- const cmp_val = try lhs_val.compareVector(op, rhs_val, resolved_type, sema.arena, sema.mod);
+ const cmp_val = try sema.compareVector(block, lhs_src, lhs_val, op, rhs_val, resolved_type);
return sema.addConstant(result_ty, cmp_val);
}
- if (lhs_val.compare(op, rhs_val, resolved_type, sema.mod)) {
+ if (try sema.compare(block, lhs_src, lhs_val, op, rhs_val, resolved_type)) {
return Air.Inst.Ref.bool_true;
} else {
return Air.Inst.Ref.bool_false;
@@ -11157,24 +11156,22 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
- const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand);
- try sema.resolveTypeLayout(block, src, operand_ty);
- const target = sema.mod.getTarget();
- const abi_size = switch (operand_ty.zigTypeTag()) {
+ const ty = try sema.resolveType(block, operand_src, inst_data.operand);
+ switch (ty.zigTypeTag()) {
.Fn => unreachable,
.NoReturn,
.Undefined,
.Null,
.BoundFn,
.Opaque,
- => return sema.fail(block, src, "no size available for type '{}'", .{operand_ty.fmt(sema.mod)}),
+ => return sema.fail(block, src, "no size available for type '{}'", .{ty.fmt(sema.mod)}),
.Type,
.EnumLiteral,
.ComptimeFloat,
.ComptimeInt,
.Void,
- => 0,
+ => return sema.addIntUnsigned(Type.comptime_int, 0),
.Bool,
.Int,
@@ -11190,9 +11187,14 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
.Vector,
.Frame,
.AnyFrame,
- => operand_ty.abiSize(target),
- };
- return sema.addIntUnsigned(Type.comptime_int, abi_size);
+ => {},
+ }
+ const target = sema.mod.getTarget();
+ const val = try ty.lazyAbiSize(target, sema.arena);
+ if (val.tag() == .lazy_size) {
+ try sema.queueFullTypeResolution(ty);
+ }
+ return sema.addConstant(Type.comptime_int, val);
}
fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -11202,7 +11204,7 @@ fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const operand_ty = try sema.resolveTypeFields(block, operand_src, unresolved_operand_ty);
const target = sema.mod.getTarget();
const bit_size = operand_ty.bitSize(target);
- return sema.addIntUnsigned(Type.initTag(.comptime_int), bit_size);
+ return sema.addIntUnsigned(Type.comptime_int, bit_size);
}
fn zirThis(
@@ -13516,10 +13518,11 @@ fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const ty = try sema.resolveType(block, operand_src, inst_data.operand);
const target = sema.mod.getTarget();
- return sema.addConstant(
- Type.comptime_int,
- try ty.lazyAbiAlignment(target, sema.arena),
- );
+ const val = try ty.lazyAbiAlignment(target, sema.arena);
+ if (val.tag() == .lazy_align) {
+ try sema.queueFullTypeResolution(ty);
+ }
+ return sema.addConstant(Type.comptime_int, val);
}
fn zirBoolToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -14362,16 +14365,7 @@ fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
try sema.checkFloatType(block, operand_src, operand_ty);
if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
- const target = sema.mod.getTarget();
- const result_val = val.floatToInt(sema.arena, operand_ty, dest_ty, target) catch |err| switch (err) {
- error.FloatCannotFit => {
- return sema.fail(block, operand_src, "integer value {d} cannot be stored in type '{}'", .{
- @floor(val.toFloat(f64)),
- dest_ty.fmt(sema.mod),
- });
- },
- else => |e| return e,
- };
+ const result_val = try sema.floatToInt(block, operand_src, val, operand_ty, dest_ty);
return sema.addConstant(dest_ty, result_val);
}
@@ -15563,7 +15557,7 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
.Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena, target),
.Min => accum = accum.numberMin(elem_val, target),
.Max => accum = accum.numberMax(elem_val, target),
- .Add => accum = try accum.numberAddWrap(elem_val, scalar_ty, sema.arena, target),
+ .Add => accum = try sema.numberAddWrap(block, operand_src, accum, elem_val, scalar_ty),
.Mul => accum = try accum.numberMulWrap(elem_val, scalar_ty, sema.arena, target),
}
}
@@ -15958,14 +15952,14 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const new_val = switch (op) {
// zig fmt: off
.Xchg => operand_val,
- .Add => try stored_val.numberAddWrap(operand_val, elem_ty, sema.arena, target),
- .Sub => try stored_val.numberSubWrap(operand_val, elem_ty, sema.arena, target),
- .And => try stored_val.bitwiseAnd (operand_val, elem_ty, sema.arena, target),
- .Nand => try stored_val.bitwiseNand (operand_val, elem_ty, sema.arena, target),
- .Or => try stored_val.bitwiseOr (operand_val, elem_ty, sema.arena, target),
- .Xor => try stored_val.bitwiseXor (operand_val, elem_ty, sema.arena, target),
- .Max => stored_val.numberMax (operand_val, target),
- .Min => stored_val.numberMin (operand_val, target),
+ .Add => try sema.numberAddWrap(block, src, stored_val, operand_val, elem_ty),
+ .Sub => try sema.numberSubWrap(block, src, stored_val, operand_val, elem_ty),
+ .And => try stored_val.bitwiseAnd (operand_val, elem_ty, sema.arena, target),
+ .Nand => try stored_val.bitwiseNand (operand_val, elem_ty, sema.arena, target),
+ .Or => try stored_val.bitwiseOr (operand_val, elem_ty, sema.arena, target),
+ .Xor => try stored_val.bitwiseXor (operand_val, elem_ty, sema.arena, target),
+ .Max => stored_val.numberMax (operand_val, target),
+ .Min => stored_val.numberMin (operand_val, target),
// zig fmt: on
};
try sema.storePtrVal(block, src, ptr_val, new_val, elem_ty);
@@ -18890,18 +18884,13 @@ fn coerce(
.{ val.fmtValue(inst_ty, sema.mod), dest_ty.fmt(sema.mod) },
);
}
- const result_val = val.floatToInt(sema.arena, inst_ty, dest_ty, target) catch |err| switch (err) {
- error.FloatCannotFit => {
- return sema.fail(block, inst_src, "integer value {d} cannot be stored in type '{}'", .{ @floor(val.toFloat(f64)), dest_ty.fmt(sema.mod) });
- },
- else => |e| return e,
- };
+ const result_val = try sema.floatToInt(block, inst_src, val, inst_ty, dest_ty);
return try sema.addConstant(dest_ty, result_val);
},
.Int, .ComptimeInt => {
if (try sema.resolveDefinedValue(block, inst_src, inst)) |val| {
// comptime known integer to other number
- if (!val.intFitsInType(dest_ty, target)) {
+ if (!(try sema.intFitsInType(block, inst_src, val, dest_ty))) {
return sema.fail(block, inst_src, "type {} cannot represent integer value {}", .{ dest_ty.fmt(sema.mod), val.fmtValue(inst_ty, sema.mod) });
}
return try sema.addConstant(dest_ty, val);
@@ -21093,7 +21082,7 @@ fn analyzeSlice(
sema.arena,
array_ty.arrayLenIncludingSentinel(),
);
- if (end_val.compare(.gt, len_s_val, Type.usize, mod)) {
+ if (try sema.compare(block, src, end_val, .gt, len_s_val, Type.usize)) {
const sentinel_label: []const u8 = if (array_ty.sentinel() != null)
" +1 (sentinel)"
else
@@ -21133,7 +21122,7 @@ fn analyzeSlice(
.data = slice_val.sliceLen(mod) + @boolToInt(has_sentinel),
};
const slice_len_val = Value.initPayload(&int_payload.base);
- if (end_val.compare(.gt, slice_len_val, Type.usize, mod)) {
+ if (try sema.compare(block, src, end_val, .gt, slice_len_val, Type.usize)) {
const sentinel_label: []const u8 = if (has_sentinel)
" +1 (sentinel)"
else
@@ -21191,7 +21180,7 @@ fn analyzeSlice(
// requirement: start <= end
if (try sema.resolveDefinedValue(block, src, end)) |end_val| {
if (try sema.resolveDefinedValue(block, src, start)) |start_val| {
- if (start_val.compare(.gt, end_val, Type.usize, mod)) {
+ if (try sema.compare(block, src, start_val, .gt, end_val, Type.usize)) {
return sema.fail(
block,
start_src,
@@ -21399,11 +21388,11 @@ fn cmpNumeric(
// a signed integer with mantissa bits + 1, and if there was any non-integral part of the float,
// add/subtract 1.
const lhs_is_signed = if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val|
- lhs_val.compareWithZero(.lt)
+ (try lhs_val.compareWithZeroAdvanced(.lt, sema.kit(block, src)))
else
(lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt());
const rhs_is_signed = if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val|
- rhs_val.compareWithZero(.lt)
+ (try rhs_val.compareWithZeroAdvanced(.lt, sema.kit(block, src)))
else
(rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt());
const dest_int_is_signed = lhs_is_signed or rhs_is_signed;
@@ -21541,7 +21530,7 @@ fn cmpVector(
if (lhs_val.isUndef() or rhs_val.isUndef()) {
return sema.addConstUndef(result_ty);
}
- const cmp_val = try lhs_val.compareVector(op, rhs_val, lhs_ty, sema.arena, sema.mod);
+ const cmp_val = try sema.compareVector(block, src, lhs_val, op, rhs_val, lhs_ty);
return sema.addConstant(result_ty, cmp_val);
} else {
break :src rhs_src;
@@ -22904,8 +22893,6 @@ fn semaUnionFields(block: *Block, mod: *Module, union_obj: *Module.Union) Compil
enum_field_names = &union_obj.tag_ty.castTag(.enum_simple).?.data.fields;
}
- const target = sema.mod.getTarget();
-
const bits_per_field = 4;
const fields_per_u32 = 32 / bits_per_field;
const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable;
@@ -22969,7 +22956,7 @@ fn semaUnionFields(block: *Block, mod: *Module, union_obj: *Module.Union) Compil
});
} else {
const val = if (last_tag_val) |val|
- try val.intAdd(Value.one, int_tag_ty, sema.arena, target)
+ try sema.intAdd(block, src, val, Value.one, int_tag_ty)
else
Value.zero;
last_tag_val = val;
@@ -24119,3 +24106,707 @@ fn queueFullTypeResolution(sema: *Sema, ty: Type) !void {
const inst_ref = try sema.addType(ty);
try sema.types_to_resolve.append(sema.gpa, inst_ref);
}
+
+fn intAdd(sema: *Sema, block: *Block, src: LazySrcLoc, lhs: Value, rhs: Value, ty: Type) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try sema.arena.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try sema.intAddScalar(block, src, lhs.indexVectorlike(i), rhs.indexVectorlike(i));
+ }
+ return Value.Tag.aggregate.create(sema.arena, result_data);
+ }
+ return sema.intAddScalar(block, src, lhs, rhs);
+}
+
+fn intAddScalar(sema: *Sema, block: *Block, src: LazySrcLoc, lhs: Value, rhs: Value) !Value {
+ // TODO is this a performance issue? maybe we should try the operation without
+ // resorting to BigInt first.
+ var lhs_space: Value.BigIntSpace = undefined;
+ var rhs_space: Value.BigIntSpace = undefined;
+ const target = sema.mod.getTarget();
+ const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema.kit(block, src));
+ const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema.kit(block, src));
+ const limbs = try sema.arena.alloc(
+ std.math.big.Limb,
+ std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
+ );
+ var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
+ result_bigint.add(lhs_bigint, rhs_bigint);
+ return Value.fromBigInt(sema.arena, result_bigint.toConst());
+}
+
+/// Supports both (vectors of) floats and ints; handles undefined scalars.
+fn numberAddWrap(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try sema.arena.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try sema.numberAddWrapScalar(block, src, lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType());
+ }
+ return Value.Tag.aggregate.create(sema.arena, result_data);
+ }
+ return sema.numberAddWrapScalar(block, src, lhs, rhs, ty);
+}
+
+/// Supports both floats and ints; handles undefined.
+fn numberAddWrapScalar(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+) !Value {
+ if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
+
+ if (ty.zigTypeTag() == .ComptimeInt) {
+ return sema.intAdd(block, src, lhs, rhs, ty);
+ }
+
+ if (ty.isAnyFloat()) {
+ return sema.floatAdd(lhs, rhs, ty);
+ }
+
+ const overflow_result = try sema.intAddWithOverflow(block, src, lhs, rhs, ty);
+ return overflow_result.wrapped_result;
+}
+
+fn intSub(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try sema.arena.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try sema.intSubScalar(block, src, lhs.indexVectorlike(i), rhs.indexVectorlike(i));
+ }
+ return Value.Tag.aggregate.create(sema.arena, result_data);
+ }
+ return sema.intSubScalar(block, src, lhs, rhs);
+}
+
+fn intSubScalar(sema: *Sema, block: *Block, src: LazySrcLoc, lhs: Value, rhs: Value) !Value {
+ // TODO is this a performance issue? maybe we should try the operation without
+ // resorting to BigInt first.
+ var lhs_space: Value.BigIntSpace = undefined;
+ var rhs_space: Value.BigIntSpace = undefined;
+ const target = sema.mod.getTarget();
+ const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema.kit(block, src));
+ const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema.kit(block, src));
+ const limbs = try sema.arena.alloc(
+ std.math.big.Limb,
+ std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
+ );
+ var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
+ result_bigint.sub(lhs_bigint, rhs_bigint);
+ return Value.fromBigInt(sema.arena, result_bigint.toConst());
+}
+
+/// Supports both (vectors of) floats and ints; handles undefined scalars.
+fn numberSubWrap(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try sema.arena.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try sema.numberSubWrapScalar(block, src, lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType());
+ }
+ return Value.Tag.aggregate.create(sema.arena, result_data);
+ }
+ return sema.numberSubWrapScalar(block, src, lhs, rhs, ty);
+}
+
+/// Supports both floats and ints; handles undefined.
+fn numberSubWrapScalar(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+) !Value {
+ if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
+
+ if (ty.zigTypeTag() == .ComptimeInt) {
+ return sema.intSub(block, src, lhs, rhs, ty);
+ }
+
+ if (ty.isAnyFloat()) {
+ return sema.floatSub(lhs, rhs, ty);
+ }
+
+ const overflow_result = try sema.intSubWithOverflow(block, src, lhs, rhs, ty);
+ return overflow_result.wrapped_result;
+}
+
+fn floatAdd(
+ sema: *Sema,
+ lhs: Value,
+ rhs: Value,
+ float_type: Type,
+) !Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try sema.arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try sema.floatAddScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType());
+ }
+ return Value.Tag.aggregate.create(sema.arena, result_data);
+ }
+ return sema.floatAddScalar(lhs, rhs, float_type);
+}
+
+fn floatAddScalar(
+ sema: *Sema,
+ lhs: Value,
+ rhs: Value,
+ float_type: Type,
+) !Value {
+ const target = sema.mod.getTarget();
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const lhs_val = lhs.toFloat(f16);
+ const rhs_val = rhs.toFloat(f16);
+ return Value.Tag.float_16.create(sema.arena, lhs_val + rhs_val);
+ },
+ 32 => {
+ const lhs_val = lhs.toFloat(f32);
+ const rhs_val = rhs.toFloat(f32);
+ return Value.Tag.float_32.create(sema.arena, lhs_val + rhs_val);
+ },
+ 64 => {
+ const lhs_val = lhs.toFloat(f64);
+ const rhs_val = rhs.toFloat(f64);
+ return Value.Tag.float_64.create(sema.arena, lhs_val + rhs_val);
+ },
+ 80 => {
+ const lhs_val = lhs.toFloat(f80);
+ const rhs_val = rhs.toFloat(f80);
+ return Value.Tag.float_80.create(sema.arena, lhs_val + rhs_val);
+ },
+ 128 => {
+ const lhs_val = lhs.toFloat(f128);
+ const rhs_val = rhs.toFloat(f128);
+ return Value.Tag.float_128.create(sema.arena, lhs_val + rhs_val);
+ },
+ else => unreachable,
+ }
+}
+
+fn floatSub(
+ sema: *Sema,
+ lhs: Value,
+ rhs: Value,
+ float_type: Type,
+) !Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try sema.arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try sema.floatSubScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType());
+ }
+ return Value.Tag.aggregate.create(sema.arena, result_data);
+ }
+ return sema.floatSubScalar(lhs, rhs, float_type);
+}
+
+fn floatSubScalar(
+ sema: *Sema,
+ lhs: Value,
+ rhs: Value,
+ float_type: Type,
+) !Value {
+ const target = sema.mod.getTarget();
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const lhs_val = lhs.toFloat(f16);
+ const rhs_val = rhs.toFloat(f16);
+ return Value.Tag.float_16.create(sema.arena, lhs_val - rhs_val);
+ },
+ 32 => {
+ const lhs_val = lhs.toFloat(f32);
+ const rhs_val = rhs.toFloat(f32);
+ return Value.Tag.float_32.create(sema.arena, lhs_val - rhs_val);
+ },
+ 64 => {
+ const lhs_val = lhs.toFloat(f64);
+ const rhs_val = rhs.toFloat(f64);
+ return Value.Tag.float_64.create(sema.arena, lhs_val - rhs_val);
+ },
+ 80 => {
+ const lhs_val = lhs.toFloat(f80);
+ const rhs_val = rhs.toFloat(f80);
+ return Value.Tag.float_80.create(sema.arena, lhs_val - rhs_val);
+ },
+ 128 => {
+ const lhs_val = lhs.toFloat(f128);
+ const rhs_val = rhs.toFloat(f128);
+ return Value.Tag.float_128.create(sema.arena, lhs_val - rhs_val);
+ },
+ else => unreachable,
+ }
+}
+
+fn intSubWithOverflow(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+) !Value.OverflowArithmeticResult {
+ if (ty.zigTypeTag() == .Vector) {
+ const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen());
+ const result_data = try sema.arena.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ const of_math_result = try sema.intSubWithOverflowScalar(block, src, lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType());
+ overflowed_data[i] = of_math_result.overflowed;
+ scalar.* = of_math_result.wrapped_result;
+ }
+ return Value.OverflowArithmeticResult{
+ .overflowed = try Value.Tag.aggregate.create(sema.arena, overflowed_data),
+ .wrapped_result = try Value.Tag.aggregate.create(sema.arena, result_data),
+ };
+ }
+ return sema.intSubWithOverflowScalar(block, src, lhs, rhs, ty);
+}
+
+fn intSubWithOverflowScalar(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+) !Value.OverflowArithmeticResult {
+ const target = sema.mod.getTarget();
+ const info = ty.intInfo(target);
+
+ var lhs_space: Value.BigIntSpace = undefined;
+ var rhs_space: Value.BigIntSpace = undefined;
+ const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema.kit(block, src));
+ const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema.kit(block, src));
+ const limbs = try sema.arena.alloc(
+ std.math.big.Limb,
+ std.math.big.int.calcTwosCompLimbCount(info.bits),
+ );
+ var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
+ const overflowed = result_bigint.subWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits);
+ const wrapped_result = try Value.fromBigInt(sema.arena, result_bigint.toConst());
+ return Value.OverflowArithmeticResult{
+ .overflowed = Value.makeBool(overflowed),
+ .wrapped_result = wrapped_result,
+ };
+}
+
+fn floatToInt(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ val: Value,
+ float_ty: Type,
+ int_ty: Type,
+) CompileError!Value {
+ if (float_ty.zigTypeTag() == .Vector) {
+ const elem_ty = float_ty.childType();
+ const result_data = try sema.arena.alloc(Value, float_ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try sema.floatToIntScalar(block, src, val.indexVectorlike(i), elem_ty, int_ty.scalarType());
+ }
+ return Value.Tag.aggregate.create(sema.arena, result_data);
+ }
+ return sema.floatToIntScalar(block, src, val, float_ty, int_ty);
+}
+
+fn floatToIntScalar(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ val: Value,
+ float_ty: Type,
+ int_ty: Type,
+) CompileError!Value {
+ const Limb = std.math.big.Limb;
+
+ const float = val.toFloat(f128);
+ if (std.math.isNan(float)) {
+ return sema.fail(block, src, "float value NaN cannot be stored in integer type '{}'", .{
+ int_ty.fmt(sema.mod),
+ });
+ }
+ if (std.math.isInf(float)) {
+ return sema.fail(block, src, "float value Inf cannot be stored in integer type '{}'", .{
+ int_ty.fmt(sema.mod),
+ });
+ }
+
+ const is_negative = std.math.signbit(float);
+ const floored = @floor(@fabs(float));
+
+ var rational = try std.math.big.Rational.init(sema.arena);
+ defer rational.deinit();
+ rational.setFloat(f128, floored) catch |err| switch (err) {
+ error.NonFiniteFloat => unreachable,
+ error.OutOfMemory => return error.OutOfMemory,
+ };
+
+ // The float is reduced in rational.setFloat, so we assert that denominator is equal to one
+ const big_one = std.math.big.int.Const{ .limbs = &.{1}, .positive = true };
+ assert(rational.q.toConst().eqAbs(big_one));
+
+ const result_limbs = try sema.arena.dupe(Limb, rational.p.toConst().limbs);
+ const result = if (is_negative)
+ try Value.Tag.int_big_negative.create(sema.arena, result_limbs)
+ else
+ try Value.Tag.int_big_positive.create(sema.arena, result_limbs);
+
+ if (!(try sema.intFitsInType(block, src, result, int_ty))) {
+ return sema.fail(block, src, "float value {} cannot be stored in integer type '{}'", .{
+ val.fmtValue(float_ty, sema.mod), int_ty.fmt(sema.mod),
+ });
+ }
+ return result;
+}
+
+/// Asserts the value is an integer, and the destination type is ComptimeInt or Int.
+/// Vectors are also accepted. Vector results are reduced with AND.
+fn intFitsInType(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ self: Value,
+ ty: Type,
+) CompileError!bool {
+ const target = sema.mod.getTarget();
+ switch (self.tag()) {
+ .zero,
+ .undef,
+ .bool_false,
+ => return true,
+
+ .one,
+ .bool_true,
+ => switch (ty.zigTypeTag()) {
+ .Int => {
+ const info = ty.intInfo(target);
+ return switch (info.signedness) {
+ .signed => info.bits >= 2,
+ .unsigned => info.bits >= 1,
+ };
+ },
+ .ComptimeInt => return true,
+ else => unreachable,
+ },
+
+ .lazy_align => {
+ const info = ty.intInfo(target);
+ const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed);
+ // If it is u16 or bigger we know the alignment fits without resolving it.
+ if (info.bits >= max_needed_bits) return true;
+ const x = try sema.typeAbiAlignment(block, src, self.castTag(.lazy_align).?.data);
+ if (x == 0) return true;
+ const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
+ return info.bits >= actual_needed_bits;
+ },
+ .lazy_size => {
+ const info = ty.intInfo(target);
+ const max_needed_bits = @as(u16, 64) + @boolToInt(info.signedness == .signed);
+ // If it is u64 or bigger we know the size fits without resolving it.
+ if (info.bits >= max_needed_bits) return true;
+ const x = try sema.typeAbiSize(block, src, self.castTag(.lazy_size).?.data);
+ if (x == 0) return true;
+ const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
+ return info.bits >= actual_needed_bits;
+ },
+
+ .int_u64 => switch (ty.zigTypeTag()) {
+ .Int => {
+ const x = self.castTag(.int_u64).?.data;
+ if (x == 0) return true;
+ const info = ty.intInfo(target);
+ const needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
+ return info.bits >= needed_bits;
+ },
+ .ComptimeInt => return true,
+ else => unreachable,
+ },
+ .int_i64 => switch (ty.zigTypeTag()) {
+ .Int => {
+ const x = self.castTag(.int_i64).?.data;
+ if (x == 0) return true;
+ const info = ty.intInfo(target);
+ if (info.signedness == .unsigned and x < 0)
+ return false;
+ var buffer: Value.BigIntSpace = undefined;
+ return (try self.toBigIntAdvanced(&buffer, target, sema.kit(block, src))).fitsInTwosComp(info.signedness, info.bits);
+ },
+ .ComptimeInt => return true,
+ else => unreachable,
+ },
+ .int_big_positive => switch (ty.zigTypeTag()) {
+ .Int => {
+ const info = ty.intInfo(target);
+ return self.castTag(.int_big_positive).?.asBigInt().fitsInTwosComp(info.signedness, info.bits);
+ },
+ .ComptimeInt => return true,
+ else => unreachable,
+ },
+ .int_big_negative => switch (ty.zigTypeTag()) {
+ .Int => {
+ const info = ty.intInfo(target);
+ return self.castTag(.int_big_negative).?.asBigInt().fitsInTwosComp(info.signedness, info.bits);
+ },
+ .ComptimeInt => return true,
+ else => unreachable,
+ },
+
+ .the_only_possible_value => {
+ assert(ty.intInfo(target).bits == 0);
+ return true;
+ },
+
+ .decl_ref_mut,
+ .extern_fn,
+ .decl_ref,
+ .function,
+ .variable,
+ => switch (ty.zigTypeTag()) {
+ .Int => {
+ const info = ty.intInfo(target);
+ const ptr_bits = target.cpu.arch.ptrBitWidth();
+ return switch (info.signedness) {
+ .signed => info.bits > ptr_bits,
+ .unsigned => info.bits >= ptr_bits,
+ };
+ },
+ .ComptimeInt => return true,
+ else => unreachable,
+ },
+
+ .aggregate => {
+ assert(ty.zigTypeTag() == .Vector);
+ for (self.castTag(.aggregate).?.data) |elem| {
+ if (!(try sema.intFitsInType(block, src, elem, ty.scalarType()))) {
+ return false;
+ }
+ }
+ return true;
+ },
+
+ else => unreachable,
+ }
+}
+
+fn intInRange(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ tag_ty: Type,
+ int_val: Value,
+ end: usize,
+) !bool {
+ if (try int_val.compareWithZeroAdvanced(.lt, sema.kit(block, src))) return false;
+ var end_payload: Value.Payload.U64 = .{
+ .base = .{ .tag = .int_u64 },
+ .data = end,
+ };
+ const end_val = Value.initPayload(&end_payload.base);
+ if (try sema.compare(block, src, int_val, .gte, end_val, tag_ty)) return false;
+ return true;
+}
+
+/// Asserts the type is an enum.
+fn enumHasInt(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ ty: Type,
+ int: Value,
+) CompileError!bool {
+ switch (ty.tag()) {
+ .enum_nonexhaustive => return sema.intFitsInType(block, src, int, ty),
+ .enum_full => {
+ const enum_full = ty.castTag(.enum_full).?.data;
+ const tag_ty = enum_full.tag_ty;
+ if (enum_full.values.count() == 0) {
+ return intInRange(sema, block, src, tag_ty, int, enum_full.fields.count());
+ } else {
+ return enum_full.values.containsContext(int, .{
+ .ty = tag_ty,
+ .mod = sema.mod,
+ });
+ }
+ },
+ .enum_numbered => {
+ const enum_obj = ty.castTag(.enum_numbered).?.data;
+ const tag_ty = enum_obj.tag_ty;
+ if (enum_obj.values.count() == 0) {
+ return intInRange(sema, block, src, tag_ty, int, enum_obj.fields.count());
+ } else {
+ return enum_obj.values.containsContext(int, .{
+ .ty = tag_ty,
+ .mod = sema.mod,
+ });
+ }
+ },
+ .enum_simple => {
+ const enum_simple = ty.castTag(.enum_simple).?.data;
+ const fields_len = enum_simple.fields.count();
+ const bits = std.math.log2_int_ceil(usize, fields_len);
+ var buffer: Type.Payload.Bits = .{
+ .base = .{ .tag = .int_unsigned },
+ .data = bits,
+ };
+ const tag_ty = Type.initPayload(&buffer.base);
+ return intInRange(sema, block, src, tag_ty, int, fields_len);
+ },
+ .atomic_order,
+ .atomic_rmw_op,
+ .calling_convention,
+ .address_space,
+ .float_mode,
+ .reduce_op,
+ .call_options,
+ .prefetch_options,
+ .export_options,
+ .extern_options,
+ => unreachable,
+
+ else => unreachable,
+ }
+}
+
+fn intAddWithOverflow(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+) !Value.OverflowArithmeticResult {
+ if (ty.zigTypeTag() == .Vector) {
+ const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen());
+ const result_data = try sema.arena.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ const of_math_result = try sema.intAddWithOverflowScalar(block, src, lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType());
+ overflowed_data[i] = of_math_result.overflowed;
+ scalar.* = of_math_result.wrapped_result;
+ }
+ return Value.OverflowArithmeticResult{
+ .overflowed = try Value.Tag.aggregate.create(sema.arena, overflowed_data),
+ .wrapped_result = try Value.Tag.aggregate.create(sema.arena, result_data),
+ };
+ }
+ return sema.intAddWithOverflowScalar(block, src, lhs, rhs, ty);
+}
+
+fn intAddWithOverflowScalar(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+) !Value.OverflowArithmeticResult {
+ const target = sema.mod.getTarget();
+ const info = ty.intInfo(target);
+
+ var lhs_space: Value.BigIntSpace = undefined;
+ var rhs_space: Value.BigIntSpace = undefined;
+ const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema.kit(block, src));
+ const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema.kit(block, src));
+ const limbs = try sema.arena.alloc(
+ std.math.big.Limb,
+ std.math.big.int.calcTwosCompLimbCount(info.bits),
+ );
+ var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
+ const overflowed = result_bigint.addWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits);
+ const result = try Value.fromBigInt(sema.arena, result_bigint.toConst());
+ return Value.OverflowArithmeticResult{
+ .overflowed = Value.makeBool(overflowed),
+ .wrapped_result = result,
+ };
+}
+
+/// Asserts the values are comparable. Both operands have type `ty`.
+/// Vector results will be reduced with AND.
+fn compare(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ lhs: Value,
+ op: std.math.CompareOperator,
+ rhs: Value,
+ ty: Type,
+) CompileError!bool {
+ if (ty.zigTypeTag() == .Vector) {
+ var i: usize = 0;
+ while (i < ty.vectorLen()) : (i += 1) {
+ if (!(try sema.compareScalar(block, src, lhs.indexVectorlike(i), op, rhs.indexVectorlike(i), ty.scalarType()))) {
+ return false;
+ }
+ }
+ return true;
+ }
+ return sema.compareScalar(block, src, lhs, op, rhs, ty);
+}
+
+/// Asserts the values are comparable. Both operands have type `ty`.
+fn compareScalar(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ lhs: Value,
+ op: std.math.CompareOperator,
+ rhs: Value,
+ ty: Type,
+) CompileError!bool {
+ switch (op) {
+ .eq => return sema.valuesEqual(block, src, lhs, rhs, ty),
+ .neq => return !(try sema.valuesEqual(block, src, lhs, rhs, ty)),
+ else => return Value.compareHeteroAdvanced(lhs, op, rhs, sema.mod.getTarget(), sema.kit(block, src)),
+ }
+}
+
+fn valuesEqual(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+) CompileError!bool {
+ return Value.eqlAdvanced(lhs, rhs, ty, sema.mod, sema.kit(block, src));
+}
+
+/// Asserts the values are comparable vectors of type `ty`.
+pub fn compareVector(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ lhs: Value,
+ op: std.math.CompareOperator,
+ rhs: Value,
+ ty: Type,
+) !Value {
+ assert(ty.zigTypeTag() == .Vector);
+ const result_data = try sema.arena.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ const res_bool = try sema.compareScalar(block, src, lhs.indexVectorlike(i), op, rhs.indexVectorlike(i), ty.scalarType());
+ scalar.* = Value.makeBool(res_bool);
+ }
+ return Value.Tag.aggregate.create(sema.arena, result_data);
+}
diff --git a/src/TypedValue.zig b/src/TypedValue.zig
index 43c26b254e..b6aee29a4b 100644
--- a/src/TypedValue.zig
+++ b/src/TypedValue.zig
@@ -232,6 +232,11 @@ pub fn print(
const x = sub_ty.abiAlignment(target);
return writer.print("{d}", .{x});
},
+ .lazy_size => {
+ const sub_ty = val.castTag(.lazy_size).?.data;
+ const x = sub_ty.abiSize(target);
+ return writer.print("{d}", .{x});
+ },
.function => return writer.print("(function '{s}')", .{
mod.declPtr(val.castTag(.function).?.data.owner_decl).name,
}),
diff --git a/src/type.zig b/src/type.zig
index cc00f712f0..ea65cc8916 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -2760,7 +2760,7 @@ pub const Type = extern union {
.sema_kit => |sk| sk,
else => null,
};
- return switch (ty.tag()) {
+ switch (ty.tag()) {
.u1,
.u8,
.i8,
@@ -3028,7 +3028,7 @@ pub const Type = extern union {
=> unreachable,
.generic_poison => unreachable,
- };
+ }
}
pub fn abiAlignmentAdvancedUnion(
@@ -3076,10 +3076,37 @@ pub const Type = extern union {
return AbiAlignmentAdvanced{ .scalar = max_align };
}
+ /// May capture a reference to `ty`.
+ pub fn lazyAbiSize(ty: Type, target: Target, arena: Allocator) !Value {
+ switch (try ty.abiSizeAdvanced(target, .{ .lazy = arena })) {
+ .val => |val| return val,
+ .scalar => |x| return Value.Tag.int_u64.create(arena, x),
+ }
+ }
+
/// Asserts the type has the ABI size already resolved.
/// Types that return false for hasRuntimeBits() return 0.
- pub fn abiSize(self: Type, target: Target) u64 {
- return switch (self.tag()) {
+ pub fn abiSize(ty: Type, target: Target) u64 {
+ return (abiSizeAdvanced(ty, target, .eager) catch unreachable).scalar;
+ }
+
+ const AbiSizeAdvanced = union(enum) {
+ scalar: u64,
+ val: Value,
+ };
+
+ /// If you pass `eager` you will get back `scalar` and assert the type is resolved.
+ /// In this case there will be no error, guaranteed.
+ /// If you pass `lazy` you may get back `scalar` or `val`.
+ /// If `val` is returned, a reference to `ty` has been captured.
+ /// If you pass `sema_kit` you will get back `scalar` and resolve the type if
+ /// necessary, possibly returning a CompileError.
+ pub fn abiSizeAdvanced(
+ ty: Type,
+ target: Target,
+ strat: AbiAlignmentAdvancedStrat,
+ ) Module.CompileError!AbiSizeAdvanced {
+ switch (ty.tag()) {
.fn_noreturn_no_args => unreachable, // represents machine code; not a pointer
.fn_void_no_args => unreachable, // represents machine code; not a pointer
.fn_naked_noreturn_no_args => unreachable, // represents machine code; not a pointer
@@ -3109,32 +3136,59 @@ pub const Type = extern union {
.empty_struct_literal,
.empty_struct,
.void,
- => 0,
+ => return AbiSizeAdvanced{ .scalar = 0 },
- .@"struct", .tuple, .anon_struct => switch (self.containerLayout()) {
+ .@"struct", .tuple, .anon_struct => switch (ty.containerLayout()) {
.Packed => {
- const struct_obj = self.castTag(.@"struct").?.data;
+ const struct_obj = ty.castTag(.@"struct").?.data;
+ switch (strat) {
+ .sema_kit => |sk| _ = try sk.sema.resolveTypeFields(sk.block, sk.src, ty),
+ .lazy => |arena| {
+ if (!struct_obj.haveFieldTypes()) {
+ return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) };
+ }
+ },
+ .eager => {},
+ }
var buf: Type.Payload.Bits = undefined;
const int_ty = struct_obj.packedIntegerType(target, &buf);
- return int_ty.abiSize(target);
+ return AbiSizeAdvanced{ .scalar = int_ty.abiSize(target) };
},
else => {
- const field_count = self.structFieldCount();
+ switch (strat) {
+ .sema_kit => |sk| try sk.sema.resolveTypeLayout(sk.block, sk.src, ty),
+ .lazy => |arena| {
+ if (ty.castTag(.@"struct")) |payload| {
+ const struct_obj = payload.data;
+ if (!struct_obj.haveLayout()) {
+ return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) };
+ }
+ }
+ },
+ .eager => {},
+ }
+ const field_count = ty.structFieldCount();
if (field_count == 0) {
- return 0;
+ return AbiSizeAdvanced{ .scalar = 0 };
}
- return self.structFieldOffset(field_count, target);
+ return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, target) };
},
},
.enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => {
var buffer: Payload.Bits = undefined;
- const int_tag_ty = self.intTagType(&buffer);
- return int_tag_ty.abiSize(target);
+ const int_tag_ty = ty.intTagType(&buffer);
+ return AbiSizeAdvanced{ .scalar = int_tag_ty.abiSize(target) };
+ },
+ .@"union" => {
+ const union_obj = ty.castTag(.@"union").?.data;
+ // TODO pass `true` for have_tag when unions have a safety tag
+ return abiSizeAdvancedUnion(ty, target, strat, union_obj, false);
+ },
+ .union_tagged => {
+ const union_obj = ty.castTag(.union_tagged).?.data;
+ return abiSizeAdvancedUnion(ty, target, strat, union_obj, true);
},
- // TODO pass `true` for have_tag when unions have a safety tag
- .@"union" => return self.castTag(.@"union").?.data.abiSize(target, false),
- .union_tagged => return self.castTag(.union_tagged).?.data.abiSize(target, true),
.u1,
.u8,
@@ -3146,21 +3200,31 @@ pub const Type = extern union {
.address_space,
.float_mode,
.reduce_op,
- => return 1,
+ => return AbiSizeAdvanced{ .scalar = 1 },
- .array_u8 => self.castTag(.array_u8).?.data,
- .array_u8_sentinel_0 => self.castTag(.array_u8_sentinel_0).?.data + 1,
+ .array_u8 => return AbiSizeAdvanced{ .scalar = ty.castTag(.array_u8).?.data },
+ .array_u8_sentinel_0 => return AbiSizeAdvanced{ .scalar = ty.castTag(.array_u8_sentinel_0).?.data + 1 },
.array, .vector => {
- const payload = self.cast(Payload.Array).?.data;
- const elem_size = payload.elem_type.abiSize(target);
- assert(elem_size >= payload.elem_type.abiAlignment(target));
- return payload.len * elem_size;
+ const payload = ty.cast(Payload.Array).?.data;
+ switch (try payload.elem_type.abiSizeAdvanced(target, strat)) {
+ .scalar => |elem_size| return AbiSizeAdvanced{ .scalar = payload.len * elem_size },
+ .val => switch (strat) {
+ .sema_kit => unreachable,
+ .eager => unreachable,
+ .lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) },
+ },
+ }
},
.array_sentinel => {
- const payload = self.castTag(.array_sentinel).?.data;
- const elem_size = payload.elem_type.abiSize(target);
- assert(elem_size >= payload.elem_type.abiAlignment(target));
- return (payload.len + 1) * elem_size;
+ const payload = ty.castTag(.array_sentinel).?.data;
+ switch (try payload.elem_type.abiSizeAdvanced(target, strat)) {
+ .scalar => |elem_size| return AbiSizeAdvanced{ .scalar = (payload.len + 1) * elem_size },
+ .val => switch (strat) {
+ .sema_kit => unreachable,
+ .eager => unreachable,
+ .lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) },
+ },
+ }
},
.isize,
@@ -3178,95 +3242,96 @@ pub const Type = extern union {
.manyptr_u8,
.manyptr_const_u8,
.manyptr_const_u8_sentinel_0,
- => return @divExact(target.cpu.arch.ptrBitWidth(), 8),
+ => return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) },
.const_slice,
.mut_slice,
.const_slice_u8,
.const_slice_u8_sentinel_0,
- => return @divExact(target.cpu.arch.ptrBitWidth(), 8) * 2,
+ => return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) * 2 },
- .pointer => switch (self.castTag(.pointer).?.data.size) {
- .Slice => @divExact(target.cpu.arch.ptrBitWidth(), 8) * 2,
- else => @divExact(target.cpu.arch.ptrBitWidth(), 8),
+ .pointer => switch (ty.castTag(.pointer).?.data.size) {
+ .Slice => return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) * 2 },
+ else => return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) },
},
- .c_short => return @divExact(CType.short.sizeInBits(target), 8),
- .c_ushort => return @divExact(CType.ushort.sizeInBits(target), 8),
- .c_int => return @divExact(CType.int.sizeInBits(target), 8),
- .c_uint => return @divExact(CType.uint.sizeInBits(target), 8),
- .c_long => return @divExact(CType.long.sizeInBits(target), 8),
- .c_ulong => return @divExact(CType.ulong.sizeInBits(target), 8),
- .c_longlong => return @divExact(CType.longlong.sizeInBits(target), 8),
- .c_ulonglong => return @divExact(CType.ulonglong.sizeInBits(target), 8),
+ .c_short => return AbiSizeAdvanced{ .scalar = @divExact(CType.short.sizeInBits(target), 8) },
+ .c_ushort => return AbiSizeAdvanced{ .scalar = @divExact(CType.ushort.sizeInBits(target), 8) },
+ .c_int => return AbiSizeAdvanced{ .scalar = @divExact(CType.int.sizeInBits(target), 8) },
+ .c_uint => return AbiSizeAdvanced{ .scalar = @divExact(CType.uint.sizeInBits(target), 8) },
+ .c_long => return AbiSizeAdvanced{ .scalar = @divExact(CType.long.sizeInBits(target), 8) },
+ .c_ulong => return AbiSizeAdvanced{ .scalar = @divExact(CType.ulong.sizeInBits(target), 8) },
+ .c_longlong => return AbiSizeAdvanced{ .scalar = @divExact(CType.longlong.sizeInBits(target), 8) },
+ .c_ulonglong => return AbiSizeAdvanced{ .scalar = @divExact(CType.ulonglong.sizeInBits(target), 8) },
- .f16 => return 2,
- .f32 => return 4,
- .f64 => return 8,
- .f128 => return 16,
+ .f16 => return AbiSizeAdvanced{ .scalar = 2 },
+ .f32 => return AbiSizeAdvanced{ .scalar = 4 },
+ .f64 => return AbiSizeAdvanced{ .scalar = 8 },
+ .f128 => return AbiSizeAdvanced{ .scalar = 16 },
.f80 => switch (target.cpu.arch) {
- .i386 => return 12,
- .x86_64 => return 16,
+ .i386 => return AbiSizeAdvanced{ .scalar = 12 },
+ .x86_64 => return AbiSizeAdvanced{ .scalar = 16 },
else => {
var payload: Payload.Bits = .{
.base = .{ .tag = .int_unsigned },
.data = 80,
};
const u80_ty = initPayload(&payload.base);
- return abiSize(u80_ty, target);
+ return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, target) };
},
},
.c_longdouble => switch (CType.longdouble.sizeInBits(target)) {
- 16 => return abiSize(Type.f16, target),
- 32 => return abiSize(Type.f32, target),
- 64 => return abiSize(Type.f64, target),
- 80 => return abiSize(Type.f80, target),
- 128 => return abiSize(Type.f128, target),
+ 16 => return AbiSizeAdvanced{ .scalar = abiSize(Type.f16, target) },
+ 32 => return AbiSizeAdvanced{ .scalar = abiSize(Type.f32, target) },
+ 64 => return AbiSizeAdvanced{ .scalar = abiSize(Type.f64, target) },
+ 80 => return AbiSizeAdvanced{ .scalar = abiSize(Type.f80, target) },
+ 128 => return AbiSizeAdvanced{ .scalar = abiSize(Type.f128, target) },
else => unreachable,
},
+ // TODO revisit this when we have the concept of the error tag type
.error_set,
.error_set_single,
.anyerror_void_error_union,
.anyerror,
.error_set_inferred,
.error_set_merged,
- => return 2, // TODO revisit this when we have the concept of the error tag type
+ => return AbiSizeAdvanced{ .scalar = 2 },
- .i16, .u16 => return intAbiSize(16, target),
- .i32, .u32 => return intAbiSize(32, target),
- .i64, .u64 => return intAbiSize(64, target),
- .u128, .i128 => return intAbiSize(128, target),
+ .i16, .u16 => return AbiSizeAdvanced{ .scalar = intAbiSize(16, target) },
+ .i32, .u32 => return AbiSizeAdvanced{ .scalar = intAbiSize(32, target) },
+ .i64, .u64 => return AbiSizeAdvanced{ .scalar = intAbiSize(64, target) },
+ .u128, .i128 => return AbiSizeAdvanced{ .scalar = intAbiSize(128, target) },
.int_signed, .int_unsigned => {
- const bits: u16 = self.cast(Payload.Bits).?.data;
- if (bits == 0) return 0;
- return intAbiSize(bits, target);
+ const bits: u16 = ty.cast(Payload.Bits).?.data;
+ if (bits == 0) return AbiSizeAdvanced{ .scalar = 0 };
+ return AbiSizeAdvanced{ .scalar = intAbiSize(bits, target) };
},
.optional => {
var buf: Payload.ElemType = undefined;
- const child_type = self.optionalChild(&buf);
- if (!child_type.hasRuntimeBits()) return 1;
+ const child_type = ty.optionalChild(&buf);
+ if (!child_type.hasRuntimeBits()) return AbiSizeAdvanced{ .scalar = 1 };
if (child_type.zigTypeTag() == .Pointer and !child_type.isCPtr() and !child_type.isSlice())
- return @divExact(target.cpu.arch.ptrBitWidth(), 8);
+ return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) };
// Optional types are represented as a struct with the child type as the first
// field and a boolean as the second. Since the child type's abi alignment is
// guaranteed to be >= that of bool's (1 byte) the added size is exactly equal
// to the child type's ABI alignment.
- return child_type.abiAlignment(target) + child_type.abiSize(target);
+ return AbiSizeAdvanced{ .scalar = child_type.abiAlignment(target) + child_type.abiSize(target) };
},
.error_union => {
- const data = self.castTag(.error_union).?.data;
+ const data = ty.castTag(.error_union).?.data;
if (!data.error_set.hasRuntimeBits() and !data.payload.hasRuntimeBits()) {
- return 0;
+ return AbiSizeAdvanced{ .scalar = 0 };
} else if (!data.error_set.hasRuntimeBits()) {
- return data.payload.abiSize(target);
+ return AbiSizeAdvanced{ .scalar = data.payload.abiSize(target) };
} else if (!data.payload.hasRuntimeBits()) {
- return data.error_set.abiSize(target);
+ return AbiSizeAdvanced{ .scalar = data.error_set.abiSize(target) };
}
const code_align = abiAlignment(data.error_set, target);
const payload_align = abiAlignment(data.payload, target);
@@ -3278,9 +3343,28 @@ pub const Type = extern union {
size = std.mem.alignForwardGeneric(u64, size, payload_align);
size += payload_size;
size = std.mem.alignForwardGeneric(u64, size, big_align);
- return size;
+ return AbiSizeAdvanced{ .scalar = size };
},
- };
+ }
+ }
+
+ pub fn abiSizeAdvancedUnion(
+ ty: Type,
+ target: Target,
+ strat: AbiAlignmentAdvancedStrat,
+ union_obj: *Module.Union,
+ have_tag: bool,
+ ) Module.CompileError!AbiSizeAdvanced {
+ switch (strat) {
+ .sema_kit => |sk| try sk.sema.resolveTypeLayout(sk.block, sk.src, ty),
+ .lazy => |arena| {
+ if (!union_obj.haveLayout()) {
+ return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) };
+ }
+ },
+ .eager => {},
+ }
+ return AbiSizeAdvanced{ .scalar = union_obj.abiSize(target, have_tag) };
}
fn intAbiSize(bits: u16, target: Target) u64 {
@@ -5448,73 +5532,6 @@ pub const Type = extern union {
}
}
- /// Asserts the type is an enum.
- pub fn enumHasInt(ty: Type, int: Value, mod: *Module) bool {
- const S = struct {
- fn intInRange(tag_ty: Type, int_val: Value, end: usize, m: *Module) bool {
- if (int_val.compareWithZero(.lt)) return false;
- var end_payload: Value.Payload.U64 = .{
- .base = .{ .tag = .int_u64 },
- .data = end,
- };
- const end_val = Value.initPayload(&end_payload.base);
- if (int_val.compare(.gte, end_val, tag_ty, m)) return false;
- return true;
- }
- };
- switch (ty.tag()) {
- .enum_nonexhaustive => return int.intFitsInType(ty, mod.getTarget()),
- .enum_full => {
- const enum_full = ty.castTag(.enum_full).?.data;
- const tag_ty = enum_full.tag_ty;
- if (enum_full.values.count() == 0) {
- return S.intInRange(tag_ty, int, enum_full.fields.count(), mod);
- } else {
- return enum_full.values.containsContext(int, .{
- .ty = tag_ty,
- .mod = mod,
- });
- }
- },
- .enum_numbered => {
- const enum_obj = ty.castTag(.enum_numbered).?.data;
- const tag_ty = enum_obj.tag_ty;
- if (enum_obj.values.count() == 0) {
- return S.intInRange(tag_ty, int, enum_obj.fields.count(), mod);
- } else {
- return enum_obj.values.containsContext(int, .{
- .ty = tag_ty,
- .mod = mod,
- });
- }
- },
- .enum_simple => {
- const enum_simple = ty.castTag(.enum_simple).?.data;
- const fields_len = enum_simple.fields.count();
- const bits = std.math.log2_int_ceil(usize, fields_len);
- var buffer: Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = bits,
- };
- const tag_ty = Type.initPayload(&buffer.base);
- return S.intInRange(tag_ty, int, fields_len, mod);
- },
- .atomic_order,
- .atomic_rmw_op,
- .calling_convention,
- .address_space,
- .float_mode,
- .reduce_op,
- .call_options,
- .prefetch_options,
- .export_options,
- .extern_options,
- => unreachable,
-
- else => unreachable,
- }
- }
-
/// This enum does not directly correspond to `std.builtin.TypeId` because
/// it has extra enum tags in it, as a way of using less memory. For example,
/// even though Zig recognizes `*align(10) i32` and `*i32` both as Pointer types
diff --git a/src/value.zig b/src/value.zig
index 588c7d2832..1280adf1e0 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -179,6 +179,8 @@ pub const Value = extern union {
bound_fn,
/// The ABI alignment of the payload type.
lazy_align,
+ /// The ABI size of the payload type.
+ lazy_size,
pub const last_no_payload_tag = Tag.empty_array;
pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1;
@@ -289,6 +291,7 @@ pub const Value = extern union {
.ty,
.lazy_align,
+ .lazy_size,
=> Payload.Ty,
.int_type => Payload.IntType,
@@ -460,7 +463,7 @@ pub const Value = extern union {
.bound_fn,
=> unreachable,
- .ty, .lazy_align => {
+ .ty, .lazy_align, .lazy_size => {
const payload = self.cast(Payload.Ty).?;
const new_payload = try arena.create(Payload.Ty);
new_payload.* = .{
@@ -720,6 +723,11 @@ pub const Value = extern union {
try val.castTag(.lazy_align).?.data.dump("", options, out_stream);
return try out_stream.writeAll(")");
},
+ .lazy_size => {
+ try out_stream.writeAll("@sizeOf(");
+ try val.castTag(.lazy_size).?.data.dump("", options, out_stream);
+ return try out_stream.writeAll(")");
+ },
.int_type => {
const int_type = val.castTag(.int_type).?.data;
return out_stream.print("{s}{d}", .{
@@ -1040,6 +1048,14 @@ pub const Value = extern union {
const x = ty.abiAlignment(target);
return BigIntMutable.init(&space.limbs, x).toConst();
},
+ .lazy_size => {
+ const ty = val.castTag(.lazy_size).?.data;
+ if (sema_kit) |sk| {
+ try sk.sema.resolveTypeLayout(sk.block, sk.src, ty);
+ }
+ const x = ty.abiSize(target);
+ return BigIntMutable.init(&space.limbs, x).toConst();
+ },
.elem_ptr => {
const elem_ptr = val.castTag(.elem_ptr).?.data;
@@ -1087,6 +1103,14 @@ pub const Value = extern union {
return ty.abiAlignment(target);
}
},
+ .lazy_size => {
+ const ty = val.castTag(.lazy_size).?.data;
+ if (sema_kit) |sk| {
+ return (try ty.abiSizeAdvanced(target, .{ .sema_kit = sk })).scalar;
+ } else {
+ return ty.abiSize(target);
+ }
+ },
else => return null,
}
@@ -1670,118 +1694,6 @@ pub const Value = extern union {
}
}
- /// Asserts the value is an integer, and the destination type is ComptimeInt or Int.
- /// Vectors are also accepted. Vector results are reduced with AND.
- pub fn intFitsInType(self: Value, ty: Type, target: Target) bool {
- switch (self.tag()) {
- .zero,
- .undef,
- .bool_false,
- => return true,
-
- .one,
- .bool_true,
- => switch (ty.zigTypeTag()) {
- .Int => {
- const info = ty.intInfo(target);
- return switch (info.signedness) {
- .signed => info.bits >= 2,
- .unsigned => info.bits >= 1,
- };
- },
- .ComptimeInt => return true,
- else => unreachable,
- },
-
- .lazy_align => {
- const info = ty.intInfo(target);
- const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed);
- // If it is u16 or bigger we know the alignment fits without resolving it.
- if (info.bits >= max_needed_bits) return true;
- const x = self.castTag(.lazy_align).?.data.abiAlignment(target);
- if (x == 0) return true;
- const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
- return info.bits >= actual_needed_bits;
- },
-
- .int_u64 => switch (ty.zigTypeTag()) {
- .Int => {
- const x = self.castTag(.int_u64).?.data;
- if (x == 0) return true;
- const info = ty.intInfo(target);
- const needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
- return info.bits >= needed_bits;
- },
- .ComptimeInt => return true,
- else => unreachable,
- },
- .int_i64 => switch (ty.zigTypeTag()) {
- .Int => {
- const x = self.castTag(.int_i64).?.data;
- if (x == 0) return true;
- const info = ty.intInfo(target);
- if (info.signedness == .unsigned and x < 0)
- return false;
- var buffer: BigIntSpace = undefined;
- return self.toBigInt(&buffer, target).fitsInTwosComp(info.signedness, info.bits);
- },
- .ComptimeInt => return true,
- else => unreachable,
- },
- .int_big_positive => switch (ty.zigTypeTag()) {
- .Int => {
- const info = ty.intInfo(target);
- return self.castTag(.int_big_positive).?.asBigInt().fitsInTwosComp(info.signedness, info.bits);
- },
- .ComptimeInt => return true,
- else => unreachable,
- },
- .int_big_negative => switch (ty.zigTypeTag()) {
- .Int => {
- const info = ty.intInfo(target);
- return self.castTag(.int_big_negative).?.asBigInt().fitsInTwosComp(info.signedness, info.bits);
- },
- .ComptimeInt => return true,
- else => unreachable,
- },
-
- .the_only_possible_value => {
- assert(ty.intInfo(target).bits == 0);
- return true;
- },
-
- .decl_ref_mut,
- .extern_fn,
- .decl_ref,
- .function,
- .variable,
- => switch (ty.zigTypeTag()) {
- .Int => {
- const info = ty.intInfo(target);
- const ptr_bits = target.cpu.arch.ptrBitWidth();
- return switch (info.signedness) {
- .signed => info.bits > ptr_bits,
- .unsigned => info.bits >= ptr_bits,
- };
- },
- .ComptimeInt => return true,
- else => unreachable,
- },
-
- .aggregate => {
- assert(ty.zigTypeTag() == .Vector);
- for (self.castTag(.aggregate).?.data) |elem| {
- if (!elem.intFitsInType(ty.scalarType(), target)) {
- return false;
- }
- }
- return true;
- },
-
- else => unreachable,
- }
- }
-
/// Converts an integer or a float to a float. May result in a loss of information.
/// Caller can find out by equality checking the result against the operand.
pub fn floatCast(self: Value, arena: Allocator, dest_ty: Type, target: Target) !Value {
@@ -1849,6 +1761,14 @@ pub const Value = extern union {
return .eq;
}
},
+ .lazy_size => {
+ const ty = lhs.castTag(.lazy_size).?.data;
+ if (try ty.hasRuntimeBitsAdvanced(false, sema_kit)) {
+ return .gt;
+ } else {
+ return .eq;
+ }
+ },
.float_16 => std.math.order(lhs.castTag(.float_16).?.data, 0),
.float_32 => std.math.order(lhs.castTag(.float_32).?.data, 0),
@@ -1992,38 +1912,28 @@ pub const Value = extern union {
};
}
- /// Asserts the values are comparable vectors of type `ty`.
- pub fn compareVector(
- lhs: Value,
- op: std.math.CompareOperator,
- rhs: Value,
- ty: Type,
- allocator: Allocator,
- mod: *Module,
- ) !Value {
- assert(ty.zigTypeTag() == .Vector);
- const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
- const res_bool = compareScalar(lhs.indexVectorlike(i), op, rhs.indexVectorlike(i), ty.scalarType(), mod);
- scalar.* = makeBool(res_bool);
- }
- return Value.Tag.aggregate.create(allocator, result_data);
- }
-
/// Asserts the value is comparable.
/// Vector results will be reduced with AND.
pub fn compareWithZero(lhs: Value, op: std.math.CompareOperator) bool {
+ return compareWithZeroAdvanced(lhs, op, null) catch unreachable;
+ }
+
+ pub fn compareWithZeroAdvanced(
+ lhs: Value,
+ op: std.math.CompareOperator,
+ sema_kit: ?Module.WipAnalysis,
+ ) Module.CompileError!bool {
switch (lhs.tag()) {
- .repeated => return lhs.castTag(.repeated).?.data.compareWithZero(op),
+ .repeated => return lhs.castTag(.repeated).?.data.compareWithZeroAdvanced(op, sema_kit),
.aggregate => {
for (lhs.castTag(.aggregate).?.data) |elem_val| {
- if (!elem_val.compareWithZero(op)) return false;
+ if (!(try elem_val.compareWithZeroAdvanced(op, sema_kit))) return false;
}
return true;
},
else => {},
}
- return orderAgainstZero(lhs).compare(op);
+ return (try orderAgainstZeroAdvanced(lhs, sema_kit)).compare(op);
}
/// This function is used by hash maps and so treats floating-point NaNs as equal
@@ -2032,9 +1942,20 @@ pub const Value = extern union {
/// This function has to be able to support implicit coercion of `a` to `ty`. That is,
/// `ty` will be an exactly correct Type for `b` but it may be a post-coerced Type
/// for `a`. This function must act *as if* `a` has been coerced to `ty`. This complication
- /// is required in order to make generic function instantiation effecient - specifically
+ /// is required in order to make generic function instantiation efficient - specifically
/// the insertion into the monomorphized function table.
pub fn eql(a: Value, b: Value, ty: Type, mod: *Module) bool {
+ return eqlAdvanced(a, b, ty, mod, null) catch unreachable;
+ }
+
+ /// If `null` is provided for `sema_kit` then it is guaranteed no error will be returned.
+ pub fn eqlAdvanced(
+ a: Value,
+ b: Value,
+ ty: Type,
+ mod: *Module,
+ sema_kit: ?Module.WipAnalysis,
+ ) Module.CompileError!bool {
const target = mod.getTarget();
const a_tag = a.tag();
const b_tag = b.tag();
@@ -2055,31 +1976,33 @@ pub const Value = extern union {
const a_payload = a.castTag(.opt_payload).?.data;
const b_payload = b.castTag(.opt_payload).?.data;
var buffer: Type.Payload.ElemType = undefined;
- return eql(a_payload, b_payload, ty.optionalChild(&buffer), mod);
+ return eqlAdvanced(a_payload, b_payload, ty.optionalChild(&buffer), mod, sema_kit);
},
.slice => {
const a_payload = a.castTag(.slice).?.data;
const b_payload = b.castTag(.slice).?.data;
- if (!eql(a_payload.len, b_payload.len, Type.usize, mod)) return false;
+ if (!(try eqlAdvanced(a_payload.len, b_payload.len, Type.usize, mod, sema_kit))) {
+ return false;
+ }
var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_ty = ty.slicePtrFieldType(&ptr_buf);
- return eql(a_payload.ptr, b_payload.ptr, ptr_ty, mod);
+ return eqlAdvanced(a_payload.ptr, b_payload.ptr, ptr_ty, mod, sema_kit);
},
.elem_ptr => {
const a_payload = a.castTag(.elem_ptr).?.data;
const b_payload = b.castTag(.elem_ptr).?.data;
if (a_payload.index != b_payload.index) return false;
- return eql(a_payload.array_ptr, b_payload.array_ptr, ty, mod);
+ return eqlAdvanced(a_payload.array_ptr, b_payload.array_ptr, ty, mod, sema_kit);
},
.field_ptr => {
const a_payload = a.castTag(.field_ptr).?.data;
const b_payload = b.castTag(.field_ptr).?.data;
if (a_payload.field_index != b_payload.field_index) return false;
- return eql(a_payload.container_ptr, b_payload.container_ptr, ty, mod);
+ return eqlAdvanced(a_payload.container_ptr, b_payload.container_ptr, ty, mod, sema_kit);
},
.@"error" => {
const a_name = a.castTag(.@"error").?.data.name;
@@ -2089,7 +2012,7 @@ pub const Value = extern union {
.eu_payload => {
const a_payload = a.castTag(.eu_payload).?.data;
const b_payload = b.castTag(.eu_payload).?.data;
- return eql(a_payload, b_payload, ty.errorUnionPayload(), mod);
+ return eqlAdvanced(a_payload, b_payload, ty.errorUnionPayload(), mod, sema_kit);
},
.eu_payload_ptr => @panic("TODO: Implement more pointer eql cases"),
.opt_payload_ptr => @panic("TODO: Implement more pointer eql cases"),
@@ -2107,7 +2030,9 @@ pub const Value = extern union {
const types = ty.tupleFields().types;
assert(types.len == a_field_vals.len);
for (types) |field_ty, i| {
- if (!eql(a_field_vals[i], b_field_vals[i], field_ty, mod)) return false;
+ if (!(try eqlAdvanced(a_field_vals[i], b_field_vals[i], field_ty, mod, sema_kit))) {
+ return false;
+ }
}
return true;
}
@@ -2116,7 +2041,9 @@ pub const Value = extern union {
const fields = ty.structFields().values();
assert(fields.len == a_field_vals.len);
for (fields) |field, i| {
- if (!eql(a_field_vals[i], b_field_vals[i], field.ty, mod)) return false;
+ if (!(try eqlAdvanced(a_field_vals[i], b_field_vals[i], field.ty, mod, sema_kit))) {
+ return false;
+ }
}
return true;
}
@@ -2125,7 +2052,9 @@ pub const Value = extern union {
for (a_field_vals) |a_elem, i| {
const b_elem = b_field_vals[i];
- if (!eql(a_elem, b_elem, elem_ty, mod)) return false;
+ if (!(try eqlAdvanced(a_elem, b_elem, elem_ty, mod, sema_kit))) {
+ return false;
+ }
}
return true;
},
@@ -2135,7 +2064,7 @@ pub const Value = extern union {
switch (ty.containerLayout()) {
.Packed, .Extern => {
const tag_ty = ty.unionTagTypeHypothetical();
- if (!a_union.tag.eql(b_union.tag, tag_ty, mod)) {
+ if (!(try a_union.tag.eqlAdvanced(b_union.tag, tag_ty, mod, sema_kit))) {
// In this case, we must disregard mismatching tags and compare
// based on the in-memory bytes of the payloads.
@panic("TODO comptime comparison of extern union values with mismatching tags");
@@ -2143,13 +2072,13 @@ pub const Value = extern union {
},
.Auto => {
const tag_ty = ty.unionTagTypeHypothetical();
- if (!a_union.tag.eql(b_union.tag, tag_ty, mod)) {
+ if (!(try a_union.tag.eqlAdvanced(b_union.tag, tag_ty, mod, sema_kit))) {
return false;
}
},
}
const active_field_ty = ty.unionFieldType(a_union.tag, mod);
- return a_union.val.eql(b_union.val, active_field_ty, mod);
+ return a_union.val.eqlAdvanced(b_union.val, active_field_ty, mod, sema_kit);
},
else => {},
} else if (a_tag == .null_value or b_tag == .null_value) {
@@ -2183,7 +2112,7 @@ pub const Value = extern union {
const b_val = b.enumToInt(ty, &buf_b);
var buf_ty: Type.Payload.Bits = undefined;
const int_ty = ty.intTagType(&buf_ty);
- return eql(a_val, b_val, int_ty, mod);
+ return eqlAdvanced(a_val, b_val, int_ty, mod, sema_kit);
},
.Array, .Vector => {
const len = ty.arrayLen();
@@ -2194,7 +2123,9 @@ pub const Value = extern union {
while (i < len) : (i += 1) {
const a_elem = elemValueBuffer(a, mod, i, &a_buf);
const b_elem = elemValueBuffer(b, mod, i, &b_buf);
- if (!eql(a_elem, b_elem, elem_ty, mod)) return false;
+ if (!(try eqlAdvanced(a_elem, b_elem, elem_ty, mod, sema_kit))) {
+ return false;
+ }
}
return true;
},
@@ -2218,12 +2149,12 @@ pub const Value = extern union {
.base = .{ .tag = .opt_payload },
.data = a,
};
- return eql(Value.initPayload(&buffer.base), b, ty, mod);
+ return eqlAdvanced(Value.initPayload(&buffer.base), b, ty, mod, sema_kit);
}
},
else => {},
}
- return order(a, b, target).compare(.eq);
+ return (try orderAdvanced(a, b, target, sema_kit)).compare(.eq);
}
/// This function is used by hash maps and so treats floating-point NaNs as equal
@@ -2502,6 +2433,7 @@ pub const Value = extern union {
.bool_true,
.the_only_possible_value,
.lazy_align,
+ .lazy_size,
=> return hashInt(ptr_val, hasher, target),
else => unreachable,
@@ -2882,54 +2814,6 @@ pub const Value = extern union {
}
}
- pub fn floatToInt(val: Value, arena: Allocator, float_ty: Type, int_ty: Type, target: Target) error{ FloatCannotFit, OutOfMemory }!Value {
- if (float_ty.zigTypeTag() == .Vector) {
- const result_data = try arena.alloc(Value, float_ty.vectorLen());
- for (result_data) |*scalar, i| {
- scalar.* = try floatToIntScalar(val.indexVectorlike(i), arena, int_ty.scalarType(), target);
- }
- return Value.Tag.aggregate.create(arena, result_data);
- }
- return floatToIntScalar(val, arena, int_ty, target);
- }
-
- pub fn floatToIntScalar(val: Value, arena: Allocator, int_ty: Type, target: Target) error{ FloatCannotFit, OutOfMemory }!Value {
- const Limb = std.math.big.Limb;
-
- var value = val.toFloat(f64); // TODO: f128 ?
- if (std.math.isNan(value) or std.math.isInf(value)) {
- return error.FloatCannotFit;
- }
-
- const isNegative = std.math.signbit(value);
- value = @fabs(value);
-
- const floored = @floor(value);
-
- var rational = try std.math.big.Rational.init(arena);
- defer rational.deinit();
- rational.setFloat(f64, floored) catch |err| switch (err) {
- error.NonFiniteFloat => unreachable,
- error.OutOfMemory => return error.OutOfMemory,
- };
-
- // The float is reduced in rational.setFloat, so we assert that denominator is equal to one
- const bigOne = std.math.big.int.Const{ .limbs = &.{1}, .positive = true };
- assert(rational.q.toConst().eqAbs(bigOne));
-
- const result_limbs = try arena.dupe(Limb, rational.p.toConst().limbs);
- const result = if (isNegative)
- try Value.Tag.int_big_negative.create(arena, result_limbs)
- else
- try Value.Tag.int_big_positive.create(arena, result_limbs);
-
- if (result.intFitsInType(int_ty, target)) {
- return result;
- } else {
- return error.FloatCannotFit;
- }
- }
-
fn calcLimbLenFloat(scalar: anytype) usize {
if (scalar == 0) {
return 1;
@@ -2945,96 +2829,7 @@ pub const Value = extern union {
wrapped_result: Value,
};
- pub fn intAddWithOverflow(
- lhs: Value,
- rhs: Value,
- ty: Type,
- arena: Allocator,
- target: Target,
- ) !OverflowArithmeticResult {
- if (ty.zigTypeTag() == .Vector) {
- const overflowed_data = try arena.alloc(Value, ty.vectorLen());
- const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
- const of_math_result = try intAddWithOverflowScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
- overflowed_data[i] = of_math_result.overflowed;
- scalar.* = of_math_result.wrapped_result;
- }
- return OverflowArithmeticResult{
- .overflowed = try Value.Tag.aggregate.create(arena, overflowed_data),
- .wrapped_result = try Value.Tag.aggregate.create(arena, result_data),
- };
- }
- return intAddWithOverflowScalar(lhs, rhs, ty, arena, target);
- }
-
- pub fn intAddWithOverflowScalar(
- lhs: Value,
- rhs: Value,
- ty: Type,
- arena: Allocator,
- target: Target,
- ) !OverflowArithmeticResult {
- const info = ty.intInfo(target);
-
- var lhs_space: Value.BigIntSpace = undefined;
- var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, target);
- const rhs_bigint = rhs.toBigInt(&rhs_space, target);
- const limbs = try arena.alloc(
- std.math.big.Limb,
- std.math.big.int.calcTwosCompLimbCount(info.bits),
- );
- var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
- const overflowed = result_bigint.addWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits);
- const result = try fromBigInt(arena, result_bigint.toConst());
- return OverflowArithmeticResult{
- .overflowed = makeBool(overflowed),
- .wrapped_result = result,
- };
- }
-
- /// Supports both (vectors of) floats and ints; handles undefined scalars.
- pub fn numberAddWrap(
- lhs: Value,
- rhs: Value,
- ty: Type,
- arena: Allocator,
- target: Target,
- ) !Value {
- if (ty.zigTypeTag() == .Vector) {
- const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
- scalar.* = try numberAddWrapScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
- }
- return Value.Tag.aggregate.create(arena, result_data);
- }
- return numberAddWrapScalar(lhs, rhs, ty, arena, target);
- }
-
- /// Supports both floats and ints; handles undefined.
- pub fn numberAddWrapScalar(
- lhs: Value,
- rhs: Value,
- ty: Type,
- arena: Allocator,
- target: Target,
- ) !Value {
- if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
-
- if (ty.zigTypeTag() == .ComptimeInt) {
- return intAdd(lhs, rhs, ty, arena, target);
- }
-
- if (ty.isAnyFloat()) {
- return floatAdd(lhs, rhs, ty, arena, target);
- }
-
- const overflow_result = try intAddWithOverflow(lhs, rhs, ty, arena, target);
- return overflow_result.wrapped_result;
- }
-
- fn fromBigInt(arena: Allocator, big_int: BigIntConst) !Value {
+ pub fn fromBigInt(arena: Allocator, big_int: BigIntConst) !Value {
if (big_int.positive) {
if (big_int.to(u64)) |x| {
return Value.Tag.int_u64.create(arena, x);
@@ -3094,95 +2889,6 @@ pub const Value = extern union {
return fromBigInt(arena, result_bigint.toConst());
}
- pub fn intSubWithOverflow(
- lhs: Value,
- rhs: Value,
- ty: Type,
- arena: Allocator,
- target: Target,
- ) !OverflowArithmeticResult {
- if (ty.zigTypeTag() == .Vector) {
- const overflowed_data = try arena.alloc(Value, ty.vectorLen());
- const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
- const of_math_result = try intSubWithOverflowScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
- overflowed_data[i] = of_math_result.overflowed;
- scalar.* = of_math_result.wrapped_result;
- }
- return OverflowArithmeticResult{
- .overflowed = try Value.Tag.aggregate.create(arena, overflowed_data),
- .wrapped_result = try Value.Tag.aggregate.create(arena, result_data),
- };
- }
- return intSubWithOverflowScalar(lhs, rhs, ty, arena, target);
- }
-
- pub fn intSubWithOverflowScalar(
- lhs: Value,
- rhs: Value,
- ty: Type,
- arena: Allocator,
- target: Target,
- ) !OverflowArithmeticResult {
- const info = ty.intInfo(target);
-
- var lhs_space: Value.BigIntSpace = undefined;
- var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, target);
- const rhs_bigint = rhs.toBigInt(&rhs_space, target);
- const limbs = try arena.alloc(
- std.math.big.Limb,
- std.math.big.int.calcTwosCompLimbCount(info.bits),
- );
- var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
- const overflowed = result_bigint.subWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits);
- const wrapped_result = try fromBigInt(arena, result_bigint.toConst());
- return OverflowArithmeticResult{
- .overflowed = makeBool(overflowed),
- .wrapped_result = wrapped_result,
- };
- }
-
- /// Supports both (vectors of) floats and ints; handles undefined scalars.
- pub fn numberSubWrap(
- lhs: Value,
- rhs: Value,
- ty: Type,
- arena: Allocator,
- target: Target,
- ) !Value {
- if (ty.zigTypeTag() == .Vector) {
- const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
- scalar.* = try numberSubWrapScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
- }
- return Value.Tag.aggregate.create(arena, result_data);
- }
- return numberSubWrapScalar(lhs, rhs, ty, arena, target);
- }
-
- /// Supports both floats and ints; handles undefined.
- pub fn numberSubWrapScalar(
- lhs: Value,
- rhs: Value,
- ty: Type,
- arena: Allocator,
- target: Target,
- ) !Value {
- if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
-
- if (ty.zigTypeTag() == .ComptimeInt) {
- return intSub(lhs, rhs, ty, arena, target);
- }
-
- if (ty.isAnyFloat()) {
- return floatSub(lhs, rhs, ty, arena, target);
- }
-
- const overflow_result = try intSubWithOverflow(lhs, rhs, ty, arena, target);
- return overflow_result.wrapped_result;
- }
-
/// Supports (vectors of) integers only; asserts neither operand is undefined.
pub fn intSubSat(
lhs: Value,
@@ -3559,60 +3265,6 @@ pub const Value = extern union {
return fromBigInt(arena, result_bigint.toConst());
}
- pub fn intAdd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
- if (ty.zigTypeTag() == .Vector) {
- const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
- scalar.* = try intAddScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
- }
- return Value.Tag.aggregate.create(allocator, result_data);
- }
- return intAddScalar(lhs, rhs, allocator, target);
- }
-
- pub fn intAddScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value {
- // TODO is this a performance issue? maybe we should try the operation without
- // resorting to BigInt first.
- var lhs_space: Value.BigIntSpace = undefined;
- var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, target);
- const rhs_bigint = rhs.toBigInt(&rhs_space, target);
- const limbs = try allocator.alloc(
- std.math.big.Limb,
- std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
- );
- var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
- result_bigint.add(lhs_bigint, rhs_bigint);
- return fromBigInt(allocator, result_bigint.toConst());
- }
-
- pub fn intSub(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
- if (ty.zigTypeTag() == .Vector) {
- const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
- scalar.* = try intSubScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
- }
- return Value.Tag.aggregate.create(allocator, result_data);
- }
- return intSubScalar(lhs, rhs, allocator, target);
- }
-
- pub fn intSubScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value {
- // TODO is this a performance issue? maybe we should try the operation without
- // resorting to BigInt first.
- var lhs_space: Value.BigIntSpace = undefined;
- var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, target);
- const rhs_bigint = rhs.toBigInt(&rhs_space, target);
- const limbs = try allocator.alloc(
- std.math.big.Limb,
- std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
- );
- var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
- result_bigint.sub(lhs_bigint, rhs_bigint);
- return fromBigInt(allocator, result_bigint.toConst());
- }
-
pub fn intDiv(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
@@ -4129,114 +3781,6 @@ pub const Value = extern union {
return fromBigInt(allocator, result_bigint.toConst());
}
/// Add two floating-point values of type `float_type`.
/// Vector operands are added elementwise (using the scalar element
/// type) and returned as an aggregate; scalars delegate directly to
/// `floatAddScalar`.
pub fn floatAdd(
    lhs: Value,
    rhs: Value,
    float_type: Type,
    arena: Allocator,
    target: Target,
) !Value {
    if (float_type.zigTypeTag() != .Vector) {
        return floatAddScalar(lhs, rhs, float_type, arena, target);
    }
    const elems = try arena.alloc(Value, float_type.vectorLen());
    for (elems) |*elem, i| {
        elem.* = try floatAddScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
    }
    return Value.Tag.aggregate.create(arena, elems);
}
-
/// Scalar floating-point addition. The operands are converted to the
/// native float width selected by `float_type` (for the given target),
/// added, and wrapped back into a `Value` of the matching tag.
pub fn floatAddScalar(
    lhs: Value,
    rhs: Value,
    float_type: Type,
    arena: Allocator,
    target: Target,
) !Value {
    switch (float_type.floatBits(target)) {
        16 => return Value.Tag.float_16.create(arena, lhs.toFloat(f16) + rhs.toFloat(f16)),
        32 => return Value.Tag.float_32.create(arena, lhs.toFloat(f32) + rhs.toFloat(f32)),
        64 => return Value.Tag.float_64.create(arena, lhs.toFloat(f64) + rhs.toFloat(f64)),
        80 => return Value.Tag.float_80.create(arena, lhs.toFloat(f80) + rhs.toFloat(f80)),
        128 => return Value.Tag.float_128.create(arena, lhs.toFloat(f128) + rhs.toFloat(f128)),
        else => unreachable,
    }
}
-
/// Subtract `rhs` from `lhs` as floats of type `float_type`.
/// Vector operands are subtracted elementwise (using the scalar
/// element type) and returned as an aggregate; scalars delegate
/// directly to `floatSubScalar`.
pub fn floatSub(
    lhs: Value,
    rhs: Value,
    float_type: Type,
    arena: Allocator,
    target: Target,
) !Value {
    if (float_type.zigTypeTag() != .Vector) {
        return floatSubScalar(lhs, rhs, float_type, arena, target);
    }
    const elems = try arena.alloc(Value, float_type.vectorLen());
    for (elems) |*elem, i| {
        elem.* = try floatSubScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
    }
    return Value.Tag.aggregate.create(arena, elems);
}
-
/// Scalar floating-point subtraction. The operands are converted to
/// the native float width selected by `float_type` (for the given
/// target), subtracted, and wrapped back into a `Value` of the
/// matching tag.
pub fn floatSubScalar(
    lhs: Value,
    rhs: Value,
    float_type: Type,
    arena: Allocator,
    target: Target,
) !Value {
    switch (float_type.floatBits(target)) {
        16 => return Value.Tag.float_16.create(arena, lhs.toFloat(f16) - rhs.toFloat(f16)),
        32 => return Value.Tag.float_32.create(arena, lhs.toFloat(f32) - rhs.toFloat(f32)),
        64 => return Value.Tag.float_64.create(arena, lhs.toFloat(f64) - rhs.toFloat(f64)),
        80 => return Value.Tag.float_80.create(arena, lhs.toFloat(f80) - rhs.toFloat(f80)),
        128 => return Value.Tag.float_128.create(arena, lhs.toFloat(f128) - rhs.toFloat(f128)),
        else => unreachable,
    }
}
-
pub fn floatNeg(
val: Value,
float_type: Type,