-rw-r--r-- | src/Air.zig                  |   2
-rw-r--r-- | src/Module.zig               |  12
-rw-r--r-- | src/Sema.zig                 | 257
-rw-r--r-- | src/TypedValue.zig           |  13
-rw-r--r-- | src/arch/aarch64/CodeGen.zig |   4
-rw-r--r-- | src/arch/arm/CodeGen.zig     |   4
-rw-r--r-- | src/arch/riscv64/CodeGen.zig |   4
-rw-r--r-- | src/arch/sparc64/CodeGen.zig |   4
-rw-r--r-- | src/arch/wasm/CodeGen.zig    |  10
-rw-r--r-- | src/arch/x86_64/CodeGen.zig  |  12
-rw-r--r-- | src/codegen.zig              |   8
-rw-r--r-- | src/codegen/c.zig            |  64
-rw-r--r-- | src/codegen/llvm.zig         |  18
-rw-r--r-- | src/codegen/spirv.zig        |  10
-rw-r--r-- | src/type.zig                 |  44
-rw-r--r-- | src/value.zig                |  43
16 files changed, 286 insertions(+), 223 deletions(-)
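
Most of the changes below follow two mechanical patterns: the untyped constants Value.zero, Value.one and Value.negative_one are replaced by typed integers obtained from Module.intValue (which can allocate, so call sites gain a `try`), and Air.value, along with helpers such as Type.structFieldValueComptime, Type.onePossibleValue and Value.fieldValue, becomes fallible. A minimal sketch of the resulting call sites, assuming the compiler's Module, Air, Type and Value types are in scope (the function and variable names here are illustrative only, not taken from this diff):

    fn example(mod: *Module, air: Air, inst: Air.Inst.Ref, ty: Type) !void {
        // Typed integer constants are now created through the Module;
        // intValue returns Allocator.Error!Value, hence the `try`.
        const zero = try mod.intValue(ty, 0);
        const neg_one = try mod.intValue(ty, -1); // signed or comptime-int types
        _ = zero;
        _ = neg_one;

        // Air.value now returns `!?Value` instead of `?Value`;
        // `null` still means the operand is only runtime-known.
        if (try air.value(inst, mod)) |val| {
            _ = val; // comptime-known operand
        } else {
            // runtime-known operand
        }
    }
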
diff --git a/src/Air.zig b/src/Air.zig index 549583e697..8059b9e57f 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -1485,7 +1485,7 @@ pub fn refToIndexAllowNone(inst: Inst.Ref) ?Inst.Index { } /// Returns `null` if runtime-known. -pub fn value(air: Air, inst: Inst.Ref, mod: *const Module) ?Value { +pub fn value(air: Air, inst: Inst.Ref, mod: *Module) !?Value { const ref_int = @enumToInt(inst); if (ref_int < ref_start_index) { const ip_index = @intToEnum(InternPool.Index, ref_int); diff --git a/src/Module.zig b/src/Module.zig index f56235c933..3f5dc8039e 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -5750,7 +5750,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { const arg_val = if (!arg_tv.val.isGenericPoison()) arg_tv.val - else if (arg_tv.ty.onePossibleValue(mod)) |opv| + else if (try arg_tv.ty.onePossibleValue(mod)) |opv| opv else break :t arg_tv.ty; @@ -6887,6 +6887,16 @@ pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { } pub fn intValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { + if (std.debug.runtime_safety) { + // TODO: decide if this also works for ABI int types like enums + const tag = ty.zigTypeTag(mod); + assert(tag == .Int or tag == .ComptimeInt); + } + if (@TypeOf(x) == comptime_int) { + if (comptime std.math.cast(u64, x)) |casted| return intValue_u64(mod, ty, casted); + if (comptime std.math.cast(i64, x)) |casted| return intValue_i64(mod, ty, casted); + @compileError("Out-of-range comptime_int passed to Module.intValue"); + } if (std.math.cast(u64, x)) |casted| return intValue_u64(mod, ty, casted); if (std.math.cast(i64, x)) |casted| return intValue_i64(mod, ty, casted); var limbs_buffer: [4]usize = undefined; diff --git a/src/Sema.zig b/src/Sema.zig index dc5bb1cdea..9b1da74982 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -3062,9 +3062,9 @@ fn zirEnumDecl( } } else if (any_values) { const tag_val = if (last_tag_val) |val| - try sema.intAdd(val, Value.one, enum_obj.tag_ty) + try sema.intAdd(val, try mod.intValue(enum_obj.tag_ty, 1), enum_obj.tag_ty) else - Value.zero; + try mod.intValue(enum_obj.tag_ty, 0); last_tag_val = tag_val; const copied_tag_val = try tag_val.copy(decl_arena_allocator); const gop_val = enum_obj.values.getOrPutAssumeCapacityContext(copied_tag_val, .{ @@ -4709,7 +4709,7 @@ fn zirValidateArrayInit( // Determine whether the value stored to this pointer is comptime-known. 
if (array_ty.isTuple()) { - if (array_ty.structFieldValueComptime(mod, i)) |opv| { + if (try array_ty.structFieldValueComptime(mod, i)) |opv| { element_vals[i] = opv; continue; } @@ -8132,7 +8132,7 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat if (!op_ty.isAnyError()) { const names = op_ty.errorSetNames(); switch (names.len) { - 0 => return sema.addConstant(Type.err_int, Value.zero), + 0 => return sema.addConstant(Type.err_int, try mod.intValue(Type.err_int, 0)), 1 => return sema.addIntUnsigned(Type.err_int, sema.mod.global_error_set.get(names[0]).?), else => {}, } @@ -8167,7 +8167,7 @@ fn zirIntToError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat try sema.requireRuntimeBlock(block, src, operand_src); if (block.wantSafety()) { const is_lt_len = try block.addUnOp(.cmp_lt_errors_len, operand); - const zero_val = try sema.addConstant(Type.err_int, Value.zero); + const zero_val = try sema.addConstant(Type.err_int, try mod.intValue(Type.err_int, 0)); const is_non_zero = try block.addBinOp(.cmp_neq, operand, zero_val); const ok = try block.addBinOp(.bit_and, is_lt_len, is_non_zero); try sema.addSafetyCheck(block, ok, .invalid_error_code); @@ -9656,7 +9656,7 @@ fn intCast( if (wanted_bits == 0) { const ok = if (is_vector) ok: { - const zeros = try Value.Tag.repeated.create(sema.arena, Value.zero); + const zeros = try Value.Tag.repeated.create(sema.arena, try mod.intValue(operand_scalar_ty, 0)); const zero_inst = try sema.addConstant(sema.typeOf(operand), zeros); const is_in_range = try block.addCmpVector(operand, zero_inst, .eq); const all_in_range = try block.addInst(.{ @@ -9665,7 +9665,7 @@ fn intCast( }); break :ok all_in_range; } else ok: { - const zero_inst = try sema.addConstant(sema.typeOf(operand), Value.zero); + const zero_inst = try sema.addConstant(sema.typeOf(operand), try mod.intValue(operand_ty, 0)); const is_in_range = try block.addBinOp(.cmp_lte, operand, zero_inst); break :ok is_in_range; }; @@ -9705,8 +9705,9 @@ fn intCast( // If the destination type is signed, then we need to double its // range to account for negative values. 
const dest_range_val = if (wanted_info.signedness == .signed) range_val: { - const range_minus_one = try dest_max_val.shl(Value.one, unsigned_operand_ty, sema.arena, sema.mod); - break :range_val try sema.intAdd(range_minus_one, Value.one, unsigned_operand_ty); + const one = try mod.intValue(unsigned_operand_ty, 1); + const range_minus_one = try dest_max_val.shl(one, unsigned_operand_ty, sema.arena, sema.mod); + break :range_val try sema.intAdd(range_minus_one, one, unsigned_operand_ty); } else dest_max_val; const dest_range = try sema.addConstant(unsigned_operand_ty, dest_range_val); @@ -9747,7 +9748,7 @@ fn intCast( // no shrinkage, yes sign loss // requirement: signed to unsigned >= 0 const ok = if (is_vector) ok: { - const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero); + const zero_val = try Value.Tag.repeated.create(sema.arena, try mod.intValue(operand_scalar_ty, 0)); const zero_inst = try sema.addConstant(operand_ty, zero_val); const is_in_range = try block.addCmpVector(operand, zero_inst, .gte); const all_in_range = try block.addInst(.{ @@ -9759,7 +9760,7 @@ fn intCast( }); break :ok all_in_range; } else ok: { - const zero_inst = try sema.addConstant(operand_ty, Value.zero); + const zero_inst = try sema.addConstant(operand_ty, try mod.intValue(operand_ty, 0)); const is_in_range = try block.addBinOp(.cmp_gte, operand, zero_inst); break :ok is_in_range; }; @@ -11250,7 +11251,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError while (item.compareScalar(.lte, item_last, operand_ty, mod)) : ({ // Previous validation has resolved any possible lazy values. - item = try sema.intAddScalar(item, Value.one, operand_ty); + item = try sema.intAddScalar(item, try mod.intValue(operand_ty, 1), operand_ty); }) { cases_len += 1; @@ -11696,7 +11697,7 @@ const RangeSetUnhandledIterator = struct { fn next(it: *RangeSetUnhandledIterator) !?Value { while (it.range_i < it.ranges.len) : (it.range_i += 1) { if (!it.first) { - it.cur = try it.sema.intAddScalar(it.cur, Value.one, it.ty); + it.cur = try it.sema.intAddScalar(it.cur, try it.sema.mod.intValue(it.ty, 1), it.ty); } it.first = false; if (it.cur.compareScalar(.lt, it.ranges[it.range_i].first, it.ty, it.sema.mod)) { @@ -11705,7 +11706,7 @@ const RangeSetUnhandledIterator = struct { it.cur = it.ranges[it.range_i].last; } if (!it.first) { - it.cur = try it.sema.intAddScalar(it.cur, Value.one, it.ty); + it.cur = try it.sema.intAddScalar(it.cur, try it.sema.mod.intValue(it.ty, 1), it.ty); } it.first = false; if (it.cur.compareScalar(.lte, it.max, it.ty, it.sema.mod)) { @@ -12150,7 +12151,7 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A // into the final binary, and never loads the data into memory. // - When a Decl is destroyed, it can free the `*Module.EmbedFile`. 
embed_file.owner_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), embed_file.bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), embed_file.bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes_including_null), 0, // default alignment ); @@ -12235,14 +12236,14 @@ fn zirShl( var i: usize = 0; while (i < rhs_ty.vectorLen(mod)) : (i += 1) { const rhs_elem = try rhs_val.elemValue(sema.mod, i); - if (rhs_elem.compareHetero(.lt, Value.zero, mod)) { + if (rhs_elem.compareHetero(.lt, try mod.intValue(scalar_rhs_ty, 0), mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{ rhs_elem.fmtValue(scalar_ty, sema.mod), i, }); } } - } else if (rhs_val.compareHetero(.lt, Value.zero, mod)) { + } else if (rhs_val.compareHetero(.lt, try mod.intValue(rhs_ty, 0), mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{ rhs_val.fmtValue(scalar_ty, sema.mod), }); @@ -12348,7 +12349,7 @@ fn zirShl( }) else ov_bit; - const zero_ov = try sema.addConstant(Type.u1, Value.zero); + const zero_ov = try sema.addConstant(Type.u1, try mod.intValue(Type.u1, 0)); const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov); try sema.addSafetyCheck(block, no_ov, .shl_overflow); @@ -12417,14 +12418,14 @@ fn zirShr( var i: usize = 0; while (i < rhs_ty.vectorLen(mod)) : (i += 1) { const rhs_elem = try rhs_val.elemValue(sema.mod, i); - if (rhs_elem.compareHetero(.lt, Value.zero, mod)) { + if (rhs_elem.compareHetero(.lt, try mod.intValue(rhs_ty.childType(mod), 0), mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{ rhs_elem.fmtValue(scalar_ty, sema.mod), i, }); } } - } else if (rhs_val.compareHetero(.lt, Value.zero, mod)) { + } else if (rhs_val.compareHetero(.lt, try mod.intValue(rhs_ty, 0), mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{ rhs_val.fmtValue(scalar_ty, sema.mod), }); @@ -13156,9 +13157,9 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. } const lhs = if (rhs_ty.zigTypeTag(mod) == .Vector) - try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, Value.zero)) + try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, try mod.intValue(rhs_scalar_ty, 0))) else - try sema.resolveInst(.zero); + try sema.addConstant(rhs_ty, try mod.intValue(rhs_ty, 0)); return sema.analyzeArithmetic(block, .sub, lhs, rhs, src, lhs_src, rhs_src, true); } @@ -13180,9 +13181,9 @@ fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
} const lhs = if (rhs_ty.zigTypeTag(mod) == .Vector) - try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, Value.zero)) + try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, try mod.intValue(rhs_scalar_ty, 0))) else - try sema.resolveInst(.zero); + try sema.addConstant(rhs_ty, try mod.intValue(rhs_ty, 0)); return sema.analyzeArithmetic(block, .subwrap, lhs, rhs, src, lhs_src, rhs_src, true); } @@ -13293,9 +13294,14 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } } @@ -13318,7 +13324,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins if (lhs_val.isUndef()) { if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { - if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) { + if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) { return sema.addConstUndef(resolved_type); } } @@ -13427,9 +13433,14 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.failWithUseOfUndef(block, rhs_src); } else { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } } @@ -13507,8 +13518,13 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } else ok: { const remainder = try block.addBinOp(.rem, casted_lhs, casted_rhs); + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; if (resolved_type.zigTypeTag(mod) == .Vector) { - const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero); + const zero_val = try Value.Tag.repeated.create(sema.arena, scalar_zero); const zero = try sema.addConstant(resolved_type, zero_val); const eql = try block.addCmpVector(remainder, zero, .eq); break :ok try block.addInst(.{ @@ -13519,7 +13535,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }, }); } else { - const zero = try sema.addConstant(resolved_type, Value.zero); + const zero = try sema.addConstant(resolved_type, scalar_zero); const is_in_range = try block.addBinOp(.cmp_eq, remainder, zero); break :ok is_in_range; } @@ -13592,9 +13608,14 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (maybe_lhs_val) |lhs_val| { if 
(!lhs_val.isUndef()) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } } @@ -13612,7 +13633,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (lhs_val.isUndef()) { if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { - if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) { + if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) { return sema.addConstUndef(resolved_type); } } @@ -13708,9 +13729,14 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } } @@ -13727,7 +13753,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (lhs_val.isUndef()) { if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { - if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) { + if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) { return sema.addConstUndef(resolved_type); } } @@ -13862,8 +13888,9 @@ fn addDivByZeroSafety( if (maybe_rhs_val != null) return; const mod = sema.mod; + const scalar_zero = if (is_int) try mod.intValue(resolved_type.scalarType(mod), 0) else Value.float_zero; // TODO migrate to internpool const ok = if (resolved_type.zigTypeTag(mod) == .Vector) ok: { - const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero); + const zero_val = try Value.Tag.repeated.create(sema.arena, scalar_zero); const zero = try sema.addConstant(resolved_type, zero_val); const ok = try block.addCmpVector(casted_rhs, zero, .neq); break :ok try block.addInst(.{ @@ -13874,7 +13901,7 @@ fn addDivByZeroSafety( } }, }); } else ok: { - const zero = try sema.addConstant(resolved_type, Value.zero); + const zero = try sema.addConstant(resolved_type, scalar_zero); break :ok try block.addBinOp(if (is_int) .cmp_neq else .cmp_neq_optimized, casted_rhs, zero); }; try sema.addSafetyCheck(block, ok, .divide_by_zero); @@ -13946,9 +13973,14 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
return sema.failWithUseOfUndef(block, lhs_src); } if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } } else if (lhs_scalar_ty.isSignedInt(mod)) { @@ -14325,6 +14357,7 @@ fn zirOverflowArithmetic( wrapped: Value = Value.@"unreachable", overflow_bit: Value, } = result: { + const zero = try mod.intValue(dest_ty.scalarType(mod), 0); switch (zir_tag) { .add_with_overflow => { // If either of the arguments is zero, `false` is returned and the other is stored @@ -14332,12 +14365,12 @@ fn zirOverflowArithmetic( // Otherwise, if either of the argument is undefined, undefined is returned. if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = rhs }; + break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs }; } } if (maybe_rhs_val) |rhs_val| { if (!rhs_val.isUndef() and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs }; + break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; } } if (maybe_lhs_val) |lhs_val| { @@ -14358,7 +14391,7 @@ fn zirOverflowArithmetic( if (rhs_val.isUndef()) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; } else if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs }; + break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; } else if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; @@ -14373,12 +14406,13 @@ fn zirOverflowArithmetic( // If either of the arguments is zero, the result is zero and no overflow occured. // If either of the arguments is one, the result is the other and no overflow occured. // Otherwise, if either of the arguments is undefined, both results are undefined. 
+ const scalar_one = try mod.intValue(dest_ty.scalarType(mod), 1); if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs }; - } else if (try sema.compareAll(lhs_val, .eq, try maybeRepeated(sema, dest_ty, Value.one), dest_ty)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = rhs }; + break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; + } else if (try sema.compareAll(lhs_val, .eq, try maybeRepeated(sema, dest_ty, scalar_one), dest_ty)) { + break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs }; } } } @@ -14386,9 +14420,9 @@ fn zirOverflowArithmetic( if (maybe_rhs_val) |rhs_val| { if (!rhs_val.isUndef()) { if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = rhs }; - } else if (try sema.compareAll(rhs_val, .eq, try maybeRepeated(sema, dest_ty, Value.one), dest_ty)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs }; + break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs }; + } else if (try sema.compareAll(rhs_val, .eq, try maybeRepeated(sema, dest_ty, scalar_one), dest_ty)) { + break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; } } } @@ -14410,12 +14444,12 @@ fn zirOverflowArithmetic( // Oterhwise if either of the arguments is undefined, both results are undefined. if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs }; + break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; } } if (maybe_rhs_val) |rhs_val| { if (!rhs_val.isUndef() and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs }; + break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; } } if (maybe_lhs_val) |lhs_val| { @@ -14766,6 +14800,11 @@ fn analyzeArithmetic( // If either of the operands are inf, and the other operand is zero, // the result is nan. // If either of the operands are nan, the result is nan. 
+ const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (lhs_val.isNan()) { @@ -14783,11 +14822,11 @@ fn analyzeArithmetic( break :lz; } const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(lhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { return casted_rhs; } } @@ -14813,11 +14852,11 @@ fn analyzeArithmetic( break :rz; } const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(rhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { @@ -14849,15 +14888,20 @@ fn analyzeArithmetic( // If either of the operands are zero, result is zero. // If either of the operands are one, result is the other operand. // If either of the operands are undefined, result is undefined. + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(lhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { return casted_rhs; } } @@ -14869,11 +14913,11 @@ fn analyzeArithmetic( } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(rhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { @@ -14892,15 +14936,20 @@ fn analyzeArithmetic( // If either of the operands are zero, result is zero. // If either of the operands are one, result is the other operand. // If either of the operands are undefined, result is undefined. 
+ const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(lhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { return casted_rhs; } } @@ -14911,11 +14960,11 @@ fn analyzeArithmetic( } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(rhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { @@ -14968,7 +15017,7 @@ fn analyzeArithmetic( }) else ov_bit; - const zero_ov = try sema.addConstant(Type.u1, Value.zero); + const zero_ov = try sema.addConstant(Type.u1, try mod.intValue(Type.u1, 0)); const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov); try sema.addSafetyCheck(block, no_ov, .integer_overflow); @@ -15785,7 +15834,7 @@ fn zirBuiltinSrc( const name = std.mem.span(fn_owner_decl.name); const bytes = try anon_decl.arena().dupe(u8, name[0 .. name.len + 1]); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len - 1, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len - 1, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes), 0, // default alignment ); @@ -15798,7 +15847,7 @@ fn zirBuiltinSrc( // The compiler must not call realpath anywhere. const name = try fn_owner_decl.getFileScope().fullPathZ(anon_decl.arena()); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), name.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), name.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), name[0 .. name.len + 1]), 0, // default alignment ); @@ -16148,7 +16197,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, name); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), 0, // default alignment ); @@ -16256,7 +16305,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, name); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -16344,7 +16393,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, name); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -16454,7 +16503,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai else try std.fmt.allocPrintZ(anon_decl.arena(), "{d}", .{i}); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -16496,7 +16545,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, name); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -16692,7 +16741,7 @@ fn typeInfoNamespaceDecls( defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, mem.sliceTo(decl.name, 0)); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), 0, // default alignment ); @@ -17884,7 +17933,7 @@ fn zirStructInit( } found_fields[field_index] = item.data.field_type; field_inits[field_index] = try sema.resolveInst(item.data.init); - if (!is_packed) if (resolved_ty.structFieldValueComptime(mod, field_index)) |default_value| { + if (!is_packed) if (try resolved_ty.structFieldValueComptime(mod, field_index)) |default_value| { const init_val = (try sema.resolveMaybeUndefVal(field_inits[field_index])) orelse { return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known"); }; @@ -18544,8 +18593,8 @@ fn zirBoolToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const operand = try sema.resolveInst(inst_data.operand); if (try sema.resolveMaybeUndefVal(operand)) |val| { if (val.isUndef()) return sema.addConstUndef(Type.u1); - if (val.toBool(mod)) return sema.addConstant(Type.u1, Value.one); - return sema.addConstant(Type.u1, Value.zero); + if (val.toBool(mod)) return sema.addConstant(Type.u1, try mod.intValue(Type.u1, 1)); + return sema.addConstant(Type.u1, try mod.intValue(Type.u1, 0)); } return block.addUnOp(.bool_to_int, operand); } @@ -19761,7 +19810,7 @@ fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const bytes = try ty.nameAllocArena(anon_decl.arena(), mod); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -19804,17 +19853,17 @@ fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! try sema.requireRuntimeBlock(block, inst_data.src(), operand_src); if (dest_ty.intInfo(mod).bits == 0) { if (block.wantSafety()) { - const ok = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_eq_optimized else .cmp_eq, operand, try sema.addConstant(operand_ty, Value.zero)); + const ok = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_eq_optimized else .cmp_eq, operand, try sema.addConstant(operand_ty, try mod.intValue(operand_ty, 0))); try sema.addSafetyCheck(block, ok, .integer_part_out_of_bounds); } - return sema.addConstant(dest_ty, Value.zero); + return sema.addConstant(dest_ty, try mod.intValue(dest_ty, 0)); } const result = try block.addTyOp(if (block.float_mode == .Optimized) .float_to_int_optimized else .float_to_int, dest_ty, operand); if (block.wantSafety()) { const back = try block.addTyOp(.int_to_float, operand_ty, result); const diff = try block.addBinOp(.sub, operand, back); - const ok_pos = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_lt_optimized else .cmp_lt, diff, try sema.addConstant(operand_ty, Value.one)); - const ok_neg = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_gt_optimized else .cmp_gt, diff, try sema.addConstant(operand_ty, Value.negative_one)); + const ok_pos = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_lt_optimized else .cmp_lt, diff, try sema.addConstant(operand_ty, try mod.intValue(operand_ty, 1))); + const ok_neg = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_gt_optimized else .cmp_gt, diff, try sema.addConstant(operand_ty, try mod.intValue(operand_ty, -1))); const ok = try block.addBinOp(.bool_and, ok_pos, ok_neg); try sema.addSafetyCheck(block, ok, .integer_part_out_of_bounds); } @@ -21398,7 +21447,7 @@ fn analyzeShuffle( 
expand_mask_values[i] = try mod.intValue(Type.comptime_int, i); } while (i < max_len) : (i += 1) { - expand_mask_values[i] = Value.negative_one; + expand_mask_values[i] = try mod.intValue(Type.comptime_int, -1); } const expand_mask = try Value.Tag.aggregate.create(sema.arena, expand_mask_values); @@ -24504,7 +24553,7 @@ fn finishFieldCallBind( const container_ty = ptr_ty.childType(mod); if (container_ty.zigTypeTag(mod) == .Struct) { - if (container_ty.structFieldValueComptime(mod, field_index)) |default_val| { + if (try container_ty.structFieldValueComptime(mod, field_index)) |default_val| { return .{ .direct = try sema.addConstant(field_ty, default_val) }; } } @@ -24815,7 +24864,7 @@ fn tupleFieldValByIndex( const mod = sema.mod; const field_ty = tuple_ty.structFieldType(field_index); - if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { + if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { return sema.addConstant(field_ty, default_value); } @@ -24828,7 +24877,7 @@ fn tupleFieldValByIndex( return sema.addConstant(field_ty, field_values[field_index]); } - if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { + if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { return sema.addConstant(field_ty, default_val); } @@ -25205,7 +25254,7 @@ fn tupleFieldPtr( .@"addrspace" = tuple_ptr_ty.ptrAddressSpace(mod), }); - if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { + if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { const val = try Value.Tag.comptime_field_ptr.create(sema.arena, .{ .field_ty = field_ty, .field_val = default_val, @@ -25256,13 +25305,13 @@ fn tupleField( const field_ty = tuple_ty.structFieldType(field_index); - if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { + if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { return sema.addConstant(field_ty, default_value); // comptime field } if (try sema.resolveMaybeUndefVal(tuple)) |tuple_val| { if (tuple_val.isUndef()) return sema.addConstUndef(field_ty); - return sema.addConstant(field_ty, tuple_val.fieldValue(tuple_ty, mod, field_index)); + return sema.addConstant(field_ty, try tuple_val.fieldValue(tuple_ty, mod, field_index)); } try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_src); @@ -25812,7 +25861,7 @@ fn coerceExtra( if (inst_info.size == .Slice) { assert(dest_info.sentinel == null); if (inst_info.sentinel == null or - !inst_info.sentinel.?.eql(Value.zero, dest_info.pointee_type, sema.mod)) + !inst_info.sentinel.?.eql(try mod.intValue(dest_info.pointee_type, 0), dest_info.pointee_type, sema.mod)) break :p; const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty); @@ -25879,7 +25928,7 @@ fn coerceExtra( try mod.intValue(Type.usize, dest_info.@"align") else try dest_info.pointee_type.lazyAbiAlignment(mod, sema.arena), - .len = Value.zero, + .len = try mod.intValue(Type.usize, 0), }); return sema.addConstant(dest_ty, slice_val); } @@ -28234,7 +28283,7 @@ fn beginComptimePtrLoad( const field_ty = field_ptr.container_ty.structFieldType(field_index); deref.pointee = TypedValue{ .ty = field_ty, - .val = tv.val.fieldValue(tv.ty, mod, field_index), + .val = try tv.val.fieldValue(tv.ty, mod, field_index), }; } break :blk deref; @@ -32532,9 +32581,9 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { break :blk try val.copy(decl_arena_allocator); } else blk: { const val 
= if (last_tag_val) |val| - try sema.intAdd(val, Value.one, int_tag_ty) + try sema.intAdd(val, try mod.intValue(int_tag_ty, 1), int_tag_ty) else - Value.zero; + try mod.intValue(int_tag_ty, 0); last_tag_val = val; break :blk try val.copy(decl_arena_allocator); @@ -32903,7 +32952,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => |int_type| { if (int_type.bits == 0) { - return Value.zero; + return try mod.intValue(ty, 0); } else { return null; } @@ -33049,7 +33098,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { } if (enum_obj.fields.count() == 1) { if (enum_obj.values.count() == 0) { - return Value.zero; // auto-numbered + return try mod.intValue(ty, 0); // auto-numbered } else { return enum_obj.values.keys()[0]; } @@ -33066,7 +33115,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { switch (enum_obj.fields.count()) { 0 => return Value.@"unreachable", 1 => if (enum_obj.values.count() == 0) { - return Value.zero; // auto-numbered + return try mod.intValue(ty, 0); // auto-numbered } else { return enum_obj.values.keys()[0]; }, @@ -33078,14 +33127,14 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { const enum_simple = resolved_ty.castTag(.enum_simple).?.data; switch (enum_simple.fields.count()) { 0 => return Value.@"unreachable", - 1 => return Value.zero, + 1 => return try mod.intValue(ty, 0), else => return null, } }, .enum_nonexhaustive => { const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty; if (tag_ty.zigTypeTag(mod) != .ComptimeInt and !(try sema.typeHasRuntimeBits(tag_ty))) { - return Value.zero; + return try mod.intValue(ty, 0); } else { return null; } diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 28212a164c..828fb610d4 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -61,7 +61,10 @@ pub fn format( ) !void { _ = options; comptime std.debug.assert(fmt.len == 0); - return ctx.tv.print(writer, 3, ctx.mod); + return ctx.tv.print(writer, 3, ctx.mod) catch |err| switch (err) { + error.OutOfMemory => @panic("OOM"), // We're not allowed to return this from a format function + else => |e| return e, + }; } /// Prints the Value according to the Type, not according to the Value Tag. 
@@ -70,7 +73,7 @@ pub fn print( writer: anytype, level: u8, mod: *Module, -) @TypeOf(writer).Error!void { +) (@TypeOf(writer).Error || Allocator.Error)!void { var val = tv.val; var ty = tv.ty; if (val.isVariable(mod)) @@ -95,7 +98,7 @@ pub fn print( } try print(.{ .ty = ty.structFieldType(i), - .val = val.fieldValue(ty, mod, i), + .val = try val.fieldValue(ty, mod, i), }, writer, level - 1, mod); } if (ty.structFieldCount() > max_aggregate_items) { @@ -112,7 +115,7 @@ pub fn print( var i: u32 = 0; while (i < max_len) : (i += 1) { - const elem = val.fieldValue(ty, mod, i); + const elem = try val.fieldValue(ty, mod, i); if (elem.isUndef()) break :str; buf[i] = std.math.cast(u8, elem.toUnsignedInt(mod)) orelse break :str; } @@ -129,7 +132,7 @@ pub fn print( if (i != 0) try writer.writeAll(", "); try print(.{ .ty = elem_ty, - .val = val.fieldValue(ty, mod, i), + .val = try val.fieldValue(ty, mod, i), }, writer, level - 1, mod); } if (len > max_aggregate_items) { diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 95a8350c7d..ea3814a20e 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -4311,7 +4311,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. - if (self.air.value(callee, mod)) |func_value| { + if (try self.air.value(callee, mod)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; @@ -6154,7 +6154,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{ .ty = inst_ty, - .val = self.air.value(inst, mod).?, + .val = (try self.air.value(inst, mod)).?, }); switch (self.air.instructions.items(.tag)[inst_index]) { diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index cc2bc3a613..967a6dd753 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -4291,7 +4291,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. 
- if (self.air.value(callee, mod)) |func_value| { + if (try self.air.value(callee, mod)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; @@ -6101,7 +6101,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{ .ty = inst_ty, - .val = self.air.value(inst, mod).?, + .val = (try self.air.value(inst, mod)).?, }); switch (self.air.instructions.items(.tag)[inst_index]) { diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 1008d527f6..5cf621488e 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1743,7 +1743,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } } - if (self.air.value(callee, mod)) |func_value| { + if (try self.air.value(callee, mod)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); @@ -2551,7 +2551,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{ .ty = inst_ty, - .val = self.air.value(inst, mod).?, + .val = (try self.air.value(inst, mod)).?, }); switch (self.air.instructions.items(.tag)[inst_index]) { diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 4231222d4b..2cb35460c2 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -1343,7 +1343,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. - if (self.air.value(callee, mod)) |func_value| { + if (try self.air.value(callee, mod)) |func_value| { if (self.bin_file.tag == link.File.Elf.base_tag) { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; @@ -4575,7 +4575,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { return self.genTypedValue(.{ .ty = ty, - .val = self.air.value(ref, mod).?, + .val = (try self.air.value(ref, mod)).?, }); } diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 327e2c13e0..36b805cf94 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -789,7 +789,7 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue { assert(!gop.found_existing); const mod = func.bin_file.base.options.module.?; - const val = func.air.value(ref, mod).?; + const val = (try func.air.value(ref, mod)).?; const ty = func.typeOf(ref); if (!ty.hasRuntimeBitsIgnoreComptime(mod) and !ty.isInt(mod) and !ty.isError(mod)) { gop.value_ptr.* = WValue{ .none = {} }; @@ -2195,7 +2195,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, mod); const callee: ?Decl.Index = blk: { - const func_val = func.air.value(pl_op.operand, mod) orelse break :blk null; + const func_val = (try func.air.value(pl_op.operand, mod)) orelse break :blk null; if (func_val.castTag(.function)) |function| { _ = try func.bin_file.getOrCreateAtomForDecl(function.data.owner_decl); @@ -3138,7 +3138,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. 
const is_pl = val.errorUnionIsPayload(); - const err_val = if (!is_pl) val else Value.zero; + const err_val = if (!is_pl) val else try mod.intValue(error_type, 0); return func.lowerConstant(err_val, error_type); } return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{}); @@ -3792,7 +3792,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { errdefer func.gpa.free(values); for (items, 0..) |ref, i| { - const item_val = func.air.value(ref, mod).?; + const item_val = (try func.air.value(ref, mod)).?; const int_val = func.valueAsI32(item_val, target_ty); if (lowest_maybe == null or int_val < lowest_maybe.?) { lowest_maybe = int_val; @@ -5048,7 +5048,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result = try func.allocStack(result_ty); const offset = try func.buildPointerOffset(result, 0, .new); // pointer to offset for (elements, 0..) |elem, elem_index| { - if (result_ty.structFieldValueComptime(mod, elem_index) != null) continue; + if ((try result_ty.structFieldValueComptime(mod, elem_index)) != null) continue; const elem_ty = result_ty.structFieldType(elem_index); const elem_size = @intCast(u32, elem_ty.abiSize(mod)); diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 51c6bc79e6..b208656a41 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2768,7 +2768,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { const full_ty = try mod.vectorType(.{ .len = @intCast(u32, @divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits)), - .child = src_ty.childType(mod).ip_index, + .child = elem_ty.ip_index, }); const full_abi_size = @intCast(u32, full_ty.abiSize(mod)); @@ -8107,7 +8107,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. - if (self.air.value(callee, mod)) |func_value| { + if (try self.air.value(callee, mod)) |func_value| { if (if (func_value.castTag(.function)) |func_payload| func_payload.data.owner_decl else if (func_value.castTag(.decl_ref)) |decl_ref_payload| @@ -11265,7 +11265,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { .{ .immediate = result_ty.abiSize(mod) }, ); for (elements, 0..) |elem, elem_i| { - if (result_ty.structFieldValueComptime(mod, elem_i) != null) continue; + if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue; const elem_ty = result_ty.structFieldType(elem_i); const elem_bit_size = @intCast(u32, elem_ty.bitSize(mod)); @@ -11337,7 +11337,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { } } } else for (elements, 0..) 
|elem, elem_i| { - if (result_ty.structFieldValueComptime(mod, elem_i) != null) continue; + if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue; const elem_ty = result_ty.structFieldType(elem_i); const elem_off = @intCast(i32, result_ty.structFieldOffset(elem_i, mod)); @@ -11601,7 +11601,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { const gop = try self.const_tracking.getOrPut(self.gpa, inst); if (!gop.found_existing) gop.value_ptr.* = InstTracking.init(try self.genTypedValue(.{ .ty = ty, - .val = self.air.value(ref, mod).?, + .val = (try self.air.value(ref, mod)).?, })); break :tracking gop.value_ptr; }, @@ -11614,7 +11614,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { } } - return self.genTypedValue(.{ .ty = ty, .val = self.air.value(ref, mod).? }); + return self.genTypedValue(.{ .ty = ty, .val = (try self.air.value(ref, mod)).? }); } fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) *InstTracking { diff --git a/src/codegen.zig b/src/codegen.zig index 9c9868892f..8bd478bf7c 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -675,7 +675,7 @@ pub fn generateSymbol( const is_payload = typed_value.val.errorUnionIsPayload(); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - const err_val = if (is_payload) Value.zero else typed_value.val; + const err_val = if (is_payload) try mod.intValue(error_ty, 0) else typed_value.val; return generateSymbol(bin_file, src_loc, .{ .ty = error_ty, .val = err_val, @@ -690,7 +690,7 @@ pub fn generateSymbol( if (error_align > payload_align) { switch (try generateSymbol(bin_file, src_loc, .{ .ty = error_ty, - .val = if (is_payload) Value.zero else typed_value.val, + .val = if (is_payload) try mod.intValue(error_ty, 0) else typed_value.val, }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, @@ -722,7 +722,7 @@ pub fn generateSymbol( const begin = code.items.len; switch (try generateSymbol(bin_file, src_loc, .{ .ty = error_ty, - .val = if (is_payload) Value.zero else typed_value.val, + .val = if (is_payload) try mod.intValue(error_ty, 0) else typed_value.val, }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, @@ -1280,7 +1280,7 @@ pub fn genTypedValue( if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. 
- const err_val = if (!is_pl) typed_value.val else Value.zero; + const err_val = if (!is_pl) typed_value.val else try mod.intValue(error_type, 0); return genTypedValue(bin_file, src_loc, .{ .ty = error_type, .val = err_val, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 9443c2298a..aaeec05562 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -287,7 +287,7 @@ pub const Function = struct { if (gop.found_existing) return gop.value_ptr.*; const mod = f.object.dg.module; - const val = f.air.value(ref, mod).?; + const val = (try f.air.value(ref, mod)).?; const ty = f.typeOf(ref); const result: CValue = if (lowersToArray(ty, mod)) result: { @@ -356,7 +356,7 @@ pub const Function = struct { .constant => |inst| { const mod = f.object.dg.module; const ty = f.typeOf(inst); - const val = f.air.value(inst, mod).?; + const val = (try f.air.value(inst, mod)).?; return f.object.dg.renderValue(w, ty, val, location); }, .undef => |ty| return f.object.dg.renderValue(w, ty, Value.undef, location), @@ -369,7 +369,7 @@ pub const Function = struct { .constant => |inst| { const mod = f.object.dg.module; const ty = f.typeOf(inst); - const val = f.air.value(inst, mod).?; + const val = (try f.air.value(inst, mod)).?; try w.writeAll("(*"); try f.object.dg.renderValue(w, ty, val, .Other); return w.writeByte(')'); @@ -383,7 +383,7 @@ pub const Function = struct { .constant => |inst| { const mod = f.object.dg.module; const ty = f.typeOf(inst); - const val = f.air.value(inst, mod).?; + const val = (try f.air.value(inst, mod)).?; try f.object.dg.renderValue(w, ty, val, .Other); try w.writeByte('.'); return f.writeCValue(w, member, .Other); @@ -397,7 +397,7 @@ pub const Function = struct { .constant => |inst| { const mod = f.object.dg.module; const ty = f.typeOf(inst); - const val = f.air.value(inst, mod).?; + const val = (try f.air.value(inst, mod)).?; try w.writeByte('('); try f.object.dg.renderValue(w, ty, val, .Other); try w.writeAll(")->"); @@ -690,7 +690,7 @@ pub const DeclGen = struct { location, ); try writer.print(") + {})", .{ - try dg.fmtIntLiteral(Type.usize, Value.one, .Other), + try dg.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1), .Other), }); }, } @@ -1253,7 +1253,7 @@ pub const DeclGen = struct { .ErrorUnion => { const payload_ty = ty.errorUnionPayload(); const error_ty = ty.errorUnionSet(); - const error_val = if (val.errorUnionIsPayload()) Value.zero else val; + const error_val = if (val.errorUnionIsPayload()) try mod.intValue(Type.anyerror, 0) else val; if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return dg.renderValue(writer, error_ty, error_val, location); @@ -3611,7 +3611,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { const ptr_val = try f.resolveInst(bin_op.lhs); const src_ty = f.typeOf(bin_op.rhs); - const val_is_undef = if (f.air.value(bin_op.rhs, mod)) |v| v.isUndefDeep() else false; + const val_is_undef = if (try f.air.value(bin_op.rhs, mod)) |v| v.isUndefDeep() else false; if (val_is_undef) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); @@ -4183,7 +4183,7 @@ fn airCall( callee: { known: { const fn_decl = fn_decl: { - const callee_val = f.air.value(pl_op.operand, mod) orelse break :known; + const callee_val = (try f.air.value(pl_op.operand, mod)) orelse break :known; break :fn_decl switch (callee_val.tag()) { .extern_fn => callee_val.castTag(.extern_fn).?.data.owner_decl, .function => callee_val.castTag(.function).?.data.owner_decl, @@ -4269,7 +4269,7 @@ fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue { const mod = 
f.object.dg.module; const pl_op = f.air.instructions.items(.data)[inst].pl_op; const name = f.air.nullTerminatedString(pl_op.payload); - const operand_is_undef = if (f.air.value(pl_op.operand, mod)) |v| v.isUndefDeep() else false; + const operand_is_undef = if (try f.air.value(pl_op.operand, mod)) |v| v.isUndefDeep() else false; if (!operand_is_undef) _ = try f.resolveInst(pl_op.operand); try reap(f, inst, &.{pl_op.operand}); @@ -4735,7 +4735,7 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { try f.renderType(writer, Type.usize); try writer.writeByte(')'); } - try f.object.dg.renderValue(writer, condition_ty, f.air.value(item, mod).?, .Other); + try f.object.dg.renderValue(writer, condition_ty, (try f.air.value(item, mod)).?, .Other); try writer.writeByte(':'); } try writer.writeByte(' '); @@ -5069,7 +5069,7 @@ fn airIsNull( // operand is a regular pointer, test `operand !=/== NULL` TypedValue{ .ty = optional_ty, .val = Value.null } else if (payload_ty.zigTypeTag(mod) == .ErrorSet) - TypedValue{ .ty = payload_ty, .val = Value.zero } + TypedValue{ .ty = payload_ty, .val = try mod.intValue(payload_ty, 0) } else if (payload_ty.isSlice(mod) and optional_ty.optionalReprIsPayload(mod)) rhs: { try writer.writeAll(".ptr"); const slice_ptr_ty = payload_ty.slicePtrFieldType(&slice_ptr_buf, mod); @@ -5325,7 +5325,7 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { }, .end => { try f.writeCValue(writer, field_ptr_val, .Other); - try writer.print(" - {}", .{try f.fmtIntLiteral(Type.usize, Value.one)}); + try writer.print(" - {}", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1))}); }, } @@ -5378,7 +5378,7 @@ fn fieldPtr( .end => { try writer.writeByte('('); try f.writeCValue(writer, container_ptr_val, .Other); - try writer.print(" + {})", .{try f.fmtIntLiteral(Type.usize, Value.one)}); + try writer.print(" + {})", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1))}); }, } @@ -5546,7 +5546,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { else try f.writeCValueMember(writer, operand, .{ .identifier = "error" }) else - try f.object.dg.renderValue(writer, error_ty, Value.zero, .Initializer); + try f.object.dg.renderValue(writer, error_ty, try mod.intValue(error_ty, 0), .Initializer); } try writer.writeAll(";\n"); return local; @@ -5673,7 +5673,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { try f.writeCValueDeref(writer, operand); try writer.writeAll(" = "); - try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other); + try f.object.dg.renderValue(writer, error_ty, try mod.intValue(error_ty, 0), .Other); try writer.writeAll(";\n "); return operand; @@ -5681,7 +5681,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ty_op.operand}); try f.writeCValueDeref(writer, operand); try writer.writeAll(".error = "); - try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other); + try f.object.dg.renderValue(writer, error_ty, try mod.intValue(error_ty, 0), .Other); try writer.writeAll(";\n"); // Then return the payload pointer (only if it is used) @@ -5737,7 +5737,7 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { else try f.writeCValueMember(writer, local, .{ .identifier = "error" }); try a.assign(f, writer); - try f.object.dg.renderValue(writer, err_ty, Value.zero, .Other); + try f.object.dg.renderValue(writer, err_ty, try mod.intValue(err_ty, 0), .Other); 
try a.end(f, writer); } return local; @@ -5768,11 +5768,11 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const else try f.writeCValue(writer, operand, .Other) else - try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other); + try f.object.dg.renderValue(writer, error_ty, try mod.intValue(error_ty, 0), .Other); try writer.writeByte(' '); try writer.writeAll(operator); try writer.writeByte(' '); - try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other); + try f.object.dg.renderValue(writer, error_ty, try mod.intValue(error_ty, 0), .Other); try writer.writeAll(";\n"); return local; } @@ -5798,7 +5798,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { } else if (array_ty.hasRuntimeBitsIgnoreComptime(mod)) { try writer.writeAll("&("); try f.writeCValueDeref(writer, operand); - try writer.print(")[{}]", .{try f.fmtIntLiteral(Type.usize, Value.zero)}); + try writer.print(")[{}]", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 0))}); } else try f.writeCValue(writer, operand, .Initializer); try writer.writeAll("; "); @@ -6022,7 +6022,7 @@ fn airCmpBuiltinCall( try writer.writeByte(')'); if (!ref_ret) try writer.print(" {s} {}", .{ compareOperatorC(operator), - try f.fmtIntLiteral(Type.i32, Value.zero), + try f.fmtIntLiteral(Type.i32, try mod.intValue(Type.i32, 0)), }); try writer.writeAll(";\n"); try v.end(f, inst, writer); @@ -6278,7 +6278,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { const value = try f.resolveInst(bin_op.rhs); const elem_ty = f.typeOf(bin_op.rhs); const elem_abi_size = elem_ty.abiSize(mod); - const val_is_undef = if (f.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; + const val_is_undef = if (try f.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; const writer = f.object.writer(); if (val_is_undef) { @@ -6326,7 +6326,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { try writer.writeAll("for ("); try f.writeCValue(writer, index, .Other); try writer.writeAll(" = "); - try f.object.dg.renderValue(writer, Type.usize, Value.zero, .Initializer); + try f.object.dg.renderValue(writer, Type.usize, try mod.intValue(Type.usize, 0), .Initializer); try writer.writeAll("; "); try f.writeCValue(writer, index, .Other); try writer.writeAll(" != "); @@ -6677,27 +6677,27 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator()); try f.object.dg.renderValue(writer, scalar_ty, switch (reduce.operation) { - .Or, .Xor, .Add => Value.zero, + .Or, .Xor, .Add => try mod.intValue(scalar_ty, 0), .And => switch (scalar_ty.zigTypeTag(mod)) { - .Bool => Value.one, + .Bool => try mod.intValue(Type.comptime_int, 1), else => switch (scalar_ty.intInfo(mod).signedness) { .unsigned => try scalar_ty.maxIntScalar(mod), - .signed => Value.negative_one, + .signed => try mod.intValue(scalar_ty, -1), }, }, .Min => switch (scalar_ty.zigTypeTag(mod)) { - .Bool => Value.one, + .Bool => try mod.intValue(Type.comptime_int, 1), .Int => try scalar_ty.maxIntScalar(mod), .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target), else => unreachable, }, .Max => switch (scalar_ty.zigTypeTag(mod)) { - .Bool => Value.zero, + .Bool => try mod.intValue(scalar_ty, 0), .Int => try scalar_ty.minInt(stack.get(), mod), .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target), else => unreachable, }, - .Mul => Value.one, + .Mul => try 
mod.intValue(Type.comptime_int, 1), }, .Initializer); try writer.writeAll(";\n"); @@ -7686,13 +7686,13 @@ const Vectorize = struct { try writer.writeAll("for ("); try f.writeCValue(writer, local, .Other); - try writer.print(" = {d}; ", .{try f.fmtIntLiteral(Type.usize, Value.zero)}); + try writer.print(" = {d}; ", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 0))}); try f.writeCValue(writer, local, .Other); try writer.print(" < {d}; ", .{ try f.fmtIntLiteral(Type.usize, len_val), }); try f.writeCValue(writer, local, .Other); - try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(Type.usize, Value.one)}); + try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1))}); f.object.indent_writer.pushIndent(); break :index .{ .index = local }; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 9d8c3edaf5..c42719d07c 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -2854,7 +2854,7 @@ pub const DeclGen = struct { }, .Array => { const elem_ty = t.childType(mod); - assert(elem_ty.onePossibleValue(mod) == null); + if (std.debug.runtime_safety) assert((try elem_ty.onePossibleValue(mod)) == null); const elem_llvm_ty = try dg.lowerType(elem_ty); const total_len = t.arrayLen(mod) + @boolToInt(t.sentinel(mod) != null); return elem_llvm_ty.arrayType(@intCast(c_uint, total_len)); @@ -3588,7 +3588,7 @@ pub const DeclGen = struct { if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. - const err_val = if (!is_pl) tv.val else Value.zero; + const err_val = if (!is_pl) tv.val else try mod.intValue(Type.anyerror, 0); return dg.lowerValue(.{ .ty = Type.anyerror, .val = err_val }); } @@ -3596,7 +3596,7 @@ pub const DeclGen = struct { const error_align = Type.anyerror.abiAlignment(mod); const llvm_error_value = try dg.lowerValue(.{ .ty = Type.anyerror, - .val = if (is_pl) Value.zero else tv.val, + .val = if (is_pl) try mod.intValue(Type.anyerror, 0) else tv.val, }); const llvm_payload_value = try dg.lowerValue(.{ .ty = payload_type, @@ -4476,7 +4476,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const llvm_val = try self.resolveValue(.{ .ty = self.typeOf(inst), - .val = self.air.value(inst, mod).?, + .val = (try self.air.value(inst, mod)).?, }); gop.value_ptr.* = llvm_val; return llvm_val; @@ -6873,7 +6873,7 @@ pub const FuncGen = struct { const err_union_ty = self.typeOf(ty_op.operand).childType(mod); const payload_ty = err_union_ty.errorUnionPayload(); - const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = Value.zero }); + const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = try mod.intValue(Type.anyerror, 0) }); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { _ = self.builder.buildStore(non_error_val, operand); return operand; @@ -8203,7 +8203,7 @@ pub const FuncGen = struct { const ptr_ty = self.typeOf(bin_op.lhs); const operand_ty = ptr_ty.childType(mod); - const val_is_undef = if (self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; + const val_is_undef = if (try self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; if (val_is_undef) { // Even if safety is disabled, we still emit a memset to undefined since it conveys // extra information to LLVM. 
However, safety makes the difference between using @@ -8494,7 +8494,7 @@ pub const FuncGen = struct { const dest_ptr = self.sliceOrArrayPtr(dest_slice, ptr_ty); const is_volatile = ptr_ty.isVolatilePtr(mod); - if (self.air.value(bin_op.rhs, mod)) |elem_val| { + if (try self.air.value(bin_op.rhs, mod)) |elem_val| { if (elem_val.isUndefDeep()) { // Even if safety is disabled, we still emit a memset to undefined since it conveys // extra information to LLVM. However, safety makes the difference between using @@ -9323,7 +9323,7 @@ pub const FuncGen = struct { var indices: [2]*llvm.Value = .{ llvm_u32.constNull(), undefined }; for (elements, 0..) |elem, i| { - if (result_ty.structFieldValueComptime(mod, i) != null) continue; + if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue; const llvm_elem = try self.resolveInst(elem); const llvm_i = llvmFieldIndex(result_ty, i, mod, &ptr_ty_buf).?; @@ -9344,7 +9344,7 @@ pub const FuncGen = struct { } else { var result = llvm_result_ty.getUndef(); for (elements, 0..) |elem, i| { - if (result_ty.structFieldValueComptime(mod, i) != null) continue; + if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue; const llvm_elem = try self.resolveInst(elem); const llvm_i = llvmFieldIndex(result_ty, i, mod, &ptr_ty_buf).?; diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 32e0c13c37..3842da5f7b 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -232,7 +232,7 @@ pub const DeclGen = struct { /// Fetch the result-id for a previously generated instruction or constant. fn resolve(self: *DeclGen, inst: Air.Inst.Ref) !IdRef { const mod = self.module; - if (self.air.value(inst, mod)) |val| { + if (try self.air.value(inst, mod)) |val| { const ty = self.typeOf(inst); if (ty.zigTypeTag(mod) == .Fn) { const fn_decl_index = switch (val.tag()) { @@ -584,7 +584,7 @@ pub const DeclGen = struct { // TODO: Properly lower function pointers. For now we are going to hack around it and // just generate an empty pointer. Function pointers are represented by usize for now, // though. 
- try self.addInt(Type.usize, Value.zero); + try self.addInt(Type.usize, Value.zero_usize); // TODO: Add dependency return; }, @@ -803,7 +803,7 @@ pub const DeclGen = struct { .ErrorUnion => { const payload_ty = ty.errorUnionPayload(); const is_pl = val.errorUnionIsPayload(); - const error_val = if (!is_pl) val else Value.zero; + const error_val = if (!is_pl) val else try mod.intValue(Type.anyerror, 0); const eu_layout = dg.errorUnionLayout(payload_ty); if (!eu_layout.payload_has_bits) { @@ -2801,7 +2801,7 @@ pub const DeclGen = struct { const value = try self.resolve(bin_op.rhs); const ptr_ty_ref = try self.resolveType(ptr_ty, .direct); - const val_is_undef = if (self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; + const val_is_undef = if (try self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; if (val_is_undef) { const undef = try self.spv.constUndef(ptr_ty_ref); try self.store(ptr_ty, ptr, undef); @@ -3141,7 +3141,7 @@ pub const DeclGen = struct { const label = IdRef{ .id = first_case_label.id + case_i }; for (items) |item| { - const value = self.air.value(item, mod) orelse { + const value = (try self.air.value(item, mod)) orelse { return self.todo("switch on runtime value???", .{}); }; const int_val = switch (cond_ty.zigTypeTag(mod)) { diff --git a/src/type.zig b/src/type.zig index e60d216085..e6d0af9f46 100644 --- a/src/type.zig +++ b/src/type.zig @@ -3377,7 +3377,7 @@ pub const Type = struct { } /// For vectors, returns the element type. Otherwise returns self. - pub fn scalarType(ty: Type, mod: *const Module) Type { + pub fn scalarType(ty: Type, mod: *Module) Type { return switch (ty.zigTypeTag(mod)) { .Vector => ty.childType(mod), else => ty, @@ -3941,13 +3941,13 @@ pub const Type = struct { /// During semantic analysis, instead call `Sema.typeHasOnePossibleValue` which /// resolves field types rather than asserting they are already resolved. - pub fn onePossibleValue(starting_type: Type, mod: *const Module) ?Value { + pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value { var ty = starting_type; if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => |int_type| { if (int_type.bits == 0) { - return Value.zero; + return try mod.intValue(ty, 0); } else { return null; } @@ -3956,13 +3956,13 @@ pub const Type = struct { .array_type => |array_type| { if (array_type.len == 0) return Value.initTag(.empty_array); - if (array_type.child.toType().onePossibleValue(mod) != null) + if ((try array_type.child.toType().onePossibleValue(mod)) != null) return Value.initTag(.the_only_possible_value); return null; }, .vector_type => |vector_type| { if (vector_type.len == 0) return Value.initTag(.empty_array); - if (vector_type.child.toType().onePossibleValue(mod)) |v| return v; + if (try vector_type.child.toType().onePossibleValue(mod)) |v| return v; return null; }, .opt_type => |child| { @@ -4055,7 +4055,7 @@ pub const Type = struct { assert(s.haveFieldTypes()); for (s.fields.values()) |field| { if (field.is_comptime) continue; - if (field.ty.onePossibleValue(mod) != null) continue; + if ((try field.ty.onePossibleValue(mod)) != null) continue; return null; } return Value.initTag(.empty_struct_value); @@ -4066,7 +4066,7 @@ pub const Type = struct { for (tuple.values, 0..) 
|val, i| { const is_comptime = val.ip_index != .unreachable_value; if (is_comptime) continue; - if (tuple.types[i].onePossibleValue(mod) != null) continue; + if ((try tuple.types[i].onePossibleValue(mod)) != null) continue; return null; } return Value.initTag(.empty_struct_value); @@ -4089,7 +4089,7 @@ pub const Type = struct { switch (enum_full.fields.count()) { 0 => return Value.@"unreachable", 1 => if (enum_full.values.count() == 0) { - return Value.zero; // auto-numbered + return try mod.intValue(ty, 0); // auto-numbered } else { return enum_full.values.keys()[0]; }, @@ -4100,24 +4100,24 @@ pub const Type = struct { const enum_simple = ty.castTag(.enum_simple).?.data; switch (enum_simple.fields.count()) { 0 => return Value.@"unreachable", - 1 => return Value.zero, + 1 => return try mod.intValue(ty, 0), else => return null, } }, .enum_nonexhaustive => { const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty; if (!tag_ty.hasRuntimeBits(mod)) { - return Value.zero; + return try mod.intValue(ty, 0); } else { return null; } }, .@"union", .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; - const tag_val = union_obj.tag_ty.onePossibleValue(mod) orelse return null; + const tag_val = (try union_obj.tag_ty.onePossibleValue(mod)) orelse return null; if (union_obj.fields.count() == 0) return Value.@"unreachable"; const only_field = union_obj.fields.values()[0]; - const val_val = only_field.ty.onePossibleValue(mod) orelse return null; + const val_val = (try only_field.ty.onePossibleValue(mod)) orelse return null; _ = tag_val; _ = val_val; return Value.initTag(.empty_struct_value); @@ -4128,7 +4128,7 @@ pub const Type = struct { .array => { if (ty.arrayLen(mod) == 0) return Value.initTag(.empty_array); - if (ty.childType(mod).onePossibleValue(mod) != null) + if ((try ty.childType(mod).onePossibleValue(mod)) != null) return Value.initTag(.the_only_possible_value); return null; }, @@ -4365,8 +4365,8 @@ pub const Type = struct { /// Asserts that the type is an integer. pub fn minIntScalar(ty: Type, mod: *Module) !Value { const info = ty.intInfo(mod); - if (info.signedness == .unsigned) return Value.zero; - if (info.bits == 0) return Value.negative_one; + if (info.signedness == .unsigned) return mod.intValue(ty, 0); + if (info.bits == 0) return mod.intValue(ty, -1); if (std.math.cast(u6, info.bits - 1)) |shift| { const n = @as(i64, std.math.minInt(i64)) >> (63 - shift); @@ -4392,17 +4392,17 @@ pub const Type = struct { } /// Asserts that the type is an integer. 
- pub fn maxIntScalar(self: Type, mod: *Module) !Value { - const info = self.intInfo(mod); + pub fn maxIntScalar(ty: Type, mod: *Module) !Value { + const info = ty.intInfo(mod); switch (info.bits) { 0 => return switch (info.signedness) { - .signed => Value.negative_one, - .unsigned => Value.zero, + .signed => mod.intValue(ty, -1), + .unsigned => mod.intValue(ty, 0), }, 1 => return switch (info.signedness) { - .signed => Value.zero, - .unsigned => Value.one, + .signed => mod.intValue(ty, 0), + .unsigned => mod.intValue(ty, 1), }, else => {}, } @@ -4662,7 +4662,7 @@ pub const Type = struct { } } - pub fn structFieldValueComptime(ty: Type, mod: *const Module, index: usize) ?Value { + pub fn structFieldValueComptime(ty: Type, mod: *Module, index: usize) !?Value { switch (ty.tag()) { .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; diff --git a/src/value.zig b/src/value.zig index eced9ba345..8268d1dde1 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1022,7 +1022,7 @@ pub const Value = struct { if (opt_val) |some| { return some.writeToMemory(child, mod, buffer); } else { - return writeToMemory(Value.zero, Type.usize, mod, buffer); + return writeToMemory(try mod.intValue(Type.usize, 0), Type.usize, mod, buffer); } }, else => return error.Unimplemented, @@ -1124,7 +1124,7 @@ pub const Value = struct { .Packed => { const field_index = ty.unionTagFieldIndex(val.unionTag(), mod); const field_type = ty.unionFields().values()[field_index.?].ty; - const field_val = val.fieldValue(field_type, mod, field_index.?); + const field_val = try val.fieldValue(field_type, mod, field_index.?); return field_val.writeToPackedMemory(field_type, mod, buffer, bit_offset); }, @@ -1141,7 +1141,7 @@ pub const Value = struct { if (opt_val) |some| { return some.writeToPackedMemory(child, mod, buffer, bit_offset); } else { - return writeToPackedMemory(Value.zero, Type.usize, mod, buffer, bit_offset); + return writeToPackedMemory(try mod.intValue(Type.usize, 0), Type.usize, mod, buffer, bit_offset); } }, else => @panic("TODO implement writeToPackedMemory for more types"), @@ -1173,7 +1173,7 @@ pub const Value = struct { const int_info = ty.intInfo(mod); const bits = int_info.bits; const byte_count = (bits + 7) / 8; - if (bits == 0 or buffer.len == 0) return Value.zero; + if (bits == 0 or buffer.len == 0) return mod.intValue(ty, 0); if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64 .signed => { @@ -1290,12 +1290,12 @@ pub const Value = struct { } }, .Int, .Enum => { - if (buffer.len == 0) return Value.zero; + if (buffer.len == 0) return mod.intValue(ty, 0); const int_info = ty.intInfo(mod); const abi_size = @intCast(usize, ty.abiSize(mod)); const bits = int_info.bits; - if (bits == 0) return Value.zero; + if (bits == 0) return mod.intValue(ty, 0); if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64 .signed => return mod.intValue(ty, std.mem.readVarPackedInt(i64, buffer, bit_offset, bits, endian, .signed)), .unsigned => return mod.intValue(ty, std.mem.readVarPackedInt(u64, buffer, bit_offset, bits, endian, .unsigned)), @@ -2091,11 +2091,11 @@ pub const Value = struct { // .the_one_possible_value, // .aggregate, // Note that we already checked above for matching tags, e.g. both .aggregate. - return ty.onePossibleValue(mod) != null; + return (try ty.onePossibleValue(mod)) != null; }, .Union => { // Here we have to check for value equality, as-if `a` has been coerced to `ty`. 
- if (ty.onePossibleValue(mod) != null) { + if ((try ty.onePossibleValue(mod)) != null) { return true; } if (a_ty.castTag(.anon_struct)) |payload| { @@ -2604,7 +2604,7 @@ pub const Value = struct { if (data.container_ptr.pointerDecl()) |decl_index| { const container_decl = mod.declPtr(decl_index); const field_type = data.container_ty.structFieldType(data.field_index); - const field_val = container_decl.val.fieldValue(field_type, mod, data.field_index); + const field_val = try container_decl.val.fieldValue(field_type, mod, data.field_index); return field_val.elemValue(mod, index); } else unreachable; }, @@ -2723,7 +2723,7 @@ pub const Value = struct { }; } - pub fn fieldValue(val: Value, ty: Type, mod: *const Module, index: usize) Value { + pub fn fieldValue(val: Value, ty: Type, mod: *Module, index: usize) !Value { switch (val.ip_index) { .undef => return Value.undef, .none => switch (val.tag()) { @@ -2737,14 +2737,14 @@ pub const Value = struct { return payload.val; }, - .the_only_possible_value => return ty.onePossibleValue(mod).?, + .the_only_possible_value => return (try ty.onePossibleValue(mod)).?, .empty_struct_value => { if (ty.isSimpleTupleOrAnonStruct()) { const tuple = ty.tupleFields(); return tuple.values[index]; } - if (ty.structFieldValueComptime(mod, index)) |some| { + if (try ty.structFieldValueComptime(mod, index)) |some| { return some; } unreachable; @@ -2968,7 +2968,7 @@ pub const Value = struct { switch (val.ip_index) { .undef => return val, .none => switch (val.tag()) { - .the_only_possible_value => return Value.zero, // for i0, u0 + .the_only_possible_value => return Value.float_zero, // for i0, u0 .lazy_align => { const ty = val.castTag(.lazy_align).?.data; if (opt_sema) |sema| { @@ -3402,7 +3402,7 @@ pub const Value = struct { if (lhs.isUndef() or rhs.isUndef()) return Value.undef; const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod); - const all_ones = if (ty.isSignedInt(mod)) Value.negative_one else try ty.maxIntScalar(mod); + const all_ones = if (ty.isSignedInt(mod)) try mod.intValue(ty, -1) else try ty.maxIntScalar(mod); return bitwiseXor(anded, all_ones, ty, arena, mod); } @@ -3803,7 +3803,7 @@ pub const Value = struct { bits: u16, mod: *Module, ) !Value { - if (bits == 0) return Value.zero; + if (bits == 0) return mod.intValue(ty, 0); var val_space: Value.BigIntSpace = undefined; const val_bigint = val.toBigInt(&val_space, mod); @@ -4011,9 +4011,9 @@ pub const Value = struct { // The shift is enough to remove all the bits from the number, which means the // result is 0 or -1 depending on the sign. if (lhs_bigint.positive) { - return Value.zero; + return mod.intValue(ty, 0); } else { - return Value.negative_one; + return mod.intValue(ty, -1); } } @@ -5151,10 +5151,9 @@ pub const Value = struct { pub const BigIntSpace = InternPool.Key.Int.Storage.BigIntSpace; - pub const zero: Value = .{ .ip_index = .zero, .legacy = undefined }; - pub const one: Value = .{ .ip_index = .one, .legacy = undefined }; - pub const negative_one: Value = .{ .ip_index = .negative_one, .legacy = undefined }; + pub const zero_usize: Value = .{ .ip_index = .zero_usize, .legacy = undefined }; pub const undef: Value = .{ .ip_index = .undef, .legacy = undefined }; + pub const float_zero: Value = .{ .ip_index = .zero, .legacy = undefined }; // TODO: replace this! 
pub const @"void": Value = .{ .ip_index = .void_value, .legacy = undefined }; pub const @"null": Value = .{ .ip_index = .null_value, .legacy = undefined }; pub const @"false": Value = .{ .ip_index = .bool_false, .legacy = undefined }; @@ -5169,7 +5168,9 @@ pub const Value = struct { } pub fn boolToInt(x: bool) Value { - return if (x) Value.one else Value.zero; + const zero: Value = .{ .ip_index = .zero, .legacy = undefined }; + const one: Value = .{ .ip_index = .one, .legacy = undefined }; + return if (x) one else zero; } pub const RuntimeIndex = enum(u32) { |

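The other change threaded through these backends is that resolving a comptime-known value is no longer infallible: `air.value`, `Type.onePossibleValue`, `Type.structFieldValueComptime`, and `Value.fieldValue` now return error unions, so call sites wrap the old optional unwrap in a `try`. A minimal sketch of the recurring call-site shape, again assuming the in-tree `Air` and `Module` imports; the helper name `operandIsUndef` is illustrative and not from the commit:

    const Air = @import("Air.zig");
    const Module = @import("Module.zig");

    /// Illustrative helper mirroring the repeated `val_is_undef` pattern:
    /// the optional unwrap stays, but it now sits behind a `try`.
    fn operandIsUndef(air: Air, mod: *Module, operand: Air.Inst.Ref) !bool {
        return if (try air.value(operand, mod)) |val| val.isUndefDeep() else false;
    }

The same shape covers the `.?` sites: `f.air.value(ref, mod).?` becomes `(try f.air.value(ref, mod)).?`, with the error propagated by the enclosing function.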