| author    | Andrew Kelley <andrew@ziglang.org> | 2023-05-06 19:20:52 -0700 |
| committer | Andrew Kelley <andrew@ziglang.org> | 2023-06-10 20:42:29 -0700 |
| commit    | 75900ec1b5a250935a6abe050a006738fba99e66 (patch) | |
| tree      | 9d3dd571b59648a585c3ce5bcdb2dbb50a574d58 | |
| parent    | 73720b6975e2650ece48cc5f38495c091360c6c9 (diff) | |
stage2: move integer values to InternPool
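The mechanical change repeated across this diff: integer constants stop being arena-allocated `Value` payloads (`Value.Tag.int_u64`, `.int_i64`, `.int_big_positive`, ...) and are instead interned through the new `Module.intValue*` helpers, which return a `Value` wrapping an `InternPool.Index`. A minimal before/after sketch of one call site, adapted from the `populateTestFunctions` hunk below (the surrounding variables are assumed context, not part of the diff):

```zig
// Before: each integer constant was a tagged payload allocated in an arena.
const len_val = try Value.Tag.int_u64.create(arena, mod.test_functions.count());

// After: the (type, value) pair is interned, so equal integers share one
// InternPool index and no per-value arena allocation happens.
const len_val = try mod.intValue(Type.usize, mod.test_functions.count());
```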
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/Air.zig | 1 |
| -rw-r--r-- | src/InternPool.zig | 18 |
| -rw-r--r-- | src/Module.zig | 73 |
| -rw-r--r-- | src/RangeSet.zig | 6 |
| -rw-r--r-- | src/Sema.zig | 738 |
| -rw-r--r-- | src/TypedValue.zig | 21 |
| -rw-r--r-- | src/Zir.zig | 1 |
| -rw-r--r-- | src/arch/wasm/CodeGen.zig | 36 |
| -rw-r--r-- | src/arch/x86_64/CodeGen.zig | 22 |
| -rw-r--r-- | src/codegen.zig | 65 |
| -rw-r--r-- | src/codegen/c.zig | 240 |
| -rw-r--r-- | src/codegen/llvm.zig | 105 |
| -rw-r--r-- | src/codegen/spirv.zig | 46 |
| -rw-r--r-- | src/link/Dwarf.zig | 3 |
| -rw-r--r-- | src/type.zig | 89 |
| -rw-r--r-- | src/value.zig | 1431 |
16 files changed, 1168 insertions, 1727 deletions
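The new `Module.intValue` entry point (added in the `src/Module.zig` hunk below) picks a storage representation from its `anytype` argument: values that fit a `u64` or `i64` take the inline small-integer paths, and anything wider falls through to a stack-buffered big integer. A hedged sketch of the three paths a caller can hit; the values are illustrative, only the helper names come from the diff:

```zig
const neg_one = try mod.intValue(Type.comptime_int, -1);    // fits i64 -> intValue_i64
const count = try mod.intValue(Type.usize, 42);             // fits u64 -> intValue_u64
const wide = try mod.intValue(Type.comptime_int, 1 << 100); // neither  -> intValue_big
```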
diff --git a/src/Air.zig b/src/Air.zig
index 43fc55e811..549583e697 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -913,6 +913,7 @@ pub const Inst = struct {
         zero_u8 = @enumToInt(InternPool.Index.zero_u8),
         one = @enumToInt(InternPool.Index.one),
         one_usize = @enumToInt(InternPool.Index.one_usize),
+        negative_one = @enumToInt(InternPool.Index.negative_one),
         calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c),
         calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline),
         void_value = @enumToInt(InternPool.Index.void_value),
diff --git a/src/InternPool.zig b/src/InternPool.zig
index fec5e721d0..d2f3bf81fe 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -390,6 +390,8 @@ pub const Index = enum(u32) {
     one,
     /// `1` (usize)
     one_usize,
+    /// `-1` (comptime_int)
+    negative_one,
     /// `std.builtin.CallingConvention.C`
     calling_convention_c,
     /// `std.builtin.CallingConvention.Inline`
@@ -624,6 +626,11 @@ pub const static_keys = [_]Key{
         .storage = .{ .u64 = 1 },
     } },
+    .{ .int = .{
+        .ty = .comptime_int_type,
+        .storage = .{ .i64 = -1 },
+    } },
+
     .{ .enum_tag = .{
         .ty = .calling_convention_type,
         .tag = .{
@@ -999,23 +1006,23 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
         .type_error_union => @panic("TODO"),
         .type_enum_simple => @panic("TODO"),
         .simple_internal => @panic("TODO"),
-        .int_u32 => return .{ .int = .{
+        .int_u32 => .{ .int = .{
             .ty = .u32_type,
             .storage = .{ .u64 = data },
         } },
-        .int_i32 => return .{ .int = .{
+        .int_i32 => .{ .int = .{
            .ty = .i32_type,
            .storage = .{ .i64 = @bitCast(i32, data) },
        } },
-        .int_usize => return .{ .int = .{
+        .int_usize => .{ .int = .{
            .ty = .usize_type,
            .storage = .{ .u64 = data },
        } },
-        .int_comptime_int_u32 => return .{ .int = .{
+        .int_comptime_int_u32 => .{ .int = .{
            .ty = .comptime_int_type,
            .storage = .{ .u64 = data },
        } },
-        .int_comptime_int_i32 => return .{ .int = .{
+        .int_comptime_int_i32 => .{ .int = .{
            .ty = .comptime_int_type,
            .storage = .{ .i64 = @bitCast(i32, data) },
        } },
@@ -1137,6 +1144,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
         .int => |int| b: {
             switch (int.ty) {
+                .none => unreachable,
                 .u32_type => switch (int.storage) {
                     .big_int => |big_int| {
                         if (big_int.to(u32)) |casted| {
diff --git a/src/Module.zig b/src/Module.zig
index 01e2403377..9315c9efa7 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -6597,7 +6597,7 @@ pub fn populateTestFunctions(
             field_vals.* = .{
                 try Value.Tag.slice.create(arena, .{
                     .ptr = try Value.Tag.decl_ref.create(arena, test_name_decl_index),
-                    .len = try Value.Tag.int_u64.create(arena, test_name_slice.len),
+                    .len = try mod.intValue(Type.usize, test_name_slice.len),
                 }), // name
                 try Value.Tag.decl_ref.create(arena, test_decl_index), // func
                 Value.null, // async_frame_size
@@ -6628,7 +6628,7 @@ pub fn populateTestFunctions(
         new_var.* = decl.val.castTag(.variable).?.data.*;
         new_var.init = try Value.Tag.slice.create(arena, .{
             .ptr = try Value.Tag.decl_ref.create(arena, array_decl_index),
-            .len = try Value.Tag.int_u64.create(arena, mod.test_functions.count()),
+            .len = try mod.intValue(Type.usize, mod.test_functions.count()),
         });
         const new_val = try Value.Tag.variable.create(arena, new_var);
@@ -6875,6 +6875,38 @@ pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
     return ptrType(mod, .{ .elem_type = child_type.ip_index, .is_const = true });
 }
+pub fn intValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value {
+    if (std.math.cast(u64, x)) |casted| return intValue_u64(mod, ty, casted);
+    if (std.math.cast(i64, x)) |casted| return intValue_i64(mod, ty, casted);
+    var limbs_buffer: [4]usize = undefined;
+    var big_int = BigIntMutable.init(&limbs_buffer, x);
+    return intValue_big(mod, ty, big_int.toConst());
+}
+
+pub fn intValue_big(mod: *Module, ty: Type, x: BigIntConst) Allocator.Error!Value {
+    const i = try intern(mod, .{ .int = .{
+        .ty = ty.ip_index,
+        .storage = .{ .big_int = x },
+    } });
+    return i.toValue();
+}
+
+pub fn intValue_u64(mod: *Module, ty: Type, x: u64) Allocator.Error!Value {
+    const i = try intern(mod, .{ .int = .{
+        .ty = ty.ip_index,
+        .storage = .{ .u64 = x },
+    } });
+    return i.toValue();
+}
+
+pub fn intValue_i64(mod: *Module, ty: Type, x: i64) Allocator.Error!Value {
+    const i = try intern(mod, .{ .int = .{
+        .ty = ty.ip_index,
+        .storage = .{ .i64 = x },
+    } });
+    return i.toValue();
+}
+
 pub fn smallestUnsignedInt(mod: *Module, max: u64) Allocator.Error!Type {
     return intType(mod, .unsigned, Type.smallestUnsignedBits(max));
 }
@@ -6907,32 +6939,27 @@ pub fn intFittingRange(mod: *Module, min: Value, max: Value) !Type {
 /// Asserts that `val` is not undef. If `val` is negative, asserts that `sign` is true.
 pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 {
     assert(!val.isUndef());
-    switch (val.tag()) {
-        .int_big_positive => {
-            const limbs = val.castTag(.int_big_positive).?.data;
-            const big: std.math.big.int.Const = .{ .limbs = limbs, .positive = true };
-            return @intCast(u16, big.bitCountAbs() + @boolToInt(sign));
-        },
-        .int_big_negative => {
-            const limbs = val.castTag(.int_big_negative).?.data;
-            // Zero is still a possibility, in which case unsigned is fine
-            for (limbs) |limb| {
-                if (limb != 0) break;
-            } else return 0; // val == 0
-            assert(sign);
-            const big: std.math.big.int.Const = .{ .limbs = limbs, .positive = false };
-            return @intCast(u16, big.bitCountTwosComp());
-        },
-        .int_i64 => {
-            const x = val.castTag(.int_i64).?.data;
-            if (x >= 0) return Type.smallestUnsignedBits(@intCast(u64, x));
+
+    const key = mod.intern_pool.indexToKey(val.ip_index);
+    switch (key.int.storage) {
+        .i64 => |x| {
+            if (std.math.cast(u64, x)) |casted| return Type.smallestUnsignedBits(casted);
             assert(sign);
+            // Protect against overflow in the following negation.
+            if (x == std.math.minInt(i64)) return 64;
             return Type.smallestUnsignedBits(@intCast(u64, -x - 1)) + 1;
         },
-        else => {
-            const x = val.toUnsignedInt(mod);
+        .u64 => |x| {
             return Type.smallestUnsignedBits(x) + @boolToInt(sign);
         },
+        .big_int => |big| {
+            if (big.positive) return @intCast(u16, big.bitCountAbs() + @boolToInt(sign));
+
+            // Zero is still a possibility, in which case unsigned is fine
+            if (big.eqZero()) return 0;
+
+            return @intCast(u16, big.bitCountTwosComp());
+        },
     }
 }
diff --git a/src/RangeSet.zig b/src/RangeSet.zig
index 2e28a562c6..a015c7b568 100644
--- a/src/RangeSet.zig
+++ b/src/RangeSet.zig
@@ -35,8 +35,8 @@ pub fn add(
     src: SwitchProngSrc,
 ) !?SwitchProngSrc {
     for (self.ranges.items) |range| {
-        if (last.compareAll(.gte, range.first, ty, self.module) and
-            first.compareAll(.lte, range.last, ty, self.module))
+        if (last.compareScalar(.gte, range.first, ty, self.module) and
+            first.compareScalar(.lte, range.last, ty, self.module))
         {
             return range.src; // They overlap.
         }
@@ -53,7 +53,7 @@ const LessThanContext = struct { ty: Type, module: *Module };
 /// Assumes a and b do not overlap
 fn lessThan(ctx: LessThanContext, a: Range, b: Range) bool {
-    return a.first.compareAll(.lt, b.first, ctx.ty, ctx.module);
+    return a.first.compareScalar(.lt, b.first, ctx.ty, ctx.module);
 }

 pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool {
diff --git a/src/Sema.zig b/src/Sema.zig
index 43aa7e056e..3aa845c10b 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -2995,7 +2995,6 @@ fn zirEnumDecl(
     var cur_bit_bag: u32 = undefined;
     var field_i: u32 = 0;
     var last_tag_val: ?Value = null;
-    var tag_val_buf: Value.Payload.U64 = undefined;
     while (field_i < fields_len) : (field_i += 1) {
         if (field_i % 32 == 0) {
             cur_bit_bag = sema.code.extra[bit_bag_index];
@@ -3084,11 +3083,7 @@ fn zirEnumDecl(
                 return sema.failWithOwnedErrorMsg(msg);
             }
         } else {
-            tag_val_buf = .{
-                .base = .{ .tag = .int_u64 },
-                .data = field_i,
-            };
-            last_tag_val = Value.initPayload(&tag_val_buf.base);
+            last_tag_val = try mod.intValue(enum_obj.tag_ty, field_i);
         }

         if (!(try sema.intFitsInType(last_tag_val.?, enum_obj.tag_ty, null))) {
@@ -5180,16 +5175,23 @@ fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     const tracy = trace(@src());
     defer tracy.end();

-    const arena = sema.arena;
+    const mod = sema.mod;
     const int = sema.code.instructions.items(.data)[inst].str;
     const byte_count = int.len * @sizeOf(std.math.big.Limb);
     const limb_bytes = sema.code.string_bytes[int.start..][0..byte_count];
-    const limbs = try arena.alloc(std.math.big.Limb, int.len);
+
+    // TODO: this allocation and copy is only needed because the limbs may be unaligned.
+    // If ZIR is adjusted so that big int limbs are guaranteed to be aligned, these
+    // two lines can be removed.
+    const limbs = try sema.arena.alloc(std.math.big.Limb, int.len);
     @memcpy(mem.sliceAsBytes(limbs), limb_bytes);

     return sema.addConstant(
         Type.comptime_int,
-        try Value.Tag.int_big_positive.create(arena, limbs),
+        try mod.intValue_big(Type.comptime_int, .{
+            .limbs = limbs,
+            .positive = true,
+        }),
     );
 }
@@ -8095,6 +8097,7 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
     const tracy = trace(@src());
     defer tracy.end();

+    const mod = sema.mod;
     const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
     const src = LazySrcLoc.nodeOffset(extra.node);
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
@@ -8107,12 +8110,13 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
         }
         switch (val.tag()) {
             .@"error" => {
-                const payload = try sema.arena.create(Value.Payload.U64);
-                payload.* = .{
-                    .base = .{ .tag = .int_u64 },
-                    .data = (try sema.mod.getErrorValue(val.castTag(.@"error").?.data.name)).value,
-                };
-                return sema.addConstant(Type.err_int, Value.initPayload(&payload.base));
+                return sema.addConstant(
+                    Type.err_int,
+                    try mod.intValue(
+                        Type.err_int,
+                        (try sema.mod.getErrorValue(val.castTag(.@"error").?.data.name)).value,
+                    ),
+                );
             },

             // This is not a valid combination with the type `anyerror`.
@@ -8280,8 +8284,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     }

     if (try sema.resolveMaybeUndefVal(enum_tag)) |enum_tag_val| {
-        var buffer: Value.Payload.U64 = undefined;
-        const val = enum_tag_val.enumToInt(enum_tag_ty, &buffer);
+        const val = try enum_tag_val.enumToInt(enum_tag_ty, mod);
         return sema.addConstant(int_tag_ty, try val.copy(sema.arena));
     }
@@ -9685,7 +9688,7 @@ fn intCast(
     // range shrinkage
     // requirement: int value fits into target type
     if (wanted_value_bits < actual_value_bits) {
-        const dest_max_val_scalar = try dest_scalar_ty.maxInt(sema.arena, mod);
+        const dest_max_val_scalar = try dest_scalar_ty.maxIntScalar(mod);
         const dest_max_val = if (is_vector)
             try Value.Tag.repeated.create(sema.arena, dest_max_val_scalar)
         else
@@ -9946,7 +9949,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     }
     if (try sema.resolveMaybeUndefVal(operand)) |operand_val| {
-        return sema.addConstant(dest_ty, try operand_val.floatCast(sema.arena, dest_ty, target));
+        return sema.addConstant(dest_ty, try operand_val.floatCast(sema.arena, dest_ty, mod));
     }
     if (dest_is_comptime_float) {
         return sema.fail(block, operand_src, "unable to cast runtime value to 'comptime_float'", .{});
@@ -10470,7 +10473,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     // Duplicate checking variables later also used for `inline else`.
     var seen_enum_fields: []?Module.SwitchProngSrc = &.{};
     var seen_errors = SwitchErrorSet.init(gpa);
-    var range_set = RangeSet.init(gpa, sema.mod);
+    var range_set = RangeSet.init(gpa, mod);
     var true_count: u8 = 0;
     var false_count: u8 = 0;
@@ -10596,11 +10599,11 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                         .{field_name},
                     );
                 }
-                try sema.mod.errNoteNonLazy(
-                    operand_ty.declSrcLoc(sema.mod),
+                try mod.errNoteNonLazy(
+                    operand_ty.declSrcLoc(mod),
                     msg,
                     "enum '{}' declared here",
-                    .{operand_ty.fmt(sema.mod)},
+                    .{operand_ty.fmt(mod)},
                 );
                 break :msg msg;
             };
@@ -10827,7 +10830,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
             defer arena.deinit();

             const min_int = try operand_ty.minInt(arena.allocator(), mod);
-            const max_int = try operand_ty.maxInt(arena.allocator(), mod);
+            const max_int = try operand_ty.maxIntScalar(mod);
             if (try range_set.spans(min_int, max_int, operand_ty)) {
                 if (special_prong == .@"else") {
                     return sema.fail(
@@ -10926,13 +10929,13 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                 block,
                 src,
                 "else prong required when switching on type '{}'",
-                .{operand_ty.fmt(sema.mod)},
+                .{operand_ty.fmt(mod)},
             );
         }

         var seen_values = ValueSrcMap.initContext(gpa, .{
             .ty = operand_ty,
-            .mod = sema.mod,
+            .mod = mod,
         });
         defer seen_values.deinit();
@@ -10996,7 +10999,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
         .ComptimeFloat,
         .Float,
         => return sema.fail(block, operand_src, "invalid switch operand type '{}'", .{
-            operand_ty.fmt(sema.mod),
+            operand_ty.fmt(mod),
         }),
     }
@@ -11054,7 +11057,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                 const item = try sema.resolveInst(item_ref);
                 // Validation above ensured these will succeed.
                 const item_val = sema.resolveConstValue(&child_block, .unneeded, item, "") catch unreachable;
-                if (operand_val.eql(item_val, operand_ty, sema.mod)) {
+                if (operand_val.eql(item_val, operand_ty, mod)) {
                     if (is_inline) child_block.inline_case_capture = operand;

                     if (err_set) try sema.maybeErrorUnwrapComptime(&child_block, body, operand);
@@ -11080,7 +11083,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                     const item = try sema.resolveInst(item_ref);
                     // Validation above ensured these will succeed.
                     const item_val = sema.resolveConstValue(&child_block, .unneeded, item, "") catch unreachable;
-                    if (operand_val.eql(item_val, operand_ty, sema.mod)) {
+                    if (operand_val.eql(item_val, operand_ty, mod)) {
                         if (is_inline) child_block.inline_case_capture = operand;

                         if (err_set) try sema.maybeErrorUnwrapComptime(&child_block, body, operand);
@@ -11128,7 +11131,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
         if (err_set and try sema.maybeErrorUnwrap(block, special.body, operand)) {
             return Air.Inst.Ref.unreachable_value;
         }
-        if (sema.mod.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag(mod) == .Enum and
+        if (mod.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag(mod) == .Enum and
             (!operand_ty.isNonexhaustiveEnum() or union_originally))
         {
             try sema.zirDbgStmt(block, cond_dbg_node_index);
@@ -11182,7 +11185,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
             const analyze_body = if (union_originally) blk: {
                 const item_val = sema.resolveConstValue(block, .unneeded, item, "") catch unreachable;
-                const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod);
+                const field_ty = maybe_union_ty.unionFieldType(item_val, mod);
                 break :blk field_ty.zigTypeTag(mod) != .NoReturn;
             } else true;
@@ -11245,9 +11248,9 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                 const item_last_ref = try sema.resolveInst(last_ref);
                 const item_last = sema.resolveConstValue(block, .unneeded, item_last_ref, undefined) catch unreachable;

-                while (item.compareAll(.lte, item_last, operand_ty, sema.mod)) : ({
+                while (item.compareScalar(.lte, item_last, operand_ty, mod)) : ({
                     // Previous validation has resolved any possible lazy values.
-                    item = try sema.intAddScalar(item, Value.one);
+                    item = try sema.intAddScalar(item, Value.one, operand_ty);
                 }) {
                     cases_len += 1;
@@ -11260,7 +11263,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                     if (emit_bb) sema.emitBackwardBranch(block, .unneeded) catch |err| switch (err) {
                         error.NeededSourceLocation => {
                             const case_src = Module.SwitchProngSrc{ .range = .{ .prong = multi_i, .item = range_i } };
-                            const decl = sema.mod.declPtr(case_block.src_decl);
+                            const decl = mod.declPtr(case_block.src_decl);
                             try sema.emitBackwardBranch(block, case_src.resolve(sema.gpa, decl, src_node_offset, .none));
                             unreachable;
                         },
@@ -11289,14 +11292,14 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                 const analyze_body = if (union_originally) blk: {
                     const item_val = sema.resolveConstValue(block, .unneeded, item, undefined) catch unreachable;
-                    const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod);
+                    const field_ty = maybe_union_ty.unionFieldType(item_val, mod);
                     break :blk field_ty.zigTypeTag(mod) != .NoReturn;
                 } else true;

                 if (emit_bb) sema.emitBackwardBranch(block, .unneeded) catch |err| switch (err) {
                     error.NeededSourceLocation => {
                         const case_src = Module.SwitchProngSrc{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } };
-                        const decl = sema.mod.declPtr(case_block.src_decl);
+                        const decl = mod.declPtr(case_block.src_decl);
                         try sema.emitBackwardBranch(block, case_src.resolve(sema.gpa, decl, src_node_offset, .none));
                         unreachable;
                     },
@@ -11333,7 +11336,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                 for (items) |item_ref| {
                     const item = try sema.resolveInst(item_ref);
                     const item_val = sema.resolveConstValue(block, .unneeded, item, "") catch unreachable;
-                    const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod);
+                    const field_ty = maybe_union_ty.unionFieldType(item_val, mod);
                     if (field_ty.zigTypeTag(mod) != .NoReturn) break true;
                 } else false
             else
@@ -11461,7 +11464,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
             .Enum => {
                 if (operand_ty.isNonexhaustiveEnum() and !union_originally) {
                     return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{
-                        operand_ty.fmt(sema.mod),
+                        operand_ty.fmt(mod),
                     });
                 }
                 for (seen_enum_fields, 0..) |f, i| {
@@ -11476,7 +11479,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                     case_block.wip_capture_scope = child_block.wip_capture_scope;

                     const analyze_body = if (union_originally) blk: {
-                        const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod);
+                        const field_ty = maybe_union_ty.unionFieldType(item_val, mod);
                         break :blk field_ty.zigTypeTag(mod) != .NoReturn;
                     } else true;
@@ -11499,7 +11502,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
             .ErrorSet => {
                 if (operand_ty.isAnyError()) {
                     return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{
-                        operand_ty.fmt(sema.mod),
+                        operand_ty.fmt(mod),
                     });
                 }
                 for (operand_ty.errorSetNames()) |error_name| {
@@ -11587,7 +11590,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                 }
             },
             else => return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{
-                operand_ty.fmt(sema.mod),
+                operand_ty.fmt(mod),
             }),
         };
@@ -11598,7 +11601,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
         case_block.wip_capture_scope = wip_captures.scope;
         case_block.inline_case_capture = .none;

-        if (sema.mod.backendSupportsFeature(.is_named_enum_value) and special.body.len != 0 and block.wantSafety() and
+        if (mod.backendSupportsFeature(.is_named_enum_value) and special.body.len != 0 and block.wantSafety() and
             operand_ty.zigTypeTag(mod) == .Enum and
             (!operand_ty.isNonexhaustiveEnum() or union_originally))
         {
             try sema.zirDbgStmt(&case_block, cond_dbg_node_index);
@@ -11679,7 +11682,7 @@ const RangeSetUnhandledIterator = struct {
     fn init(sema: *Sema, ty: Type, range_set: RangeSet) !RangeSetUnhandledIterator {
         const mod = sema.mod;
         const min = try ty.minInt(sema.arena, mod);
-        const max = try ty.maxInt(sema.arena, mod);
+        const max = try ty.maxIntScalar(mod);

         return RangeSetUnhandledIterator{
             .sema = sema,
@@ -11693,19 +11696,19 @@ const RangeSetUnhandledIterator = struct {
     fn next(it: *RangeSetUnhandledIterator) !?Value {
         while (it.range_i < it.ranges.len) : (it.range_i += 1) {
             if (!it.first) {
-                it.cur = try it.sema.intAdd(it.cur, Value.one, it.ty);
+                it.cur = try it.sema.intAddScalar(it.cur, Value.one, it.ty);
             }
             it.first = false;
-            if (it.cur.compareAll(.lt, it.ranges[it.range_i].first, it.ty, it.sema.mod)) {
+            if (it.cur.compareScalar(.lt, it.ranges[it.range_i].first, it.ty, it.sema.mod)) {
                 return it.cur;
             }
             it.cur = it.ranges[it.range_i].last;
         }
         if (!it.first) {
-            it.cur = try it.sema.intAdd(it.cur, Value.one, it.ty);
+            it.cur = try it.sema.intAddScalar(it.cur, Value.one, it.ty);
         }
         it.first = false;
-        if (it.cur.compareAll(.lte, it.max, it.ty, it.sema.mod)) {
+        if (it.cur.compareScalar(.lte, it.max, it.ty, it.sema.mod)) {
             return it.cur;
         }
         return null;
@@ -11750,7 +11753,7 @@ fn validateSwitchRange(
 ) CompileError!void {
     const first_val = (try sema.resolveSwitchItemVal(block, first_ref, src_node_offset, switch_prong_src, .first)).val;
     const last_val = (try sema.resolveSwitchItemVal(block, last_ref, src_node_offset, switch_prong_src, .last)).val;
-    if (first_val.compareAll(.gt, last_val, operand_ty, sema.mod)) {
+    if (first_val.compareScalar(.gt, last_val, operand_ty, sema.mod)) {
        const src = switch_prong_src.resolve(sema.gpa, sema.mod.declPtr(block.src_decl), src_node_offset, .first);
        return sema.fail(block, src, "range start value is greater than the end value", .{});
     }
@@ -12208,16 +12211,11 @@ fn zirShl(
             return lhs;
         }
         if (scalar_ty.zigTypeTag(mod) != .ComptimeInt and air_tag != .shl_sat) {
-            var bits_payload = Value.Payload.U64{
-                .base = .{ .tag = .int_u64 },
-                .data = scalar_ty.intInfo(mod).bits,
-            };
-            const bit_value = Value.initPayload(&bits_payload.base);
+            const bit_value = try mod.intValue(Type.comptime_int, scalar_ty.intInfo(mod).bits);
             if (rhs_ty.zigTypeTag(mod) == .Vector) {
                 var i: usize = 0;
                 while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
-                    var elem_value_buf: Value.ElemValueBuffer = undefined;
-                    const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
+                    const rhs_elem = try rhs_val.elemValue(sema.mod, i);
                     if (rhs_elem.compareHetero(.gte, bit_value, mod)) {
                         return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
                             rhs_elem.fmtValue(scalar_ty, sema.mod),
@@ -12236,8 +12234,7 @@ fn zirShl(
         if (rhs_ty.zigTypeTag(mod) == .Vector) {
             var i: usize = 0;
             while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
-                var elem_value_buf: Value.ElemValueBuffer = undefined;
-                const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
+                const rhs_elem = try rhs_val.elemValue(sema.mod, i);
                 if (rhs_elem.compareHetero(.lt, Value.zero, mod)) {
                     return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
                         rhs_elem.fmtValue(scalar_ty, sema.mod),
@@ -12309,7 +12306,7 @@ fn zirShl(
     if (block.wantSafety()) {
         const bit_count = scalar_ty.intInfo(mod).bits;
         if (!std.math.isPowerOfTwo(bit_count)) {
-            const bit_count_val = try Value.Tag.int_u64.create(sema.arena, bit_count);
+            const bit_count_val = try mod.intValue(scalar_ty, bit_count);

             const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
                 const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val));
@@ -12396,16 +12393,11 @@ fn zirShr(
             return lhs;
         }
         if (scalar_ty.zigTypeTag(mod) != .ComptimeInt) {
-            var bits_payload = Value.Payload.U64{
-                .base = .{ .tag = .int_u64 },
-                .data = scalar_ty.intInfo(mod).bits,
-            };
-            const bit_value = Value.initPayload(&bits_payload.base);
+            const bit_value = try mod.intValue(Type.comptime_int, scalar_ty.intInfo(mod).bits);
             if (rhs_ty.zigTypeTag(mod) == .Vector) {
                 var i: usize = 0;
                 while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
-                    var elem_value_buf: Value.ElemValueBuffer = undefined;
-                    const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
+                    const rhs_elem = try rhs_val.elemValue(sema.mod, i);
                     if (rhs_elem.compareHetero(.gte, bit_value, mod)) {
                         return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
                             rhs_elem.fmtValue(scalar_ty, sema.mod),
@@ -12424,8 +12416,7 @@ fn zirShr(
         if (rhs_ty.zigTypeTag(mod) == .Vector) {
             var i: usize = 0;
             while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
-                var elem_value_buf: Value.ElemValueBuffer = undefined;
-                const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
+                const rhs_elem = try rhs_val.elemValue(sema.mod, i);
                 if (rhs_elem.compareHetero(.lt, Value.zero, mod)) {
                     return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
                         rhs_elem.fmtValue(scalar_ty, sema.mod),
@@ -12465,7 +12456,7 @@ fn zirShr(
     if (block.wantSafety()) {
         const bit_count = scalar_ty.intInfo(mod).bits;
         if (!std.math.isPowerOfTwo(bit_count)) {
-            const bit_count_val = try Value.Tag.int_u64.create(sema.arena, bit_count);
+            const bit_count_val = try mod.intValue(scalar_ty, bit_count);

             const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
                 const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val));
@@ -12587,10 +12578,9 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
             return sema.addConstUndef(operand_type);
         } else if (operand_type.zigTypeTag(mod) == .Vector) {
             const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen(mod));
-            var elem_val_buf: Value.ElemValueBuffer = undefined;
             const elems = try sema.arena.alloc(Value, vec_len);
             for (elems, 0..) |*elem, i| {
-                const elem_val = val.elemValueBuffer(sema.mod, i, &elem_val_buf);
+                const elem_val = try val.elemValue(sema.mod, i);
                 elem.* = try elem_val.bitwiseNot(scalar_type, sema.arena, sema.mod);
             }
             return sema.addConstant(
@@ -12695,6 +12685,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const tracy = trace(@src());
     defer tracy.end();

+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const lhs = try sema.resolveInst(extra.lhs);
@@ -12714,11 +12705,11 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, rhs_ty) orelse lhs_info: {
         if (lhs_is_tuple) break :lhs_info @as(Type.ArrayInfo, undefined);
-        return sema.fail(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(sema.mod)});
+        return sema.fail(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(mod)});
     };
     const rhs_info = try sema.getArrayCatInfo(block, rhs_src, rhs, lhs_ty) orelse {
         assert(!rhs_is_tuple);
-        return sema.fail(block, rhs_src, "expected indexable; found '{}'", .{rhs_ty.fmt(sema.mod)});
+        return sema.fail(block, rhs_src, "expected indexable; found '{}'", .{rhs_ty.fmt(mod)});
     };

     const resolved_elem_ty = t: {
@@ -12780,8 +12771,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         ),
     };

-    const result_ty = try Type.array(sema.arena, result_len, res_sent_val, resolved_elem_ty, sema.mod);
-    const mod = sema.mod;
+    const result_ty = try Type.array(sema.arena, result_len, res_sent_val, resolved_elem_ty, mod);
     const ptr_addrspace = p: {
         if (lhs_ty.zigTypeTag(mod) == .Pointer) break :p lhs_ty.ptrAddressSpace(mod);
         if (rhs_ty.zigTypeTag(mod) == .Pointer) break :p rhs_ty.ptrAddressSpace(mod);
@@ -12815,7 +12805,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 const lhs_elem_i = elem_i;
                 const elem_ty = if (lhs_is_tuple) lhs_ty.structFieldType(lhs_elem_i) else lhs_info.elem_type;
                 const elem_default_val = if (lhs_is_tuple) lhs_ty.structFieldDefaultValue(lhs_elem_i) else Value.@"unreachable";
-                const elem_val = if (elem_default_val.ip_index == .unreachable_value) try lhs_sub_val.elemValue(sema.mod, sema.arena, lhs_elem_i) else elem_default_val;
+                const elem_val = if (elem_default_val.ip_index == .unreachable_value) try lhs_sub_val.elemValue(mod, lhs_elem_i) else elem_default_val;
                 const elem_val_inst = try sema.addConstant(elem_ty, elem_val);
                 const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded);
                 const coerced_elem_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, coerced_elem_val_inst, "");
@@ -12825,7 +12815,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 const rhs_elem_i = elem_i - lhs_len;
                 const elem_ty = if (rhs_is_tuple) rhs_ty.structFieldType(rhs_elem_i) else rhs_info.elem_type;
                 const elem_default_val = if (rhs_is_tuple) rhs_ty.structFieldDefaultValue(rhs_elem_i) else Value.@"unreachable";
-                const elem_val = if (elem_default_val.ip_index == .unreachable_value) try rhs_sub_val.elemValue(sema.mod, sema.arena, rhs_elem_i) else elem_default_val;
+                const elem_val = if (elem_default_val.ip_index == .unreachable_value) try rhs_sub_val.elemValue(mod, rhs_elem_i) else elem_default_val;
                 const elem_val_inst = try sema.addConstant(elem_ty, elem_val);
                 const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded);
                 const coerced_elem_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, coerced_elem_val_inst, "");
@@ -12842,12 +12832,12 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     try sema.requireRuntimeBlock(block, src, runtime_src);

     if (ptr_addrspace) |ptr_as| {
-        const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{
+        const alloc_ty = try Type.ptr(sema.arena, mod, .{
             .pointee_type = result_ty,
             .@"addrspace" = ptr_as,
         });
         const alloc = try block.addTy(.alloc, alloc_ty);
-        const elem_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
+        const elem_ptr_ty = try Type.ptr(sema.arena, mod, .{
             .pointee_type = resolved_elem_ty,
             .@"addrspace" = ptr_as,
         });
@@ -13009,6 +12999,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const tracy = trace(@src());
     defer tracy.end();

+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const lhs = try sema.resolveInst(extra.lhs);
@@ -13025,10 +13016,9 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     }

     // Analyze the lhs first, to catch the case that someone tried to do exponentiation
-    const mod = sema.mod;
     const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, lhs_ty) orelse {
         const msg = msg: {
-            const msg = try sema.errMsg(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(sema.mod)});
+            const msg = try sema.errMsg(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(mod)});
             errdefer msg.destroy(sema.gpa);
             switch (lhs_ty.zigTypeTag(mod)) {
                 .Int, .Float, .ComptimeFloat, .ComptimeInt, .Vector => {
@@ -13048,7 +13038,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         return sema.fail(block, rhs_src, "operation results in overflow", .{});

     const result_len = try sema.usizeCast(block, src, result_len_u64);
-    const result_ty = try Type.array(sema.arena, result_len, lhs_info.sentinel, lhs_info.elem_type, sema.mod);
+    const result_ty = try Type.array(sema.arena, result_len, lhs_info.sentinel, lhs_info.elem_type, mod);

     const ptr_addrspace = if (lhs_ty.zigTypeTag(mod) == .Pointer) lhs_ty.ptrAddressSpace(mod) else null;
     const lhs_len = try sema.usizeCast(block, lhs_src, lhs_info.len);
@@ -13065,7 +13055,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             // Optimization for the common pattern of a single element repeated N times, such
             // as zero-filling a byte array.
             if (lhs_len == 1) {
-                const elem_val = try lhs_sub_val.elemValue(sema.mod, sema.arena, 0);
+                const elem_val = try lhs_sub_val.elemValue(mod, 0);
                 break :v try Value.Tag.repeated.create(sema.arena, elem_val);
             }
@@ -13074,7 +13064,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             while (elem_i < result_len) {
                 var lhs_i: usize = 0;
                 while (lhs_i < lhs_len) : (lhs_i += 1) {
-                    const elem_val = try lhs_sub_val.elemValue(sema.mod, sema.arena, lhs_i);
+                    const elem_val = try lhs_sub_val.elemValue(mod, lhs_i);
                     element_vals[elem_i] = elem_val;
                     elem_i += 1;
                 }
@@ -13090,12 +13080,12 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     try sema.requireRuntimeBlock(block, src, lhs_src);

     if (ptr_addrspace) |ptr_as| {
-        const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{
+        const alloc_ty = try Type.ptr(sema.arena, mod, .{
             .pointee_type = result_ty,
             .@"addrspace" = ptr_as,
         });
         const alloc = try block.addTy(.alloc, alloc_ty);
-        const elem_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
+        const elem_ptr_ty = try Type.ptr(sema.arena, mod, .{
             .pointee_type = lhs_info.elem_type,
             .@"addrspace" = ptr_as,
         });
@@ -13797,7 +13787,7 @@ fn addDivIntOverflowSafety(
     }

     const min_int = try resolved_type.minInt(sema.arena, mod);
-    const neg_one_scalar = try Value.Tag.int_i64.create(sema.arena, -1);
+    const neg_one_scalar = try mod.intValue(lhs_scalar_ty, -1);
     const neg_one = if (resolved_type.zigTypeTag(mod) == .Vector)
         try Value.Tag.repeated.create(sema.arena, neg_one_scalar)
     else
@@ -13806,12 +13796,12 @@ fn addDivIntOverflowSafety(
     // If the LHS is comptime-known to be not equal to the min int,
     // no overflow is possible.
     if (maybe_lhs_val) |lhs_val| {
-        if (lhs_val.compareAll(.neq, min_int, resolved_type, mod)) return;
+        if (try lhs_val.compareAll(.neq, min_int, resolved_type, mod)) return;
     }

     // If the RHS is comptime-known to not be equal to -1, no overflow is possible.
     if (maybe_rhs_val) |rhs_val| {
-        if (rhs_val.compareAll(.neq, neg_one, resolved_type, mod)) return;
+        if (try rhs_val.compareAll(.neq, neg_one, resolved_type, mod)) return;
     }

     var ok: Air.Inst.Ref = .none;
@@ -14038,23 +14028,18 @@ fn intRem(
     const mod = sema.mod;
     if (ty.zigTypeTag(mod) == .Vector) {
         const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
+        const scalar_ty = ty.scalarType(mod);
         for (result_data, 0..) |*scalar, i| {
-            var lhs_buf: Value.ElemValueBuffer = undefined;
-            var rhs_buf: Value.ElemValueBuffer = undefined;
-            const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
-            const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
-            scalar.* = try sema.intRemScalar(lhs_elem, rhs_elem);
+            const lhs_elem = try lhs.elemValue(sema.mod, i);
+            const rhs_elem = try rhs.elemValue(sema.mod, i);
+            scalar.* = try sema.intRemScalar(lhs_elem, rhs_elem, scalar_ty);
         }
         return Value.Tag.aggregate.create(sema.arena, result_data);
     }
-    return sema.intRemScalar(lhs, rhs);
+    return sema.intRemScalar(lhs, rhs, ty);
 }

-fn intRemScalar(
-    sema: *Sema,
-    lhs: Value,
-    rhs: Value,
-) CompileError!Value {
+fn intRemScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) CompileError!Value {
     const mod = sema.mod;
     // TODO is this a performance issue? maybe we should try the operation without
     // resorting to BigInt first.
@@ -14079,7 +14064,7 @@ fn intRemScalar(
     var result_q = math.big.int.Mutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
     var result_r = math.big.int.Mutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
     result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
-    return Value.fromBigInt(sema.arena, result_r.toConst());
+    return mod.intValue_big(scalar_ty, result_r.toConst());
 }

 fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -15063,7 +15048,7 @@ fn analyzePtrArithmetic(
                 .ptr_sub => addr - elem_size * offset_int,
                 else => unreachable,
             };
-            const new_ptr_val = try Value.Tag.int_u64.create(sema.arena, new_addr);
+            const new_ptr_val = try mod.intValue(new_ptr_ty, new_addr);
             return sema.addConstant(new_ptr_ty, new_ptr_val);
         }
         if (air_tag == .ptr_sub) {
@@ -15826,9 +15811,9 @@ fn zirBuiltinSrc(
     // fn_name: [:0]const u8,
     field_values[1] = func_name_val;
     // line: u32
-    field_values[2] = try Value.Tag.runtime_value.create(sema.arena, try Value.Tag.int_u64.create(sema.arena, extra.line + 1));
+    field_values[2] = try Value.Tag.runtime_value.create(sema.arena, try mod.intValue(Type.u32, extra.line + 1));
     // column: u32,
-    field_values[3] = try Value.Tag.int_u64.create(sema.arena, extra.column + 1);
+    field_values[3] = try mod.intValue(Type.u32, extra.column + 1);

     return sema.addConstant(
         try sema.getBuiltinType("SourceLocation"),
@@ -15977,7 +15962,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 );
                 break :v try Value.Tag.slice.create(sema.arena, .{
                     .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl),
-                    .len = try Value.Tag.int_u64.create(sema.arena, param_vals.len),
+                    .len = try mod.intValue(Type.usize, param_vals.len),
                 });
             };
@@ -15994,7 +15979,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 // calling_convention: CallingConvention,
                 try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.cc)),
                 // alignment: comptime_int,
-                try Value.Tag.int_u64.create(sema.arena, ty.abiAlignment(mod)),
+                try mod.intValue(Type.comptime_int, ty.abiAlignment(mod)),
                 // is_generic: bool,
                 Value.makeBool(info.is_generic),
                 // is_var_args: bool,
@@ -16022,7 +16007,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 @enumToInt(info.signedness),
             );
             // bits: comptime_int,
-            field_values[1] = try Value.Tag.int_u64.create(sema.arena, info.bits);
+            field_values[1] = try mod.intValue(Type.comptime_int, info.bits);

             return sema.addConstant(
                 type_info_ty,
@@ -16035,7 +16020,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         .Float => {
             const field_values = try sema.arena.alloc(Value, 1);
             // bits: comptime_int,
-            field_values[0] = try Value.Tag.int_u64.create(sema.arena, ty.bitSize(mod));
+            field_values[0] = try mod.intValue(Type.comptime_int, ty.bitSize(mod));

             return sema.addConstant(
                 type_info_ty,
@@ -16048,7 +16033,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         .Pointer => {
             const info = ty.ptrInfo(mod);
             const alignment = if (info.@"align" != 0)
-                try Value.Tag.int_u64.create(sema.arena, info.@"align")
+                try mod.intValue(Type.comptime_int, info.@"align")
             else
                 try info.pointee_type.lazyAbiAlignment(mod, sema.arena);
@@ -16084,7 +16069,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             const info = ty.arrayInfo(mod);
             const field_values = try sema.arena.alloc(Value, 3);
             // len: comptime_int,
-            field_values[0] = try Value.Tag.int_u64.create(sema.arena, info.len);
+            field_values[0] = try mod.intValue(Type.comptime_int, info.len);
             // child: type,
             field_values[1] = try Value.Tag.ty.create(sema.arena, info.elem_type);
             // sentinel: ?*const anyopaque,
@@ -16102,7 +16087,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             const info = ty.arrayInfo(mod);
             const field_values = try sema.arena.alloc(Value, 2);
             // len: comptime_int,
-            field_values[0] = try Value.Tag.int_u64.create(sema.arena, info.len);
+            field_values[0] = try mod.intValue(Type.comptime_int, info.len);
             // child: type,
             field_values[1] = try Value.Tag.ty.create(sema.arena, info.elem_type);
@@ -16202,7 +16187,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                     const new_decl_val = try Value.Tag.decl_ref.create(sema.arena, new_decl);
                     const slice_val = try Value.Tag.slice.create(sema.arena, .{
                         .ptr = new_decl_val,
-                        .len = try Value.Tag.int_u64.create(sema.arena, vals.len),
+                        .len = try mod.intValue(Type.usize, vals.len),
                     });
                     break :v try Value.Tag.opt_payload.create(sema.arena, slice_val);
                 } else Value.null;
@@ -16263,8 +16248,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 };
                 const tag_val = Value.initPayload(&tag_val_payload.base);

-                var buffer: Value.Payload.U64 = undefined;
-                const int_val = try tag_val.enumToInt(ty, &buffer).copy(fields_anon_decl.arena());
+                const int_val = try tag_val.enumToInt(ty, mod);

                 const name = enum_fields.keys()[i];
                 const name_val = v: {
@@ -16379,7 +16363,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                     // type: type,
                     try Value.Tag.ty.create(fields_anon_decl.arena(), field.ty),
                     // alignment: comptime_int,
-                    try Value.Tag.int_u64.create(fields_anon_decl.arena(), alignment),
+                    try mod.intValue(Type.comptime_int, alignment),
                 };
                 field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), union_field_fields);
             }
@@ -16398,7 +16382,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 );
                 break :v try Value.Tag.slice.create(sema.arena, .{
                     .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl),
-                    .len = try Value.Tag.int_u64.create(sema.arena, union_field_vals.len),
+                    .len = try mod.intValue(Type.usize, union_field_vals.len),
                 });
             };
@@ -16476,7 +16460,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                         );
                         break :v try Value.Tag.slice.create(fields_anon_decl.arena(), .{
                             .ptr = try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl),
-                            .len = try Value.Tag.int_u64.create(fields_anon_decl.arena(), bytes.len),
+                            .len = try mod.intValue(Type.usize, bytes.len),
                         });
                     };
@@ -16518,7 +16502,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                         );
                         break :v try Value.Tag.slice.create(fields_anon_decl.arena(), .{
                             .ptr = try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl),
-                            .len = try Value.Tag.int_u64.create(fields_anon_decl.arena(), bytes.len),
+                            .len = try mod.intValue(Type.usize, bytes.len),
                         });
                     };
@@ -16540,7 +16524,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                     // is_comptime: bool,
                     Value.makeBool(field.is_comptime),
                     // alignment: comptime_int,
-                    try Value.Tag.int_u64.create(fields_anon_decl.arena(), alignment),
+                    try mod.intValue(Type.comptime_int, alignment),
                 };
                 field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), struct_field_fields);
             }
@@ -16561,7 +16545,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 );
                 break :v try Value.Tag.slice.create(sema.arena, .{
                     .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl),
-                    .len = try Value.Tag.int_u64.create(sema.arena, struct_field_vals.len),
+                    .len = try mod.intValue(Type.usize, struct_field_vals.len),
                 });
             };
@@ -16636,6 +16620,7 @@ fn typeInfoDecls(
     type_info_ty: Type,
     opt_namespace: ?*Module.Namespace,
 ) CompileError!Value {
+    const mod = sema.mod;
     var decls_anon_decl = try block.startAnonDecl();
     defer decls_anon_decl.deinit();
@@ -16646,9 +16631,9 @@ fn typeInfoDecls(
             type_info_ty.getNamespace().?,
             "Declaration",
         )).?;
-        try sema.mod.declareDeclDependency(sema.owner_decl_index, declaration_ty_decl_index);
+        try mod.declareDeclDependency(sema.owner_decl_index, declaration_ty_decl_index);
         try sema.ensureDeclAnalyzed(declaration_ty_decl_index);
-        const declaration_ty_decl = sema.mod.declPtr(declaration_ty_decl_index);
+        const declaration_ty_decl = mod.declPtr(declaration_ty_decl_index);
         break :t try declaration_ty_decl.val.toType().copy(decls_anon_decl.arena());
     };
     try sema.queueFullTypeResolution(try declaration_ty.copy(sema.arena));
@@ -16676,7 +16661,7 @@ fn typeInfoDecls(
     );
     return try Value.Tag.slice.create(sema.arena, .{
         .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl),
-        .len = try Value.Tag.int_u64.create(sema.arena, decl_vals.items.len),
+        .len = try mod.intValue(Type.usize, decl_vals.items.len),
     });
 }
@@ -16713,7 +16698,7 @@ fn typeInfoNamespaceDecls(
             );
             break :v try Value.Tag.slice.create(decls_anon_decl, .{
                 .ptr = try Value.Tag.decl_ref.create(decls_anon_decl, new_decl),
-                .len = try Value.Tag.int_u64.create(decls_anon_decl, bytes.len),
+                .len = try mod.intValue(Type.usize, bytes.len),
             });
         };
@@ -18620,10 +18605,9 @@ fn zirUnaryMath(
         if (val.isUndef())
             return sema.addConstUndef(result_ty);

-        var elem_buf: Value.ElemValueBuffer = undefined;
         const elems = try sema.arena.alloc(Value, vec_len);
         for (elems, 0..) |*elem, i| {
-            const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
+            const elem_val = try val.elemValue(sema.mod, i);
             elem.* = try eval(elem_val, scalar_ty, sema.arena, sema.mod);
         }
         return sema.addConstant(
@@ -18717,7 +18701,12 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     return block.addUnOp(.tag_name, casted_operand);
 }

-fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+fn zirReify(
+    sema: *Sema,
+    block: *Block,
+    extended: Zir.Inst.Extended.InstData,
+    inst: Zir.Inst.Index,
+) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
     const name_strategy = @intToEnum(Zir.Inst.NameStrategy, extended.small);
     const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
@@ -18730,7 +18719,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
     const union_val = val.cast(Value.Payload.Union).?.data;
     const target = mod.getTarget();
     const tag_index = type_info_ty.unionTagFieldIndex(union_val.tag, mod).?;
-    if (union_val.val.anyUndef(mod)) return sema.failWithUseOfUndef(block, src);
+    if (try union_val.val.anyUndef(mod)) return sema.failWithUseOfUndef(block, src);
     switch (@intToEnum(std.builtin.TypeId, tag_index)) {
         .Type => return Air.Inst.Ref.type_type,
         .Void => return Air.Inst.Ref.void_type,
@@ -18845,10 +18834,10 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
             } else if (ptr_size == .C) {
                 if (!try sema.validateExternType(elem_ty, .other)) {
                     const msg = msg: {
-                        const msg = try sema.errMsg(block, src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(sema.mod)});
+                        const msg = try sema.errMsg(block, src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(mod)});
                         errdefer msg.destroy(sema.gpa);

-                        const src_decl = sema.mod.declPtr(block.src_decl);
+                        const src_decl = mod.declPtr(block.src_decl);
                         try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl), elem_ty, .other);

                         try sema.addDeclaredHereNote(msg, elem_ty);
@@ -18893,7 +18882,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
                 break :blk (try sema.pointerDeref(block, src, p.data, ptr_ty)).?;
             } else null;

-            const ty = try Type.array(sema.arena, len, sentinel, child_ty, sema.mod);
+            const ty = try Type.array(sema.arena, len, sentinel, child_ty, mod);
             return sema.addType(ty);
         },
         .Optional => {
@@ -18938,13 +18927,12 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
             try names.ensureUnusedCapacity(sema.arena, len);
             var i: usize = 0;
             while (i < len) : (i += 1) {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = slice_val.ptr.elemValueBuffer(mod, i, &buf);
+                const elem_val = try slice_val.ptr.elemValue(mod, i);
                 const struct_val = elem_val.castTag(.aggregate).?.data;
                 // TODO use reflection instead of magic numbers here
                 // error_set: type,
                 const name_val = struct_val[0];
-                const name_str = try name_val.toAllocatedBytes(Type.const_slice_u8, sema.arena, sema.mod);
+                const name_str = try name_val.toAllocatedBytes(Type.const_slice_u8, sema.arena, mod);

                 const kv = try mod.getErrorValue(name_str);
                 const gop = names.getOrPutAssumeCapacity(kv.key);
@@ -19061,7 +19049,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
             var field_i: usize = 0;
             while (field_i < fields_len) : (field_i += 1) {
-                const elem_val = try fields_val.elemValue(sema.mod, sema.arena, field_i);
+                const elem_val = try fields_val.elemValue(mod, field_i);
                 const field_struct_val: []const Value = elem_val.castTag(.aggregate).?.data;
                 // TODO use reflection instead of magic numbers here
                 // name: []const u8
@@ -19072,7 +19060,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
                 const field_name = try name_val.toAllocatedBytes(
                     Type.const_slice_u8,
                     new_decl_arena_allocator,
-                    sema.mod,
+                    mod,
                 );

                 if (!try sema.intFitsInType(value_val, enum_obj.tag_ty, null)) {
@@ -19183,7 +19171,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
                 Type.Tag.union_tagged
             else if (layout != .Auto)
                 Type.Tag.@"union"
-            else switch (block.sema.mod.optimizeMode()) {
+            else switch (mod.optimizeMode()) {
                 .Debug, .ReleaseSafe => Type.Tag.union_safety_tagged,
                 .ReleaseFast, .ReleaseSmall => Type.Tag.@"union",
             };
@@ -19236,7 +19224,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
             var i: usize = 0;
             while (i < fields_len) : (i += 1) {
-                const elem_val = try fields_val.elemValue(sema.mod, sema.arena, i);
+                const elem_val = try fields_val.elemValue(mod, i);
                 const field_struct_val = elem_val.castTag(.aggregate).?.data;
                 // TODO use reflection instead of magic numbers here
                 // name: []const u8
@@ -19249,7 +19237,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
                 const field_name = try name_val.toAllocatedBytes(
                     Type.const_slice_u8,
                     new_decl_arena_allocator,
-                    sema.mod,
+                    mod,
                 );

                 if (enum_field_names) |set| {
@@ -19260,7 +19248,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
                     const enum_has_field = names.orderedRemove(field_name);
                     if (!enum_has_field) {
                         const msg = msg: {
-                            const msg = try sema.errMsg(block, src, "no field named '{s}' in enum '{}'", .{ field_name, union_obj.tag_ty.fmt(sema.mod) });
+                            const msg = try sema.errMsg(block, src, "no field named '{s}' in enum '{}'", .{ field_name, union_obj.tag_ty.fmt(mod) });
                             errdefer msg.destroy(sema.gpa);
                             try sema.addDeclaredHereNote(msg, union_obj.tag_ty);
                             break :msg msg;
@@ -19293,10 +19281,10 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
                 }
                 if (union_obj.layout == .Extern and !try sema.validateExternType(field_ty, .union_field)) {
                     const msg = msg: {
-                        const msg = try sema.errMsg(block, src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
+                        const msg = try sema.errMsg(block, src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
                         errdefer msg.destroy(sema.gpa);

-                        const src_decl = sema.mod.declPtr(block.src_decl);
+                        const src_decl = mod.declPtr(block.src_decl);
                         try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl), field_ty, .union_field);

                         try sema.addDeclaredHereNote(msg, field_ty);
@@ -19305,10 +19293,10 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
                     return sema.failWithOwnedErrorMsg(msg);
                 } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) {
                     const msg = msg: {
-                        const msg = try sema.errMsg(block, src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
+                        const msg = try sema.errMsg(block, src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
                         errdefer msg.destroy(sema.gpa);

-                        const src_decl = sema.mod.declPtr(block.src_decl);
+                        const src_decl = mod.declPtr(block.src_decl);
                         try sema.explainWhyTypeIsNotPacked(msg, src.toSrcLoc(src_decl), field_ty);

                         try sema.addDeclaredHereNote(msg, field_ty);
@@ -19386,8 +19374,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
             var noalias_bits: u32 = 0;
             var i: usize = 0;
             while (i < args_len) : (i += 1) {
-                var arg_buf: Value.ElemValueBuffer = undefined;
-                const arg = args_slice_val.ptr.elemValueBuffer(mod, i, &arg_buf);
+                const arg = try args_slice_val.ptr.elemValue(mod, i);
                 const arg_val = arg.castTag(.aggregate).?.data;
                 // TODO use reflection instead of magic numbers here
                 // is_generic: bool,
@@ -19486,7 +19473,7 @@ fn reifyStruct(
     try struct_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
     var i: usize = 0;
     while (i < fields_len) : (i += 1) {
-        const elem_val = try fields_val.elemValue(sema.mod, sema.arena, i);
+        const elem_val = try fields_val.elemValue(sema.mod, i);
         const field_struct_val = elem_val.castTag(.aggregate).?.data;
         // TODO use reflection instead of magic numbers here
         // name: []const u8
@@ -19892,12 +19879,7 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         if (addr != 0 and ptr_align != 0 and addr % ptr_align != 0)
             return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{ptr_ty.fmt(sema.mod)});

-        const val_payload = try sema.arena.create(Value.Payload.U64);
-        val_payload.* = .{
-            .base = .{ .tag = .int_u64 },
-            .data = addr,
-        };
-        return sema.addConstant(ptr_ty, Value.initPayload(&val_payload.base));
+        return sema.addConstant(ptr_ty, try mod.intValue(ptr_ty, addr));
     }

     try sema.requireRuntimeBlock(block, src, operand_src);
@@ -19908,14 +19890,9 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         }

         if (ptr_align > 1) {
-            const val_payload = try sema.arena.create(Value.Payload.U64);
-            val_payload.* = .{
-                .base = .{ .tag = .int_u64 },
-                .data = ptr_align - 1,
-            };
             const align_minus_1 = try sema.addConstant(
                 Type.usize,
-                Value.initPayload(&val_payload.base),
+                try mod.intValue(Type.usize, ptr_align - 1),
             );
             const remainder = try block.addBinOp(.bit_and, operand_coerced, align_minus_1);
             const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
@@ -20254,10 +20231,9 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod),
             );
         }
-        var elem_buf: Value.ElemValueBuffer = undefined;
         const elems = try sema.arena.alloc(Value, operand_ty.vectorLen(mod));
         for (elems, 0..) |*elem, i| {
-            const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
+            const elem_val = try val.elemValue(sema.mod, i);
             elem.* = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod);
         }
         return sema.addConstant(
@@ -20302,14 +20278,9 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     if (block.wantSafety() and dest_align > 1 and
         try sema.typeHasRuntimeBits(ptr_info.pointee_type))
     {
-        const val_payload = try sema.arena.create(Value.Payload.U64);
-        val_payload.* = .{
-            .base = .{ .tag = .int_u64 },
-            .data = dest_align - 1,
-        };
         const align_minus_1 = try sema.addConstant(
             Type.usize,
-            Value.initPayload(&val_payload.base),
+            try mod.intValue(Type.usize, dest_align - 1),
         );
         const actual_ptr = if (ptr_ty.isSlice(mod))
             try sema.analyzeSlicePtr(block, ptr_src, ptr, ptr_ty)
@@ -20359,13 +20330,12 @@ fn zirBitCount(
     if (try sema.resolveMaybeUndefVal(operand)) |val| {
         if (val.isUndef()) return sema.addConstUndef(result_ty);

-        var elem_buf: Value.ElemValueBuffer = undefined;
         const elems = try sema.arena.alloc(Value, vec_len);
         const scalar_ty = operand_ty.scalarType(mod);
         for (elems, 0..) |*elem, i| {
-            const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
+            const elem_val = try val.elemValue(sema.mod, i);
             const count = comptimeOp(elem_val, scalar_ty, mod);
-            elem.* = try Value.Tag.int_u64.create(sema.arena, count);
+            elem.* = try mod.intValue(scalar_ty, count);
         }
         return sema.addConstant(
             result_ty,
@@ -20429,10 +20399,9 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             return sema.addConstUndef(operand_ty);

         const vec_len = operand_ty.vectorLen(mod);
-        var elem_buf: Value.ElemValueBuffer = undefined;
         const elems = try sema.arena.alloc(Value, vec_len);
         for (elems, 0..) |*elem, i| {
-            const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
+            const elem_val = try val.elemValue(sema.mod, i);
             elem.* = try elem_val.byteSwap(operand_ty, mod, sema.arena);
         }
         return sema.addConstant(
@@ -20478,10 +20447,9 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
             return sema.addConstUndef(operand_ty);

         const vec_len = operand_ty.vectorLen(mod);
-        var elem_buf: Value.ElemValueBuffer = undefined;
         const elems = try sema.arena.alloc(Value, vec_len);
         for (elems, 0..) |*elem, i| {
-            const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
+            const elem_val = try val.elemValue(sema.mod, i);
             elem.* = try elem_val.bitReverse(scalar_ty, mod, sema.arena);
         }
         return sema.addConstant(
@@ -21241,11 +21209,10 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { if (operand_val.isUndef()) return sema.addConstUndef(scalar_ty); - var accum: Value = try operand_val.elemValue(mod, sema.arena, 0); - var elem_buf: Value.ElemValueBuffer = undefined; + var accum: Value = try operand_val.elemValue(mod, 0); var i: u32 = 1; while (i < vec_len) : (i += 1) { - const elem_val = operand_val.elemValueBuffer(mod, i, &elem_buf); + const elem_val = try operand_val.elemValue(mod, i); switch (operation) { .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, mod), .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, mod), @@ -21359,8 +21326,7 @@ fn analyzeShuffle( var i: usize = 0; while (i < mask_len) : (i += 1) { - var buf: Value.ElemValueBuffer = undefined; - const elem = mask.elemValueBuffer(sema.mod, i, &buf); + const elem = try mask.elemValue(sema.mod, i); if (elem.isUndef()) continue; const int = elem.toSignedInt(mod); var unsigned: u32 = undefined; @@ -21398,8 +21364,7 @@ fn analyzeShuffle( i = 0; while (i < mask_len) : (i += 1) { - var buf: Value.ElemValueBuffer = undefined; - const mask_elem_val = mask.elemValueBuffer(sema.mod, i, &buf); + const mask_elem_val = try mask.elemValue(sema.mod, i); if (mask_elem_val.isUndef()) { values[i] = Value.undef; continue; @@ -21407,9 +21372,9 @@ fn analyzeShuffle( const int = mask_elem_val.toSignedInt(mod); const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int); if (int >= 0) { - values[i] = try a_val.elemValue(sema.mod, sema.arena, unsigned); + values[i] = try a_val.elemValue(sema.mod, unsigned); } else { - values[i] = try b_val.elemValue(sema.mod, sema.arena, unsigned); + values[i] = try b_val.elemValue(sema.mod, unsigned); } } const res_val = try Value.Tag.aggregate.create(sema.arena, values); @@ -21430,7 +21395,7 @@ fn analyzeShuffle( const expand_mask_values = try sema.arena.alloc(Value, max_len); i = 0; while (i < min_len) : (i += 1) { - expand_mask_values[i] = try Value.Tag.int_u64.create(sema.arena, i); + expand_mask_values[i] = try mod.intValue(Type.comptime_int, i); } while (i < max_len) : (i += 1) { expand_mask_values[i] = Value.negative_one; @@ -21509,15 +21474,14 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C if (maybe_b) |b_val| { if (b_val.isUndef()) return sema.addConstUndef(vec_ty); - var buf: Value.ElemValueBuffer = undefined; const elems = try sema.gpa.alloc(Value, vec_len); for (elems, 0..) |*elem, i| { - const pred_elem_val = pred_val.elemValueBuffer(sema.mod, i, &buf); + const pred_elem_val = try pred_val.elemValue(sema.mod, i); const should_choose_a = pred_elem_val.toBool(mod); if (should_choose_a) { - elem.* = a_val.elemValueBuffer(sema.mod, i, &buf); + elem.* = try a_val.elemValue(sema.mod, i); } else { - elem.* = b_val.elemValueBuffer(sema.mod, i, &buf); + elem.* = try b_val.elemValue(sema.mod, i); } } @@ -22067,12 +22031,10 @@ fn analyzeMinMax( cur_minmax = try sema.addConstant(simd_op.result_ty, result_val); continue; }; - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; const elems = try sema.arena.alloc(Value, vec_len); for (elems, 0..) 
|*elem, i| { - const lhs_elem_val = cur_val.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem_val = operand_val.elemValueBuffer(mod, i, &rhs_buf); + const lhs_elem_val = try cur_val.elemValue(mod, i); + const rhs_elem_val = try operand_val.elemValue(mod, i); elem.* = opFunc(lhs_elem_val, rhs_elem_val, mod); } cur_minmax = try sema.addConstant( @@ -22105,10 +22067,10 @@ fn analyzeMinMax( if (len == 0) break :blk orig_ty; if (elem_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats - var cur_min: Value = try val.elemValue(mod, sema.arena, 0); + var cur_min: Value = try val.elemValue(mod, 0); var cur_max: Value = cur_min; for (1..len) |idx| { - const elem_val = try val.elemValue(mod, sema.arena, idx); + const elem_val = try val.elemValue(mod, idx); if (elem_val.isUndef()) break :blk orig_ty; // can't refine undef if (Value.order(elem_val, cur_min, mod).compare(.lt)) cur_min = elem_val; if (Value.order(elem_val, cur_max, mod).compare(.gt)) cur_max = elem_val; @@ -23987,7 +23949,7 @@ fn fieldVal( if (mem.eql(u8, field_name, "len")) { return sema.addConstant( Type.usize, - try Value.Tag.int_u64.create(arena, inner_ty.arrayLen(mod)), + try mod.intValue(Type.usize, inner_ty.arrayLen(mod)), ); } else if (mem.eql(u8, field_name, "ptr") and is_pointer_to) { const ptr_info = object_ty.ptrInfo(mod); @@ -24179,7 +24141,7 @@ fn fieldPtr( defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( Type.usize, - try Value.Tag.int_u64.create(anon_decl.arena(), inner_ty.arrayLen(mod)), + try mod.intValue(Type.usize, inner_ty.arrayLen(mod)), 0, // default alignment )); } else { @@ -25352,7 +25314,7 @@ fn elemValArray( } if (maybe_index_val) |index_val| { const index = @intCast(usize, index_val.toUnsignedInt(mod)); - const elem_val = try array_val.elemValue(sema.mod, sema.arena, index); + const elem_val = try array_val.elemValue(mod, index); return sema.addConstant(elem_ty, elem_val); } } @@ -25914,7 +25876,7 @@ fn coerceExtra( // we use a dummy pointer value with the required alignment. 
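The companion rewrite, visible in the `len` fields above and in the slice coercion that continues below, replaces arena-allocated `Value.Tag.int_u64` payloads with `Module.intValue`, which interns the constant together with its type. A sketch using the array-length case from these hunks:

```zig
// Before: an untyped u64 payload allocated in the Sema arena.
const len_old = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen(mod));

// After: a typed usize constant interned in the InternPool.
const len_new = try mod.intValue(Type.usize, array_ty.arrayLen(mod));
```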
const slice_val = try Value.Tag.slice.create(sema.arena, .{ .ptr = if (dest_info.@"align" != 0) - try Value.Tag.int_u64.create(sema.arena, dest_info.@"align") + try mod.intValue(Type.usize, dest_info.@"align") else try dest_info.pointee_type.lazyAbiAlignment(mod, sema.arena), .len = Value.zero, @@ -26022,7 +25984,7 @@ fn coerceExtra( .Float, .ComptimeFloat => switch (inst_ty.zigTypeTag(mod)) { .ComptimeFloat => { const val = try sema.resolveConstValue(block, .unneeded, inst, ""); - const result_val = try val.floatCast(sema.arena, dest_ty, target); + const result_val = try val.floatCast(sema.arena, dest_ty, mod); return try sema.addConstant(dest_ty, result_val); }, .Float => { @@ -26030,7 +25992,7 @@ fn coerceExtra( return sema.addConstUndef(dest_ty); } if (try sema.resolveMaybeUndefVal(inst)) |val| { - const result_val = try val.floatCast(sema.arena, dest_ty, target); + const result_val = try val.floatCast(sema.arena, dest_ty, mod); if (!val.eql(result_val, inst_ty, sema.mod)) { return sema.fail( block, @@ -27431,11 +27393,13 @@ fn storePtrVal( const buffer = try sema.gpa.alloc(u8, abi_size); defer sema.gpa.free(buffer); reinterpret.val_ptr.*.writeToMemory(mut_kit.ty, sema.mod, buffer) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, error.ReinterpretDeclRef => unreachable, error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(sema.mod)}), }; operand_val.writeToMemory(operand_ty, sema.mod, buffer[reinterpret.byte_offset..]) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, error.ReinterpretDeclRef => unreachable, error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(sema.mod)}), @@ -27589,7 +27553,7 @@ fn beginComptimePtrMutation( assert(bytes.len >= dest_len); const elems = try arena.alloc(Value, @intCast(usize, dest_len)); for (elems, 0..) |*elem, i| { - elem.* = try Value.Tag.int_u64.create(arena, bytes[i]); + elem.* = try mod.intValue(elem_ty, bytes[i]); } val_ptr.* = try Value.Tag.aggregate.create(arena, elems); @@ -27618,7 +27582,7 @@ fn beginComptimePtrMutation( const bytes = sema.mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; const elems = try arena.alloc(Value, @intCast(usize, dest_len)); for (bytes, 0..) |byte, i| { - elems[i] = try Value.Tag.int_u64.create(arena, byte); + elems[i] = try mod.intValue(elem_ty, byte); } if (parent.ty.sentinel(mod)) |sent_val| { assert(elems.len == bytes.len + 1); @@ -28111,7 +28075,7 @@ fn beginComptimePtrLoad( maybe_array_ty: ?Type, ) ComptimePtrLoadError!ComptimePtrLoadKit { const mod = sema.mod; - const target = sema.mod.getTarget(); + const target = mod.getTarget(); var deref: ComptimePtrLoadKit = switch (ptr_val.ip_index) { .null_value => { @@ -28128,7 +28092,7 @@ fn beginComptimePtrLoad( else => unreachable, }; const is_mutable = ptr_val.tag() == .decl_ref_mut; - const decl = sema.mod.declPtr(decl_index); + const decl = mod.declPtr(decl_index); const decl_tv = try decl.typedValue(); if (decl_tv.val.tagIsVariable()) return error.RuntimeLoad; @@ -28150,7 +28114,7 @@ fn beginComptimePtrLoad( // to succeed, meaning that elem ptrs of the same elem_ty are coalesced. 
Here we check that // our parent is not an elem_ptr with the same elem_ty, since that would be "unflattened" if (elem_ptr.array_ptr.castTag(.elem_ptr)) |parent_elem_ptr| { - assert(!(parent_elem_ptr.data.elem_ty.eql(elem_ty, sema.mod))); + assert(!(parent_elem_ptr.data.elem_ty.eql(elem_ty, mod))); } if (elem_ptr.index != 0) { @@ -28184,11 +28148,11 @@ fn beginComptimePtrLoad( if (maybe_array_ty) |load_ty| { // It's possible that we're loading a [N]T, in which case we'd like to slice // the pointee array directly from our parent array. - if (load_ty.isArrayOrVector(mod) and load_ty.childType(mod).eql(elem_ty, sema.mod)) { + if (load_ty.isArrayOrVector(mod) and load_ty.childType(mod).eql(elem_ty, mod)) { const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel(mod)); deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{ - .ty = try Type.array(sema.arena, N, null, elem_ty, sema.mod), - .val = try array_tv.val.sliceArray(sema.mod, sema.arena, elem_ptr.index, elem_ptr.index + N), + .ty = try Type.array(sema.arena, N, null, elem_ty, mod), + .val = try array_tv.val.sliceArray(mod, sema.arena, elem_ptr.index, elem_ptr.index + N), } else null; break :blk deref; } @@ -28209,7 +28173,7 @@ fn beginComptimePtrLoad( } deref.pointee = TypedValue{ .ty = elem_ty, - .val = try array_tv.val.elemValue(sema.mod, sema.arena, elem_ptr.index), + .val = try array_tv.val.elemValue(mod, elem_ptr.index), }; break :blk deref; }, @@ -28329,12 +28293,6 @@ fn beginComptimePtrLoad( break :blk try sema.beginComptimePtrLoad(block, src, opt_payload, null); }, - .zero, - .one, - .int_u64, - .int_i64, - .int_big_positive, - .int_big_negative, .variable, .extern_fn, .function, @@ -28342,7 +28300,10 @@ fn beginComptimePtrLoad( else => unreachable, }, - else => unreachable, + else => switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { + .int => return error.RuntimeLoad, + else => unreachable, + }, }; if (deref.pointee) |tv| { @@ -28373,9 +28334,9 @@ fn bitCast( if (old_bits != dest_bits) { return sema.fail(block, inst_src, "@bitCast size mismatch: destination type '{}' has {d} bits but source type '{}' has {d} bits", .{ - dest_ty.fmt(sema.mod), + dest_ty.fmt(mod), dest_bits, - old_ty.fmt(sema.mod), + old_ty.fmt(mod), old_bits, }); } @@ -28407,6 +28368,7 @@ fn bitCastVal( const buffer = try sema.gpa.alloc(u8, abi_size); defer sema.gpa.free(buffer); val.writeToMemory(old_ty, mod, buffer) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, error.ReinterpretDeclRef => return null, error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{old_ty.fmt(mod)}), @@ -28427,7 +28389,7 @@ fn coerceArrayPtrToSlice( const array_ty = ptr_array_ty.childType(mod); const slice_val = try Value.Tag.slice.create(sema.arena, .{ .ptr = val, - .len = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen(mod)), + .len = try mod.intValue(Type.usize, array_ty.arrayLen(mod)), }); return sema.addConstant(dest_ty, slice_val); } @@ -28781,7 +28743,7 @@ fn coerceArrayLike( for (element_vals, 0..) 
|*elem, i| { const index_ref = try sema.addConstant( Type.usize, - try Value.Tag.int_u64.create(sema.arena, i), + try mod.intValue(Type.usize, i), ); const src = inst_src; // TODO better source location const elem_src = inst_src; // TODO better source location @@ -29634,7 +29596,7 @@ fn analyzeSlice( var end_is_len = uncasted_end_opt == .none; const end = e: { if (array_ty.zigTypeTag(mod) == .Array) { - const len_val = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen(mod)); + const len_val = try mod.intValue(Type.usize, array_ty.arrayLen(mod)); if (!end_is_len) { const end = if (by_length) end: { @@ -29643,8 +29605,8 @@ fn analyzeSlice( break :end try sema.coerce(block, Type.usize, uncasted_end, end_src); } else try sema.coerce(block, Type.usize, uncasted_end_opt, end_src); if (try sema.resolveMaybeUndefVal(end)) |end_val| { - const len_s_val = try Value.Tag.int_u64.create( - sema.arena, + const len_s_val = try mod.intValue( + Type.usize, array_ty.arrayLenIncludingSentinel(mod), ); if (!(try sema.compareAll(end_val, .lte, len_s_val, Type.usize))) { @@ -29689,12 +29651,10 @@ fn analyzeSlice( return sema.fail(block, src, "slice of undefined", .{}); } const has_sentinel = slice_ty.sentinel(mod) != null; - var int_payload: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = slice_val.sliceLen(mod) + @boolToInt(has_sentinel), - }; - const slice_len_val = Value.initPayload(&int_payload.base); - if (!(try sema.compareAll(end_val, .lte, slice_len_val, Type.usize))) { + const slice_len = slice_val.sliceLen(mod); + const len_plus_sent = slice_len + @boolToInt(has_sentinel); + const slice_len_val_with_sentinel = try mod.intValue(Type.usize, len_plus_sent); + if (!(try sema.compareAll(end_val, .lte, slice_len_val_with_sentinel, Type.usize))) { const sentinel_label: []const u8 = if (has_sentinel) " +1 (sentinel)" else @@ -29712,13 +29672,10 @@ fn analyzeSlice( ); } - // If the slice has a sentinel, we subtract one so that - // end_is_len is only true if it equals the length WITHOUT - // the sentinel, so we don't add a sentinel type. - if (has_sentinel) { - int_payload.data -= 1; - } - + // If the slice has a sentinel, we consider end_is_len + // to be true only if it equals the length WITHOUT the + // sentinel, so we don't add a sentinel type.
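Concretely, for a hypothetical sentinel-terminated slice of type `[:0]u8` with length 3, the two lengths involved here differ by one: the bound check accepts `end <= 4` (length plus sentinel), while the `end_is_len` comparison that follows uses the plain length 3. A compressed restatement of the logic above:

```zig
const slice_len = slice_val.sliceLen(mod); // 3 for a length-3 [:0]u8
const with_sent = try mod.intValue(Type.usize, slice_len + @boolToInt(has_sentinel)); // 4
// end must satisfy end <= with_sent, but end_is_len holds only when
// end == slice_len, so slicing up to the sentinel does not itself
// produce a sentinel-terminated result type.
```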
+ const slice_len_val = try mod.intValue(Type.usize, slice_len); if (end_val.eql(slice_len_val, Type.usize, mod)) { end_is_len = true; } @@ -30134,7 +30091,7 @@ fn cmpNumeric( } } - var bigint = try float128IntPartToBigInt(sema.gpa, lhs_val.toFloat(f128)); + var bigint = try float128IntPartToBigInt(sema.gpa, lhs_val.toFloat(f128, mod)); defer bigint.deinit(); if (lhs_val.floatHasFraction()) { if (lhs_is_signed) { @@ -30193,7 +30150,7 @@ fn cmpNumeric( } } - var bigint = try float128IntPartToBigInt(sema.gpa, rhs_val.toFloat(f128)); + var bigint = try float128IntPartToBigInt(sema.gpa, rhs_val.toFloat(f128, mod)); defer bigint.deinit(); if (rhs_val.floatHasFraction()) { if (rhs_is_signed) { @@ -31835,6 +31792,7 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .zero_u8 => unreachable, .one => unreachable, .one_usize => unreachable, + .negative_one => unreachable, .calling_convention_c => unreachable, .calling_convention_inline => unreachable, .void_value => unreachable, @@ -32462,11 +32420,8 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } if (fields_len > 0) { - var field_count_val: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = fields_len - 1, - }; - if (!(try sema.intFitsInType(Value.initPayload(&field_count_val.base), int_tag_ty, null))) { + const field_count_val = try mod.intValue(int_tag_ty, fields_len - 1); + if (!(try sema.intFitsInType(field_count_val, int_tag_ty, null))) { const msg = msg: { const msg = try sema.errMsg(&block_scope, tag_ty_src, "specified integer tag type cannot represent every field", .{}); errdefer msg.destroy(sema.gpa); @@ -33207,7 +33162,8 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { } fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) CompileError!Air.Inst.Ref { - return sema.addConstant(ty, try Value.Tag.int_u64.create(sema.arena, int)); + const mod = sema.mod; + return sema.addConstant(ty, try mod.intValue(ty, int)); } fn addConstUndef(sema: *Sema, ty: Type) CompileError!Air.Inst.Ref { @@ -33223,7 +33179,11 @@ pub fn addConstant(sema: *Sema, ty: Type, val: Value) SemaError!Air.Inst.Ref { .tag = .interned, .data = .{ .interned = val.ip_index }, }); - return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); + const result = Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); + // This assertion can be removed when the `ty` parameter is removed from + // this function thanks to the InternPool transition being complete. + assert(Type.eql(sema.typeOf(result), ty, sema.mod)); + return result; } const ty_inst = try sema.addType(ty); try sema.air_values.append(gpa, val); @@ -33833,19 +33793,18 @@ fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value { const mod = sema.mod; if (ty.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try sema.intAddScalar(lhs_elem, rhs_elem); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try sema.intAddScalar(lhs_elem, rhs_elem, scalar_ty); } return Value.Tag.aggregate.create(sema.arena, result_data); } - return sema.intAddScalar(lhs, rhs); + return sema.intAddScalar(lhs, rhs, ty); } -fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value) !Value { +fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value { const mod = sema.mod; // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. @@ -33859,7 +33818,7 @@ fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value) !Value { ); var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.add(lhs_bigint, rhs_bigint); - return Value.fromBigInt(sema.arena, result_bigint.toConst()); + return mod.intValue_big(scalar_ty, result_bigint.toConst()); } /// Supports both floats and ints; handles undefined. @@ -33884,28 +33843,22 @@ fn numberAddWrapScalar( return overflow_result.wrapped_result; } -fn intSub( - sema: *Sema, - lhs: Value, - rhs: Value, - ty: Type, -) !Value { +fn intSub(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value { const mod = sema.mod; if (ty.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - scalar.* = try sema.intSubScalar(lhs_elem, rhs_elem); + const lhs_elem = try lhs.elemValue(sema.mod, i); + const rhs_elem = try rhs.elemValue(sema.mod, i); + scalar.* = try sema.intSubScalar(lhs_elem, rhs_elem, scalar_ty); } return Value.Tag.aggregate.create(sema.arena, result_data); } - return sema.intSubScalar(lhs, rhs); + return sema.intSubScalar(lhs, rhs, ty); } -fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value) !Value { +fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value { const mod = sema.mod; // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. @@ -33919,7 +33872,7 @@ fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value) !Value { ); var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.sub(lhs_bigint, rhs_bigint); - return Value.fromBigInt(sema.arena, result_bigint.toConst()); + return mod.intValue_big(scalar_ty, result_bigint.toConst()); } /// Supports both floats and ints; handles undefined. @@ -33954,10 +33907,8 @@ fn floatAdd( if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); + const lhs_elem = try lhs.elemValue(sema.mod, i); + const rhs_elem = try rhs.elemValue(sema.mod, i); scalar.* = try sema.floatAddScalar(lhs_elem, rhs_elem, float_type.scalarType(mod)); } return Value.Tag.aggregate.create(sema.arena, result_data); @@ -33971,31 +33922,32 @@ fn floatAddScalar( rhs: Value, float_type: Type, ) !Value { + const mod = sema.mod; const target = sema.mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const lhs_val = lhs.toFloat(f16); - const rhs_val = rhs.toFloat(f16); + const lhs_val = lhs.toFloat(f16, mod); + const rhs_val = rhs.toFloat(f16, mod); return Value.Tag.float_16.create(sema.arena, lhs_val + rhs_val); }, 32 => { - const lhs_val = lhs.toFloat(f32); - const rhs_val = rhs.toFloat(f32); + const lhs_val = lhs.toFloat(f32, mod); + const rhs_val = rhs.toFloat(f32, mod); return Value.Tag.float_32.create(sema.arena, lhs_val + rhs_val); }, 64 => { - const lhs_val = lhs.toFloat(f64); - const rhs_val = rhs.toFloat(f64); + const lhs_val = lhs.toFloat(f64, mod); + const rhs_val = rhs.toFloat(f64, mod); return Value.Tag.float_64.create(sema.arena, lhs_val + rhs_val); }, 80 => { - const lhs_val = lhs.toFloat(f80); - const rhs_val = rhs.toFloat(f80); + const lhs_val = lhs.toFloat(f80, mod); + const rhs_val = rhs.toFloat(f80, mod); return Value.Tag.float_80.create(sema.arena, lhs_val + rhs_val); }, 128 => { - const lhs_val = lhs.toFloat(f128); - const rhs_val = rhs.toFloat(f128); + const lhs_val = lhs.toFloat(f128, mod); + const rhs_val = rhs.toFloat(f128, mod); return Value.Tag.float_128.create(sema.arena, lhs_val + rhs_val); }, else => unreachable, @@ -34012,10 +33964,8 @@ fn floatSub( if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); + const lhs_elem = try lhs.elemValue(sema.mod, i); + const rhs_elem = try rhs.elemValue(sema.mod, i); scalar.* = try sema.floatSubScalar(lhs_elem, rhs_elem, float_type.scalarType(mod)); } return Value.Tag.aggregate.create(sema.arena, result_data); @@ -34029,31 +33979,32 @@ fn floatSubScalar( rhs: Value, float_type: Type, ) !Value { + const mod = sema.mod; const target = sema.mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const lhs_val = lhs.toFloat(f16); - const rhs_val = rhs.toFloat(f16); + const lhs_val = lhs.toFloat(f16, mod); + const rhs_val = rhs.toFloat(f16, mod); return Value.Tag.float_16.create(sema.arena, lhs_val - rhs_val); }, 32 => { - const lhs_val = lhs.toFloat(f32); - const rhs_val = rhs.toFloat(f32); + const lhs_val = lhs.toFloat(f32, mod); + const rhs_val = rhs.toFloat(f32, mod); return Value.Tag.float_32.create(sema.arena, lhs_val - rhs_val); }, 64 => { - const lhs_val = lhs.toFloat(f64); - const rhs_val = rhs.toFloat(f64); + const lhs_val = lhs.toFloat(f64, mod); + const rhs_val = rhs.toFloat(f64, mod); return Value.Tag.float_64.create(sema.arena, lhs_val - rhs_val); }, 80 => { - const lhs_val = lhs.toFloat(f80); - const rhs_val = rhs.toFloat(f80); + const lhs_val = lhs.toFloat(f80, mod); + const rhs_val = rhs.toFloat(f80, mod); return Value.Tag.float_80.create(sema.arena, lhs_val - rhs_val); }, 128 => { - const lhs_val = lhs.toFloat(f128); - const rhs_val = rhs.toFloat(f128); + const lhs_val = lhs.toFloat(f128, mod); + const rhs_val = rhs.toFloat(f128, mod); return Value.Tag.float_128.create(sema.arena, lhs_val - rhs_val); }, else => unreachable, @@ -34071,10 +34022,8 @@ fn intSubWithOverflow( const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); + const lhs_elem = try lhs.elemValue(sema.mod, i); + const rhs_elem = try rhs.elemValue(sema.mod, i); const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod)); overflowed_data[i] = of_math_result.overflow_bit; scalar.* = of_math_result.wrapped_result; @@ -34106,7 +34055,7 @@ fn intSubWithOverflowScalar( ); var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; const overflowed = result_bigint.subWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits); - const wrapped_result = try Value.fromBigInt(sema.arena, result_bigint.toConst()); + const wrapped_result = try mod.intValue_big(ty, result_bigint.toConst()); return Value.OverflowArithmeticResult{ .overflow_bit = Value.boolToInt(overflowed), .wrapped_result = wrapped_result, @@ -34126,8 +34075,7 @@ fn floatToInt( const elem_ty = float_ty.childType(mod); const result_data = try sema.arena.alloc(Value, float_ty.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(sema.mod, i, &buf); + const elem_val = try val.elemValue(sema.mod, i); scalar.* = try sema.floatToIntScalar(block, src, elem_val, elem_ty, int_ty.scalarType(mod)); } return Value.Tag.aggregate.create(sema.arena, result_data); @@ -34168,9 +34116,9 @@ fn floatToIntScalar( float_ty: Type, int_ty: Type, ) CompileError!Value { - const Limb = std.math.big.Limb; + const mod = sema.mod; - const float = val.toFloat(f128); + const float = val.toFloat(f128, mod); if (std.math.isNan(float)) { return sema.fail(block, src, "float value NaN cannot be stored in integer type '{}'", .{ int_ty.fmt(sema.mod), @@ -34185,11 +34133,7 @@ fn floatToIntScalar( var big_int = try float128IntPartToBigInt(sema.arena, float); defer big_int.deinit(); - const result_limbs = try sema.arena.dupe(Limb, big_int.toConst().limbs); - const result = if (!big_int.isPositive()) - try Value.Tag.int_big_negative.create(sema.arena, result_limbs) - else - try Value.Tag.int_big_positive.create(sema.arena, result_limbs); + const result = try mod.intValue_big(int_ty, big_int.toConst()); if (!(try sema.intFitsInType(result, int_ty, null))) { return sema.fail(block, src, "float value '{}' cannot be stored in integer type '{}'", .{ @@ -34209,8 +34153,8 @@ fn intFitsInType( ty: Type, vector_index: ?*usize, ) CompileError!bool { + if (ty.ip_index == .comptime_int_type) return true; const mod = sema.mod; - const target = mod.getTarget(); switch (val.ip_index) { .undef, .zero, @@ -34218,103 +34162,26 @@ fn intFitsInType( .zero_u8, => return true, - .one, - .one_usize, - => switch (ty.zigTypeTag(mod)) { - .Int => { - const info = ty.intInfo(mod); - return switch (info.signedness) { - .signed => info.bits >= 2, - .unsigned => info.bits >= 1, - }; - }, - .ComptimeInt => return true, - else => unreachable, - }, - .none => switch (val.tag()) { - .zero => return true, - - .one => switch (ty.zigTypeTag(mod)) { - .Int => { - const info = ty.intInfo(mod); - return switch (info.signedness) { - .signed => info.bits >= 2, - .unsigned => info.bits >= 1, - }; - }, - .ComptimeInt => return true, - else => unreachable, - }, - - .lazy_align => switch (ty.zigTypeTag(mod)) { - .Int => { - const info = ty.intInfo(mod); - const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed); - // If it is u16 or bigger we know the alignment fits without resolving it. - if (info.bits >= max_needed_bits) return true; - const x = try sema.typeAbiAlignment(val.castTag(.lazy_align).?.data); - if (x == 0) return true; - const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); - return info.bits >= actual_needed_bits; - }, - .ComptimeInt => return true, - else => unreachable, - }, - .lazy_size => switch (ty.zigTypeTag(mod)) { - .Int => { - const info = ty.intInfo(mod); - const max_needed_bits = @as(u16, 64) + @boolToInt(info.signedness == .signed); - // If it is u64 or bigger we know the size fits without resolving it. 
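The bit-counting arithmetic survives this rewrite unchanged and is easy to sanity-check with a concrete value before the rest of the old switch is deleted below: an alignment of 16 needs `log2(16) + 1 = 5` bits unsigned, or 6 bits signed, comfortably under the `u16` fast path. As a standalone restatement (hypothetical helper, mirroring the expression in the hunk):

```zig
// How many bits an integer type needs to represent x (x >= 1).
fn bitsNeeded(x: u64, signed: bool) u16 {
    return @intCast(u16, std.math.log2(x) + 1 + @boolToInt(signed));
}
// bitsNeeded(16, false) == 5; bitsNeeded(16, true) == 6
```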
- if (info.bits >= max_needed_bits) return true; - const x = try sema.typeAbiSize(val.castTag(.lazy_size).?.data); - if (x == 0) return true; - const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); - return info.bits >= actual_needed_bits; - }, - .ComptimeInt => return true, - else => unreachable, - }, - - .int_u64 => switch (ty.zigTypeTag(mod)) { - .Int => { - const x = val.castTag(.int_u64).?.data; - if (x == 0) return true; - const info = ty.intInfo(mod); - const needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); - return info.bits >= needed_bits; - }, - .ComptimeInt => return true, - else => unreachable, - }, - .int_i64 => switch (ty.zigTypeTag(mod)) { - .Int => { - const x = val.castTag(.int_i64).?.data; - if (x == 0) return true; - const info = ty.intInfo(mod); - if (info.signedness == .unsigned and x < 0) - return false; - var buffer: Value.BigIntSpace = undefined; - return (try val.toBigIntAdvanced(&buffer, mod, sema)).fitsInTwosComp(info.signedness, info.bits); - }, - .ComptimeInt => return true, - else => unreachable, - }, - .int_big_positive => switch (ty.zigTypeTag(mod)) { - .Int => { - const info = ty.intInfo(mod); - return val.castTag(.int_big_positive).?.asBigInt().fitsInTwosComp(info.signedness, info.bits); - }, - .ComptimeInt => return true, - else => unreachable, + .lazy_align => { + const info = ty.intInfo(mod); + const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed); + // If it is u16 or bigger we know the alignment fits without resolving it. + if (info.bits >= max_needed_bits) return true; + const x = try sema.typeAbiAlignment(val.castTag(.lazy_align).?.data); + if (x == 0) return true; + const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); + return info.bits >= actual_needed_bits; }, - .int_big_negative => switch (ty.zigTypeTag(mod)) { - .Int => { - const info = ty.intInfo(mod); - return val.castTag(.int_big_negative).?.asBigInt().fitsInTwosComp(info.signedness, info.bits); - }, - .ComptimeInt => return true, - else => unreachable, + .lazy_size => { + const info = ty.intInfo(mod); + const max_needed_bits = @as(u16, 64) + @boolToInt(info.signedness == .signed); + // If it is u64 or bigger we know the size fits without resolving it. 
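Every integer the deleted tags used to classify by hand (`int_u64`, `int_i64`, `int_big_positive`, `int_big_negative`) now funnels through one arm keyed off the InternPool, shown a few lines further down. Its shape, for reference:

```zig
else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
    .int => |int| {
        const info = ty.intInfo(mod);
        var buffer: InternPool.Key.Int.Storage.BigIntSpace = undefined;
        // One path regardless of the integer's sign or magnitude.
        return int.storage.toBigInt(&buffer).fitsInTwosComp(info.signedness, info.bits);
    },
    else => unreachable,
},
```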
+ if (info.bits >= max_needed_bits) return true; + const x = try sema.typeAbiSize(val.castTag(.lazy_size).?.data); + if (x == 0) return true; + const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); + return info.bits >= actual_needed_bits; }, .the_only_possible_value => { @@ -34327,17 +34194,14 @@ fn intFitsInType( .decl_ref, .function, .variable, - => switch (ty.zigTypeTag(mod)) { - .Int => { - const info = ty.intInfo(mod); - const ptr_bits = target.ptrBitWidth(); - return switch (info.signedness) { - .signed => info.bits > ptr_bits, - .unsigned => info.bits >= ptr_bits, - }; - }, - .ComptimeInt => return true, - else => unreachable, + => { + const info = ty.intInfo(mod); + const target = mod.getTarget(); + const ptr_bits = target.ptrBitWidth(); + return switch (info.signedness) { + .signed => info.bits > ptr_bits, + .unsigned => info.bits >= ptr_bits, + }; }, .aggregate => { @@ -34354,22 +34218,22 @@ fn intFitsInType( else => unreachable, }, - else => @panic("TODO"), + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| { + const info = ty.intInfo(mod); + var buffer: InternPool.Key.Int.Storage.BigIntSpace = undefined; + const big_int = int.storage.toBigInt(&buffer); + return big_int.fitsInTwosComp(info.signedness, info.bits); + }, + else => unreachable, + }, } } -fn intInRange( - sema: *Sema, - tag_ty: Type, - int_val: Value, - end: usize, -) !bool { +fn intInRange(sema: *Sema, tag_ty: Type, int_val: Value, end: usize) !bool { + const mod = sema.mod; if (!(try int_val.compareAllWithZeroAdvanced(.gte, sema))) return false; - var end_payload: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = end, - }; - const end_val = Value.initPayload(&end_payload.base); + const end_val = try mod.intValue(tag_ty, end); if (!(try sema.compareAll(int_val, .lt, end_val, tag_ty))) return false; return true; } @@ -34426,10 +34290,8 @@ fn intAddWithOverflow( const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); + const lhs_elem = try lhs.elemValue(sema.mod, i); + const rhs_elem = try rhs.elemValue(sema.mod, i); const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod)); overflowed_data[i] = of_math_result.overflow_bit; scalar.* = of_math_result.wrapped_result; @@ -34461,7 +34323,7 @@ fn intAddWithOverflowScalar( ); var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; const overflowed = result_bigint.addWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits); - const result = try Value.fromBigInt(sema.arena, result_bigint.toConst()); + const result = try mod.intValue_big(ty, result_bigint.toConst()); return Value.OverflowArithmeticResult{ .overflow_bit = Value.boolToInt(overflowed), .wrapped_result = result, @@ -34483,10 +34345,8 @@ fn compareAll( if (ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < ty.vectorLen(mod)) : (i += 1) { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); + const lhs_elem = try lhs.elemValue(sema.mod, i); + const rhs_elem = try rhs.elemValue(sema.mod, i); if (!(try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod)))) { return false; } @@ -34532,10 +34392,8 @@ fn compareVector( assert(ty.zigTypeTag(mod) == .Vector); const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); + const lhs_elem = try lhs.elemValue(sema.mod, i); + const rhs_elem = try rhs.elemValue(sema.mod, i); const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod)); scalar.* = Value.makeBool(res_bool); } diff --git a/src/TypedValue.zig b/src/TypedValue.zig index ee9a8abf0f..28212a164c 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -41,8 +41,8 @@ pub fn hash(tv: TypedValue, hasher: *std.hash.Wyhash, mod: *Module) void { return tv.val.hash(tv.ty, hasher, mod); } -pub fn enumToInt(tv: TypedValue, buffer: *Value.Payload.U64) Value { - return tv.val.enumToInt(tv.ty, buffer); +pub fn enumToInt(tv: TypedValue, mod: *Module) Allocator.Error!Value { + return tv.val.enumToInt(tv.ty, mod); } const max_aggregate_items = 100; @@ -157,14 +157,8 @@ pub fn print( return writer.writeAll(" }"); }, - .zero => return writer.writeAll("0"), - .one => return writer.writeAll("1"), .the_only_possible_value => return writer.writeAll("0"), .ty => return val.castTag(.ty).?.data.print(writer, mod), - .int_u64 => return std.fmt.formatIntValue(val.castTag(.int_u64).?.data, "", .{}, writer), - .int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", .{}, writer), - .int_big_positive => return writer.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}), - .int_big_negative => return writer.print("{}", .{val.castTag(.int_big_negative).?.asBigInt()}), .lazy_align => { const sub_ty = val.castTag(.lazy_align).?.data; const x = sub_ty.abiAlignment(mod); @@ -313,8 +307,9 @@ pub fn print( var 
i: u32 = 0; while (i < max_len) : (i += 1) { - var elem_buf: Value.ElemValueBuffer = undefined; - const elem_val = payload.ptr.elemValueBuffer(mod, i, &elem_buf); + const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) { + error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic + }; if (elem_val.isUndef()) break :str; buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(mod)) orelse break :str; } @@ -330,10 +325,12 @@ pub fn print( var i: u32 = 0; while (i < max_len) : (i += 1) { if (i != 0) try writer.writeAll(", "); - var buf: Value.ElemValueBuffer = undefined; + const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) { + error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic + }; try print(.{ .ty = elem_ty, - .val = payload.ptr.elemValueBuffer(mod, i, &buf), + .val = elem_val, }, writer, level - 1, mod); } if (len > max_aggregate_items) { diff --git a/src/Zir.zig b/src/Zir.zig index 8c03dfd060..34479cce5e 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -2120,6 +2120,7 @@ pub const Inst = struct { zero_u8 = @enumToInt(InternPool.Index.zero_u8), one = @enumToInt(InternPool.Index.one), one_usize = @enumToInt(InternPool.Index.one_usize), + negative_one = @enumToInt(InternPool.Index.negative_one), calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c), calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline), void_value = @enumToInt(InternPool.Index.void_value), diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index ea7134c603..327e2c13e0 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3083,20 +3083,21 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { }, .Bool => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) }, .Float => switch (ty.floatBits(func.target)) { - 16 => return WValue{ .imm32 = @bitCast(u16, val.toFloat(f16)) }, - 32 => return WValue{ .float32 = val.toFloat(f32) }, - 64 => return WValue{ .float64 = val.toFloat(f64) }, + 16 => return WValue{ .imm32 = @bitCast(u16, val.toFloat(f16, mod)) }, + 32 => return WValue{ .float32 = val.toFloat(f32, mod) }, + 64 => return WValue{ .float64 = val.toFloat(f64, mod) }, else => unreachable, }, - .Pointer => switch (val.ip_index) { - .null_value => return WValue{ .imm32 = 0 }, + .Pointer => return switch (val.ip_index) { + .null_value => WValue{ .imm32 = 0 }, .none => switch (val.tag()) { - .field_ptr, .elem_ptr, .opt_payload_ptr => return func.lowerParentPtr(val, 0), - .int_u64, .one => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) }, - .zero => return WValue{ .imm32 = 0 }, + .field_ptr, .elem_ptr, .opt_payload_ptr => func.lowerParentPtr(val, 0), else => return func.fail("Wasm TODO: lowerConstant for other const pointer tag {}", .{val.tag()}), }, - else => unreachable, + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| WValue{ .imm32 = @intCast(u32, int.storage.u64) }, + else => unreachable, + }, }, .Enum => { if (val.castTag(.enum_field_index)) |field_index| { @@ -3137,7 +3138,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. 
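`Value.toFloat` likewise gains a `*Module` parameter, presumably because an interned float can only be decoded with the pool in hand; the Wasm float lowering below shows the pattern. Call sites change mechanically:

```zig
// Before:
const x_old = val.toFloat(f64);
// After: mod carries the InternPool needed to read interned values.
const x_new = val.toFloat(f64, mod);
```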
const is_pl = val.errorUnionIsPayload(); - const err_val = if (!is_pl) val else Value.initTag(.zero); + const err_val = if (!is_pl) val else Value.zero; return func.lowerConstant(err_val, error_type); } return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{}); @@ -3160,11 +3161,10 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { assert(struct_obj.layout == .Packed); var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable; - var payload: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = std.mem.readIntLittle(u64, &buf), - }; - const int_val = Value.initPayload(&payload.base); + const int_val = try mod.intValue( + struct_obj.backing_int_ty, + std.mem.readIntLittle(u64, &buf), + ); return func.lowerConstant(int_val, struct_obj.backing_int_ty); }, .Vector => { @@ -4899,8 +4899,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result = try func.allocStack(inst_ty); for (0..mask_len) |index| { - var buf: Value.ElemValueBuffer = undefined; - const value = mask.elemValueBuffer(mod, index, &buf).toSignedInt(mod); + const value = (try mask.elemValue(mod, index)).toSignedInt(mod); try func.emitWValue(result); @@ -4920,8 +4919,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var lanes = std.mem.asBytes(operands[1..]); for (0..@intCast(usize, mask_len)) |index| { - var buf: Value.ElemValueBuffer = undefined; - const mask_elem = mask.elemValueBuffer(mod, index, &buf).toSignedInt(mod); + const mask_elem = (try mask.elemValue(mod, index)).toSignedInt(mod); const base_index = if (mask_elem >= 0) @intCast(u8, @intCast(i64, elem_size) * mask_elem) else diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index ee604afd0f..51c6bc79e6 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2757,11 +2757,8 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { dst_ty.fmt(self.bin_file.options.module.?), }); - var mask_pl = Value.Payload.U64{ - .base = .{ .tag = .int_u64 }, - .data = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - dst_info.bits), - }; - const mask_val = Value.initPayload(&mask_pl.base); + const elem_ty = src_ty.childType(mod); + const mask_val = try mod.intValue(elem_ty, @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - dst_info.bits)); var splat_pl = Value.Payload.SubValue{ .base = .{ .tag = .repeated }, @@ -4906,18 +4903,6 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { defer arena.deinit(); const ExpectedContents = struct { - scalar: union { - i64: Value.Payload.I64, - big: struct { - limbs: [ - @max( - std.math.big.int.Managed.default_capacity, - std.math.big.int.calcTwosCompLimbCount(128), - ) - ]std.math.big.Limb, - pl: Value.Payload.BigInt, - }, - }, repeated: Value.Payload.SubValue, }; var stack align(@alignOf(ExpectedContents)) = @@ -11429,8 +11414,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { const field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?); var tag_pl = Value.Payload.U32{ .base = .{ .tag = .enum_field_index }, .data = field_index }; const tag_val = Value.initPayload(&tag_pl.base); - var tag_int_pl: Value.Payload.U64 = undefined; - const tag_int_val = tag_val.enumToInt(tag_ty, &tag_int_pl); + const tag_int_val = try tag_val.enumToInt(tag_ty, mod); const tag_int = tag_int_val.toUnsignedInt(mod); const tag_off = if (layout.tag_align < 
layout.payload_align) @intCast(i32, layout.payload_size) diff --git a/src/codegen.zig b/src/codegen.zig index 5f5a3f66be..9c9868892f 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -214,15 +214,15 @@ pub fn generateSymbol( }, .Float => { switch (typed_value.ty.floatBits(target)) { - 16 => writeFloat(f16, typed_value.val.toFloat(f16), target, endian, try code.addManyAsArray(2)), - 32 => writeFloat(f32, typed_value.val.toFloat(f32), target, endian, try code.addManyAsArray(4)), - 64 => writeFloat(f64, typed_value.val.toFloat(f64), target, endian, try code.addManyAsArray(8)), + 16 => writeFloat(f16, typed_value.val.toFloat(f16, mod), target, endian, try code.addManyAsArray(2)), + 32 => writeFloat(f32, typed_value.val.toFloat(f32, mod), target, endian, try code.addManyAsArray(4)), + 64 => writeFloat(f64, typed_value.val.toFloat(f64, mod), target, endian, try code.addManyAsArray(8)), 80 => { - writeFloat(f80, typed_value.val.toFloat(f80), target, endian, try code.addManyAsArray(10)); + writeFloat(f80, typed_value.val.toFloat(f80, mod), target, endian, try code.addManyAsArray(10)); const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; try code.appendNTimes(0, abi_size - 10); }, - 128 => writeFloat(f128, typed_value.val.toFloat(f128), target, endian, try code.addManyAsArray(16)), + 128 => writeFloat(f128, typed_value.val.toFloat(f128, mod), target, endian, try code.addManyAsArray(16)), else => unreachable, } return Result.ok; @@ -328,20 +328,6 @@ pub fn generateSymbol( return Result.ok; }, .none => switch (typed_value.val.tag()) { - .zero, .one, .int_u64, .int_big_positive => { - switch (target.ptrBitWidth()) { - 32 => { - const x = typed_value.val.toUnsignedInt(mod); - mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, x), endian); - }, - 64 => { - const x = typed_value.val.toUnsignedInt(mod); - mem.writeInt(u64, try code.addManyAsArray(8), x, endian); - }, - else => unreachable, - } - return Result.ok; - }, .variable, .decl_ref, .decl_ref_mut => |tag| return lowerDeclRef( bin_file, src_loc, @@ -399,7 +385,23 @@ pub fn generateSymbol( ), }, }, - else => unreachable, + else => switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) { + .int => { + switch (target.ptrBitWidth()) { + 32 => { + const x = typed_value.val.toUnsignedInt(mod); + mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, x), endian); + }, + 64 => { + const x = typed_value.val.toUnsignedInt(mod); + mem.writeInt(u64, try code.addManyAsArray(8), x, endian); + }, + else => unreachable, + } + return Result.ok; + }, + else => unreachable, + }, }, .Int => { const info = typed_value.ty.intInfo(mod); @@ -449,8 +451,7 @@ pub fn generateSymbol( return Result.ok; }, .Enum => { - var int_buffer: Value.Payload.U64 = undefined; - const int_val = typed_value.enumToInt(&int_buffer); + const int_val = try typed_value.enumToInt(mod); const info = typed_value.ty.intInfo(mod); if (info.bits <= 8) { @@ -674,7 +675,7 @@ pub fn generateSymbol( const is_payload = typed_value.val.errorUnionIsPayload(); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - const err_val = if (is_payload) Value.initTag(.zero) else typed_value.val; + const err_val = if (is_payload) Value.zero else typed_value.val; return generateSymbol(bin_file, src_loc, .{ .ty = error_ty, .val = err_val, @@ -689,7 +690,7 @@ pub fn generateSymbol( if (error_align > payload_align) { switch (try generateSymbol(bin_file, src_loc, .{ .ty = error_ty, - .val = if (is_payload) Value.initTag(.zero) else typed_value.val, + .val = if 
(is_payload) Value.zero else typed_value.val, }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, @@ -721,7 +722,7 @@ pub fn generateSymbol( const begin = code.items.len; switch (try generateSymbol(bin_file, src_loc, .{ .ty = error_ty, - .val = if (is_payload) Value.initTag(.zero) else typed_value.val, + .val = if (is_payload) Value.zero else typed_value.val, }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, @@ -961,13 +962,9 @@ fn lowerDeclRef( } // generate length - var slice_len: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = typed_value.val.sliceLen(mod), - }; switch (try generateSymbol(bin_file, src_loc, .{ .ty = Type.usize, - .val = Value.initPayload(&slice_len.base), + .val = try mod.intValue(Type.usize, typed_value.val.sliceLen(mod)), }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, @@ -1196,13 +1193,13 @@ pub fn genTypedValue( .null_value => { return GenResult.mcv(.{ .immediate = 0 }); }, - .none => switch (typed_value.val.tag()) { - .int_u64 => { + .none => {}, + else => switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) { + .int => { return GenResult.mcv(.{ .immediate = typed_value.val.toUnsignedInt(mod) }); }, else => {}, }, - else => {}, }, }, .Int => { @@ -1283,7 +1280,7 @@ pub fn genTypedValue( if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. - const err_val = if (!is_pl) typed_value.val else Value.initTag(.zero); + const err_val = if (!is_pl) typed_value.val else Value.zero; return genTypedValue(bin_file, src_loc, .{ .ty = error_type, .val = err_val, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 039c75de67..9443c2298a 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -568,11 +568,7 @@ pub const DeclGen = struct { var buf: Type.SlicePtrFieldTypeBuffer = undefined; try dg.renderValue(writer, ty.slicePtrFieldType(&buf, mod), val.slicePtr(), .Initializer); - var len_pl: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = val.sliceLen(mod), - }; - const len_val = Value.initPayload(&len_pl.base); + const len_val = try mod.intValue(Type.usize, val.sliceLen(mod)); if (location == .StaticInitializer) { return writer.print(", {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val, .Other)}); @@ -596,11 +592,17 @@ pub const DeclGen = struct { if (need_typecast) try writer.writeByte(')'); } - // Renders a "parent" pointer by recursing to the root decl/variable - // that its contents are defined with respect to. - // - // Used for .elem_ptr, .field_ptr, .opt_payload_ptr, .eu_payload_ptr - fn renderParentPtr(dg: *DeclGen, writer: anytype, ptr_val: Value, ptr_ty: Type, location: ValueRenderLocation) error{ OutOfMemory, AnalysisFail }!void { + /// Renders a "parent" pointer by recursing to the root decl/variable + /// that its contents are defined with respect to. 
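Stepping back to `genTypedValue` above: pointer-typed constants holding plain integers are now recognized through the intern pool key instead of the `.int_u64` tag and lowered straight to a machine immediate (the `renderParentPtr` doc comment continues below):

```zig
else => switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) {
    // Any interned integer lowers to an immediate operand.
    .int => return GenResult.mcv(.{ .immediate = typed_value.val.toUnsignedInt(mod) }),
    else => {},
},
```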
+ /// + /// Used for .elem_ptr, .field_ptr, .opt_payload_ptr, .eu_payload_ptr + fn renderParentPtr( + dg: *DeclGen, + writer: anytype, + ptr_val: Value, + ptr_ty: Type, + location: ValueRenderLocation, + ) error{ OutOfMemory, AnalysisFail }!void { const mod = dg.module; if (!ptr_ty.isSlice(mod)) { @@ -608,8 +610,11 @@ pub const DeclGen = struct { try dg.renderType(writer, ptr_ty); try writer.writeByte(')'); } + if (ptr_val.ip_index != .none) switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { + .int => try writer.print("{x}", .{try dg.fmtIntLiteral(Type.usize, ptr_val, .Other)}), + else => unreachable, + }; switch (ptr_val.tag()) { - .int_u64, .one => try writer.print("{x}", .{try dg.fmtIntLiteral(Type.usize, ptr_val, .Other)}), .decl_ref_mut, .decl_ref, .variable => { const decl_index = switch (ptr_val.tag()) { .decl_ref => ptr_val.castTag(.decl_ref).?.data, @@ -661,11 +666,7 @@ pub const DeclGen = struct { u8_ptr_pl.data.pointee_type = Type.u8; const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); - var byte_offset_pl = Value.Payload.U64{ - .base = .{ .tag = .int_u64 }, - .data = byte_offset, - }; - const byte_offset_val = Value.initPayload(&byte_offset_pl.base); + const byte_offset_val = try mod.intValue(Type.usize, byte_offset); try writer.writeAll("(("); try dg.renderType(writer, u8_ptr_ty); @@ -891,7 +892,7 @@ pub const DeclGen = struct { }, .Array, .Vector => { const ai = ty.arrayInfo(mod); - if (ai.elem_type.eql(Type.u8, dg.module)) { + if (ai.elem_type.eql(Type.u8, mod)) { var literal = stringLiteral(writer); try literal.start(); const c_len = ty.arrayLenIncludingSentinel(mod); @@ -949,7 +950,7 @@ pub const DeclGen = struct { }, .Float => { const bits = ty.floatBits(target); - const f128_val = val.toFloat(f128); + const f128_val = val.toFloat(f128, mod); // All unsigned ints matching float types are pre-allocated. 
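The C backend prints a float through its bit-equivalent unsigned integer (the pre-allocated `repr_ty` fetched just below), which is why every `toFloat` in this file now threads `mod` through as well. A tiny illustration of the reinterpretation step, with a hypothetical value:

```zig
const x: f32 = 1.5;
// 0x3FC00000: exactly the pattern the emitter renders for 1.5.
const bits = @bitCast(u32, x);
```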
const repr_ty = mod.intType(.unsigned, bits) catch unreachable; @@ -963,21 +964,15 @@ pub const DeclGen = struct { }; switch (bits) { - 16 => repr_val_big.set(@bitCast(u16, val.toFloat(f16))), - 32 => repr_val_big.set(@bitCast(u32, val.toFloat(f32))), - 64 => repr_val_big.set(@bitCast(u64, val.toFloat(f64))), - 80 => repr_val_big.set(@bitCast(u80, val.toFloat(f80))), + 16 => repr_val_big.set(@bitCast(u16, val.toFloat(f16, mod))), + 32 => repr_val_big.set(@bitCast(u32, val.toFloat(f32, mod))), + 64 => repr_val_big.set(@bitCast(u64, val.toFloat(f64, mod))), + 80 => repr_val_big.set(@bitCast(u80, val.toFloat(f80, mod))), 128 => repr_val_big.set(@bitCast(u128, f128_val)), else => unreachable, } - var repr_val_pl = Value.Payload.BigInt{ - .base = .{ - .tag = if (repr_val_big.positive) .int_big_positive else .int_big_negative, - }, - .data = repr_val_big.limbs[0..repr_val_big.len], - }; - const repr_val = Value.initPayload(&repr_val_pl.base); + const repr_val = try mod.intValue_big(repr_ty, repr_val_big.toConst()); try writer.writeAll("zig_cast_"); try dg.renderTypeForBuiltinFnName(writer, ty); @@ -988,10 +983,10 @@ pub const DeclGen = struct { try dg.renderTypeForBuiltinFnName(writer, ty); try writer.writeByte('('); switch (bits) { - 16 => try writer.print("{x}", .{val.toFloat(f16)}), - 32 => try writer.print("{x}", .{val.toFloat(f32)}), - 64 => try writer.print("{x}", .{val.toFloat(f64)}), - 80 => try writer.print("{x}", .{val.toFloat(f80)}), + 16 => try writer.print("{x}", .{val.toFloat(f16, mod)}), + 32 => try writer.print("{x}", .{val.toFloat(f32, mod)}), + 64 => try writer.print("{x}", .{val.toFloat(f64, mod)}), + 80 => try writer.print("{x}", .{val.toFloat(f80, mod)}), 128 => try writer.print("{x}", .{f128_val}), else => unreachable, } @@ -1031,10 +1026,10 @@ pub const DeclGen = struct { if (std.math.isNan(f128_val)) switch (bits) { // We only actually need to pass the significand, but it will get // properly masked anyway, so just pass the whole value. 
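Just above, hand-built `Value.Payload.BigInt` construction collapses into `Module.intValue_big`; the NaN special case resumes below. The shape of that collapse, sketched for a `Mutable` big int named `big`:

```zig
// Before: alias the limbs in place and pick a sign tag by hand.
var pl = Value.Payload.BigInt{
    .base = .{ .tag = if (big.positive) .int_big_positive else .int_big_negative },
    .data = big.limbs[0..big.len],
};
const repr_old = Value.initPayload(&pl.base);

// After: intern the canonical constant under its integer type.
const repr_new = try mod.intValue_big(repr_ty, big.toConst());
```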
- 16 => try writer.print("\"0x{x}\"", .{@bitCast(u16, val.toFloat(f16))}), - 32 => try writer.print("\"0x{x}\"", .{@bitCast(u32, val.toFloat(f32))}), - 64 => try writer.print("\"0x{x}\"", .{@bitCast(u64, val.toFloat(f64))}), - 80 => try writer.print("\"0x{x}\"", .{@bitCast(u80, val.toFloat(f80))}), + 16 => try writer.print("\"0x{x}\"", .{@bitCast(u16, val.toFloat(f16, mod))}), + 32 => try writer.print("\"0x{x}\"", .{@bitCast(u32, val.toFloat(f32, mod))}), + 64 => try writer.print("\"0x{x}\"", .{@bitCast(u64, val.toFloat(f64, mod))}), + 80 => try writer.print("\"0x{x}\"", .{@bitCast(u80, val.toFloat(f80, mod))}), 128 => try writer.print("\"0x{x}\"", .{@bitCast(u128, f128_val)}), else => unreachable, }; @@ -1060,19 +1055,6 @@ pub const DeclGen = struct { try writer.writeAll(")NULL)"); }, .none => switch (val.tag()) { - .zero => if (ty.isSlice(mod)) { - var slice_pl = Value.Payload.Slice{ - .base = .{ .tag = .slice }, - .data = .{ .ptr = val, .len = Value.undef }, - }; - const slice_val = Value.initPayload(&slice_pl.base); - - return dg.renderValue(writer, ty, slice_val, location); - } else { - try writer.writeAll("(("); - try dg.renderType(writer, ty); - try writer.writeAll(")NULL)"); - }, .variable => { const decl = val.castTag(.variable).?.data.owner_decl; return dg.renderDeclValue(writer, ty, val, decl, location); @@ -1101,7 +1083,7 @@ pub const DeclGen = struct { const extern_fn = val.castTag(.extern_fn).?.data; try dg.renderDeclName(writer, extern_fn.owner_decl, 0); }, - .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => { + .lazy_align, .lazy_size => { try writer.writeAll("(("); try dg.renderType(writer, ty); return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)}); @@ -1116,7 +1098,14 @@ pub const DeclGen = struct { else => unreachable, }, - else => unreachable, + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => { + try writer.writeAll("(("); + try dg.renderType(writer, ty); + return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)}); + }, + else => unreachable, + }, }, .Array, .Vector => { if (location == .FunctionArgument) { @@ -1155,7 +1144,7 @@ pub const DeclGen = struct { .bytes => val.castTag(.bytes).?.data, .str_lit => bytes: { const str_lit = val.castTag(.str_lit).?.data; - break :bytes dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; + break :bytes mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; }, else => unreachable, }; @@ -1170,21 +1159,18 @@ pub const DeclGen = struct { else => {}, } // Fall back to generic implementation. 
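The generic fallback that follows also sheds its per-call arena: `elemValue` no longer takes an allocator, so the temporary `ArenaAllocator` and its `deinit` are deleted outright rather than replaced. Per element, the call shrinks to:

```zig
// Before: try val.elemValue(dg.module, arena_allocator, index)
const elem_val = try val.elemValue(mod, index);
```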
- var arena = std.heap.ArenaAllocator.init(dg.gpa); - defer arena.deinit(); - const arena_allocator = arena.allocator(); // MSVC throws C2078 if an array of size 65536 or greater is initialized with a string literal const max_string_initializer_len = 65535; const ai = ty.arrayInfo(mod); - if (ai.elem_type.eql(Type.u8, dg.module)) { + if (ai.elem_type.eql(Type.u8, mod)) { if (ai.len <= max_string_initializer_len) { var literal = stringLiteral(writer); try literal.start(); var index: usize = 0; while (index < ai.len) : (index += 1) { - const elem_val = try val.elemValue(dg.module, arena_allocator, index); + const elem_val = try val.elemValue(mod, index); const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); try literal.writeChar(elem_val_u8); } @@ -1198,7 +1184,7 @@ pub const DeclGen = struct { var index: usize = 0; while (index < ai.len) : (index += 1) { if (index != 0) try writer.writeByte(','); - const elem_val = try val.elemValue(dg.module, arena_allocator, index); + const elem_val = try val.elemValue(mod, index); const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); try writer.print("'\\x{x}'", .{elem_val_u8}); } @@ -1213,7 +1199,7 @@ pub const DeclGen = struct { var index: usize = 0; while (index < ai.len) : (index += 1) { if (index != 0) try writer.writeByte(','); - const elem_val = try val.elemValue(dg.module, arena_allocator, index); + const elem_val = try val.elemValue(mod, index); try dg.renderValue(writer, ai.elem_type, elem_val, initializer_type); } if (ai.sentinel) |s| { @@ -1361,8 +1347,7 @@ pub const DeclGen = struct { const bits = Type.smallestUnsignedBits(int_info.bits - 1); const bit_offset_ty = try mod.intType(.unsigned, bits); - var bit_offset_val_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = 0 }; - const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); + var bit_offset: u64 = 0; var eff_num_fields: usize = 0; for (0..field_vals.len) |field_i| { @@ -1394,12 +1379,13 @@ pub const DeclGen = struct { if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const cast_context = IntCastContext{ .value = .{ .value = field_val } }; - if (bit_offset_val_pl.data != 0) { + if (bit_offset != 0) { try writer.writeAll("zig_shl_"); try dg.renderTypeForBuiltinFnName(writer, ty); try writer.writeByte('('); try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument); try writer.writeAll(", "); + const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); try writer.writeByte(')'); } else { @@ -1409,7 +1395,7 @@ pub const DeclGen = struct { if (needs_closing_paren) try writer.writeByte(')'); if (eff_index != eff_num_fields - 1) try writer.writeAll(", "); - bit_offset_val_pl.data += field_ty.bitSize(mod); + bit_offset += field_ty.bitSize(mod); needs_closing_paren = true; eff_index += 1; } @@ -1427,15 +1413,16 @@ pub const DeclGen = struct { try dg.renderType(writer, ty); try writer.writeByte(')'); - if (bit_offset_val_pl.data != 0) { + if (bit_offset != 0) { try dg.renderValue(writer, field_ty, field_val, .Other); try writer.writeAll(" << "); + const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); } else { try dg.renderValue(writer, field_ty, field_val, .Other); } - bit_offset_val_pl.data += field_ty.bitSize(mod); + bit_offset += field_ty.bitSize(mod); empty 
empty = false;
}
try writer.writeByte(')');
@@ -1451,7 +1438,7 @@ pub const DeclGen = struct {
try writer.writeByte(')');
}
- const field_i = ty.unionTagFieldIndex(union_obj.tag, dg.module).?;
+ const field_i = ty.unionTagFieldIndex(union_obj.tag, mod).?;
const field_ty = ty.unionFields().values()[field_i].ty;
const field_name = ty.unionFields().keys()[field_i];
if (ty.containerLayout() == .Packed) {
@@ -1951,10 +1938,10 @@ pub const DeclGen = struct {
if (is_big) try writer.print(", {}", .{int_info.signedness == .signed});
- var bits_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = int_info.bits };
+ const bits_ty = if (is_big) Type.u16 else Type.u8;
try writer.print(", {}", .{try dg.fmtIntLiteral(
- if (is_big) Type.u16 else Type.u8,
- Value.initPayload(&bits_pl.base),
+ bits_ty,
+ try mod.intValue(bits_ty, int_info.bits),
.FunctionArgument,
)});
}
@@ -2495,8 +2482,7 @@ pub fn genErrDecls(o: *Object) !void {
for (mod.error_name_list.items, 0..) |name, value| {
if (value != 0) try writer.writeByte(',');
- var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = name.len };
- const len_val = Value.initPayload(&len_pl.base);
+ const len_val = try mod.intValue(Type.usize, name.len);
try writer.print("{{" ++ name_prefix ++ "{}, {}}}", .{
fmtIdent(name),
try o.dg.fmtIntLiteral(Type.usize, len_val, .Other),
@@ -2548,8 +2534,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
};
const tag_val = Value.initPayload(&tag_pl.base);
- var int_pl: Value.Payload.U64 = undefined;
- const int_val = tag_val.enumToInt(enum_ty, &int_pl);
+ const int_val = try tag_val.enumToInt(enum_ty, mod);
const name_ty = try mod.arrayType(.{
.len = name.len,
@@ -2560,8 +2545,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
var name_pl = Value.Payload.Bytes{ .base = .{ .tag = .bytes }, .data = name };
const name_val = Value.initPayload(&name_pl.base);
- var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = name.len };
- const len_val = Value.initPayload(&len_pl.base);
+ const len_val = try mod.intValue(Type.usize, name.len);
try w.print(" case {}: {{\n static ", .{
try o.dg.fmtIntLiteral(enum_ty, int_val, .Other),
@@ -3396,12 +3380,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const host_ty = try mod.intType(.unsigned, host_bits);
const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
-
- var bit_offset_val_pl: Value.Payload.U64 = .{
- .base = .{ .tag = .int_u64 },
- .data = ptr_info.bit_offset,
- };
- const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base);
+ const bit_offset_val = try mod.intValue(bit_offset_ty, ptr_info.bit_offset);
const field_ty = try mod.intType(.unsigned, @intCast(u16, src_ty.bitSize(mod)));
@@ -3563,14 +3542,7 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
try v.elem(f, writer);
} else switch (dest_int_info.signedness) {
.unsigned => {
- var arena = std.heap.ArenaAllocator.init(f.object.dg.gpa);
- defer arena.deinit();
-
- const ExpectedContents = union { u: Value.Payload.U64, i: Value.Payload.I64 };
- var stack align(@alignOf(ExpectedContents)) =
- std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator());
-
- const mask_val = try inst_scalar_ty.maxInt(stack.get(), mod);
+ const mask_val = try inst_scalar_ty.maxIntScalar(mod);
try writer.writeAll("zig_and_");
try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
try writer.writeByte('(');
@@ -3581,11 +3553,7 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
.signed => {
const c_bits = toCIntBits(scalar_int_info.bits) orelse return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{});
- var shift_pl = Value.Payload.U64{
- .base = .{ .tag = .int_u64 },
- .data = c_bits - dest_bits,
- };
- const shift_val = Value.initPayload(&shift_pl.base);
+ const shift_val = try mod.intValue(Type.u8, c_bits - dest_bits);
try writer.writeAll("zig_shr_");
try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
@@ -3705,12 +3673,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
const host_ty = try mod.intType(.unsigned, host_bits);
const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
-
- var bit_offset_val_pl: Value.Payload.U64 = .{
- .base = .{ .tag = .int_u64 },
- .data = ptr_info.bit_offset,
- };
- const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base);
+ const bit_offset_val = try mod.intValue(bit_offset_ty, ptr_info.bit_offset);
const src_bits = src_ty.bitSize(mod);
@@ -3725,11 +3688,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
try mask.shiftLeft(&mask, ptr_info.bit_offset);
try mask.bitNotWrap(&mask, .unsigned, host_bits);
- var mask_pl = Value.Payload.BigInt{
- .base = .{ .tag = .int_big_positive },
- .data = mask.limbs[0..mask.len()],
- };
- const mask_val = Value.initPayload(&mask_pl.base);
+ const mask_val = try mod.intValue_big(host_ty, mask.toConst());
try f.writeCValueDeref(writer, ptr_val);
try v.elem(f, writer);
@@ -5356,11 +5315,7 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue {
u8_ptr_pl.data.pointee_type = Type.u8;
const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base);
- var byte_offset_pl = Value.Payload.U64{
- .base = .{ .tag = .int_u64 },
- .data = byte_offset,
- };
- const byte_offset_val = Value.initPayload(&byte_offset_pl.base);
+ const byte_offset_val = try mod.intValue(Type.usize, byte_offset);
try writer.writeAll("((");
try f.renderType(writer, u8_ptr_ty);
@@ -5412,11 +5367,7 @@ fn fieldPtr(
u8_ptr_pl.data.pointee_type = Type.u8;
const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base);
- var byte_offset_pl = Value.Payload.U64{
- .base = .{ .tag = .int_u64 },
- .data = byte_offset,
- };
- const byte_offset_val = Value.initPayload(&byte_offset_pl.base);
+ const byte_offset_val = try mod.intValue(Type.usize, byte_offset);
try writer.writeAll("((");
try f.renderType(writer, u8_ptr_ty);
@@ -5466,11 +5417,8 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
- var bit_offset_val_pl: Value.Payload.U64 = .{
- .base = .{ .tag = .int_u64 },
- .data = struct_obj.packedFieldBitOffset(mod, extra.field_index),
- };
- const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base);
+ const bit_offset = struct_obj.packedFieldBitOffset(mod, extra.field_index);
+ const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
const field_int_signedness = if (inst_ty.isAbiInt(mod))
inst_ty.intInfo(mod).signedness
@@ -5492,13 +5440,13 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty);
try writer.writeByte('(');
}
- if (bit_offset_val_pl.data > 0) {
+ if (bit_offset > 0) {
try writer.writeAll("zig_shr_");
try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty);
try writer.writeByte('(');
}
try f.writeCValue(writer, struct_byval, .Other);
- if (bit_offset_val_pl.data > 0) {
+ if (bit_offset > 0) {
writer.writeAll(", "); try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); try writer.writeByte(')'); @@ -5854,9 +5802,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { } else try f.writeCValue(writer, operand, .Initializer); try writer.writeAll("; "); - const array_len = array_ty.arrayLen(mod); - var len_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = array_len }; - const len_val = Value.initPayload(&len_pl.base); + const len_val = try mod.intValue(Type.usize, array_ty.arrayLen(mod)); try f.writeCValueMember(writer, local, .{ .identifier = "len" }); try writer.print(" = {};\n", .{try f.fmtIntLiteral(Type.usize, len_val)}); @@ -6632,26 +6578,17 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { const local = try f.allocLocal(inst, inst_ty); try reap(f, inst, &.{ extra.a, extra.b }); // local cannot alias operands for (0..extra.mask_len) |index| { - var dst_pl = Value.Payload.U64{ - .base = .{ .tag = .int_u64 }, - .data = @intCast(u64, index), - }; - try f.writeCValue(writer, local, .Other); try writer.writeByte('['); - try f.object.dg.renderValue(writer, Type.usize, Value.initPayload(&dst_pl.base), .Other); + try f.object.dg.renderValue(writer, Type.usize, try mod.intValue(Type.usize, index), .Other); try writer.writeAll("] = "); - var buf: Value.ElemValueBuffer = undefined; - const mask_elem = mask.elemValueBuffer(mod, index, &buf).toSignedInt(mod); - var src_pl = Value.Payload.U64{ - .base = .{ .tag = .int_u64 }, - .data = @intCast(u64, mask_elem ^ mask_elem >> 63), - }; + const mask_elem = (try mask.elemValue(mod, index)).toSignedInt(mod); + const src_val = try mod.intValue(Type.usize, @intCast(u64, mask_elem ^ mask_elem >> 63)); try f.writeCValue(writer, if (mask_elem >= 0) lhs else rhs, .Other); try writer.writeByte('['); - try f.object.dg.renderValue(writer, Type.usize, Value.initPayload(&src_pl.base), .Other); + try f.object.dg.renderValue(writer, Type.usize, src_val, .Other); try writer.writeAll("];\n"); } @@ -6730,8 +6667,6 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { defer arena.deinit(); const ExpectedContents = union { - u: Value.Payload.U64, - i: Value.Payload.I64, f16: Value.Payload.Float_16, f32: Value.Payload.Float_32, f64: Value.Payload.Float_64, @@ -6746,13 +6681,13 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { .And => switch (scalar_ty.zigTypeTag(mod)) { .Bool => Value.one, else => switch (scalar_ty.intInfo(mod).signedness) { - .unsigned => try scalar_ty.maxInt(stack.get(), mod), + .unsigned => try scalar_ty.maxIntScalar(mod), .signed => Value.negative_one, }, }, .Min => switch (scalar_ty.zigTypeTag(mod)) { .Bool => Value.one, - .Int => try scalar_ty.maxInt(stack.get(), mod), + .Int => try scalar_ty.maxIntScalar(mod), .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target), else => unreachable, }, @@ -6879,8 +6814,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1)); - var bit_offset_val_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = 0 }; - const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); + var bit_offset: u64 = 0; var empty = true; for (0..elements.len) |field_i| { @@ -6925,12 +6859,13 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { } try writer.writeAll(", "); + const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); try 
try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
try f.object.dg.renderBuiltinInfo(writer, inst_ty, .bits);
try writer.writeByte(')');
if (!empty) try writer.writeByte(')');
- bit_offset_val_pl.data += field_ty.bitSize(mod);
+ bit_offset += field_ty.bitSize(mod);
empty = false;
}
@@ -6976,8 +6911,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
};
const tag_val = Value.initPayload(&tag_pl.base);
- var int_pl: Value.Payload.U64 = undefined;
- const int_val = tag_val.enumToInt(tag_ty, &int_pl);
+ const int_val = try tag_val.enumToInt(tag_ty, mod);
const a = try Assignment.start(f, writer, tag_ty);
try f.writeCValueMember(writer, local, .{ .identifier = "tag" });
@@ -7640,10 +7574,6 @@ fn formatIntLiteral(
c_limb_int_info.signedness = .unsigned;
c_limb_cty = c_limb_info.cty;
}
- var c_limb_val_pl = Value.Payload.BigInt{
- .base = .{ .tag = if (c_limb_mut.positive) .int_big_positive else .int_big_negative },
- .data = c_limb_mut.limbs[0..c_limb_mut.len],
- };
if (limb_offset > 0) try writer.writeAll(", ");
try formatIntLiteral(.{
@@ -7651,7 +7581,7 @@ fn formatIntLiteral(
.int_info = c_limb_int_info,
.kind = data.kind,
.cty = c_limb_cty,
- .val = Value.initPayload(&c_limb_val_pl.base),
+ .val = try mod.intValue_big(Type.comptime_int, c_limb_mut.toConst()),
}, fmt, options, writer);
}
}
@@ -7750,7 +7680,7 @@ const Vectorize = struct {
pub fn start(f: *Function, inst: Air.Inst.Index, writer: anytype, ty: Type) !Vectorize {
const mod = f.object.dg.module;
return if (ty.zigTypeTag(mod) == .Vector) index: {
- var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = ty.vectorLen(mod) };
+ const len_val = try mod.intValue(Type.usize, ty.vectorLen(mod));
const local = try f.allocLocal(inst, Type.usize);
@@ -7759,7 +7689,7 @@ const Vectorize = struct {
try writer.print(" = {d}; ", .{try f.fmtIntLiteral(Type.usize, Value.zero)});
try f.writeCValue(writer, local, .Other);
try writer.print(" < {d}; ", .{
- try f.fmtIntLiteral(Type.usize, Value.initPayload(&len_pl.base)),
+ try f.fmtIntLiteral(Type.usize, len_val),
});
try f.writeCValue(writer, local, .Other);
try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(Type.usize, Value.one)});
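Every change in src/codegen/c.zig above is the same mechanical rewrite: a stack-allocated Value.Payload.U64 (or .I64 / .BigInt) plus Value.initPayload becomes a single call to mod.intValue or mod.intValue_big, which interns the integer and returns a Value that no longer borrows the caller's stack frame. That is also why the call sites gain a `try`: interning may allocate. The sketch below is a minimal, hypothetical model of the interning side; MiniPool and its members are illustrative names, not the real src/InternPool.zig API.

    const std = @import("std");

    // A toy intern pool: constants are deduplicated into one table and
    // referred to by a stable index, so no caller needs to keep a payload
    // alive for the lifetime of the value.
    const MiniPool = struct {
        map: std.AutoHashMap(u64, u32),
        items: std.ArrayList(u64),

        fn init(gpa: std.mem.Allocator) MiniPool {
            return .{
                .map = std.AutoHashMap(u64, u32).init(gpa),
                .items = std.ArrayList(u64).init(gpa),
            };
        }

        fn deinit(self: *MiniPool) void {
            self.map.deinit();
            self.items.deinit();
        }

        // Analogous in spirit to `mod.intValue`: may allocate, hence the error union.
        fn intValue(self: *MiniPool, x: u64) !u32 {
            const gop = try self.map.getOrPut(x);
            if (!gop.found_existing) {
                gop.value_ptr.* = @intCast(u32, self.items.items.len);
                try self.items.append(x);
            }
            return gop.value_ptr.*;
        }
    };

    test "interned constants are deduplicated" {
        var pool = MiniPool.init(std.testing.allocator);
        defer pool.deinit();
        try std.testing.expectEqual(try pool.intValue(42), try pool.intValue(42));
    }

Because the returned index is deduplicated and stable, two equal constants compare equal by index alone, without caring who allocated them.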
.decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl_index), @@ -3277,8 +3275,7 @@ pub const DeclGen = struct { }, }, .Enum => { - var int_buffer: Value.Payload.U64 = undefined; - const int_val = tv.enumToInt(&int_buffer); + const int_val = try tv.enumToInt(mod); var bigint_space: Value.BigIntSpace = undefined; const bigint = int_val.toBigInt(&bigint_space, mod); @@ -3307,25 +3304,25 @@ pub const DeclGen = struct { const llvm_ty = try dg.lowerType(tv.ty); switch (tv.ty.floatBits(target)) { 16 => { - const repr = @bitCast(u16, tv.val.toFloat(f16)); + const repr = @bitCast(u16, tv.val.toFloat(f16, mod)); const llvm_i16 = dg.context.intType(16); const int = llvm_i16.constInt(repr, .False); return int.constBitCast(llvm_ty); }, 32 => { - const repr = @bitCast(u32, tv.val.toFloat(f32)); + const repr = @bitCast(u32, tv.val.toFloat(f32, mod)); const llvm_i32 = dg.context.intType(32); const int = llvm_i32.constInt(repr, .False); return int.constBitCast(llvm_ty); }, 64 => { - const repr = @bitCast(u64, tv.val.toFloat(f64)); + const repr = @bitCast(u64, tv.val.toFloat(f64, mod)); const llvm_i64 = dg.context.intType(64); const int = llvm_i64.constInt(repr, .False); return int.constBitCast(llvm_ty); }, 80 => { - const float = tv.val.toFloat(f80); + const float = tv.val.toFloat(f80, mod); const repr = std.math.break_f80(float); const llvm_i80 = dg.context.intType(80); var x = llvm_i80.constInt(repr.exp, .False); @@ -3338,7 +3335,7 @@ pub const DeclGen = struct { } }, 128 => { - var buf: [2]u64 = @bitCast([2]u64, tv.val.toFloat(f128)); + var buf: [2]u64 = @bitCast([2]u64, tv.val.toFloat(f128, mod)); // LLVM seems to require that the lower half of the f128 be placed first // in the buffer. if (native_endian == .Big) { @@ -3388,7 +3385,7 @@ pub const DeclGen = struct { }; return dg.context.constStruct(&fields, fields.len, .False); }, - .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => { + .lazy_align, .lazy_size => { const llvm_usize = try dg.lowerType(Type.usize); const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(mod), .False); return llvm_int.constIntToPtr(try dg.lowerType(tv.ty)); @@ -3396,10 +3393,6 @@ pub const DeclGen = struct { .field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => { return dg.lowerParentPtr(tv.val, tv.ty.ptrInfo(mod).bit_offset % 8 == 0); }, - .zero => { - const llvm_type = try dg.lowerType(tv.ty); - return llvm_type.constNull(); - }, .opt_payload => { const payload = tv.val.castTag(.opt_payload).?.data; return dg.lowerParentPtr(payload, tv.ty.ptrInfo(mod).bit_offset % 8 == 0); @@ -3408,7 +3401,10 @@ pub const DeclGen = struct { tv.ty.fmtDebug(), tag, }), }, - else => unreachable, + else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + .int => |int| return lowerIntAsPtr(dg, int), + else => unreachable, + }, }, .Array => switch (tv.val.tag()) { .bytes => { @@ -3592,7 +3588,7 @@ pub const DeclGen = struct { if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. 
- const err_val = if (!is_pl) tv.val else Value.initTag(.zero);
+ const err_val = if (!is_pl) tv.val else Value.zero;
return dg.lowerValue(.{ .ty = Type.anyerror, .val = err_val });
}
@@ -3600,7 +3596,7 @@ pub const DeclGen = struct {
const error_align = Type.anyerror.abiAlignment(mod);
const llvm_error_value = try dg.lowerValue(.{
.ty = Type.anyerror,
- .val = if (is_pl) Value.initTag(.zero) else tv.val,
+ .val = if (is_pl) Value.zero else tv.val,
});
const llvm_payload_value = try dg.lowerValue(.{
.ty = payload_type,
@@ -3882,14 +3878,9 @@ pub const DeclGen = struct {
const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
defer dg.gpa.free(llvm_elems);
for (llvm_elems, 0..) |*elem, i| {
- var byte_payload: Value.Payload.U64 = .{
- .base = .{ .tag = .int_u64 },
- .data = bytes[i],
- };
-
elem.* = try dg.lowerValue(.{
.ty = elem_ty,
- .val = Value.initPayload(&byte_payload.base),
+ .val = try mod.intValue(elem_ty, bytes[i]),
});
}
return llvm.constVector(
@@ -3940,14 +3931,9 @@ pub const DeclGen = struct {
const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
defer dg.gpa.free(llvm_elems);
for (llvm_elems, 0..) |*elem, i| {
- var byte_payload: Value.Payload.U64 = .{
- .base = .{ .tag = .int_u64 },
- .data = bytes[i],
- };
-
elem.* = try dg.lowerValue(.{
.ty = elem_ty,
- .val = Value.initPayload(&byte_payload.base),
+ .val = try mod.intValue(elem_ty, bytes[i]),
});
}
return llvm.constVector(
@@ -3974,6 +3960,13 @@ pub const DeclGen = struct {
}
}
+ fn lowerIntAsPtr(dg: *DeclGen, int: InternPool.Key.Int) *llvm.Value {
+ var bigint_space: Value.BigIntSpace = undefined;
+ const bigint = int.storage.toBigInt(&bigint_space);
+ const llvm_int = lowerBigInt(dg, Type.usize, bigint);
+ return llvm_int.constIntToPtr(dg.context.pointerType(0));
+ }
+
fn lowerBigInt(dg: *DeclGen, ty: Type, bigint: std.math.big.int.Const) *llvm.Value {
const mod = dg.module;
const int_info = ty.intInfo(mod);
@@ -4018,6 +4011,10 @@ pub const DeclGen = struct {
fn lowerParentPtr(dg: *DeclGen, ptr_val: Value, byte_aligned: bool) Error!*llvm.Value {
const mod = dg.module;
const target = mod.getTarget();
+ if (ptr_val.ip_index != .none) switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) {
+ .int => |int| return lowerIntAsPtr(dg, int),
+ else => unreachable,
+ };
switch (ptr_val.tag()) {
.decl_ref_mut => {
const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl_index;
@@ -4031,18 +4028,6 @@ pub const DeclGen = struct {
const decl = ptr_val.castTag(.variable).?.data.owner_decl;
return dg.lowerParentPtrDecl(ptr_val, decl);
},
- .int_i64 => {
- const int = ptr_val.castTag(.int_i64).?.data;
- const llvm_usize = try dg.lowerType(Type.usize);
- const llvm_int = llvm_usize.constInt(@bitCast(u64, int), .False);
- return llvm_int.constIntToPtr(dg.context.pointerType(0));
- },
- .int_u64 => {
- const int = ptr_val.castTag(.int_u64).?.data;
- const llvm_usize = try dg.lowerType(Type.usize);
- const llvm_int = llvm_usize.constInt(int, .False);
- return llvm_int.constIntToPtr(dg.context.pointerType(0));
- },
.field_ptr => {
const field_ptr = ptr_val.castTag(.field_ptr).?.data;
const parent_llvm_ptr = try dg.lowerParentPtr(field_ptr.container_ptr, byte_aligned);
@@ -4185,10 +4170,6 @@ pub const DeclGen = struct {
if (tv.ty.isSlice(mod)) {
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_ty = tv.ty.slicePtrFieldType(&buf, mod);
- var slice_len: Value.Payload.U64 = .{
- .base = .{ .tag = .int_u64 },
- .data = tv.val.sliceLen(mod),
- };
const fields: [2]*llvm.Value = .{
try self.lowerValue(.{
.ty = ptr_ty,
@@ -4196,7 +4177,7 @@ pub const DeclGen = struct {
}),
try self.lowerValue(.{
.ty = Type.usize,
- .val = Value.initPayload(&slice_len.base),
+ .val = try mod.intValue(Type.usize, tv.val.sliceLen(mod)),
}),
};
return self.context.constStruct(&fields, fields.len, .False);
@@ -8507,8 +8488,7 @@ pub const FuncGen = struct {
const dest_slice = try self.resolveInst(bin_op.lhs);
const ptr_ty = self.typeOf(bin_op.lhs);
const elem_ty = self.typeOf(bin_op.rhs);
- const module = self.dg.module;
- const target = module.getTarget();
+ const target = mod.getTarget();
const dest_ptr_align = ptr_ty.ptrAlignment(mod);
const u8_llvm_ty = self.context.intType(8);
const dest_ptr = self.sliceOrArrayPtr(dest_slice, ptr_ty);
@@ -8526,7 +8506,7 @@ pub const FuncGen = struct {
const len = self.sliceOrArrayLenInBytes(dest_slice, ptr_ty);
_ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile);
- if (safety and module.comp.bin_file.options.valgrind) {
+ if (safety and mod.comp.bin_file.options.valgrind) {
self.valgrindMarkUndef(dest_ptr, len);
}
return null;
@@ -8536,8 +8516,7 @@ pub const FuncGen = struct {
// repeating byte pattern, for example, `@as(u64, 0)` has a
// repeating byte pattern of 0 bytes. In such case, the memset
// intrinsic can be used.
- var value_buffer: Value.Payload.U64 = undefined;
- if (try elem_val.hasRepeatedByteRepr(elem_ty, module, &value_buffer)) |byte_val| {
+ if (try elem_val.hasRepeatedByteRepr(elem_ty, mod)) |byte_val| {
const fill_byte = try self.resolveValue(.{
.ty = Type.u8,
.val = byte_val,
@@ -8829,16 +8808,10 @@ pub const FuncGen = struct {
for (names) |name| {
const err_int = mod.global_error_set.get(name).?;
- const this_tag_int_value = int: {
- var tag_val_payload: Value.Payload.U64 = .{
- .base = .{ .tag = .int_u64 },
- .data = err_int,
- };
- break :int try self.dg.lowerValue(.{
- .ty = Type.err_int,
- .val = Value.initPayload(&tag_val_payload.base),
- });
- };
+ const this_tag_int_value = try self.dg.lowerValue(.{
+ .ty = Type.err_int,
+ .val = try mod.intValue(Type.err_int, err_int),
+ });
switch_instr.addCase(this_tag_int_value, valid_block);
}
self.builder.positionBuilderAtEnd(valid_block);
@@ -9122,8 +9095,7 @@ pub const FuncGen = struct {
const llvm_i32 = self.context.intType(32);
for (values, 0..) |*val, i| {
- var buf: Value.ElemValueBuffer = undefined;
- const elem = mask.elemValueBuffer(mod, i, &buf);
+ const elem = try mask.elemValue(mod, i);
if (elem.isUndef()) {
val.* = llvm_i32.getUndef();
} else {
@@ -9457,8 +9429,7 @@ pub const FuncGen = struct {
.data = @intCast(u32, enum_field_index),
};
const tag_val = Value.initPayload(&tag_val_payload.base);
- var int_payload: Value.Payload.U64 = undefined;
- const tag_int_val = tag_val.enumToInt(tag_ty, &int_payload);
+ const tag_int_val = try tag_val.enumToInt(tag_ty, mod);
break :blk tag_int_val.toUnsignedInt(mod);
};
if (layout.payload_size == 0) {
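A pattern worth calling out in the llvm.zig hunks above: during the migration a Value is either a legacy tagged payload (val.ip_index == .none) or an index into the intern pool, so lowerValue and lowerParentPtr now dispatch twice, first on ip_index and then either on the legacy tag or on intern_pool.indexToKey. Below is a simplified, hypothetical model of that shape; Index, Key, and MiniValue are illustrative stand-ins for the real types in src/InternPool.zig.

    const std = @import("std");

    // `.none` is a sentinel meaning "not interned yet; use the legacy tag".
    const Index = enum(u32) { none = std.math.maxInt(u32), _ };

    const LegacyTag = enum { decl_ref, lazy_align };
    const Key = union(enum) { int: i64 };

    const MiniValue = struct {
        ip_index: Index,
        legacy_tag: LegacyTag = .decl_ref,
    };

    fn describe(val: MiniValue, pool: []const Key) []const u8 {
        return switch (val.ip_index) {
            // Old representation: consult the tagged payload.
            .none => switch (val.legacy_tag) {
                .decl_ref => "legacy decl_ref",
                .lazy_align => "legacy lazy_align",
            },
            // New representation: resolve the index to an intern-pool key.
            else => switch (pool[@enumToInt(val.ip_index)]) {
                .int => "interned int",
            },
        };
    }

    test "both representations are handled" {
        const pool = [_]Key{.{ .int = -1 }};
        try std.testing.expectEqualStrings("legacy decl_ref", describe(.{ .ip_index = .none }, &pool));
        try std.testing.expectEqualStrings("interned int", describe(.{ .ip_index = @intToEnum(Index, 0) }, &pool));
    }

Once a value class is fully migrated, its `.none` arm collapses to `unreachable`, which is exactly what happens to toBool and popCount in the value.zig hunks further down.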
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index e3b5d24ed9..32e0c13c37 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -555,15 +555,15 @@ pub const DeclGen = struct {
// TODO: Swap endianess if the compiler is big endian.
switch (ty.floatBits(target)) {
16 => {
- const float_bits = val.toFloat(f16);
+ const float_bits = val.toFloat(f16, mod);
try self.addBytes(std.mem.asBytes(&float_bits)[0..@intCast(usize, len)]);
},
32 => {
- const float_bits = val.toFloat(f32);
+ const float_bits = val.toFloat(f32, mod);
try self.addBytes(std.mem.asBytes(&float_bits)[0..@intCast(usize, len)]);
},
64 => {
- const float_bits = val.toFloat(f64);
+ const float_bits = val.toFloat(f64, mod);
try self.addBytes(std.mem.asBytes(&float_bits)[0..@intCast(usize, len)]);
},
else => unreachable,
@@ -584,7 +584,7 @@ pub const DeclGen = struct {
// TODO: Properly lower function pointers. For now we are going to hack around it and
// just generate an empty pointer. Function pointers are represented by usize for now,
// though.
- try self.addInt(Type.usize, Value.initTag(.zero));
+ try self.addInt(Type.usize, Value.zero);
// TODO: Add dependency
return;
},
@@ -743,8 +743,7 @@ pub const DeclGen = struct {
try self.addUndef(padding);
},
.Enum => {
- var int_val_buffer: Value.Payload.U64 = undefined;
- const int_val = val.enumToInt(ty, &int_val_buffer);
+ const int_val = try val.enumToInt(ty, mod);
const int_ty = ty.intTagType();
@@ -787,22 +786,24 @@ pub const DeclGen = struct {
try self.addUndef(layout.padding);
},
- .ErrorSet => switch (val.tag()) {
- .@"error" => {
- const err_name = val.castTag(.@"error").?.data.name;
- const kv = try dg.module.getErrorValue(err_name);
- try self.addConstInt(u16, @intCast(u16, kv.value));
+ .ErrorSet => switch (val.ip_index) {
+ .none => switch (val.tag()) {
+ .@"error" => {
+ const err_name = val.castTag(.@"error").?.data.name;
+ const kv = try dg.module.getErrorValue(err_name);
+ try self.addConstInt(u16, @intCast(u16, kv.value));
+ },
+ else => unreachable,
},
- .zero => {
- // Unactivated error set.
- try self.addConstInt(u16, 0);
+ else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .int => |int| try self.addConstInt(u16, @intCast(u16, int.storage.u64)),
+ else => unreachable,
},
- else => unreachable,
},
.ErrorUnion => {
const payload_ty = ty.errorUnionPayload();
const is_pl = val.errorUnionIsPayload();
- const error_val = if (!is_pl) val else Value.initTag(.zero);
+ const error_val = if (!is_pl) val else Value.zero;
const eu_layout = dg.errorUnionLayout(payload_ty);
if (!eu_layout.payload_has_bits) {
@@ -993,9 +994,9 @@ pub const DeclGen = struct {
.indirect => return try self.spv.constInt(result_ty_ref, @boolToInt(val.toBool(mod))),
},
.Float => return switch (ty.floatBits(target)) {
- 16 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float16 = val.toFloat(f16) } } }),
- 32 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float32 = val.toFloat(f32) } } }),
- 64 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float64 = val.toFloat(f64) } } }),
+ 16 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float16 = val.toFloat(f16, mod) } } }),
+ 32 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float32 = val.toFloat(f32, mod) } } }),
+ 64 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float64 = val.toFloat(f64, mod) } } }),
80, 128 => unreachable, // TODO
else => unreachable,
},
@@ -1531,6 +1532,7 @@ pub const DeclGen = struct {
}
fn genDecl(self: *DeclGen) !void {
+ if (true) @panic("TODO: update SPIR-V backend for InternPool changes");
const mod = self.module;
const decl = mod.declPtr(self.decl_index);
const spv_decl_index = try self.resolveDecl(self.decl_index);
@@ -2087,8 +2089,7 @@ pub const DeclGen = struct {
var i: usize = 0;
while (i < mask_len) : (i += 1) {
- var buf: Value.ElemValueBuffer = undefined;
- const elem = mask.elemValueBuffer(self.module, i, &buf);
+ const elem = try mask.elemValue(self.module, i);
if (elem.isUndef()) {
self.func.body.writeOperand(spec.LiteralInteger, 0xFFFF_FFFF);
} else {
@@ -3146,9 +3147,8 @@ pub const DeclGen = struct {
const int_val = switch (cond_ty.zigTypeTag(mod)) {
.Int => if (cond_ty.isSignedInt(mod)) @bitCast(u64, value.toSignedInt(mod)) else value.toUnsignedInt(mod),
.Enum => blk: {
- var int_buffer: Value.Payload.U64 = undefined;
// TODO: figure out of cond_ty is correct (something with enum literals)
- break :blk value.enumToInt(cond_ty, &int_buffer).toUnsignedInt(mod); // TODO: composite integer constants
+ break :blk (try value.enumToInt(cond_ty, mod)).toUnsignedInt(mod); // TODO: composite integer constants
},
else => unreachable,
};
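The genDecl change above fences off the whole SPIR-V declaration path with `if (true) @panic(...)` rather than a bare `@panic`. A bare noreturn call at the top of the function would make the rest of the body a compile error (unreachable code), while the trivially-true branch keeps the body compiling and fails only at runtime, which lets the migration land incrementally. A small self-contained illustration of the idiom:

    const std = @import("std");

    fn notYetMigrated() void {
        // A plain `@panic` here would make the next statement a compile
        // error; behind `if (true)` the body still compiles.
        if (true) @panic("TODO: update this backend");
        std.debug.print("never reached\n", .{});
    }

    test "the fence compiles" {
        // Calling notYetMigrated() would panic at runtime, not fail to compile.
        _ = &notYetMigrated;
    }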
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index 3e4e90951e..c971b5b26f 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -421,8 +421,7 @@ pub const DeclState = struct {
const value = vals.keys()[field_i];
// TODO do not assume a 64bit enum value - could be bigger.
// See https://github.com/ziglang/zig/issues/645
- var int_buffer: Value.Payload.U64 = undefined;
- const field_int_val = value.enumToInt(ty, &int_buffer);
+ const field_int_val = try value.enumToInt(ty, mod);
break :value @bitCast(u64, field_int_val.toSignedInt(mod));
} else @intCast(u64, field_i);
mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), value, target_endian);
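The Dwarf.zig hunk shows the caller-side cost of the enumToInt signature change: the old version wrote into a caller-provided Payload.U64 and could not fail, while the new one interns the result and may allocate, so every call site gains a `try` (or, where no error can be returned, a `catch @panic("OOM")`, as in the hashing code further down). A minimal model of that signature migration, with hypothetical names:

    const std = @import("std");

    // Old shape: infallible, the result borrows a caller-provided buffer.
    fn enumToIntOld(field_index: u64, buffer: *u64) *u64 {
        buffer.* = field_index;
        return buffer;
    }

    // New shape: may allocate while interning, so it returns an error union.
    fn enumToIntNew(gpa: std.mem.Allocator, field_index: u64) std.mem.Allocator.Error!*u64 {
        const result = try gpa.create(u64);
        result.* = field_index;
        return result;
    }

    test "the fallible variant needs try at every call site" {
        var buf: u64 = undefined;
        try std.testing.expectEqual(@as(u64, 3), enumToIntOld(3, &buf).*);

        const interned = try enumToIntNew(std.testing.allocator, 3);
        defer std.testing.allocator.destroy(interned);
        try std.testing.expectEqual(@as(u64, 3), interned.*);
    }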
diff --git a/src/type.zig b/src/type.zig
index 592eb9a21e..5b18245323 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -2077,10 +2077,10 @@ pub const Type = struct {
}
/// May capture a reference to `ty`.
- pub fn lazyAbiAlignment(ty: Type, mod: *const Module, arena: Allocator) !Value {
+ pub fn lazyAbiAlignment(ty: Type, mod: *Module, arena: Allocator) !Value {
switch (try ty.abiAlignmentAdvanced(mod, .{ .lazy = arena })) {
.val => |val| return val,
- .scalar => |x| return Value.Tag.int_u64.create(arena, x),
+ .scalar => |x| return mod.intValue(ty, x),
}
}
@@ -2468,10 +2468,10 @@ pub const Type = struct {
}
/// May capture a reference to `ty`.
- pub fn lazyAbiSize(ty: Type, mod: *const Module, arena: Allocator) !Value {
+ pub fn lazyAbiSize(ty: Type, mod: *Module, arena: Allocator) !Value {
switch (try ty.abiSizeAdvanced(mod, .{ .lazy = arena })) {
.val => |val| return val,
- .scalar => |x| return Value.Tag.int_u64.create(arena, x),
+ .scalar => |x| return mod.intValue(ty, x),
}
}
@@ -4310,8 +4310,8 @@ pub const Type = struct {
}
// Works for vectors and vectors of integers.
- pub fn minInt(ty: Type, arena: Allocator, mod: *const Module) !Value {
- const scalar = try minIntScalar(ty.scalarType(mod), arena, mod);
+ pub fn minInt(ty: Type, arena: Allocator, mod: *Module) !Value {
+ const scalar = try minIntScalar(ty.scalarType(mod), mod);
if (ty.zigTypeTag(mod) == .Vector and scalar.tag() != .the_only_possible_value) {
return Value.Tag.repeated.create(arena, scalar);
} else {
@@ -4319,38 +4319,28 @@ pub const Type = struct {
}
}
- /// Asserts that self.zigTypeTag(mod) == .Int.
- pub fn minIntScalar(ty: Type, arena: Allocator, mod: *const Module) !Value {
- assert(ty.zigTypeTag(mod) == .Int);
+ /// Asserts that the type is an integer.
+ pub fn minIntScalar(ty: Type, mod: *Module) !Value {
const info = ty.intInfo(mod);
-
- if (info.bits == 0) {
- return Value.initTag(.the_only_possible_value);
- }
-
- if (info.signedness == .unsigned) {
- return Value.zero;
- }
+ if (info.signedness == .unsigned) return Value.zero;
+ if (info.bits == 0) return Value.negative_one;
if (std.math.cast(u6, info.bits - 1)) |shift| {
const n = @as(i64, std.math.minInt(i64)) >> (63 - shift);
- return Value.Tag.int_i64.create(arena, n);
+ return mod.intValue(Type.comptime_int, n);
}
- var res = try std.math.big.int.Managed.init(arena);
+ var res = try std.math.big.int.Managed.init(mod.gpa);
+ defer res.deinit();
+
try res.setTwosCompIntLimit(.min, info.signedness, info.bits);
- const res_const = res.toConst();
- if (res_const.positive) {
- return Value.Tag.int_big_positive.create(arena, res_const.limbs);
- } else {
- return Value.Tag.int_big_negative.create(arena, res_const.limbs);
- }
+ return mod.intValue_big(Type.comptime_int, res.toConst());
}
// Works for vectors and vectors of integers.
- pub fn maxInt(ty: Type, arena: Allocator, mod: *const Module) !Value {
- const scalar = try maxIntScalar(ty.scalarType(mod), arena, mod);
+ pub fn maxInt(ty: Type, arena: Allocator, mod: *Module) !Value {
+ const scalar = try maxIntScalar(ty.scalarType(mod), mod);
if (ty.zigTypeTag(mod) == .Vector and scalar.tag() != .the_only_possible_value) {
return Value.Tag.repeated.create(arena, scalar);
} else {
@@ -4358,41 +4348,39 @@ pub const Type = struct {
}
}
- /// Asserts that self.zigTypeTag() == .Int.
- pub fn maxIntScalar(self: Type, arena: Allocator, mod: *const Module) !Value {
- assert(self.zigTypeTag(mod) == .Int);
+ /// Asserts that the type is an integer.
+ pub fn maxIntScalar(self: Type, mod: *Module) !Value {
const info = self.intInfo(mod);
- if (info.bits == 0) {
- return Value.initTag(.the_only_possible_value);
- }
-
- switch (info.bits - @boolToInt(info.signedness == .signed)) {
- 0 => return Value.zero,
- 1 => return Value.one,
+ switch (info.bits) {
+ 0 => return switch (info.signedness) {
+ .signed => Value.negative_one,
+ .unsigned => Value.zero,
+ },
+ 1 => return switch (info.signedness) {
+ .signed => Value.zero,
+ .unsigned => Value.one,
+ },
else => {},
}
if (std.math.cast(u6, info.bits - 1)) |shift| switch (info.signedness) {
.signed => {
const n = @as(i64, std.math.maxInt(i64)) >> (63 - shift);
- return Value.Tag.int_i64.create(arena, n);
+ return mod.intValue(Type.comptime_int, n);
},
.unsigned => {
const n = @as(u64, std.math.maxInt(u64)) >> (63 - shift);
- return Value.Tag.int_u64.create(arena, n);
+ return mod.intValue(Type.comptime_int, n);
},
};
- var res = try std.math.big.int.Managed.init(arena);
+ var res = try std.math.big.int.Managed.init(mod.gpa);
+ defer res.deinit();
+
try res.setTwosCompIntLimit(.max, info.signedness, info.bits);
- const res_const = res.toConst();
- if (res_const.positive) {
- return Value.Tag.int_big_positive.create(arena, res_const.limbs);
- } else {
- return Value.Tag.int_big_negative.create(arena, res_const.limbs);
- }
+ return mod.intValue_big(Type.comptime_int, res.toConst());
}
/// Asserts the type is an enum or a union.
@@ -4497,12 +4485,11 @@ pub const Type = struct {
const S = struct {
fn fieldWithRange(int_ty: Type, int_val: Value, end: usize, m: *Module) ?usize {
if (int_val.compareAllWithZero(.lt, m)) return null;
- var end_payload: Value.Payload.U64 = .{
- .base = .{ .tag = .int_u64 },
- .data = end,
+ const end_val = m.intValue(int_ty, end) catch |err| switch (err) {
+ // TODO: eliminate this failure condition
+ error.OutOfMemory => @panic("OOM"),
};
- const end_val = Value.initPayload(&end_payload.base);
- if (int_val.compareAll(.gte, end_val, int_ty, m)) return null;
+ if (int_val.compareScalar(.gte, end_val, int_ty, m)) return null;
return @intCast(usize, int_val.toUnsignedInt(m));
}
};
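The new minIntScalar and maxIntScalar above keep a fast path (a 64-bit shift) for types of 64 bits or fewer and fall back to std.math.big.int's setTwosCompIntLimit for wider types, interning the result with mod.intValue_big instead of building int_big_positive/negative payloads in an arena. A standalone check of the big-int fallback; the test scaffolding here is mine, not compiler code:

    const std = @import("std");

    test "two's-complement limits via setTwosCompIntLimit" {
        const gpa = std.testing.allocator;

        var res = try std.math.big.int.Managed.init(gpa);
        defer res.deinit();

        // Maximum of a 65-bit unsigned integer: 2^65 - 1, too wide for the u64 fast path.
        try res.setTwosCompIntLimit(.max, .unsigned, 65);

        var expected = try std.math.big.int.Managed.initSet(gpa, (@as(u128, 1) << 65) - 1);
        defer expected.deinit();

        try std.testing.expect(res.toConst().order(expected.toConst()) == .eq);
    }

Note the temporary Managed big int is now freed with `defer res.deinit()` once its value has been interned, whereas the old code leaked the limbs into the arena on purpose because the returned Value aliased them.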
diff --git a/src/value.zig b/src/value.zig
index f8188c64ab..c0ea9e149f 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -33,8 +33,6 @@ pub const Value = struct {
// Keep in sync with tools/stage2_pretty_printers_common.py
pub const Tag = enum(usize) {
// The first section of this enum are tags that require no payload.
- zero,
- one,
/// The only possible value for a particular type, which is stored externally.
the_only_possible_value,
@@ -43,10 +41,6 @@ pub const Value = struct {
// After this, the tag requires a payload.
ty,
- int_u64,
- int_i64,
- int_big_positive,
- int_big_negative,
function,
extern_fn,
/// A comptime-known pointer can point to the address of a global
@@ -129,17 +123,11 @@ pub const Value = struct {
pub fn Type(comptime t: Tag) type {
return switch (t) {
- .zero,
- .one,
.the_only_possible_value,
.empty_struct_value,
.empty_array,
=> @compileError("Value Tag " ++ @tagName(t) ++ " has no payload"),
- .int_big_positive,
- .int_big_negative,
- => Payload.BigInt,
-
.extern_fn => Payload.ExternFn,
.decl_ref => Payload.Decl,
@@ -169,8 +157,6 @@ pub const Value = struct {
.lazy_size,
=> Payload.Ty,
- .int_u64 => Payload.U64,
- .int_i64 => Payload.I64,
.function => Payload.Function,
.variable => Payload.Variable,
.decl_ref_mut => Payload.DeclRefMut,
@@ -281,8 +267,6 @@ pub const Value = struct {
.legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough },
};
} else switch (self.legacy.ptr_otherwise.tag) {
- .zero,
- .one,
.the_only_possible_value,
.empty_array,
.empty_struct_value,
@@ -300,20 +284,6 @@ pub const Value = struct {
.legacy = .{ .ptr_otherwise = &new_payload.base },
};
},
- .int_u64 => return self.copyPayloadShallow(arena, Payload.U64),
- .int_i64 => return self.copyPayloadShallow(arena, Payload.I64),
- .int_big_positive, .int_big_negative => {
- const old_payload = self.cast(Payload.BigInt).?;
- const new_payload = try arena.create(Payload.BigInt);
- new_payload.* = .{
- .base = .{ .tag = self.legacy.ptr_otherwise.tag },
- .data = try arena.dupe(std.math.big.Limb, old_payload.data),
- };
- return Value{
- .ip_index = .none,
- .legacy = .{ .ptr_otherwise = &new_payload.base },
- };
- },
.function => return self.copyPayloadShallow(arena, Payload.Function),
.extern_fn => return self.copyPayloadShallow(arena, Payload.ExternFn),
.variable => return self.copyPayloadShallow(arena, Payload.Variable),
@@ -525,8 +495,6 @@ pub const Value = struct {
.@"union" => {
return out_stream.writeAll("(union value)");
},
- .zero => return out_stream.writeAll("0"),
- .one => return out_stream.writeAll("1"),
.the_only_possible_value => return out_stream.writeAll("(the only possible value)"),
.ty => return val.castTag(.ty).?.data.dump("", options, out_stream),
.lazy_align => {
@@ -539,10 +507,6 @@
try val.castTag(.lazy_size).?.data.dump("", options, out_stream);
return try out_stream.writeAll(")");
},
- .int_u64 => return std.fmt.formatIntValue(val.castTag(.int_u64).?.data, "", options, out_stream),
- .int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", options, out_stream),
- .int_big_positive => return out_stream.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}),
- .int_big_negative => return out_stream.print("{}", .{val.castTag(.int_big_negative).?.asBigInt()}),
.runtime_value => return out_stream.writeAll("[runtime value]"),
.function => return out_stream.print("(function decl={d})", .{val.castTag(.function).?.data.owner_decl}),
.extern_fn => return out_stream.writeAll("(extern function)"),
@@ -661,9 +625,8 @@
fn arrayToAllocatedBytes(val: Value, len: u64, allocator: Allocator, mod: *Module) ![]u8 {
const result = try allocator.alloc(u8, @intCast(usize, len));
- var elem_value_buf: ElemValueBuffer = undefined;
for (result, 0..) |*elem, i| {
- const elem_val = val.elemValueBuffer(mod, i, &elem_value_buf);
+ const elem_val = try val.elemValue(mod, i);
elem.* = @intCast(u8, elem_val.toUnsignedInt(mod));
}
return result;
@@ -695,7 +658,7 @@ pub const Value = struct {
}
}
- pub fn enumToInt(val: Value, ty: Type, buffer: *Payload.U64) Value {
+ pub fn enumToInt(val: Value, ty: Type, mod: *Module) Allocator.Error!Value {
const field_index = switch (val.tag()) {
.enum_field_index => val.castTag(.enum_field_index).?.data,
.the_only_possible_value => blk: {
@@ -717,11 +680,7 @@ pub const Value = struct {
return enum_full.values.keys()[field_index];
} else {
// Field index and integer values are the same.
- buffer.* = .{
- .base = .{ .tag = .int_u64 },
- .data = field_index,
- };
- return Value.initPayload(&buffer.base);
+ return mod.intValue(enum_full.tag_ty, field_index);
}
},
.enum_numbered => {
@@ -730,20 +689,13 @@
if (enum_obj.values.count() != 0) {
return enum_obj.values.keys()[field_index];
} else {
// Field index and integer values are the same.
- buffer.* = .{
- .base = .{ .tag = .int_u64 },
- .data = field_index,
- };
- return Value.initPayload(&buffer.base);
+ return mod.intValue(enum_obj.tag_ty, field_index);
}
},
.enum_simple => {
// Field index and integer values are the same.
- buffer.* = .{
- .base = .{ .tag = .int_u64 },
- .data = field_index,
- };
- return Value.initPayload(&buffer.base);
+ const tag_ty = ty.intTagType();
+ return mod.intValue(tag_ty, field_index);
},
else => unreachable,
}
@@ -802,12 +754,9 @@ pub const Value = struct {
.undef => unreachable,
.null_value => BigIntMutable.init(&space.limbs, 0).toConst(),
.none => switch (val.tag()) {
- .zero,
.the_only_possible_value, // i0, u0
=> BigIntMutable.init(&space.limbs, 0).toConst(),
- .one => BigIntMutable.init(&space.limbs, 1).toConst(),
-
.enum_field_index => {
const index = val.castTag(.enum_field_index).?.data;
return BigIntMutable.init(&space.limbs, index).toConst();
},
.runtime_value => {
const sub_val = val.castTag(.runtime_value).?.data;
return sub_val.toBigIntAdvanced(space, mod, opt_sema);
},
- .int_u64 => BigIntMutable.init(&space.limbs, val.castTag(.int_u64).?.data).toConst(),
- .int_i64 => BigIntMutable.init(&space.limbs, val.castTag(.int_i64).?.data).toConst(),
- .int_big_positive => val.castTag(.int_big_positive).?.asBigInt(),
- .int_big_negative => val.castTag(.int_big_negative).?.asBigInt(),
-
.lazy_align => {
const ty = val.castTag(.lazy_align).?.data;
if (opt_sema) |sema| {
@@ -869,17 +813,9 @@ pub const Value = struct {
.bool_true => return 1,
.undef => unreachable,
.none => switch (val.tag()) {
- .zero,
.the_only_possible_value, // i0, u0
=> return 0,
- .one => return 1,
-
- .int_u64 => return val.castTag(.int_u64).?.data,
- .int_i64 => return @intCast(u64, val.castTag(.int_i64).?.data),
- .int_big_positive => return val.castTag(.int_big_positive).?.asBigInt().to(u64) catch null,
- .int_big_negative => return val.castTag(.int_big_negative).?.asBigInt().to(u64) catch null,
-
.lazy_align => {
const ty = val.castTag(.lazy_align).?.data;
if (opt_sema) |sema| {
@@ -922,17 +858,9 @@ pub const Value = struct {
.bool_true => return 1,
.undef => unreachable,
.none => switch (val.tag()) {
- .zero,
.the_only_possible_value, // i0, u0
=> return 0,
- .one => return 1,
-
- .int_u64 => return @intCast(i64, val.castTag(.int_u64).?.data),
- .int_i64 => return val.castTag(.int_i64).?.data,
- .int_big_positive => return val.castTag(.int_big_positive).?.asBigInt().to(i64) catch unreachable,
- .int_big_negative => return val.castTag(.int_big_negative).?.asBigInt().to(i64) catch unreachable,
-
.lazy_align => {
const ty = val.castTag(.lazy_align).?.data;
return @intCast(i64, ty.abiAlignment(mod));
@@ -959,22 +887,7 @@ pub const Value = struct {
return switch (val.ip_index) {
.bool_true => true,
.bool_false => false,
- .none => switch (val.tag()) {
- .one => true,
- .zero => false,
-
- .int_u64 => switch (val.castTag(.int_u64).?.data) {
- 0 => false,
- 1 => true,
- else => unreachable,
- },
- .int_i64 => switch (val.castTag(.int_i64).?.data) {
- 0 => false,
- 1 => true,
- else => unreachable,
- },
- else => unreachable,
- },
+ .none => unreachable,
else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
.int => |int| switch (int.storage) {
.big_int => |big_int| !big_int.eqZero(),
@@ -1004,6 +917,7 @@ pub const Value = struct {
ReinterpretDeclRef,
IllDefinedMemoryLayout,
Unimplemented,
+ OutOfMemory,
}!void {
const target = mod.getTarget();
const endian = target.cpu.arch.endian();
@@ -1022,16 +936,14 @@ pub const Value = struct {
const bits = int_info.bits;
const byte_count = (bits + 7) / 8;
- var enum_buffer: Payload.U64 = undefined;
- const int_val = val.enumToInt(ty, &enum_buffer);
+ const int_val = try val.enumToInt(ty, mod);
if (byte_count <= @sizeOf(u64)) {
- const int: u64 = switch (int_val.tag()) {
- .zero => 0,
- .one => 1,
- .int_u64 => int_val.castTag(.int_u64).?.data,
- .int_i64 => @bitCast(u64, int_val.castTag(.int_i64).?.data),
- else => unreachable,
+ const ip_key = mod.intern_pool.indexToKey(int_val.ip_index);
+ const int: u64 = switch (ip_key.int.storage) {
+ .u64 => |x| x,
+ .i64 => |x| @bitCast(u64, x),
+ .big_int => unreachable,
};
for (buffer[0..byte_count], 0..) |_, i| switch (endian) {
.Little => buffer[i] = @truncate(u8, (int >> @intCast(u6, (8 * i)))),
@@ -1044,11 +956,11 @@
}
},
.Float => switch (ty.floatBits(target)) {
- 16 => std.mem.writeInt(u16, buffer[0..2], @bitCast(u16, val.toFloat(f16)), endian),
- 32 => std.mem.writeInt(u32, buffer[0..4], @bitCast(u32, val.toFloat(f32)), endian),
- 64 => std.mem.writeInt(u64, buffer[0..8], @bitCast(u64, val.toFloat(f64)), endian),
- 80 => std.mem.writeInt(u80, buffer[0..10], @bitCast(u80, val.toFloat(f80)), endian),
- 128 => std.mem.writeInt(u128, buffer[0..16], @bitCast(u128, val.toFloat(f128)), endian),
+ 16 => std.mem.writeInt(u16, buffer[0..2], @bitCast(u16, val.toFloat(f16, mod)), endian),
+ 32 => std.mem.writeInt(u32, buffer[0..4], @bitCast(u32, val.toFloat(f32, mod)), endian),
+ 64 => std.mem.writeInt(u64, buffer[0..8], @bitCast(u64, val.toFloat(f64, mod)), endian),
+ 80 => std.mem.writeInt(u80, buffer[0..10], @bitCast(u80, val.toFloat(f80, mod)), endian),
+ 128 => std.mem.writeInt(u128, buffer[0..16], @bitCast(u128, val.toFloat(f128, mod)), endian),
else => unreachable,
},
.Array => {
@@ -1056,10 +968,9 @@
const elem_ty = ty.childType(mod);
const elem_size = @intCast(usize, elem_ty.abiSize(mod));
var elem_i: usize = 0;
- var elem_value_buf: ElemValueBuffer = undefined;
var buf_off: usize = 0;
while (elem_i < len) : (elem_i += 1) {
- const elem_val = val.elemValueBuffer(mod, elem_i, &elem_value_buf);
+ const elem_val = try val.elemValue(mod, elem_i);
try elem_val.writeToMemory(elem_ty, mod, buffer[buf_off..]);
buf_off += elem_size;
}
@@ -1122,7 +1033,13 @@
///
/// Both the start and the end of the provided buffer must be tight, since
/// big-endian packed memory layouts start at the end of the buffer.
- pub fn writeToPackedMemory(val: Value, ty: Type, mod: *Module, buffer: []u8, bit_offset: usize) error{ReinterpretDeclRef}!void {
+ pub fn writeToPackedMemory(
+ val: Value,
+ ty: Type,
+ mod: *Module,
+ buffer: []u8,
+ bit_offset: usize,
+ ) error{ ReinterpretDeclRef, OutOfMemory }!void {
const target = mod.getTarget();
const endian = target.cpu.arch.endian();
if (val.isUndef()) {
@@ -1147,16 +1064,14 @@ pub const Value = struct {
const bits = ty.intInfo(mod).bits;
const abi_size = @intCast(usize, ty.abiSize(mod));
- var enum_buffer: Payload.U64 = undefined;
- const int_val = val.enumToInt(ty, &enum_buffer);
+ const int_val = try val.enumToInt(ty, mod);
if (abi_size == 0) return;
if (abi_size <= @sizeOf(u64)) {
- const int: u64 = switch (int_val.tag()) {
- .zero => 0,
- .one => 1,
- .int_u64 => int_val.castTag(.int_u64).?.data,
- .int_i64 => @bitCast(u64, int_val.castTag(.int_i64).?.data),
+ const ip_key = mod.intern_pool.indexToKey(int_val.ip_index);
+ const int: u64 = switch (ip_key.int.storage) {
+ .u64 => |x| x,
+ .i64 => |x| @bitCast(u64, x),
else => unreachable,
};
std.mem.writeVarPackedInt(buffer, bit_offset, bits, int, endian);
@@ -1167,11 +1082,11 @@ pub const Value = struct {
}
},
.Float => switch (ty.floatBits(target)) {
- 16 => std.mem.writePackedInt(u16, buffer, bit_offset, @bitCast(u16, val.toFloat(f16)), endian),
- 32 => std.mem.writePackedInt(u32, buffer, bit_offset, @bitCast(u32, val.toFloat(f32)), endian),
- 64 => std.mem.writePackedInt(u64, buffer, bit_offset, @bitCast(u64, val.toFloat(f64)), endian),
- 80 => std.mem.writePackedInt(u80, buffer, bit_offset, @bitCast(u80, val.toFloat(f80)), endian),
- 128 => std.mem.writePackedInt(u128, buffer, bit_offset, @bitCast(u128, val.toFloat(f128)), endian),
+ 16 => std.mem.writePackedInt(u16, buffer, bit_offset, @bitCast(u16, val.toFloat(f16, mod)), endian),
+ 32 => std.mem.writePackedInt(u32, buffer, bit_offset, @bitCast(u32, val.toFloat(f32, mod)), endian),
+ 64 => std.mem.writePackedInt(u64, buffer, bit_offset, @bitCast(u64, val.toFloat(f64, mod)), endian),
+ 80 => std.mem.writePackedInt(u80, buffer, bit_offset, @bitCast(u80, val.toFloat(f80, mod)), endian),
+ 128 => std.mem.writePackedInt(u128, buffer, bit_offset, @bitCast(u128, val.toFloat(f128, mod)), endian),
else => unreachable,
},
.Vector => {
@@ -1181,11 +1096,10 @@ pub const Value = struct {
var bits: u16 = 0;
var elem_i: usize = 0;
- var elem_value_buf: ElemValueBuffer = undefined;
while (elem_i < len) : (elem_i += 1) {
// On big-endian systems, LLVM reverses the element order of vectors by default
const tgt_elem_i = if (endian == .Big) len - elem_i - 1 else elem_i;
- const elem_val = val.elemValueBuffer(mod, tgt_elem_i, &elem_value_buf);
+ const elem_val = try val.elemValue(mod, tgt_elem_i);
try elem_val.writeToPackedMemory(elem_ty, mod, buffer, bit_offset + bits);
bits += elem_bit_size;
}
@@ -1264,11 +1178,13 @@ pub const Value = struct {
if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64
.signed => {
const val = std.mem.readVarInt(i64, buffer[0..byte_count], endian);
- return Value.Tag.int_i64.create(arena, (val << @intCast(u6, 64 - bits)) >> @intCast(u6, 64 - bits));
+ const result = (val << @intCast(u6, 64 - bits)) >> @intCast(u6, 64 - bits);
+ return mod.intValue(ty, result);
},
.unsigned => {
const val = std.mem.readVarInt(u64, buffer[0..byte_count], endian);
- return Value.Tag.int_u64.create(arena, (val << @intCast(u6, 64 - bits)) >> @intCast(u6, 64 - bits));
+ const result = (val << @intCast(u6, 64 - bits)) >> @intCast(u6, 64 - bits);
+ return mod.intValue(ty, result);
},
} else { // Slow path, we have to construct a big-int
const Limb = std.math.big.Limb;
@@ -1277,7 +1193,7 @@
var bigint = BigIntMutable.init(limbs_buffer, 0);
bigint.readTwosComplement(buffer[0..byte_count], bits, endian, int_info.signedness);
- return fromBigInt(arena, bigint.toConst());
+ return mod.intValue_big(ty, bigint.toConst());
}
},
.Float => switch (ty.floatBits(target)) {
@@ -1381,8 +1297,8 @@
const bits = int_info.bits;
if (bits == 0) return Value.zero;
if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64
- .signed => return Value.Tag.int_i64.create(arena, std.mem.readVarPackedInt(i64, buffer, bit_offset, bits, endian, .signed)),
- .unsigned => return Value.Tag.int_u64.create(arena, std.mem.readVarPackedInt(u64, buffer, bit_offset, bits, endian, .unsigned)),
+ .signed => return mod.intValue(ty, std.mem.readVarPackedInt(i64, buffer, bit_offset, bits, endian, .signed)),
+ .unsigned => return mod.intValue(ty, std.mem.readVarPackedInt(u64, buffer, bit_offset, bits, endian, .unsigned)),
} else { // Slow path, we have to construct a big-int
const Limb = std.math.big.Limb;
const limb_count = (abi_size + @sizeOf(Limb) - 1) / @sizeOf(Limb);
@@ -1390,7 +1306,7 @@
var bigint = BigIntMutable.init(limbs_buffer, 0);
bigint.readPackedTwosComplement(buffer, bit_offset, bits, endian, int_info.signedness);
- return fromBigInt(arena, bigint.toConst());
+ return mod.intValue_big(ty, bigint.toConst());
}
},
.Float => switch (ty.floatBits(target)) {
@@ -1444,32 +1360,29 @@
}
/// Asserts that the value is a float or an integer.
- pub fn toFloat(val: Value, comptime T: type) T {
- return switch (val.tag()) {
- .float_16 => @floatCast(T, val.castTag(.float_16).?.data),
- .float_32 => @floatCast(T, val.castTag(.float_32).?.data),
- .float_64 => @floatCast(T, val.castTag(.float_64).?.data),
- .float_80 => @floatCast(T, val.castTag(.float_80).?.data),
- .float_128 => @floatCast(T, val.castTag(.float_128).?.data),
-
- .zero => 0,
- .one => 1,
- .int_u64 => {
- if (T == f80) {
- @panic("TODO we can't lower this properly on non-x86 llvm backend yet");
- }
- return @intToFloat(T, val.castTag(.int_u64).?.data);
+ pub fn toFloat(val: Value, comptime T: type, mod: *const Module) T {
+ return switch (val.ip_index) {
+ .none => switch (val.tag()) {
+ .float_16 => @floatCast(T, val.castTag(.float_16).?.data),
+ .float_32 => @floatCast(T, val.castTag(.float_32).?.data),
+ .float_64 => @floatCast(T, val.castTag(.float_64).?.data),
+ .float_80 => @floatCast(T, val.castTag(.float_80).?.data),
+ .float_128 => @floatCast(T, val.castTag(.float_128).?.data),
+
+ else => unreachable,
},
- .int_i64 => {
- if (T == f80) {
- @panic("TODO we can't lower this properly on non-x86 llvm backend yet");
- }
- return @intToFloat(T, val.castTag(.int_i64).?.data);
+ else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .int => |int| switch (int.storage) {
+ .big_int => |big_int| @floatCast(T, bigIntToFloat(big_int.limbs, big_int.positive)),
+ inline .u64, .i64 => |x| {
+ if (T == f80) {
+ @panic("TODO we can't lower this properly on non-x86 llvm backend yet");
+ }
+ return @intToFloat(T, x);
+ },
+ },
+ else => unreachable,
},
-
- .int_big_positive => @floatCast(T, bigIntToFloat(val.castTag(.int_big_positive).?.data, true)),
- .int_big_negative => @floatCast(T, bigIntToFloat(val.castTag(.int_big_negative).?.data, false)),
- else => unreachable,
};
}
@@ -1498,24 +1411,6 @@
.bool_false => ty_bits,
.bool_true => ty_bits - 1,
.none => switch (val.tag()) {
- .zero => ty_bits,
- .one => ty_bits - 1,
-
- .int_u64 => {
- const big = @clz(val.castTag(.int_u64).?.data);
- return big + ty_bits - 64;
- },
- .int_i64 => {
- @panic("TODO implement i64 Value clz");
- },
- .int_big_positive => {
- const bigint = val.castTag(.int_big_positive).?.asBigInt();
- return bigint.clz(ty_bits);
- },
- .int_big_negative => {
- @panic("TODO implement int_big_negative Value clz");
- },
-
.the_only_possible_value => {
assert(ty_bits == 0);
return ty_bits;
@@ -1546,24 +1441,6 @@ pub const Value = struct {
.bool_false => ty_bits,
.bool_true => 0,
.none => switch (val.tag()) {
- .zero => ty_bits,
- .one => 0,
-
- .int_u64 => {
- const big = @ctz(val.castTag(.int_u64).?.data);
- return if (big == 64) ty_bits else big;
- },
- .int_i64 => {
- @panic("TODO implement i64 Value ctz");
- },
- .int_big_positive => {
- const bigint = val.castTag(.int_big_positive).?.asBigInt();
- return bigint.ctz();
- },
- .int_big_negative => {
- @panic("TODO implement int_big_negative Value ctz");
- },
-
.the_only_possible_value => {
assert(ty_bits == 0);
return ty_bits;
@@ -1596,20 +1473,7 @@ pub const Value = struct {
switch (val.ip_index) {
.bool_false => return 0,
.bool_true => return 1,
- .none => switch (val.tag()) {
- .zero => return 0,
- .one => return 1,
-
- .int_u64 => return @popCount(val.castTag(.int_u64).?.data),
-
- else => {
- const info = ty.intInfo(mod);
-
- var buffer: Value.BigIntSpace = undefined;
- const int = val.toBigInt(&buffer, mod);
- return @intCast(u64, int.popCount(info.bits));
- },
- },
+ .none => unreachable,
else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
.int => |int| {
const info = ty.intInfo(mod);
@@ -1622,7 +1486,7 @@ pub const Value = struct {
}
}
- pub fn bitReverse(val: Value, ty: Type, mod: *const Module, arena: Allocator) !Value {
+ pub fn bitReverse(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value {
assert(!val.isUndef());
const info = ty.intInfo(mod);
@@ -1637,10 +1501,10 @@ pub const Value = struct {
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitReverse(operand_bigint, info.signedness, info.bits);
- return fromBigInt(arena, result_bigint.toConst());
+ return mod.intValue_big(ty, result_bigint.toConst());
}
- pub fn byteSwap(val: Value, ty: Type, mod: *const Module, arena: Allocator) !Value {
+ pub fn byteSwap(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value {
assert(!val.isUndef());
const info = ty.intInfo(mod);
@@ -1658,7 +1522,7 @@ pub const Value = struct {
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.byteSwap(operand_bigint, info.signedness, info.bits / 8);
- return fromBigInt(arena, result_bigint.toConst());
+ return mod.intValue_big(ty, result_bigint.toConst());
}
/// Asserts the value is an integer and not undefined.
@@ -1669,19 +1533,7 @@ pub const Value = struct {
.bool_false => 0,
.bool_true => 1,
.none => switch (self.tag()) {
- .zero,
- .the_only_possible_value,
- => 0,
-
- .one => 1,
-
- .int_u64 => {
- const x = self.castTag(.int_u64).?.data;
- if (x == 0) return 0;
- return @intCast(usize, std.math.log2(x) + 1);
- },
- .int_big_positive => self.castTag(.int_big_positive).?.asBigInt().bitCountTwosComp(),
- .int_big_negative => self.castTag(.int_big_negative).?.asBigInt().bitCountTwosComp(),
+ .the_only_possible_value => 0,
.decl_ref_mut,
.comptime_field_ptr,
@@ -1715,13 +1567,14 @@ pub const Value = struct {
/// Converts an integer or a float to a float. May result in a loss of information.
/// Caller can find out by equality checking the result against the operand.
- pub fn floatCast(self: Value, arena: Allocator, dest_ty: Type, target: Target) !Value {
+ pub fn floatCast(self: Value, arena: Allocator, dest_ty: Type, mod: *const Module) !Value {
+ const target = mod.getTarget();
switch (dest_ty.floatBits(target)) {
- 16 => return Value.Tag.float_16.create(arena, self.toFloat(f16)),
- 32 => return Value.Tag.float_32.create(arena, self.toFloat(f32)),
- 64 => return Value.Tag.float_64.create(arena, self.toFloat(f64)),
- 80 => return Value.Tag.float_80.create(arena, self.toFloat(f80)),
- 128 => return Value.Tag.float_128.create(arena, self.toFloat(f128)),
+ 16 => return Value.Tag.float_16.create(arena, self.toFloat(f16, mod)),
+ 32 => return Value.Tag.float_32.create(arena, self.toFloat(f32, mod)),
+ 64 => return Value.Tag.float_64.create(arena, self.toFloat(f64, mod)),
+ 80 => return Value.Tag.float_80.create(arena, self.toFloat(f80, mod)),
+ 128 => return Value.Tag.float_128.create(arena, self.toFloat(f128, mod)),
else => unreachable,
}
}
@@ -1729,10 +1582,6 @@ pub const Value = struct {
/// Asserts the value is a float
pub fn floatHasFraction(self: Value) bool {
return switch (self.tag()) {
- .zero,
- .one,
- => false,
-
.float_16 => @rem(self.castTag(.float_16).?.data, 1) != 0,
.float_32 => @rem(self.castTag(.float_32).?.data, 1) != 0,
.float_64 => @rem(self.castTag(.float_64).?.data, 1) != 0,
@@ -1757,11 +1606,8 @@ pub const Value = struct {
.bool_false => return .eq,
.bool_true => return .gt,
.none => return switch (lhs.tag()) {
- .zero,
- .the_only_possible_value,
- => .eq,
+ .the_only_possible_value => .eq,
- .one,
.decl_ref,
.decl_ref_mut,
.comptime_field_ptr,
@@ -1777,10 +1623,6 @@ pub const Value = struct {
const val = lhs.castTag(.runtime_value).?.data;
return val.orderAgainstZeroAdvanced(mod, opt_sema);
},
- .int_u64 => std.math.order(lhs.castTag(.int_u64).?.data, 0),
- .int_i64 => std.math.order(lhs.castTag(.int_i64).?.data, 0),
- .int_big_positive => lhs.castTag(.int_big_positive).?.asBigInt().orderAgainstScalar(0),
- .int_big_negative => lhs.castTag(.int_big_negative).?.asBigInt().orderAgainstScalar(0),
.lazy_align => {
const ty = lhs.castTag(.lazy_align).?.data;
@@ -1878,8 +1720,8 @@ pub const Value = struct {
}
}
if (lhs_float or rhs_float) {
- const lhs_f128 = lhs.toFloat(f128);
- const rhs_f128 = rhs.toFloat(f128);
+ const lhs_f128 = lhs.toFloat(f128, mod);
+ const rhs_f128 = rhs.toFloat(f128, mod);
return std.math.order(lhs_f128, rhs_f128);
}
@@ -1929,15 +1771,13 @@ pub const Value = struct {
/// Asserts the values are comparable. Both operands have type `ty`.
/// For vectors, returns true if comparison is true for ALL elements.
- pub fn compareAll(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, mod: *Module) bool { + pub fn compareAll(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, mod: *Module) !bool { if (ty.zigTypeTag(mod) == .Vector) { - var i: usize = 0; - while (i < ty.vectorLen(mod)) : (i += 1) { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - if (!compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod), mod)) { + const scalar_ty = ty.scalarType(mod); + for (0..ty.vectorLen(mod)) |i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + if (!compareScalar(lhs_elem, op, rhs_elem, scalar_ty, mod)) { return false; } } @@ -2203,10 +2043,8 @@ pub const Value = struct { return a_type.eql(b_type, mod); }, .Enum => { - var buf_a: Payload.U64 = undefined; - var buf_b: Payload.U64 = undefined; - const a_val = a.enumToInt(ty, &buf_a); - const b_val = b.enumToInt(ty, &buf_b); + const a_val = try a.enumToInt(ty, mod); + const b_val = try b.enumToInt(ty, mod); const int_ty = ty.intTagType(); return eqlAdvanced(a_val, int_ty, b_val, int_ty, mod, opt_sema); }, @@ -2214,11 +2052,9 @@ pub const Value = struct { const len = ty.arrayLen(mod); const elem_ty = ty.childType(mod); var i: usize = 0; - var a_buf: ElemValueBuffer = undefined; - var b_buf: ElemValueBuffer = undefined; while (i < len) : (i += 1) { - const a_elem = elemValueBuffer(a, mod, i, &a_buf); - const b_elem = elemValueBuffer(b, mod, i, &b_buf); + const a_elem = try elemValue(a, mod, i); + const b_elem = try elemValue(b, mod, i); if (!(try eqlAdvanced(a_elem, elem_ty, b_elem, elem_ty, mod, opt_sema))) { return false; } @@ -2282,17 +2118,17 @@ pub const Value = struct { }, .Float => { switch (ty.floatBits(target)) { - 16 => return @bitCast(u16, a.toFloat(f16)) == @bitCast(u16, b.toFloat(f16)), - 32 => return @bitCast(u32, a.toFloat(f32)) == @bitCast(u32, b.toFloat(f32)), - 64 => return @bitCast(u64, a.toFloat(f64)) == @bitCast(u64, b.toFloat(f64)), - 80 => return @bitCast(u80, a.toFloat(f80)) == @bitCast(u80, b.toFloat(f80)), - 128 => return @bitCast(u128, a.toFloat(f128)) == @bitCast(u128, b.toFloat(f128)), + 16 => return @bitCast(u16, a.toFloat(f16, mod)) == @bitCast(u16, b.toFloat(f16, mod)), + 32 => return @bitCast(u32, a.toFloat(f32, mod)) == @bitCast(u32, b.toFloat(f32, mod)), + 64 => return @bitCast(u64, a.toFloat(f64, mod)) == @bitCast(u64, b.toFloat(f64, mod)), + 80 => return @bitCast(u80, a.toFloat(f80, mod)) == @bitCast(u80, b.toFloat(f80, mod)), + 128 => return @bitCast(u128, a.toFloat(f128, mod)) == @bitCast(u128, b.toFloat(f128, mod)), else => unreachable, } }, .ComptimeFloat => { - const a_float = a.toFloat(f128); - const b_float = b.toFloat(f128); + const a_float = a.toFloat(f128, mod); + const b_float = b.toFloat(f128, mod); const a_nan = std.math.isNan(a_float); const b_nan = std.math.isNan(b_float); @@ -2354,16 +2190,16 @@ pub const Value = struct { .Float => { // For hash/eql purposes, we treat floats as their IEEE integer representation. 
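
// Illustrative sketch (separate from the patch): per the comment above,
// floats hash by their IEEE bit pattern so hashing agrees with the
// bit-for-bit equality in eqlAdvanced. A minimal single-width version with
// an invented name:
const std = @import("std");

fn hashF64(hasher: *std.hash.Wyhash, x: f64) void {
    // Equal bit patterns, not numeric equality: 0.0 and -0.0 hash
    // differently, while any given NaN payload hashes stably.
    std.hash.autoHash(hasher, @bitCast(u64, x));
}

test "hashF64" {
    var h1 = std.hash.Wyhash.init(0);
    var h2 = std.hash.Wyhash.init(0);
    const nan = std.math.nan(f64);
    hashF64(&h1, nan);
    hashF64(&h2, nan);
    // nan != nan numerically, yet the hashes match.
    try std.testing.expect(h1.final() == h2.final());
}
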
switch (ty.floatBits(mod.getTarget())) { - 16 => std.hash.autoHash(hasher, @bitCast(u16, val.toFloat(f16))), - 32 => std.hash.autoHash(hasher, @bitCast(u32, val.toFloat(f32))), - 64 => std.hash.autoHash(hasher, @bitCast(u64, val.toFloat(f64))), - 80 => std.hash.autoHash(hasher, @bitCast(u80, val.toFloat(f80))), - 128 => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128))), + 16 => std.hash.autoHash(hasher, @bitCast(u16, val.toFloat(f16, mod))), + 32 => std.hash.autoHash(hasher, @bitCast(u32, val.toFloat(f32, mod))), + 64 => std.hash.autoHash(hasher, @bitCast(u64, val.toFloat(f64, mod))), + 80 => std.hash.autoHash(hasher, @bitCast(u80, val.toFloat(f80, mod))), + 128 => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128, mod))), else => unreachable, } }, .ComptimeFloat => { - const float = val.toFloat(f128); + const float = val.toFloat(f128, mod); const is_nan = std.math.isNan(float); std.hash.autoHash(hasher, is_nan); if (!is_nan) { @@ -2387,9 +2223,11 @@ pub const Value = struct { const len = ty.arrayLen(mod); const elem_ty = ty.childType(mod); var index: usize = 0; - var elem_value_buf: ElemValueBuffer = undefined; while (index < len) : (index += 1) { - const elem_val = val.elemValueBuffer(mod, index, &elem_value_buf); + const elem_val = val.elemValue(mod, index) catch |err| switch (err) { + // Will be solved when arrays and vectors get migrated to the intern pool. + error.OutOfMemory => @panic("OOM"), + }; elem_val.hash(elem_ty, hasher, mod); } }, @@ -2438,8 +2276,8 @@ pub const Value = struct { hasher.update(val.getError().?); }, .Enum => { - var enum_space: Payload.U64 = undefined; - const int_val = val.enumToInt(ty, &enum_space); + // This panic will go away when enum values move to be stored in the intern pool. + const int_val = val.enumToInt(ty, mod) catch @panic("OOM"); hashInt(int_val, hasher, mod); }, .Union => { @@ -2494,7 +2332,7 @@ pub const Value = struct { .Type => { val.toType().hashWithHasher(hasher, mod); }, - .Float, .ComptimeFloat => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128))), + .Float, .ComptimeFloat => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128, mod))), .Bool, .Int, .ComptimeInt, .Pointer, .Fn => switch (val.tag()) { .slice => { const slice = val.castTag(.slice).?.data; @@ -2508,9 +2346,11 @@ pub const Value = struct { const len = ty.arrayLen(mod); const elem_ty = ty.childType(mod); var index: usize = 0; - var elem_value_buf: ElemValueBuffer = undefined; while (index < len) : (index += 1) { - const elem_val = val.elemValueBuffer(mod, index, &elem_value_buf); + const elem_val = val.elemValue(mod, index) catch |err| switch (err) { + // Will be solved when arrays and vectors get migrated to the intern pool. + error.OutOfMemory => @panic("OOM"), + }; elem_val.hashUncoerced(elem_ty, hasher, mod); } }, @@ -2661,12 +2501,6 @@ pub const Value = struct { hashPtr(opt_ptr.container_ptr, hasher, mod); }, - .zero, - .one, - .int_u64, - .int_i64, - .int_big_positive, - .int_big_negative, .the_only_possible_value, .lazy_align, .lazy_size, @@ -2720,23 +2554,7 @@ pub const Value = struct { /// Asserts the value is a single-item pointer to an array, or an array, /// or an unknown-length pointer, and returns the element value at the index. 
- pub fn elemValue(val: Value, mod: *Module, arena: Allocator, index: usize) !Value { - return elemValueAdvanced(val, mod, index, arena, undefined); - } - - pub const ElemValueBuffer = Payload.U64; - - pub fn elemValueBuffer(val: Value, mod: *Module, index: usize, buffer: *ElemValueBuffer) Value { - return elemValueAdvanced(val, mod, index, null, buffer) catch unreachable; - } - - pub fn elemValueAdvanced( - val: Value, - mod: *Module, - index: usize, - arena: ?Allocator, - buffer: *ElemValueBuffer, - ) error{OutOfMemory}!Value { + pub fn elemValue(val: Value, mod: *Module, index: usize) Allocator.Error!Value { switch (val.ip_index) { .undef => return Value.undef, .none => switch (val.tag()) { @@ -2751,43 +2569,27 @@ pub const Value = struct { .bytes => { const byte = val.castTag(.bytes).?.data[index]; - if (arena) |a| { - return Tag.int_u64.create(a, byte); - } else { - buffer.* = .{ - .base = .{ .tag = .int_u64 }, - .data = byte, - }; - return initPayload(&buffer.base); - } + return mod.intValue(Type.u8, byte); }, .str_lit => { const str_lit = val.castTag(.str_lit).?.data; const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; const byte = bytes[index]; - if (arena) |a| { - return Tag.int_u64.create(a, byte); - } else { - buffer.* = .{ - .base = .{ .tag = .int_u64 }, - .data = byte, - }; - return initPayload(&buffer.base); - } + return mod.intValue(Type.u8, byte); }, // No matter the index; all the elements are the same! .repeated => return val.castTag(.repeated).?.data, .aggregate => return val.castTag(.aggregate).?.data[index], - .slice => return val.castTag(.slice).?.data.ptr.elemValueAdvanced(mod, index, arena, buffer), + .slice => return val.castTag(.slice).?.data.ptr.elemValue(mod, index), - .decl_ref => return mod.declPtr(val.castTag(.decl_ref).?.data).val.elemValueAdvanced(mod, index, arena, buffer), - .decl_ref_mut => return mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val.elemValueAdvanced(mod, index, arena, buffer), - .comptime_field_ptr => return val.castTag(.comptime_field_ptr).?.data.field_val.elemValueAdvanced(mod, index, arena, buffer), + .decl_ref => return mod.declPtr(val.castTag(.decl_ref).?.data).val.elemValue(mod, index), + .decl_ref_mut => return mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val.elemValue(mod, index), + .comptime_field_ptr => return val.castTag(.comptime_field_ptr).?.data.field_val.elemValue(mod, index), .elem_ptr => { const data = val.castTag(.elem_ptr).?.data; - return data.array_ptr.elemValueAdvanced(mod, index + data.index, arena, buffer); + return data.array_ptr.elemValue(mod, index + data.index); }, .field_ptr => { const data = val.castTag(.field_ptr).?.data; @@ -2795,7 +2597,7 @@ pub const Value = struct { const container_decl = mod.declPtr(decl_index); const field_type = data.container_ty.structFieldType(data.field_index); const field_val = container_decl.val.fieldValue(field_type, mod, data.field_index); - return field_val.elemValueAdvanced(mod, index, arena, buffer); + return field_val.elemValue(mod, index); } else unreachable; }, @@ -2803,11 +2605,11 @@ pub const Value = struct { // to have only one possible value itself. 
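
// Illustrative sketch (separate from the patch): the elemValue /
// elemValueBuffer / elemValueAdvanced trio above collapses into one fallible
// accessor, because scalar results such as bytes are now interned through
// mod.intValue(Type.u8, byte) rather than written into a caller-owned payload
// buffer. The shape of that change, on hypothetical stand-in types:
fn byteElemOld(bytes: []const u8, index: usize, buffer: *u8) *const u8 {
    buffer.* = bytes[index]; // result lives in the caller's buffer
    return buffer;
}

fn byteElemNew(bytes: []const u8, index: usize) error{OutOfMemory}!u8 {
    return bytes[index]; // interning may allocate, hence the error set
}
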
.the_only_possible_value => return val, - .opt_payload_ptr => return val.castTag(.opt_payload_ptr).?.data.container_ptr.elemValueAdvanced(mod, index, arena, buffer), - .eu_payload_ptr => return val.castTag(.eu_payload_ptr).?.data.container_ptr.elemValueAdvanced(mod, index, arena, buffer), + .opt_payload_ptr => return val.castTag(.opt_payload_ptr).?.data.container_ptr.elemValue(mod, index), + .eu_payload_ptr => return val.castTag(.eu_payload_ptr).?.data.container_ptr.elemValue(mod, index), - .opt_payload => return val.castTag(.opt_payload).?.data.elemValueAdvanced(mod, index, arena, buffer), - .eu_payload => return val.castTag(.eu_payload).?.data.elemValueAdvanced(mod, index, arena, buffer), + .opt_payload => return val.castTag(.opt_payload).?.data.elemValue(mod, index), + .eu_payload => return val.castTag(.eu_payload).?.data.elemValue(mod, index), else => unreachable, }, @@ -3004,7 +2806,7 @@ pub const Value = struct { /// TODO: check for cases such as array that is not marked undef but all the element /// values are marked undef, or struct that is not marked undef but all fields are marked /// undef, etc. - pub fn anyUndef(self: Value, mod: *Module) bool { + pub fn anyUndef(self: Value, mod: *Module) !bool { switch (self.ip_index) { .undef => return true, .none => switch (self.tag()) { @@ -3012,18 +2814,16 @@ pub const Value = struct { const payload = self.castTag(.slice).?; const len = payload.data.len.toUnsignedInt(mod); - var elem_value_buf: ElemValueBuffer = undefined; - var i: usize = 0; - while (i < len) : (i += 1) { - const elem_val = payload.data.ptr.elemValueBuffer(mod, i, &elem_value_buf); - if (elem_val.anyUndef(mod)) return true; + for (0..len) |i| { + const elem_val = try payload.data.ptr.elemValue(mod, i); + if (try elem_val.anyUndef(mod)) return true; } }, .aggregate => { const payload = self.castTag(.aggregate).?; for (payload.data) |val| { - if (val.anyUndef(mod)) return true; + if (try val.anyUndef(mod)) return true; } }, else => {}, @@ -3036,35 +2836,37 @@ pub const Value = struct { /// Asserts the value is not undefined and not unreachable. /// Integer value 0 is considered null because of C pointers. - pub fn isNull(self: Value, mod: *const Module) bool { - return switch (self.ip_index) { + pub fn isNull(val: Value, mod: *const Module) bool { + return switch (val.ip_index) { .undef => unreachable, .unreachable_value => unreachable, - .null_value => true, - .none => switch (self.tag()) { + + .null_value, + .zero, + .zero_usize, + .zero_u8, + => true, + + .none => switch (val.tag()) { .opt_payload => false, // If it's not one of those two tags then it must be a C pointer value, // in which case the value 0 is null and other values are non-null. 
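
// Illustrative sketch (separate from the patch): the new `else` arm below
// fetches the interned int key and tests it for zero across every storage
// form. A self-contained model of that `inline` multi-prong switch:
const std = @import("std");

const IntStorage = union(enum) {
    big_int: std.math.big.int.Const,
    u64: u64,
    i64: i64,

    fn eqZero(s: IntStorage) bool {
        return switch (s) {
            .big_int => |big| big.eqZero(),
            // `inline` expands one arm per variant, each with its own type.
            inline .u64, .i64 => |x| x == 0,
        };
    }
};

test "IntStorage.eqZero" {
    try std.testing.expect((IntStorage{ .i64 = 0 }).eqZero());
    try std.testing.expect(!(IntStorage{ .u64 = 7 }).eqZero());
}
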
- .zero, - .the_only_possible_value, - => true, - - .one => false, - - .int_u64, - .int_i64, - .int_big_positive, - .int_big_negative, - => self.orderAgainstZero(mod).compare(.eq), + .the_only_possible_value => true, .inferred_alloc => unreachable, .inferred_alloc_comptime => unreachable, else => false, }, - else => false, + else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| switch (int.storage) { + .big_int => |big_int| big_int.eqZero(), + inline .u64, .i64 => |x| x == 0, + }, + else => unreachable, + }, }; } @@ -3078,17 +2880,13 @@ pub const Value = struct { .unreachable_value => unreachable, .none => switch (self.tag()) { .@"error" => self.castTag(.@"error").?.data.name, - .int_u64 => @panic("TODO"), - .int_i64 => @panic("TODO"), - .int_big_positive => @panic("TODO"), - .int_big_negative => @panic("TODO"), - .one => @panic("TODO"), + .eu_payload => null, + .inferred_alloc => unreachable, .inferred_alloc_comptime => unreachable, - - else => null, + else => unreachable, }, - else => null, + else => unreachable, }; } @@ -3147,10 +2945,10 @@ pub const Value = struct { pub fn intToFloatAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { if (int_ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, int_ty.vectorLen(mod)); + const scalar_ty = float_ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try intToFloatScalar(elem_val, arena, float_ty.scalarType(mod), mod, opt_sema); + const elem_val = try val.elemValue(mod, i); + scalar.* = try intToFloatScalar(elem_val, arena, scalar_ty, mod, opt_sema); } return Value.Tag.aggregate.create(arena, result_data); } @@ -3162,24 +2960,7 @@ pub const Value = struct { switch (val.ip_index) { .undef => return val, .none => switch (val.tag()) { - .zero, .one => return val, - .the_only_possible_value => return Value.initTag(.zero), // for i0, u0 - .int_u64 => { - return intToFloatInner(val.castTag(.int_u64).?.data, arena, float_ty, target); - }, - .int_i64 => { - return intToFloatInner(val.castTag(.int_i64).?.data, arena, float_ty, target); - }, - .int_big_positive => { - const limbs = val.castTag(.int_big_positive).?.data; - const float = bigIntToFloat(limbs, true); - return floatToValue(float, arena, float_ty, target); - }, - .int_big_negative => { - const limbs = val.castTag(.int_big_negative).?.data; - const float = bigIntToFloat(limbs, false); - return floatToValue(float, arena, float_ty, target); - }, + .the_only_possible_value => return Value.zero, // for i0, u0 .lazy_align => { const ty = val.castTag(.lazy_align).?.data; if (opt_sema) |sema| { @@ -3198,7 +2979,16 @@ pub const Value = struct { }, else => unreachable, }, - else => unreachable, + else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| switch (int.storage) { + .big_int => |big_int| { + const float = bigIntToFloat(big_int.limbs, big_int.positive); + return floatToValue(float, arena, float_ty, target); + }, + inline .u64, .i64 => |x| intToFloatInner(x, arena, float_ty, target), + }, + else => unreachable, + }, } } @@ -3238,22 +3028,6 @@ pub const Value = struct { wrapped_result: Value, }; - pub fn fromBigInt(arena: Allocator, big_int: BigIntConst) !Value { - if (big_int.positive) { - if (big_int.to(u64)) |x| { - return Value.Tag.int_u64.create(arena, x); - } else |_| { - return Value.Tag.int_big_positive.create(arena, big_int.limbs); - 
} - } else { - if (big_int.to(i64)) |x| { - return Value.Tag.int_i64.create(arena, x); - } else |_| { - return Value.Tag.int_big_negative.create(arena, big_int.limbs); - } - } - } - /// Supports (vectors of) integers only; asserts neither operand is undefined. pub fn intAddSat( lhs: Value, @@ -3264,12 +3038,11 @@ pub const Value = struct { ) !Value { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intAddSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try intAddSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } @@ -3299,7 +3072,7 @@ pub const Value = struct { ); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.addSat(lhs_bigint, rhs_bigint, info.signedness, info.bits); - return fromBigInt(arena, result_bigint.toConst()); + return mod.intValue_big(ty, result_bigint.toConst()); } /// Supports (vectors of) integers only; asserts neither operand is undefined. @@ -3312,12 +3085,11 @@ pub const Value = struct { ) !Value { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intSubSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try intSubSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } @@ -3347,7 +3119,7 @@ pub const Value = struct { ); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.subSat(lhs_bigint, rhs_bigint, info.signedness, info.bits); - return fromBigInt(arena, result_bigint.toConst()); + return mod.intValue_big(ty, result_bigint.toConst()); } pub fn intMulWithOverflow( @@ -3360,12 +3132,11 @@ pub const Value = struct { if (ty.zigTypeTag(mod) == .Vector) { const overflowed_data = try arena.alloc(Value, ty.vectorLen(mod)); const result_data = try arena.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod); overflowed_data[i] = of_math_result.overflow_bit; scalar.* = of_math_result.wrapped_result; } @@ -3408,7 +3179,7 @@ pub const Value = struct { return OverflowArithmeticResult{ .overflow_bit = boolToInt(overflowed), - .wrapped_result = try fromBigInt(arena, result_bigint.toConst()), + .wrapped_result = try mod.intValue_big(ty, result_bigint.toConst()), }; } @@ -3423,10 +3194,8 @@ pub const Value = struct { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); scalar.* = try numberMulWrapScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); @@ -3467,10 +3236,8 @@ pub const Value = struct { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); scalar.* = try intMulSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); @@ -3510,7 +3277,7 @@ pub const Value = struct { ); result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, arena); result_bigint.saturate(result_bigint.toConst(), info.signedness, info.bits); - return fromBigInt(arena, result_bigint.toConst()); + return mod.intValue_big(ty, result_bigint.toConst()); } /// Supports both floats and ints; handles undefined. @@ -3542,8 +3309,7 @@ pub const Value = struct { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); + const elem_val = try val.elemValue(mod, i); scalar.* = try bitwiseNotScalar(elem_val, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); @@ -3572,7 +3338,7 @@ pub const Value = struct { var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitNotWrap(val_bigint, info.signedness, info.bits); - return fromBigInt(arena, result_bigint.toConst()); + return mod.intValue_big(ty, result_bigint.toConst()); } /// operands must be (vectors of) integers; handles undefined scalars. 
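
// Illustrative sketch (separate from the patch): bitwiseNotScalar above
// computes the result with BigIntMutable.bitNotWrap, wrapping to the type's
// signedness and bit width, then interns it via mod.intValue_big. The same
// calls in a standalone test:
const std = @import("std");

test "bitNotWrap to 8 bits" {
    const gpa = std.testing.allocator;
    const big = std.math.big;
    var a = try big.int.Managed.initSet(gpa, 0x0f);
    defer a.deinit();
    // Destination limbs sized for an 8-bit two's-complement result.
    const limbs = try gpa.alloc(big.Limb, big.int.calcTwosCompLimbCount(8));
    defer gpa.free(limbs);
    var r = big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
    r.bitNotWrap(a.toConst(), .unsigned, 8);
    // ~0x0f wrapped to u8 is 0xf0, not the infinite-precision -0x10.
    try std.testing.expectEqual(@as(u8, 0xf0), try r.toConst().to(u8));
}
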
@@ -3580,19 +3346,17 @@ pub const Value = struct { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try bitwiseAndScalar(lhs_elem, rhs_elem, allocator, mod); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try bitwiseAndScalar(lhs_elem, rhs_elem, ty.scalarType(mod), allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return bitwiseAndScalar(lhs, rhs, allocator, mod); + return bitwiseAndScalar(lhs, rhs, ty, allocator, mod); } /// operands must be integers; handles undefined. - pub fn bitwiseAndScalar(lhs: Value, rhs: Value, arena: Allocator, mod: *Module) !Value { + pub fn bitwiseAndScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.undef; // TODO is this a performance issue? maybe we should try the operation without @@ -3608,7 +3372,7 @@ pub const Value = struct { ); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitAnd(lhs_bigint, rhs_bigint); - return fromBigInt(arena, result_bigint.toConst()); + return mod.intValue_big(ty, result_bigint.toConst()); } /// operands must be (vectors of) integers; handles undefined scalars. @@ -3616,10 +3380,8 @@ pub const Value = struct { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); scalar.* = try bitwiseNandScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); @@ -3632,12 +3394,7 @@ pub const Value = struct { if (lhs.isUndef() or rhs.isUndef()) return Value.undef; const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod); - - const all_ones = if (ty.isSignedInt(mod)) - try Value.Tag.int_i64.create(arena, -1) - else - try ty.maxInt(arena, mod); - + const all_ones = if (ty.isSignedInt(mod)) Value.negative_one else try ty.maxIntScalar(mod); return bitwiseXor(anded, all_ones, ty, arena, mod); } @@ -3646,19 +3403,17 @@ pub const Value = struct { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try bitwiseOrScalar(lhs_elem, rhs_elem, allocator, mod); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try bitwiseOrScalar(lhs_elem, rhs_elem, ty.scalarType(mod), allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return bitwiseOrScalar(lhs, rhs, allocator, mod); + return bitwiseOrScalar(lhs, rhs, ty, allocator, mod); } /// operands must be integers; handles undefined. 
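
// Illustrative sketch (separate from the patch): bitwiseNandScalar above
// forms nand(a, b) as (a & b) ^ all_ones, with all_ones being
// Value.negative_one for signed types and the type's maxInt otherwise. The
// identity on a plain u8:
const std = @import("std");

fn nand(a: u8, b: u8) u8 {
    const all_ones: u8 = std.math.maxInt(u8);
    return (a & b) ^ all_ones; // equivalent to ~(a & b)
}

test "nand" {
    try std.testing.expectEqual(@as(u8, 0xfe), nand(0x0f, 0xf1));
}
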
- pub fn bitwiseOrScalar(lhs: Value, rhs: Value, arena: Allocator, mod: *Module) !Value { + pub fn bitwiseOrScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.undef; // TODO is this a performance issue? maybe we should try the operation without @@ -3673,27 +3428,26 @@ pub const Value = struct { ); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitOr(lhs_bigint, rhs_bigint); - return fromBigInt(arena, result_bigint.toConst()); + return mod.intValue_big(ty, result_bigint.toConst()); } /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try bitwiseXorScalar(lhs_elem, rhs_elem, allocator, mod); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try bitwiseXorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return bitwiseXorScalar(lhs, rhs, allocator, mod); + return bitwiseXorScalar(lhs, rhs, ty, allocator, mod); } /// operands must be integers; handles undefined. - pub fn bitwiseXorScalar(lhs: Value, rhs: Value, arena: Allocator, mod: *Module) !Value { + pub fn bitwiseXorScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.undef; // TODO is this a performance issue? maybe we should try the operation without @@ -3709,25 +3463,24 @@ pub const Value = struct { ); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitXor(lhs_bigint, rhs_bigint); - return fromBigInt(arena, result_bigint.toConst()); + return mod.intValue_big(ty, result_bigint.toConst()); } pub fn intDiv(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intDivScalar(lhs_elem, rhs_elem, allocator, mod); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try intDivScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intDivScalar(lhs, rhs, allocator, mod); + return intDivScalar(lhs, rhs, ty, allocator, mod); } - pub fn intDivScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { + pub fn intDivScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
var lhs_space: Value.BigIntSpace = undefined; @@ -3749,25 +3502,24 @@ pub const Value = struct { var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined }; var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined }; result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer); - return fromBigInt(allocator, result_q.toConst()); + return mod.intValue_big(ty, result_q.toConst()); } pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intDivFloorScalar(lhs_elem, rhs_elem, allocator, mod); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try intDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intDivFloorScalar(lhs, rhs, allocator, mod); + return intDivFloorScalar(lhs, rhs, ty, allocator, mod); } - pub fn intDivFloorScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { + pub fn intDivFloorScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; @@ -3789,25 +3541,24 @@ pub const Value = struct { var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined }; var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined }; result_q.divFloor(&result_r, lhs_bigint, rhs_bigint, limbs_buffer); - return fromBigInt(allocator, result_q.toConst()); + return mod.intValue_big(ty, result_q.toConst()); } pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intModScalar(lhs_elem, rhs_elem, allocator, mod); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try intModScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intModScalar(lhs, rhs, allocator, mod); + return intModScalar(lhs, rhs, ty, allocator, mod); } - pub fn intModScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { + pub fn intModScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
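
// Illustrative sketch (separate from the patch): intDivScalar above uses
// divTrunc while intDivFloorScalar and intModScalar use divFloor; the two
// roundings agree only when the quotient is non-negative.
const std = @import("std");

test "trunc vs floor division" {
    try std.testing.expectEqual(@as(i32, -2), @divTrunc(@as(i32, -7), 3));
    try std.testing.expectEqual(@as(i32, -3), @divFloor(@as(i32, -7), 3));
    // @mod is the remainder paired with floor division; it carries the
    // divisor's sign, which is why intModScalar keeps divFloor's remainder.
    try std.testing.expectEqual(@as(i32, 2), @mod(@as(i32, -7), 3));
}
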
var lhs_space: Value.BigIntSpace = undefined; @@ -3829,7 +3580,7 @@ pub const Value = struct { var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined }; var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined }; result_q.divFloor(&result_r, lhs_bigint, rhs_bigint, limbs_buffer); - return fromBigInt(allocator, result_r.toConst()); + return mod.intValue_big(ty, result_r.toConst()); } /// Returns true if the value is a floating point type and is NaN. Returns false otherwise. @@ -3877,46 +3628,44 @@ pub const Value = struct { } pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatRemScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try floatRemScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return floatRemScalar(lhs, rhs, float_type, arena, target); + return floatRemScalar(lhs, rhs, float_type, arena, mod); } - pub fn floatRemScalar(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, target: Target) !Value { + pub fn floatRemScalar(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *const Module) !Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const lhs_val = lhs.toFloat(f16); - const rhs_val = rhs.toFloat(f16); + const lhs_val = lhs.toFloat(f16, mod); + const rhs_val = rhs.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @rem(lhs_val, rhs_val)); }, 32 => { - const lhs_val = lhs.toFloat(f32); - const rhs_val = rhs.toFloat(f32); + const lhs_val = lhs.toFloat(f32, mod); + const rhs_val = rhs.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @rem(lhs_val, rhs_val)); }, 64 => { - const lhs_val = lhs.toFloat(f64); - const rhs_val = rhs.toFloat(f64); + const lhs_val = lhs.toFloat(f64, mod); + const rhs_val = rhs.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @rem(lhs_val, rhs_val)); }, 80 => { - const lhs_val = lhs.toFloat(f80); - const rhs_val = rhs.toFloat(f80); + const lhs_val = lhs.toFloat(f80, mod); + const rhs_val = rhs.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @rem(lhs_val, rhs_val)); }, 128 => { - const lhs_val = lhs.toFloat(f128); - const rhs_val = rhs.toFloat(f128); + const lhs_val = lhs.toFloat(f128, mod); + const rhs_val = rhs.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @rem(lhs_val, rhs_val)); }, else => unreachable, @@ -3924,46 +3673,44 @@ pub const Value = struct { } pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatModScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try floatModScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return floatModScalar(lhs, rhs, float_type, arena, target); + return floatModScalar(lhs, rhs, float_type, arena, mod); } - pub fn floatModScalar(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, target: Target) !Value { + pub fn floatModScalar(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *const Module) !Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const lhs_val = lhs.toFloat(f16); - const rhs_val = rhs.toFloat(f16); + const lhs_val = lhs.toFloat(f16, mod); + const rhs_val = rhs.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @mod(lhs_val, rhs_val)); }, 32 => { - const lhs_val = lhs.toFloat(f32); - const rhs_val = rhs.toFloat(f32); + const lhs_val = lhs.toFloat(f32, mod); + const rhs_val = rhs.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @mod(lhs_val, rhs_val)); }, 64 => { - const lhs_val = lhs.toFloat(f64); - const rhs_val = rhs.toFloat(f64); + const lhs_val = lhs.toFloat(f64, mod); + const rhs_val = rhs.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @mod(lhs_val, rhs_val)); }, 80 => { - const lhs_val = lhs.toFloat(f80); - const rhs_val = rhs.toFloat(f80); + const lhs_val = lhs.toFloat(f80, mod); + const rhs_val = rhs.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @mod(lhs_val, rhs_val)); }, 128 => { - const lhs_val = lhs.toFloat(f128); - const rhs_val = rhs.toFloat(f128); + const lhs_val = lhs.toFloat(f128, mod); + const rhs_val = rhs.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @mod(lhs_val, rhs_val)); }, else => unreachable, @@ -3973,19 +3720,18 @@ pub const Value = struct { pub fn intMul(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intMulScalar(lhs_elem, rhs_elem, allocator, mod); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try intMulScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intMulScalar(lhs, rhs, allocator, mod); + return intMulScalar(lhs, rhs, ty, allocator, mod); } - pub fn intMulScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { + pub fn intMulScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
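
// Illustrative sketch (separate from the patch): floatRemScalar and
// floatModScalar above differ only in the builtin applied per float width;
// @rem keeps the dividend's sign while @mod takes the divisor's.
const std = @import("std");

test "float rem vs mod" {
    try std.testing.expectEqual(@as(f64, -1.0), @rem(@as(f64, -7.0), 3.0));
    try std.testing.expectEqual(@as(f64, 2.0), @mod(@as(f64, -7.0), 3.0));
}
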
var lhs_space: Value.BigIntSpace = undefined; @@ -4003,20 +3749,20 @@ pub const Value = struct { ); defer allocator.free(limbs_buffer); result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, allocator); - return fromBigInt(allocator, result_bigint.toConst()); + return mod.intValue_big(ty, result_bigint.toConst()); } pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try intTruncScalar(elem_val, allocator, signedness, bits, mod); + const elem_val = try val.elemValue(mod, i); + scalar.* = try intTruncScalar(elem_val, scalar_ty, allocator, signedness, bits, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intTruncScalar(val, allocator, signedness, bits, mod); + return intTruncScalar(val, ty, allocator, signedness, bits, mod); } /// This variant may vectorize on `bits`. Asserts that `bits` is a (vector of) `u16`. @@ -4030,19 +3776,25 @@ pub const Value = struct { ) !Value { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - var bits_buf: Value.ElemValueBuffer = undefined; - const bits_elem = bits.elemValueBuffer(mod, i, &bits_buf); - scalar.* = try intTruncScalar(elem_val, allocator, signedness, @intCast(u16, bits_elem.toUnsignedInt(mod)), mod); + const elem_val = try val.elemValue(mod, i); + const bits_elem = try bits.elemValue(mod, i); + scalar.* = try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @intCast(u16, bits_elem.toUnsignedInt(mod)), mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intTruncScalar(val, allocator, signedness, @intCast(u16, bits.toUnsignedInt(mod)), mod); + return intTruncScalar(val, ty, allocator, signedness, @intCast(u16, bits.toUnsignedInt(mod)), mod); } - pub fn intTruncScalar(val: Value, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, mod: *Module) !Value { + pub fn intTruncScalar( + val: Value, + ty: Type, + allocator: Allocator, + signedness: std.builtin.Signedness, + bits: u16, + mod: *Module, + ) !Value { if (bits == 0) return Value.zero; var val_space: Value.BigIntSpace = undefined; @@ -4055,25 +3807,24 @@ pub const Value = struct { var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.truncate(val_bigint, signedness, bits); - return fromBigInt(allocator, result_bigint.toConst()); + return mod.intValue_big(ty, result_bigint.toConst()); } pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try shlScalar(lhs_elem, rhs_elem, allocator, mod); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try shlScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return shlScalar(lhs, rhs, allocator, mod); + return shlScalar(lhs, rhs, ty, allocator, mod); } - pub fn shlScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { + pub fn shlScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; @@ -4089,7 +3840,7 @@ pub const Value = struct { .len = undefined, }; result_bigint.shiftLeft(lhs_bigint, shift); - return fromBigInt(allocator, result_bigint.toConst()); + return mod.intValue_big(ty, result_bigint.toConst()); } pub fn shlWithOverflow( @@ -4103,10 +3854,8 @@ pub const Value = struct { const overflowed_data = try allocator.alloc(Value, ty.vectorLen(mod)); const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod), allocator, mod); overflowed_data[i] = of_math_result.overflow_bit; scalar.* = of_math_result.wrapped_result; @@ -4146,7 +3895,7 @@ pub const Value = struct { } return OverflowArithmeticResult{ .overflow_bit = boolToInt(overflowed), - .wrapped_result = try fromBigInt(allocator, result_bigint.toConst()), + .wrapped_result = try mod.intValue_big(ty, result_bigint.toConst()), }; } @@ -4160,10 +3909,8 @@ pub const Value = struct { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); scalar.* = try shlSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); @@ -4195,7 +3942,7 @@ pub const Value = struct { .len = undefined, }; result_bigint.shiftLeftSat(lhs_bigint, shift, info.signedness, info.bits); - return fromBigInt(arena, result_bigint.toConst()); + return mod.intValue_big(ty, result_bigint.toConst()); } pub fn shlTrunc( @@ -4208,10 +3955,8 @@ pub const Value = struct { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); scalar.* = try shlTruncScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); @@ -4235,19 +3980,18 @@ pub const Value = struct { pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try shrScalar(lhs_elem, rhs_elem, allocator, mod); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try shrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return shrScalar(lhs, rhs, allocator, mod); + return shrScalar(lhs, rhs, ty, allocator, mod); } - pub fn shrScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { + pub fn shrScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; @@ -4275,7 +4019,7 @@ pub const Value = struct { .len = undefined, }; result_bigint.shiftRight(lhs_bigint, shift); - return fromBigInt(allocator, result_bigint.toConst()); + return mod.intValue_big(ty, result_bigint.toConst()); } pub fn floatNeg( @@ -4284,31 +4028,30 @@ pub const Value = struct { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try floatNegScalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try floatNegScalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return floatNegScalar(val, float_type, arena, target); + return floatNegScalar(val, float_type, arena, mod); } pub fn floatNegScalar( val: Value, float_type: Type, arena: Allocator, - target: Target, + mod: *const Module, ) !Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { - 16 => return Value.Tag.float_16.create(arena, -val.toFloat(f16)), - 32 => return Value.Tag.float_32.create(arena, -val.toFloat(f32)), - 64 => return Value.Tag.float_64.create(arena, -val.toFloat(f64)), - 80 => return Value.Tag.float_80.create(arena, -val.toFloat(f80)), - 128 => return Value.Tag.float_128.create(arena, -val.toFloat(f128)), + 16 => return Value.Tag.float_16.create(arena, -val.toFloat(f16, mod)), + 32 => return Value.Tag.float_32.create(arena, -val.toFloat(f32, mod)), + 64 => return Value.Tag.float_64.create(arena, -val.toFloat(f64, mod)), + 80 => return Value.Tag.float_80.create(arena, -val.toFloat(f80, mod)), + 128 => return Value.Tag.float_128.create(arena, -val.toFloat(f128, mod)), else => unreachable, } } @@ -4320,19 +4063,16 @@ pub const Value = struct { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatDivScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try floatDivScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return floatDivScalar(lhs, rhs, float_type, arena, target); + return floatDivScalar(lhs, rhs, float_type, arena, mod); } pub fn floatDivScalar( @@ -4340,32 +4080,33 @@ pub const Value = struct { rhs: Value, float_type: Type, arena: Allocator, - target: Target, + mod: *const Module, ) !Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const lhs_val = lhs.toFloat(f16); - const rhs_val = rhs.toFloat(f16); + const lhs_val = lhs.toFloat(f16, mod); + const rhs_val = rhs.toFloat(f16, mod); return Value.Tag.float_16.create(arena, lhs_val / rhs_val); }, 32 => { - const lhs_val = lhs.toFloat(f32); - const rhs_val = rhs.toFloat(f32); + const lhs_val = lhs.toFloat(f32, mod); + const rhs_val = rhs.toFloat(f32, mod); return Value.Tag.float_32.create(arena, lhs_val / rhs_val); }, 64 => { - const lhs_val = lhs.toFloat(f64); - const rhs_val = rhs.toFloat(f64); + const lhs_val = lhs.toFloat(f64, mod); + const rhs_val = rhs.toFloat(f64, mod); return Value.Tag.float_64.create(arena, lhs_val / rhs_val); }, 80 => { - const lhs_val = lhs.toFloat(f80); - const rhs_val = rhs.toFloat(f80); + const lhs_val = lhs.toFloat(f80, mod); + const rhs_val = rhs.toFloat(f80, mod); return Value.Tag.float_80.create(arena, lhs_val / rhs_val); }, 128 => { - 
const lhs_val = lhs.toFloat(f128); - const rhs_val = rhs.toFloat(f128); + const lhs_val = lhs.toFloat(f128, mod); + const rhs_val = rhs.toFloat(f128, mod); return Value.Tag.float_128.create(arena, lhs_val / rhs_val); }, else => unreachable, @@ -4379,19 +4120,16 @@ pub const Value = struct { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatDivFloorScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try floatDivFloorScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return floatDivFloorScalar(lhs, rhs, float_type, arena, target); + return floatDivFloorScalar(lhs, rhs, float_type, arena, mod); } pub fn floatDivFloorScalar( @@ -4399,32 +4137,33 @@ pub const Value = struct { rhs: Value, float_type: Type, arena: Allocator, - target: Target, + mod: *const Module, ) !Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const lhs_val = lhs.toFloat(f16); - const rhs_val = rhs.toFloat(f16); + const lhs_val = lhs.toFloat(f16, mod); + const rhs_val = rhs.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @divFloor(lhs_val, rhs_val)); }, 32 => { - const lhs_val = lhs.toFloat(f32); - const rhs_val = rhs.toFloat(f32); + const lhs_val = lhs.toFloat(f32, mod); + const rhs_val = rhs.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @divFloor(lhs_val, rhs_val)); }, 64 => { - const lhs_val = lhs.toFloat(f64); - const rhs_val = rhs.toFloat(f64); + const lhs_val = lhs.toFloat(f64, mod); + const rhs_val = rhs.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @divFloor(lhs_val, rhs_val)); }, 80 => { - const lhs_val = lhs.toFloat(f80); - const rhs_val = rhs.toFloat(f80); + const lhs_val = lhs.toFloat(f80, mod); + const rhs_val = rhs.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @divFloor(lhs_val, rhs_val)); }, 128 => { - const lhs_val = lhs.toFloat(f128); - const rhs_val = rhs.toFloat(f128); + const lhs_val = lhs.toFloat(f128, mod); + const rhs_val = rhs.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @divFloor(lhs_val, rhs_val)); }, else => unreachable, @@ -4438,19 +4177,16 @@ pub const Value = struct { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatDivTruncScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try floatDivTruncScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return floatDivTruncScalar(lhs, rhs, float_type, arena, target); + return floatDivTruncScalar(lhs, rhs, float_type, arena, mod); } pub fn floatDivTruncScalar( @@ -4458,32 +4194,33 @@ pub const Value = struct { rhs: Value, float_type: Type, arena: Allocator, - target: Target, + mod: *const Module, ) !Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const lhs_val = lhs.toFloat(f16); - const rhs_val = rhs.toFloat(f16); + const lhs_val = lhs.toFloat(f16, mod); + const rhs_val = rhs.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @divTrunc(lhs_val, rhs_val)); }, 32 => { - const lhs_val = lhs.toFloat(f32); - const rhs_val = rhs.toFloat(f32); + const lhs_val = lhs.toFloat(f32, mod); + const rhs_val = rhs.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @divTrunc(lhs_val, rhs_val)); }, 64 => { - const lhs_val = lhs.toFloat(f64); - const rhs_val = rhs.toFloat(f64); + const lhs_val = lhs.toFloat(f64, mod); + const rhs_val = rhs.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @divTrunc(lhs_val, rhs_val)); }, 80 => { - const lhs_val = lhs.toFloat(f80); - const rhs_val = rhs.toFloat(f80); + const lhs_val = lhs.toFloat(f80, mod); + const rhs_val = rhs.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @divTrunc(lhs_val, rhs_val)); }, 128 => { - const lhs_val = lhs.toFloat(f128); - const rhs_val = rhs.toFloat(f128); + const lhs_val = lhs.toFloat(f128, mod); + const rhs_val = rhs.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @divTrunc(lhs_val, rhs_val)); }, else => unreachable, @@ -4497,19 +4234,16 @@ pub const Value = struct { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
@@ -4497,19 +4234,16 @@ pub const Value = struct {
         arena: Allocator,
         mod: *Module,
     ) !Value {
-        const target = mod.getTarget();
         if (float_type.zigTypeTag(mod) == .Vector) {
             const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
             for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-                scalar.* = try floatMulScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
+                scalar.* = try floatMulScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, mod);
             }
             return Value.Tag.aggregate.create(arena, result_data);
         }
-        return floatMulScalar(lhs, rhs, float_type, arena, target);
+        return floatMulScalar(lhs, rhs, float_type, arena, mod);
     }

     pub fn floatMulScalar(
@@ -4517,32 +4251,33 @@ pub const Value = struct {
         rhs: Value,
         float_type: Type,
         arena: Allocator,
-        target: Target,
+        mod: *const Module,
     ) !Value {
+        const target = mod.getTarget();
         switch (float_type.floatBits(target)) {
             16 => {
-                const lhs_val = lhs.toFloat(f16);
-                const rhs_val = rhs.toFloat(f16);
+                const lhs_val = lhs.toFloat(f16, mod);
+                const rhs_val = rhs.toFloat(f16, mod);
                 return Value.Tag.float_16.create(arena, lhs_val * rhs_val);
             },
             32 => {
-                const lhs_val = lhs.toFloat(f32);
-                const rhs_val = rhs.toFloat(f32);
+                const lhs_val = lhs.toFloat(f32, mod);
+                const rhs_val = rhs.toFloat(f32, mod);
                 return Value.Tag.float_32.create(arena, lhs_val * rhs_val);
             },
             64 => {
-                const lhs_val = lhs.toFloat(f64);
-                const rhs_val = rhs.toFloat(f64);
+                const lhs_val = lhs.toFloat(f64, mod);
+                const rhs_val = rhs.toFloat(f64, mod);
                 return Value.Tag.float_64.create(arena, lhs_val * rhs_val);
             },
             80 => {
-                const lhs_val = lhs.toFloat(f80);
-                const rhs_val = rhs.toFloat(f80);
+                const lhs_val = lhs.toFloat(f80, mod);
+                const rhs_val = rhs.toFloat(f80, mod);
                 return Value.Tag.float_80.create(arena, lhs_val * rhs_val);
             },
             128 => {
-                const lhs_val = lhs.toFloat(f128);
-                const rhs_val = rhs.toFloat(f128);
+                const lhs_val = lhs.toFloat(f128, mod);
+                const rhs_val = rhs.toFloat(f128, mod);
                 return Value.Tag.float_128.create(arena, lhs_val * rhs_val);
             },
             else => unreachable,
@@ -4550,39 +4285,38 @@ pub const Value = struct {
     }

     pub fn sqrt(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
         if (float_type.zigTypeTag(mod) == .Vector) {
             const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try sqrtScalar(elem_val, float_type.scalarType(mod), arena, target);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try sqrtScalar(elem_val, float_type.scalarType(mod), arena, mod);
             }
             return Value.Tag.aggregate.create(arena, result_data);
         }
-        return sqrtScalar(val, float_type, arena, target);
+        return sqrtScalar(val, float_type, arena, mod);
     }

-    pub fn sqrtScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+    pub fn sqrtScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value {
+        const target = mod.getTarget();
         switch (float_type.floatBits(target)) {
             16 => {
-                const f = val.toFloat(f16);
+                const f = val.toFloat(f16, mod);
                 return Value.Tag.float_16.create(arena, @sqrt(f));
             },
             32 => {
-                const f = val.toFloat(f32);
+                const f = val.toFloat(f32, mod);
                 return Value.Tag.float_32.create(arena, @sqrt(f));
             },
             64 => {
-                const f = val.toFloat(f64);
+                const f = val.toFloat(f64, mod);
                 return Value.Tag.float_64.create(arena, @sqrt(f));
             },
             80 => {
-                const f = val.toFloat(f80);
+                const f = val.toFloat(f80, mod);
                 return Value.Tag.float_80.create(arena, @sqrt(f));
             },
             128 => {
-                const f = val.toFloat(f128);
+                const f = val.toFloat(f128, mod);
                 return Value.Tag.float_128.create(arena, @sqrt(f));
             },
             else => unreachable,
@@ -4590,39 +4324,38 @@ pub const Value = struct {
     }

     pub fn sin(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
         if (float_type.zigTypeTag(mod) == .Vector) {
             const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try sinScalar(elem_val, float_type.scalarType(mod), arena, target);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try sinScalar(elem_val, float_type.scalarType(mod), arena, mod);
             }
             return Value.Tag.aggregate.create(arena, result_data);
         }
-        return sinScalar(val, float_type, arena, target);
+        return sinScalar(val, float_type, arena, mod);
     }

-    pub fn sinScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+    pub fn sinScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value {
+        const target = mod.getTarget();
         switch (float_type.floatBits(target)) {
             16 => {
-                const f = val.toFloat(f16);
+                const f = val.toFloat(f16, mod);
                 return Value.Tag.float_16.create(arena, @sin(f));
             },
             32 => {
-                const f = val.toFloat(f32);
+                const f = val.toFloat(f32, mod);
                 return Value.Tag.float_32.create(arena, @sin(f));
             },
             64 => {
-                const f = val.toFloat(f64);
+                const f = val.toFloat(f64, mod);
                 return Value.Tag.float_64.create(arena, @sin(f));
             },
             80 => {
-                const f = val.toFloat(f80);
+                const f = val.toFloat(f80, mod);
                 return Value.Tag.float_80.create(arena, @sin(f));
             },
             128 => {
-                const f = val.toFloat(f128);
+                const f = val.toFloat(f128, mod);
                 return Value.Tag.float_128.create(arena, @sin(f));
             },
             else => unreachable,
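Alongside the parameter change, vector element access switches APIs: the old elemValueBuffer wrote into a caller-provided Value.ElemValueBuffer and could not fail, while elemValue may intern a fresh value and therefore returns an error union, hence the added try in every loop. A sketch of the new shape (the helper and its names are hypothetical; it assumes the usual value.zig imports):

    // Sums the elements of a float vector Value the post-commit way.
    fn sumVectorElems(vec: Value, len: usize, mod: *Module) !f64 {
        var total: f64 = 0;
        var i: usize = 0;
        while (i < len) : (i += 1) {
            // was: vec.elemValueBuffer(mod, i, &buf) with a stack buffer
            const elem = try vec.elemValue(mod, i);
            total += elem.toFloat(f64, mod);
        }
        return total;
    }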
@@ -4630,39 +4363,38 @@ pub const Value = struct {
     }

     pub fn cos(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
         if (float_type.zigTypeTag(mod) == .Vector) {
             const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try cosScalar(elem_val, float_type.scalarType(mod), arena, target);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try cosScalar(elem_val, float_type.scalarType(mod), arena, mod);
             }
             return Value.Tag.aggregate.create(arena, result_data);
         }
-        return cosScalar(val, float_type, arena, target);
+        return cosScalar(val, float_type, arena, mod);
     }

-    pub fn cosScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+    pub fn cosScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value {
+        const target = mod.getTarget();
         switch (float_type.floatBits(target)) {
             16 => {
-                const f = val.toFloat(f16);
+                const f = val.toFloat(f16, mod);
                 return Value.Tag.float_16.create(arena, @cos(f));
             },
             32 => {
-                const f = val.toFloat(f32);
+                const f = val.toFloat(f32, mod);
                 return Value.Tag.float_32.create(arena, @cos(f));
             },
             64 => {
-                const f = val.toFloat(f64);
+                const f = val.toFloat(f64, mod);
                 return Value.Tag.float_64.create(arena, @cos(f));
             },
             80 => {
-                const f = val.toFloat(f80);
+                const f = val.toFloat(f80, mod);
                 return Value.Tag.float_80.create(arena, @cos(f));
             },
             128 => {
-                const f = val.toFloat(f128);
+                const f = val.toFloat(f128, mod);
                 return Value.Tag.float_128.create(arena, @cos(f));
             },
             else => unreachable,
@@ -4670,39 +4402,38 @@ pub const Value = struct {
     }

     pub fn tan(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
         if (float_type.zigTypeTag(mod) == .Vector) {
             const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try tanScalar(elem_val, float_type.scalarType(mod), arena, target);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try tanScalar(elem_val, float_type.scalarType(mod), arena, mod);
             }
             return Value.Tag.aggregate.create(arena, result_data);
         }
-        return tanScalar(val, float_type, arena, target);
+        return tanScalar(val, float_type, arena, mod);
     }

-    pub fn tanScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+    pub fn tanScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value {
+        const target = mod.getTarget();
         switch (float_type.floatBits(target)) {
             16 => {
-                const f = val.toFloat(f16);
+                const f = val.toFloat(f16, mod);
                 return Value.Tag.float_16.create(arena, @tan(f));
             },
             32 => {
-                const f = val.toFloat(f32);
+                const f = val.toFloat(f32, mod);
                 return Value.Tag.float_32.create(arena, @tan(f));
             },
             64 => {
-                const f = val.toFloat(f64);
+                const f = val.toFloat(f64, mod);
                 return Value.Tag.float_64.create(arena, @tan(f));
             },
             80 => {
-                const f = val.toFloat(f80);
+                const f = val.toFloat(f80, mod);
                 return Value.Tag.float_80.create(arena, @tan(f));
             },
             128 => {
-                const f = val.toFloat(f128);
+                const f = val.toFloat(f128, mod);
                 return Value.Tag.float_128.create(arena, @tan(f));
             },
             else => unreachable,
@@ -4710,39 +4441,38 @@ pub const Value = struct {
     }

     pub fn exp(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
         if (float_type.zigTypeTag(mod) == .Vector) {
             const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try expScalar(elem_val, float_type.scalarType(mod), arena, target);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try expScalar(elem_val, float_type.scalarType(mod), arena, mod);
             }
             return Value.Tag.aggregate.create(arena, result_data);
         }
-        return expScalar(val, float_type, arena, target);
+        return expScalar(val, float_type, arena, mod);
    }

-    pub fn expScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+    pub fn expScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value {
+        const target = mod.getTarget();
         switch (float_type.floatBits(target)) {
             16 => {
-                const f = val.toFloat(f16);
+                const f = val.toFloat(f16, mod);
                 return Value.Tag.float_16.create(arena, @exp(f));
             },
             32 => {
-                const f = val.toFloat(f32);
+                const f = val.toFloat(f32, mod);
                 return Value.Tag.float_32.create(arena, @exp(f));
             },
             64 => {
-                const f = val.toFloat(f64);
+                const f = val.toFloat(f64, mod);
                 return Value.Tag.float_64.create(arena, @exp(f));
             },
             80 => {
-                const f = val.toFloat(f80);
+                const f = val.toFloat(f80, mod);
                 return Value.Tag.float_80.create(arena, @exp(f));
             },
             128 => {
-                const f = val.toFloat(f128);
+                const f = val.toFloat(f128, mod);
                 return Value.Tag.float_128.create(arena, @exp(f));
             },
             else => unreachable,
@@ -4750,39 +4480,38 @@ pub const Value = struct {
     }

     pub fn exp2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
         if (float_type.zigTypeTag(mod) == .Vector) {
             const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try exp2Scalar(elem_val, float_type.scalarType(mod), arena, target);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try exp2Scalar(elem_val, float_type.scalarType(mod), arena, mod);
             }
             return Value.Tag.aggregate.create(arena, result_data);
         }
-        return exp2Scalar(val, float_type, arena, target);
    }

-    pub fn exp2Scalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+    pub fn exp2Scalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value {
+        const target = mod.getTarget();
         switch (float_type.floatBits(target)) {
             16 => {
-                const f = val.toFloat(f16);
+                const f = val.toFloat(f16, mod);
                 return Value.Tag.float_16.create(arena, @exp2(f));
             },
             32 => {
-                const f = val.toFloat(f32);
+                const f = val.toFloat(f32, mod);
                 return Value.Tag.float_32.create(arena, @exp2(f));
             },
             64 => {
-                const f = val.toFloat(f64);
+                const f = val.toFloat(f64, mod);
                 return Value.Tag.float_64.create(arena, @exp2(f));
             },
             80 => {
-                const f = val.toFloat(f80);
+                const f = val.toFloat(f80, mod);
                 return Value.Tag.float_80.create(arena, @exp2(f));
             },
             128 => {
-                const f = val.toFloat(f128);
+                const f = val.toFloat(f128, mod);
                 return Value.Tag.float_128.create(arena, @exp2(f));
             },
             else => unreachable,
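The sqrt/sin/cos/tan/exp/exp2 hunks (and the log/floor family below) differ only in which builtin they apply, so the width dispatch could in principle be written once with comptime. This is an editorial sketch of that factoring under the same imports as value.zig, not something this commit attempts:

    const UnaryOp = enum { sqrt, sin, cos, tan, exp, exp2 };

    fn applyOp(comptime op: UnaryOp, x: anytype) @TypeOf(x) {
        return switch (op) {
            .sqrt => @sqrt(x),
            .sin => @sin(x),
            .cos => @cos(x),
            .tan => @tan(x),
            .exp => @exp(x),
            .exp2 => @exp2(x),
        };
    }

    fn unaryFloatOpScalar(
        comptime op: UnaryOp,
        val: Value,
        float_type: Type,
        arena: Allocator,
        mod: *const Module,
    ) Allocator.Error!Value {
        const target = mod.getTarget();
        return switch (float_type.floatBits(target)) {
            16 => Value.Tag.float_16.create(arena, applyOp(op, val.toFloat(f16, mod))),
            32 => Value.Tag.float_32.create(arena, applyOp(op, val.toFloat(f32, mod))),
            64 => Value.Tag.float_64.create(arena, applyOp(op, val.toFloat(f64, mod))),
            80 => Value.Tag.float_80.create(arena, applyOp(op, val.toFloat(f80, mod))),
            128 => Value.Tag.float_128.create(arena, applyOp(op, val.toFloat(f128, mod))),
            else => unreachable,
        };
    }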
@@ -4790,39 +4519,38 @@ pub const Value = struct {
     }

     pub fn log(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
         if (float_type.zigTypeTag(mod) == .Vector) {
             const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try logScalar(elem_val, float_type.scalarType(mod), arena, target);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try logScalar(elem_val, float_type.scalarType(mod), arena, mod);
             }
             return Value.Tag.aggregate.create(arena, result_data);
         }
-        return logScalar(val, float_type, arena, target);
+        return logScalar(val, float_type, arena, mod);
     }

-    pub fn logScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+    pub fn logScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value {
+        const target = mod.getTarget();
         switch (float_type.floatBits(target)) {
             16 => {
-                const f = val.toFloat(f16);
+                const f = val.toFloat(f16, mod);
                 return Value.Tag.float_16.create(arena, @log(f));
             },
             32 => {
-                const f = val.toFloat(f32);
+                const f = val.toFloat(f32, mod);
                 return Value.Tag.float_32.create(arena, @log(f));
             },
             64 => {
-                const f = val.toFloat(f64);
+                const f = val.toFloat(f64, mod);
                 return Value.Tag.float_64.create(arena, @log(f));
             },
             80 => {
-                const f = val.toFloat(f80);
+                const f = val.toFloat(f80, mod);
                 return Value.Tag.float_80.create(arena, @log(f));
             },
             128 => {
-                const f = val.toFloat(f128);
+                const f = val.toFloat(f128, mod);
                 return Value.Tag.float_128.create(arena, @log(f));
             },
             else => unreachable,
@@ -4830,39 +4558,38 @@ pub const Value = struct {
     }

     pub fn log2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
         if (float_type.zigTypeTag(mod) == .Vector) {
             const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try log2Scalar(elem_val, float_type.scalarType(mod), arena, target);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try log2Scalar(elem_val, float_type.scalarType(mod), arena, mod);
             }
             return Value.Tag.aggregate.create(arena, result_data);
         }
-        return log2Scalar(val, float_type, arena, target);
+        return log2Scalar(val, float_type, arena, mod);
     }

-    pub fn log2Scalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+    pub fn log2Scalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value {
+        const target = mod.getTarget();
         switch (float_type.floatBits(target)) {
             16 => {
-                const f = val.toFloat(f16);
+                const f = val.toFloat(f16, mod);
                 return Value.Tag.float_16.create(arena, @log2(f));
             },
             32 => {
-                const f = val.toFloat(f32);
+                const f = val.toFloat(f32, mod);
                 return Value.Tag.float_32.create(arena, @log2(f));
             },
             64 => {
-                const f = val.toFloat(f64);
+                const f = val.toFloat(f64, mod);
                 return Value.Tag.float_64.create(arena, @log2(f));
             },
             80 => {
-                const f = val.toFloat(f80);
+                const f = val.toFloat(f80, mod);
                 return Value.Tag.float_80.create(arena, @log2(f));
             },
             128 => {
-                const f = val.toFloat(f128);
+                const f = val.toFloat(f128, mod);
                 return Value.Tag.float_128.create(arena, @log2(f));
             },
             else => unreachable,
@@ -4870,39 +4597,38 @@ pub const Value = struct {
     }

     pub fn log10(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
         if (float_type.zigTypeTag(mod) == .Vector) {
             const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try log10Scalar(elem_val, float_type.scalarType(mod), arena, target);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try log10Scalar(elem_val, float_type.scalarType(mod), arena, mod);
             }
             return Value.Tag.aggregate.create(arena, result_data);
         }
-        return log10Scalar(val, float_type, arena, target);
+        return log10Scalar(val, float_type, arena, mod);
     }

-    pub fn log10Scalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+    pub fn log10Scalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value {
+        const target = mod.getTarget();
         switch (float_type.floatBits(target)) {
             16 => {
-                const f = val.toFloat(f16);
+                const f = val.toFloat(f16, mod);
                 return Value.Tag.float_16.create(arena, @log10(f));
             },
             32 => {
-                const f = val.toFloat(f32);
+                const f = val.toFloat(f32, mod);
                 return Value.Tag.float_32.create(arena, @log10(f));
             },
             64 => {
-                const f = val.toFloat(f64);
+                const f = val.toFloat(f64, mod);
                 return Value.Tag.float_64.create(arena, @log10(f));
             },
             80 => {
-                const f = val.toFloat(f80);
+                const f = val.toFloat(f80, mod);
                 return Value.Tag.float_80.create(arena, @log10(f));
             },
             128 => {
-                const f = val.toFloat(f128);
+                const f = val.toFloat(f128, mod);
                 return Value.Tag.float_128.create(arena, @log10(f));
             },
             else => unreachable,
@@ -4910,39 +4636,38 @@ pub const Value = struct {
     }

     pub fn fabs(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
         if (float_type.zigTypeTag(mod) == .Vector) {
             const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try fabsScalar(elem_val, float_type.scalarType(mod), arena, target);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try fabsScalar(elem_val, float_type.scalarType(mod), arena, mod);
             }
             return Value.Tag.aggregate.create(arena, result_data);
         }
-        return fabsScalar(val, float_type, arena, target);
+        return fabsScalar(val, float_type, arena, mod);
     }

-    pub fn fabsScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+    pub fn fabsScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value {
+        const target = mod.getTarget();
         switch (float_type.floatBits(target)) {
             16 => {
-                const f = val.toFloat(f16);
+                const f = val.toFloat(f16, mod);
                 return Value.Tag.float_16.create(arena, @fabs(f));
             },
             32 => {
-                const f = val.toFloat(f32);
+                const f = val.toFloat(f32, mod);
                 return Value.Tag.float_32.create(arena, @fabs(f));
             },
             64 => {
-                const f = val.toFloat(f64);
+                const f = val.toFloat(f64, mod);
                 return Value.Tag.float_64.create(arena, @fabs(f));
             },
             80 => {
-                const f = val.toFloat(f80);
+                const f = val.toFloat(f80, mod);
                 return Value.Tag.float_80.create(arena, @fabs(f));
             },
             128 => {
-                const f = val.toFloat(f128);
+                const f = val.toFloat(f128, mod);
                 return Value.Tag.float_128.create(arena, @fabs(f));
             },
             else => unreachable,
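After the refactor, these helpers consult the Target for exactly one decision: floatBits maps the scalar float type to its storage width (target-dependent only for c_longdouble), and that width then selects the legacy payload tag. A sketch of the mapping, under the assumption that floatBits returns u16 as declared in type.zig:

    fn floatTagForBits(bits: u16) Value.Tag {
        return switch (bits) {
            16 => .float_16,
            32 => .float_32,
            64 => .float_64,
            80 => .float_80, // x86 extended precision
            128 => .float_128,
            else => unreachable,
        };
    }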
@@ -4950,39 +4675,38 @@ pub const Value = struct {
     }

     pub fn floor(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
         if (float_type.zigTypeTag(mod) == .Vector) {
             const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try floorScalar(elem_val, float_type.scalarType(mod), arena, target);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try floorScalar(elem_val, float_type.scalarType(mod), arena, mod);
             }
             return Value.Tag.aggregate.create(arena, result_data);
         }
-        return floorScalar(val, float_type, arena, target);
+        return floorScalar(val, float_type, arena, mod);
     }

-    pub fn floorScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+    pub fn floorScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value {
+        const target = mod.getTarget();
         switch (float_type.floatBits(target)) {
             16 => {
-                const f = val.toFloat(f16);
+                const f = val.toFloat(f16, mod);
                 return Value.Tag.float_16.create(arena, @floor(f));
             },
             32 => {
-                const f = val.toFloat(f32);
+                const f = val.toFloat(f32, mod);
                 return Value.Tag.float_32.create(arena, @floor(f));
             },
             64 => {
-                const f = val.toFloat(f64);
+                const f = val.toFloat(f64, mod);
                 return Value.Tag.float_64.create(arena, @floor(f));
             },
             80 => {
-                const f = val.toFloat(f80);
+                const f = val.toFloat(f80, mod);
                 return Value.Tag.float_80.create(arena, @floor(f));
             },
             128 => {
-                const f = val.toFloat(f128);
+                const f = val.toFloat(f128, mod);
                 return Value.Tag.float_128.create(arena, @floor(f));
             },
             else => unreachable,
@@ -4990,39 +4714,38 @@ pub const Value = struct {
     }

     pub fn ceil(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
         if (float_type.zigTypeTag(mod) == .Vector) {
             const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try ceilScalar(elem_val, float_type.scalarType(mod), arena, target);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try ceilScalar(elem_val, float_type.scalarType(mod), arena, mod);
             }
             return Value.Tag.aggregate.create(arena, result_data);
         }
-        return ceilScalar(val, float_type, arena, target);
+        return ceilScalar(val, float_type, arena, mod);
     }

-    pub fn ceilScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+    pub fn ceilScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value {
+        const target = mod.getTarget();
         switch (float_type.floatBits(target)) {
             16 => {
-                const f = val.toFloat(f16);
+                const f = val.toFloat(f16, mod);
                 return Value.Tag.float_16.create(arena, @ceil(f));
             },
             32 => {
-                const f = val.toFloat(f32);
+                const f = val.toFloat(f32, mod);
                 return Value.Tag.float_32.create(arena, @ceil(f));
             },
             64 => {
-                const f = val.toFloat(f64);
+                const f = val.toFloat(f64, mod);
                 return Value.Tag.float_64.create(arena, @ceil(f));
             },
             80 => {
-                const f = val.toFloat(f80);
+                const f = val.toFloat(f80, mod);
                 return Value.Tag.float_80.create(arena, @ceil(f));
             },
             128 => {
-                const f = val.toFloat(f128);
+                const f = val.toFloat(f128, mod);
                 return Value.Tag.float_128.create(arena, @ceil(f));
             },
             else => unreachable,
@@ -5030,39 +4753,38 @@ pub const Value = struct {
     }

     pub fn round(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
         if (float_type.zigTypeTag(mod) == .Vector) {
             const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try roundScalar(elem_val, float_type.scalarType(mod), arena, target);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try roundScalar(elem_val, float_type.scalarType(mod), arena, mod);
             }
             return Value.Tag.aggregate.create(arena, result_data);
         }
-        return roundScalar(val, float_type, arena, target);
+        return roundScalar(val, float_type, arena, mod);
     }

-    pub fn roundScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+    pub fn roundScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value {
+        const target = mod.getTarget();
         switch (float_type.floatBits(target)) {
             16 => {
-                const f = val.toFloat(f16);
+                const f = val.toFloat(f16, mod);
                 return Value.Tag.float_16.create(arena, @round(f));
             },
             32 => {
-                const f = val.toFloat(f32);
+                const f = val.toFloat(f32, mod);
                 return Value.Tag.float_32.create(arena, @round(f));
             },
             64 => {
-                const f = val.toFloat(f64);
+                const f = val.toFloat(f64, mod);
                 return Value.Tag.float_64.create(arena, @round(f));
             },
             80 => {
-                const f = val.toFloat(f80);
+                const f = val.toFloat(f80, mod);
                 return Value.Tag.float_80.create(arena, @round(f));
             },
             128 => {
-                const f = val.toFloat(f128);
+                const f = val.toFloat(f128, mod);
                 return Value.Tag.float_128.create(arena, @round(f));
             },
             else => unreachable,
@@ -5070,39 +4792,38 @@ pub const Value = struct {
     }

     pub fn trunc(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
         if (float_type.zigTypeTag(mod) == .Vector) {
             const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try truncScalar(elem_val, float_type.scalarType(mod), arena, target);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try truncScalar(elem_val, float_type.scalarType(mod), arena, mod);
             }
             return Value.Tag.aggregate.create(arena, result_data);
         }
-        return truncScalar(val, float_type, arena, target);
+        return truncScalar(val, float_type, arena, mod);
     }

-    pub fn truncScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+    pub fn truncScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value {
+        const target = mod.getTarget();
         switch (float_type.floatBits(target)) {
             16 => {
-                const f = val.toFloat(f16);
+                const f = val.toFloat(f16, mod);
                 return Value.Tag.float_16.create(arena, @trunc(f));
             },
             32 => {
-                const f = val.toFloat(f32);
+                const f = val.toFloat(f32, mod);
                 return Value.Tag.float_32.create(arena, @trunc(f));
             },
             64 => {
-                const f = val.toFloat(f64);
+                const f = val.toFloat(f64, mod);
                 return Value.Tag.float_64.create(arena, @trunc(f));
             },
             80 => {
-                const f = val.toFloat(f80);
+                const f = val.toFloat(f80, mod);
                 return Value.Tag.float_80.create(arena, @trunc(f));
             },
             128 => {
-                const f = val.toFloat(f128);
+                const f = val.toFloat(f128, mod);
                 return Value.Tag.float_128.create(arena, @trunc(f));
             },
             else => unreachable,
@@ -5117,28 +4838,24 @@ pub const Value = struct {
         arena: Allocator,
         mod: *Module,
     ) !Value {
-        const target = mod.getTarget();
         if (float_type.zigTypeTag(mod) == .Vector) {
             const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
             for (result_data, 0..) |*scalar, i| {
-                var mulend1_buf: Value.ElemValueBuffer = undefined;
-                const mulend1_elem = mulend1.elemValueBuffer(mod, i, &mulend1_buf);
-                var mulend2_buf: Value.ElemValueBuffer = undefined;
-                const mulend2_elem = mulend2.elemValueBuffer(mod, i, &mulend2_buf);
-                var addend_buf: Value.ElemValueBuffer = undefined;
-                const addend_elem = addend.elemValueBuffer(mod, i, &addend_buf);
+                const mulend1_elem = try mulend1.elemValue(mod, i);
+                const mulend2_elem = try mulend2.elemValue(mod, i);
+                const addend_elem = try addend.elemValue(mod, i);
                 scalar.* = try mulAddScalar(
                     float_type.scalarType(mod),
                     mulend1_elem,
                     mulend2_elem,
                     addend_elem,
                     arena,
-                    target,
+                    mod,
                 );
             }
             return Value.Tag.aggregate.create(arena, result_data);
         }
-        return mulAddScalar(float_type, mulend1, mulend2, addend, arena, target);
+        return mulAddScalar(float_type, mulend1, mulend2, addend, arena, mod);
     }

     pub fn mulAddScalar(
@@ -5147,37 +4864,38 @@ pub const Value = struct {
         mulend2: Value,
         addend: Value,
         arena: Allocator,
-        target: Target,
+        mod: *const Module,
     ) Allocator.Error!Value {
+        const target = mod.getTarget();
         switch (float_type.floatBits(target)) {
             16 => {
-                const m1 = mulend1.toFloat(f16);
-                const m2 = mulend2.toFloat(f16);
-                const a = addend.toFloat(f16);
+                const m1 = mulend1.toFloat(f16, mod);
+                const m2 = mulend2.toFloat(f16, mod);
+                const a = addend.toFloat(f16, mod);
                 return Value.Tag.float_16.create(arena, @mulAdd(f16, m1, m2, a));
             },
             32 => {
-                const m1 = mulend1.toFloat(f32);
-                const m2 = mulend2.toFloat(f32);
-                const a = addend.toFloat(f32);
+                const m1 = mulend1.toFloat(f32, mod);
+                const m2 = mulend2.toFloat(f32, mod);
+                const a = addend.toFloat(f32, mod);
                 return Value.Tag.float_32.create(arena, @mulAdd(f32, m1, m2, a));
             },
            64 => {
-                const m1 = mulend1.toFloat(f64);
-                const m2 = mulend2.toFloat(f64);
-                const a = addend.toFloat(f64);
+                const m1 = mulend1.toFloat(f64, mod);
+                const m2 = mulend2.toFloat(f64, mod);
+                const a = addend.toFloat(f64, mod);
                 return Value.Tag.float_64.create(arena, @mulAdd(f64, m1, m2, a));
             },
             80 => {
-                const m1 = mulend1.toFloat(f80);
-                const m2 = mulend2.toFloat(f80);
-                const a = addend.toFloat(f80);
+                const m1 = mulend1.toFloat(f80, mod);
+                const m2 = mulend2.toFloat(f80, mod);
+                const a = addend.toFloat(f80, mod);
                 return Value.Tag.float_80.create(arena, @mulAdd(f80, m1, m2, a));
             },
             128 => {
-                const m1 = mulend1.toFloat(f128);
-                const m2 = mulend2.toFloat(f128);
-                const a = addend.toFloat(f128);
+                const m1 = mulend1.toFloat(f128, mod);
+                const m2 = mulend2.toFloat(f128, mod);
+                const a = addend.toFloat(f128, mod);
                 return Value.Tag.float_128.create(arena, @mulAdd(f128, m1, m2, a));
             },
             else => unreachable,
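mulAdd gets the same Module-threading treatment but stays a dedicated ternary helper: @mulAdd performs the multiply and the add with a single rounding, so lowering it through the binary helpers above would round twice and change results. A small self-contained illustration of the difference:

    const std = @import("std");

    test "fused multiply-add rounds once" {
        const eps = std.math.floatEps(f64);
        const x: f64 = 1.0 + eps;
        const y: f64 = 1.0 - eps;
        // x * y is exactly 1 - eps*eps, which rounds to 1.0 in f64.
        const unfused = x * y - 1.0; // two roundings: yields 0.0
        const fused = @mulAdd(f64, x, y, -1.0); // one rounding: yields -eps*eps
        try std.testing.expect(unfused == 0.0);
        try std.testing.expect(fused != 0.0);
    }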
@@ -5186,13 +4904,14 @@ pub const Value = struct {

     /// If the value is represented in-memory as a series of bytes that all
     /// have the same value, return that byte value, otherwise null.
-    pub fn hasRepeatedByteRepr(val: Value, ty: Type, mod: *Module, value_buffer: *Payload.U64) !?Value {
+    pub fn hasRepeatedByteRepr(val: Value, ty: Type, mod: *Module) !?Value {
         const abi_size = std.math.cast(usize, ty.abiSize(mod)) orelse return null;
         assert(abi_size >= 1);
         const byte_buffer = try mod.gpa.alloc(u8, abi_size);
         defer mod.gpa.free(byte_buffer);

         writeToMemory(val, ty, mod, byte_buffer) catch |err| switch (err) {
+            error.OutOfMemory => return error.OutOfMemory,
             error.ReinterpretDeclRef => return null,
             // TODO: The writeToMemory function was originally created for the purpose
             // of comptime pointer casting. However, it is now additionally being used
@@ -5206,11 +4925,7 @@ pub const Value = struct {
         for (byte_buffer[1..]) |byte| {
             if (byte != first_byte) return null;
         }
-        value_buffer.* = .{
-            .base = .{ .tag = .int_u64 },
-            .data = first_byte,
-        };
-        return initPayload(&value_buffer.base);
+        return try mod.intValue(Type.u8, first_byte);
     }

     pub fn isGenericPoison(val: Value) bool {
@@ -5226,30 +4941,6 @@ pub const Value = struct {
             data: u32,
         };

-        pub const U64 = struct {
-            base: Payload,
-            data: u64,
-        };
-
-        pub const I64 = struct {
-            base: Payload,
-            data: i64,
-        };
-
-        pub const BigInt = struct {
-            base: Payload,
-            data: []const std.math.big.Limb,
-
-            pub fn asBigInt(self: BigInt) BigIntConst {
-                const positive = switch (self.base.tag) {
-                    .int_big_positive => true,
-                    .int_big_negative => false,
-                    else => unreachable,
-                };
-                return BigIntConst{ .limbs = self.data, .positive = positive };
-            }
-        };
-
         pub const Function = struct {
             base: Payload,
             data: *Module.Fn,
@@ -5452,12 +5143,9 @@ pub const Value = struct {

     pub const BigIntSpace = InternPool.Key.Int.Storage.BigIntSpace;

-    pub const zero = initTag(.zero);
-    pub const one = initTag(.one);
-    pub const negative_one: Value = .{
-        .ip_index = .none,
-        .legacy = .{ .ptr_otherwise = &negative_one_payload.base },
-    };
+    pub const zero: Value = .{ .ip_index = .zero, .legacy = undefined };
+    pub const one: Value = .{ .ip_index = .one, .legacy = undefined };
+    pub const negative_one: Value = .{ .ip_index = .negative_one, .legacy = undefined };
     pub const undef: Value = .{ .ip_index = .undef, .legacy = undefined };
     pub const @"void": Value = .{ .ip_index = .void_value, .legacy = undefined };
     pub const @"null": Value = .{ .ip_index = .null_value, .legacy = undefined };
@@ -5515,8 +5203,3 @@ pub const Value = struct {
         }
     }
 };
-
-var negative_one_payload: Value.Payload.I64 = .{
-    .base = .{ .tag = .int_i64 },
-    .data = -1,
-};
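With the Payload machinery above removed, Value.zero, Value.one, and Value.negative_one become nothing more than well-known InternPool indices, valid without any allocation or static backing storage, and hasRepeatedByteRepr now returns an interned u8 value built with mod.intValue instead of writing through a caller-supplied Payload.U64. A sketch of an updated call site (dest, abi_size, and lowerMemset are hypothetical backend names, not from this commit):

    // Before: a scratch payload had to outlive the returned Value.
    //     var buf: Value.Payload.U64 = undefined;
    //     if (try val.hasRepeatedByteRepr(ty, mod, &buf)) |byte_val| ...
    // After: the result is self-contained.
    if (try val.hasRepeatedByteRepr(ty, mod)) |byte_val| {
        return lowerMemset(dest, byte_val, abi_size);
    }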
