author     Veikka Tuominen <git@vexu.eu>  2022-12-19 16:13:12 +0200
committer  Veikka Tuominen <git@vexu.eu>  2022-12-19 17:01:44 +0200
commit     ee334aea801c71cbcc567b1d19be9c04d911beda (patch)
tree       4134fa8f51d359fd638e7c2c92563b9d482ec718 /src
parent     22d46e1d7753ea2a9accc180e8613206120739c5 (diff)
download   zig-ee334aea801c71cbcc567b1d19be9c04d911beda.tar.gz
           zig-ee334aea801c71cbcc567b1d19be9c04d911beda.zip
value: remove `indexVectorlike`
Vectors can be represented by all of the same values as arrays, so this was never a valid shortcut.
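
For context, the sketch below (not part of the commit) illustrates the element-access pattern this change standardizes on: rather than the removed `indexVectorlike`, callers thread `sema.mod` through and read each element with `Value.elemValueBuffer`, which handles `aggregate`, `repeated`, and scalar-like representations alike. The `sumVectorElems` helper is hypothetical; `ElemValueBuffer`, `elemValueBuffer`, `vectorLen`, and `toUnsignedInt` are the existing helpers already used throughout the diff.

    // Hypothetical helper, shown only to illustrate the new access pattern.
    fn sumVectorElems(sema: *Sema, val: Value, ty: Type) u64 {
        const target = sema.mod.getTarget();
        var total: u64 = 0;
        var i: usize = 0;
        while (i < ty.vectorLen()) : (i += 1) {
            // Before this commit: const elem = val.indexVectorlike(i);
            // Now every element goes through the general accessor, which also
            // covers `repeated` and scalar-like values (.zero, .one, .int_u64, ...).
            var buf: Value.ElemValueBuffer = undefined;
            const elem = val.elemValueBuffer(sema.mod, i, &buf);
            total += elem.toUnsignedInt(target);
        }
        return total;
    }
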
Diffstat (limited to 'src')
-rw-r--r--  src/Sema.zig   220
-rw-r--r--  src/value.zig  441
2 files changed, 434 insertions, 227 deletions
diff --git a/src/Sema.zig b/src/Sema.zig
index e8ed010a7c..02f6b24e2d 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -9225,7 +9225,7 @@ fn intCast(
// If the destination type is signed, then we need to double its
// range to account for negative values.
const dest_range_val = if (wanted_info.signedness == .signed) range_val: {
- const range_minus_one = try dest_max_val.shl(Value.one, unsigned_operand_ty, sema.arena, target);
+ const range_minus_one = try dest_max_val.shl(Value.one, unsigned_operand_ty, sema.arena, sema.mod);
break :range_val try sema.intAdd(range_minus_one, Value.one, unsigned_operand_ty);
} else dest_max_val;
const dest_range = try sema.addConstant(unsigned_operand_ty, dest_range_val);
@@ -11683,9 +11683,11 @@ fn zirShl(
if (rhs_ty.zigTypeTag() == .Vector) {
var i: usize = 0;
while (i < rhs_ty.vectorLen()) : (i += 1) {
- if (rhs_val.indexVectorlike(i).compareHetero(.gte, bit_value, target)) {
+ var elem_value_buf: Value.ElemValueBuffer = undefined;
+ const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
+ if (rhs_elem.compareHetero(.gte, bit_value, target)) {
return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
- rhs_val.indexVectorlike(i).fmtValue(scalar_ty, sema.mod),
+ rhs_elem.fmtValue(scalar_ty, sema.mod),
i,
scalar_ty.fmt(sema.mod),
});
@@ -11701,9 +11703,11 @@ fn zirShl(
if (rhs_ty.zigTypeTag() == .Vector) {
var i: usize = 0;
while (i < rhs_ty.vectorLen()) : (i += 1) {
- if (rhs_val.indexVectorlike(i).compareHetero(.lt, Value.zero, target)) {
+ var elem_value_buf: Value.ElemValueBuffer = undefined;
+ const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
+ if (rhs_elem.compareHetero(.lt, Value.zero, target)) {
return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
- rhs_val.indexVectorlike(i).fmtValue(scalar_ty, sema.mod),
+ rhs_elem.fmtValue(scalar_ty, sema.mod),
i,
});
}
@@ -11726,7 +11730,7 @@ fn zirShl(
const val = switch (air_tag) {
.shl_exact => val: {
- const shifted = try lhs_val.shlWithOverflow(rhs_val, lhs_ty, sema.arena, target);
+ const shifted = try lhs_val.shlWithOverflow(rhs_val, lhs_ty, sema.arena, sema.mod);
if (scalar_ty.zigTypeTag() == .ComptimeInt) {
break :val shifted.wrapped_result;
}
@@ -11737,14 +11741,14 @@ fn zirShl(
},
.shl_sat => if (scalar_ty.zigTypeTag() == .ComptimeInt)
- try lhs_val.shl(rhs_val, lhs_ty, sema.arena, target)
+ try lhs_val.shl(rhs_val, lhs_ty, sema.arena, sema.mod)
else
- try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, target),
+ try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, sema.mod),
.shl => if (scalar_ty.zigTypeTag() == .ComptimeInt)
- try lhs_val.shl(rhs_val, lhs_ty, sema.arena, target)
+ try lhs_val.shl(rhs_val, lhs_ty, sema.arena, sema.mod)
else
- try lhs_val.shlTrunc(rhs_val, lhs_ty, sema.arena, target),
+ try lhs_val.shlTrunc(rhs_val, lhs_ty, sema.arena, sema.mod),
else => unreachable,
};
@@ -11867,9 +11871,11 @@ fn zirShr(
if (rhs_ty.zigTypeTag() == .Vector) {
var i: usize = 0;
while (i < rhs_ty.vectorLen()) : (i += 1) {
- if (rhs_val.indexVectorlike(i).compareHetero(.gte, bit_value, target)) {
+ var elem_value_buf: Value.ElemValueBuffer = undefined;
+ const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
+ if (rhs_elem.compareHetero(.gte, bit_value, target)) {
return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
- rhs_val.indexVectorlike(i).fmtValue(scalar_ty, sema.mod),
+ rhs_elem.fmtValue(scalar_ty, sema.mod),
i,
scalar_ty.fmt(sema.mod),
});
@@ -11885,9 +11891,11 @@ fn zirShr(
if (rhs_ty.zigTypeTag() == .Vector) {
var i: usize = 0;
while (i < rhs_ty.vectorLen()) : (i += 1) {
- if (rhs_val.indexVectorlike(i).compareHetero(.lt, Value.zero, target)) {
+ var elem_value_buf: Value.ElemValueBuffer = undefined;
+ const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
+ if (rhs_elem.compareHetero(.lt, Value.zero, target)) {
return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
- rhs_val.indexVectorlike(i).fmtValue(scalar_ty, sema.mod),
+ rhs_elem.fmtValue(scalar_ty, sema.mod),
i,
});
}
@@ -11903,12 +11911,12 @@ fn zirShr(
}
if (air_tag == .shr_exact) {
// Detect if any ones would be shifted out.
- const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val, target);
+ const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val, sema.mod);
if (!(try truncated.compareAllWithZeroAdvanced(.eq, sema))) {
return sema.fail(block, src, "exact shift shifted out 1 bits", .{});
}
}
- const val = try lhs_val.shr(rhs_val, lhs_ty, sema.arena, target);
+ const val = try lhs_val.shr(rhs_val, lhs_ty, sema.arena, sema.mod);
return sema.addConstant(lhs_ty, val);
} else {
break :rs lhs_src;
@@ -11992,7 +12000,6 @@ fn zirBitwise(
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
- const target = sema.mod.getTarget();
if (!is_int) {
return sema.fail(block, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag()), @tagName(rhs_ty.zigTypeTag()) });
@@ -12004,9 +12011,9 @@ fn zirBitwise(
if (try sema.resolveMaybeUndefValIntable(casted_lhs)) |lhs_val| {
if (try sema.resolveMaybeUndefValIntable(casted_rhs)) |rhs_val| {
const result_val = switch (air_tag) {
- .bit_and => try lhs_val.bitwiseAnd(rhs_val, resolved_type, sema.arena, target),
- .bit_or => try lhs_val.bitwiseOr(rhs_val, resolved_type, sema.arena, target),
- .xor => try lhs_val.bitwiseXor(rhs_val, resolved_type, sema.arena, target),
+ .bit_and => try lhs_val.bitwiseAnd(rhs_val, resolved_type, sema.arena, sema.mod),
+ .bit_or => try lhs_val.bitwiseOr(rhs_val, resolved_type, sema.arena, sema.mod),
+ .xor => try lhs_val.bitwiseXor(rhs_val, resolved_type, sema.arena, sema.mod),
else => unreachable,
};
return sema.addConstant(resolved_type, result_val);
@@ -12033,7 +12040,6 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const operand = try sema.resolveInst(inst_data.operand);
const operand_type = sema.typeOf(operand);
const scalar_type = operand_type.scalarType();
- const target = sema.mod.getTarget();
if (scalar_type.zigTypeTag() != .Int) {
return sema.fail(block, src, "unable to perform binary not operation on type '{}'", .{
@@ -12050,14 +12056,14 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const elems = try sema.arena.alloc(Value, vec_len);
for (elems) |*elem, i| {
const elem_val = val.elemValueBuffer(sema.mod, i, &elem_val_buf);
- elem.* = try elem_val.bitwiseNot(scalar_type, sema.arena, target);
+ elem.* = try elem_val.bitwiseNot(scalar_type, sema.arena, sema.mod);
}
return sema.addConstant(
operand_type,
try Value.Tag.aggregate.create(sema.arena, elems),
);
} else {
- const result_val = try val.bitwiseNot(operand_type, sema.arena, target);
+ const result_val = try val.bitwiseNot(operand_type, sema.arena, sema.mod);
return sema.addConstant(operand_type, result_val);
}
}
@@ -12586,8 +12592,7 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
// We handle float negation here to ensure negative zero is represented in the bits.
if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| {
if (rhs_val.isUndef()) return sema.addConstUndef(rhs_ty);
- const target = sema.mod.getTarget();
- return sema.addConstant(rhs_ty, try rhs_val.floatNeg(rhs_ty, sema.arena, target));
+ return sema.addConstant(rhs_ty, try rhs_val.floatNeg(rhs_ty, sema.arena, sema.mod));
}
try sema.requireRuntimeBlock(block, src, null);
return block.addUnOp(if (block.float_mode == .Optimized) .neg_optimized else .neg, rhs);
@@ -12679,7 +12684,6 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div);
const mod = sema.mod;
- const target = mod.getTarget();
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
@@ -12690,7 +12694,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
// If lhs % rhs is 0, it doesn't matter.
const lhs_val = maybe_lhs_val orelse unreachable;
const rhs_val = maybe_rhs_val orelse unreachable;
- const rem = lhs_val.floatRem(rhs_val, resolved_type, sema.arena, target) catch unreachable;
+ const rem = lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod) catch unreachable;
if (!rem.compareAllWithZero(.eq)) {
return sema.fail(block, src, "ambiguous coercion of division operands '{s}' and '{s}'; non-zero remainder '{}'", .{
@tagName(lhs_ty.tag()), @tagName(rhs_ty.tag()), rem.fmtValue(resolved_type, sema.mod),
@@ -12766,7 +12770,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
if (maybe_rhs_val) |rhs_val| {
if (is_int) {
- const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, target);
+ const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, mod);
var vector_index: usize = undefined;
if (!(try sema.intFitsInType(res, resolved_type, &vector_index))) {
return sema.failWithIntegerOverflow(block, src, resolved_type, res, vector_index);
@@ -12775,7 +12779,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
} else {
return sema.addConstant(
resolved_type,
- try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, target),
+ try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, mod),
);
}
} else {
@@ -12839,7 +12843,6 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_exact);
const mod = sema.mod;
- const target = mod.getTarget();
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
@@ -12884,24 +12887,24 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (maybe_lhs_val) |lhs_val| {
if (maybe_rhs_val) |rhs_val| {
if (is_int) {
- const modulus_val = try lhs_val.intMod(rhs_val, resolved_type, sema.arena, target);
+ const modulus_val = try lhs_val.intMod(rhs_val, resolved_type, sema.arena, mod);
if (!(modulus_val.compareAllWithZero(.eq))) {
return sema.fail(block, src, "exact division produced remainder", .{});
}
- const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, target);
+ const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, mod);
var vector_index: usize = undefined;
if (!(try sema.intFitsInType(res, resolved_type, &vector_index))) {
return sema.failWithIntegerOverflow(block, src, resolved_type, res, vector_index);
}
return sema.addConstant(resolved_type, res);
} else {
- const modulus_val = try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, target);
+ const modulus_val = try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, mod);
if (!(modulus_val.compareAllWithZero(.eq))) {
return sema.fail(block, src, "exact division produced remainder", .{});
}
return sema.addConstant(
resolved_type,
- try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, target),
+ try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, mod),
);
}
} else break :rs rhs_src;
@@ -13004,7 +13007,6 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_floor);
const mod = sema.mod;
- const target = mod.getTarget();
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
@@ -13064,12 +13066,12 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (is_int) {
return sema.addConstant(
resolved_type,
- try lhs_val.intDivFloor(rhs_val, resolved_type, sema.arena, target),
+ try lhs_val.intDivFloor(rhs_val, resolved_type, sema.arena, mod),
);
} else {
return sema.addConstant(
resolved_type,
- try lhs_val.floatDivFloor(rhs_val, resolved_type, sema.arena, target),
+ try lhs_val.floatDivFloor(rhs_val, resolved_type, sema.arena, mod),
);
}
} else break :rs rhs_src;
@@ -13121,7 +13123,6 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_trunc);
const mod = sema.mod;
- const target = mod.getTarget();
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
@@ -13178,7 +13179,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (maybe_rhs_val) |rhs_val| {
if (is_int) {
- const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, target);
+ const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, mod);
var vector_index: usize = undefined;
if (!(try sema.intFitsInType(res, resolved_type, &vector_index))) {
return sema.failWithIntegerOverflow(block, src, resolved_type, res, vector_index);
@@ -13187,7 +13188,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} else {
return sema.addConstant(
resolved_type,
- try lhs_val.floatDivTrunc(rhs_val, resolved_type, sema.arena, target),
+ try lhs_val.floatDivTrunc(rhs_val, resolved_type, sema.arena, mod),
);
}
} else break :rs rhs_src;
@@ -13365,7 +13366,6 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod_rem);
const mod = sema.mod;
- const target = mod.getTarget();
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
@@ -13442,7 +13442,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
}
return sema.addConstant(
resolved_type,
- try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, target),
+ try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod),
);
} else {
return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
@@ -13471,7 +13471,11 @@ fn intRem(
if (ty.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try sema.intRemScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i));
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
+ scalar.* = try sema.intRemScalar(lhs_elem, rhs_elem);
}
return Value.Tag.aggregate.create(sema.arena, result_data);
}
@@ -13541,7 +13545,6 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod);
const mod = sema.mod;
- const target = mod.getTarget();
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
@@ -13573,7 +13576,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
if (maybe_lhs_val) |lhs_val| {
return sema.addConstant(
resolved_type,
- try lhs_val.intMod(rhs_val, resolved_type, sema.arena, target),
+ try lhs_val.intMod(rhs_val, resolved_type, sema.arena, mod),
);
}
break :rs lhs_src;
@@ -13597,7 +13600,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
if (maybe_rhs_val) |rhs_val| {
return sema.addConstant(
resolved_type,
- try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, target),
+ try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, mod),
);
} else break :rs rhs_src;
} else break :rs lhs_src;
@@ -13644,7 +13647,6 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .rem);
const mod = sema.mod;
- const target = mod.getTarget();
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
@@ -13700,7 +13702,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
if (maybe_rhs_val) |rhs_val| {
return sema.addConstant(
resolved_type,
- try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, target),
+ try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod),
);
} else break :rs rhs_src;
} else break :rs lhs_src;
@@ -13739,7 +13741,6 @@ fn zirOverflowArithmetic(
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
const mod = sema.mod;
- const target = mod.getTarget();
// Note, the types of lhs/rhs (also for shifting)/ptr are already correct as ensured by astgen.
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
@@ -13839,7 +13840,7 @@ fn zirOverflowArithmetic(
break :result .{ .overflowed = try sema.addConstUndef(overflowed_ty), .wrapped = try sema.addConstUndef(dest_ty) };
}
- const result = try lhs_val.intMulWithOverflow(rhs_val, dest_ty, sema.arena, target);
+ const result = try lhs_val.intMulWithOverflow(rhs_val, dest_ty, sema.arena, mod);
const overflowed = try sema.addConstant(overflowed_ty, result.overflowed);
const wrapped = try sema.addConstant(dest_ty, result.wrapped_result);
break :result .{ .overflowed = overflowed, .wrapped = wrapped };
@@ -13866,7 +13867,7 @@ fn zirOverflowArithmetic(
break :result .{ .overflowed = try sema.addConstUndef(overflowed_ty), .wrapped = try sema.addConstUndef(dest_ty) };
}
- const result = try lhs_val.shlWithOverflow(rhs_val, dest_ty, sema.arena, target);
+ const result = try lhs_val.shlWithOverflow(rhs_val, dest_ty, sema.arena, sema.mod);
const overflowed = try sema.addConstant(overflowed_ty, result.overflowed);
const wrapped = try sema.addConstant(dest_ty, result.wrapped_result);
break :result .{ .overflowed = overflowed, .wrapped = wrapped };
@@ -13979,13 +13980,12 @@ fn analyzeArithmetic(
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, zir_tag);
const mod = sema.mod;
- const target = mod.getTarget();
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
const rs: struct { src: LazySrcLoc, air_tag: Air.Inst.Tag } = rs: {
switch (zir_tag) {
.add => {
// For integers:
// If either of the operands are zero, then the other operand is
// returned, even if it is undefined.
// If either of the operands are undefined, it's a compile error
@@ -14080,7 +14080,7 @@ fn analyzeArithmetic(
const val = if (scalar_tag == .ComptimeInt)
try sema.intAdd(lhs_val, rhs_val, resolved_type)
else
- try lhs_val.intAddSat(rhs_val, resolved_type, sema.arena, target);
+ try lhs_val.intAddSat(rhs_val, resolved_type, sema.arena, mod);
return sema.addConstant(resolved_type, val);
} else break :rs .{ .src = lhs_src, .air_tag = .add_sat };
@@ -14177,7 +14177,7 @@ fn analyzeArithmetic(
const val = if (scalar_tag == .ComptimeInt)
try sema.intSub(lhs_val, rhs_val, resolved_type)
else
- try lhs_val.intSubSat(rhs_val, resolved_type, sema.arena, target);
+ try lhs_val.intSubSat(rhs_val, resolved_type, sema.arena, mod);
return sema.addConstant(resolved_type, val);
} else break :rs .{ .src = rhs_src, .air_tag = .sub_sat };
@@ -14258,7 +14258,7 @@ fn analyzeArithmetic(
}
}
if (is_int) {
- const product = try lhs_val.intMul(rhs_val, resolved_type, sema.arena, target);
+ const product = try lhs_val.intMul(rhs_val, resolved_type, sema.arena, sema.mod);
var vector_index: usize = undefined;
if (!(try sema.intFitsInType(product, resolved_type, &vector_index))) {
return sema.failWithIntegerOverflow(block, src, resolved_type, product, vector_index);
@@ -14267,7 +14267,7 @@ fn analyzeArithmetic(
} else {
return sema.addConstant(
resolved_type,
- try lhs_val.floatMul(rhs_val, resolved_type, sema.arena, target),
+ try lhs_val.floatMul(rhs_val, resolved_type, sema.arena, sema.mod),
);
}
} else break :rs .{ .src = lhs_src, .air_tag = air_tag };
@@ -14311,7 +14311,7 @@ fn analyzeArithmetic(
}
return sema.addConstant(
resolved_type,
- try lhs_val.numberMulWrap(rhs_val, resolved_type, sema.arena, target),
+ try lhs_val.numberMulWrap(rhs_val, resolved_type, sema.arena, sema.mod),
);
} else break :rs .{ .src = lhs_src, .air_tag = air_tag };
} else break :rs .{ .src = rhs_src, .air_tag = air_tag };
@@ -14353,9 +14353,9 @@ fn analyzeArithmetic(
}
const val = if (scalar_tag == .ComptimeInt)
- try lhs_val.intMul(rhs_val, resolved_type, sema.arena, target)
+ try lhs_val.intMul(rhs_val, resolved_type, sema.arena, sema.mod)
else
- try lhs_val.intMulSat(rhs_val, resolved_type, sema.arena, target);
+ try lhs_val.intMulSat(rhs_val, resolved_type, sema.arena, sema.mod);
return sema.addConstant(resolved_type, val);
} else break :rs .{ .src = lhs_src, .air_tag = .mul_sat };
@@ -17947,7 +17947,7 @@ fn zirUnaryMath(
block: *Block,
inst: Zir.Inst.Index,
air_tag: Air.Inst.Tag,
- comptime eval: fn (Value, Type, Allocator, std.Target) Allocator.Error!Value,
+ comptime eval: fn (Value, Type, Allocator, *Module) Allocator.Error!Value,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -17956,7 +17956,6 @@ fn zirUnaryMath(
const operand = try sema.resolveInst(inst_data.operand);
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand_ty = sema.typeOf(operand);
- const target = sema.mod.getTarget();
switch (operand_ty.zigTypeTag()) {
.ComptimeFloat, .Float => {},
@@ -17983,7 +17982,7 @@ fn zirUnaryMath(
const elems = try sema.arena.alloc(Value, vec_len);
for (elems) |*elem, i| {
const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
- elem.* = try eval(elem_val, scalar_ty, sema.arena, target);
+ elem.* = try eval(elem_val, scalar_ty, sema.arena, sema.mod);
}
return sema.addConstant(
result_ty,
@@ -17998,7 +17997,7 @@ fn zirUnaryMath(
if (try sema.resolveMaybeUndefVal(operand)) |operand_val| {
if (operand_val.isUndef())
return sema.addConstUndef(operand_ty);
- const result_val = try eval(operand_val, operand_ty, sema.arena, target);
+ const result_val = try eval(operand_val, operand_ty, sema.arena, sema.mod);
return sema.addConstant(operand_ty, result_val);
}
@@ -19220,8 +19219,7 @@ fn zirIntToFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
_ = try sema.checkIntType(block, operand_src, operand_ty);
if (try sema.resolveMaybeUndefVal(operand)) |val| {
- const target = sema.mod.getTarget();
- const result_val = try val.intToFloatAdvanced(sema.arena, operand_ty, dest_ty, target, sema);
+ const result_val = try val.intToFloatAdvanced(sema.arena, operand_ty, dest_ty, sema.mod, sema);
return sema.addConstant(dest_ty, result_val);
} else if (dest_ty.zigTypeTag() == .ComptimeFloat) {
return sema.failWithNeededComptime(block, operand_src, "value being casted to 'comptime_float' must be comptime-known");
@@ -19547,14 +19545,14 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (!is_vector) {
return sema.addConstant(
dest_ty,
- try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, target),
+ try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod),
);
}
var elem_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, operand_ty.vectorLen());
for (elems) |*elem, i| {
const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
- elem.* = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, target);
+ elem.* = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod);
}
return sema.addConstant(
dest_ty,
@@ -20523,13 +20521,13 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
while (i < vec_len) : (i += 1) {
const elem_val = operand_val.elemValueBuffer(sema.mod, i, &elem_buf);
switch (operation) {
- .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, target),
- .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, target),
- .Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena, target),
+ .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, sema.mod),
+ .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, sema.mod),
+ .Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena, sema.mod),
.Min => accum = accum.numberMin(elem_val, target),
.Max => accum = accum.numberMax(elem_val, target),
.Add => accum = try sema.numberAddWrapScalar(accum, elem_val, scalar_ty),
- .Mul => accum = try accum.numberMulWrap(elem_val, scalar_ty, sema.arena, target),
+ .Mul => accum = try accum.numberMulWrap(elem_val, scalar_ty, sema.arena, sema.mod),
}
}
return sema.addConstant(scalar_ty, accum);
@@ -20925,10 +20923,10 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
.Xchg => operand_val,
.Add => try sema.numberAddWrapScalar(stored_val, operand_val, elem_ty),
.Sub => try sema.numberSubWrapScalar(stored_val, operand_val, elem_ty),
- .And => try stored_val.bitwiseAnd (operand_val, elem_ty, sema.arena, target),
- .Nand => try stored_val.bitwiseNand (operand_val, elem_ty, sema.arena, target),
- .Or => try stored_val.bitwiseOr (operand_val, elem_ty, sema.arena, target),
- .Xor => try stored_val.bitwiseXor (operand_val, elem_ty, sema.arena, target),
+ .And => try stored_val.bitwiseAnd (operand_val, elem_ty, sema.arena, sema.mod),
+ .Nand => try stored_val.bitwiseNand (operand_val, elem_ty, sema.arena, sema.mod),
+ .Or => try stored_val.bitwiseOr (operand_val, elem_ty, sema.arena, sema.mod),
+ .Xor => try stored_val.bitwiseXor (operand_val, elem_ty, sema.arena, sema.mod),
.Max => stored_val.numberMax (operand_val, target),
.Min => stored_val.numberMin (operand_val, target),
// zig fmt: on
@@ -21001,8 +20999,6 @@ fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const mulend1 = try sema.coerce(block, ty, try sema.resolveInst(extra.mulend1), mulend1_src);
const mulend2 = try sema.coerce(block, ty, try sema.resolveInst(extra.mulend2), mulend2_src);
- const target = sema.mod.getTarget();
-
const maybe_mulend1 = try sema.resolveMaybeUndefVal(mulend1);
const maybe_mulend2 = try sema.resolveMaybeUndefVal(mulend2);
const maybe_addend = try sema.resolveMaybeUndefVal(addend);
@@ -21018,7 +21014,7 @@ fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
if (maybe_addend) |addend_val| {
if (addend_val.isUndef()) return sema.addConstUndef(ty);
- const result_val = try Value.mulAdd(ty, mulend1_val, mulend2_val, addend_val, sema.arena, target);
+ const result_val = try Value.mulAdd(ty, mulend1_val, mulend2_val, addend_val, sema.arena, sema.mod);
return sema.addConstant(ty, result_val);
} else {
break :rs addend_src;
@@ -24830,7 +24826,7 @@ fn coerceExtra(
}
break :int;
};
- const result_val = try val.intToFloatAdvanced(sema.arena, inst_ty, dest_ty, target, sema);
+ const result_val = try val.intToFloatAdvanced(sema.arena, inst_ty, dest_ty, sema.mod, sema);
// TODO implement this compile error
//const int_again_val = try result_val.floatToInt(sema.arena, inst_ty);
//if (!int_again_val.eql(val, inst_ty, mod)) {
@@ -32263,7 +32259,11 @@ fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try sema.intAddScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i));
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
+ scalar.* = try sema.intAddScalar(lhs_elem, rhs_elem);
}
return Value.Tag.aggregate.create(sema.arena, result_data);
}
@@ -32297,7 +32297,11 @@ fn numberAddWrap(
if (ty.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try sema.numberAddWrapScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType());
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
+ scalar.* = try sema.numberAddWrapScalar(lhs_elem, rhs_elem, ty.scalarType());
}
return Value.Tag.aggregate.create(sema.arena, result_data);
}
@@ -32334,7 +32338,11 @@ fn intSub(
if (ty.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try sema.intSubScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i));
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
+ scalar.* = try sema.intSubScalar(lhs_elem, rhs_elem);
}
return Value.Tag.aggregate.create(sema.arena, result_data);
}
@@ -32368,7 +32376,11 @@ fn numberSubWrap(
if (ty.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try sema.numberSubWrapScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType());
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
+ scalar.* = try sema.numberSubWrapScalar(lhs_elem, rhs_elem, ty.scalarType());
}
return Value.Tag.aggregate.create(sema.arena, result_data);
}
@@ -32405,7 +32417,11 @@ fn floatAdd(
if (float_type.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try sema.floatAddScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType());
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
+ scalar.* = try sema.floatAddScalar(lhs_elem, rhs_elem, float_type.scalarType());
}
return Value.Tag.aggregate.create(sema.arena, result_data);
}
@@ -32458,7 +32474,11 @@ fn floatSub(
if (float_type.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try sema.floatSubScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType());
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
+ scalar.* = try sema.floatSubScalar(lhs_elem, rhs_elem, float_type.scalarType());
}
return Value.Tag.aggregate.create(sema.arena, result_data);
}
@@ -32512,7 +32532,11 @@ fn intSubWithOverflow(
const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen());
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- const of_math_result = try sema.intSubWithOverflowScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType());
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
+ const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType());
overflowed_data[i] = of_math_result.overflowed;
scalar.* = of_math_result.wrapped_result;
}
@@ -32562,7 +32586,9 @@ fn floatToInt(
const elem_ty = float_ty.childType();
const result_data = try sema.arena.alloc(Value, float_ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try sema.floatToIntScalar(block, src, val.indexVectorlike(i), elem_ty, int_ty.scalarType());
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(sema.mod, i, &buf);
+ scalar.* = try sema.floatToIntScalar(block, src, elem_val, elem_ty, int_ty.scalarType());
}
return Value.Tag.aggregate.create(sema.arena, result_data);
}
@@ -32857,7 +32883,11 @@ fn intAddWithOverflow(
const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen());
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- const of_math_result = try sema.intAddWithOverflowScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType());
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
+ const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType());
overflowed_data[i] = of_math_result.overflowed;
scalar.* = of_math_result.wrapped_result;
}
@@ -32909,7 +32939,11 @@ fn compareAll(
if (ty.zigTypeTag() == .Vector) {
var i: usize = 0;
while (i < ty.vectorLen()) : (i += 1) {
- if (!(try sema.compareScalar(lhs.indexVectorlike(i), op, rhs.indexVectorlike(i), ty.scalarType()))) {
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
+ if (!(try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType()))) {
return false;
}
}
@@ -32953,7 +32987,11 @@ fn compareVector(
assert(ty.zigTypeTag() == .Vector);
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- const res_bool = try sema.compareScalar(lhs.indexVectorlike(i), op, rhs.indexVectorlike(i), ty.scalarType());
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
+ const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType());
scalar.* = Value.makeBool(res_bool);
}
return Value.Tag.aggregate.create(sema.arena, result_data);
diff --git a/src/value.zig b/src/value.zig
index 96242331f9..2ef18e6198 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -2044,7 +2044,11 @@ pub const Value = extern union {
if (ty.zigTypeTag() == .Vector) {
var i: usize = 0;
while (i < ty.vectorLen()) : (i += 1) {
- if (!compareScalar(lhs.indexVectorlike(i), op, rhs.indexVectorlike(i), ty.scalarType(), mod)) {
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ if (!compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(), mod)) {
return false;
}
}
@@ -2793,27 +2797,6 @@ pub const Value = extern union {
};
}
- /// Index into a vector-like `Value`. Asserts `index` is a valid index for `val`.
- /// Some scalar values are considered vector-like to avoid needing to allocate
- /// a new `repeated` each time a constant is used.
- pub fn indexVectorlike(val: Value, index: usize) Value {
- return switch (val.tag()) {
- .aggregate => val.castTag(.aggregate).?.data[index],
-
- .repeated => val.castTag(.repeated).?.data,
- // These values will implicitly be treated as `repeated`.
- .zero,
- .one,
- .bool_false,
- .bool_true,
- .int_i64,
- .int_u64,
- => val,
-
- else => unreachable,
- };
- }
-
/// Asserts the value is a single-item pointer to an array, or an array,
/// or an unknown-length pointer, and returns the element value at the index.
pub fn elemValue(val: Value, mod: *Module, arena: Allocator, index: usize) !Value {
@@ -2889,18 +2872,21 @@ pub const Value = extern union {
// to have only one possible value itself.
.the_only_possible_value => return val,
- // pointer to integer casted to pointer of array
- .int_u64, .int_i64 => {
- assert(index == 0);
- return val;
- },
-
.opt_payload_ptr => return val.castTag(.opt_payload_ptr).?.data.container_ptr.elemValueAdvanced(mod, index, arena, buffer),
.eu_payload_ptr => return val.castTag(.eu_payload_ptr).?.data.container_ptr.elemValueAdvanced(mod, index, arena, buffer),
.opt_payload => return val.castTag(.opt_payload).?.data.elemValueAdvanced(mod, index, arena, buffer),
.eu_payload => return val.castTag(.eu_payload).?.data.elemValueAdvanced(mod, index, arena, buffer),
+ // These values will implicitly be treated as `repeated`.
+ .zero,
+ .one,
+ .bool_false,
+ .bool_true,
+ .int_i64,
+ .int_u64,
+ => return val,
+
else => unreachable,
}
}
@@ -3172,18 +3158,21 @@ pub const Value = extern union {
};
}
- pub fn intToFloat(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, target: Target) !Value {
- return intToFloatAdvanced(val, arena, int_ty, float_ty, target, null) catch |err| switch (err) {
+ pub fn intToFloat(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module) !Value {
+ return intToFloatAdvanced(val, arena, int_ty, float_ty, mod, null) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => unreachable,
};
}
- pub fn intToFloatAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, target: Target, opt_sema: ?*Sema) !Value {
+ pub fn intToFloatAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value {
+ const target = mod.getTarget();
if (int_ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, int_ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try intToFloatScalar(val.indexVectorlike(i), arena, float_ty.scalarType(), target, opt_sema);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try intToFloatScalar(elem_val, arena, float_ty.scalarType(), target, opt_sema);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -3289,12 +3278,17 @@ pub const Value = extern union {
rhs: Value,
ty: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try intAddSatScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try intAddSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -3333,12 +3327,17 @@ pub const Value = extern union {
rhs: Value,
ty: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try intSubSatScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try intSubSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -3376,13 +3375,18 @@ pub const Value = extern union {
rhs: Value,
ty: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !OverflowArithmeticResult {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const overflowed_data = try arena.alloc(Value, ty.vectorLen());
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- const of_math_result = try intMulWithOverflowScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target);
overflowed_data[i] = of_math_result.overflowed;
scalar.* = of_math_result.wrapped_result;
}
@@ -3435,16 +3439,20 @@ pub const Value = extern union {
rhs: Value,
ty: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try numberMulWrapScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try numberMulWrapScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, mod);
}
return Value.Tag.aggregate.create(arena, result_data);
}
- return numberMulWrapScalar(lhs, rhs, ty, arena, target);
+ return numberMulWrapScalar(lhs, rhs, ty, arena, mod);
}
/// Supports both floats and ints; handles undefined.
@@ -3453,19 +3461,19 @@ pub const Value = extern union {
rhs: Value,
ty: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
if (ty.zigTypeTag() == .ComptimeInt) {
- return intMul(lhs, rhs, ty, arena, target);
+ return intMul(lhs, rhs, ty, arena, mod);
}
if (ty.isAnyFloat()) {
- return floatMul(lhs, rhs, ty, arena, target);
+ return floatMul(lhs, rhs, ty, arena, mod);
}
- const overflow_result = try intMulWithOverflow(lhs, rhs, ty, arena, target);
+ const overflow_result = try intMulWithOverflow(lhs, rhs, ty, arena, mod);
return overflow_result.wrapped_result;
}
@@ -3475,12 +3483,17 @@ pub const Value = extern union {
rhs: Value,
ty: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try intMulSatScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try intMulSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -3547,11 +3560,14 @@ pub const Value = extern union {
}
/// operands must be (vectors of) integers; handles undefined scalars.
- pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, target: Target) !Value {
+ pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try bitwiseNotScalar(val.indexVectorlike(i), ty.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try bitwiseNotScalar(elem_val, ty.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -3583,11 +3599,16 @@ pub const Value = extern union {
}
/// operands must be (vectors of) integers; handles undefined scalars.
- pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
+ pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try bitwiseAndScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try bitwiseAndScalar(lhs_elem, rhs_elem, allocator, target);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
@@ -3615,37 +3636,46 @@ pub const Value = extern union {
}
/// operands must be (vectors of) integers; handles undefined scalars.
- pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, target: Target) !Value {
+ pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try bitwiseNandScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try bitwiseNandScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, mod);
}
return Value.Tag.aggregate.create(arena, result_data);
}
- return bitwiseNandScalar(lhs, rhs, ty, arena, target);
+ return bitwiseNandScalar(lhs, rhs, ty, arena, mod);
}
/// operands must be integers; handles undefined.
- pub fn bitwiseNandScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, target: Target) !Value {
+ pub fn bitwiseNandScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
- const anded = try bitwiseAnd(lhs, rhs, ty, arena, target);
+ const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod);
const all_ones = if (ty.isSignedInt())
try Value.Tag.int_i64.create(arena, -1)
else
- try ty.maxInt(arena, target);
+ try ty.maxInt(arena, mod.getTarget());
- return bitwiseXor(anded, all_ones, ty, arena, target);
+ return bitwiseXor(anded, all_ones, ty, arena, mod);
}
/// operands must be (vectors of) integers; handles undefined scalars.
- pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
+ pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try bitwiseOrScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try bitwiseOrScalar(lhs_elem, rhs_elem, allocator, target);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
@@ -3672,11 +3702,16 @@ pub const Value = extern union {
}
/// operands must be (vectors of) integers; handles undefined scalars.
- pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
+ pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try bitwiseXorScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try bitwiseXorScalar(lhs_elem, rhs_elem, allocator, target);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
@@ -3703,11 +3738,16 @@ pub const Value = extern union {
return fromBigInt(arena, result_bigint.toConst());
}
- pub fn intDiv(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
+ pub fn intDiv(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try intDivScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try intDivScalar(lhs_elem, rhs_elem, allocator, target);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
@@ -3739,11 +3779,16 @@ pub const Value = extern union {
return fromBigInt(allocator, result_q.toConst());
}
- pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
+ pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try intDivFloorScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try intDivFloorScalar(lhs_elem, rhs_elem, allocator, target);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
@@ -3775,11 +3820,16 @@ pub const Value = extern union {
return fromBigInt(allocator, result_q.toConst());
}
- pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
+ pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try intModScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try intModScalar(lhs_elem, rhs_elem, allocator, target);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
@@ -3846,11 +3896,16 @@ pub const Value = extern union {
};
}
- pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, target: Target) !Value {
+ pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try floatRemScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try floatRemScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -3888,11 +3943,16 @@ pub const Value = extern union {
}
}
- pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, target: Target) !Value {
+ pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try floatModScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try floatModScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -3930,11 +3990,16 @@ pub const Value = extern union {
}
}
- pub fn intMul(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
+ pub fn intMul(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try intMulScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try intMulScalar(lhs_elem, rhs_elem, allocator, target);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
@@ -3962,11 +4027,14 @@ pub const Value = extern union {
return fromBigInt(allocator, result_bigint.toConst());
}
- pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, target: Target) !Value {
+ pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, mod: *Module) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try intTruncScalar(val.indexVectorlike(i), allocator, signedness, bits, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try intTruncScalar(elem_val, allocator, signedness, bits, target);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
@@ -3980,12 +4048,17 @@ pub const Value = extern union {
allocator: Allocator,
signedness: std.builtin.Signedness,
bits: Value,
- target: Target,
+ mod: *Module,
) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try intTruncScalar(val.indexVectorlike(i), allocator, signedness, @intCast(u16, bits.indexVectorlike(i).toUnsignedInt(target)), target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ var bits_buf: Value.ElemValueBuffer = undefined;
+ const bits_elem = bits.elemValueBuffer(mod, i, &bits_buf);
+ scalar.* = try intTruncScalar(elem_val, allocator, signedness, @intCast(u16, bits_elem.toUnsignedInt(target)), target);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
@@ -4008,11 +4081,16 @@ pub const Value = extern union {
return fromBigInt(allocator, result_bigint.toConst());
}
- pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
+ pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try shlScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try shlScalar(lhs_elem, rhs_elem, allocator, target);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
@@ -4043,13 +4121,18 @@ pub const Value = extern union {
rhs: Value,
ty: Type,
allocator: Allocator,
- target: Target,
+ mod: *Module,
) !OverflowArithmeticResult {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const overflowed_data = try allocator.alloc(Value, ty.vectorLen());
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- const of_math_result = try shlWithOverflowScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), allocator, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(), allocator, target);
overflowed_data[i] = of_math_result.overflowed;
scalar.* = of_math_result.wrapped_result;
}
@@ -4097,12 +4180,17 @@ pub const Value = extern union {
rhs: Value,
ty: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try shlSatScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try shlSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4141,16 +4229,20 @@ pub const Value = extern union {
rhs: Value,
ty: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try shlTruncScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try shlTruncScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, mod);
}
return Value.Tag.aggregate.create(arena, result_data);
}
- return shlTruncScalar(lhs, rhs, ty, arena, target);
+ return shlTruncScalar(lhs, rhs, ty, arena, mod);
}
pub fn shlTruncScalar(
@@ -4158,19 +4250,24 @@ pub const Value = extern union {
rhs: Value,
ty: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
- const shifted = try lhs.shl(rhs, ty, arena, target);
- const int_info = ty.intInfo(target);
- const truncated = try shifted.intTrunc(ty, arena, int_info.signedness, int_info.bits, target);
+ const shifted = try lhs.shl(rhs, ty, arena, mod);
+ const int_info = ty.intInfo(mod.getTarget());
+ const truncated = try shifted.intTrunc(ty, arena, int_info.signedness, int_info.bits, mod);
return truncated;
}
- pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
+ pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try shrScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try shrScalar(lhs_elem, rhs_elem, allocator, target);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
@@ -4212,12 +4309,15 @@ pub const Value = extern union {
val: Value,
float_type: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try floatNegScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try floatNegScalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4245,12 +4345,17 @@ pub const Value = extern union {
rhs: Value,
float_type: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try floatDivScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try floatDivScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4299,12 +4404,17 @@ pub const Value = extern union {
rhs: Value,
float_type: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try floatDivFloorScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try floatDivFloorScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4353,12 +4463,17 @@ pub const Value = extern union {
rhs: Value,
float_type: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try floatDivTruncScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try floatDivTruncScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4407,12 +4522,17 @@ pub const Value = extern union {
rhs: Value,
float_type: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try floatMulScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var lhs_buf: Value.ElemValueBuffer = undefined;
+ var rhs_buf: Value.ElemValueBuffer = undefined;
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+ scalar.* = try floatMulScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4456,11 +4576,14 @@ pub const Value = extern union {
}
}
- pub fn sqrt(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ pub fn sqrt(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try sqrtScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try sqrtScalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4493,11 +4616,14 @@ pub const Value = extern union {
}
}
- pub fn sin(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ pub fn sin(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try sinScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try sinScalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4530,11 +4656,14 @@ pub const Value = extern union {
}
}
- pub fn cos(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ pub fn cos(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try cosScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try cosScalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4567,11 +4696,14 @@ pub const Value = extern union {
}
}
- pub fn tan(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ pub fn tan(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try tanScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try tanScalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4604,11 +4736,14 @@ pub const Value = extern union {
}
}
- pub fn exp(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ pub fn exp(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try expScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try expScalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4641,11 +4776,14 @@ pub const Value = extern union {
}
}
- pub fn exp2(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ pub fn exp2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try exp2Scalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try exp2Scalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4678,11 +4816,14 @@ pub const Value = extern union {
}
}
- pub fn log(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ pub fn log(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try logScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try logScalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4715,11 +4856,14 @@ pub const Value = extern union {
}
}
- pub fn log2(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ pub fn log2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try log2Scalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try log2Scalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4752,11 +4896,14 @@ pub const Value = extern union {
}
}
- pub fn log10(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ pub fn log10(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try log10Scalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try log10Scalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4789,11 +4936,14 @@ pub const Value = extern union {
}
}
- pub fn fabs(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ pub fn fabs(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try fabsScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try fabsScalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4826,11 +4976,14 @@ pub const Value = extern union {
}
}
- pub fn floor(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ pub fn floor(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try floorScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try floorScalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4863,11 +5016,14 @@ pub const Value = extern union {
}
}
- pub fn ceil(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ pub fn ceil(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try ceilScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try ceilScalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4900,11 +5056,14 @@ pub const Value = extern union {
}
}
- pub fn round(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ pub fn round(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try roundScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try roundScalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4937,11 +5096,14 @@ pub const Value = extern union {
}
}
- pub fn trunc(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ pub fn trunc(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
- scalar.* = try truncScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ var buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(mod, i, &buf);
+ scalar.* = try truncScalar(elem_val, float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4980,16 +5142,23 @@ pub const Value = extern union {
mulend2: Value,
addend: Value,
arena: Allocator,
- target: Target,
- ) Allocator.Error!Value {
+ mod: *Module,
+ ) !Value {
+ const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
+ var mulend1_buf: Value.ElemValueBuffer = undefined;
+ const mulend1_elem = mulend1.elemValueBuffer(mod, i, &mulend1_buf);
+ var mulend2_buf: Value.ElemValueBuffer = undefined;
+ const mulend2_elem = mulend2.elemValueBuffer(mod, i, &mulend2_buf);
+ var addend_buf: Value.ElemValueBuffer = undefined;
+ const addend_elem = addend.elemValueBuffer(mod, i, &addend_buf);
scalar.* = try mulAddScalar(
float_type.scalarType(),
- mulend1.indexVectorlike(i),
- mulend2.indexVectorlike(i),
- addend.indexVectorlike(i),
+ mulend1_elem,
+ mulend2_elem,
+ addend_elem,
arena,
target,
);