Diffstat (limited to 'src')
-rw-r--r--  src/Sema.zig   513
-rw-r--r--  src/value.zig  771
2 files changed, 1006 insertions, 278 deletions
diff --git a/src/Sema.zig b/src/Sema.zig
index 20622cb98a..500dec5246 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -2105,7 +2105,7 @@ fn zirEnumDecl(
});
} else if (any_values) {
const tag_val = if (last_tag_val) |val|
- try val.intAdd(Value.one, sema.arena)
+ try val.intAdd(Value.one, enum_obj.tag_ty, sema.arena)
else
Value.zero;
last_tag_val = tag_val;
@@ -8192,14 +8192,22 @@ fn zirShl(
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
+ const src = inst_data.src();
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const lhs = sema.resolveInst(extra.lhs);
const rhs = sema.resolveInst(extra.rhs);
+ const lhs_ty = sema.typeOf(lhs);
+ const rhs_ty = sema.typeOf(rhs);
+ const target = sema.mod.getTarget();
+ try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
+
+ const scalar_ty = lhs_ty.scalarType();
+ const scalar_rhs_ty = rhs_ty.scalarType();
// TODO coerce rhs if air_tag is not shl_sat
- const rhs_is_comptime_int = try sema.checkIntType(block, rhs_src, sema.typeOf(rhs));
+ const rhs_is_comptime_int = try sema.checkIntType(block, rhs_src, scalar_rhs_ty);
const maybe_lhs_val = try sema.resolveMaybeUndefVal(block, lhs_src, lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefVal(block, rhs_src, rhs);
@@ -8213,35 +8221,31 @@ fn zirShl(
}
}
- const lhs_ty = sema.typeOf(lhs);
- const rhs_ty = sema.typeOf(rhs);
- const target = sema.mod.getTarget();
-
const runtime_src = if (maybe_lhs_val) |lhs_val| rs: {
if (lhs_val.isUndef()) return sema.addConstUndef(lhs_ty);
const rhs_val = maybe_rhs_val orelse break :rs rhs_src;
const val = switch (air_tag) {
.shl_exact => val: {
- const shifted = try lhs_val.shl(rhs_val, sema.arena);
- if (lhs_ty.zigTypeTag() == .ComptimeInt) {
+ const shifted = try lhs_val.shl(rhs_val, lhs_ty, sema.arena);
+ if (scalar_ty.zigTypeTag() == .ComptimeInt) {
break :val shifted;
}
- const int_info = lhs_ty.intInfo(target);
- const truncated = try shifted.intTrunc(sema.arena, int_info.signedness, int_info.bits);
- if (truncated.compareHetero(.eq, shifted)) {
+ const int_info = scalar_ty.intInfo(target);
+ const truncated = try shifted.intTrunc(lhs_ty, sema.arena, int_info.signedness, int_info.bits);
+ if (truncated.compare(.eq, shifted, lhs_ty)) {
break :val shifted;
}
return sema.addConstUndef(lhs_ty);
},
- .shl_sat => if (lhs_ty.zigTypeTag() == .ComptimeInt)
- try lhs_val.shl(rhs_val, sema.arena)
+ .shl_sat => if (scalar_ty.zigTypeTag() == .ComptimeInt)
+ try lhs_val.shl(rhs_val, lhs_ty, sema.arena)
else
try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, target),
- .shl => if (lhs_ty.zigTypeTag() == .ComptimeInt)
- try lhs_val.shl(rhs_val, sema.arena)
+ .shl => if (scalar_ty.zigTypeTag() == .ComptimeInt)
+ try lhs_val.shl(rhs_val, lhs_ty, sema.arena)
else
try lhs_val.shlTrunc(rhs_val, lhs_ty, sema.arena, target),
@@ -8256,7 +8260,7 @@ fn zirShl(
const new_rhs = if (air_tag == .shl_sat) rhs: {
// Limit the RHS type for saturating shl to be an integer as small as the LHS.
if (rhs_is_comptime_int or
- rhs_ty.intInfo(target).bits > lhs_ty.intInfo(target).bits)
+ scalar_rhs_ty.intInfo(target).bits > scalar_ty.intInfo(target).bits)
{
const max_int = try sema.addConstant(
lhs_ty,
@@ -8283,15 +8287,18 @@ fn zirShr(
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
+ const src = inst_data.src();
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const lhs = sema.resolveInst(extra.lhs);
const rhs = sema.resolveInst(extra.rhs);
+ const lhs_ty = sema.typeOf(lhs);
+ const rhs_ty = sema.typeOf(rhs);
+ try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
const runtime_src = if (try sema.resolveMaybeUndefVal(block, rhs_src, rhs)) |rhs_val| rs: {
if (try sema.resolveMaybeUndefVal(block, lhs_src, lhs)) |lhs_val| {
- const lhs_ty = sema.typeOf(lhs);
if (lhs_val.isUndef() or rhs_val.isUndef()) {
return sema.addConstUndef(lhs_ty);
}
@@ -8301,13 +8308,12 @@ fn zirShr(
}
if (air_tag == .shr_exact) {
// Detect if any ones would be shifted out.
- const bits = @intCast(u16, rhs_val.toUnsignedInt());
- const truncated = try lhs_val.intTrunc(sema.arena, .unsigned, bits);
+ const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val);
if (!truncated.compareWithZero(.eq)) {
return sema.addConstUndef(lhs_ty);
}
}
- const val = try lhs_val.shr(rhs_val, sema.arena);
+ const val = try lhs_val.shr(rhs_val, lhs_ty, sema.arena);
return sema.addConstant(lhs_ty, val);
} else {
// Even if lhs is not comptime known, we can still deduce certain things based
@@ -8342,32 +8348,15 @@ fn zirBitwise(
const rhs = sema.resolveInst(extra.rhs);
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
+ try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]LazySrcLoc{ lhs_src, rhs_src } });
- const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
- const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
-
- const scalar_type = if (resolved_type.zigTypeTag() == .Vector)
- resolved_type.elemType()
- else
- resolved_type;
-
+ const scalar_type = resolved_type.scalarType();
const scalar_tag = scalar_type.zigTypeTag();
- if (lhs_ty.zigTypeTag() == .Vector and rhs_ty.zigTypeTag() == .Vector) {
- if (lhs_ty.arrayLen() != rhs_ty.arrayLen()) {
- return sema.fail(block, src, "vector length mismatch: {d} and {d}", .{
- lhs_ty.arrayLen(),
- rhs_ty.arrayLen(),
- });
- }
- } else if (lhs_ty.zigTypeTag() == .Vector or rhs_ty.zigTypeTag() == .Vector) {
- return sema.fail(block, src, "mixed scalar and vector operands to binary expression: '{}' and '{}'", .{
- lhs_ty,
- rhs_ty,
- });
- }
+ const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
+ const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
@@ -8377,16 +8366,13 @@ fn zirBitwise(
if (try sema.resolveMaybeUndefVal(block, lhs_src, casted_lhs)) |lhs_val| {
if (try sema.resolveMaybeUndefVal(block, rhs_src, casted_rhs)) |rhs_val| {
- if (resolved_type.zigTypeTag() == .Vector) {
- return sema.fail(block, src, "TODO implement zirBitwise for vectors at comptime", .{});
- }
const result_val = switch (air_tag) {
- .bit_and => try lhs_val.bitwiseAnd(rhs_val, sema.arena),
- .bit_or => try lhs_val.bitwiseOr(rhs_val, sema.arena),
- .xor => try lhs_val.bitwiseXor(rhs_val, sema.arena),
+ .bit_and => try lhs_val.bitwiseAnd(rhs_val, resolved_type, sema.arena),
+ .bit_or => try lhs_val.bitwiseOr(rhs_val, resolved_type, sema.arena),
+ .xor => try lhs_val.bitwiseXor(rhs_val, resolved_type, sema.arena),
else => unreachable,
};
- return sema.addConstant(scalar_type, result_val);
+ return sema.addConstant(resolved_type, result_val);
}
}
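With the comptime-vector TODO removed above, zirBitwise now folds `&`, `|` and `^` element-wise for vector operands by passing the resolved vector type down to the Value helpers. A minimal user-level sketch of the behavior this enables; the test below is illustrative and not part of this commit:

const std = @import("std");

test "comptime bitwise operators on vectors" {
    comptime {
        const a = @Vector(2, u8){ 0b1100, 0b0110 };
        const b = @Vector(2, u8){ 0b1010, 0b0011 };
        // Each operator applies element-wise and is fully evaluated at comptime.
        try std.testing.expect(@reduce(.And, (a & b) == @Vector(2, u8){ 0b1000, 0b0010 }));
        try std.testing.expect(@reduce(.And, (a | b) == @Vector(2, u8){ 0b1110, 0b0111 }));
        try std.testing.expect(@reduce(.And, (a ^ b) == @Vector(2, u8){ 0b0110, 0b0101 }));
    }
}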
@@ -8413,9 +8399,9 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
const target = sema.mod.getTarget();
if (val.isUndef()) {
- return sema.addConstUndef(scalar_type);
+ return sema.addConstUndef(operand_type);
} else if (operand_type.zigTypeTag() == .Vector) {
- const vec_len = try sema.usizeCast(block, operand_src, operand_type.arrayLen());
+ const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen());
var elem_val_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, vec_len);
for (elems) |*elem, i| {
@@ -8427,8 +8413,8 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
try Value.Tag.aggregate.create(sema.arena, elems),
);
} else {
- const result_val = try val.bitwiseNot(scalar_type, sema.arena, target);
- return sema.addConstant(scalar_type, result_val);
+ const result_val = try val.bitwiseNot(operand_type, sema.arena, target);
+ return sema.addConstant(operand_type, result_val);
}
}
@@ -8780,8 +8766,19 @@ fn zirNegate(
const src = inst_data.src();
const lhs_src = src;
const rhs_src = src; // TODO better source location
- const lhs = sema.resolveInst(.zero);
+
const rhs = sema.resolveInst(inst_data.operand);
+ const rhs_ty = sema.typeOf(rhs);
+ const rhs_scalar_ty = rhs_ty.scalarType();
+
+ if (tag_override == .sub and rhs_scalar_ty.isUnsignedInt()) {
+ return sema.fail(block, src, "negation of type '{}'", .{rhs_ty});
+ }
+
+ const lhs = if (rhs_ty.zigTypeTag() == .Vector)
+ try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, Value.zero))
+ else
+ sema.resolveInst(.zero);
return sema.analyzeArithmetic(block, tag_override, lhs, rhs, src, lhs_src, rhs_src);
}
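zirNegate above now splats a zero vector for the LHS and rejects negation of unsigned scalar types, so negating a signed-int or float vector folds at comptime. A short illustrative test of the accepted case, not taken from this commit:

const std = @import("std");

test "comptime negation of a vector" {
    comptime {
        const v = @Vector(3, i32){ 1, -2, 3 };
        // Lowered as `zero_splat - v`, element by element.
        try std.testing.expect(@reduce(.And, -v == @Vector(3, i32){ -1, 2, -3 }));
    }
}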
@@ -8999,18 +8996,8 @@ fn analyzeArithmetic(
const rhs_ty = sema.typeOf(rhs);
const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
- if (lhs_zig_ty_tag == .Vector and rhs_zig_ty_tag == .Vector) {
- if (lhs_ty.arrayLen() != rhs_ty.arrayLen()) {
- return sema.fail(block, src, "vector length mismatch: {d} and {d}", .{
- lhs_ty.arrayLen(), rhs_ty.arrayLen(),
- });
- }
- return sema.fail(block, src, "TODO implement support for vectors in Sema.analyzeArithmetic", .{});
- } else if (lhs_zig_ty_tag == .Vector or rhs_zig_ty_tag == .Vector) {
- return sema.fail(block, src, "mixed scalar and vector operands to binary expression: '{}' and '{}'", .{
- lhs_ty, rhs_ty,
- });
- }
+ try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
+
if (lhs_zig_ty_tag == .Pointer) switch (lhs_ty.ptrSize()) {
.One, .Slice => {},
.Many, .C => {
@@ -9033,15 +9020,13 @@ fn analyzeArithmetic(
const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
.override = &[_]LazySrcLoc{ lhs_src, rhs_src },
});
+
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
- const scalar_type = if (resolved_type.zigTypeTag() == .Vector)
- resolved_type.elemType()
- else
- resolved_type;
-
- const scalar_tag = scalar_type.zigTypeTag();
+ const lhs_scalar_ty = lhs_ty.scalarType();
+ const rhs_scalar_ty = rhs_ty.scalarType();
+ const scalar_tag = resolved_type.scalarType().zigTypeTag();
const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
const is_float = scalar_tag == .Float or scalar_tag == .ComptimeFloat;
@@ -9075,7 +9060,7 @@ fn analyzeArithmetic(
if (is_int) {
return sema.failWithUseOfUndef(block, rhs_src);
} else {
- return sema.addConstUndef(scalar_type);
+ return sema.addConstUndef(resolved_type);
}
}
if (rhs_val.compareWithZero(.eq)) {
@@ -9087,19 +9072,19 @@ fn analyzeArithmetic(
if (is_int) {
return sema.failWithUseOfUndef(block, lhs_src);
} else {
- return sema.addConstUndef(scalar_type);
+ return sema.addConstUndef(resolved_type);
}
}
if (maybe_rhs_val) |rhs_val| {
if (is_int) {
return sema.addConstant(
- scalar_type,
- try lhs_val.intAdd(rhs_val, sema.arena),
+ resolved_type,
+ try lhs_val.intAdd(rhs_val, resolved_type, sema.arena),
);
} else {
return sema.addConstant(
- scalar_type,
- try lhs_val.floatAdd(rhs_val, scalar_type, sema.arena, target),
+ resolved_type,
+ try lhs_val.floatAdd(rhs_val, resolved_type, sema.arena, target),
);
}
} else break :rs .{ .src = rhs_src, .air_tag = .add };
@@ -9116,15 +9101,15 @@ fn analyzeArithmetic(
}
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef()) {
- return sema.addConstUndef(scalar_type);
+ return sema.addConstUndef(resolved_type);
}
if (rhs_val.compareWithZero(.eq)) {
return casted_lhs;
}
if (maybe_lhs_val) |lhs_val| {
return sema.addConstant(
- scalar_type,
- try lhs_val.numberAddWrap(rhs_val, scalar_type, sema.arena, target),
+ resolved_type,
+ try lhs_val.numberAddWrap(rhs_val, resolved_type, sema.arena, target),
);
} else break :rs .{ .src = lhs_src, .air_tag = .addwrap };
} else break :rs .{ .src = rhs_src, .air_tag = .addwrap };
@@ -9140,18 +9125,18 @@ fn analyzeArithmetic(
}
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef()) {
- return sema.addConstUndef(scalar_type);
+ return sema.addConstUndef(resolved_type);
}
if (rhs_val.compareWithZero(.eq)) {
return casted_lhs;
}
if (maybe_lhs_val) |lhs_val| {
const val = if (scalar_tag == .ComptimeInt)
- try lhs_val.intAdd(rhs_val, sema.arena)
+ try lhs_val.intAdd(rhs_val, resolved_type, sema.arena)
else
- try lhs_val.intAddSat(rhs_val, scalar_type, sema.arena, target);
+ try lhs_val.intAddSat(rhs_val, resolved_type, sema.arena, target);
- return sema.addConstant(scalar_type, val);
+ return sema.addConstant(resolved_type, val);
} else break :rs .{ .src = lhs_src, .air_tag = .add_sat };
} else break :rs .{ .src = rhs_src, .air_tag = .add_sat };
},
@@ -9168,7 +9153,7 @@ fn analyzeArithmetic(
if (is_int) {
return sema.failWithUseOfUndef(block, rhs_src);
} else {
- return sema.addConstUndef(scalar_type);
+ return sema.addConstUndef(resolved_type);
}
}
if (rhs_val.compareWithZero(.eq)) {
@@ -9180,19 +9165,19 @@ fn analyzeArithmetic(
if (is_int) {
return sema.failWithUseOfUndef(block, lhs_src);
} else {
- return sema.addConstUndef(scalar_type);
+ return sema.addConstUndef(resolved_type);
}
}
if (maybe_rhs_val) |rhs_val| {
if (is_int) {
return sema.addConstant(
- scalar_type,
- try lhs_val.intSub(rhs_val, sema.arena),
+ resolved_type,
+ try lhs_val.intSub(rhs_val, resolved_type, sema.arena),
);
} else {
return sema.addConstant(
- scalar_type,
- try lhs_val.floatSub(rhs_val, scalar_type, sema.arena, target),
+ resolved_type,
+ try lhs_val.floatSub(rhs_val, resolved_type, sema.arena, target),
);
}
} else break :rs .{ .src = rhs_src, .air_tag = .sub };
@@ -9204,7 +9189,7 @@ fn analyzeArithmetic(
// If either of the operands are undefined, the result is undefined.
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef()) {
- return sema.addConstUndef(scalar_type);
+ return sema.addConstUndef(resolved_type);
}
if (rhs_val.compareWithZero(.eq)) {
return casted_lhs;
@@ -9212,12 +9197,12 @@ fn analyzeArithmetic(
}
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef()) {
- return sema.addConstUndef(scalar_type);
+ return sema.addConstUndef(resolved_type);
}
if (maybe_rhs_val) |rhs_val| {
return sema.addConstant(
- scalar_type,
- try lhs_val.numberSubWrap(rhs_val, scalar_type, sema.arena, target),
+ resolved_type,
+ try lhs_val.numberSubWrap(rhs_val, resolved_type, sema.arena, target),
);
} else break :rs .{ .src = rhs_src, .air_tag = .subwrap };
} else break :rs .{ .src = lhs_src, .air_tag = .subwrap };
@@ -9228,7 +9213,7 @@ fn analyzeArithmetic(
// If either of the operands are undefined, result is undefined.
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef()) {
- return sema.addConstUndef(scalar_type);
+ return sema.addConstUndef(resolved_type);
}
if (rhs_val.compareWithZero(.eq)) {
return casted_lhs;
@@ -9236,15 +9221,15 @@ fn analyzeArithmetic(
}
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef()) {
- return sema.addConstUndef(scalar_type);
+ return sema.addConstUndef(resolved_type);
}
if (maybe_rhs_val) |rhs_val| {
const val = if (scalar_tag == .ComptimeInt)
- try lhs_val.intSub(rhs_val, sema.arena)
+ try lhs_val.intSub(rhs_val, resolved_type, sema.arena)
else
- try lhs_val.intSubSat(rhs_val, scalar_type, sema.arena, target);
+ try lhs_val.intSubSat(rhs_val, resolved_type, sema.arena, target);
- return sema.addConstant(scalar_type, val);
+ return sema.addConstant(resolved_type, val);
} else break :rs .{ .src = rhs_src, .air_tag = .sub_sat };
} else break :rs .{ .src = lhs_src, .air_tag = .sub_sat };
},
@@ -9274,7 +9259,7 @@ fn analyzeArithmetic(
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef()) {
if (lhs_val.compareWithZero(.eq)) {
- return sema.addConstant(scalar_type, Value.zero);
+ return sema.addConstant(resolved_type, Value.zero);
}
}
}
@@ -9288,27 +9273,27 @@ fn analyzeArithmetic(
}
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef()) {
- if (lhs_ty.isSignedInt() and rhs_ty.isSignedInt()) {
+ if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) {
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.compare(.neq, Value.negative_one, scalar_type)) {
- return sema.addConstUndef(scalar_type);
+ if (rhs_val.compare(.neq, Value.negative_one, rhs_ty)) {
+ return sema.addConstUndef(resolved_type);
}
}
return sema.failWithUseOfUndef(block, rhs_src);
}
- return sema.addConstUndef(scalar_type);
+ return sema.addConstUndef(resolved_type);
}
if (maybe_rhs_val) |rhs_val| {
if (is_int) {
return sema.addConstant(
- scalar_type,
- try lhs_val.intDiv(rhs_val, sema.arena),
+ resolved_type,
+ try lhs_val.intDiv(rhs_val, resolved_type, sema.arena),
);
} else {
return sema.addConstant(
- scalar_type,
- try lhs_val.floatDiv(rhs_val, scalar_type, sema.arena, target),
+ resolved_type,
+ try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, target),
);
}
} else {
@@ -9349,7 +9334,7 @@ fn analyzeArithmetic(
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef()) {
if (lhs_val.compareWithZero(.eq)) {
- return sema.addConstant(scalar_type, Value.zero);
+ return sema.addConstant(resolved_type, Value.zero);
}
}
}
@@ -9363,27 +9348,27 @@ fn analyzeArithmetic(
}
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef()) {
- if (lhs_ty.isSignedInt() and rhs_ty.isSignedInt()) {
+ if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) {
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.compare(.neq, Value.negative_one, scalar_type)) {
- return sema.addConstUndef(scalar_type);
+ if (rhs_val.compare(.neq, Value.negative_one, rhs_ty)) {
+ return sema.addConstUndef(resolved_type);
}
}
return sema.failWithUseOfUndef(block, rhs_src);
}
- return sema.addConstUndef(scalar_type);
+ return sema.addConstUndef(resolved_type);
}
if (maybe_rhs_val) |rhs_val| {
if (is_int) {
return sema.addConstant(
- scalar_type,
- try lhs_val.intDiv(rhs_val, sema.arena),
+ resolved_type,
+ try lhs_val.intDiv(rhs_val, resolved_type, sema.arena),
);
} else {
return sema.addConstant(
- scalar_type,
- try lhs_val.floatDivTrunc(rhs_val, scalar_type, sema.arena, target),
+ resolved_type,
+ try lhs_val.floatDivTrunc(rhs_val, resolved_type, sema.arena, target),
);
}
} else break :rs .{ .src = rhs_src, .air_tag = .div_trunc };
@@ -9412,7 +9397,7 @@ fn analyzeArithmetic(
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef()) {
if (lhs_val.compareWithZero(.eq)) {
- return sema.addConstant(scalar_type, Value.zero);
+ return sema.addConstant(resolved_type, Value.zero);
}
}
}
@@ -9426,27 +9411,27 @@ fn analyzeArithmetic(
}
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef()) {
- if (lhs_ty.isSignedInt() and rhs_ty.isSignedInt()) {
+ if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) {
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.compare(.neq, Value.negative_one, scalar_type)) {
- return sema.addConstUndef(scalar_type);
+ if (rhs_val.compare(.neq, Value.negative_one, rhs_ty)) {
+ return sema.addConstUndef(resolved_type);
}
}
return sema.failWithUseOfUndef(block, rhs_src);
}
- return sema.addConstUndef(scalar_type);
+ return sema.addConstUndef(resolved_type);
}
if (maybe_rhs_val) |rhs_val| {
if (is_int) {
return sema.addConstant(
- scalar_type,
- try lhs_val.intDivFloor(rhs_val, sema.arena),
+ resolved_type,
+ try lhs_val.intDivFloor(rhs_val, resolved_type, sema.arena),
);
} else {
return sema.addConstant(
- scalar_type,
- try lhs_val.floatDivFloor(rhs_val, scalar_type, sema.arena, target),
+ resolved_type,
+ try lhs_val.floatDivFloor(rhs_val, resolved_type, sema.arena, target),
);
}
} else break :rs .{ .src = rhs_src, .air_tag = .div_floor };
@@ -9474,7 +9459,7 @@ fn analyzeArithmetic(
return sema.failWithUseOfUndef(block, rhs_src);
} else {
if (lhs_val.compareWithZero(.eq)) {
- return sema.addConstant(scalar_type, Value.zero);
+ return sema.addConstant(resolved_type, Value.zero);
}
}
}
@@ -9491,14 +9476,14 @@ fn analyzeArithmetic(
if (is_int) {
// TODO: emit compile error if there is a remainder
return sema.addConstant(
- scalar_type,
- try lhs_val.intDiv(rhs_val, sema.arena),
+ resolved_type,
+ try lhs_val.intDiv(rhs_val, resolved_type, sema.arena),
);
} else {
// TODO: emit compile error if there is a remainder
return sema.addConstant(
- scalar_type,
- try lhs_val.floatDiv(rhs_val, scalar_type, sema.arena, target),
+ resolved_type,
+ try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, target),
);
}
} else break :rs .{ .src = rhs_src, .air_tag = .div_exact };
@@ -9516,9 +9501,9 @@ fn analyzeArithmetic(
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef()) {
if (lhs_val.compareWithZero(.eq)) {
- return sema.addConstant(scalar_type, Value.zero);
+ return sema.addConstant(resolved_type, Value.zero);
}
- if (lhs_val.compare(.eq, Value.one, scalar_type)) {
+ if (lhs_val.compare(.eq, Value.one, lhs_ty)) {
return casted_rhs;
}
}
@@ -9528,13 +9513,13 @@ fn analyzeArithmetic(
if (is_int) {
return sema.failWithUseOfUndef(block, rhs_src);
} else {
- return sema.addConstUndef(scalar_type);
+ return sema.addConstUndef(resolved_type);
}
}
if (rhs_val.compareWithZero(.eq)) {
- return sema.addConstant(scalar_type, Value.zero);
+ return sema.addConstant(resolved_type, Value.zero);
}
- if (rhs_val.compare(.eq, Value.one, scalar_type)) {
+ if (rhs_val.compare(.eq, Value.one, rhs_ty)) {
return casted_lhs;
}
if (maybe_lhs_val) |lhs_val| {
@@ -9542,18 +9527,18 @@ fn analyzeArithmetic(
if (is_int) {
return sema.failWithUseOfUndef(block, lhs_src);
} else {
- return sema.addConstUndef(scalar_type);
+ return sema.addConstUndef(resolved_type);
}
}
if (is_int) {
return sema.addConstant(
- scalar_type,
- try lhs_val.intMul(rhs_val, sema.arena),
+ resolved_type,
+ try lhs_val.intMul(rhs_val, resolved_type, sema.arena),
);
} else {
return sema.addConstant(
- scalar_type,
- try lhs_val.floatMul(rhs_val, scalar_type, sema.arena, target),
+ resolved_type,
+ try lhs_val.floatMul(rhs_val, resolved_type, sema.arena, target),
);
}
} else break :rs .{ .src = lhs_src, .air_tag = .mul };
@@ -9567,30 +9552,30 @@ fn analyzeArithmetic(
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef()) {
if (lhs_val.compareWithZero(.eq)) {
- return sema.addConstant(scalar_type, Value.zero);
+ return sema.addConstant(resolved_type, Value.zero);
}
- if (lhs_val.compare(.eq, Value.one, scalar_type)) {
+ if (lhs_val.compare(.eq, Value.one, lhs_ty)) {
return casted_rhs;
}
}
}
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef()) {
- return sema.addConstUndef(scalar_type);
+ return sema.addConstUndef(resolved_type);
}
if (rhs_val.compareWithZero(.eq)) {
- return sema.addConstant(scalar_type, Value.zero);
+ return sema.addConstant(resolved_type, Value.zero);
}
- if (rhs_val.compare(.eq, Value.one, scalar_type)) {
+ if (rhs_val.compare(.eq, Value.one, rhs_ty)) {
return casted_lhs;
}
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef()) {
- return sema.addConstUndef(scalar_type);
+ return sema.addConstUndef(resolved_type);
}
return sema.addConstant(
- scalar_type,
- try lhs_val.numberMulWrap(rhs_val, scalar_type, sema.arena, target),
+ resolved_type,
+ try lhs_val.numberMulWrap(rhs_val, resolved_type, sema.arena, target),
);
} else break :rs .{ .src = lhs_src, .air_tag = .mulwrap };
} else break :rs .{ .src = rhs_src, .air_tag = .mulwrap };
@@ -9603,34 +9588,34 @@ fn analyzeArithmetic(
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef()) {
if (lhs_val.compareWithZero(.eq)) {
- return sema.addConstant(scalar_type, Value.zero);
+ return sema.addConstant(resolved_type, Value.zero);
}
- if (lhs_val.compare(.eq, Value.one, scalar_type)) {
+ if (lhs_val.compare(.eq, Value.one, lhs_ty)) {
return casted_rhs;
}
}
}
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef()) {
- return sema.addConstUndef(scalar_type);
+ return sema.addConstUndef(resolved_type);
}
if (rhs_val.compareWithZero(.eq)) {
- return sema.addConstant(scalar_type, Value.zero);
+ return sema.addConstant(resolved_type, Value.zero);
}
- if (rhs_val.compare(.eq, Value.one, scalar_type)) {
+ if (rhs_val.compare(.eq, Value.one, rhs_ty)) {
return casted_lhs;
}
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef()) {
- return sema.addConstUndef(scalar_type);
+ return sema.addConstUndef(resolved_type);
}
const val = if (scalar_tag == .ComptimeInt)
- try lhs_val.intMul(rhs_val, sema.arena)
+ try lhs_val.intMul(rhs_val, resolved_type, sema.arena)
else
- try lhs_val.intMulSat(rhs_val, scalar_type, sema.arena, target);
+ try lhs_val.intMulSat(rhs_val, resolved_type, sema.arena, target);
- return sema.addConstant(scalar_type, val);
+ return sema.addConstant(resolved_type, val);
} else break :rs .{ .src = lhs_src, .air_tag = .mul_sat };
} else break :rs .{ .src = rhs_src, .air_tag = .mul_sat };
},
@@ -9654,9 +9639,9 @@ fn analyzeArithmetic(
return sema.failWithUseOfUndef(block, lhs_src);
}
if (lhs_val.compareWithZero(.eq)) {
- return sema.addConstant(scalar_type, Value.zero);
+ return sema.addConstant(resolved_type, Value.zero);
}
- } else if (lhs_ty.isSignedInt()) {
+ } else if (lhs_scalar_ty.isSignedInt()) {
return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
}
if (maybe_rhs_val) |rhs_val| {
@@ -9667,7 +9652,7 @@ fn analyzeArithmetic(
return sema.failWithDivideByZero(block, rhs_src);
}
if (maybe_lhs_val) |lhs_val| {
- const rem_result = try lhs_val.intRem(rhs_val, sema.arena);
+ const rem_result = try lhs_val.intRem(rhs_val, resolved_type, sema.arena);
// If this answer could possibly be different by doing `intMod`,
// we must emit a compile error. Otherwise, it's OK.
if (rhs_val.compareWithZero(.lt) != lhs_val.compareWithZero(.lt) and
@@ -9681,12 +9666,12 @@ fn analyzeArithmetic(
}
if (lhs_val.compareWithZero(.lt)) {
// Negative
- return sema.addConstant(scalar_type, Value.zero);
+ return sema.addConstant(resolved_type, Value.zero);
}
- return sema.addConstant(scalar_type, rem_result);
+ return sema.addConstant(resolved_type, rem_result);
}
break :rs .{ .src = lhs_src, .air_tag = .rem };
- } else if (rhs_ty.isSignedInt()) {
+ } else if (rhs_scalar_ty.isSignedInt()) {
return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
} else {
break :rs .{ .src = rhs_src, .air_tag = .rem };
@@ -9708,8 +9693,8 @@ fn analyzeArithmetic(
return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
}
return sema.addConstant(
- scalar_type,
- try lhs_val.floatRem(rhs_val, scalar_type, sema.arena, target),
+ resolved_type,
+ try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, target),
);
} else {
return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
@@ -9745,8 +9730,8 @@ fn analyzeArithmetic(
}
if (maybe_lhs_val) |lhs_val| {
return sema.addConstant(
- scalar_type,
- try lhs_val.intRem(rhs_val, sema.arena),
+ resolved_type,
+ try lhs_val.intRem(rhs_val, resolved_type, sema.arena),
);
}
break :rs .{ .src = lhs_src, .air_tag = .rem };
@@ -9765,12 +9750,12 @@ fn analyzeArithmetic(
}
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef()) {
- return sema.addConstUndef(scalar_type);
+ return sema.addConstUndef(resolved_type);
}
if (maybe_rhs_val) |rhs_val| {
return sema.addConstant(
- scalar_type,
- try lhs_val.floatRem(rhs_val, scalar_type, sema.arena, target),
+ resolved_type,
+ try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, target),
);
} else break :rs .{ .src = rhs_src, .air_tag = .rem };
} else break :rs .{ .src = lhs_src, .air_tag = .rem };
@@ -9802,8 +9787,8 @@ fn analyzeArithmetic(
}
if (maybe_lhs_val) |lhs_val| {
return sema.addConstant(
- scalar_type,
- try lhs_val.intMod(rhs_val, sema.arena),
+ resolved_type,
+ try lhs_val.intMod(rhs_val, resolved_type, sema.arena),
);
}
break :rs .{ .src = lhs_src, .air_tag = .mod };
@@ -9822,12 +9807,12 @@ fn analyzeArithmetic(
}
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef()) {
- return sema.addConstUndef(scalar_type);
+ return sema.addConstUndef(resolved_type);
}
if (maybe_rhs_val) |rhs_val| {
return sema.addConstant(
- scalar_type,
- try lhs_val.floatMod(rhs_val, scalar_type, sema.arena, target),
+ resolved_type,
+ try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, target),
);
} else break :rs .{ .src = rhs_src, .air_tag = .mod };
} else break :rs .{ .src = lhs_src, .air_tag = .mod };
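The analyzeArithmetic hunks above route vector operands through checkVectorizableBinaryOperands and the now vector-aware Value helpers, so arithmetic on comptime-known vectors folds element-wise instead of hitting the old TODO error. An illustrative sketch, not part of this commit:

const std = @import("std");

test "comptime arithmetic on vectors" {
    comptime {
        const a = @Vector(2, i32){ 10, -4 };
        const b = @Vector(2, i32){ 3, 2 };
        try std.testing.expect(@reduce(.And, a + b == @Vector(2, i32){ 13, -2 }));
        try std.testing.expect(@reduce(.And, a - b == @Vector(2, i32){ 7, -6 }));
        try std.testing.expect(@reduce(.And, a * b == @Vector(2, i32){ 30, -8 }));
    }
}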
@@ -10178,6 +10163,11 @@ fn analyzeCmp(
) CompileError!Air.Inst.Ref {
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
+ try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
+
+ if (lhs_ty.zigTypeTag() == .Vector and rhs_ty.zigTypeTag() == .Vector) {
+ return sema.cmpVector(block, src, lhs, rhs, op, lhs_src, rhs_src);
+ }
if (lhs_ty.isNumeric() and rhs_ty.isNumeric()) {
// This operation allows any combination of integer and float types, regardless of the
// signed-ness, comptime-ness, and bit-width. So peer type resolution is incorrect for
@@ -10212,6 +10202,12 @@ fn cmpSelf(
if (try sema.resolveMaybeUndefVal(block, rhs_src, casted_rhs)) |rhs_val| {
if (rhs_val.isUndef()) return sema.addConstUndef(Type.bool);
+ if (resolved_type.zigTypeTag() == .Vector) {
+ const result_ty = try Type.vector(sema.arena, resolved_type.vectorLen(), Type.@"bool");
+ const cmp_val = try lhs_val.compareVector(op, rhs_val, resolved_type, sema.arena);
+ return sema.addConstant(result_ty, cmp_val);
+ }
+
if (lhs_val.compare(op, rhs_val, resolved_type)) {
return Air.Inst.Ref.bool_true;
} else {
@@ -10237,16 +10233,12 @@ fn cmpSelf(
}
};
try sema.requireRuntimeBlock(block, runtime_src);
-
- const tag: Air.Inst.Tag = switch (op) {
- .lt => .cmp_lt,
- .lte => .cmp_lte,
- .eq => .cmp_eq,
- .gte => .cmp_gte,
- .gt => .cmp_gt,
- .neq => .cmp_neq,
- };
- // TODO handle vectors
+ if (resolved_type.zigTypeTag() == .Vector) {
+ const result_ty = try Type.vector(sema.arena, resolved_type.vectorLen(), Type.@"bool");
+ const result_ty_ref = try sema.addType(result_ty);
+ return block.addCmpVector(casted_lhs, casted_rhs, op, result_ty_ref);
+ }
+ const tag = Air.Inst.Tag.fromCmpOp(op);
return block.addBinOp(tag, casted_lhs, casted_rhs);
}
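Both the comptime path (Value.compareVector) and the runtime path (block.addCmpVector) above yield a result of type @Vector(len, bool) rather than a single bool. A minimal user-level sketch of that shape, illustrative and not from this commit:

const std = @import("std");

test "vector comparison yields a bool vector" {
    const a = @Vector(4, u32){ 1, 2, 3, 4 };
    const b = @Vector(4, u32){ 1, 9, 3, 9 };
    const eq = a == b; // @Vector(4, bool){ true, false, true, false }
    try std.testing.expect(@reduce(.Or, eq)); // some elements compare equal
    try std.testing.expect(!@reduce(.And, eq)); // but not all of them
}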
@@ -11367,7 +11359,7 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi
const elem_ty = operand.elemType2();
const log2_elem_ty = try sema.log2IntType(block, elem_ty, src);
return Type.Tag.vector.create(sema.arena, .{
- .len = operand.arrayLen(),
+ .len = operand.vectorLen(),
.elem_type = log2_elem_ty,
});
},
@@ -13298,7 +13290,7 @@ fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
const target = sema.mod.getTarget();
- const result_val = val.floatToInt(sema.arena, dest_ty, target) catch |err| switch (err) {
+ const result_val = val.floatToInt(sema.arena, operand_ty, dest_ty, target) catch |err| switch (err) {
error.FloatCannotFit => {
return sema.fail(block, operand_src, "integer value {d} cannot be stored in type '{}'", .{ std.math.floor(val.toFloat(f64)), dest_ty });
},
@@ -13325,7 +13317,7 @@ fn zirIntToFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
const target = sema.mod.getTarget();
- const result_val = try val.intToFloat(sema.arena, dest_ty, target);
+ const result_val = try val.intToFloat(sema.arena, operand_ty, dest_ty, target);
return sema.addConstant(dest_ty, result_val);
}
@@ -13535,14 +13527,14 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (!is_vector) {
return sema.addConstant(
dest_ty,
- try val.intTrunc(sema.arena, dest_info.signedness, dest_info.bits),
+ try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits),
);
}
var elem_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, operand_ty.vectorLen());
for (elems) |*elem, i| {
const elem_val = val.elemValueBuffer(i, &elem_buf);
- elem.* = try elem_val.intTrunc(sema.arena, dest_info.signedness, dest_info.bits);
+ elem.* = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits);
}
return sema.addConstant(
dest_ty,
@@ -14097,13 +14089,40 @@ fn checkSimdBinOp(
) CompileError!SimdBinOp {
const lhs_ty = sema.typeOf(uncasted_lhs);
const rhs_ty = sema.typeOf(uncasted_rhs);
- const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
- const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
- var vec_len: ?usize = null;
+ try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
+ var vec_len: ?usize = if (lhs_ty.zigTypeTag() == .Vector) lhs_ty.vectorLen() else null;
+ const result_ty = try sema.resolvePeerTypes(block, src, &.{ uncasted_lhs, uncasted_rhs }, .{
+ .override = &[_]LazySrcLoc{ lhs_src, rhs_src },
+ });
+ const lhs = try sema.coerce(block, result_ty, uncasted_lhs, lhs_src);
+ const rhs = try sema.coerce(block, result_ty, uncasted_rhs, rhs_src);
+
+ return SimdBinOp{
+ .len = vec_len,
+ .lhs = lhs,
+ .rhs = rhs,
+ .lhs_val = try sema.resolveMaybeUndefVal(block, lhs_src, lhs),
+ .rhs_val = try sema.resolveMaybeUndefVal(block, rhs_src, rhs),
+ .result_ty = result_ty,
+ .scalar_ty = result_ty.scalarType(),
+ };
+}
+
+fn checkVectorizableBinaryOperands(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ lhs_ty: Type,
+ rhs_ty: Type,
+ lhs_src: LazySrcLoc,
+ rhs_src: LazySrcLoc,
+) CompileError!void {
+ const lhs_zig_ty_tag = lhs_ty.zigTypeTag();
+ const rhs_zig_ty_tag = rhs_ty.zigTypeTag();
if (lhs_zig_ty_tag == .Vector and rhs_zig_ty_tag == .Vector) {
- const lhs_len = lhs_ty.arrayLen();
- const rhs_len = rhs_ty.arrayLen();
+ const lhs_len = lhs_ty.vectorLen();
+ const rhs_len = rhs_ty.vectorLen();
if (lhs_len != rhs_len) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "vector length mismatch", .{});
@@ -14114,7 +14133,6 @@ fn checkSimdBinOp(
};
return sema.failWithOwnedErrorMsg(block, msg);
}
- vec_len = try sema.usizeCast(block, lhs_src, lhs_len);
} else if (lhs_zig_ty_tag == .Vector or rhs_zig_ty_tag == .Vector) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "mixed scalar and vector operands: {} and {}", .{
@@ -14132,21 +14150,6 @@ fn checkSimdBinOp(
};
return sema.failWithOwnedErrorMsg(block, msg);
}
- const result_ty = try sema.resolvePeerTypes(block, src, &.{ uncasted_lhs, uncasted_rhs }, .{
- .override = &[_]LazySrcLoc{ lhs_src, rhs_src },
- });
- const lhs = try sema.coerce(block, result_ty, uncasted_lhs, lhs_src);
- const rhs = try sema.coerce(block, result_ty, uncasted_rhs, rhs_src);
-
- return SimdBinOp{
- .len = vec_len,
- .lhs = lhs,
- .rhs = rhs,
- .lhs_val = try sema.resolveMaybeUndefVal(block, lhs_src, lhs),
- .rhs_val = try sema.resolveMaybeUndefVal(block, rhs_src, rhs),
- .result_ty = result_ty,
- .scalar_ty = result_ty.scalarType(),
- };
}
fn resolveExportOptions(
@@ -14376,9 +14379,9 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
while (i < vec_len) : (i += 1) {
const elem_val = operand_val.elemValueBuffer(i, &elem_buf);
switch (operation) {
- .And => accum = try accum.bitwiseAnd(elem_val, sema.arena),
- .Or => accum = try accum.bitwiseOr(elem_val, sema.arena),
- .Xor => accum = try accum.bitwiseXor(elem_val, sema.arena),
+ .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena),
+ .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena),
+ .Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena),
.Min => accum = accum.numberMin(elem_val),
.Max => accum = accum.numberMax(elem_val),
.Add => accum = try accum.numberAddWrap(elem_val, scalar_ty, sema.arena, target),
@@ -14697,10 +14700,10 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
.Xchg => operand_val,
.Add => try stored_val.numberAddWrap(operand_val, operand_ty, sema.arena, target),
.Sub => try stored_val.numberSubWrap(operand_val, operand_ty, sema.arena, target),
- .And => try stored_val.bitwiseAnd (operand_val, sema.arena),
+ .And => try stored_val.bitwiseAnd (operand_val, operand_ty, sema.arena),
.Nand => try stored_val.bitwiseNand (operand_val, operand_ty, sema.arena, target),
- .Or => try stored_val.bitwiseOr (operand_val, sema.arena),
- .Xor => try stored_val.bitwiseXor (operand_val, sema.arena),
+ .Or => try stored_val.bitwiseOr (operand_val, operand_ty, sema.arena),
+ .Xor => try stored_val.bitwiseXor (operand_val, operand_ty, sema.arena),
.Max => stored_val.numberMax (operand_val),
.Min => stored_val.numberMin (operand_val),
// zig fmt: on
@@ -17523,7 +17526,7 @@ fn coerce(
if (val.floatHasFraction()) {
return sema.fail(block, inst_src, "fractional component prevents float value {} from coercion to type '{}'", .{ val.fmtValue(inst_ty), dest_ty });
}
- const result_val = val.floatToInt(sema.arena, dest_ty, target) catch |err| switch (err) {
+ const result_val = val.floatToInt(sema.arena, inst_ty, dest_ty, target) catch |err| switch (err) {
error.FloatCannotFit => {
return sema.fail(block, inst_src, "integer value {d} cannot be stored in type '{}'", .{ std.math.floor(val.toFloat(f64)), dest_ty });
},
@@ -17586,7 +17589,7 @@ fn coerce(
},
.Int, .ComptimeInt => int: {
const val = (try sema.resolveDefinedValue(block, inst_src, inst)) orelse break :int;
- const result_val = try val.intToFloat(sema.arena, dest_ty, target);
+ const result_val = try val.intToFloat(sema.arena, inst_ty, dest_ty, target);
// TODO implement this compile error
//const int_again_val = try result_val.floatToInt(sema.arena, inst_ty);
//if (!int_again_val.eql(val, inst_ty)) {
@@ -17823,8 +17826,21 @@ fn coerceInMemoryAllowed(
return .ok;
}
+ // Vectors
+ if (dest_tag == .Vector and src_tag == .Vector) vectors: {
+ const dest_len = dest_ty.vectorLen();
+ const src_len = src_ty.vectorLen();
+ if (dest_len != src_len) break :vectors;
+
+ const dest_elem_ty = dest_ty.scalarType();
+ const src_elem_ty = src_ty.scalarType();
+ const child = try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, dest_is_mut, target, dest_src, src_src);
+ if (child == .no_match) break :vectors;
+
+ return .ok;
+ }
+
// TODO: non-pointer-like optionals
- // TODO: vectors
return .no_match;
}
@@ -19697,19 +19713,6 @@ fn cmpNumeric(
const lhs_ty_tag = lhs_ty.zigTypeTag();
const rhs_ty_tag = rhs_ty.zigTypeTag();
- if (lhs_ty_tag == .Vector and rhs_ty_tag == .Vector) {
- if (lhs_ty.vectorLen() != rhs_ty.vectorLen()) {
- return sema.fail(block, src, "vector length mismatch: {d} and {d}", .{
- lhs_ty.vectorLen(), rhs_ty.vectorLen(),
- });
- }
- return sema.fail(block, src, "TODO implement support for vectors in cmpNumeric", .{});
- } else if (lhs_ty_tag == .Vector or rhs_ty_tag == .Vector) {
- return sema.fail(block, src, "mixed scalar and vector operands to comparison operator: '{}' and '{}'", .{
- lhs_ty, rhs_ty,
- });
- }
-
const runtime_src: LazySrcLoc = src: {
if (try sema.resolveMaybeUndefVal(block, lhs_src, lhs)) |lhs_val| {
if (try sema.resolveMaybeUndefVal(block, rhs_src, rhs)) |rhs_val| {
@@ -19895,6 +19898,46 @@ fn cmpNumeric(
return block.addBinOp(Air.Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs);
}
+/// Asserts that lhs and rhs types are both vectors.
+fn cmpVector(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ lhs: Air.Inst.Ref,
+ rhs: Air.Inst.Ref,
+ op: std.math.CompareOperator,
+ lhs_src: LazySrcLoc,
+ rhs_src: LazySrcLoc,
+) CompileError!Air.Inst.Ref {
+ const lhs_ty = sema.typeOf(lhs);
+ const rhs_ty = sema.typeOf(rhs);
+ assert(lhs_ty.zigTypeTag() == .Vector);
+ assert(rhs_ty.zigTypeTag() == .Vector);
+ try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
+
+ const result_ty = try Type.vector(sema.arena, lhs_ty.vectorLen(), Type.@"bool");
+
+ const runtime_src: LazySrcLoc = src: {
+ if (try sema.resolveMaybeUndefVal(block, lhs_src, lhs)) |lhs_val| {
+ if (try sema.resolveMaybeUndefVal(block, rhs_src, rhs)) |rhs_val| {
+ if (lhs_val.isUndef() or rhs_val.isUndef()) {
+ return sema.addConstUndef(result_ty);
+ }
+ const cmp_val = try lhs_val.compareVector(op, rhs_val, lhs_ty, sema.arena);
+ return sema.addConstant(result_ty, cmp_val);
+ } else {
+ break :src rhs_src;
+ }
+ } else {
+ break :src lhs_src;
+ }
+ };
+
+ try sema.requireRuntimeBlock(block, runtime_src);
+ const result_ty_inst = try sema.addType(result_ty);
+ return block.addCmpVector(lhs, rhs, op, result_ty_inst);
+}
+
fn wrapOptional(
sema: *Sema,
block: *Block,
@@ -21201,7 +21244,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
map.putAssumeCapacityContext(copied_val, {}, .{ .ty = int_tag_ty });
} else {
const val = if (last_tag_val) |val|
- try val.intAdd(Value.one, sema.arena)
+ try val.intAdd(Value.one, int_tag_ty, sema.arena)
else
Value.zero;
last_tag_val = val;
diff --git a/src/value.zig b/src/value.zig
index c5e082485a..6f0f786072 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -1846,8 +1846,23 @@ pub const Value = extern union {
return order(lhs, rhs).compare(op);
}
- /// Asserts the value is comparable. Both operands have type `ty`.
+ /// Asserts the values are comparable. Both operands have type `ty`.
+ /// Vector results will be reduced with AND.
pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type) bool {
+ if (ty.zigTypeTag() == .Vector) {
+ var i: usize = 0;
+ while (i < ty.vectorLen()) : (i += 1) {
+ if (!compareScalar(lhs.indexVectorlike(i), op, rhs.indexVectorlike(i), ty.scalarType())) {
+ return false;
+ }
+ }
+ return true;
+ }
+ return compareScalar(lhs, op, rhs, ty);
+ }
+
+ /// Asserts the values are comparable. Both operands have type `ty`.
+ pub fn compareScalar(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type) bool {
return switch (op) {
.eq => lhs.eql(rhs, ty),
.neq => !lhs.eql(rhs, ty),
@@ -1855,18 +1870,25 @@ pub const Value = extern union {
};
}
+ /// Asserts the values are comparable vectors of type `ty`.
+ pub fn compareVector(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, allocator: Allocator) !Value {
+ assert(ty.zigTypeTag() == .Vector);
+ const result_data = try allocator.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ const res_bool = compareScalar(lhs.indexVectorlike(i), op, rhs.indexVectorlike(i), ty.scalarType());
+ scalar.* = if (res_bool) Value.@"true" else Value.@"false";
+ }
+ return Value.Tag.aggregate.create(allocator, result_data);
+ }
+
/// Asserts the value is comparable.
- /// For vectors this is only valid with op == .eq.
+ /// Vector results will be reduced with AND.
pub fn compareWithZero(lhs: Value, op: std.math.CompareOperator) bool {
switch (lhs.tag()) {
- .repeated => {
- assert(op == .eq);
- return lhs.castTag(.repeated).?.data.compareWithZero(.eq);
- },
+ .repeated => return lhs.castTag(.repeated).?.data.compareWithZero(op),
.aggregate => {
- assert(op == .eq);
for (lhs.castTag(.aggregate).?.data) |elem_val| {
- if (!elem_val.compareWithZero(.eq)) return false;
+ if (!elem_val.compareWithZero(op)) return false;
}
return true;
},
@@ -2404,6 +2426,27 @@ pub const Value = extern union {
};
}
+ /// Index into a vector-like `Value`. Asserts `index` is a valid index for `val`.
+ /// Some scalar values are considered vector-like to avoid needing to allocate
+ /// a new `repeated` each time a constant is used.
+ pub fn indexVectorlike(val: Value, index: usize) Value {
+ return switch (val.tag()) {
+ .aggregate => val.castTag(.aggregate).?.data[index],
+
+ .repeated => val.castTag(.repeated).?.data,
+ // These values will implicitly be treated as `repeated`.
+ .zero,
+ .one,
+ .bool_false,
+ .bool_true,
+ .int_i64,
+ .int_u64,
+ => val,
+
+ else => unreachable,
+ };
+ }
+
/// Asserts the value is a single-item pointer to an array, or an array,
/// or an unknown-length pointer, and returns the element value at the index.
pub fn elemValue(val: Value, arena: Allocator, index: usize) !Value {
@@ -2646,25 +2689,38 @@ pub const Value = extern union {
};
}
- pub fn intToFloat(val: Value, arena: Allocator, dest_ty: Type, target: Target) !Value {
+ pub fn intToFloat(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, target: Target) !Value {
+ if (int_ty.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, int_ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try intToFloatScalar(val.indexVectorlike(i), arena, int_ty.scalarType(), float_ty.scalarType(), target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return intToFloatScalar(val, arena, int_ty, float_ty, target);
+ }
+
+ pub fn intToFloatScalar(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, target: Target) !Value {
+ assert(int_ty.isNumeric() and !int_ty.isAnyFloat());
+ assert(float_ty.isAnyFloat());
switch (val.tag()) {
.undef, .zero, .one => return val,
.the_only_possible_value => return Value.initTag(.zero), // for i0, u0
.int_u64 => {
- return intToFloatInner(val.castTag(.int_u64).?.data, arena, dest_ty, target);
+ return intToFloatInner(val.castTag(.int_u64).?.data, arena, float_ty, target);
},
.int_i64 => {
- return intToFloatInner(val.castTag(.int_i64).?.data, arena, dest_ty, target);
+ return intToFloatInner(val.castTag(.int_i64).?.data, arena, float_ty, target);
},
.int_big_positive => {
const limbs = val.castTag(.int_big_positive).?.data;
const float = bigIntToFloat(limbs, true);
- return floatToValue(float, arena, dest_ty, target);
+ return floatToValue(float, arena, float_ty, target);
},
.int_big_negative => {
const limbs = val.castTag(.int_big_negative).?.data;
const float = bigIntToFloat(limbs, false);
- return floatToValue(float, arena, dest_ty, target);
+ return floatToValue(float, arena, float_ty, target);
},
else => unreachable,
}
@@ -2694,7 +2750,20 @@ pub const Value = extern union {
}
}
- pub fn floatToInt(val: Value, arena: Allocator, dest_ty: Type, target: Target) error{ FloatCannotFit, OutOfMemory }!Value {
+ pub fn floatToInt(val: Value, arena: Allocator, float_ty: Type, int_ty: Type, target: Target) error{ FloatCannotFit, OutOfMemory }!Value {
+ if (float_ty.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try floatToIntScalar(val.indexVectorlike(i), arena, float_ty.scalarType(), int_ty.scalarType(), target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return floatToIntScalar(val, arena, float_ty, int_ty, target);
+ }
+
+ pub fn floatToIntScalar(val: Value, arena: Allocator, float_ty: Type, int_ty: Type, target: Target) error{ FloatCannotFit, OutOfMemory }!Value {
+ assert(float_ty.isAnyFloat());
+ assert(int_ty.isInt());
const Limb = std.math.big.Limb;
var value = val.toFloat(f64); // TODO: f128 ?
@@ -2724,7 +2793,7 @@ pub const Value = extern union {
else
try Value.Tag.int_big_positive.create(arena, result_limbs);
- if (result.intFitsInType(dest_ty, target)) {
+ if (result.intFitsInType(int_ty, target)) {
return result;
} else {
return error.FloatCannotFit;
@@ -2771,7 +2840,7 @@ pub const Value = extern union {
};
}
- /// Supports both floats and ints; handles undefined.
+ /// Supports both (vectors of) floats and ints; handles undefined scalars.
pub fn numberAddWrap(
lhs: Value,
rhs: Value,
@@ -2779,10 +2848,28 @@ pub const Value = extern union {
arena: Allocator,
target: Target,
) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try numberAddWrapScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return numberAddWrapScalar(lhs, rhs, ty, arena, target);
+ }
+
+ /// Supports both floats and ints; handles undefined.
+ pub fn numberAddWrapScalar(
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+ arena: Allocator,
+ target: Target,
+ ) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
if (ty.zigTypeTag() == .ComptimeInt) {
- return intAdd(lhs, rhs, arena);
+ return intAdd(lhs, rhs, ty, arena);
}
if (ty.isAnyFloat()) {
@@ -2809,7 +2896,7 @@ pub const Value = extern union {
}
}
- /// Supports integers only; asserts neither operand is undefined.
+ /// Supports (vectors of) integers only; asserts neither operand is undefined.
pub fn intAddSat(
lhs: Value,
rhs: Value,
@@ -2817,6 +2904,24 @@ pub const Value = extern union {
arena: Allocator,
target: Target,
) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try intAddSatScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return intAddSatScalar(lhs, rhs, ty, arena, target);
+ }
+
+ /// Supports integers only; asserts neither operand is undefined.
+ pub fn intAddSatScalar(
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+ arena: Allocator,
+ target: Target,
+ ) !Value {
assert(!lhs.isUndef());
assert(!rhs.isUndef());
@@ -2861,7 +2966,7 @@ pub const Value = extern union {
};
}
- /// Supports both floats and ints; handles undefined.
+ /// Supports both (vectors of) floats and ints; handles undefined scalars.
pub fn numberSubWrap(
lhs: Value,
rhs: Value,
@@ -2869,10 +2974,28 @@ pub const Value = extern union {
arena: Allocator,
target: Target,
) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try numberSubWrapScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return numberSubWrapScalar(lhs, rhs, ty, arena, target);
+ }
+
+ /// Supports both floats and ints; handles undefined.
+ pub fn numberSubWrapScalar(
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+ arena: Allocator,
+ target: Target,
+ ) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
if (ty.zigTypeTag() == .ComptimeInt) {
- return intSub(lhs, rhs, arena);
+ return intSub(lhs, rhs, ty, arena);
}
if (ty.isAnyFloat()) {
@@ -2883,7 +3006,7 @@ pub const Value = extern union {
return overflow_result.wrapped_result;
}
- /// Supports integers only; asserts neither operand is undefined.
+ /// Supports (vectors of) integers only; asserts neither operand is undefined.
pub fn intSubSat(
lhs: Value,
rhs: Value,
@@ -2891,6 +3014,24 @@ pub const Value = extern union {
arena: Allocator,
target: Target,
) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try intSubSatScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return intSubSatScalar(lhs, rhs, ty, arena, target);
+ }
+
+ /// Supports integers only; asserts neither operand is undefined.
+ pub fn intSubSatScalar(
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+ arena: Allocator,
+ target: Target,
+ ) !Value {
assert(!lhs.isUndef());
assert(!rhs.isUndef());
@@ -2944,7 +3085,7 @@ pub const Value = extern union {
};
}
- /// Supports both floats and ints; handles undefined.
+ /// Supports both (vectors of) floats and ints; handles undefined scalars.
pub fn numberMulWrap(
lhs: Value,
rhs: Value,
@@ -2952,10 +3093,28 @@ pub const Value = extern union {
arena: Allocator,
target: Target,
) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try numberMulWrapScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return numberMulWrapScalar(lhs, rhs, ty, arena, target);
+ }
+
+ /// Supports both floats and ints; handles undefined.
+ pub fn numberMulWrapScalar(
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+ arena: Allocator,
+ target: Target,
+ ) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
if (ty.zigTypeTag() == .ComptimeInt) {
- return intMul(lhs, rhs, arena);
+ return intMul(lhs, rhs, ty, arena);
}
if (ty.isAnyFloat()) {
@@ -2966,7 +3125,7 @@ pub const Value = extern union {
return overflow_result.wrapped_result;
}
- /// Supports integers only; asserts neither operand is undefined.
+ /// Supports (vectors of) integers only; asserts neither operand is undefined.
pub fn intMulSat(
lhs: Value,
rhs: Value,
@@ -2974,6 +3133,24 @@ pub const Value = extern union {
arena: Allocator,
target: Target,
) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try intMulSatScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return intMulSatScalar(lhs, rhs, ty, arena, target);
+ }
+
+ /// Supports (vectors of) integers only; asserts neither operand is undefined.
+ pub fn intMulSatScalar(
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+ arena: Allocator,
+ target: Target,
+ ) !Value {
assert(!lhs.isUndef());
assert(!rhs.isUndef());
@@ -3025,8 +3202,20 @@ pub const Value = extern union {
};
}
- /// operands must be integers; handles undefined.
+ /// operands must be (vectors of) integers; handles undefined scalars.
pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, target: Target) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try bitwiseNotScalar(val.indexVectorlike(i), ty.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return bitwiseNotScalar(val, ty, arena, target);
+ }
+
+ /// operands must be integers; handles undefined.
+ pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, target: Target) !Value {
if (val.isUndef()) return Value.initTag(.undef);
const info = ty.intInfo(target);
@@ -3050,8 +3239,20 @@ pub const Value = extern union {
return fromBigInt(arena, result_bigint.toConst());
}
+ /// operands must be (vectors of) integers; handles undefined scalars.
+ pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try allocator.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try bitwiseAndScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator);
+ }
+ return Value.Tag.aggregate.create(allocator, result_data);
+ }
+ return bitwiseAndScalar(lhs, rhs, allocator);
+ }
+
/// operands must be integers; handles undefined.
- pub fn bitwiseAnd(lhs: Value, rhs: Value, arena: Allocator) !Value {
+ pub fn bitwiseAndScalar(lhs: Value, rhs: Value, arena: Allocator) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
// TODO is this a performance issue? maybe we should try the operation without
@@ -3070,22 +3271,46 @@ pub const Value = extern union {
return fromBigInt(arena, result_bigint.toConst());
}
- /// operands must be integers; handles undefined.
+ /// operands must be (vectors of) integers; handles undefined scalars.
pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, target: Target) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try bitwiseNandScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return bitwiseNandScalar(lhs, rhs, ty, arena, target);
+ }
+
+ /// operands must be integers; handles undefined.
+ pub fn bitwiseNandScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, target: Target) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
- const anded = try bitwiseAnd(lhs, rhs, arena);
+ const anded = try bitwiseAnd(lhs, rhs, ty, arena);
const all_ones = if (ty.isSignedInt())
try Value.Tag.int_i64.create(arena, -1)
else
try ty.maxInt(arena, target);
- return bitwiseXor(anded, all_ones, arena);
+ return bitwiseXor(anded, all_ones, ty, arena);
+ }
+
+ /// operands must be (vectors of) integers; handles undefined scalars.
+ pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try allocator.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try bitwiseOrScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator);
+ }
+ return Value.Tag.aggregate.create(allocator, result_data);
+ }
+ return bitwiseOrScalar(lhs, rhs, allocator);
}
/// operands must be integers; handles undefined.
- pub fn bitwiseOr(lhs: Value, rhs: Value, arena: Allocator) !Value {
+ pub fn bitwiseOrScalar(lhs: Value, rhs: Value, arena: Allocator) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
// TODO is this a performance issue? maybe we should try the operation without
@@ -3103,8 +3328,20 @@ pub const Value = extern union {
return fromBigInt(arena, result_bigint.toConst());
}
+ /// operands must be (vectors of) integers; handles undefined scalars.
+ pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try allocator.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try bitwiseXorScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator);
+ }
+ return Value.Tag.aggregate.create(allocator, result_data);
+ }
+ return bitwiseXorScalar(lhs, rhs, allocator);
+ }
+
/// operands must be integers; handles undefined.
- pub fn bitwiseXor(lhs: Value, rhs: Value, arena: Allocator) !Value {
+ pub fn bitwiseXorScalar(lhs: Value, rhs: Value, arena: Allocator) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
// TODO is this a performance issue? maybe we should try the operation without
@@ -3123,7 +3360,18 @@ pub const Value = extern union {
return fromBigInt(arena, result_bigint.toConst());
}
- pub fn intAdd(lhs: Value, rhs: Value, allocator: Allocator) !Value {
+ pub fn intAdd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try allocator.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try intAddScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator);
+ }
+ return Value.Tag.aggregate.create(allocator, result_data);
+ }
+ return intAddScalar(lhs, rhs, allocator);
+ }
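+
+ // Note (editorial assumption about intended use): the scalar integer helpers
+ // here do plain arbitrary-precision big-int arithmetic and never wrap, so the
+ // vector wrappers simply produce one exact big-int result per lane; callers
+ // that need a fixed-width result are expected to truncate it separately, e.g.
+ // via `intTrunc` further down.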
+
+ pub fn intAddScalar(lhs: Value, rhs: Value, allocator: Allocator) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
@@ -3139,7 +3387,18 @@ pub const Value = extern union {
return fromBigInt(allocator, result_bigint.toConst());
}
- pub fn intSub(lhs: Value, rhs: Value, allocator: Allocator) !Value {
+ pub fn intSub(lhs: Value, rhs: Value, ty: Type, allocator: Allocator) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try allocator.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try intSubScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator);
+ }
+ return Value.Tag.aggregate.create(allocator, result_data);
+ }
+ return intSubScalar(lhs, rhs, allocator);
+ }
+
+ pub fn intSubScalar(lhs: Value, rhs: Value, allocator: Allocator) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
@@ -3155,7 +3414,18 @@ pub const Value = extern union {
return fromBigInt(allocator, result_bigint.toConst());
}
- pub fn intDiv(lhs: Value, rhs: Value, allocator: Allocator) !Value {
+ pub fn intDiv(lhs: Value, rhs: Value, ty: Type, allocator: Allocator) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try allocator.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try intDivScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator);
+ }
+ return Value.Tag.aggregate.create(allocator, result_data);
+ }
+ return intDivScalar(lhs, rhs, allocator);
+ }
+
+ pub fn intDivScalar(lhs: Value, rhs: Value, allocator: Allocator) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
@@ -3180,7 +3450,18 @@ pub const Value = extern union {
return fromBigInt(allocator, result_q.toConst());
}
- pub fn intDivFloor(lhs: Value, rhs: Value, allocator: Allocator) !Value {
+ pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try allocator.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try intDivFloorScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator);
+ }
+ return Value.Tag.aggregate.create(allocator, result_data);
+ }
+ return intDivFloorScalar(lhs, rhs, allocator);
+ }
+
+ pub fn intDivFloorScalar(lhs: Value, rhs: Value, allocator: Allocator) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
@@ -3205,7 +3486,18 @@ pub const Value = extern union {
return fromBigInt(allocator, result_q.toConst());
}
- pub fn intRem(lhs: Value, rhs: Value, allocator: Allocator) !Value {
+ pub fn intRem(lhs: Value, rhs: Value, ty: Type, allocator: Allocator) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try allocator.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try intRemScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator);
+ }
+ return Value.Tag.aggregate.create(allocator, result_data);
+ }
+ return intRemScalar(lhs, rhs, allocator);
+ }
+
+ pub fn intRemScalar(lhs: Value, rhs: Value, allocator: Allocator) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
@@ -3232,7 +3524,18 @@ pub const Value = extern union {
return fromBigInt(allocator, result_r.toConst());
}
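+
+ // Reminder (illustrative, with hypothetical operands): `intRem` above follows
+ // truncated division while `intMod` below follows floored division, so they
+ // differ for a negative dividend: rem(-7, 3) == -1 but mod(-7, 3) == 2.
+ // The vector wrappers apply the corresponding scalar rule independently per lane.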
- pub fn intMod(lhs: Value, rhs: Value, allocator: Allocator) !Value {
+ pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try allocator.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try intModScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator);
+ }
+ return Value.Tag.aggregate.create(allocator, result_data);
+ }
+ return intModScalar(lhs, rhs, allocator);
+ }
+
+ pub fn intModScalar(lhs: Value, rhs: Value, allocator: Allocator) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
@@ -3270,6 +3573,17 @@ pub const Value = extern union {
}
pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, target: Target) !Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try floatRemScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return floatRemScalar(lhs, rhs, float_type, arena, target);
+ }
+
+ pub fn floatRemScalar(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, target: Target) !Value {
switch (float_type.floatBits(target)) {
16 => {
const lhs_val = lhs.toFloat(f16);
@@ -3304,6 +3618,17 @@ pub const Value = extern union {
}
pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, target: Target) !Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try floatModScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return floatModScalar(lhs, rhs, float_type, arena, target);
+ }
+
+ pub fn floatModScalar(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, target: Target) !Value {
switch (float_type.floatBits(target)) {
16 => {
const lhs_val = lhs.toFloat(f16);
@@ -3337,7 +3662,18 @@ pub const Value = extern union {
}
}
- pub fn intMul(lhs: Value, rhs: Value, allocator: Allocator) !Value {
+ pub fn intMul(lhs: Value, rhs: Value, ty: Type, allocator: Allocator) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try allocator.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try intMulScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator);
+ }
+ return Value.Tag.aggregate.create(allocator, result_data);
+ }
+ return intMulScalar(lhs, rhs, allocator);
+ }
+
+ pub fn intMulScalar(lhs: Value, rhs: Value, allocator: Allocator) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
@@ -3358,7 +3694,30 @@ pub const Value = extern union {
return fromBigInt(allocator, result_bigint.toConst());
}
- pub fn intTrunc(val: Value, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16) !Value {
+ pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try allocator.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try intTruncScalar(val.indexVectorlike(i), allocator, signedness, bits);
+ }
+ return Value.Tag.aggregate.create(allocator, result_data);
+ }
+ return intTruncScalar(val, allocator, signedness, bits);
+ }
+
+ /// This variant may vectorize on `bits`. Asserts that `bits` is a (vector of) `u16`.
+ pub fn intTruncBitsAsValue(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: Value) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try allocator.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try intTruncScalar(val.indexVectorlike(i), allocator, signedness, @intCast(u16, bits.indexVectorlike(i).toUnsignedInt()));
+ }
+ return Value.Tag.aggregate.create(allocator, result_data);
+ }
+ return intTruncScalar(val, allocator, signedness, @intCast(u16, bits.toUnsignedInt()));
+ }
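+
+ // Illustrative sketch (hypothetical values): `intTruncBitsAsValue` lets every
+ // lane carry its own width when `bits` is itself a vector. Truncating the
+ // lanes { 0x1FF, 0x1FF } with bits { 8, 4 } and .unsigned signedness yields
+ // { 0xFF, 0xF }: lane 0 keeps 8 bits, lane 1 keeps 4.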
+
+ pub fn intTruncScalar(val: Value, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16) !Value {
if (bits == 0) return Value.zero;
var val_space: Value.BigIntSpace = undefined;
@@ -3374,7 +3733,18 @@ pub const Value = extern union {
return fromBigInt(allocator, result_bigint.toConst());
}
- pub fn shl(lhs: Value, rhs: Value, allocator: Allocator) !Value {
+ pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try allocator.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try shlScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator);
+ }
+ return Value.Tag.aggregate.create(allocator, result_data);
+ }
+ return shlScalar(lhs, rhs, allocator);
+ }
+
+ pub fn shlScalar(lhs: Value, rhs: Value, allocator: Allocator) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
@@ -3431,6 +3801,23 @@ pub const Value = extern union {
arena: Allocator,
target: Target,
) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try shlSatScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return shlSatScalar(lhs, rhs, ty, arena, target);
+ }
+
+ pub fn shlSatScalar(
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+ arena: Allocator,
+ target: Target,
+ ) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
const info = ty.intInfo(target);
@@ -3458,13 +3845,41 @@ pub const Value = extern union {
arena: Allocator,
target: Target,
) !Value {
- const shifted = try lhs.shl(rhs, arena);
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try shlTruncScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return shlTruncScalar(lhs, rhs, ty, arena, target);
+ }
+
+ pub fn shlTruncScalar(
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+ arena: Allocator,
+ target: Target,
+ ) !Value {
+ const shifted = try lhs.shl(rhs, ty, arena);
const int_info = ty.intInfo(target);
- const truncated = try shifted.intTrunc(arena, int_info.signedness, int_info.bits);
+ const truncated = try shifted.intTrunc(ty, arena, int_info.signedness, int_info.bits);
return truncated;
}
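+
+ // Worked example (illustrative, assuming `ty == u8`): shlTruncScalar shifts at
+ // full precision and then truncates back to the type's width, so
+ // 0xAA << 2 == 0x2A8, and intTrunc(..., .unsigned, 8) keeps only the low
+ // byte, giving 0xA8.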
- pub fn shr(lhs: Value, rhs: Value, allocator: Allocator) !Value {
+ pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try allocator.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try shrScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator);
+ }
+ return Value.Tag.aggregate.create(allocator, result_data);
+ }
+ return shrScalar(lhs, rhs, allocator);
+ }
+
+ pub fn shrScalar(lhs: Value, rhs: Value, allocator: Allocator) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
@@ -3498,6 +3913,23 @@ pub const Value = extern union {
arena: Allocator,
target: Target,
) !Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try floatAddScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return floatAddScalar(lhs, rhs, float_type, arena, target);
+ }
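+
+ // Note: floatSub, floatDiv, floatDivFloor, floatDivTrunc and floatMul below,
+ // as well as the unary helpers (sqrt, sin, cos, exp, ...), repeat this exact
+ // wrapper shape; only the scalar body differs, switching on
+ // float_type.floatBits(target) to evaluate the operation at the width of the
+ // scalar float type.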
+
+ pub fn floatAddScalar(
+ lhs: Value,
+ rhs: Value,
+ float_type: Type,
+ arena: Allocator,
+ target: Target,
+ ) !Value {
switch (float_type.floatBits(target)) {
16 => {
const lhs_val = lhs.toFloat(f16);
@@ -3535,6 +3967,23 @@ pub const Value = extern union {
arena: Allocator,
target: Target,
) !Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try floatSubScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return floatSubScalar(lhs, rhs, float_type, arena, target);
+ }
+
+ pub fn floatSubScalar(
+ lhs: Value,
+ rhs: Value,
+ float_type: Type,
+ arena: Allocator,
+ target: Target,
+ ) !Value {
switch (float_type.floatBits(target)) {
16 => {
const lhs_val = lhs.toFloat(f16);
@@ -3572,6 +4021,23 @@ pub const Value = extern union {
arena: Allocator,
target: Target,
) !Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try floatDivScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return floatDivScalar(lhs, rhs, float_type, arena, target);
+ }
+
+ pub fn floatDivScalar(
+ lhs: Value,
+ rhs: Value,
+ float_type: Type,
+ arena: Allocator,
+ target: Target,
+ ) !Value {
switch (float_type.floatBits(target)) {
16 => {
const lhs_val = lhs.toFloat(f16);
@@ -3612,6 +4078,23 @@ pub const Value = extern union {
arena: Allocator,
target: Target,
) !Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try floatDivFloorScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return floatDivFloorScalar(lhs, rhs, float_type, arena, target);
+ }
+
+ pub fn floatDivFloorScalar(
+ lhs: Value,
+ rhs: Value,
+ float_type: Type,
+ arena: Allocator,
+ target: Target,
+ ) !Value {
switch (float_type.floatBits(target)) {
16 => {
const lhs_val = lhs.toFloat(f16);
@@ -3652,6 +4135,23 @@ pub const Value = extern union {
arena: Allocator,
target: Target,
) !Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try floatDivTruncScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return floatDivTruncScalar(lhs, rhs, float_type, arena, target);
+ }
+
+ pub fn floatDivTruncScalar(
+ lhs: Value,
+ rhs: Value,
+ float_type: Type,
+ arena: Allocator,
+ target: Target,
+ ) !Value {
switch (float_type.floatBits(target)) {
16 => {
const lhs_val = lhs.toFloat(f16);
@@ -3692,6 +4192,23 @@ pub const Value = extern union {
arena: Allocator,
target: Target,
) !Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try floatMulScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return floatMulScalar(lhs, rhs, float_type, arena, target);
+ }
+
+ pub fn floatMulScalar(
+ lhs: Value,
+ rhs: Value,
+ float_type: Type,
+ arena: Allocator,
+ target: Target,
+ ) !Value {
switch (float_type.floatBits(target)) {
16 => {
const lhs_val = lhs.toFloat(f16);
@@ -3726,6 +4243,17 @@ pub const Value = extern union {
}
pub fn sqrt(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try sqrtScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return sqrtScalar(val, float_type, arena, target);
+ }
+
+ pub fn sqrtScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
switch (float_type.floatBits(target)) {
16 => {
const f = val.toFloat(f16);
@@ -3758,6 +4286,17 @@ pub const Value = extern union {
}
pub fn sin(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try sinScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return sinScalar(val, float_type, arena, target);
+ }
+
+ pub fn sinScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
switch (float_type.floatBits(target)) {
16 => {
const f = val.toFloat(f16);
@@ -3790,6 +4329,17 @@ pub const Value = extern union {
}
pub fn cos(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try cosScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return cosScalar(val, float_type, arena, target);
+ }
+
+ pub fn cosScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
switch (float_type.floatBits(target)) {
16 => {
const f = val.toFloat(f16);
@@ -3822,6 +4372,17 @@ pub const Value = extern union {
}
pub fn exp(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try expScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return expScalar(val, float_type, arena, target);
+ }
+
+ pub fn expScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
switch (float_type.floatBits(target)) {
16 => {
const f = val.toFloat(f16);
@@ -3854,6 +4415,17 @@ pub const Value = extern union {
}
pub fn exp2(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try exp2Scalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return exp2Scalar(val, float_type, arena, target);
+ }
+
+ pub fn exp2Scalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
switch (float_type.floatBits(target)) {
16 => {
const f = val.toFloat(f16);
@@ -3886,6 +4458,17 @@ pub const Value = extern union {
}
pub fn log(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try logScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return logScalar(val, float_type, arena, target);
+ }
+
+ pub fn logScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
switch (float_type.floatBits(target)) {
16 => {
const f = val.toFloat(f16);
@@ -3918,6 +4501,17 @@ pub const Value = extern union {
}
pub fn log2(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try log2Scalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return log2Scalar(val, float_type, arena, target);
+ }
+
+ pub fn log2Scalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
switch (float_type.floatBits(target)) {
16 => {
const f = val.toFloat(f16);
@@ -3950,6 +4544,17 @@ pub const Value = extern union {
}
pub fn log10(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try log10Scalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return log10Scalar(val, float_type, arena, target);
+ }
+
+ pub fn log10Scalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
switch (float_type.floatBits(target)) {
16 => {
const f = val.toFloat(f16);
@@ -3982,6 +4587,17 @@ pub const Value = extern union {
}
pub fn fabs(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try fabsScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return fabsScalar(val, float_type, arena, target);
+ }
+
+ pub fn fabsScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
switch (float_type.floatBits(target)) {
16 => {
const f = val.toFloat(f16);
@@ -4011,6 +4627,17 @@ pub const Value = extern union {
}
pub fn floor(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try floorScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return floorScalar(val, float_type, arena, target);
+ }
+
+ pub fn floorScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
switch (float_type.floatBits(target)) {
16 => {
const f = val.toFloat(f16);
@@ -4040,6 +4667,17 @@ pub const Value = extern union {
}
pub fn ceil(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try ceilScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return ceilScalar(val, float_type, arena, target);
+ }
+
+ pub fn ceilScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
switch (float_type.floatBits(target)) {
16 => {
const f = val.toFloat(f16);
@@ -4069,6 +4707,17 @@ pub const Value = extern union {
}
pub fn round(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try roundScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return roundScalar(val, float_type, arena, target);
+ }
+
+ pub fn roundScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
switch (float_type.floatBits(target)) {
16 => {
const f = val.toFloat(f16);
@@ -4098,6 +4747,17 @@ pub const Value = extern union {
}
pub fn trunc(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try truncScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return truncScalar(val, float_type, arena, target);
+ }
+
+ pub fn truncScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
switch (float_type.floatBits(target)) {
16 => {
const f = val.toFloat(f16);
@@ -4134,6 +4794,31 @@ pub const Value = extern union {
arena: Allocator,
target: Target,
) Allocator.Error!Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try mulAddScalar(
+ float_type.scalarType(),
+ mulend1.indexVectorlike(i),
+ mulend2.indexVectorlike(i),
+ addend.indexVectorlike(i),
+ arena,
+ target,
+ );
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return mulAddScalar(float_type, mulend1, mulend2, addend, arena, target);
+ }
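+
+ // Illustrative note: mulAdd is the ternary counterpart of the binary wrappers
+ // above -- all three operands (mulend1, mulend2, addend) are indexed per lane
+ // before delegating to mulAddScalar, which (presumably via @mulAdd at the
+ // width selected by floatBits) evaluates the multiply-add for a single lane.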
+
+ pub fn mulAddScalar(
+ float_type: Type,
+ mulend1: Value,
+ mulend2: Value,
+ addend: Value,
+ arena: Allocator,
+ target: Target,
+ ) Allocator.Error!Value {
switch (float_type.floatBits(target)) {
16 => {
const m1 = mulend1.toFloat(f16);