author      Veikka Tuominen <git@vexu.eu>            2022-02-28 10:09:23 +0200
committer   Andrew Kelley <andrew@ziglang.org>       2022-02-28 13:09:14 -0700
commit      dfeffcfbf825c29d89ccec281ab95dd2383317ac (patch)
tree        64b86ad830e83fea4b1acf27e5aef17e265f66f4 /src
parent      3a65fa269f476ecb9dafd8be77ee9f9c0c4ba996 (diff)
download    zig-dfeffcfbf825c29d89ccec281ab95dd2383317ac.tar.gz
            zig-dfeffcfbf825c29d89ccec281ab95dd2383317ac.zip
stage2: tuple mul/cat
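
Illustrative example (not part of the commit, only a sketch of the behavior the new
analyzeTupleCat/analyzeTupleMul paths are meant to handle): `++` concatenates two tuple
operands field by field and `**` repeats one, instead of falling through to the array
code paths.

    const std = @import("std");

    test "tuple cat and mul" {
        const a = .{ @as(u32, 1), "x" };
        const b = .{ @as(f32, 2.5) };
        const cat = a ++ b; // 3-field tuple: { u32, *const [1:0]u8, f32 }
        const rep = b ** 3; // 3-field tuple: { f32, f32, f32 }
        try std.testing.expect(cat.len == 3);
        try std.testing.expect(rep.len == 3);
        try std.testing.expect(cat[0] == 1);
        try std.testing.expect(rep[2] == 2.5);
    }
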
Diffstat (limited to 'src')
-rw-r--r--   src/Sema.zig    153
-rw-r--r--   src/type.zig     16
-rw-r--r--   src/value.zig    28
3 files changed, 187 insertions, 10 deletions
diff --git a/src/Sema.zig b/src/Sema.zig
index b3aadb37e2..8711b56b2d 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -8059,6 +8059,78 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
return block.addTyOp(.not, operand_type, operand);
}
+fn analyzeTupleCat(
+ sema: *Sema,
+ block: *Block,
+ src_node: i32,
+ lhs: Air.Inst.Ref,
+ rhs: Air.Inst.Ref,
+) CompileError!Air.Inst.Ref {
+ const lhs_ty = sema.typeOf(lhs);
+ const rhs_ty = sema.typeOf(rhs);
+ const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = src_node };
+ const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = src_node };
+
+ const lhs_tuple = lhs_ty.tupleFields();
+ const rhs_tuple = rhs_ty.tupleFields();
+ const dest_fields = lhs_tuple.types.len + rhs_tuple.types.len;
+
+ if (dest_fields == 0) {
+ return sema.addConstant(Type.initTag(.empty_struct_literal), Value.initTag(.empty_struct_value));
+ }
+
+ const types = try sema.arena.alloc(Type, dest_fields);
+ const values = try sema.arena.alloc(Value, dest_fields);
+
+ const opt_runtime_src = rs: {
+ var runtime_src: ?LazySrcLoc = null;
+ for (lhs_tuple.types) |ty, i| {
+ types[i] = ty;
+ values[i] = lhs_tuple.values[i];
+ const operand_src = lhs_src; // TODO better source location
+ if (values[i].tag() == .unreachable_value) {
+ runtime_src = operand_src;
+ }
+ }
+ const offset = lhs_tuple.types.len;
+ for (rhs_tuple.types) |ty, i| {
+ types[i + offset] = ty;
+ values[i + offset] = rhs_tuple.values[i];
+ const operand_src = rhs_src; // TODO better source location
+ if (rhs_tuple.values[i].tag() == .unreachable_value) {
+ runtime_src = operand_src;
+ }
+ }
+ break :rs runtime_src;
+ };
+
+ const tuple_ty = try Type.Tag.tuple.create(sema.arena, .{
+ .types = types,
+ .values = values,
+ });
+
+ const runtime_src = opt_runtime_src orelse {
+ const tuple_val = try Value.Tag.@"struct".create(sema.arena, values);
+ return sema.addConstant(tuple_ty, tuple_val);
+ };
+
+ try sema.requireRuntimeBlock(block, runtime_src);
+
+ const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_fields);
+ for (lhs_tuple.types) |_, i| {
+ const operand_src = lhs_src; // TODO better source location
+ element_refs[i] = try sema.tupleFieldValByIndex(block, operand_src, lhs, @intCast(u32, i), lhs_ty);
+ }
+ const offset = lhs_tuple.types.len;
+ for (rhs_tuple.types) |_, i| {
+ const operand_src = rhs_src; // TODO better source location
+ element_refs[i + offset] =
+ try sema.tupleFieldValByIndex(block, operand_src, rhs, @intCast(u32, i), rhs_ty);
+ }
+
+ return block.addAggregateInit(tuple_ty, element_refs);
+}
+
fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -8069,6 +8141,11 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const rhs = sema.resolveInst(extra.rhs);
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
+
+ if (lhs_ty.isTuple() and rhs_ty.isTuple()) {
+ return sema.analyzeTupleCat(block, inst_data.src_node, lhs, rhs);
+ }
+
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
@@ -8165,6 +8242,72 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, inst: Air.Inst.R
};
}
+fn analyzeTupleMul(
+ sema: *Sema,
+ block: *Block,
+ src_node: i32,
+ operand: Air.Inst.Ref,
+ factor: u64,
+) CompileError!Air.Inst.Ref {
+ const operand_ty = sema.typeOf(operand);
+ const operand_tuple = operand_ty.tupleFields();
+ const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = src_node };
+ const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = src_node };
+
+ const tuple_len = operand_tuple.types.len;
+ const final_len_u64 = std.math.mul(u64, tuple_len, factor) catch
+ return sema.fail(block, rhs_src, "operation results in overflow", .{});
+
+ if (final_len_u64 == 0) {
+ return sema.addConstant(Type.initTag(.empty_struct_literal), Value.initTag(.empty_struct_value));
+ }
+
+ const types = try sema.arena.alloc(Type, final_len_u64);
+ const values = try sema.arena.alloc(Value, final_len_u64);
+
+ const opt_runtime_src = rs: {
+ var runtime_src: ?LazySrcLoc = null;
+ for (operand_tuple.types) |ty, i| {
+ types[i] = ty;
+ values[i] = operand_tuple.values[i];
+ const operand_src = lhs_src; // TODO better source location
+ if (values[i].tag() == .unreachable_value) {
+ runtime_src = operand_src;
+ }
+ }
+ var i: usize = 1;
+ while (i < factor) : (i += 1) {
+ mem.copy(Type, types[tuple_len * i ..], operand_tuple.types);
+ mem.copy(Value, values[tuple_len * i ..], operand_tuple.values);
+ }
+ break :rs runtime_src;
+ };
+
+ const tuple_ty = try Type.Tag.tuple.create(sema.arena, .{
+ .types = types,
+ .values = values,
+ });
+
+ const runtime_src = opt_runtime_src orelse {
+ const tuple_val = try Value.Tag.@"struct".create(sema.arena, values);
+ return sema.addConstant(tuple_ty, tuple_val);
+ };
+
+ try sema.requireRuntimeBlock(block, runtime_src);
+
+ const element_refs = try sema.arena.alloc(Air.Inst.Ref, final_len_u64);
+ for (operand_tuple.types) |_, i| {
+ const operand_src = lhs_src; // TODO better source location
+ element_refs[i] = try sema.tupleFieldValByIndex(block, operand_src, operand, @intCast(u32, i), operand_ty);
+ }
+ var i: usize = 1;
+ while (i < factor) : (i += 1) {
+ mem.copy(Air.Inst.Ref, element_refs[tuple_len * i ..], element_refs[0..tuple_len]);
+ }
+
+ return block.addAggregateInit(tuple_ty, element_refs);
+}
+
fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -8179,6 +8322,11 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// In `**` rhs has to be comptime-known, but lhs can be runtime-known
const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize);
+
+ if (lhs_ty.isTuple()) {
+ return sema.analyzeTupleMul(block, inst_data.src_node, lhs, factor);
+ }
+
const mulinfo = (try sema.getArrayCatInfo(block, lhs_src, lhs)) orelse
return sema.fail(block, lhs_src, "expected array, found '{}'", .{lhs_ty});
@@ -14754,6 +14902,11 @@ fn tupleFieldVal(
tuple_ty, field_name, @errorName(err),
});
};
+ if (field_index >= tuple_ty.structFieldCount()) {
+ return sema.fail(block, field_name_src, "tuple {} has no such field '{s}'", .{
+ tuple_ty, field_name,
+ });
+ }
return tupleFieldValByIndex(sema, block, src, tuple_byval, field_index, tuple_ty);
}
diff --git a/src/type.zig b/src/type.zig
index a84a0f4520..fb3ab5d28f 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -4531,7 +4531,15 @@ pub const Type = extern union {
};
pub fn isTuple(ty: Type) bool {
- return ty.tag() == .tuple;
+ return ty.tag() == .tuple or ty.tag() == .empty_struct_literal;
+ }
+
+ pub fn tupleFields(ty: Type) Payload.Tuple.Data {
+ return switch (ty.tag()) {
+ .tuple => ty.castTag(.tuple).?.data,
+ .empty_struct_literal => .{ .types = &.{}, .values = &.{} },
+ else => unreachable,
+ };
}
/// The sub-types are named after what fields they contain.
@@ -4683,11 +4691,13 @@ pub const Type = extern union {
pub const Tuple = struct {
base: Payload = .{ .tag = .tuple },
- data: struct {
+ data: Data,
+
+ pub const Data = struct {
types: []Type,
/// unreachable_value elements are used to indicate runtime-known.
values: []Value,
- },
+ };
};
pub const Union = struct {
diff --git a/src/value.zig b/src/value.zig
index 9d2d73928b..51f678aaaa 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -1829,7 +1829,7 @@ pub const Value = extern union {
assert(a_tag != .undef);
assert(b_tag != .undef);
if (a_tag == b_tag) switch (a_tag) {
- .void_value, .null_value, .the_only_possible_value => return true,
+ .void_value, .null_value, .the_only_possible_value, .empty_struct_value => return true,
.enum_literal => {
const a_name = a.castTag(.enum_literal).?.data;
const b_name = b.castTag(.enum_literal).?.data;
@@ -1892,10 +1892,18 @@ pub const Value = extern union {
return a_payload == b_payload;
},
.@"struct" => {
- const fields = ty.structFields().values();
const a_field_vals = a.castTag(.@"struct").?.data;
const b_field_vals = b.castTag(.@"struct").?.data;
assert(a_field_vals.len == b_field_vals.len);
+ if (ty.isTuple()) {
+ const types = ty.tupleFields().types;
+ assert(types.len == a_field_vals.len);
+ for (types) |field_ty, i| {
+ if (!eql(a_field_vals[i], b_field_vals[i], field_ty)) return false;
+ }
+ return true;
+ }
+ const fields = ty.structFields().values();
assert(fields.len == a_field_vals.len);
for (fields) |field, i| {
if (!eql(a_field_vals[i], b_field_vals[i], field.ty)) return false;
@@ -1967,11 +1975,10 @@ pub const Value = extern union {
return true;
},
.Struct => {
- // must be a struct with no fields since we checked for if
- // both have the struct tag above.
- const fields = ty.structFields().values();
- assert(fields.len == 0);
- return true;
+ // A tuple can be represented with .empty_struct_value,
+ // .the_only_possible_value, or .@"struct", in which case we could
+ // end up here, and the values are equal if the type has zero fields.
+ return ty.structFieldCount() == 0;
},
else => return order(a, b).compare(.eq),
}
@@ -2024,6 +2031,13 @@ pub const Value = extern union {
}
},
.Struct => {
+ if (ty.isTuple()) {
+ const fields = ty.tupleFields();
+ for (fields.values) |field_val, i| {
+ field_val.hash(fields.types[i], hasher);
+ }
+ return;
+ }
const fields = ty.structFields().values();
if (fields.len == 0) return;
const field_values = val.castTag(.@"struct").?.data;