Diffstat (limited to 'src')
-rw-r--r--  src/Sema.zig    148
-rw-r--r--  src/value.zig   236
2 files changed, 232 insertions, 152 deletions
diff --git a/src/Sema.zig b/src/Sema.zig
index 6fc4f85174..84907a2044 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -10433,7 +10433,13 @@ fn zirStructInitEmpty(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
}
}
-fn structInitEmpty(sema: *Sema, block: *Block, obj_ty: Type, dest_src: LazySrcLoc, init_src: LazySrcLoc) CompileError!Air.Inst.Ref {
+fn structInitEmpty(
+ sema: *Sema,
+ block: *Block,
+ obj_ty: Type,
+ dest_src: LazySrcLoc,
+ init_src: LazySrcLoc,
+) CompileError!Air.Inst.Ref {
const gpa = sema.gpa;
// This logic must be synchronized with that in `zirStructInit`.
const struct_ty = try sema.resolveTypeFields(block, dest_src, obj_ty);
@@ -10477,7 +10483,12 @@ fn zirUnionInitPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
return sema.fail(block, src, "TODO: Sema.zirUnionInitPtr", .{});
}
-fn zirStructInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: bool) CompileError!Air.Inst.Ref {
+fn zirStructInit(
+ sema: *Sema,
+ block: *Block,
+ inst: Zir.Inst.Index,
+ is_ref: bool,
+) CompileError!Air.Inst.Ref {
const gpa = sema.gpa;
const zir_datas = sema.code.instructions.items(.data);
const inst_data = zir_datas[inst].pl_node;
@@ -10612,10 +10623,6 @@ fn finishStructInit(
return sema.failWithOwnedErrorMsg(msg);
}
- if (is_ref) {
- return sema.fail(block, src, "TODO: Sema.zirStructInit is_ref=true", .{});
- }
-
const is_comptime = for (field_inits) |field_init| {
if (!(try sema.isComptimeKnown(block, src, field_init))) {
break false;
@@ -10627,10 +10634,24 @@ fn finishStructInit(
for (field_inits) |field_init, i| {
values[i] = (sema.resolveMaybeUndefVal(block, src, field_init) catch unreachable).?;
}
- return sema.addConstant(struct_ty, try Value.Tag.@"struct".create(sema.arena, values));
+ const struct_val = try Value.Tag.@"struct".create(sema.arena, values);
+ return sema.addConstantMaybeRef(block, src, struct_ty, struct_val, is_ref);
+ }
+
+ if (is_ref) {
+ const alloc = try block.addTy(.alloc, struct_ty);
+ for (field_inits) |field_init, i_usize| {
+ const i = @intCast(u32, i_usize);
+ const field_src = src;
+ const field_ptr = try sema.structFieldPtrByIndex(block, src, alloc, i, struct_obj, field_src);
+ try sema.storePtr(block, src, field_ptr, field_init);
+ }
+
+ return alloc;
}
- return sema.fail(block, src, "TODO: Sema.zirStructInit for runtime-known struct values", .{});
+ try sema.requireRuntimeBlock(block, src);
+ return block.addVectorInit(struct_ty, field_inits);
}
fn zirStructInitAnon(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: bool) CompileError!Air.Inst.Ref {
@@ -10674,51 +10695,43 @@ fn zirArrayInit(
} else null;
const runtime_src = opt_runtime_src orelse {
- var anon_decl = try block.startAnonDecl(src);
- defer anon_decl.deinit();
+ const elem_vals = try sema.arena.alloc(Value, resolved_args.len);
- const elem_vals = try anon_decl.arena().alloc(Value, resolved_args.len);
for (resolved_args) |arg, i| {
// We checked that all args are comptime above.
- const arg_val = (sema.resolveMaybeUndefVal(block, src, arg) catch unreachable).?;
- elem_vals[i] = try arg_val.copy(anon_decl.arena());
+ elem_vals[i] = (sema.resolveMaybeUndefVal(block, src, arg) catch unreachable).?;
}
- const val = try Value.Tag.array.create(anon_decl.arena(), elem_vals);
- const decl = try anon_decl.finish(try array_ty.copy(anon_decl.arena()), val);
- if (is_ref) {
- return sema.analyzeDeclRef(decl);
- } else {
- return sema.analyzeDeclVal(block, .unneeded, decl);
- }
+ const array_val = try Value.Tag.array.create(sema.arena, elem_vals);
+ return sema.addConstantMaybeRef(block, src, array_ty, array_val, is_ref);
};
try sema.requireRuntimeBlock(block, runtime_src);
try sema.resolveTypeLayout(block, src, elem_ty);
- const alloc_ty = try Type.ptr(sema.arena, .{
- .pointee_type = array_ty,
- .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
- });
- const alloc = try block.addTy(.alloc, alloc_ty);
+ if (is_ref) {
+ const alloc_ty = try Type.ptr(sema.arena, .{
+ .pointee_type = array_ty,
+ .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
+ });
+ const alloc = try block.addTy(.alloc, alloc_ty);
- const elem_ptr_ty = try Type.ptr(sema.arena, .{
- .mutable = true,
- .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
- .pointee_type = elem_ty,
- });
- const elem_ptr_ty_ref = try sema.addType(elem_ptr_ty);
+ const elem_ptr_ty = try Type.ptr(sema.arena, .{
+ .mutable = true,
+ .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
+ .pointee_type = elem_ty,
+ });
+ const elem_ptr_ty_ref = try sema.addType(elem_ptr_ty);
- for (resolved_args) |arg, i| {
- const index = try sema.addIntUnsigned(Type.u64, i);
- const elem_ptr = try block.addPtrElemPtrTypeRef(alloc, index, elem_ptr_ty_ref);
- _ = try block.addBinOp(.store, elem_ptr, arg);
- }
- if (is_ref) {
+ for (resolved_args) |arg, i| {
+ const index = try sema.addIntUnsigned(Type.u64, i);
+ const elem_ptr = try block.addPtrElemPtrTypeRef(alloc, index, elem_ptr_ty_ref);
+ _ = try block.addBinOp(.store, elem_ptr, arg);
+ }
return alloc;
- } else {
- return sema.analyzeLoad(block, .unneeded, alloc, .unneeded);
}
+
+ return block.addVectorInit(array_ty, resolved_args);
}
fn zirArrayInitAnon(
@@ -10758,17 +10771,11 @@ fn zirArrayInitAnon(
const runtime_src = opt_runtime_src orelse {
const tuple_val = try Value.Tag.@"struct".create(sema.arena, values);
- if (!is_ref) return sema.addConstant(tuple_ty, tuple_val);
-
- var anon_decl = try block.startAnonDecl(src);
- defer anon_decl.deinit();
- const decl = try anon_decl.finish(
- try tuple_ty.copy(anon_decl.arena()),
- try tuple_val.copy(anon_decl.arena()),
- );
- return sema.analyzeDeclRef(decl);
+ return sema.addConstantMaybeRef(block, src, tuple_ty, tuple_val, is_ref);
};
+ try sema.requireRuntimeBlock(block, runtime_src);
+
if (is_ref) {
const alloc = try block.addTy(.alloc, tuple_ty);
for (operands) |operand, i_usize| {
@@ -10790,10 +10797,28 @@ fn zirArrayInitAnon(
element_refs[i] = sema.resolveInst(operand);
}
- try sema.requireRuntimeBlock(block, runtime_src);
return block.addVectorInit(tuple_ty, element_refs);
}
+fn addConstantMaybeRef(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ ty: Type,
+ val: Value,
+ is_ref: bool,
+) !Air.Inst.Ref {
+ if (!is_ref) return sema.addConstant(ty, val);
+
+ var anon_decl = try block.startAnonDecl(src);
+ defer anon_decl.deinit();
+ const decl = try anon_decl.finish(
+ try ty.copy(anon_decl.arena()),
+ try val.copy(anon_decl.arena()),
+ );
+ return sema.analyzeDeclRef(decl);
+}
+
fn zirFieldTypeRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
@@ -13444,18 +13469,30 @@ fn structFieldPtr(
field_name_src: LazySrcLoc,
unresolved_struct_ty: Type,
) CompileError!Air.Inst.Ref {
- const arena = sema.arena;
assert(unresolved_struct_ty.zigTypeTag() == .Struct);
- const struct_ptr_ty = sema.typeOf(struct_ptr);
const struct_ty = try sema.resolveTypeFields(block, src, unresolved_struct_ty);
const struct_obj = struct_ty.castTag(.@"struct").?.data;
const field_index_big = struct_obj.fields.getIndex(field_name) orelse
return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name);
const field_index = @intCast(u32, field_index_big);
+
+ return sema.structFieldPtrByIndex(block, src, struct_ptr, field_index, struct_obj, field_name_src);
+}
+
+fn structFieldPtrByIndex(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ struct_ptr: Air.Inst.Ref,
+ field_index: u32,
+ struct_obj: *Module.Struct,
+ field_src: LazySrcLoc,
+) CompileError!Air.Inst.Ref {
const field = struct_obj.fields.values()[field_index];
+ const struct_ptr_ty = sema.typeOf(struct_ptr);
var ptr_ty_data: Type.Payload.Pointer.Data = .{
.pointee_type = field.ty,
.mutable = struct_ptr_ty.ptrIsMutable(),
@@ -13470,7 +13507,7 @@ fn structFieldPtr(
var offset: u64 = 0;
var running_bits: u16 = 0;
for (struct_obj.fields.values()) |f, i| {
- if (!(try sema.typeHasRuntimeBits(block, field_name_src, f.ty))) continue;
+ if (!(try sema.typeHasRuntimeBits(block, field_src, f.ty))) continue;
const field_align = f.packedAlignment();
if (field_align == 0) {
@@ -13509,12 +13546,12 @@ fn structFieldPtr(
const int_ty: Type = .{ .ptr_otherwise = &int_payload.base };
ptr_ty_data.host_size = @intCast(u16, int_ty.abiSize(target));
}
- const ptr_field_ty = try Type.ptr(arena, ptr_ty_data);
+ const ptr_field_ty = try Type.ptr(sema.arena, ptr_ty_data);
if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| {
return sema.addConstant(
ptr_field_ty,
- try Value.Tag.field_ptr.create(arena, .{
+ try Value.Tag.field_ptr.create(sema.arena, .{
.container_ptr = struct_ptr_val,
.field_index = field_index,
}),
@@ -13546,6 +13583,9 @@ fn structFieldVal(
if (try sema.resolveMaybeUndefVal(block, src, struct_byval)) |struct_val| {
if (struct_val.isUndef()) return sema.addConstUndef(field.ty);
+ if ((try sema.typeHasOnePossibleValue(block, src, field.ty))) |opv| {
+ return sema.addConstant(field.ty, opv);
+ }
const field_values = struct_val.castTag(.@"struct").?.data;
return sema.addConstant(field.ty, field_values[field_index]);
diff --git a/src/value.zig b/src/value.zig
index 39cb1a4dbc..9d9895a6e0 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -1530,60 +1530,69 @@ pub const Value = extern union {
const b_tag = b.tag();
assert(a_tag != .undef);
assert(b_tag != .undef);
- if (a_tag == b_tag) {
- switch (a_tag) {
- .void_value, .null_value, .the_only_possible_value => return true,
- .enum_literal => {
- const a_name = a.castTag(.enum_literal).?.data;
- const b_name = b.castTag(.enum_literal).?.data;
- return std.mem.eql(u8, a_name, b_name);
- },
- .enum_field_index => {
- const a_field_index = a.castTag(.enum_field_index).?.data;
- const b_field_index = b.castTag(.enum_field_index).?.data;
- return a_field_index == b_field_index;
- },
- .opt_payload => {
- const a_payload = a.castTag(.opt_payload).?.data;
- const b_payload = b.castTag(.opt_payload).?.data;
- var buffer: Type.Payload.ElemType = undefined;
- return eql(a_payload, b_payload, ty.optionalChild(&buffer));
- },
- .slice => {
- const a_payload = a.castTag(.slice).?.data;
- const b_payload = b.castTag(.slice).?.data;
- if (!eql(a_payload.len, b_payload.len, Type.usize)) return false;
+ if (a_tag == b_tag) switch (a_tag) {
+ .void_value, .null_value, .the_only_possible_value => return true,
+ .enum_literal => {
+ const a_name = a.castTag(.enum_literal).?.data;
+ const b_name = b.castTag(.enum_literal).?.data;
+ return std.mem.eql(u8, a_name, b_name);
+ },
+ .enum_field_index => {
+ const a_field_index = a.castTag(.enum_field_index).?.data;
+ const b_field_index = b.castTag(.enum_field_index).?.data;
+ return a_field_index == b_field_index;
+ },
+ .opt_payload => {
+ const a_payload = a.castTag(.opt_payload).?.data;
+ const b_payload = b.castTag(.opt_payload).?.data;
+ var buffer: Type.Payload.ElemType = undefined;
+ return eql(a_payload, b_payload, ty.optionalChild(&buffer));
+ },
+ .slice => {
+ const a_payload = a.castTag(.slice).?.data;
+ const b_payload = b.castTag(.slice).?.data;
+ if (!eql(a_payload.len, b_payload.len, Type.usize)) return false;
- var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const ptr_ty = ty.slicePtrFieldType(&ptr_buf);
+ var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
+ const ptr_ty = ty.slicePtrFieldType(&ptr_buf);
- return eql(a_payload.ptr, b_payload.ptr, ptr_ty);
- },
- .elem_ptr => @panic("TODO: Implement more pointer eql cases"),
- .field_ptr => @panic("TODO: Implement more pointer eql cases"),
- .eu_payload_ptr => @panic("TODO: Implement more pointer eql cases"),
- .opt_payload_ptr => @panic("TODO: Implement more pointer eql cases"),
- .array => {
- const a_array = a.castTag(.array).?.data;
- const b_array = b.castTag(.array).?.data;
-
- if (a_array.len != b_array.len) return false;
-
- const elem_ty = ty.childType();
- for (a_array) |a_elem, i| {
- const b_elem = b_array[i];
-
- if (!eql(a_elem, b_elem, elem_ty)) return false;
- }
- return true;
- },
- .function => {
- const a_payload = a.castTag(.function).?.data;
- const b_payload = b.castTag(.function).?.data;
- return a_payload == b_payload;
- },
- else => {},
- }
+ return eql(a_payload.ptr, b_payload.ptr, ptr_ty);
+ },
+ .elem_ptr => @panic("TODO: Implement more pointer eql cases"),
+ .field_ptr => @panic("TODO: Implement more pointer eql cases"),
+ .eu_payload_ptr => @panic("TODO: Implement more pointer eql cases"),
+ .opt_payload_ptr => @panic("TODO: Implement more pointer eql cases"),
+ .array => {
+ const a_array = a.castTag(.array).?.data;
+ const b_array = b.castTag(.array).?.data;
+
+ if (a_array.len != b_array.len) return false;
+
+ const elem_ty = ty.childType();
+ for (a_array) |a_elem, i| {
+ const b_elem = b_array[i];
+
+ if (!eql(a_elem, b_elem, elem_ty)) return false;
+ }
+ return true;
+ },
+ .function => {
+ const a_payload = a.castTag(.function).?.data;
+ const b_payload = b.castTag(.function).?.data;
+ return a_payload == b_payload;
+ },
+ .@"struct" => {
+ const fields = ty.structFields().values();
+ const a_field_vals = a.castTag(.@"struct").?.data;
+ const b_field_vals = b.castTag(.@"struct").?.data;
+ assert(a_field_vals.len == b_field_vals.len);
+ assert(fields.len == a_field_vals.len);
+ for (fields) |field, i| {
+ if (!eql(a_field_vals[i], b_field_vals[i], field.ty)) return false;
+ }
+ return true;
+ },
+ else => {},
} else if (a_tag == .null_value or b_tag == .null_value) {
return false;
}
@@ -1628,6 +1637,13 @@ pub const Value = extern union {
}
return true;
},
+ .Struct => {
+ // Must be a struct with no fields, since we already checked
+ // above whether both values have the struct tag.
+ const fields = ty.structFields().values();
+ assert(fields.len == 0);
+ return true;
+ },
else => return order(a, b).compare(.eq),
}
}
@@ -1651,31 +1667,13 @@ pub const Value = extern union {
var buf: ToTypeBuffer = undefined;
return val.toType(&buf).hashWithHasher(hasher);
},
- .Bool => {
- std.hash.autoHash(hasher, val.toBool());
- },
- .Int, .ComptimeInt => {
- var space: BigIntSpace = undefined;
- const big = val.toBigInt(&space);
- std.hash.autoHash(hasher, big.positive);
- for (big.limbs) |limb| {
- std.hash.autoHash(hasher, limb);
- }
- },
.Float, .ComptimeFloat => {
// TODO double check the lang spec. should we do bitwise hashing here,
// or a hash that normalizes the float value?
const float = val.toFloat(f128);
std.hash.autoHash(hasher, @bitCast(u128, float));
},
- .Pointer => switch (val.tag()) {
- .decl_ref_mut,
- .extern_fn,
- .decl_ref,
- .function,
- .variable,
- => std.hash.autoHash(hasher, val.pointerDecl().?),
-
+ .Bool, .Int, .ComptimeInt, .Pointer => switch (val.tag()) {
.slice => {
const slice = val.castTag(.slice).?.data;
var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
@@ -1684,22 +1682,7 @@ pub const Value = extern union {
hash(slice.len, Type.usize, hasher);
},
- // For these, hash them as hash of a pointer to the decl,
- // combined with a hash of the byte offset from the decl.
- .elem_ptr => @panic("TODO: Implement more pointer hashing cases"),
- .field_ptr => @panic("TODO: Implement more pointer hashing cases"),
- .eu_payload_ptr => @panic("TODO: Implement more pointer hashing cases"),
- .opt_payload_ptr => @panic("TODO: Implement more pointer hashing cases"),
-
- .zero,
- .one,
- .int_u64,
- .int_i64,
- .int_big_positive,
- .int_big_negative,
- => @panic("TODO: Implement pointer hashing for int pointers"),
-
- else => unreachable,
+ else => return hashPtr(val, hasher),
},
.Array, .Vector => {
const len = ty.arrayLen();
@@ -1739,14 +1722,7 @@ pub const Value = extern union {
.Enum => {
var enum_space: Payload.U64 = undefined;
const int_val = val.enumToInt(ty, &enum_space);
-
- var space: BigIntSpace = undefined;
- const big = int_val.toBigInt(&space);
-
- std.hash.autoHash(hasher, big.positive);
- for (big.limbs) |limb| {
- std.hash.autoHash(hasher, limb);
- }
+ hashInt(int_val, hasher);
},
.Union => {
const union_obj = val.cast(Payload.Union).?.data;
@@ -1757,8 +1733,12 @@ pub const Value = extern union {
union_obj.val.hash(active_field_ty, hasher);
},
.Fn => {
- const func = val.castTag(.function).?.data;
- return std.hash.autoHash(hasher, func.owner_decl);
+ const func: *Module.Fn = val.castTag(.function).?.data;
+ // Note that this hashes the *Fn rather than the *Decl. This is
+ // to differentiate function bodies from function pointers.
+ // This is currently redundant since we already hash the zig type tag
+ // at the top of this function.
+ std.hash.autoHash(hasher, func);
},
.Frame => {
@panic("TODO implement hashing frame values");
@@ -1824,6 +1804,65 @@ pub const Value = extern union {
};
}
+ fn hashInt(int_val: Value, hasher: *std.hash.Wyhash) void {
+ var buffer: BigIntSpace = undefined;
+ const big = int_val.toBigInt(&buffer);
+ std.hash.autoHash(hasher, big.positive);
+ for (big.limbs) |limb| {
+ std.hash.autoHash(hasher, limb);
+ }
+ }
+
+ fn hashPtr(ptr_val: Value, hasher: *std.hash.Wyhash) void {
+ switch (ptr_val.tag()) {
+ .decl_ref,
+ .decl_ref_mut,
+ .extern_fn,
+ .function,
+ .variable,
+ => {
+ const decl: *Module.Decl = ptr_val.pointerDecl().?;
+ std.hash.autoHash(hasher, decl);
+ },
+
+ .elem_ptr => {
+ const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
+ hashPtr(elem_ptr.array_ptr, hasher);
+ std.hash.autoHash(hasher, Value.Tag.elem_ptr);
+ std.hash.autoHash(hasher, elem_ptr.index);
+ },
+ .field_ptr => {
+ const field_ptr = ptr_val.castTag(.field_ptr).?.data;
+ std.hash.autoHash(hasher, Value.Tag.field_ptr);
+ hashPtr(field_ptr.container_ptr, hasher);
+ std.hash.autoHash(hasher, field_ptr.field_index);
+ },
+ .eu_payload_ptr => {
+ const err_union_ptr = ptr_val.castTag(.eu_payload_ptr).?.data;
+ std.hash.autoHash(hasher, Value.Tag.eu_payload_ptr);
+ hashPtr(err_union_ptr, hasher);
+ },
+ .opt_payload_ptr => {
+ const opt_ptr = ptr_val.castTag(.opt_payload_ptr).?.data;
+ std.hash.autoHash(hasher, Value.Tag.opt_payload_ptr);
+ hashPtr(opt_ptr, hasher);
+ },
+
+ .zero,
+ .one,
+ .int_u64,
+ .int_i64,
+ .int_big_positive,
+ .int_big_negative,
+ .bool_false,
+ .bool_true,
+ .the_only_possible_value,
+ => return hashInt(ptr_val, hasher),
+
+ else => unreachable,
+ }
+ }
+
pub fn markReferencedDeclsAlive(val: Value) void {
switch (val.tag()) {
.decl_ref_mut => return val.castTag(.decl_ref_mut).?.data.decl.markAlive(),
@@ -1876,7 +1915,8 @@ pub const Value = extern union {
pub fn slicePtr(val: Value) Value {
return switch (val.tag()) {
.slice => val.castTag(.slice).?.data.ptr,
- .decl_ref, .decl_ref_mut => val,
+ // TODO this should require being a slice tag, and not allow decl_ref, field_ptr, etc.
+ .decl_ref, .decl_ref_mut, .field_ptr, .elem_ptr => val,
else => unreachable,
};
}