about summary refs log tree commit diff
path: root/src
diff options
context:
space:
mode:
authorAndrew Kelley <andrew@ziglang.org>2024-12-09 18:04:26 -0500
committerGitHub <noreply@github.com>2024-12-09 18:04:26 -0500
commit8245d7fac0400d7e9de2a6fd4cfbc3609ad0f201 (patch)
treeb7d46c56f96bbf32397b71479c85f3e363e48672 /src
parent3aab81b0abe2efeab8a2c7cdae2146252b4f703f (diff)
parent135c733eefa8cd43e84e7b39aa292359c680fe1e (diff)
downloadzig-8245d7fac0400d7e9de2a6fd4cfbc3609ad0f201.tar.gz
zig-8245d7fac0400d7e9de2a6fd4cfbc3609ad0f201.zip
Merge pull request #22164 from mlugg/astgen-ref-dedup
AstGen: correctly deduplicate `ref` of `param` and `alloc_inferred`
Diffstat (limited to 'src')
-rw-r--r--src/InternPool.zig4
-rw-r--r--src/Sema.zig46
-rw-r--r--src/codegen/c.zig84
3 files changed, 101 insertions, 33 deletions
diff --git a/src/InternPool.zig b/src/InternPool.zig
index 63cdd7cec8..bbbcb953f6 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -314,7 +314,9 @@ pub fn rehashTrackedInsts(
// We know how big each shard must be, so ensure we have the capacity we need.
for (ip.shards) |*shard| {
- const want_capacity = std.math.ceilPowerOfTwo(u32, shard.mutate.tracked_inst_map.len * 5 / 3) catch unreachable;
+ const want_capacity = if (shard.mutate.tracked_inst_map.len == 0) 0 else cap: {
+ break :cap std.math.ceilPowerOfTwo(u32, shard.mutate.tracked_inst_map.len * 5 / 3) catch unreachable;
+ };
const have_capacity = shard.shared.tracked_inst_map.header().capacity; // no acquire because we hold the mutex
if (have_capacity >= want_capacity) {
@memset(shard.shared.tracked_inst_map.entries[0..have_capacity], .{ .value = .none, .hash = undefined });
diff --git a/src/Sema.zig b/src/Sema.zig
index 33e2436b90..1e50a73978 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -1051,6 +1051,7 @@ fn analyzeBodyInner(
.alloc_inferred_mut => try sema.zirAllocInferred(block, false),
.alloc_inferred_comptime => try sema.zirAllocInferredComptime(true),
.alloc_inferred_comptime_mut => try sema.zirAllocInferredComptime(false),
+ .resolve_inferred_alloc => try sema.zirResolveInferredAlloc(block, inst),
.alloc_mut => try sema.zirAllocMut(block, inst),
.alloc_comptime_mut => try sema.zirAllocComptime(block, inst),
.make_ptr_const => try sema.zirMakePtrConst(block, inst),
@@ -1418,11 +1419,6 @@ fn analyzeBodyInner(
i += 1;
continue;
},
- .resolve_inferred_alloc => {
- try sema.zirResolveInferredAlloc(block, inst);
- i += 1;
- continue;
- },
.validate_struct_init_ty => {
try sema.zirValidateStructInitTy(block, inst, false);
i += 1;
@@ -4158,7 +4154,7 @@ fn zirAllocInferred(
return result_index.toRef();
}
-fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
+fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -4175,7 +4171,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
switch (sema.air_instructions.items(.tag)[@intFromEnum(ptr_inst)]) {
.inferred_alloc_comptime => {
// The work was already done for us by `Sema.storeToInferredAllocComptime`.
- // All we need to do is remap the pointer.
+ // All we need to do is return the pointer.
const iac = sema.air_instructions.items(.data)[@intFromEnum(ptr_inst)].inferred_alloc_comptime;
const resolved_ptr = iac.ptr;
@@ -4200,8 +4196,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
}
}
- // Remap the ZIR operand to the resolved pointer value
- sema.inst_map.putAssumeCapacity(inst_data.operand.toIndex().?, Air.internedToRef(resolved_ptr));
+ return Air.internedToRef(resolved_ptr);
},
.inferred_alloc => {
const ia1 = sema.air_instructions.items(.data)[@intFromEnum(ptr_inst)].inferred_alloc;
@@ -4228,15 +4223,12 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
const const_ptr_ty = try sema.makePtrTyConst(final_ptr_ty);
const new_const_ptr = try pt.getCoerced(Value.fromInterned(ptr_val), const_ptr_ty);
- // Remap the ZIR operand to the resolved pointer value
- sema.inst_map.putAssumeCapacity(inst_data.operand.toIndex().?, Air.internedToRef(new_const_ptr.toIntern()));
-
// Unless the block is comptime, `alloc_inferred` always produces
// a runtime constant. The final inferred type needs to be
// fully resolved so it can be lowered in codegen.
try final_elem_ty.resolveFully(pt);
- return;
+ return Air.internedToRef(new_const_ptr.toIntern());
}
if (try final_elem_ty.comptimeOnlySema(pt)) {
@@ -4255,11 +4247,6 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
.data = .{ .ty = final_ptr_ty },
});
- if (ia1.is_const) {
- // Remap the ZIR operand to the pointer const
- sema.inst_map.putAssumeCapacity(inst_data.operand.toIndex().?, try sema.makePtrConst(block, ptr));
- }
-
// Now we need to go back over all the store instructions, and do the logic as if
// the new result ptr type was available.
@@ -4297,6 +4284,12 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
});
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(replacement_block.instructions.items));
}
+
+ if (ia1.is_const) {
+ return sema.makePtrConst(block, ptr);
+ } else {
+ return ptr;
+ }
},
else => unreachable,
}
@@ -33044,7 +33037,9 @@ fn analyzeRef(
}
}
- try sema.requireRuntimeBlock(block, src, null);
+ // No `requireRuntimeBlock`; it's okay to `ref` to a runtime value in a comptime context,
+ // it's just that we can only use the *type* of the result, since the value is runtime-known.
+
const address_space = target_util.defaultAddressSpace(zcu.getTarget(), .local);
const ptr_type = try pt.ptrTypeSema(.{
.child = operand_ty.toIntern(),
@@ -33058,10 +33053,17 @@ fn analyzeRef(
.flags = .{ .address_space = address_space },
});
const alloc = try block.addTy(.alloc, mut_ptr_type);
- try sema.storePtr(block, src, alloc, operand);
- // TODO: Replace with sema.coerce when that supports adding pointer constness.
- return sema.bitCast(block, ptr_type, alloc, src, null);
+ // In a comptime context, the store would fail, since the operand is runtime-known. But that's
+ // okay; we don't actually need this store to succeed, since we're creating a runtime value in a
+ // comptime scope, so the value can never be used aside from to get its type.
+ if (!block.is_comptime) {
+ try sema.storePtr(block, src, alloc, operand);
+ }
+
+ // Cast to the constant pointer type. We do this directly rather than going via `coerce` to
+ // avoid errors in the `block.is_comptime` case.
+ return block.addBitCast(ptr_type, alloc);
}
fn analyzeLoad(
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 56466b4395..c3e3c7fbdc 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -48,6 +48,60 @@ pub const CValue = union(enum) {
payload_identifier: []const u8,
/// Rendered with fmtCTypePoolString
ctype_pool_string: CType.Pool.String,
+
+ fn eql(lhs: CValue, rhs: CValue) bool {
+ return switch (lhs) {
+ .none => rhs == .none,
+ .new_local, .local => |lhs_local| switch (rhs) {
+ .new_local, .local => |rhs_local| lhs_local == rhs_local,
+ else => false,
+ },
+ .local_ref => |lhs_local| switch (rhs) {
+ .local_ref => |rhs_local| lhs_local == rhs_local,
+ else => false,
+ },
+ .constant => |lhs_val| switch (rhs) {
+ .constant => |rhs_val| lhs_val.toIntern() == rhs_val.toIntern(),
+ else => false,
+ },
+ .arg => |lhs_arg_index| switch (rhs) {
+ .arg => |rhs_arg_index| lhs_arg_index == rhs_arg_index,
+ else => false,
+ },
+ .arg_array => |lhs_arg_index| switch (rhs) {
+ .arg_array => |rhs_arg_index| lhs_arg_index == rhs_arg_index,
+ else => false,
+ },
+ .field => |lhs_field_index| switch (rhs) {
+ .field => |rhs_field_index| lhs_field_index == rhs_field_index,
+ else => false,
+ },
+ .nav => |lhs_nav| switch (rhs) {
+ .nav => |rhs_nav| lhs_nav == rhs_nav,
+ else => false,
+ },
+ .nav_ref => |lhs_nav| switch (rhs) {
+ .nav_ref => |rhs_nav| lhs_nav == rhs_nav,
+ else => false,
+ },
+ .undef => |lhs_ty| switch (rhs) {
+ .undef => |rhs_ty| lhs_ty.toIntern() == rhs_ty.toIntern(),
+ else => false,
+ },
+ .identifier => |lhs_id| switch (rhs) {
+ .identifier => |rhs_id| std.mem.eql(u8, lhs_id, rhs_id),
+ else => false,
+ },
+ .payload_identifier => |lhs_id| switch (rhs) {
+ .payload_identifier => |rhs_id| std.mem.eql(u8, lhs_id, rhs_id),
+ else => false,
+ },
+ .ctype_pool_string => |lhs_str| switch (rhs) {
+ .ctype_pool_string => |rhs_str| lhs_str.index == rhs_str.index,
+ else => false,
+ },
+ };
+ }
};
const BlockData = struct {
@@ -4219,17 +4273,23 @@ fn airCmpOp(
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
const v = try Vectorize.start(f, inst, writer, lhs_ty);
+ const a = try Assignment.start(f, writer, try f.ctypeFromType(scalar_ty, .complete));
try f.writeCValue(writer, local, .Other);
try v.elem(f, writer);
- try writer.writeAll(" = ");
- if (need_cast) try writer.writeAll("(void*)");
- try f.writeCValue(writer, lhs, .Other);
- try v.elem(f, writer);
- try writer.writeAll(compareOperatorC(operator));
- if (need_cast) try writer.writeAll("(void*)");
- try f.writeCValue(writer, rhs, .Other);
- try v.elem(f, writer);
- try writer.writeAll(";\n");
+ try a.assign(f, writer);
+ if (lhs != .undef and lhs.eql(rhs)) try writer.writeAll(switch (operator) {
+ .lt, .neq, .gt => "false",
+ .lte, .eq, .gte => "true",
+ }) else {
+ if (need_cast) try writer.writeAll("(void*)");
+ try f.writeCValue(writer, lhs, .Other);
+ try v.elem(f, writer);
+ try writer.writeAll(compareOperatorC(operator));
+ if (need_cast) try writer.writeAll("(void*)");
+ try f.writeCValue(writer, rhs, .Other);
+ try v.elem(f, writer);
+ }
+ try a.end(f, writer);
try v.end(f, inst, writer);
return local;
@@ -4270,7 +4330,11 @@ fn airEquality(
try a.assign(f, writer);
const operand_ctype = try f.ctypeFromType(operand_ty, .complete);
- switch (operand_ctype.info(ctype_pool)) {
+ if (lhs != .undef and lhs.eql(rhs)) try writer.writeAll(switch (operator) {
+ .lt, .lte, .gte, .gt => unreachable,
+ .neq => "false",
+ .eq => "true",
+ }) else switch (operand_ctype.info(ctype_pool)) {
.basic, .pointer => {
try f.writeCValue(writer, lhs, .Other);
try writer.writeAll(compareOperatorC(operator));