Diffstat (limited to 'src')
-rw-r--r--  src/AstGen.zig   32
-rw-r--r--  src/Sema.zig    124
-rw-r--r--  src/Zir.zig      13
3 files changed, 88 insertions, 81 deletions
diff --git a/src/AstGen.zig b/src/AstGen.zig
index dbb998dc71..74e3ae17b1 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -1964,11 +1964,7 @@ fn labeledBlockExpr(
},
.break_operand => {
// All break operands are values that did not use the result location pointer.
- if (strat.elide_store_to_block_ptr_instructions) {
- try block_scope.setBlockBodyEliding(block_inst);
- } else {
- try block_scope.setBlockBody(block_inst);
- }
+ try block_scope.setBlockBody(block_inst);
const block_ref = indexToRef(block_inst);
switch (rl) {
.ref => return block_ref,
@@ -9734,32 +9730,6 @@ const GenZir = struct {
gz.unstack();
}

- /// Same as `setBlockBody` except we don't copy instructions which are
- /// `store_to_block_ptr` instructions with lhs set to .none.
- /// Assumes nothing stacked on `gz`. Unstacks `gz`.
- fn setBlockBodyEliding(gz: *GenZir, inst: Zir.Inst.Index) !void {
- const gpa = gz.astgen.gpa;
- const body = gz.instructionsSlice();
- try gz.astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.Block).Struct.fields.len + body.len);
- const zir_datas = gz.astgen.instructions.items(.data);
- const zir_tags = gz.astgen.instructions.items(.tag);
- const block_pl_index = gz.astgen.addExtraAssumeCapacity(Zir.Inst.Block{
- .body_len = @intCast(u32, body.len),
- });
- zir_datas[inst].pl_node.payload_index = block_pl_index;
- for (body) |sub_inst| {
- if (zir_tags[sub_inst] == .store_to_block_ptr and
- zir_datas[sub_inst].bin.lhs == .none)
- {
- // Decrement `body_len`.
- gz.astgen.extra.items[block_pl_index] -= 1;
- continue;
- }
- gz.astgen.extra.appendAssumeCapacity(sub_inst);
- }
- gz.unstack();
- }
-
/// Supports `body_gz` stacked on `ret_gz` stacked on `gz`. Unstacks `body_gz` and `ret_gz`.
fn addFunc(gz: *GenZir, args: struct {
src_node: Ast.Node.Index,
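Context for the AstGen.zig change above: with `setBlockBodyEliding` gone, `labeledBlockExpr` always emits the full block body, including any `store_to_block_ptr` instructions whose LHS is `.none`; skipping those is now Sema's job. A minimal sketch of the kind of source that takes this path (a hypothetical example, not code from this commit):

    // A labeled block whose breaks carry operands. AstGen lowers this
    // through labeledBlockExpr; after this change the block body is
    // emitted as-is rather than being filtered during AstGen.
    fn pick(cond: bool) u32 {
        return blk: {
            if (cond) break :blk @as(u32, 1);
            break :blk 2;
        };
    }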
diff --git a/src/Sema.zig b/src/Sema.zig
index b79414002a..3ba9761cd8 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -3247,22 +3247,31 @@ fn zirStoreToBlockPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
defer tracy.end();

const bin_inst = sema.code.instructions.items(.data)[inst].bin;
- const ptr = sema.inst_map.get(@enumToInt(bin_inst.lhs) - @as(u32, Zir.Inst.Ref.typed_value_map.len)) orelse {
+ const ptr = sema.inst_map.get(Zir.refToIndex(bin_inst.lhs).?) orelse {
// This is an elided instruction, but AstGen was unable to omit it.
return;
};
- const value = sema.resolveInst(bin_inst.rhs);
- const ptr_ty = try Type.ptr(sema.arena, .{
- .pointee_type = sema.typeOf(value),
- // TODO figure out which address space is appropriate here
- .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
- });
- // TODO detect when this store should be done at compile-time. For example,
- // if expressions should force it when the condition is compile-time known.
- const src: LazySrcLoc = .unneeded;
- try sema.requireRuntimeBlock(block, src);
- const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr);
- return sema.storePtr(block, src, bitcasted_ptr, value);
+ const operand = sema.resolveInst(bin_inst.rhs);
+ const src: LazySrcLoc = sema.src;
+ blk: {
+ const ptr_inst = Air.refToIndex(ptr) orelse break :blk;
+ if (sema.air_instructions.items(.tag)[ptr_inst] != .constant) break :blk;
+ const air_datas = sema.air_instructions.items(.data);
+ const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload];
+ switch (ptr_val.tag()) {
+ .inferred_alloc_comptime => {
+ const iac = ptr_val.castTag(.inferred_alloc_comptime).?;
+ return sema.storeToInferredAllocComptime(block, src, operand, iac);
+ },
+ .inferred_alloc => {
+ const inferred_alloc = ptr_val.castTag(.inferred_alloc).?;
+ return sema.storeToInferredAlloc(block, src, ptr, operand, inferred_alloc);
+ },
+ else => break :blk,
+ }
+ }
+
+ return sema.storePtr(block, src, ptr, operand);
}

fn zirStoreToInferredPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
@@ -3273,46 +3282,71 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compi
const bin_inst = sema.code.instructions.items(.data)[inst].bin;
const ptr = sema.resolveInst(bin_inst.lhs);
const operand = sema.resolveInst(bin_inst.rhs);
- const operand_ty = sema.typeOf(operand);
const ptr_inst = Air.refToIndex(ptr).?;
assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant);
const air_datas = sema.air_instructions.items(.data);
const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload];

- if (ptr_val.castTag(.inferred_alloc_comptime)) |iac| {
- // There will be only one store_to_inferred_ptr because we are running at comptime.
- // The alloc will turn into a Decl.
- if (try sema.resolveMaybeUndefValAllowVariables(block, src, operand)) |operand_val| {
- if (operand_val.tag() == .variable) {
- return sema.failWithNeededComptime(block, src);
- }
- var anon_decl = try block.startAnonDecl(src);
- defer anon_decl.deinit();
- iac.data.decl = try anon_decl.finish(
- try operand_ty.copy(anon_decl.arena()),
- try operand_val.copy(anon_decl.arena()),
- );
- // TODO set the alignment on the decl
- return;
- } else {
- return sema.failWithNeededComptime(block, src);
- }
+ switch (ptr_val.tag()) {
+ .inferred_alloc_comptime => {
+ const iac = ptr_val.castTag(.inferred_alloc_comptime).?;
+ return sema.storeToInferredAllocComptime(block, src, operand, iac);
+ },
+ .inferred_alloc => {
+ const inferred_alloc = ptr_val.castTag(.inferred_alloc).?;
+ return sema.storeToInferredAlloc(block, src, ptr, operand, inferred_alloc);
+ },
+ else => unreachable,
}
+}

- if (ptr_val.castTag(.inferred_alloc)) |inferred_alloc| {
- // Add the stored instruction to the set we will use to resolve peer types
- // for the inferred allocation.
- try inferred_alloc.data.stored_inst_list.append(sema.arena, operand);
- // Create a runtime bitcast instruction with exactly the type the pointer wants.
- const ptr_ty = try Type.ptr(sema.arena, .{
- .pointee_type = operand_ty,
- .@"align" = inferred_alloc.data.alignment,
- .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
- });
- const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr);
- return sema.storePtr(block, src, bitcasted_ptr, operand);
+fn storeToInferredAlloc(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ ptr: Air.Inst.Ref,
+ operand: Air.Inst.Ref,
+ inferred_alloc: *Value.Payload.InferredAlloc,
+) CompileError!void {
+ const operand_ty = sema.typeOf(operand);
+ // Add the stored instruction to the set we will use to resolve peer types
+ // for the inferred allocation.
+ try inferred_alloc.data.stored_inst_list.append(sema.arena, operand);
+ // Create a runtime bitcast instruction with exactly the type the pointer wants.
+ const ptr_ty = try Type.ptr(sema.arena, .{
+ .pointee_type = operand_ty,
+ .@"align" = inferred_alloc.data.alignment,
+ .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
+ });
+ const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr);
+ return sema.storePtr(block, src, bitcasted_ptr, operand);
+}
+
+fn storeToInferredAllocComptime(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ operand: Air.Inst.Ref,
+ iac: *Value.Payload.InferredAllocComptime,
+) CompileError!void {
+ const operand_ty = sema.typeOf(operand);
+ // There will be only one store_to_inferred_ptr because we are running at comptime.
+ // The alloc will turn into a Decl.
+ if (try sema.resolveMaybeUndefValAllowVariables(block, src, operand)) |operand_val| {
+ if (operand_val.tag() == .variable) {
+ return sema.failWithNeededComptime(block, src);
+ }
+ var anon_decl = try block.startAnonDecl(src);
+ defer anon_decl.deinit();
+ iac.data.decl = try anon_decl.finish(
+ try operand_ty.copy(anon_decl.arena()),
+ try operand_val.copy(anon_decl.arena()),
+ );
+ // TODO set the alignment on the decl
+ return;
+ } else {
+ return sema.failWithNeededComptime(block, src);
}
- unreachable;
}

fn zirSetEvalBranchQuota(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
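Context for the Sema.zig changes above: the inferred-allocation store logic is factored out of `zirStoreToInferredPtr` into `storeToInferredAlloc` and `storeToInferredAllocComptime`, so that `zirStoreToBlockPtr` can dispatch to the same helpers when its pointer turns out to be an inferred allocation, instead of unconditionally bitcasting and storing. A minimal sketch of the source pattern these paths serve (a hypothetical example, not code from this commit):

    const std = @import("std");

    test "store through an inferred allocation" {
        // `var x = ...` with no explicit type lowers to an inferred
        // allocation; the initializing store goes through the
        // inferred-pointer path, and the stored operand's type is
        // recorded for resolving the allocation's final type.
        var x = @as(u32, 41);
        x += 1;
        try std.testing.expect(x == 42);
    }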
diff --git a/src/Zir.zig b/src/Zir.zig
index 49541283d3..c74b2c1d8f 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -518,11 +518,14 @@ pub const Inst = struct {
/// Same as `store` except provides a source location.
/// Uses the `pl_node` union field. Payload is `Bin`.
store_node,
- /// Same as `store` but the type of the value being stored will be used to infer
- /// the block type. The LHS is the pointer to store to.
- /// Uses the `bin` union field.
- /// If the pointer is none, it means this instruction has been elided in
- /// AstGen, but AstGen was unable to actually omit it from the ZIR code.
+ /// This instruction is not really supposed to be emitted from AstGen; nevertheless it
+ /// is sometimes emitted due to deficiencies in AstGen. When Sema sees this instruction,
+ /// it must clean up after AstGen's mess by looking at various context clues and
+ /// then treating it as one of the following:
+ /// * no-op
+ /// * store_to_inferred_ptr
+ /// * store
+ /// Uses the `bin` union field with LHS as the pointer to store to.
store_to_block_ptr,
/// Same as `store` but the type of the value being stored will be used to infer
/// the pointer type.
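To illustrate the reworded `store_to_block_ptr` doc comment: in a sketch like the following (hypothetical, not from this commit), the branch results of the `if` expression break to an enclosing block, and when a result location pointer is involved those breaks can lower to `store_to_block_ptr` instructions that Sema must later reclassify as a no-op, a store to an inferred allocation, or an ordinary store:

    fn sign(x: i32) i32 {
        // The if-else chain is an expression with a result location;
        // its branch results may be stored through a block pointer
        // during ZIR lowering.
        const s: i32 = if (x < 0) -1 else if (x > 0) 1 else 0;
        return s;
    }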