From af8661405b908c0abfc191501a8ad1a59a54e86a Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 19 Jul 2019 16:56:44 -0400
Subject: fix usingnamespace

It used to be that usingnamespace was only allowed at top level. This made
it OK to put the state inside the AST node data structure. However, now
usingnamespace can occur inside any aggregate data structure, and therefore
the state must be in the TopLevelDeclaration rather than in the AST node.

There were two other problems with the usingnamespace implementation:

 * It was passing the wrong destination ScopeDecl, so it could cause an
   incorrect error such as "import of file outside package path".
 * Doing `usingnamespace` on a file that already had `pub usingnamespace`
   in it would "steal" the usingnamespace, causing incorrect
   "use of undeclared identifier" errors in the target file.

closes #2632
closes #2580
---
 test/compile_errors.zig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'test/compile_errors.zig')

diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index fd365235d8..40ce8d304b 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -234,7 +234,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
 
     cases.add(
         "usingnamespace with wrong type",
-        \\use void;
+        \\usingnamespace void;
     ,
         "tmp.zig:1:1: error: expected struct, enum, or union; found 'void'",
     );
-- 
cgit v1.2.3


From 19ee4957502c704312646f75544e968b618aa807 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 23 Jul 2019 19:35:41 -0400
Subject: add error for function with ccc indirectly calling async function

---
 src/all_types.hpp       |  2 +-
 src/analyze.cpp         | 56 ++++++++++++++++++++++++++++++++++++++-----------
 src/analyze.hpp         |  4 ++--
 test/compile_errors.zig | 18 ++++++++++++++++
 4 files changed, 65 insertions(+), 15 deletions(-)

(limited to 'test/compile_errors.zig')

diff --git a/src/all_types.hpp b/src/all_types.hpp
index 8991b53e64..a68f19a877 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -1342,7 +1342,6 @@ struct FnCall {
 };
 
 struct ZigFn {
-    CodeGen *codegen;
     LLVMValueRef llvm_value;
     const char *llvm_name;
     AstNode *proto_node;
@@ -1385,6 +1384,7 @@ struct ZigFn {
     AstNode *set_cold_node;
 
     const AstNode *inferred_async_node;
+    ZigFn *inferred_async_fn;
 
     ZigList export_list;
     ZigList call_list;

diff --git a/src/analyze.cpp b/src/analyze.cpp
index 3da13dcc02..fe86c613f3 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -61,14 +61,14 @@ ErrorMsg *add_token_error(CodeGen *g, ZigType *owner, Token *token, Buf *msg) {
     return err;
 }
 
-ErrorMsg *add_node_error(CodeGen *g, AstNode *node, Buf *msg) {
+ErrorMsg *add_node_error(CodeGen *g, const AstNode *node, Buf *msg) {
     Token fake_token;
     fake_token.start_line = node->line;
     fake_token.start_column = node->column;
     return add_token_error(g, node->owner, &fake_token, msg);
 }
 
-ErrorMsg *add_error_note(CodeGen *g, ErrorMsg *parent_msg, AstNode *node, Buf *msg) {
+ErrorMsg *add_error_note(CodeGen *g, ErrorMsg *parent_msg, const AstNode *node, Buf *msg) {
     Token fake_token;
     fake_token.start_line = node->line;
     fake_token.start_column = node->column;
@@ -2656,7 +2656,6 @@ ZigFn *create_fn_raw(CodeGen *g, FnInline inline_value) {
 
     fn_entry->prealloc_backward_branch_quota = default_backward_branch_quota;
 
-    fn_entry->codegen = g;
     fn_entry->analyzed_executable.backward_branch_count = &fn_entry->prealloc_bbc;
     fn_entry->analyzed_executable.backward_branch_quota = &fn_entry->prealloc_backward_branch_quota;
     fn_entry->analyzed_executable.fn_entry = fn_entry;
@@ -2784,6 +2783,7 @@
static void resolve_decl_fn(CodeGen *g, TldFn *tld_fn) { } } } else { + fn_table_entry->inferred_async_node = inferred_async_none; g->external_prototypes.put_unique(tld_fn->base.name, &tld_fn->base); } @@ -2805,14 +2805,11 @@ static void resolve_decl_fn(CodeGen *g, TldFn *tld_fn) { g->fn_defs.append(fn_table_entry); } - switch (fn_table_entry->type_entry->data.fn.fn_type_id.cc) { - case CallingConventionAsync: - fn_table_entry->inferred_async_node = fn_table_entry->proto_node; - break; - case CallingConventionUnspecified: - break; - default: - fn_table_entry->inferred_async_node = inferred_async_none; + // if the calling convention implies that it cannot be async, we save that for later + // and leave the value to be nullptr to indicate that we have not emitted possible + // compile errors for improperly calling async functions. + if (fn_table_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync) { + fn_table_entry->inferred_async_node = fn_table_entry->proto_node; } if (scope_is_root_decls(tld_fn->base.parent_scope) && @@ -3801,6 +3798,25 @@ bool fn_is_async(ZigFn *fn) { return fn->inferred_async_node != inferred_async_none; } +static void add_async_error_notes(CodeGen *g, ErrorMsg *msg, ZigFn *fn) { + assert(fn->inferred_async_node != nullptr); + assert(fn->inferred_async_node != inferred_async_checking); + assert(fn->inferred_async_node != inferred_async_none); + if (fn->inferred_async_fn != nullptr) { + ErrorMsg *new_msg = add_error_note(g, msg, fn->inferred_async_node, + buf_sprintf("async function call here")); + return add_async_error_notes(g, new_msg, fn->inferred_async_fn); + } else if (fn->inferred_async_node->type == NodeTypeFnProto) { + add_error_note(g, msg, fn->inferred_async_node, + buf_sprintf("async calling convention here")); + } else if (fn->inferred_async_node->type == NodeTypeSuspend) { + add_error_note(g, msg, fn->inferred_async_node, + buf_sprintf("suspends here")); + } else { + zig_unreachable(); + } +} + // This function resolves functions being inferred async. 
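// Editor's note (added commentary, not part of the commit): inferred_async_node
// acts as a multi-state flag here. nullptr means "not analyzed for async-ness
// yet", the inferred_async_checking sentinel guards against recursion while the
// walk below is in progress, inferred_async_none means "proven not async", and
// any other AST node records *why* the function became async (an async calling
// convention, a suspend, or a call to an async callee). add_async_error_notes()
// above follows inferred_async_fn through that chain so indirect cases produce
// a full "async function call here" note trail for the user.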
static void analyze_fn_async(CodeGen *g, ZigFn *fn) { if (fn->inferred_async_node == inferred_async_checking) { @@ -3816,6 +3832,13 @@ static void analyze_fn_async(CodeGen *g, ZigFn *fn) { return; } fn->inferred_async_node = inferred_async_checking; + + bool must_not_be_async = false; + if (fn->type_entry->data.fn.fn_type_id.cc != CallingConventionUnspecified) { + must_not_be_async = true; + fn->inferred_async_node = inferred_async_none; + } + for (size_t i = 0; i < fn->call_list.length; i += 1) { FnCall *call = &fn->call_list.at(i); if (call->callee->type_entry->data.fn.fn_type_id.cc != CallingConventionUnspecified) @@ -3828,6 +3851,15 @@ static void analyze_fn_async(CodeGen *g, ZigFn *fn) { } if (fn_is_async(call->callee)) { fn->inferred_async_node = call->source_node; + fn->inferred_async_fn = call->callee; + if (must_not_be_async) { + ErrorMsg *msg = add_node_error(g, fn->proto_node, + buf_sprintf("function with calling convention '%s' cannot be async", + calling_convention_name(fn->type_entry->data.fn.fn_type_id.cc))); + add_async_error_notes(g, msg, fn); + fn->anal_state = FnAnalStateInvalid; + return; + } resolve_async_fn_frame(g, fn); return; } @@ -4451,7 +4483,7 @@ bool generic_fn_type_id_eql(GenericFnTypeId *a, GenericFnTypeId *b) { if (a_val->special != ConstValSpecialRuntime && b_val->special != ConstValSpecialRuntime) { assert(a_val->special == ConstValSpecialStatic); assert(b_val->special == ConstValSpecialStatic); - if (!const_values_equal(a->fn_entry->codegen, a_val, b_val)) { + if (!const_values_equal(a->codegen, a_val, b_val)) { return false; } } else { diff --git a/src/analyze.hpp b/src/analyze.hpp index 50e7b72309..47ff4344ba 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -11,9 +11,9 @@ #include "all_types.hpp" void semantic_analyze(CodeGen *g); -ErrorMsg *add_node_error(CodeGen *g, AstNode *node, Buf *msg); +ErrorMsg *add_node_error(CodeGen *g, const AstNode *node, Buf *msg); ErrorMsg *add_token_error(CodeGen *g, ZigType *owner, Token *token, Buf *msg); -ErrorMsg *add_error_note(CodeGen *g, ErrorMsg *parent_msg, AstNode *node, Buf *msg); +ErrorMsg *add_error_note(CodeGen *g, ErrorMsg *parent_msg, const AstNode *node, Buf *msg); void emit_error_notes_for_ref_stack(CodeGen *g, ErrorMsg *msg); ZigType *new_type_table_entry(ZigTypeId id); ZigType *get_coro_frame_type(CodeGen *g, ZigFn *fn); diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 40ce8d304b..c4948135a0 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -2,6 +2,24 @@ const tests = @import("tests.zig"); const builtin = @import("builtin"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "function with ccc indirectly calling async function", + \\export fn entry() void { + \\ foo(); + \\} + \\fn foo() void { + \\ bar(); + \\} + \\fn bar() void { + \\ suspend; + \\} + , + "tmp.zig:1:1: error: function with calling convention 'ccc' cannot be async", + "tmp.zig:2:8: note: async function call here", + "tmp.zig:5:8: note: async function call here", + "tmp.zig:8:5: note: suspends here", + ); + cases.add( "capture group on switch prong with incompatible payload types", \\const Union = union(enum) { -- cgit v1.2.3 From d105769926fd5360a5309be3e202cc65d32ce604 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 2 Aug 2019 16:09:40 -0400 Subject: fix regressions regarding writing through const pointers --- src/all_types.hpp | 2 ++ src/ir.cpp | 34 ++++++++++++++++++---------------- test/compile_errors.zig | 16 ++++++++-------- 3 files changed, 28 
insertions(+), 24 deletions(-) (limited to 'test/compile_errors.zig') diff --git a/src/all_types.hpp b/src/all_types.hpp index a6b2bc51c3..4c3aeade9e 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -2543,6 +2543,7 @@ struct IrInstructionLoadPtrGen { struct IrInstructionStorePtr { IrInstruction base; + bool allow_write_through_const; IrInstruction *ptr; IrInstruction *value; }; @@ -3707,6 +3708,7 @@ enum ResultLocId { struct ResultLoc { ResultLocId id; bool written; + bool allow_write_through_const; IrInstruction *resolved_loc; // result ptr IrInstruction *source_instruction; IrInstruction *gen_instruction; // value to store to the result loc diff --git a/src/ir.cpp b/src/ir.cpp index de2e4e1654..65a21a418d 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -198,7 +198,7 @@ static IrInstruction *ir_analyze_unwrap_error_payload(IrAnalyze *ira, IrInstruct static IrInstruction *ir_analyze_unwrap_err_code(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *base_ptr, bool initializing); static IrInstruction *ir_analyze_store_ptr(IrAnalyze *ira, IrInstruction *source_instr, - IrInstruction *ptr, IrInstruction *uncasted_value); + IrInstruction *ptr, IrInstruction *uncasted_value, bool allow_write_through_const); static IrInstruction *ir_gen_union_init_expr(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *union_type, IrInstruction *field_name, AstNode *expr_node, LVal lval, ResultLoc *parent_result_loc); @@ -1613,7 +1613,7 @@ static IrInstruction *ir_build_unreachable(IrBuilder *irb, Scope *scope, AstNode return &unreachable_instruction->base; } -static IrInstruction *ir_build_store_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node, +static IrInstructionStorePtr *ir_build_store_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *ptr, IrInstruction *value) { IrInstructionStorePtr *instruction = ir_build_instruction(irb, scope, source_node); @@ -1625,7 +1625,7 @@ static IrInstruction *ir_build_store_ptr(IrBuilder *irb, Scope *scope, AstNode * ir_ref_instruction(ptr, irb->current_basic_block); ir_ref_instruction(value, irb->current_basic_block); - return &instruction->base; + return instruction; } static IrInstruction *ir_build_var_decl_src(IrBuilder *irb, Scope *scope, AstNode *source_node, @@ -6051,6 +6051,7 @@ static IrInstruction *ir_gen_container_init_expr(IrBuilder *irb, Scope *scope, A ResultLocInstruction *result_loc_inst = allocate(1); result_loc_inst->base.id = ResultLocIdInstruction; result_loc_inst->base.source_instruction = field_ptr; + result_loc_inst->base.allow_write_through_const = true; ir_ref_instruction(field_ptr, irb->current_basic_block); ir_build_reset_result(irb, scope, expr_node, &result_loc_inst->base); @@ -6089,6 +6090,7 @@ static IrInstruction *ir_gen_container_init_expr(IrBuilder *irb, Scope *scope, A ResultLocInstruction *result_loc_inst = allocate(1); result_loc_inst->base.id = ResultLocIdInstruction; result_loc_inst->base.source_instruction = elem_ptr; + result_loc_inst->base.allow_write_through_const = true; ir_ref_instruction(elem_ptr, irb->current_basic_block); ir_build_reset_result(irb, scope, expr_node, &result_loc_inst->base); @@ -6646,7 +6648,7 @@ static IrInstruction *ir_gen_for_expr(IrBuilder *irb, Scope *parent_scope, AstNo ir_set_cursor_at_end_and_append_block(irb, continue_block); IrInstruction *new_index_val = ir_build_bin_op(irb, child_scope, node, IrBinOpAdd, index_val, one, false); - ir_mark_gen(ir_build_store_ptr(irb, child_scope, node, index_ptr, new_index_val)); + ir_build_store_ptr(irb, 
child_scope, node, index_ptr, new_index_val)->allow_write_through_const = true; ir_build_br(irb, child_scope, node, cond_block, is_comptime); IrInstruction *else_result = nullptr; @@ -14848,7 +14850,7 @@ static IrInstruction *ir_analyze_instruction_decl_var(IrAnalyze *ira, // instruction. assert(deref->value.special != ConstValSpecialRuntime); var_ptr->value.special = ConstValSpecialRuntime; - ir_analyze_store_ptr(ira, var_ptr, var_ptr, deref); + ir_analyze_store_ptr(ira, var_ptr, var_ptr, deref, false); } if (var_ptr->value.special == ConstValSpecialStatic && var->mem_slot_index != SIZE_MAX) { @@ -15862,7 +15864,7 @@ no_mem_slot: } static IrInstruction *ir_analyze_store_ptr(IrAnalyze *ira, IrInstruction *source_instr, - IrInstruction *ptr, IrInstruction *uncasted_value) + IrInstruction *ptr, IrInstruction *uncasted_value, bool allow_write_through_const) { assert(ptr->value.type->id == ZigTypeIdPointer); @@ -15878,7 +15880,7 @@ static IrInstruction *ir_analyze_store_ptr(IrAnalyze *ira, IrInstruction *source ZigType *child_type = ptr->value.type->data.pointer.child_type; - if (ptr->value.type->data.pointer.is_const && !source_instr->is_gen) { + if (ptr->value.type->data.pointer.is_const && !allow_write_through_const) { ir_add_error(ira, source_instr, buf_sprintf("cannot assign to constant")); return ira->codegen->invalid_instruction; } @@ -15957,10 +15959,9 @@ static IrInstruction *ir_analyze_store_ptr(IrAnalyze *ira, IrInstruction *source break; } - IrInstruction *result = ir_build_store_ptr(&ira->new_irb, source_instr->scope, source_instr->source_node, - ptr, value); - result->value.type = ira->codegen->builtin_types.entry_void; - return result; + IrInstructionStorePtr *store_ptr = ir_build_store_ptr(&ira->new_irb, source_instr->scope, + source_instr->source_node, ptr, value); + return &store_ptr->base; } static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *call_instruction, @@ -18283,7 +18284,7 @@ static IrInstruction *ir_analyze_instruction_store_ptr(IrAnalyze *ira, IrInstruc if (type_is_invalid(value->value.type)) return ira->codegen->invalid_instruction; - return ir_analyze_store_ptr(ira, &instruction->base, ptr, value); + return ir_analyze_store_ptr(ira, &instruction->base, ptr, value, instruction->allow_write_through_const); } static IrInstruction *ir_analyze_instruction_load_ptr(IrAnalyze *ira, IrInstructionLoadPtr *instruction) { @@ -19691,7 +19692,7 @@ static IrInstruction *ir_analyze_container_init_fields(IrAnalyze *ira, IrInstruc IrInstruction *field_ptr = ir_analyze_struct_field_ptr(ira, instruction, field, result_loc, container_type, true); - ir_analyze_store_ptr(ira, instruction, field_ptr, runtime_inst); + ir_analyze_store_ptr(ira, instruction, field_ptr, runtime_inst, false); if (instr_is_comptime(field_ptr) && field_ptr->value.data.x_ptr.mut != ConstPtrMutRuntimeVar) { const_ptrs.append(field_ptr); } else { @@ -19708,7 +19709,7 @@ static IrInstruction *ir_analyze_container_init_fields(IrAnalyze *ira, IrInstruc IrInstruction *field_result_loc = const_ptrs.at(i); IrInstruction *deref = ir_get_deref(ira, field_result_loc, field_result_loc, nullptr); field_result_loc->value.special = ConstValSpecialRuntime; - ir_analyze_store_ptr(ira, field_result_loc, field_result_loc, deref); + ir_analyze_store_ptr(ira, field_result_loc, field_result_loc, deref, false); } } } @@ -19835,7 +19836,7 @@ static IrInstruction *ir_analyze_instruction_container_init_list(IrAnalyze *ira, assert(elem_result_loc->value.special == ConstValSpecialStatic); IrInstruction *deref = 
ir_get_deref(ira, elem_result_loc, elem_result_loc, nullptr); elem_result_loc->value.special = ConstValSpecialRuntime; - ir_analyze_store_ptr(ira, elem_result_loc, elem_result_loc, deref); + ir_analyze_store_ptr(ira, elem_result_loc, elem_result_loc, deref, false); } } } @@ -25418,7 +25419,8 @@ static IrInstruction *ir_analyze_instruction_end_expr(IrAnalyze *ira, IrInstruct return result_loc; if (!was_written) { - IrInstruction *store_ptr = ir_analyze_store_ptr(ira, &instruction->base, result_loc, value); + IrInstruction *store_ptr = ir_analyze_store_ptr(ira, &instruction->base, result_loc, value, + instruction->result_loc->allow_write_through_const); if (type_is_invalid(store_ptr->value.type)) { return ira->codegen->invalid_instruction; } diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 40ce8d304b..a4bc2a66f0 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -201,7 +201,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ return error.OutOfMemory; \\} , - "tmp.zig:2:7: error: error is discarded", + "tmp.zig:2:12: error: error is discarded", ); cases.add( @@ -2740,7 +2740,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ 3 = 3; \\} , - "tmp.zig:2:7: error: cannot assign to constant", + "tmp.zig:2:9: error: cannot assign to constant", ); cases.add( @@ -2750,7 +2750,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ a = 4; \\} , - "tmp.zig:3:7: error: cannot assign to constant", + "tmp.zig:3:9: error: cannot assign to constant", ); cases.add( @@ -2820,7 +2820,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\} \\export fn entry() void { f(); } , - "tmp.zig:3:7: error: cannot assign to constant", + "tmp.zig:3:9: error: cannot assign to constant", ); cases.add( @@ -3883,7 +3883,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ \\export fn entry() usize { return @sizeOf(@typeOf(a)); } , - "tmp.zig:6:24: error: unable to evaluate constant expression", + "tmp.zig:6:26: error: unable to evaluate constant expression", "tmp.zig:4:17: note: called from here", ); @@ -4133,7 +4133,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ cstr[0] = 'W'; \\} , - "tmp.zig:3:11: error: cannot assign to constant", + "tmp.zig:3:13: error: cannot assign to constant", ); cases.add( @@ -4143,7 +4143,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ cstr[0] = 'W'; \\} , - "tmp.zig:3:11: error: cannot assign to constant", + "tmp.zig:3:13: error: cannot assign to constant", ); cases.add( @@ -4291,7 +4291,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ f.field = 0; \\} , - "tmp.zig:6:13: error: cannot assign to constant", + "tmp.zig:6:15: error: cannot assign to constant", ); cases.add( -- cgit v1.2.3 From 24d78177eec4d8fc3aa8ca99dd50788e38f9f8b6 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 3 Aug 2019 01:06:14 -0400 Subject: add compile error for async call of function pointer --- BRANCH_TODO | 2 +- src/ir.cpp | 5 ++++- test/compile_errors.zig | 12 ++++++++++++ test/stage1/behavior/coroutines.zig | 8 ++++---- 4 files changed, 21 insertions(+), 6 deletions(-) (limited to 'test/compile_errors.zig') diff --git a/BRANCH_TODO b/BRANCH_TODO index 92390f099f..f3d881f5e5 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -1,4 +1,3 @@ - * struct types as the return type of an async function. make sure it works with return result locations. 
* compile error for error: expected anyframe->T, found 'anyframe' * compile error for error: expected anyframe->T, found 'i32' * await of a non async function @@ -19,3 +18,4 @@ * make resuming inside a suspend block, with nothing after it, a must-tail call. * make sure there are safety tests for all the new safety features (search the new PanicFnId enum values) * error return tracing + * compile error for casting a function to a non-async function pointer, but then later it gets inferred to be an async function diff --git a/src/ir.cpp b/src/ir.cpp index f140cfeabe..b01f43b3e1 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -14819,7 +14819,10 @@ static IrInstruction *ir_analyze_instruction_reset_result(IrAnalyze *ira, IrInst static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc *call_instruction, ZigFn *fn_entry, ZigType *fn_type, IrInstruction *fn_ref, IrInstruction **casted_args, size_t arg_count) { - ir_assert(fn_entry != nullptr, &call_instruction->base); + if (fn_entry == nullptr) { + ir_add_error(ira, fn_ref, buf_sprintf("function is not comptime-known; @asyncCall required")); + return ira->codegen->invalid_instruction; + } ZigType *frame_type = get_coro_frame_type(ira->codegen, fn_entry); IrInstruction *result_loc = ir_resolve_result(ira, &call_instruction->base, call_instruction->result_loc, diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 272d99c930..4b1a24c675 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -2,6 +2,18 @@ const tests = @import("tests.zig"); const builtin = @import("builtin"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "runtime-known function called with async keyword", + \\export fn entry() void { + \\ var ptr = afunc; + \\ _ = async ptr(); + \\} + \\ + \\async fn afunc() void { } + , + "tmp.zig:3:15: error: function is not comptime-known; @asyncCall required", + ); + cases.add( "function with ccc indirectly calling async function", \\export fn entry() void { diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index a1c1b7ad61..aa77541d19 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -263,15 +263,15 @@ test "async function with dot syntax" { //test "async fn pointer in a struct field" { // var data: i32 = 1; // const Foo = struct { -// bar: async<*std.mem.Allocator> fn (*i32) void, +// bar: async fn (*i32) void, // }; // var foo = Foo{ .bar = simpleAsyncFn2 }; -// const p = (async foo.bar(&data)) catch unreachable; +// const p = async foo.bar(&data); // expect(data == 2); -// cancel p; +// resume p; // expect(data == 4); //} -//async<*std.mem.Allocator> fn simpleAsyncFn2(y: *i32) void { +//async fn simpleAsyncFn2(y: *i32) void { // defer y.* += 2; // y.* += 1; // suspend; -- cgit v1.2.3 From c87920966133d3285b60ccd022282e3f53789e0c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 3 Aug 2019 02:40:38 -0400 Subject: add compile error for calling async function pointer --- src/analyze.cpp | 6 +++++- test/compile_errors.zig | 12 ++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) (limited to 'test/compile_errors.zig') diff --git a/src/analyze.cpp b/src/analyze.cpp index 5af9698dd1..5eb70d6717 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5177,7 +5177,11 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { for (size_t i = 0; i < fn->call_list.length; i += 1) { IrInstructionCallGen *call = fn->call_list.at(i); ZigFn *callee = call->fn_entry; - 
assert(callee != nullptr); + if (callee == nullptr) { + add_node_error(g, call->base.source_node, + buf_sprintf("function is not comptime-known; @asyncCall required")); + return ErrorSemanticAnalyzeFail; + } analyze_fn_body(g, callee); if (callee->anal_state == FnAnalStateInvalid) { diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 4b1a24c675..3245632e37 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -2,6 +2,18 @@ const tests = @import("tests.zig"); const builtin = @import("builtin"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "runtime-known async function called", + \\export fn entry() void { + \\ var ptr = afunc; + \\ _ = ptr(); + \\} + \\ + \\async fn afunc() void {} + , + "tmp.zig:3:12: error: function is not comptime-known; @asyncCall required", + ); + cases.add( "runtime-known function called with async keyword", \\export fn entry() void { -- cgit v1.2.3 From 87710a1cc2c4d0e7ecc309e430f7d33baadc5f02 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 3 Aug 2019 16:14:24 -0400 Subject: implement `@asyncCall` which supports async function pointers --- BRANCH_TODO | 16 +++++- src/all_types.hpp | 3 ++ src/analyze.cpp | 3 ++ src/codegen.cpp | 105 +++++++++++++++++++++++++++--------- src/ir.cpp | 102 +++++++++++++++++++++++++++++------ test/compile_errors.zig | 12 +++++ test/runtime_safety.zig | 15 ++++++ test/stage1/behavior/coroutines.zig | 52 ++++++++++++------ 8 files changed, 247 insertions(+), 61 deletions(-) (limited to 'test/compile_errors.zig') diff --git a/BRANCH_TODO b/BRANCH_TODO index a9bc5f3666..0ac1062b43 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -1,9 +1,8 @@ + * @asyncCall with an async function pointer * compile error for error: expected anyframe->T, found 'anyframe' * compile error for error: expected anyframe->T, found 'i32' * await of a non async function - * await in single-threaded mode * async call on a non async function - * @asyncCall with an async function pointer * cancel * defer and errdefer * safety for double await @@ -21,3 +20,16 @@ * compile error for copying a frame * compile error for resuming a const frame pointer * runtime safety enabling/disabling scope has to be coordinated across resume/await/calls/return + * await in single-threaded mode + * calling a generic function which is async + * make sure `await @asyncCall` and `await async` are handled correctly. 
+ * allow @asyncCall with a real @Frame(func) (the point of this is result pointer) + * documentation + - @asyncCall + - @frame + - @Frame + - @frameSize + - coroutines section + - suspend + - resume + - anyframe, anyframe->T diff --git a/src/all_types.hpp b/src/all_types.hpp index 0f8cce1376..87db8edf8d 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1503,6 +1503,7 @@ enum BuiltinFnId { BuiltinFnIdInlineCall, BuiltinFnIdNoInlineCall, BuiltinFnIdNewStackCall, + BuiltinFnIdAsyncCall, BuiltinFnIdTypeId, BuiltinFnIdShlExact, BuiltinFnIdShrExact, @@ -1553,6 +1554,7 @@ enum PanicMsgId { PanicMsgIdBadAwait, PanicMsgIdBadReturn, PanicMsgIdResumedAnAwaitingFn, + PanicMsgIdFrameTooSmall, PanicMsgIdCount, }; @@ -3699,6 +3701,7 @@ static const size_t maybe_null_index = 1; static const size_t err_union_err_index = 0; static const size_t err_union_payload_index = 1; +// label (grep this): [coro_frame_struct_layout] static const size_t coro_fn_ptr_index = 0; static const size_t coro_awaiter_index = 1; static const size_t coro_arg_start = 2; diff --git a/src/analyze.cpp b/src/analyze.cpp index 5eb70d6717..cd8f981ff3 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5205,6 +5205,7 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { call->frame_result_loc = &alloca_gen->base; } + // label (grep this): [coro_frame_struct_layout] ZigList field_types = {}; ZigList field_names = {}; @@ -7525,6 +7526,7 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re if (result_type == nullptr) { g->anyframe_fn_type = ptr_result_type; } + // label (grep this): [coro_frame_struct_layout] LLVMTypeRef field_types[] = { ptr_result_type, // fn_ptr usize_type_ref, // awaiter @@ -7558,6 +7560,7 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re ZigLLVMReplaceTemporary(g->dbuilder, frame_header_di_type, replacement_di_type); } else { ZigType *ptr_result_type = get_pointer_to_type(g, result_type, false); + // label (grep this): [coro_frame_struct_layout] LLVMTypeRef field_types[] = { LLVMPointerType(fn_type, 0), // fn_ptr usize_type_ref, // awaiter diff --git a/src/codegen.cpp b/src/codegen.cpp index db617e636a..ebdd9e6120 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -879,6 +879,8 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) { return buf_create_from_str("async function returned twice"); case PanicMsgIdResumedAnAwaitingFn: return buf_create_from_str("awaiting function resumed"); + case PanicMsgIdFrameTooSmall: + return buf_create_from_str("frame too small"); } zig_unreachable(); } @@ -3479,7 +3481,18 @@ static void render_async_var_decls(CodeGen *g, Scope *scope) { } } +static LLVMValueRef gen_frame_size(CodeGen *g, LLVMValueRef fn_val) { + LLVMTypeRef usize_llvm_type = g->builtin_types.entry_usize->llvm_type; + LLVMTypeRef ptr_usize_llvm_type = LLVMPointerType(usize_llvm_type, 0); + LLVMValueRef casted_fn_val = LLVMBuildBitCast(g->builder, fn_val, ptr_usize_llvm_type, ""); + LLVMValueRef negative_one = LLVMConstInt(LLVMInt32Type(), -1, true); + LLVMValueRef prefix_ptr = LLVMBuildInBoundsGEP(g->builder, casted_fn_val, &negative_one, 1, ""); + return LLVMBuildLoad(g->builder, prefix_ptr, ""); +} + static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstructionCallGen *instruction) { + LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; + LLVMValueRef fn_val; ZigType *fn_type; bool callee_is_async; @@ -3511,34 +3524,54 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable 
*executable, IrInstr LLVMValueRef awaiter_init_val; LLVMValueRef ret_ptr; if (instruction->is_async) { - frame_result_loc = result_loc; awaiter_init_val = zero; - if (ret_has_bits) { - ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_arg_start + 1, ""); - } - // Use the result location which is inside the frame if this is an async call. - if (ret_has_bits) { - LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_arg_start, ""); - LLVMBuildStore(g->builder, ret_ptr, ret_ptr_ptr); + if (instruction->new_stack == nullptr) { + frame_result_loc = result_loc; + + if (ret_has_bits) { + // Use the result location which is inside the frame if this is an async call. + ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_arg_start + 1, ""); + } + } else { + LLVMValueRef frame_slice_ptr = ir_llvm_value(g, instruction->new_stack); + if (ir_want_runtime_safety(g, &instruction->base)) { + LLVMValueRef given_len_ptr = LLVMBuildStructGEP(g->builder, frame_slice_ptr, slice_len_index, ""); + LLVMValueRef given_frame_len = LLVMBuildLoad(g->builder, given_len_ptr, ""); + LLVMValueRef actual_frame_len = gen_frame_size(g, fn_val); + + LLVMBasicBlockRef fail_block = LLVMAppendBasicBlock(g->cur_fn_val, "FrameSizeCheckFail"); + LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "FrameSizeCheckOk"); + + LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntUGE, given_frame_len, actual_frame_len, ""); + LLVMBuildCondBr(g->builder, ok_bit, ok_block, fail_block); + + LLVMPositionBuilderAtEnd(g->builder, fail_block); + gen_safety_crash(g, PanicMsgIdFrameTooSmall); + + LLVMPositionBuilderAtEnd(g->builder, ok_block); + } + LLVMValueRef frame_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_slice_ptr, slice_ptr_index, ""); + LLVMValueRef frame_ptr = LLVMBuildLoad(g->builder, frame_ptr_ptr, ""); + frame_result_loc = LLVMBuildBitCast(g->builder, frame_ptr, + get_llvm_type(g, instruction->base.value.type), ""); + + if (ret_has_bits) { + // Use the result location provided to the @asyncCall builtin + ret_ptr = result_loc; + } } } else if (callee_is_async) { frame_result_loc = ir_llvm_value(g, instruction->frame_result_loc); awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_ret_ptr, g->builtin_types.entry_usize->llvm_type, ""); // caller's own frame pointer if (ret_has_bits) { + // Use the call instruction's result location. ret_ptr = result_loc; } - - // Use the call instruction's result location. 
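// Editor's note (added commentary, not part of the commit): the removed store
// below is superseded by the shared (is_async || callee_is_async) block further
// down, which now writes both the awaiter and, when the return type has bits,
// the ret_ptr into the coroutine frame in one place for both call kinds.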
- if (ret_has_bits) { - LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_arg_start, ""); - LLVMBuildStore(g->builder, result_loc, ret_ptr_ptr); - } } if (instruction->is_async || callee_is_async) { assert(frame_result_loc != nullptr); - assert(instruction->fn_entry != nullptr); if (prefix_arg_err_ret_stack) { zig_panic("TODO"); @@ -3547,6 +3580,10 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_awaiter_index, ""); LLVMBuildStore(g->builder, awaiter_init_val, awaiter_ptr); + if (ret_has_bits) { + LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_arg_start, ""); + LLVMBuildStore(g->builder, ret_ptr, ret_ptr_ptr); + } } if (!instruction->is_async && !callee_is_async) { if (first_arg_ret) { @@ -3581,16 +3618,37 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr if (instruction->is_async || callee_is_async) { size_t ret_2_or_0 = type_has_bits(fn_type->data.fn.fn_type_id.return_type) ? 2 : 0; + size_t arg_start_i = coro_arg_start + ret_2_or_0; + + LLVMValueRef casted_frame; + if (instruction->new_stack != nullptr) { + // We need the frame type to be a pointer to a struct that includes the args + // label (grep this): [coro_frame_struct_layout] + size_t field_count = arg_start_i + gen_param_values.length; + LLVMTypeRef *field_types = allocate_nonzero(field_count); + LLVMGetStructElementTypes(LLVMGetElementType(LLVMTypeOf(frame_result_loc)), field_types); + for (size_t arg_i = 0; arg_i < gen_param_values.length; arg_i += 1) { + field_types[arg_start_i + arg_i] = LLVMTypeOf(gen_param_values.at(arg_i)); + } + LLVMTypeRef frame_with_args_type = LLVMStructType(field_types, field_count, false); + LLVMTypeRef ptr_frame_with_args_type = LLVMPointerType(frame_with_args_type, 0); + + casted_frame = LLVMBuildBitCast(g->builder, frame_result_loc, ptr_frame_with_args_type, ""); + } else { + casted_frame = frame_result_loc; + } + for (size_t arg_i = 0; arg_i < gen_param_values.length; arg_i += 1) { - LLVMValueRef arg_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, - coro_arg_start + ret_2_or_0 + arg_i, ""); + LLVMValueRef arg_ptr = LLVMBuildStructGEP(g->builder, casted_frame, arg_start_i + arg_i, ""); LLVMBuildStore(g->builder, gen_param_values.at(arg_i), arg_ptr); } } - LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; if (instruction->is_async) { LLVMValueRef args[] = {frame_result_loc, LLVMGetUndef(usize_type_ref)}; ZigLLVMBuildCall(g->builder, fn_val, args, 2, llvm_cc, fn_inline, ""); + if (instruction->new_stack != nullptr) { + return frame_result_loc; + } return nullptr; } else if (callee_is_async) { ZigType *ptr_result_type = get_pointer_to_type(g, src_return_type, true); @@ -5223,13 +5281,8 @@ static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable, static LLVMValueRef ir_render_frame_size(CodeGen *g, IrExecutable *executable, IrInstructionFrameSizeGen *instruction) { - LLVMTypeRef usize_llvm_type = g->builtin_types.entry_usize->llvm_type; - LLVMTypeRef ptr_usize_llvm_type = LLVMPointerType(usize_llvm_type, 0); LLVMValueRef fn_val = ir_llvm_value(g, instruction->fn); - LLVMValueRef casted_fn_val = LLVMBuildBitCast(g->builder, fn_val, ptr_usize_llvm_type, ""); - LLVMValueRef negative_one = LLVMConstInt(LLVMInt32Type(), -1, true); - LLVMValueRef prefix_ptr = LLVMBuildInBoundsGEP(g->builder, casted_fn_val, &negative_one, 1, ""); - return 
LLVMBuildLoad(g->builder, prefix_ptr, ""); + return gen_frame_size(g, fn_val); } static void set_debug_location(CodeGen *g, IrInstruction *instruction) { @@ -7097,13 +7150,13 @@ static void define_builtin_fns(CodeGen *g) { create_builtin_fn(g, BuiltinFnIdFloor, "floor", 2); create_builtin_fn(g, BuiltinFnIdCeil, "ceil", 2); create_builtin_fn(g, BuiltinFnIdTrunc, "trunc", 2); - //Needs library support on Windows - //create_builtin_fn(g, BuiltinFnIdNearbyInt, "nearbyInt", 2); + create_builtin_fn(g, BuiltinFnIdNearbyInt, "nearbyInt", 2); create_builtin_fn(g, BuiltinFnIdRound, "round", 2); create_builtin_fn(g, BuiltinFnIdMulAdd, "mulAdd", 4); create_builtin_fn(g, BuiltinFnIdInlineCall, "inlineCall", SIZE_MAX); create_builtin_fn(g, BuiltinFnIdNoInlineCall, "noInlineCall", SIZE_MAX); create_builtin_fn(g, BuiltinFnIdNewStackCall, "newStackCall", SIZE_MAX); + create_builtin_fn(g, BuiltinFnIdAsyncCall, "asyncCall", SIZE_MAX); create_builtin_fn(g, BuiltinFnIdTypeId, "typeId", 1); create_builtin_fn(g, BuiltinFnIdShlExact, "shlExact", 2); create_builtin_fn(g, BuiltinFnIdShrExact, "shrExact", 2); diff --git a/src/ir.cpp b/src/ir.cpp index b01f43b3e1..fbf9da9656 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -1402,6 +1402,10 @@ static IrInstruction *ir_build_call_src(IrBuilder *irb, Scope *scope, AstNode *s if (fn_ref != nullptr) ir_ref_instruction(fn_ref, irb->current_basic_block); for (size_t i = 0; i < arg_count; i += 1) ir_ref_instruction(args[i], irb->current_basic_block); + if (is_async && new_stack != nullptr) { + // in this case the arg at the end is the return pointer + ir_ref_instruction(args[arg_count], irb->current_basic_block); + } if (new_stack != nullptr) ir_ref_instruction(new_stack, irb->current_basic_block); return &call_instruction->base; @@ -5203,8 +5207,10 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo } case BuiltinFnIdNewStackCall: { - if (node->data.fn_call_expr.params.length == 0) { - add_node_error(irb->codegen, node, buf_sprintf("expected at least 1 argument, found 0")); + if (node->data.fn_call_expr.params.length < 2) { + add_node_error(irb->codegen, node, + buf_sprintf("expected at least 2 arguments, found %" ZIG_PRI_usize, + node->data.fn_call_expr.params.length)); return irb->codegen->invalid_instruction; } @@ -5232,6 +5238,50 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo FnInlineAuto, false, new_stack, result_loc); return ir_lval_wrap(irb, scope, call, lval, result_loc); } + case BuiltinFnIdAsyncCall: + { + size_t arg_offset = 3; + if (node->data.fn_call_expr.params.length < arg_offset) { + add_node_error(irb->codegen, node, + buf_sprintf("expected at least %" ZIG_PRI_usize " arguments, found %" ZIG_PRI_usize, + arg_offset, node->data.fn_call_expr.params.length)); + return irb->codegen->invalid_instruction; + } + + AstNode *bytes_node = node->data.fn_call_expr.params.at(0); + IrInstruction *bytes = ir_gen_node(irb, bytes_node, scope); + if (bytes == irb->codegen->invalid_instruction) + return bytes; + + AstNode *ret_ptr_node = node->data.fn_call_expr.params.at(1); + IrInstruction *ret_ptr = ir_gen_node(irb, ret_ptr_node, scope); + if (ret_ptr == irb->codegen->invalid_instruction) + return ret_ptr; + + AstNode *fn_ref_node = node->data.fn_call_expr.params.at(2); + IrInstruction *fn_ref = ir_gen_node(irb, fn_ref_node, scope); + if (fn_ref == irb->codegen->invalid_instruction) + return fn_ref; + + size_t arg_count = node->data.fn_call_expr.params.length - arg_offset; + + // last "arg" is return pointer + 
IrInstruction **args = allocate(arg_count + 1); + + for (size_t i = 0; i < arg_count; i += 1) { + AstNode *arg_node = node->data.fn_call_expr.params.at(i + arg_offset); + IrInstruction *arg = ir_gen_node(irb, arg_node, scope); + if (arg == irb->codegen->invalid_instruction) + return arg; + args[i] = arg; + } + + args[arg_count] = ret_ptr; + + IrInstruction *call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false, + FnInlineAuto, true, bytes, result_loc); + return ir_lval_wrap(irb, scope, call, lval, result_loc); + } case BuiltinFnIdTypeId: { AstNode *arg0_node = node->data.fn_call_expr.params.at(0); @@ -14817,11 +14867,31 @@ static IrInstruction *ir_analyze_instruction_reset_result(IrAnalyze *ira, IrInst } static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc *call_instruction, ZigFn *fn_entry, - ZigType *fn_type, IrInstruction *fn_ref, IrInstruction **casted_args, size_t arg_count) + ZigType *fn_type, IrInstruction *fn_ref, IrInstruction **casted_args, size_t arg_count, + IrInstruction *casted_new_stack) { if (fn_entry == nullptr) { - ir_add_error(ira, fn_ref, buf_sprintf("function is not comptime-known; @asyncCall required")); - return ira->codegen->invalid_instruction; + if (call_instruction->new_stack == nullptr) { + ir_add_error(ira, fn_ref, buf_sprintf("function is not comptime-known; @asyncCall required")); + return ira->codegen->invalid_instruction; + } + // this is an @asyncCall + + if (fn_type->data.fn.fn_type_id.cc != CallingConventionAsync) { + ir_add_error(ira, fn_ref, + buf_sprintf("expected async function, found '%s'", buf_ptr(&fn_type->name))); + return ira->codegen->invalid_instruction; + } + + IrInstruction *ret_ptr = call_instruction->args[call_instruction->arg_count]->child; + if (type_is_invalid(ret_ptr->value.type)) + return ira->codegen->invalid_instruction; + + ZigType *anyframe_type = get_any_frame_type(ira->codegen, fn_type->data.fn.fn_type_id.return_type); + + IrInstructionCallGen *call_gen = ir_build_call_gen(ira, &call_instruction->base, nullptr, fn_ref, + arg_count, casted_args, FnInlineAuto, true, casted_new_stack, ret_ptr, anyframe_type); + return &call_gen->base; } ZigType *frame_type = get_coro_frame_type(ira->codegen, fn_entry); @@ -15559,13 +15629,13 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c size_t impl_param_count = impl_fn_type_id->param_count; if (call_instruction->is_async) { - zig_panic("TODO async call"); + IrInstruction *result = ir_analyze_async_call(ira, call_instruction, impl_fn, impl_fn->type_entry, + nullptr, casted_args, call_param_count, casted_new_stack); + return ir_finish_anal(ira, result); } - if (!call_instruction->is_async) { - if (impl_fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) { - parent_fn_entry->inferred_async_node = fn_ref->source_node; - } + if (impl_fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) { + parent_fn_entry->inferred_async_node = fn_ref->source_node; } IrInstructionCallGen *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base, @@ -15645,18 +15715,16 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c return ira->codegen->invalid_instruction; } - if (!call_instruction->is_async) { - if (fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) { - parent_fn_entry->inferred_async_node = fn_ref->source_node; - } - } - if (call_instruction->is_async) { 
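// Editor's note (added commentary, not part of the commit): both branches that
// handle `is_async` call sites now forward casted_new_stack into
// ir_analyze_async_call, which, per the hunk earlier in this file, tolerates a
// null fn_entry when @asyncCall supplies the frame bytes and return pointer.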
IrInstruction *result = ir_analyze_async_call(ira, call_instruction, fn_entry, fn_type, fn_ref, - casted_args, call_param_count); + casted_args, call_param_count, casted_new_stack); return ir_finish_anal(ira, result); } + if (fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) { + parent_fn_entry->inferred_async_node = fn_ref->source_node; + } + IrInstruction *result_loc; if (handle_is_ptr(return_type)) { result_loc = ir_resolve_result(ira, &call_instruction->base, call_instruction->result_loc, diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 3245632e37..2941cadcf5 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -2,6 +2,18 @@ const tests = @import("tests.zig"); const builtin = @import("builtin"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "non async function pointer passed to @asyncCall", + \\export fn entry() void { + \\ var ptr = afunc; + \\ var bytes: [100]u8 = undefined; + \\ _ = @asyncCall(&bytes, {}, ptr); + \\} + \\fn afunc() void { } + , + "tmp.zig:4:32: error: expected async function, found 'fn() void'", + ); + cases.add( "runtime-known async function called", \\export fn entry() void { diff --git a/test/runtime_safety.zig b/test/runtime_safety.zig index 43cf0856c3..ac9037caae 100644 --- a/test/runtime_safety.zig +++ b/test/runtime_safety.zig @@ -1,6 +1,20 @@ const tests = @import("tests.zig"); pub fn addCases(cases: *tests.CompareOutputContext) void { + cases.addRuntimeSafety("@asyncCall with too small a frame", + \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn { + \\ @import("std").os.exit(126); + \\} + \\pub fn main() void { + \\ var bytes: [1]u8 = undefined; + \\ var ptr = other; + \\ var frame = @asyncCall(&bytes, {}, ptr); + \\} + \\async fn other() void { + \\ suspend; + \\} + ); + cases.addRuntimeSafety("resuming a function which is awaiting a frame", \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn { \\ @import("std").os.exit(126); @@ -17,6 +31,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void { \\ suspend; \\} ); + cases.addRuntimeSafety("resuming a function which is awaiting a call", \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn { \\ @import("std").os.exit(126); diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig index 2b82dce707..511568a898 100644 --- a/test/stage1/behavior/coroutines.zig +++ b/test/stage1/behavior/coroutines.zig @@ -260,22 +260,42 @@ test "async function with dot syntax" { expect(S.y == 2); } -//test "async fn pointer in a struct field" { -// var data: i32 = 1; -// const Foo = struct { -// bar: async fn (*i32) void, -// }; -// var foo = Foo{ .bar = simpleAsyncFn2 }; -// const p = async foo.bar(&data); -// expect(data == 2); -// resume p; -// expect(data == 4); -//} -//async fn simpleAsyncFn2(y: *i32) void { -// defer y.* += 2; -// y.* += 1; -// suspend; -//} +test "async fn pointer in a struct field" { + var data: i32 = 1; + const Foo = struct { + bar: async fn (*i32) void, + }; + var foo = Foo{ .bar = simpleAsyncFn2 }; + var bytes: [64]u8 = undefined; + const p = @asyncCall(&bytes, {}, foo.bar, &data); + comptime expect(@typeOf(p) == anyframe->void); + expect(data == 2); + resume p; + expect(data == 4); +} +async fn simpleAsyncFn2(y: *i32) void { + defer y.* += 2; + y.* += 1; + suspend; +} + +test "@asyncCall with return type" { + const Foo = struct { + bar: async 
fn () i32, + + async fn afunc() i32 { + suspend; + return 1234; + } + }; + var foo = Foo{ .bar = Foo.afunc }; + var bytes: [64]u8 = undefined; + var aresult: i32 = 0; + const frame = @asyncCall(&bytes, &aresult, foo.bar); + expect(aresult == 0); + resume frame; + expect(aresult == 1234); +} //test "async fn with inferred error set" { // const p = async failing(); -- cgit v1.2.3 From bfa1d12fbad2031402fbafe51c3a0c481fe69351 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 8 Aug 2019 13:44:57 -0400 Subject: better compile errors when frame depends on itself --- src/analyze.cpp | 11 ++++++++--- src/ir.cpp | 12 +++++++++--- src/ir.hpp | 2 ++ test/compile_errors.zig | 30 ++++++++++++++++++++++++++++++ 4 files changed, 49 insertions(+), 6 deletions(-) (limited to 'test/compile_errors.zig') diff --git a/src/analyze.cpp b/src/analyze.cpp index aa5c3c88f7..cc90573f41 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5179,11 +5179,14 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { if (fn->anal_state == FnAnalStateInvalid) return ErrorSemanticAnalyzeFail; break; - case FnAnalStateProbing: - add_node_error(g, fn->proto_node, + case FnAnalStateProbing: { + ErrorMsg *msg = add_node_error(g, fn->proto_node, buf_sprintf("cannot resolve '%s': function not fully analyzed yet", buf_ptr(&frame_type->name))); + ir_add_analysis_trace(fn->ir_executable.analysis, msg, + buf_sprintf("depends on its own frame here")); return ErrorSemanticAnalyzeFail; + } } ZigType *fn_type = get_async_fn_type(g, fn->type_entry); @@ -5201,8 +5204,10 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { if (callee->anal_state == FnAnalStateProbing) { ErrorMsg *msg = add_node_error(g, fn->proto_node, buf_sprintf("unable to determine async function frame of '%s'", buf_ptr(&fn->symbol_name))); - add_error_note(g, msg, call->base.source_node, + ErrorMsg *note = add_error_note(g, msg, call->base.source_node, buf_sprintf("analysis of function '%s' depends on the frame", buf_ptr(&callee->symbol_name))); + ir_add_analysis_trace(callee->ir_executable.analysis, note, + buf_sprintf("depends on the frame here")); return ErrorSemanticAnalyzeFail; } diff --git a/src/ir.cpp b/src/ir.cpp index b2389d1501..f92434bb33 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -8217,18 +8217,24 @@ bool ir_gen_fn(CodeGen *codegen, ZigFn *fn_entry) { return ir_gen(codegen, body_node, fn_entry->child_scope, ir_executable); } -static void add_call_stack_errors(CodeGen *codegen, IrExecutable *exec, ErrorMsg *err_msg, int limit) { +static void ir_add_call_stack_errors(CodeGen *codegen, IrExecutable *exec, ErrorMsg *err_msg, int limit) { if (!exec || !exec->source_node || limit < 0) return; add_error_note(codegen, err_msg, exec->source_node, buf_sprintf("called from here")); - add_call_stack_errors(codegen, exec->parent_exec, err_msg, limit - 1); + ir_add_call_stack_errors(codegen, exec->parent_exec, err_msg, limit - 1); +} + +void ir_add_analysis_trace(IrAnalyze *ira, ErrorMsg *err_msg, Buf *text) { + IrInstruction *old_instruction = ira->old_irb.current_basic_block->instruction_list.at(ira->instruction_index); + add_error_note(ira->codegen, err_msg, old_instruction->source_node, text); + ir_add_call_stack_errors(ira->codegen, ira->new_irb.exec, err_msg, 10); } static ErrorMsg *exec_add_error_node(CodeGen *codegen, IrExecutable *exec, AstNode *source_node, Buf *msg) { invalidate_exec(exec); ErrorMsg *err_msg = add_node_error(codegen, source_node, msg); if (exec->parent_exec) { - add_call_stack_errors(codegen, exec, err_msg, 
10); + ir_add_call_stack_errors(codegen, exec, err_msg, 10); } return err_msg; } diff --git a/src/ir.hpp b/src/ir.hpp index 597624e2e6..3761c5a97d 100644 --- a/src/ir.hpp +++ b/src/ir.hpp @@ -28,4 +28,6 @@ ConstExprValue *const_ptr_pointee(IrAnalyze *ira, CodeGen *codegen, ConstExprVal AstNode *source_node); const char *float_op_to_name(BuiltinFnId op, bool llvm_name); +void ir_add_analysis_trace(IrAnalyze *ira, ErrorMsg *err_msg, Buf *text); + #endif diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 2941cadcf5..810e40b18b 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -2,6 +2,36 @@ const tests = @import("tests.zig"); const builtin = @import("builtin"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "async function indirectly depends on its own frame", + \\export fn entry() void { + \\ _ = async amain(); + \\} + \\async fn amain() void { + \\ other(); + \\} + \\fn other() void { + \\ var x: [@sizeOf(@Frame(amain))]u8 = undefined; + \\} + , + "tmp.zig:4:1: error: unable to determine async function frame of 'amain'", + "tmp.zig:5:10: note: analysis of function 'other' depends on the frame", + "tmp.zig:8:13: note: depends on the frame here", + ); + + cases.add( + "async function depends on its own frame", + \\export fn entry() void { + \\ _ = async amain(); + \\} + \\async fn amain() void { + \\ var x: [@sizeOf(@Frame(amain))]u8 = undefined; + \\} + , + "tmp.zig:4:1: error: cannot resolve '@Frame(amain)': function not fully analyzed yet", + "tmp.zig:5:13: note: depends on its own frame here", + ); + cases.add( "non async function pointer passed to @asyncCall", \\export fn entry() void { -- cgit v1.2.3 From 1b83ee78a48a64bef28f12b7b2e263074f88b6b6 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 11 Aug 2019 12:00:32 -0400 Subject: allow comptime_int to implicit cast to comptime_float --- src/ir.cpp | 3 +++ std/math.zig | 7 +++++++ test/compile_errors.zig | 8 -------- test/stage1/behavior/cast.zig | 7 ++++++- 4 files changed, 16 insertions(+), 9 deletions(-) (limited to 'test/compile_errors.zig') diff --git a/src/ir.cpp b/src/ir.cpp index 2b096a3383..13348d28c4 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -9713,6 +9713,9 @@ static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, IrInstruction *instruc bool const_val_is_float = (const_val->type->id == ZigTypeIdFloat || const_val->type->id == ZigTypeIdComptimeFloat); assert(const_val_is_int || const_val_is_float); + if (const_val_is_int && other_type->id == ZigTypeIdComptimeFloat) { + return true; + } if (other_type->id == ZigTypeIdFloat) { if (const_val->type->id == ZigTypeIdComptimeInt || const_val->type->id == ZigTypeIdComptimeFloat) { return true; diff --git a/std/math.zig b/std/math.zig index e10c9329d9..e47021512e 100644 --- a/std/math.zig +++ b/std/math.zig @@ -305,6 +305,13 @@ test "math.min" { testing.expect(@typeOf(result) == i16); testing.expect(result == -200); } + { + const a = 10.34; + var b: f32 = 999.12; + var result = min(a, b); + testing.expect(@typeOf(result) == f32); + testing.expect(result == 10.34); + } } pub fn max(x: var, y: var) @typeOf(x + y) { diff --git a/test/compile_errors.zig b/test/compile_errors.zig index a4bc2a66f0..437e40900d 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -3225,14 +3225,6 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { "tmp.zig:3:17: note: value 8 cannot fit into type u3", ); - cases.add( - "incompatible number literals", - \\const x = 2 == 2.0; - \\export fn entry() usize { return 
@sizeOf(@typeOf(x)); } - , - "tmp.zig:1:11: error: integer value 2 cannot be implicitly casted to type 'comptime_float'", - ); - cases.add( "missing function call param", \\const Foo = struct { diff --git a/test/stage1/behavior/cast.zig b/test/stage1/behavior/cast.zig index c243f18088..04c7fa606f 100644 --- a/test/stage1/behavior/cast.zig +++ b/test/stage1/behavior/cast.zig @@ -508,7 +508,7 @@ test "peer type resolution: unreachable, null, slice" { } test "peer type resolution: unreachable, error set, unreachable" { - const Error = error { + const Error = error{ FileDescriptorAlreadyPresentInSet, OperationCausesCircularLoop, FileDescriptorNotRegistered, @@ -529,3 +529,8 @@ test "peer type resolution: unreachable, error set, unreachable" { }; expect(transformed_err == error.SystemResources); } + +test "implicit cast comptime_int to comptime_float" { + comptime expect(comptime_float(10) == f32(10)); + expect(2 == 2.0); +} -- cgit v1.2.3 From 4d8d513e16d308131846d98267bc844bf702e9ce Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 11 Aug 2019 19:53:10 -0400 Subject: all tests passing --- BRANCH_TODO | 6 ++- doc/docgen.zig | 2 +- doc/langref.html.in | 81 ++++++++++++-------------------------- src/analyze.cpp | 2 +- src/codegen.cpp | 49 ++++++++++++----------- src/ir.cpp | 3 ++ std/event/channel.zig | 11 +++--- std/event/fs.zig | 102 ++++++++++++------------------------------------ std/event/future.zig | 45 +++++++++------------ std/event/group.zig | 68 ++++++++++---------------------- std/event/io.zig | 19 +++++---- std/event/lock.zig | 54 ++++++++++--------------- std/event/loop.zig | 4 +- std/event/net.zig | 53 +++++++++++-------------- std/event/rwlock.zig | 85 ++++++++++++++++++++-------------------- std/zig/parser_test.zig | 2 +- test/compile_errors.zig | 35 +++++------------ 17 files changed, 240 insertions(+), 381 deletions(-) (limited to 'test/compile_errors.zig') diff --git a/BRANCH_TODO b/BRANCH_TODO index bd797a75a8..b2b293aec1 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -1,10 +1,13 @@ + * for loops need to spill the index. other payload captures probably also need to spill + * compile error (instead of crashing) for trying to get @Frame of generic function + * compile error (instead of crashing) for trying to async call and passing @Frame of wrong function + * `const result = (await a) + (await b);` this causes "Instruction does not dominate all uses" - need spill * compile error for error: expected anyframe->T, found 'anyframe' * compile error for error: expected anyframe->T, found 'i32' * await of a non async function * async call on a non async function * a test where an async function destroys its own frame in a defer * implicit cast of normal function to async function should be allowed when it is inferred to be async - * revive std.event.Loop * @typeInfo for @Frame(func) * peer type resolution of *@Frame(func) and anyframe * peer type resolution of *@Frame(func) and anyframe->T when the return type matches @@ -36,3 +39,4 @@ - it can be assumed that these are always available: the awaiter ptr, return ptr if applicable, error return trace ptr if applicable. 
- it can be assumed that it is never cancelled + * fix the debug info for variables of async functions diff --git a/doc/docgen.zig b/doc/docgen.zig index 3d3dcba76d..92764d7642 100644 --- a/doc/docgen.zig +++ b/doc/docgen.zig @@ -770,7 +770,7 @@ fn tokenizeAndPrintRaw(docgen_tokenizer: *Tokenizer, out: var, source_token: Tok .Keyword_or, .Keyword_orelse, .Keyword_packed, - .Keyword_promise, + .Keyword_anyframe, .Keyword_pub, .Keyword_resume, .Keyword_return, diff --git a/doc/langref.html.in b/doc/langref.html.in index ac381e00b2..0cb76a4bdf 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -6024,13 +6024,14 @@ const assert = std.debug.assert; var x: i32 = 1; -test "create a coroutine and cancel it" { - const p = try async simpleAsyncFn(); - comptime assert(@typeOf(p) == promise->void); - cancel p; +test "call an async function" { + var frame = async simpleAsyncFn(); + comptime assert(@typeOf(frame) == @Frame(simpleAsyncFn)); assert(x == 2); } -async<*std.mem.Allocator> fn simpleAsyncFn() void { +fn simpleAsyncFn() void { + x += 1; + suspend; x += 1; } {#code_end#} @@ -6041,60 +6042,33 @@ async<*std.mem.Allocator> fn simpleAsyncFn() void { return to the caller or resumer. The following code demonstrates where control flow goes:

- {#code_begin|test#} -const std = @import("std"); -const assert = std.debug.assert; - -test "coroutine suspend, resume, cancel" { - seq('a'); - const p = try async testAsyncSeq(); - seq('c'); - resume p; - seq('f'); - cancel p; - seq('g'); - - assert(std.mem.eql(u8, points, "abcdefg")); -} -async fn testAsyncSeq() void { - defer seq('e'); - - seq('b'); - suspend; - seq('d'); -} -var points = [_]u8{0} ** "abcdefg".len; -var index: usize = 0; - -fn seq(c: u8) void { - points[index] = c; - index += 1; -} - {#code_end#} +

+ TODO another test example here

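One way the missing example could look, sketched under the new `async`/`@frame`/`resume` semantics introduced in this commit (the `seq` helper and the order-assertion pattern are carried over from the example removed above; this sketch is illustrative and not part of the patch):

{#code_begin|test#}
const std = @import("std");
const assert = std.debug.assert;

var the_frame: anyframe = undefined;
var points = [_]u8{0} ** "abcde".len;
var index: usize = 0;

test "async function suspend then resume" {
    seq('a');
    // Start the async function; it runs until its first suspend point.
    _ = async testAsyncSeq();
    seq('c');
    // Resume at the suspend point; the function then runs to completion.
    resume the_frame;
    seq('e');
    assert(std.mem.eql(u8, points, "abcde"));
}

fn testAsyncSeq() void {
    seq('b');
    suspend {
        the_frame = @frame();
    }
    seq('d');
}

fn seq(c: u8) void {
    points[index] = c;
    index += 1;
}
{#code_end#}

As in the "coroutine await" example later in this section, the frame is stashed in a global so that the test block can resume it after observing the suspended state.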
When an async function suspends itself, it must be sure that it will be resumed or canceled somehow, for example by registering its promise handle in an event loop. Use a suspend capture block to gain access to the - promise: + promise (TODO this is outdated):

{#code_begin|test#} const std = @import("std"); const assert = std.debug.assert; +var the_frame: anyframe = undefined; +var result = false; + test "coroutine suspend with block" { - const p = try async testSuspendBlock(); + _ = async testSuspendBlock(); std.debug.assert(!result); - resume a_promise; + resume the_frame; std.debug.assert(result); - cancel p; } -var a_promise: promise = undefined; -var result = false; -async fn testSuspendBlock() void { +fn testSuspendBlock() void { suspend { - comptime assert(@typeOf(@handle()) == promise->void); - a_promise = @handle(); + comptime assert(@typeOf(@frame()) == *@Frame(testSuspendBlock)); + the_frame = @frame(); } result = true; } @@ -6124,16 +6098,13 @@ const std = @import("std"); const assert = std.debug.assert; test "resume from suspend" { - var buf: [500]u8 = undefined; - var a = &std.heap.FixedBufferAllocator.init(buf[0..]).allocator; var my_result: i32 = 1; - const p = try async testResumeFromSuspend(&my_result); - cancel p; + _ = async testResumeFromSuspend(&my_result); std.debug.assert(my_result == 2); } async fn testResumeFromSuspend(my_result: *i32) void { suspend { - resume @handle(); + resume @frame(); } my_result.* += 1; suspend; @@ -6172,30 +6143,30 @@ async fn testResumeFromSuspend(my_result: *i32) void { const std = @import("std"); const assert = std.debug.assert; -var a_promise: promise = undefined; +var the_frame: anyframe = undefined; var final_result: i32 = 0; test "coroutine await" { seq('a'); - const p = async amain() catch unreachable; + _ = async amain(); seq('f'); - resume a_promise; + resume the_frame; seq('i'); assert(final_result == 1234); assert(std.mem.eql(u8, seq_points, "abcdefghi")); } async fn amain() void { seq('b'); - const p = async another() catch unreachable; + var f = async another(); seq('e'); - final_result = await p; + final_result = await f; seq('h'); } async fn another() i32 { seq('c'); suspend { seq('d'); - a_promise = @handle(); + the_frame = @frame(); } seq('g'); return 1234; diff --git a/src/analyze.cpp b/src/analyze.cpp index 7482ba92ba..30aa82a216 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5325,7 +5325,7 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) { if (*instruction->name_hint == 0) { name = buf_ptr(buf_sprintf("@local%" ZIG_PRI_usize, alloca_i)); } else { - name = instruction->name_hint; + name = buf_ptr(buf_sprintf("%s.%" ZIG_PRI_usize, instruction->name_hint, alloca_i)); } field_names.append(name); field_types.append(child_type); diff --git a/src/codegen.cpp b/src/codegen.cpp index f1a42e321d..4510e7156c 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -535,24 +535,24 @@ static LLVMValueRef make_fn_llvm_value(CodeGen *g, ZigFn *fn) { // use the ABI alignment, which is fine. 
} - unsigned init_gen_i = 0; - if (!type_has_bits(return_type)) { - // nothing to do - } else if (type_is_nonnull_ptr(return_type)) { - addLLVMAttr(llvm_fn, 0, "nonnull"); - } else if (!is_async && want_first_arg_sret(g, &fn_type->data.fn.fn_type_id)) { - // Sret pointers must not be address 0 - addLLVMArgAttr(llvm_fn, 0, "nonnull"); - addLLVMArgAttr(llvm_fn, 0, "sret"); - if (cc_want_sret_attr(cc)) { - addLLVMArgAttr(llvm_fn, 0, "noalias"); - } - init_gen_i = 1; - } - if (is_async) { addLLVMArgAttr(llvm_fn, 0, "nonnull"); } else { + unsigned init_gen_i = 0; + if (!type_has_bits(return_type)) { + // nothing to do + } else if (type_is_nonnull_ptr(return_type)) { + addLLVMAttr(llvm_fn, 0, "nonnull"); + } else if (want_first_arg_sret(g, &fn_type->data.fn.fn_type_id)) { + // Sret pointers must not be address 0 + addLLVMArgAttr(llvm_fn, 0, "nonnull"); + addLLVMArgAttr(llvm_fn, 0, "sret"); + if (cc_want_sret_attr(cc)) { + addLLVMArgAttr(llvm_fn, 0, "noalias"); + } + init_gen_i = 1; + } + // set parameter attributes FnWalk fn_walk = {}; fn_walk.id = FnWalkIdAttrs; @@ -911,7 +911,7 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) { case PanicMsgIdBadResume: return buf_create_from_str("resumed an async function which already returned"); case PanicMsgIdBadAwait: - return buf_create_from_str("async function awaited/canceled twice"); + return buf_create_from_str("async function awaited twice"); case PanicMsgIdBadReturn: return buf_create_from_str("async function returned twice"); case PanicMsgIdResumedAnAwaitingFn: @@ -2350,6 +2350,10 @@ static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable, return get_handle_value(g, g->cur_ret_ptr, operand_type, get_pointer_to_type(g, operand_type, true)); } +static void set_tail_call_if_appropriate(CodeGen *g, LLVMValueRef call_inst) { + LLVMSetTailCall(call_inst, true); +} + static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrInstructionReturn *instruction) { if (fn_is_async(g->cur_fn)) { LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; @@ -2394,7 +2398,7 @@ static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrIns LLVMValueRef their_frame_ptr = LLVMBuildIntToPtr(g->builder, masked_prev_val, get_llvm_type(g, any_frame_type), ""); LLVMValueRef call_inst = gen_resume(g, nullptr, their_frame_ptr, ResumeIdReturn, nullptr); - LLVMSetTailCall(call_inst, true); + set_tail_call_if_appropriate(g, call_inst); LLVMBuildRetVoid(g->builder); g->cur_is_after_return = false; @@ -4009,7 +4013,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr LLVMBasicBlockRef call_bb = gen_suspend_begin(g, "CallResume"); LLVMValueRef call_inst = gen_resume(g, fn_val, frame_result_loc, ResumeIdCall, nullptr); - LLVMSetTailCall(call_inst, true); + set_tail_call_if_appropriate(g, call_inst); LLVMBuildRetVoid(g->builder); LLVMPositionBuilderAtEnd(g->builder, call_bb); @@ -5520,7 +5524,7 @@ static LLVMValueRef ir_render_cancel(CodeGen *g, IrExecutable *executable, IrIns LLVMPositionBuilderAtEnd(g->builder, early_return_block); LLVMValueRef call_inst = gen_resume(g, nullptr, target_frame_ptr, ResumeIdAwaitEarlyReturn, awaiter_ored_val); - LLVMSetTailCall(call_inst, true); + set_tail_call_if_appropriate(g, call_inst); LLVMBuildRetVoid(g->builder); LLVMPositionBuilderAtEnd(g->builder, resume_bb); @@ -5556,8 +5560,9 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst } // supply the error return trace pointer - LLVMValueRef my_err_ret_trace_val = 
get_cur_err_ret_trace_val(g, instruction->base.scope); - if (my_err_ret_trace_val != nullptr) { + if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) { + LLVMValueRef my_err_ret_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope); + assert(my_err_ret_trace_val != nullptr); LLVMValueRef err_ret_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_index_trace_arg(g, result_type), ""); LLVMBuildStore(g->builder, my_err_ret_trace_val, err_ret_trace_ptr_ptr); @@ -5588,7 +5593,7 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst // Tail resume it now, so that it can complete. LLVMPositionBuilderAtEnd(g->builder, early_return_block); LLVMValueRef call_inst = gen_resume(g, nullptr, target_frame_ptr, ResumeIdAwaitEarlyReturn, awaiter_init_val); - LLVMSetTailCall(call_inst, true); + set_tail_call_if_appropriate(g, call_inst); LLVMBuildRetVoid(g->builder); // Rely on the target to resume us from suspension. diff --git a/src/ir.cpp b/src/ir.cpp index f1d4b80a2c..57c50db818 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -15064,6 +15064,9 @@ static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc if (result_loc != nullptr && (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc))) { return result_loc; } + result_loc = ir_implicit_cast(ira, result_loc, get_pointer_to_type(ira->codegen, frame_type, false)); + if (type_is_invalid(result_loc->value.type)) + return ira->codegen->invalid_instruction; return &ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref, arg_count, casted_args, FnInlineAuto, true, nullptr, result_loc, frame_type)->base; } diff --git a/std/event/channel.zig b/std/event/channel.zig index c9686e37e9..c4f7dca085 100644 --- a/std/event/channel.zig +++ b/std/event/channel.zig @@ -77,18 +77,19 @@ pub fn Channel(comptime T: type) type { /// must be called when all calls to put and get have suspended and no more calls occur pub fn destroy(self: *SelfChannel) void { while (self.getters.get()) |get_node| { - cancel get_node.data.tick_node.data; + resume get_node.data.tick_node.data; } while (self.putters.get()) |put_node| { - cancel put_node.data.tick_node.data; + resume put_node.data.tick_node.data; } self.loop.allocator.free(self.buffer_nodes); self.loop.allocator.destroy(self); } - /// puts a data item in the channel. The promise completes when the value has been added to the + /// puts a data item in the channel. The function returns when the value has been added to the /// buffer, or in the case of a zero size buffer, when the item has been retrieved by a getter. - pub async fn put(self: *SelfChannel, data: T) void { + /// Or when the channel is destroyed. + pub fn put(self: *SelfChannel, data: T) void { var my_tick_node = Loop.NextTickNode.init(@frame()); var queue_node = std.atomic.Queue(PutNode).Node.init(PutNode{ .tick_node = &my_tick_node, @@ -114,7 +115,7 @@ pub fn Channel(comptime T: type) type { } } - /// await this function to get an item from the channel. If the buffer is empty, the promise will + /// await this function to get an item from the channel. If the buffer is empty, the frame will /// complete when the next item is put in the channel. 
pub async fn get(self: *SelfChannel) T { // TODO integrate this function with named return values diff --git a/std/event/fs.zig b/std/event/fs.zig index 22e9fc38c9..fe2f604ac3 100644 --- a/std/event/fs.zig +++ b/std/event/fs.zig @@ -76,12 +76,8 @@ pub const Request = struct { pub const PWriteVError = error{OutOfMemory} || File.WriteError; -/// data - just the inner references - must live until pwritev promise completes. +/// data - just the inner references - must live until pwritev frame completes. pub async fn pwritev(loop: *Loop, fd: fd_t, data: []const []const u8, offset: usize) PWriteVError!void { - // workaround for https://github.com/ziglang/zig/issues/1194 - suspend { - resume @handle(); - } switch (builtin.os) { .macosx, .linux, @@ -109,7 +105,7 @@ pub async fn pwritev(loop: *Loop, fd: fd_t, data: []const []const u8, offset: us } } -/// data must outlive the returned promise +/// data must outlive the returned frame pub async fn pwritevWindows(loop: *Loop, fd: fd_t, data: []const []const u8, offset: usize) os.WindowsWriteError!void { if (data.len == 0) return; if (data.len == 1) return await (async pwriteWindows(loop, fd, data[0], offset) catch unreachable); @@ -123,15 +119,10 @@ pub async fn pwritevWindows(loop: *Loop, fd: fd_t, data: []const []const u8, off } pub async fn pwriteWindows(loop: *Loop, fd: fd_t, data: []const u8, offset: u64) os.WindowsWriteError!void { - // workaround for https://github.com/ziglang/zig/issues/1194 - suspend { - resume @handle(); - } - var resume_node = Loop.ResumeNode.Basic{ .base = Loop.ResumeNode{ .id = Loop.ResumeNode.Id.Basic, - .handle = @handle(), + .handle = @frame(), .overlapped = windows.OVERLAPPED{ .Internal = 0, .InternalHigh = 0, @@ -166,18 +157,13 @@ pub async fn pwriteWindows(loop: *Loop, fd: fd_t, data: []const u8, offset: u64) } } -/// iovecs must live until pwritev promise completes. +/// iovecs must live until pwritev frame completes. pub async fn pwritevPosix( loop: *Loop, fd: fd_t, iovecs: []const os.iovec_const, offset: usize, ) os.WriteError!void { - // workaround for https://github.com/ziglang/zig/issues/1194 - suspend { - resume @handle(); - } - var req_node = RequestNode{ .prev = null, .next = null, @@ -194,7 +180,7 @@ pub async fn pwritevPosix( .TickNode = Loop.NextTickNode{ .prev = null, .next = null, - .data = @handle(), + .data = @frame(), }, }, }, @@ -211,13 +197,8 @@ pub async fn pwritevPosix( pub const PReadVError = error{OutOfMemory} || File.ReadError; -/// data - just the inner references - must live until preadv promise completes. +/// data - just the inner references - must live until preadv frame completes. 
pub async fn preadv(loop: *Loop, fd: fd_t, data: []const []u8, offset: usize) PReadVError!usize { - // workaround for https://github.com/ziglang/zig/issues/1194 - suspend { - resume @handle(); - } - assert(data.len != 0); switch (builtin.os) { .macosx, @@ -246,7 +227,7 @@ pub async fn preadv(loop: *Loop, fd: fd_t, data: []const []u8, offset: usize) PR } } -/// data must outlive the returned promise +/// data must outlive the returned frame pub async fn preadvWindows(loop: *Loop, fd: fd_t, data: []const []u8, offset: u64) !usize { assert(data.len != 0); if (data.len == 1) return await (async preadWindows(loop, fd, data[0], offset) catch unreachable); @@ -272,15 +253,10 @@ pub async fn preadvWindows(loop: *Loop, fd: fd_t, data: []const []u8, offset: u6 } pub async fn preadWindows(loop: *Loop, fd: fd_t, data: []u8, offset: u64) !usize { - // workaround for https://github.com/ziglang/zig/issues/1194 - suspend { - resume @handle(); - } - var resume_node = Loop.ResumeNode.Basic{ .base = Loop.ResumeNode{ .id = Loop.ResumeNode.Id.Basic, - .handle = @handle(), + .handle = @frame(), .overlapped = windows.OVERLAPPED{ .Internal = 0, .InternalHigh = 0, @@ -314,18 +290,13 @@ pub async fn preadWindows(loop: *Loop, fd: fd_t, data: []u8, offset: u64) !usize return usize(bytes_transferred); } -/// iovecs must live until preadv promise completes +/// iovecs must live until preadv frame completes pub async fn preadvPosix( loop: *Loop, fd: fd_t, iovecs: []const os.iovec, offset: usize, ) os.ReadError!usize { - // workaround for https://github.com/ziglang/zig/issues/1194 - suspend { - resume @handle(); - } - var req_node = RequestNode{ .prev = null, .next = null, @@ -342,7 +313,7 @@ pub async fn preadvPosix( .TickNode = Loop.NextTickNode{ .prev = null, .next = null, - .data = @handle(), + .data = @frame(), }, }, }, @@ -363,11 +334,6 @@ pub async fn openPosix( flags: u32, mode: File.Mode, ) File.OpenError!fd_t { - // workaround for https://github.com/ziglang/zig/issues/1194 - suspend { - resume @handle(); - } - const path_c = try std.os.toPosixPath(path); var req_node = RequestNode{ @@ -386,7 +352,7 @@ pub async fn openPosix( .TickNode = Loop.NextTickNode{ .prev = null, .next = null, - .data = @handle(), + .data = @frame(), }, }, }, @@ -643,11 +609,6 @@ async fn writeFileWindows(loop: *Loop, path: []const u8, contents: []const u8) ! } async fn writeFileModeThread(loop: *Loop, path: []const u8, contents: []const u8, mode: File.Mode) !void { - // workaround for https://github.com/ziglang/zig/issues/1194 - suspend { - resume @handle(); - } - const path_with_null = try std.cstr.addNullByte(loop.allocator, path); defer loop.allocator.free(path_with_null); @@ -667,7 +628,7 @@ async fn writeFileModeThread(loop: *Loop, path: []const u8, contents: []const u8 .TickNode = Loop.NextTickNode{ .prev = null, .next = null, - .data = @handle(), + .data = @frame(), }, }, }, @@ -682,7 +643,7 @@ async fn writeFileModeThread(loop: *Loop, path: []const u8, contents: []const u8 return req_node.data.msg.WriteFile.result; } -/// The promise resumes when the last data has been confirmed written, but before the file handle +/// The frame resumes when the last data has been confirmed written, but before the file handle /// is closed. /// Caller owns returned memory. 
pub async fn readFile(loop: *Loop, file_path: []const u8, max_size: usize) ![]u8 { @@ -734,7 +695,7 @@ pub const WatchEventId = enum { // // const FileTable = std.AutoHashMap([]const u8, *Put); // const Put = struct { -// putter: promise, +// putter: anyframe, // value_ptr: *V, // }; // }, @@ -748,21 +709,21 @@ pub const WatchEventId = enum { // const WindowsOsData = struct { // table_lock: event.Lock, // dir_table: DirTable, -// all_putters: std.atomic.Queue(promise), +// all_putters: std.atomic.Queue(anyframe), // ref_count: std.atomic.Int(usize), // // const DirTable = std.AutoHashMap([]const u8, *Dir); // const FileTable = std.AutoHashMap([]const u16, V); // // const Dir = struct { -// putter: promise, +// putter: anyframe, // file_table: FileTable, // table_lock: event.Lock, // }; // }; // // const LinuxOsData = struct { -// putter: promise, +// putter: anyframe, // inotify_fd: i32, // wd_table: WdTable, // table_lock: event.Lock, @@ -776,7 +737,7 @@ pub const WatchEventId = enum { // }; // }; // -// const FileToHandle = std.AutoHashMap([]const u8, promise); +// const FileToHandle = std.AutoHashMap([]const u8, anyframe); // // const Self = @This(); // @@ -811,7 +772,7 @@ pub const WatchEventId = enum { // .table_lock = event.Lock.init(loop), // .dir_table = OsData.DirTable.init(loop.allocator), // .ref_count = std.atomic.Int(usize).init(1), -// .all_putters = std.atomic.Queue(promise).init(), +// .all_putters = std.atomic.Queue(anyframe).init(), // }, // }; // return self; @@ -926,14 +887,9 @@ pub const WatchEventId = enum { // } // // async fn kqPutEvents(self: *Self, close_op: *CloseOperation, value: V, out_put: **OsData.Put) void { -// // TODO https://github.com/ziglang/zig/issues/1194 -// suspend { -// resume @handle(); -// } -// // var value_copy = value; // var put = OsData.Put{ -// .putter = @handle(), +// .putter = @frame(), // .value_ptr = &value_copy, // }; // out_put.* = &put; @@ -1091,18 +1047,13 @@ pub const WatchEventId = enum { // } // // async fn windowsDirReader(self: *Self, dir_handle: windows.HANDLE, dir: *OsData.Dir) void { -// // TODO https://github.com/ziglang/zig/issues/1194 -// suspend { -// resume @handle(); -// } -// // self.ref(); // defer self.deref(); // // defer os.close(dir_handle); // -// var putter_node = std.atomic.Queue(promise).Node{ -// .data = @handle(), +// var putter_node = std.atomic.Queue(anyframe).Node{ +// .data = @frame(), // .prev = null, // .next = null, // }; @@ -1112,7 +1063,7 @@ pub const WatchEventId = enum { // var resume_node = Loop.ResumeNode.Basic{ // .base = Loop.ResumeNode{ // .id = Loop.ResumeNode.Id.Basic, -// .handle = @handle(), +// .handle = @frame(), // .overlapped = windows.OVERLAPPED{ // .Internal = 0, // .InternalHigh = 0, @@ -1207,17 +1158,12 @@ pub const WatchEventId = enum { // } // // async fn linuxEventPutter(inotify_fd: i32, channel: *event.Channel(Event.Error!Event), out_watch: **Self) void { -// // TODO https://github.com/ziglang/zig/issues/1194 -// suspend { -// resume @handle(); -// } -// // const loop = channel.loop; // // var watch = Self{ // .channel = channel, // .os_data = OsData{ -// .putter = @handle(), +// .putter = @frame(), // .inotify_fd = inotify_fd, // .wd_table = OsData.WdTable.init(loop.allocator), // .table_lock = event.Lock.init(loop), diff --git a/std/event/future.zig b/std/event/future.zig index 2e62ace978..11a4c82fb0 100644 --- a/std/event/future.zig +++ b/std/event/future.zig @@ -2,8 +2,6 @@ const std = @import("../std.zig"); const assert = std.debug.assert; const testing = std.testing; 
const builtin = @import("builtin"); -const AtomicRmwOp = builtin.AtomicRmwOp; -const AtomicOrder = builtin.AtomicOrder; const Lock = std.event.Lock; const Loop = std.event.Loop; @@ -23,7 +21,7 @@ pub fn Future(comptime T: type) type { available: u8, const Self = @This(); - const Queue = std.atomic.Queue(promise); + const Queue = std.atomic.Queue(anyframe); pub fn init(loop: *Loop) Self { return Self{ @@ -37,10 +35,10 @@ pub fn Future(comptime T: type) type { /// available. /// Thread-safe. pub async fn get(self: *Self) *T { - if (@atomicLoad(u8, &self.available, AtomicOrder.SeqCst) == 2) { + if (@atomicLoad(u8, &self.available, .SeqCst) == 2) { return &self.data; } - const held = await (async self.lock.acquire() catch unreachable); + const held = self.lock.acquire(); held.release(); return &self.data; @@ -49,7 +47,7 @@ pub fn Future(comptime T: type) type { /// Gets the data without waiting for it. If it's available, a pointer is /// returned. Otherwise, null is returned. pub fn getOrNull(self: *Self) ?*T { - if (@atomicLoad(u8, &self.available, AtomicOrder.SeqCst) == 2) { + if (@atomicLoad(u8, &self.available, .SeqCst) == 2) { return &self.data; } else { return null; @@ -62,10 +60,10 @@ pub fn Future(comptime T: type) type { /// It's not required to call start() before resolve() but it can be useful since /// this method is thread-safe. pub async fn start(self: *Self) ?*T { - const state = @cmpxchgStrong(u8, &self.available, 0, 1, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse return null; + const state = @cmpxchgStrong(u8, &self.available, 0, 1, .SeqCst, .SeqCst) orelse return null; switch (state) { 1 => { - const held = await (async self.lock.acquire() catch unreachable); + const held = self.lock.acquire(); held.release(); return &self.data; }, @@ -77,7 +75,7 @@ pub fn Future(comptime T: type) type { /// Make the data become available. May be called only once. /// Before calling this, modify the `data` property. 
pub fn resolve(self: *Self) void { - const prev = @atomicRmw(u8, &self.available, AtomicRmwOp.Xchg, 2, AtomicOrder.SeqCst); + const prev = @atomicRmw(u8, &self.available, .Xchg, 2, .SeqCst); assert(prev == 0 or prev == 1); // resolve() called twice Lock.Held.release(Lock.Held{ .lock = &self.lock }); } @@ -86,7 +84,7 @@ pub fn Future(comptime T: type) type { test "std.event.Future" { // https://github.com/ziglang/zig/issues/1908 - if (builtin.single_threaded or builtin.os != builtin.Os.linux) return error.SkipZigTest; + if (builtin.single_threaded) return error.SkipZigTest; const allocator = std.heap.direct_allocator; @@ -94,38 +92,33 @@ test "std.event.Future" { try loop.initMultiThreaded(allocator); defer loop.deinit(); - const handle = try async testFuture(&loop); - defer cancel handle; + const handle = async testFuture(&loop); loop.run(); } async fn testFuture(loop: *Loop) void { - suspend { - resume @handle(); - } var future = Future(i32).init(loop); - const a = async waitOnFuture(&future) catch @panic("memory"); - const b = async waitOnFuture(&future) catch @panic("memory"); - const c = async resolveFuture(&future) catch @panic("memory"); + const a = async waitOnFuture(&future); + const b = async waitOnFuture(&future); + const c = async resolveFuture(&future); + + // TODO make this work: + //const result = (await a) + (await b); + const a_result = await a; + const b_result = await b; + const result = a_result + b_result; - const result = (await a) + (await b); cancel c; testing.expect(result == 12); } async fn waitOnFuture(future: *Future(i32)) i32 { - suspend { - resume @handle(); - } - return (await (async future.get() catch @panic("memory"))).*; + return future.get().*; } async fn resolveFuture(future: *Future(i32)) void { - suspend { - resume @handle(); - } future.data = 6; future.resolve(); } diff --git a/std/event/group.zig b/std/event/group.zig index 36235eed74..ab6d592278 100644 --- a/std/event/group.zig +++ b/std/event/group.zig @@ -2,8 +2,6 @@ const std = @import("../std.zig"); const builtin = @import("builtin"); const Lock = std.event.Lock; const Loop = std.event.Loop; -const AtomicRmwOp = builtin.AtomicRmwOp; -const AtomicOrder = builtin.AtomicOrder; const testing = std.testing; /// ReturnType must be `void` or `E!void` @@ -16,10 +14,10 @@ pub fn Group(comptime ReturnType: type) type { const Self = @This(); const Error = switch (@typeInfo(ReturnType)) { - builtin.TypeId.ErrorUnion => |payload| payload.error_set, + .ErrorUnion => |payload| payload.error_set, else => void, }; - const Stack = std.atomic.Stack(promise->ReturnType); + const Stack = std.atomic.Stack(anyframe->ReturnType); pub fn init(loop: *Loop) Self { return Self{ @@ -29,7 +27,7 @@ pub fn Group(comptime ReturnType: type) type { }; } - /// Cancel all the outstanding promises. Can be called even if wait was already called. + /// Cancel all the outstanding frames. Can be called even if wait was already called. pub fn deinit(self: *Self) void { while (self.coro_stack.pop()) |node| { cancel node.data; @@ -40,8 +38,8 @@ pub fn Group(comptime ReturnType: type) type { } } - /// Add a promise to the group. Thread-safe. - pub fn add(self: *Self, handle: promise->ReturnType) (error{OutOfMemory}!void) { + /// Add a frame to the group. Thread-safe. 
+ pub fn add(self: *Self, handle: anyframe->ReturnType) (error{OutOfMemory}!void) { const node = try self.lock.loop.allocator.create(Stack.Node); node.* = Stack.Node{ .next = undefined, @@ -51,7 +49,7 @@ pub fn Group(comptime ReturnType: type) type { } /// Add a node to the group. Thread-safe. Cannot fail. - /// `node.data` should be the promise handle to add to the group. + /// `node.data` should be the frame handle to add to the group. /// The node's memory should be in the coroutine frame of /// the handle that is in the node, or somewhere guaranteed to live /// at least as long. @@ -59,40 +57,11 @@ pub fn Group(comptime ReturnType: type) type { self.coro_stack.push(node); } - /// This is equivalent to an async call, but the async function is added to the group, instead - /// of returning a promise. func must be async and have return type ReturnType. - /// Thread-safe. - pub fn call(self: *Self, comptime func: var, args: ...) (error{OutOfMemory}!void) { - const S = struct { - async fn asyncFunc(node: **Stack.Node, args2: ...) ReturnType { - // TODO this is a hack to make the memory following be inside the coro frame - suspend { - var my_node: Stack.Node = undefined; - node.* = &my_node; - resume @handle(); - } - - // TODO this allocation elision should be guaranteed because we await it in - // this coro frame - return await (async func(args2) catch unreachable); - } - }; - var node: *Stack.Node = undefined; - const handle = try async S.asyncFunc(&node, args); - node.* = Stack.Node{ - .next = undefined, - .data = handle, - }; - self.coro_stack.push(node); - } - /// Wait for all the calls and promises of the group to complete. /// Thread-safe. /// Safe to call any number of times. pub async fn wait(self: *Self) ReturnType { - // TODO catch unreachable because the allocation can be grouped with - // the coro frame allocation - const held = await (async self.lock.acquire() catch unreachable); + const held = self.lock.acquire(); defer held.release(); while (self.coro_stack.pop()) |node| { @@ -131,8 +100,7 @@ test "std.event.Group" { try loop.initMultiThreaded(allocator); defer loop.deinit(); - const handle = try async testGroup(&loop); - defer cancel handle; + const handle = async testGroup(&loop); loop.run(); } @@ -140,26 +108,30 @@ test "std.event.Group" { async fn testGroup(loop: *Loop) void { var count: usize = 0; var group = Group(void).init(loop); - group.add(async sleepALittle(&count) catch @panic("memory")) catch @panic("memory"); - group.call(increaseByTen, &count) catch @panic("memory"); - await (async group.wait() catch @panic("memory")); + var sleep_a_little_frame = async sleepALittle(&count); + group.add(&sleep_a_little_frame) catch @panic("memory"); + var increase_by_ten_frame = async increaseByTen(&count); + group.add(&increase_by_ten_frame) catch @panic("memory"); + group.wait(); testing.expect(count == 11); var another = Group(anyerror!void).init(loop); - another.add(async somethingElse() catch @panic("memory")) catch @panic("memory"); - another.call(doSomethingThatFails) catch @panic("memory"); - testing.expectError(error.ItBroke, await (async another.wait() catch @panic("memory"))); + var something_else_frame = async somethingElse(); + another.add(&something_else_frame) catch @panic("memory"); + var something_that_fails_frame = async doSomethingThatFails(); + another.add(&something_that_fails_frame) catch @panic("memory"); + testing.expectError(error.ItBroke, another.wait()); } async fn sleepALittle(count: *usize) void { std.time.sleep(1 * std.time.millisecond); - _ = 
@atomicRmw(usize, count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst); + _ = @atomicRmw(usize, count, .Add, 1, .SeqCst); } async fn increaseByTen(count: *usize) void { var i: usize = 0; while (i < 10) : (i += 1) { - _ = @atomicRmw(usize, count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst); + _ = @atomicRmw(usize, count, .Add, 1, .SeqCst); } } diff --git a/std/event/io.zig b/std/event/io.zig index 29419a792e..4b54822e68 100644 --- a/std/event/io.zig +++ b/std/event/io.zig @@ -1,6 +1,5 @@ const std = @import("../std.zig"); const builtin = @import("builtin"); -const Allocator = std.mem.Allocator; const assert = std.debug.assert; const mem = std.mem; @@ -12,13 +11,13 @@ pub fn InStream(comptime ReadError: type) type { /// Return the number of bytes read. It may be less than buffer.len. /// If the number of bytes read is 0, it means end of stream. /// End of stream is not an error condition. - readFn: async<*Allocator> fn (self: *Self, buffer: []u8) Error!usize, + readFn: async fn (self: *Self, buffer: []u8) Error!usize, /// Return the number of bytes read. It may be less than buffer.len. /// If the number of bytes read is 0, it means end of stream. /// End of stream is not an error condition. pub async fn read(self: *Self, buffer: []u8) !usize { - return await (async self.readFn(self, buffer) catch unreachable); + return self.readFn(self, buffer); } /// Return the number of bytes read. If it is less than buffer.len @@ -26,7 +25,7 @@ pub fn InStream(comptime ReadError: type) type { pub async fn readFull(self: *Self, buffer: []u8) !usize { var index: usize = 0; while (index != buf.len) { - const amt_read = try await (async self.read(buf[index..]) catch unreachable); + const amt_read = try self.read(buf[index..]); if (amt_read == 0) return index; index += amt_read; } @@ -35,25 +34,25 @@ pub fn InStream(comptime ReadError: type) type { /// Same as `readFull` but end of stream returns `error.EndOfStream`. pub async fn readNoEof(self: *Self, buf: []u8) !void { - const amt_read = try await (async self.readFull(buf[index..]) catch unreachable); + const amt_read = try self.readFull(buf[index..]); if (amt_read < buf.len) return error.EndOfStream; } pub async fn readIntLittle(self: *Self, comptime T: type) !T { var bytes: [@sizeOf(T)]u8 = undefined; - try await (async self.readNoEof(bytes[0..]) catch unreachable); + try self.readNoEof(bytes[0..]); return mem.readIntLittle(T, &bytes); } pub async fn readIntBe(self: *Self, comptime T: type) !T { var bytes: [@sizeOf(T)]u8 = undefined; - try await (async self.readNoEof(bytes[0..]) catch unreachable); + try self.readNoEof(bytes[0..]); return mem.readIntBig(T, &bytes); } pub async fn readInt(self: *Self, comptime T: type, endian: builtin.Endian) !T { var bytes: [@sizeOf(T)]u8 = undefined; - try await (async self.readNoEof(bytes[0..]) catch unreachable); + try self.readNoEof(bytes[0..]); return mem.readInt(T, &bytes, endian); } @@ -61,7 +60,7 @@ pub fn InStream(comptime ReadError: type) type { // Only extern and packed structs have defined in-memory layout. 
comptime assert(@typeInfo(T).Struct.layout != builtin.TypeInfo.ContainerLayout.Auto); var res: [1]T = undefined; - try await (async self.readNoEof(@sliceToBytes(res[0..])) catch unreachable); + try self.readNoEof(@sliceToBytes(res[0..])); return res[0]; } }; @@ -72,6 +71,6 @@ pub fn OutStream(comptime WriteError: type) type { const Self = @This(); pub const Error = WriteError; - writeFn: async<*Allocator> fn (self: *Self, buffer: []u8) Error!void, + writeFn: async fn (self: *Self, buffer: []u8) Error!void, }; } diff --git a/std/event/lock.zig b/std/event/lock.zig index d86902cc06..8f2dac008c 100644 --- a/std/event/lock.zig +++ b/std/event/lock.zig @@ -3,8 +3,6 @@ const builtin = @import("builtin"); const assert = std.debug.assert; const testing = std.testing; const mem = std.mem; -const AtomicRmwOp = builtin.AtomicRmwOp; -const AtomicOrder = builtin.AtomicOrder; const Loop = std.event.Loop; /// Thread-safe async/await lock. @@ -17,7 +15,7 @@ pub const Lock = struct { queue: Queue, queue_empty_bit: u8, // TODO make this a bool - const Queue = std.atomic.Queue(promise); + const Queue = std.atomic.Queue(anyframe); pub const Held = struct { lock: *Lock, @@ -30,19 +28,19 @@ pub const Lock = struct { } // We need to release the lock. - _ = @atomicRmw(u8, &self.lock.queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst); - _ = @atomicRmw(u8, &self.lock.shared_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst); + _ = @atomicRmw(u8, &self.lock.queue_empty_bit, .Xchg, 1, .SeqCst); + _ = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 0, .SeqCst); // There might be a queue item. If we know the queue is empty, we can be done, // because the other actor will try to obtain the lock. // But if there's a queue item, we are the actor which must loop and attempt // to grab the lock again. - if (@atomicLoad(u8, &self.lock.queue_empty_bit, AtomicOrder.SeqCst) == 1) { + if (@atomicLoad(u8, &self.lock.queue_empty_bit, .SeqCst) == 1) { return; } while (true) { - const old_bit = @atomicRmw(u8, &self.lock.shared_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst); + const old_bit = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 1, .SeqCst); if (old_bit != 0) { // We did not obtain the lock. Great, the queue is someone else's problem. return; @@ -55,11 +53,11 @@ pub const Lock = struct { } // Release the lock again. - _ = @atomicRmw(u8, &self.lock.queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst); - _ = @atomicRmw(u8, &self.lock.shared_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst); + _ = @atomicRmw(u8, &self.lock.queue_empty_bit, .Xchg, 1, .SeqCst); + _ = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 0, .SeqCst); // Find out if we can be done. - if (@atomicLoad(u8, &self.lock.queue_empty_bit, AtomicOrder.SeqCst) == 1) { + if (@atomicLoad(u8, &self.lock.queue_empty_bit, .SeqCst) == 1) { return; } } @@ -88,15 +86,11 @@ pub const Lock = struct { /// All calls to acquire() and release() must complete before calling deinit(). 
pub fn deinit(self: *Lock) void { assert(self.shared_bit == 0); - while (self.queue.get()) |node| cancel node.data; + while (self.queue.get()) |node| resume node.data; } pub async fn acquire(self: *Lock) Held { - // TODO explicitly put this memory in the coroutine frame #1194 - suspend { - resume @handle(); - } - var my_tick_node = Loop.NextTickNode.init(@handle()); + var my_tick_node = Loop.NextTickNode.init(@frame()); errdefer _ = self.queue.remove(&my_tick_node); // TODO test canceling an acquire suspend { @@ -107,9 +101,9 @@ pub const Lock = struct { // We set this bit so that later we can rely on the fact, that if queue_empty_bit is 1, some actor // will attempt to grab the lock. - _ = @atomicRmw(u8, &self.queue_empty_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst); + _ = @atomicRmw(u8, &self.queue_empty_bit, .Xchg, 0, .SeqCst); - const old_bit = @atomicRmw(u8, &self.shared_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst); + const old_bit = @atomicRmw(u8, &self.shared_bit, .Xchg, 1, .SeqCst); if (old_bit == 0) { if (self.queue.get()) |node| { // Whether this node is us or someone else, we tail resume it. @@ -123,8 +117,7 @@ pub const Lock = struct { }; test "std.event.Lock" { - // TODO https://github.com/ziglang/zig/issues/2377 - if (true) return error.SkipZigTest; + // TODO https://github.com/ziglang/zig/issues/1908 if (builtin.single_threaded) return error.SkipZigTest; const allocator = std.heap.direct_allocator; @@ -136,39 +129,34 @@ test "std.event.Lock" { var lock = Lock.init(&loop); defer lock.deinit(); - const handle = try async testLock(&loop, &lock); - defer cancel handle; + _ = async testLock(&loop, &lock); loop.run(); testing.expectEqualSlices(i32, [1]i32{3 * @intCast(i32, shared_test_data.len)} ** shared_test_data.len, shared_test_data); } async fn testLock(loop: *Loop, lock: *Lock) void { - // TODO explicitly put next tick node memory in the coroutine frame #1194 - suspend { - resume @handle(); - } - const handle1 = async lockRunner(lock) catch @panic("out of memory"); + const handle1 = async lockRunner(lock); var tick_node1 = Loop.NextTickNode{ .prev = undefined, .next = undefined, - .data = handle1, + .data = &handle1, }; loop.onNextTick(&tick_node1); - const handle2 = async lockRunner(lock) catch @panic("out of memory"); + const handle2 = async lockRunner(lock); var tick_node2 = Loop.NextTickNode{ .prev = undefined, .next = undefined, - .data = handle2, + .data = &handle2, }; loop.onNextTick(&tick_node2); - const handle3 = async lockRunner(lock) catch @panic("out of memory"); + const handle3 = async lockRunner(lock); var tick_node3 = Loop.NextTickNode{ .prev = undefined, .next = undefined, - .data = handle3, + .data = &handle3, }; loop.onNextTick(&tick_node3); @@ -185,7 +173,7 @@ async fn lockRunner(lock: *Lock) void { var i: usize = 0; while (i < shared_test_data.len) : (i += 1) { - const lock_promise = async lock.acquire() catch @panic("out of memory"); + const lock_promise = async lock.acquire(); const handle = await lock_promise; defer handle.release(); diff --git a/std/event/loop.zig b/std/event/loop.zig index a4a60b5098..f1febd3fdb 100644 --- a/std/event/loop.zig +++ b/std/event/loop.zig @@ -457,7 +457,7 @@ pub const Loop = struct { var resume_node = ResumeNode.Basic{ .base = ResumeNode{ .id = ResumeNode.Id.Basic, - .handle = @handle(), + .handle = @frame(), .overlapped = ResumeNode.overlapped_init, }, }; @@ -469,7 +469,7 @@ pub const Loop = struct { var resume_node = ResumeNode.Basic{ .base = ResumeNode{ .id = ResumeNode.Id.Basic, - .handle = @handle(), + .handle = 
@frame(), .overlapped = ResumeNode.overlapped_init, }, .kev = undefined, diff --git a/std/event/net.zig b/std/event/net.zig index 46b724e32e..3752c88e99 100644 --- a/std/event/net.zig +++ b/std/event/net.zig @@ -9,17 +9,17 @@ const File = std.fs.File; const fd_t = os.fd_t; pub const Server = struct { - handleRequestFn: async<*mem.Allocator> fn (*Server, *const std.net.Address, File) void, + handleRequestFn: async fn (*Server, *const std.net.Address, File) void, loop: *Loop, sockfd: ?i32, - accept_coro: ?promise, + accept_coro: ?anyframe, listen_address: std.net.Address, waiting_for_emfile_node: PromiseNode, listen_resume_node: event.Loop.ResumeNode, - const PromiseNode = std.TailQueue(promise).Node; + const PromiseNode = std.TailQueue(anyframe).Node; pub fn init(loop: *Loop) Server { // TODO can't initialize handler coroutine here because we need well defined copy elision @@ -41,7 +41,7 @@ pub const Server = struct { pub fn listen( self: *Server, address: *const std.net.Address, - handleRequestFn: async<*mem.Allocator> fn (*Server, *const std.net.Address, File) void, + handleRequestFn: async fn (*Server, *const std.net.Address, File) void, ) !void { self.handleRequestFn = handleRequestFn; @@ -53,7 +53,7 @@ pub const Server = struct { try os.listen(sockfd, os.SOMAXCONN); self.listen_address = std.net.Address.initPosix(try os.getsockname(sockfd)); - self.accept_coro = try async Server.handler(self); + self.accept_coro = async Server.handler(self); errdefer cancel self.accept_coro.?; self.listen_resume_node.handle = self.accept_coro.?; @@ -86,12 +86,7 @@ pub const Server = struct { continue; } var socket = File.openHandle(accepted_fd); - _ = async self.handleRequestFn(self, &accepted_addr, socket) catch |err| switch (err) { - error.OutOfMemory => { - socket.close(); - continue; - }, - }; + self.handleRequestFn(self, &accepted_addr, socket); } else |err| switch (err) { error.ProcessFdQuotaExceeded => @panic("TODO handle this error"), error.ConnectionAborted => continue, @@ -124,7 +119,7 @@ pub async fn connectUnixSocket(loop: *Loop, path: []const u8) !i32 { mem.copy(u8, sock_addr.path[0..], path); const size = @intCast(u32, @sizeOf(os.sa_family_t) + path.len); try os.connect_async(sockfd, &sock_addr, size); - try await try async loop.linuxWaitFd(sockfd, os.EPOLLIN | os.EPOLLOUT | os.EPOLLET); + try loop.linuxWaitFd(sockfd, os.EPOLLIN | os.EPOLLOUT | os.EPOLLET); try os.getsockoptError(sockfd); return sockfd; @@ -149,7 +144,7 @@ pub async fn read(loop: *std.event.Loop, fd: fd_t, buffer: []u8) ReadError!usize .iov_len = buffer.len, }; const iovs: *const [1]os.iovec = &iov; - return await (async readvPosix(loop, fd, iovs, 1) catch unreachable); + return readvPosix(loop, fd, iovs, 1); } pub const WriteError = error{}; @@ -160,7 +155,7 @@ pub async fn write(loop: *std.event.Loop, fd: fd_t, buffer: []const u8) WriteErr .iov_len = buffer.len, }; const iovs: *const [1]os.iovec_const = &iov; - return await (async writevPosix(loop, fd, iovs, 1) catch unreachable); + return writevPosix(loop, fd, iovs, 1); } pub async fn writevPosix(loop: *Loop, fd: i32, iov: [*]const os.iovec_const, count: usize) !void { @@ -174,7 +169,7 @@ pub async fn writevPosix(loop: *Loop, fd: i32, iov: [*]const os.iovec_const, cou os.EINVAL => unreachable, os.EFAULT => unreachable, os.EAGAIN => { - try await (async loop.linuxWaitFd(fd, os.EPOLLET | os.EPOLLOUT) catch unreachable); + try loop.linuxWaitFd(fd, os.EPOLLET | os.EPOLLOUT); continue; }, os.EBADF => unreachable, // always a race condition @@ -205,7 +200,7 @@ pub async fn 
readvPosix(loop: *std.event.Loop, fd: i32, iov: [*]os.iovec, count: os.EINVAL => unreachable, os.EFAULT => unreachable, os.EAGAIN => { - try await (async loop.linuxWaitFd(fd, os.EPOLLET | os.EPOLLIN) catch unreachable); + try loop.linuxWaitFd(fd, os.EPOLLET | os.EPOLLIN); continue; }, os.EBADF => unreachable, // always a race condition @@ -232,7 +227,7 @@ pub async fn writev(loop: *Loop, fd: fd_t, data: []const []const u8) !void { }; } - return await (async writevPosix(loop, fd, iovecs.ptr, data.len) catch unreachable); + return writevPosix(loop, fd, iovecs.ptr, data.len); } pub async fn readv(loop: *Loop, fd: fd_t, data: []const []u8) !usize { @@ -246,7 +241,7 @@ pub async fn readv(loop: *Loop, fd: fd_t, data: []const []u8) !usize { }; } - return await (async readvPosix(loop, fd, iovecs.ptr, data.len) catch unreachable); + return readvPosix(loop, fd, iovecs.ptr, data.len); } pub async fn connect(loop: *Loop, _address: *const std.net.Address) !File { @@ -256,7 +251,7 @@ pub async fn connect(loop: *Loop, _address: *const std.net.Address) !File { errdefer os.close(sockfd); try os.connect_async(sockfd, &address.os_addr, @sizeOf(os.sockaddr_in)); - try await try async loop.linuxWaitFd(sockfd, os.EPOLLIN | os.EPOLLOUT | os.EPOLLET); + try loop.linuxWaitFd(sockfd, os.EPOLLIN | os.EPOLLOUT | os.EPOLLET); try os.getsockoptError(sockfd); return File.openHandle(sockfd); @@ -275,17 +270,16 @@ test "listen on a port, send bytes, receive bytes" { tcp_server: Server, const Self = @This(); - async<*mem.Allocator> fn handler(tcp_server: *Server, _addr: *const std.net.Address, _socket: File) void { + async fn handler(tcp_server: *Server, _addr: *const std.net.Address, _socket: File) void { const self = @fieldParentPtr(Self, "tcp_server", tcp_server); var socket = _socket; // TODO https://github.com/ziglang/zig/issues/1592 defer socket.close(); // TODO guarantee elision of this allocation - const next_handler = async errorableHandler(self, _addr, socket) catch unreachable; - (await next_handler) catch |err| { + const next_handler = errorableHandler(self, _addr, socket) catch |err| { std.debug.panic("unable to handle connection: {}\n", err); }; suspend { - cancel @handle(); + cancel @frame(); } } async fn errorableHandler(self: *Self, _addr: *const std.net.Address, _socket: File) !void { @@ -306,15 +300,14 @@ test "listen on a port, send bytes, receive bytes" { defer server.tcp_server.deinit(); try server.tcp_server.listen(&addr, MyServer.handler); - const p = try async doAsyncTest(&loop, &server.tcp_server.listen_address, &server.tcp_server); - defer cancel p; + _ = async doAsyncTest(&loop, &server.tcp_server.listen_address, &server.tcp_server); loop.run(); } async fn doAsyncTest(loop: *Loop, address: *const std.net.Address, server: *Server) void { errdefer @panic("test failure"); - var socket_file = try await try async connect(loop, address); + var socket_file = try connect(loop, address); defer socket_file.close(); var buf: [512]u8 = undefined; @@ -340,9 +333,9 @@ pub const OutStream = struct { }; } - async<*mem.Allocator> fn writeFn(out_stream: *Stream, bytes: []const u8) Error!void { + async fn writeFn(out_stream: *Stream, bytes: []const u8) Error!void { const self = @fieldParentPtr(OutStream, "stream", out_stream); - return await (async write(self.loop, self.fd, bytes) catch unreachable); + return write(self.loop, self.fd, bytes); } }; @@ -362,8 +355,8 @@ pub const InStream = struct { }; } - async<*mem.Allocator> fn readFn(in_stream: *Stream, bytes: []u8) Error!usize { + async fn readFn(in_stream: 
*Stream, bytes: []u8) Error!usize { const self = @fieldParentPtr(InStream, "stream", in_stream); - return await (async read(self.loop, self.fd, bytes) catch unreachable); + return read(self.loop, self.fd, bytes); } }; diff --git a/std/event/rwlock.zig b/std/event/rwlock.zig index 7b97fa24c1..a5768e5b65 100644 --- a/std/event/rwlock.zig +++ b/std/event/rwlock.zig @@ -3,8 +3,6 @@ const builtin = @import("builtin"); const assert = std.debug.assert; const testing = std.testing; const mem = std.mem; -const AtomicRmwOp = builtin.AtomicRmwOp; -const AtomicOrder = builtin.AtomicOrder; const Loop = std.event.Loop; /// Thread-safe async/await lock. @@ -28,19 +26,19 @@ pub const RwLock = struct { const ReadLock = 2; }; - const Queue = std.atomic.Queue(promise); + const Queue = std.atomic.Queue(anyframe); pub const HeldRead = struct { lock: *RwLock, pub fn release(self: HeldRead) void { // If other readers still hold the lock, we're done. - if (@atomicRmw(usize, &self.lock.reader_lock_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst) != 1) { + if (@atomicRmw(usize, &self.lock.reader_lock_count, .Sub, 1, .SeqCst) != 1) { return; } - _ = @atomicRmw(u8, &self.lock.reader_queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst); - if (@cmpxchgStrong(u8, &self.lock.shared_state, State.ReadLock, State.Unlocked, AtomicOrder.SeqCst, AtomicOrder.SeqCst) != null) { + _ = @atomicRmw(u8, &self.lock.reader_queue_empty_bit, .Xchg, 1, .SeqCst); + if (@cmpxchgStrong(u8, &self.lock.shared_state, State.ReadLock, State.Unlocked, .SeqCst, .SeqCst) != null) { // Didn't unlock. Someone else's problem. return; } @@ -61,17 +59,17 @@ pub const RwLock = struct { } // We need to release the write lock. Check if any readers are waiting to grab the lock. - if (@atomicLoad(u8, &self.lock.reader_queue_empty_bit, AtomicOrder.SeqCst) == 0) { + if (@atomicLoad(u8, &self.lock.reader_queue_empty_bit, .SeqCst) == 0) { // Switch to a read lock. - _ = @atomicRmw(u8, &self.lock.shared_state, AtomicRmwOp.Xchg, State.ReadLock, AtomicOrder.SeqCst); + _ = @atomicRmw(u8, &self.lock.shared_state, .Xchg, State.ReadLock, .SeqCst); while (self.lock.reader_queue.get()) |node| { self.lock.loop.onNextTick(node); } return; } - _ = @atomicRmw(u8, &self.lock.writer_queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst); - _ = @atomicRmw(u8, &self.lock.shared_state, AtomicRmwOp.Xchg, State.Unlocked, AtomicOrder.SeqCst); + _ = @atomicRmw(u8, &self.lock.writer_queue_empty_bit, .Xchg, 1, .SeqCst); + _ = @atomicRmw(u8, &self.lock.shared_state, .Xchg, State.Unlocked, .SeqCst); self.lock.commonPostUnlock(); } @@ -93,17 +91,16 @@ pub const RwLock = struct { /// All calls to acquire() and release() must complete before calling deinit(). 
pub fn deinit(self: *RwLock) void { assert(self.shared_state == State.Unlocked); - while (self.writer_queue.get()) |node| cancel node.data; - while (self.reader_queue.get()) |node| cancel node.data; + while (self.writer_queue.get()) |node| resume node.data; + while (self.reader_queue.get()) |node| resume node.data; } pub async fn acquireRead(self: *RwLock) HeldRead { - _ = @atomicRmw(usize, &self.reader_lock_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst); + _ = @atomicRmw(usize, &self.reader_lock_count, .Add, 1, .SeqCst); suspend { - // TODO explicitly put this memory in the coroutine frame #1194 var my_tick_node = Loop.NextTickNode{ - .data = @handle(), + .data = @frame(), .prev = undefined, .next = undefined, }; @@ -115,10 +112,10 @@ pub const RwLock = struct { // We set this bit so that later we can rely on the fact, that if reader_queue_empty_bit is 1, // some actor will attempt to grab the lock. - _ = @atomicRmw(u8, &self.reader_queue_empty_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst); + _ = @atomicRmw(u8, &self.reader_queue_empty_bit, .Xchg, 0, .SeqCst); // Here we don't care if we are the one to do the locking or if it was already locked for reading. - const have_read_lock = if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.ReadLock, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |old_state| old_state == State.ReadLock else true; + const have_read_lock = if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.ReadLock, .SeqCst, .SeqCst)) |old_state| old_state == State.ReadLock else true; if (have_read_lock) { // Give out all the read locks. if (self.reader_queue.get()) |first_node| { @@ -134,9 +131,8 @@ pub const RwLock = struct { pub async fn acquireWrite(self: *RwLock) HeldWrite { suspend { - // TODO explicitly put this memory in the coroutine frame #1194 var my_tick_node = Loop.NextTickNode{ - .data = @handle(), + .data = @frame(), .prev = undefined, .next = undefined, }; @@ -148,10 +144,10 @@ pub const RwLock = struct { // We set this bit so that later we can rely on the fact, that if writer_queue_empty_bit is 1, // some actor will attempt to grab the lock. - _ = @atomicRmw(u8, &self.writer_queue_empty_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst); + _ = @atomicRmw(u8, &self.writer_queue_empty_bit, .Xchg, 0, .SeqCst); // Here we must be the one to acquire the write lock. It cannot already be locked. - if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.WriteLock, AtomicOrder.SeqCst, AtomicOrder.SeqCst) == null) { + if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.WriteLock, .SeqCst, .SeqCst) == null) { // We now have a write lock. if (self.writer_queue.get()) |node| { // Whether this node is us or someone else, we tail resume it. @@ -169,8 +165,8 @@ pub const RwLock = struct { // obtain the lock. // But if there's a writer_queue item or a reader_queue item, // we are the actor which must loop and attempt to grab the lock again. - if (@atomicLoad(u8, &self.writer_queue_empty_bit, AtomicOrder.SeqCst) == 0) { - if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.WriteLock, AtomicOrder.SeqCst, AtomicOrder.SeqCst) != null) { + if (@atomicLoad(u8, &self.writer_queue_empty_bit, .SeqCst) == 0) { + if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.WriteLock, .SeqCst, .SeqCst) != null) { // We did not obtain the lock. Great, the queues are someone else's problem. return; } @@ -180,13 +176,13 @@ pub const RwLock = struct { return; } // Release the lock again. 
- _ = @atomicRmw(u8, &self.writer_queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst); - _ = @atomicRmw(u8, &self.shared_state, AtomicRmwOp.Xchg, State.Unlocked, AtomicOrder.SeqCst); + _ = @atomicRmw(u8, &self.writer_queue_empty_bit, .Xchg, 1, .SeqCst); + _ = @atomicRmw(u8, &self.shared_state, .Xchg, State.Unlocked, .SeqCst); continue; } - if (@atomicLoad(u8, &self.reader_queue_empty_bit, AtomicOrder.SeqCst) == 0) { - if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.ReadLock, AtomicOrder.SeqCst, AtomicOrder.SeqCst) != null) { + if (@atomicLoad(u8, &self.reader_queue_empty_bit, .SeqCst) == 0) { + if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.ReadLock, .SeqCst, .SeqCst) != null) { // We did not obtain the lock. Great, the queues are someone else's problem. return; } @@ -199,8 +195,8 @@ pub const RwLock = struct { return; } // Release the lock again. - _ = @atomicRmw(u8, &self.reader_queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst); - if (@cmpxchgStrong(u8, &self.shared_state, State.ReadLock, State.Unlocked, AtomicOrder.SeqCst, AtomicOrder.SeqCst) != null) { + _ = @atomicRmw(u8, &self.reader_queue_empty_bit, .Xchg, 1, .SeqCst); + if (@cmpxchgStrong(u8, &self.shared_state, State.ReadLock, State.Unlocked, .SeqCst, .SeqCst) != null) { // Didn't unlock. Someone else's problem. return; } @@ -215,6 +211,9 @@ test "std.event.RwLock" { // https://github.com/ziglang/zig/issues/2377 if (true) return error.SkipZigTest; + // https://github.com/ziglang/zig/issues/1908 + if (builtin.single_threaded) return error.SkipZigTest; + const allocator = std.heap.direct_allocator; var loop: Loop = undefined; @@ -224,8 +223,7 @@ test "std.event.RwLock" { var lock = RwLock.init(&loop); defer lock.deinit(); - const handle = try async testLock(&loop, &lock); - defer cancel handle; + const handle = testLock(&loop, &lock); loop.run(); const expected_result = [1]i32{shared_it_count * @intCast(i32, shared_test_data.len)} ** shared_test_data.len; @@ -233,28 +231,31 @@ test "std.event.RwLock" { } async fn testLock(loop: *Loop, lock: *RwLock) void { - // TODO explicitly put next tick node memory in the coroutine frame #1194 - suspend { - resume @handle(); - } - var read_nodes: [100]Loop.NextTickNode = undefined; for (read_nodes) |*read_node| { - read_node.data = async readRunner(lock) catch @panic("out of memory"); + const frame = loop.allocator.create(@Frame(readRunner)) catch @panic("memory"); + read_node.data = frame; + frame.* = async readRunner(lock); loop.onNextTick(read_node); } var write_nodes: [shared_it_count]Loop.NextTickNode = undefined; for (write_nodes) |*write_node| { - write_node.data = async writeRunner(lock) catch @panic("out of memory"); + const frame = loop.allocator.create(@Frame(writeRunner)) catch @panic("memory"); + write_node.data = frame; + frame.* = async writeRunner(lock); loop.onNextTick(write_node); } for (write_nodes) |*write_node| { - await @ptrCast(promise->void, write_node.data); + const casted = @ptrCast(*const @Frame(writeRunner), write_node.data); + await casted; + loop.allocator.destroy(casted); } for (read_nodes) |*read_node| { - await @ptrCast(promise->void, read_node.data); + const casted = @ptrCast(*const @Frame(readRunner), read_node.data); + await casted; + loop.allocator.destroy(casted); } } @@ -269,7 +270,7 @@ async fn writeRunner(lock: *RwLock) void { var i: usize = 0; while (i < shared_test_data.len) : (i += 1) { std.time.sleep(100 * std.time.microsecond); - const lock_promise = async lock.acquireWrite() catch @panic("out of 
memory"); + const lock_promise = async lock.acquireWrite(); const handle = await lock_promise; defer handle.release(); @@ -287,7 +288,7 @@ async fn readRunner(lock: *RwLock) void { var i: usize = 0; while (i < shared_test_data.len) : (i += 1) { - const lock_promise = async lock.acquireRead() catch @panic("out of memory"); + const lock_promise = async lock.acquireRead(); const handle = await lock_promise; defer handle.release(); diff --git a/std/zig/parser_test.zig b/std/zig/parser_test.zig index 7407528bf5..aec1ef96b5 100644 --- a/std/zig/parser_test.zig +++ b/std/zig/parser_test.zig @@ -1183,7 +1183,7 @@ test "zig fmt: resume from suspend block" { try testCanonical( \\fn foo() void { \\ suspend { - \\ resume @handle(); + \\ resume @frame(); \\ } \\} \\ diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 810e40b18b..835f968e23 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -1403,24 +1403,14 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { ); cases.add( - "@handle() called outside of function definition", - \\var handle_undef: promise = undefined; - \\var handle_dummy: promise = @handle(); + "@frame() called outside of function definition", + \\var handle_undef: anyframe = undefined; + \\var handle_dummy: anyframe = @frame(); \\export fn entry() bool { \\ return handle_undef == handle_dummy; \\} , - "tmp.zig:2:29: error: @handle() called outside of function definition", - ); - - cases.add( - "@handle() in non-async function", - \\export fn entry() bool { - \\ var handle_undef: promise = undefined; - \\ return handle_undef == @handle(); - \\} - , - "tmp.zig:3:28: error: @handle() in non-async function", + "tmp.zig:2:30: error: @frame() called outside of function definition", ); cases.add( @@ -1796,15 +1786,9 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { cases.add( "suspend inside suspend block", - \\const std = @import("std",); - \\ \\export fn entry() void { - \\ var buf: [500]u8 = undefined; - \\ var a = &std.heap.FixedBufferAllocator.init(buf[0..]).allocator; - \\ const p = (async foo()) catch unreachable; - \\ cancel p; + \\ _ = async foo(); \\} - \\ \\async fn foo() void { \\ suspend { \\ suspend { @@ -1812,8 +1796,8 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ } \\} , - "tmp.zig:12:9: error: cannot suspend inside suspend block", - "tmp.zig:11:5: note: other suspend block here", + "tmp.zig:6:9: error: cannot suspend inside suspend block", + "tmp.zig:5:5: note: other suspend block here", ); cases.add( @@ -1854,15 +1838,14 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { cases.add( "returning error from void async function", - \\const std = @import("std",); \\export fn entry() void { - \\ const p = async amain() catch unreachable; + \\ _ = async amain(); \\} \\async fn amain() void { \\ return error.ShouldBeCompileError; \\} , - "tmp.zig:6:17: error: expected type 'void', found 'error{ShouldBeCompileError}'", + "tmp.zig:5:17: error: expected type 'void', found 'error{ShouldBeCompileError}'", ); cases.add( -- cgit v1.2.3 From f3f838cc016fd8190a9bba46fa495fbc27325492 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 14 Aug 2019 11:22:12 -0400 Subject: add compile error for await in exported function --- src/analyze.cpp | 45 +++++++++++++++++++++++++++++---------------- test/compile_errors.zig | 14 ++++++++++++++ 2 files changed, 43 insertions(+), 16 deletions(-) (limited to 'test/compile_errors.zig') diff --git a/src/analyze.cpp b/src/analyze.cpp index 146f661244..64d6059da4 100644 --- 
a/src/analyze.cpp +++ b/src/analyze.cpp @@ -3893,18 +3893,18 @@ static void analyze_fn_async(CodeGen *g, ZigFn *fn) { fn->inferred_async_node = inferred_async_none; } -static void analyze_fn_ir(CodeGen *g, ZigFn *fn_table_entry, AstNode *return_type_node) { - ZigType *fn_type = fn_table_entry->type_entry; +static void analyze_fn_ir(CodeGen *g, ZigFn *fn, AstNode *return_type_node) { + ZigType *fn_type = fn->type_entry; assert(!fn_type->data.fn.is_generic); FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id; - ZigType *block_return_type = ir_analyze(g, &fn_table_entry->ir_executable, - &fn_table_entry->analyzed_executable, fn_type_id->return_type, return_type_node); - fn_table_entry->src_implicit_return_type = block_return_type; + ZigType *block_return_type = ir_analyze(g, &fn->ir_executable, + &fn->analyzed_executable, fn_type_id->return_type, return_type_node); + fn->src_implicit_return_type = block_return_type; - if (type_is_invalid(block_return_type) || fn_table_entry->analyzed_executable.invalid) { + if (type_is_invalid(block_return_type) || fn->analyzed_executable.invalid) { assert(g->errors.length > 0); - fn_table_entry->anal_state = FnAnalStateInvalid; + fn->anal_state = FnAnalStateInvalid; return; } @@ -3912,20 +3912,20 @@ static void analyze_fn_ir(CodeGen *g, ZigFn *fn_table_entry, AstNode *return_typ ZigType *return_err_set_type = fn_type_id->return_type->data.error_union.err_set_type; if (return_err_set_type->data.error_set.infer_fn != nullptr) { ZigType *inferred_err_set_type; - if (fn_table_entry->src_implicit_return_type->id == ZigTypeIdErrorSet) { - inferred_err_set_type = fn_table_entry->src_implicit_return_type; - } else if (fn_table_entry->src_implicit_return_type->id == ZigTypeIdErrorUnion) { - inferred_err_set_type = fn_table_entry->src_implicit_return_type->data.error_union.err_set_type; + if (fn->src_implicit_return_type->id == ZigTypeIdErrorSet) { + inferred_err_set_type = fn->src_implicit_return_type; + } else if (fn->src_implicit_return_type->id == ZigTypeIdErrorUnion) { + inferred_err_set_type = fn->src_implicit_return_type->data.error_union.err_set_type; } else { add_node_error(g, return_type_node, buf_sprintf("function with inferred error set must return at least one possible error")); - fn_table_entry->anal_state = FnAnalStateInvalid; + fn->anal_state = FnAnalStateInvalid; return; } if (inferred_err_set_type->data.error_set.infer_fn != nullptr) { if (!resolve_inferred_error_set(g, inferred_err_set_type, return_type_node)) { - fn_table_entry->anal_state = FnAnalStateInvalid; + fn->anal_state = FnAnalStateInvalid; return; } } @@ -3945,12 +3945,25 @@ static void analyze_fn_ir(CodeGen *g, ZigFn *fn_table_entry, AstNode *return_typ } } + CallingConvention cc = fn->type_entry->data.fn.fn_type_id.cc; + if (cc != CallingConventionUnspecified && cc != CallingConventionAsync && + fn->inferred_async_node != nullptr && + fn->inferred_async_node != inferred_async_checking && + fn->inferred_async_node != inferred_async_none) + { + ErrorMsg *msg = add_node_error(g, fn->proto_node, + buf_sprintf("function with calling convention '%s' cannot be async", + calling_convention_name(cc))); + add_async_error_notes(g, msg, fn); + fn->anal_state = FnAnalStateInvalid; + } + if (g->verbose_ir) { - fprintf(stderr, "fn %s() { // (analyzed)\n", buf_ptr(&fn_table_entry->symbol_name)); - ir_print(g, stderr, &fn_table_entry->analyzed_executable, 4); + fprintf(stderr, "fn %s() { // (analyzed)\n", buf_ptr(&fn->symbol_name)); + ir_print(g, stderr, &fn->analyzed_executable, 4); fprintf(stderr, 
"}\n"); } - fn_table_entry->anal_state = FnAnalStateComplete; + fn->anal_state = FnAnalStateComplete; } static void analyze_fn_body(CodeGen *g, ZigFn *fn_table_entry) { diff --git a/test/compile_errors.zig b/test/compile_errors.zig index d3d2685f1b..c07786d462 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -2,6 +2,20 @@ const tests = @import("tests.zig"); const builtin = @import("builtin"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "invalid suspend in exported function", + \\export fn entry() void { + \\ var frame = async func(); + \\ var result = await frame; + \\} + \\fn func() void { + \\ suspend; + \\} + , + "tmp.zig:1:1: error: function with calling convention 'ccc' cannot be async", + "tmp.zig:3:18: note: await is a suspend point", + ); + cases.add( "async function indirectly depends on its own frame", \\export fn entry() void { -- cgit v1.2.3 From 13b5a4bf8ca65c569e6b28ca0e41d101d12d0ff1 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 15 Aug 2019 14:05:12 -0400 Subject: remove `cancel` --- doc/docgen.zig | 1 - doc/langref.html.in | 14 +- src-self-hosted/main.zig | 3 - src/all_types.hpp | 33 +-- src/analyze.cpp | 53 +++-- src/analyze.hpp | 2 +- src/ast_render.cpp | 8 - src/codegen.cpp | 412 +++++++++++++++----------------------- src/ir.cpp | 230 +++------------------ src/ir_print.cpp | 24 --- src/parser.cpp | 12 -- src/tokenizer.cpp | 2 - src/tokenizer.hpp | 1 - std/event/fs.zig | 2 +- std/event/future.zig | 2 +- std/event/group.zig | 20 +- std/event/net.zig | 8 +- std/zig/parse.zig | 15 -- std/zig/parser_test.zig | 4 +- std/zig/tokenizer.zig | 2 - test/compile_errors.zig | 8 +- test/stage1/behavior.zig | 1 - test/stage1/behavior/async_fn.zig | 90 ++------- test/stage1/behavior/cancel.zig | 115 ----------- 24 files changed, 256 insertions(+), 806 deletions(-) delete mode 100644 test/stage1/behavior/cancel.zig (limited to 'test/compile_errors.zig') diff --git a/doc/docgen.zig b/doc/docgen.zig index 92764d7642..458b97d2c0 100644 --- a/doc/docgen.zig +++ b/doc/docgen.zig @@ -750,7 +750,6 @@ fn tokenizeAndPrintRaw(docgen_tokenizer: *Tokenizer, out: var, source_token: Tok .Keyword_async, .Keyword_await, .Keyword_break, - .Keyword_cancel, .Keyword_catch, .Keyword_comptime, .Keyword_const, diff --git a/doc/langref.html.in b/doc/langref.html.in index 23e4dd194e..0f964373c5 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -5971,7 +5971,7 @@ test "global assembly" { {#header_open|Async Functions#}

An async function is a function whose callsite is split into an {#syntax#}async{#endsyntax#} initiation, - followed by an {#syntax#}await{#endsyntax#} completion. They can also be canceled. + followed by an {#syntax#}await{#endsyntax#} completion.
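As a rough sketch (a hypothetical `add` function, assuming the post-`cancel` async syntax), the two halves of a callsite look like this:

{#code_begin|test#}
const std = @import("std");

fn add(a: i32, b: i32) i32 {
    return a + b;
}

test "async initiation, await completion" {
    // initiation: begins executing the function and yields a frame handle
    var frame = async add(1, 2);
    // completion: consumes the frame and produces the return value
    const sum = await frame;
    std.testing.expect(sum == 3);
}
{#code_end#}

Since `add` never suspends, the `await` here returns immediately; the split only matters once suspend points are involved.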

When you call a function, it creates a stack frame, @@ -6013,11 +6013,11 @@ test "global assembly" {

The result of an async function call is a {#syntax#}promise->T{#endsyntax#} type, where {#syntax#}T{#endsyntax#} is the return type of the async function. Once a promise has been created, it must be - consumed, either with {#syntax#}cancel{#endsyntax#} or {#syntax#}await{#endsyntax#}: + consumed with {#syntax#}await{#endsyntax#}:

Async functions start executing when created, so in the following example, the entire - async function completes before it is canceled: + TODO

{#code_begin|test#} const std = @import("std"); @@ -6048,7 +6048,7 @@ fn simpleAsyncFn() void {

When an async function suspends itself, it must be sure that it will be - resumed or canceled somehow, for example by registering its promise handle + resumed somehow, for example by registering its promise handle in an event loop. Use a suspend capture block to gain access to the promise (TODO this is outdated):
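A minimal sketch of that idea, using the `@frame()` builtin and a global variable in place of a real event loop registration (assumed behavior, not text from this patch):

{#code_begin|test#}
var the_frame: anyframe = undefined;

fn suspendable() void {
    suspend {
        // publish our own frame handle so other code can resume us later
        the_frame = @frame();
    }
}

test "register a frame, then resume it" {
    _ = async suspendable(); // runs up to and including the suspend block
    resume the_frame;        // continues execution after the suspend point
}
{#code_end#}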

@@ -6134,7 +6134,7 @@ async fn testResumeFromSuspend(my_result: *i32) void { resumes the awaiter.

- A promise handle must be consumed exactly once after it is created, either by {#syntax#}cancel{#endsyntax#} or {#syntax#}await{#endsyntax#}. + A frame handle must be consumed exactly once after it is created, with {#syntax#}await{#endsyntax#}.
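For instance (a hedged sketch with a hypothetical `compute` function):

{#code_begin|test#}
const std = @import("std");

fn compute() i32 {
    return 1234;
}

test "a frame is awaited exactly once" {
    var frame = async compute();
    const value = await frame; // first and only await: ok
    // a second `await frame` here would trip the runtime safety check,
    // since the frame's result has already been consumed.
    std.testing.expect(value == 1234);
}
{#code_end#}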

{#syntax#}await{#endsyntax#} counts as a suspend point, and therefore at every {#syntax#}await{#endsyntax#}, @@ -9764,7 +9764,6 @@ PrimaryExpr <- AsmExpr / IfExpr / KEYWORD_break BreakLabel? Expr? - / KEYWORD_cancel Expr / KEYWORD_comptime Expr / KEYWORD_continue BreakLabel? / KEYWORD_resume Expr @@ -10120,7 +10119,6 @@ KEYWORD_asm <- 'asm' end_of_word KEYWORD_async <- 'async' end_of_word KEYWORD_await <- 'await' end_of_word KEYWORD_break <- 'break' end_of_word -KEYWORD_cancel <- 'cancel' end_of_word KEYWORD_catch <- 'catch' end_of_word KEYWORD_comptime <- 'comptime' end_of_word KEYWORD_const <- 'const' end_of_word @@ -10165,7 +10163,7 @@ KEYWORD_volatile <- 'volatile' end_of_word KEYWORD_while <- 'while' end_of_word keyword <- KEYWORD_align / KEYWORD_and / KEYWORD_allowzero / KEYWORD_asm - / KEYWORD_async / KEYWORD_await / KEYWORD_break / KEYWORD_cancel + / KEYWORD_async / KEYWORD_await / KEYWORD_break / KEYWORD_catch / KEYWORD_comptime / KEYWORD_const / KEYWORD_continue / KEYWORD_defer / KEYWORD_else / KEYWORD_enum / KEYWORD_errdefer / KEYWORD_error / KEYWORD_export / KEYWORD_extern / KEYWORD_false diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig index bc5d078950..5136b32735 100644 --- a/src-self-hosted/main.zig +++ b/src-self-hosted/main.zig @@ -467,7 +467,6 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Co comp.start(); // TODO const process_build_events_handle = try async processBuildEvents(comp, color); - defer cancel process_build_events_handle; loop.run(); } @@ -579,7 +578,6 @@ fn cmdLibC(allocator: *Allocator, args: []const []const u8) !void { defer zig_compiler.deinit(); // TODO const handle = try async findLibCAsync(&zig_compiler); - defer cancel handle; loop.run(); } @@ -669,7 +667,6 @@ fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void { // TODO &flags, // TODO color, // TODO ); - defer cancel main_handle; loop.run(); return result; } diff --git a/src/all_types.hpp b/src/all_types.hpp index 22e38b9f0c..f1c699ba10 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -476,7 +476,6 @@ enum NodeType { NodeTypeIfErrorExpr, NodeTypeIfOptional, NodeTypeErrorSetDecl, - NodeTypeCancel, NodeTypeResume, NodeTypeAwaitExpr, NodeTypeSuspend, @@ -911,10 +910,6 @@ struct AstNodeBreakExpr { AstNode *expr; // may be null }; -struct AstNodeCancelExpr { - AstNode *expr; -}; - struct AstNodeResumeExpr { AstNode *expr; }; @@ -1003,7 +998,6 @@ struct AstNode { AstNodeInferredArrayType inferred_array_type; AstNodeErrorType error_type; AstNodeErrorSetDecl err_set_decl; - AstNodeCancelExpr cancel_expr; AstNodeResumeExpr resume_expr; AstNodeAwaitExpr await_expr; AstNodeSuspend suspend; @@ -1561,7 +1555,6 @@ enum PanicMsgId { PanicMsgIdBadAwait, PanicMsgIdBadReturn, PanicMsgIdResumedAnAwaitingFn, - PanicMsgIdResumedACancelingFn, PanicMsgIdFrameTooSmall, PanicMsgIdResumedFnPendingAwait, @@ -1729,8 +1722,6 @@ struct CodeGen { LLVMValueRef cur_async_switch_instr; LLVMValueRef cur_async_resume_index_ptr; LLVMValueRef cur_async_awaiter_ptr; - LLVMValueRef cur_async_prev_val; - LLVMValueRef cur_async_prev_val_field_ptr; LLVMBasicBlockRef cur_preamble_llvm_block; size_t cur_resume_block_count; LLVMValueRef cur_err_ret_trace_val_arg; @@ -1822,7 +1813,6 @@ struct CodeGen { ZigType *align_amt_type; ZigType *stack_trace_type; - ZigType *ptr_to_stack_trace_type; ZigType *err_tag_type; ZigType *test_fn_type; @@ -1892,7 +1882,6 @@ struct CodeGen { bool system_linker_hack; bool reported_bad_link_libc_error; bool is_dynamic; // shared library rather 
than static library. dynamic musl rather than static musl. - bool cur_is_after_return; //////////////////////////// Participates in Input Parameter Cache Hash /////// Note: there is a separate cache hash for builtin.zig, when adding fields, @@ -2235,7 +2224,6 @@ enum IrInstructionId { IrInstructionIdCallGen, IrInstructionIdConst, IrInstructionIdReturn, - IrInstructionIdReturnBegin, IrInstructionIdCast, IrInstructionIdResizeSlice, IrInstructionIdContainerInitList, @@ -2345,7 +2333,6 @@ enum IrInstructionId { IrInstructionIdExport, IrInstructionIdErrorReturnTrace, IrInstructionIdErrorUnion, - IrInstructionIdCancel, IrInstructionIdAtomicRmw, IrInstructionIdAtomicLoad, IrInstructionIdSaveErrRetAddr, @@ -2370,7 +2357,6 @@ enum IrInstructionId { IrInstructionIdAwaitSrc, IrInstructionIdAwaitGen, IrInstructionIdResume, - IrInstructionIdTestCancelRequested, IrInstructionIdSpillBegin, IrInstructionIdSpillEnd, }; @@ -2649,12 +2635,6 @@ struct IrInstructionReturn { IrInstruction *operand; }; -struct IrInstructionReturnBegin { - IrInstruction base; - - IrInstruction *operand; -}; - enum CastOp { CastOpNoCast, // signifies the function call expression is not a cast CastOpNoop, // fn call expr is a cast, but does nothing @@ -3440,12 +3420,6 @@ struct IrInstructionErrorUnion { IrInstruction *payload; }; -struct IrInstructionCancel { - IrInstruction base; - - IrInstruction *frame; -}; - struct IrInstructionAtomicRmw { IrInstruction base; @@ -3647,10 +3621,6 @@ struct IrInstructionResume { IrInstruction *frame; }; -struct IrInstructionTestCancelRequested { - IrInstruction base; -}; - enum SpillId { SpillIdInvalid, SpillIdRetErrCode, @@ -3756,8 +3726,7 @@ static const size_t err_union_err_index = 1; static const size_t frame_fn_ptr_index = 0; static const size_t frame_resume_index = 1; static const size_t frame_awaiter_index = 2; -static const size_t frame_prev_val_index = 3; -static const size_t frame_ret_start = 4; +static const size_t frame_ret_start = 3; // TODO https://github.com/ziglang/zig/issues/3056 // We require this to be a power of 2 so that we can use shifting rather than diff --git a/src/analyze.cpp b/src/analyze.cpp index 1b6de6e7df..fc42abaf26 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -828,17 +828,15 @@ bool calling_convention_allows_zig_types(CallingConvention cc) { zig_unreachable(); } -ZigType *get_ptr_to_stack_trace_type(CodeGen *g) { +ZigType *get_stack_trace_type(CodeGen *g) { if (g->stack_trace_type == nullptr) { ConstExprValue *stack_trace_type_val = get_builtin_value(g, "StackTrace"); assert(stack_trace_type_val->type->id == ZigTypeIdMetaType); g->stack_trace_type = stack_trace_type_val->data.x_type; assertNoError(type_resolve(g, g->stack_trace_type, ResolveStatusZeroBitsKnown)); - - g->ptr_to_stack_trace_type = get_pointer_to_type(g, g->stack_trace_type, false); } - return g->ptr_to_stack_trace_type; + return g->stack_trace_type; } bool want_first_arg_sret(CodeGen *g, FnTypeId *fn_type_id) { @@ -3035,7 +3033,6 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) { case NodeTypeIfErrorExpr: case NodeTypeIfOptional: case NodeTypeErrorSetDecl: - case NodeTypeCancel: case NodeTypeResume: case NodeTypeAwaitExpr: case NodeTypeSuspend: @@ -3822,11 +3819,9 @@ static void add_async_error_notes(CodeGen *g, ErrorMsg *msg, ZigFn *fn) { } else if (fn->inferred_async_node->type == NodeTypeAwaitExpr) { add_error_note(g, msg, fn->inferred_async_node, buf_sprintf("await is a suspend point")); - } else if (fn->inferred_async_node->type == NodeTypeCancel) { - 
add_error_note(g, msg, fn->inferred_async_node, - buf_sprintf("cancel is a suspend point")); } else { - zig_unreachable(); + add_error_note(g, msg, fn->inferred_async_node, + buf_sprintf("suspends here")); } } @@ -5231,12 +5226,21 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) { fields.append({"@fn_ptr", g->builtin_types.entry_usize, 0}); fields.append({"@resume_index", g->builtin_types.entry_usize, 0}); fields.append({"@awaiter", g->builtin_types.entry_usize, 0}); - fields.append({"@prev_val", g->builtin_types.entry_usize, 0}); fields.append({"@result_ptr_callee", ptr_return_type, 0}); fields.append({"@result_ptr_awaiter", ptr_return_type, 0}); fields.append({"@result", fn_type_id->return_type, 0}); + if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) { + ZigType *ptr_to_stack_trace_type = get_pointer_to_type(g, get_stack_trace_type(g), false); + fields.append({"@ptr_stack_trace_callee", ptr_to_stack_trace_type, 0}); + fields.append({"@ptr_stack_trace_awaiter", ptr_to_stack_trace_type, 0}); + + fields.append({"@stack_trace", get_stack_trace_type(g), 0}); + fields.append({"@instruction_addresses", + get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count), 0}); + } + frame_type->data.frame.locals_struct = get_struct_type(g, buf_ptr(&frame_type->name), fields.items, fields.length, target_fn_align(g->zig_target)); frame_type->abi_size = frame_type->data.frame.locals_struct->abi_size; @@ -5311,14 +5315,15 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) { fields.append({"@fn_ptr", fn_type, 0}); fields.append({"@resume_index", g->builtin_types.entry_usize, 0}); fields.append({"@awaiter", g->builtin_types.entry_usize, 0}); - fields.append({"@prev_val", g->builtin_types.entry_usize, 0}); fields.append({"@result_ptr_callee", ptr_return_type, 0}); fields.append({"@result_ptr_awaiter", ptr_return_type, 0}); fields.append({"@result", fn_type_id->return_type, 0}); if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) { - fields.append({"@ptr_stack_trace", get_ptr_to_stack_trace_type(g), 0}); + ZigType *ptr_stack_trace_type = get_pointer_to_type(g, get_stack_trace_type(g), false); + fields.append({"@ptr_stack_trace_callee", ptr_stack_trace_type, 0}); + fields.append({"@ptr_stack_trace_awaiter", ptr_stack_trace_type, 0}); } for (size_t arg_i = 0; arg_i < fn_type_id->param_count; arg_i += 1) { @@ -5337,9 +5342,7 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) { } if (codegen_fn_has_err_ret_tracing_stack(g, fn, true)) { - (void)get_ptr_to_stack_trace_type(g); // populate g->stack_trace_type - - fields.append({"@stack_trace", g->stack_trace_type, 0}); + fields.append({"@stack_trace", get_stack_trace_type(g), 0}); fields.append({"@instruction_addresses", get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count), 0}); } @@ -7553,7 +7556,7 @@ static void resolve_llvm_types_fn_type(CodeGen *g, ZigType *fn_type) { fn_type->data.fn.gen_return_type = gen_return_type; if (prefix_arg_error_return_trace && !is_async) { - ZigType *gen_type = get_ptr_to_stack_trace_type(g); + ZigType *gen_type = get_pointer_to_type(g, get_stack_trace_type(g), false); gen_param_types.append(get_llvm_type(g, gen_type)); param_di_types.append(get_llvm_di_type(g, gen_type)); } @@ -7727,7 +7730,6 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re field_types.append(ptr_fn_llvm_type); // fn_ptr field_types.append(usize_type_ref); // resume_index field_types.append(usize_type_ref); // 
awaiter - field_types.append(usize_type_ref); // prev_val bool have_result_type = result_type != nullptr && type_has_bits(result_type); if (have_result_type) { @@ -7735,7 +7737,9 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re field_types.append(get_llvm_type(g, ptr_result_type)); // result_ptr_awaiter field_types.append(get_llvm_type(g, result_type)); // result if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) { - field_types.append(get_llvm_type(g, get_ptr_to_stack_trace_type(g))); // ptr_stack_trace + ZigType *ptr_stack_trace = get_pointer_to_type(g, get_stack_trace_type(g), false); + field_types.append(get_llvm_type(g, ptr_stack_trace)); // ptr_stack_trace_callee + field_types.append(get_llvm_type(g, ptr_stack_trace)); // ptr_stack_trace_awaiter } } LLVMStructSetBody(frame_header_type, field_types.items, field_types.length, false); @@ -7792,14 +7796,23 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, result_type))); if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) { + ZigType *ptr_stack_trace = get_pointer_to_type(g, get_stack_trace_type(g), false); + di_element_types.append( + ZigLLVMCreateDebugMemberType(g->dbuilder, + ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "ptr_stack_trace_callee", + di_file, line, + 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)), + 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)), + 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length), + ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_stack_trace))); di_element_types.append( ZigLLVMCreateDebugMemberType(g->dbuilder, - ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "ptr_stack_trace", + ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "ptr_stack_trace_awaiter", di_file, line, 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)), 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)), 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length), - ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, get_ptr_to_stack_trace_type(g)))); + ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_stack_trace))); } }; diff --git a/src/analyze.hpp b/src/analyze.hpp index e6336d3cdc..5752c74751 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -195,7 +195,7 @@ void add_var_export(CodeGen *g, ZigVar *fn_table_entry, Buf *symbol_name, Global ConstExprValue *get_builtin_value(CodeGen *codegen, const char *name); -ZigType *get_ptr_to_stack_trace_type(CodeGen *g); +ZigType *get_stack_trace_type(CodeGen *g); bool resolve_inferred_error_set(CodeGen *g, ZigType *err_set_type, AstNode *source_node); ZigType *get_auto_err_set_type(CodeGen *g, ZigFn *fn_entry); diff --git a/src/ast_render.cpp b/src/ast_render.cpp index dd4d9cf646..334dc37b59 100644 --- a/src/ast_render.cpp +++ b/src/ast_render.cpp @@ -249,8 +249,6 @@ static const char *node_type_str(NodeType node_type) { return "IfOptional"; case NodeTypeErrorSetDecl: return "ErrorSetDecl"; - case NodeTypeCancel: - return "Cancel"; case NodeTypeResume: return "Resume"; case NodeTypeAwaitExpr: @@ -1136,12 +1134,6 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) { fprintf(ar->f, "}"); break; } - case NodeTypeCancel: - { - fprintf(ar->f, "cancel "); - render_node_grouped(ar, node->data.cancel_expr.expr); - break; - } case NodeTypeResume: { fprintf(ar->f, 
"resume "); diff --git a/src/codegen.cpp b/src/codegen.cpp index e9f323dd0d..9bf7b0287b 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -28,8 +28,6 @@ enum ResumeId { ResumeIdManual, ResumeIdReturn, ResumeIdCall, - - ResumeIdAwaitEarlyReturn // must be last }; static void init_darwin_native(CodeGen *g) { @@ -317,8 +315,9 @@ static uint32_t frame_index_trace_arg(CodeGen *g, ZigType *return_type) { // label (grep this): [fn_frame_struct_layout] static uint32_t frame_index_arg(CodeGen *g, ZigType *return_type) { bool have_stack_trace = codegen_fn_has_err_ret_tracing_arg(g, return_type); - // [0] *StackTrace - uint32_t trace_field_count = have_stack_trace ? 1 : 0; + // [0] *StackTrace (callee's) + // [1] *StackTrace (awaiter's) + uint32_t trace_field_count = have_stack_trace ? 2 : 0; return frame_index_trace_arg(g, return_type) + trace_field_count; } @@ -916,8 +915,6 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) { return buf_create_from_str("async function returned twice"); case PanicMsgIdResumedAnAwaitingFn: return buf_create_from_str("awaiting function resumed"); - case PanicMsgIdResumedACancelingFn: - return buf_create_from_str("canceling function resumed"); case PanicMsgIdFrameTooSmall: return buf_create_from_str("frame too small"); case PanicMsgIdResumedFnPendingAwait: @@ -946,13 +943,16 @@ static LLVMValueRef get_panic_msg_ptr_val(CodeGen *g, PanicMsgId msg_id) { return LLVMConstBitCast(val->global_refs->llvm_global, LLVMPointerType(get_llvm_type(g, str_type), 0)); } +static ZigType *ptr_to_stack_trace_type(CodeGen *g) { + return get_pointer_to_type(g, get_stack_trace_type(g), false); +} + static void gen_panic(CodeGen *g, LLVMValueRef msg_arg, LLVMValueRef stack_trace_arg) { assert(g->panic_fn != nullptr); LLVMValueRef fn_val = fn_llvm_value(g, g->panic_fn); LLVMCallConv llvm_cc = get_llvm_cc(g, g->panic_fn->type_entry->data.fn.fn_type_id.cc); if (stack_trace_arg == nullptr) { - ZigType *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(g); - stack_trace_arg = LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type)); + stack_trace_arg = LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type(g))); } LLVMValueRef args[] = { msg_arg, @@ -1046,7 +1046,7 @@ static LLVMValueRef get_add_error_return_trace_addr_fn(CodeGen *g) { return g->add_error_return_trace_addr_fn_val; LLVMTypeRef arg_types[] = { - get_llvm_type(g, get_ptr_to_stack_trace_type(g)), + get_llvm_type(g, ptr_to_stack_trace_type(g)), g->builtin_types.entry_usize->llvm_type, }; LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMVoidType(), arg_types, 2, false); @@ -1127,7 +1127,7 @@ static LLVMValueRef get_return_err_fn(CodeGen *g) { LLVMTypeRef arg_types[] = { // error return trace pointer - get_llvm_type(g, get_ptr_to_stack_trace_type(g)), + get_llvm_type(g, ptr_to_stack_trace_type(g)), }; LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMVoidType(), arg_types, 1, false); @@ -1205,7 +1205,7 @@ static LLVMValueRef get_safety_crash_err_fn(CodeGen *g) { LLVMTypeRef fn_type_ref; if (g->have_err_ret_tracing) { LLVMTypeRef arg_types[] = { - get_llvm_type(g, g->ptr_to_stack_trace_type), + get_llvm_type(g, get_pointer_to_type(g, get_stack_trace_type(g), false)), get_llvm_type(g, g->err_tag_type), }; fn_type_ref = LLVMFunctionType(LLVMVoidType(), arg_types, 2, false); @@ -1321,14 +1321,7 @@ static LLVMValueRef get_cur_err_ret_trace_val(CodeGen *g, Scope *scope) { if (g->cur_err_ret_trace_val_stack != nullptr) { return g->cur_err_ret_trace_val_stack; } - if (g->cur_err_ret_trace_val_arg != nullptr) { - if (fn_is_async(g->cur_fn)) { - 
return LLVMBuildLoad(g->builder, g->cur_err_ret_trace_val_arg, ""); - } else { - return g->cur_err_ret_trace_val_arg; - } - } - return nullptr; + return g->cur_err_ret_trace_val_arg; } static void gen_safety_crash_for_err(CodeGen *g, LLVMValueRef err_val, Scope *scope) { @@ -1337,8 +1330,7 @@ static void gen_safety_crash_for_err(CodeGen *g, LLVMValueRef err_val, Scope *sc if (g->have_err_ret_tracing) { LLVMValueRef err_ret_trace_val = get_cur_err_ret_trace_val(g, scope); if (err_ret_trace_val == nullptr) { - ZigType *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(g); - err_ret_trace_val = LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type)); + err_ret_trace_val = LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type(g))); } LLVMValueRef args[] = { err_ret_trace_val, @@ -2044,8 +2036,8 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) { assert(g->stack_trace_type != nullptr); LLVMTypeRef param_types[] = { - get_llvm_type(g, get_ptr_to_stack_trace_type(g)), - get_llvm_type(g, get_ptr_to_stack_trace_type(g)), + get_llvm_type(g, ptr_to_stack_trace_type(g)), + get_llvm_type(g, ptr_to_stack_trace_type(g)), }; LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMVoidType(), param_types, 2, false); @@ -2058,7 +2050,6 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) { addLLVMArgAttr(fn_val, (unsigned)0, "noalias"); addLLVMArgAttr(fn_val, (unsigned)0, "writeonly"); - addLLVMArgAttr(fn_val, (unsigned)1, "nonnull"); addLLVMArgAttr(fn_val, (unsigned)1, "noalias"); addLLVMArgAttr(fn_val, (unsigned)1, "readonly"); if (g->build_mode == BuildModeDebug) { @@ -2075,7 +2066,7 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) { LLVMPositionBuilderAtEnd(g->builder, entry_block); ZigLLVMClearCurrentDebugLocation(g->builder); - // if (dest_stack_trace == null) return; + // if (dest_stack_trace == null or src_stack_trace == null) return; // var frame_index: usize = undefined; // var frames_left: usize = undefined; // if (src_stack_trace.index < src_stack_trace.instruction_addresses.len) { @@ -2093,7 +2084,7 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) { // frame_index = (frame_index + 1) % src_stack_trace.instruction_addresses.len; // } LLVMBasicBlockRef return_block = LLVMAppendBasicBlock(fn_val, "Return"); - LLVMBasicBlockRef dest_non_null_block = LLVMAppendBasicBlock(fn_val, "DestNonNull"); + LLVMBasicBlockRef non_null_block = LLVMAppendBasicBlock(fn_val, "NonNull"); LLVMValueRef frame_index_ptr = LLVMBuildAlloca(g->builder, g->builtin_types.entry_usize->llvm_type, "frame_index"); LLVMValueRef frames_left_ptr = LLVMBuildAlloca(g->builder, g->builtin_types.entry_usize->llvm_type, "frames_left"); @@ -2103,9 +2094,12 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) { LLVMValueRef null_dest_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, dest_stack_trace_ptr, LLVMConstNull(LLVMTypeOf(dest_stack_trace_ptr)), ""); - LLVMBuildCondBr(g->builder, null_dest_bit, return_block, dest_non_null_block); + LLVMValueRef null_src_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, src_stack_trace_ptr, + LLVMConstNull(LLVMTypeOf(src_stack_trace_ptr)), ""); + LLVMValueRef null_bit = LLVMBuildOr(g->builder, null_dest_bit, null_src_bit, ""); + LLVMBuildCondBr(g->builder, null_bit, return_block, non_null_block); - LLVMPositionBuilderAtEnd(g->builder, dest_non_null_block); + LLVMPositionBuilderAtEnd(g->builder, non_null_block); size_t src_index_field_index = g->stack_trace_type->data.structure.fields[0].gen_index; size_t src_addresses_field_index = 
g->stack_trace_type->data.structure.fields[1].gen_index; LLVMValueRef src_index_field_ptr = LLVMBuildStructGEP(g->builder, src_stack_trace_ptr, @@ -2183,13 +2177,11 @@ static LLVMValueRef ir_render_save_err_ret_addr(CodeGen *g, IrExecutable *execut ZigLLVMBuildCall(g->builder, return_err_fn, &my_err_trace_val, 1, get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, ""); - if (fn_is_async(g->cur_fn) && g->cur_fn->calls_or_awaits_errorable_fn && - codegen_fn_has_err_ret_tracing_arg(g, g->cur_fn->type_entry->data.fn.fn_type_id.return_type)) - { - LLVMValueRef dest_trace_ptr = LLVMBuildLoad(g->builder, g->cur_err_ret_trace_val_arg, ""); - LLVMValueRef args[] = { dest_trace_ptr, my_err_trace_val }; - ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2, - get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, ""); + ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type; + if (fn_is_async(g->cur_fn) && codegen_fn_has_err_ret_tracing_arg(g, ret_type)) { + LLVMValueRef trace_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, + frame_index_trace_arg(g, ret_type), ""); + LLVMBuildStore(g->builder, my_err_trace_val, trace_ptr_ptr); } return nullptr; @@ -2201,16 +2193,9 @@ static void gen_assert_resume_id(CodeGen *g, IrInstruction *source_instr, Resume LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadResume"); if (end_bb == nullptr) end_bb = LLVMAppendBasicBlock(g->cur_fn_val, "OkResume"); - LLVMValueRef ok_bit; - if (resume_id == ResumeIdAwaitEarlyReturn) { - LLVMValueRef last_value = LLVMConstSub(LLVMConstAllOnes(usize_type_ref), - LLVMConstInt(usize_type_ref, ResumeIdAwaitEarlyReturn, false)); - ok_bit = LLVMBuildICmp(g->builder, LLVMIntULT, LLVMGetParam(g->cur_fn_val, 1), last_value, ""); - } else { - LLVMValueRef expected_value = LLVMConstSub(LLVMConstAllOnes(usize_type_ref), - LLVMConstInt(usize_type_ref, resume_id, false)); - ok_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, LLVMGetParam(g->cur_fn_val, 1), expected_value, ""); - } + LLVMValueRef expected_value = LLVMConstSub(LLVMConstAllOnes(usize_type_ref), + LLVMConstInt(usize_type_ref, resume_id, false)); + LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, LLVMGetParam(g->cur_fn_val, 1), expected_value, ""); LLVMBuildCondBr(g->builder, ok_bit, end_bb, bad_resume_block); LLVMPositionBuilderAtEnd(g->builder, bad_resume_block); @@ -2219,36 +2204,19 @@ static void gen_assert_resume_id(CodeGen *g, IrInstruction *source_instr, Resume LLVMPositionBuilderAtEnd(g->builder, end_bb); } -static LLVMValueRef gen_resume(CodeGen *g, LLVMValueRef fn_val, LLVMValueRef target_frame_ptr, - ResumeId resume_id, LLVMValueRef arg_val) -{ +static LLVMValueRef gen_resume(CodeGen *g, LLVMValueRef fn_val, LLVMValueRef target_frame_ptr, ResumeId resume_id) { LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; if (fn_val == nullptr) { LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_fn_ptr_index, ""); fn_val = LLVMBuildLoad(g->builder, fn_ptr_ptr, ""); } - if (arg_val == nullptr) { - arg_val = LLVMBuildSub(g->builder, LLVMConstAllOnes(usize_type_ref), - LLVMConstInt(usize_type_ref, resume_id, false), ""); - } else { - assert(resume_id == ResumeIdAwaitEarlyReturn); - } + LLVMValueRef arg_val = LLVMBuildSub(g->builder, LLVMConstAllOnes(usize_type_ref), + LLVMConstInt(usize_type_ref, resume_id, false), ""); LLVMValueRef args[] = {target_frame_ptr, 
arg_val}; return ZigLLVMBuildCall(g->builder, fn_val, args, 2, LLVMFastCallConv, ZigLLVM_FnInlineAuto, ""); } -static LLVMValueRef get_cur_async_prev_val(CodeGen *g) { - if (g->cur_async_prev_val != nullptr) { - return g->cur_async_prev_val; - } - g->cur_async_prev_val = LLVMBuildLoad(g->builder, g->cur_async_prev_val_field_ptr, ""); - return g->cur_async_prev_val; -} - static LLVMBasicBlockRef gen_suspend_begin(CodeGen *g, const char *name_hint) { - // This becomes invalid when a suspend happens. - g->cur_async_prev_val = nullptr; - LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; LLVMBasicBlockRef resume_bb = LLVMAppendBasicBlock(g->cur_fn_val, name_hint); size_t new_block_index = g->cur_resume_block_count; @@ -2259,6 +2227,10 @@ static LLVMBasicBlockRef gen_suspend_begin(CodeGen *g, const char *name_hint) { return resume_bb; } +static void set_tail_call_if_appropriate(CodeGen *g, LLVMValueRef call_inst) { + LLVMSetTailCall(call_inst, true); +} + static LLVMValueRef gen_maybe_atomic_op(CodeGen *g, LLVMAtomicRMWBinOp op, LLVMValueRef ptr, LLVMValueRef val, LLVMAtomicOrdering order) { @@ -2282,32 +2254,32 @@ static LLVMValueRef gen_maybe_atomic_op(CodeGen *g, LLVMAtomicRMWBinOp op, LLVMV } } -static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable, - IrInstructionReturnBegin *instruction) -{ +static void gen_async_return(CodeGen *g, IrInstructionReturn *instruction) { + LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; + ZigType *operand_type = (instruction->operand != nullptr) ? instruction->operand->value.type : nullptr; bool operand_has_bits = (operand_type != nullptr) && type_has_bits(operand_type); - if (!fn_is_async(g->cur_fn)) { - return operand_has_bits ? ir_llvm_value(g, instruction->operand) : nullptr; - } + ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type; + bool ret_type_has_bits = type_has_bits(ret_type); - LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; if (operand_has_bits && instruction->operand != nullptr) { - ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type; bool need_store = instruction->operand->value.special != ConstValSpecialRuntime || !handle_is_ptr(ret_type); if (need_store) { - // It didn't get written to the result ptr. We do that now so that we do not have to spill - // the return operand. + // It didn't get written to the result ptr. We do that now. ZigType *ret_ptr_type = get_pointer_to_type(g, ret_type, true); gen_assign_raw(g, g->cur_ret_ptr, ret_ptr_type, ir_llvm_value(g, instruction->operand)); } } - // Prepare to be suspended. We might end up not having to suspend though. - LLVMBasicBlockRef resume_bb = gen_suspend_begin(g, "ReturnResume"); + // Whether we tail resume the awaiter, or do an early return, we are done and will not be resumed. 
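+ // Hedged summary of the protocol below (an inference from the code, not + // authoritative): the @awaiter word acts as a tiny state machine. Zero + // means "no awaiter yet", all-ones means "already returned", and any other + // value is the awaiter's frame pointer. The atomic XOR with all-ones both + // publishes "returned" and fetches which of those states the frame was in, + // which the three-way switch then dispatches on.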
+ if (ir_want_runtime_safety(g, &instruction->base)) { + LLVMValueRef new_resume_index = LLVMConstAllOnes(usize_type_ref); + LLVMBuildStore(g->builder, new_resume_index, g->cur_async_resume_index_ptr); + } LLVMValueRef zero = LLVMConstNull(usize_type_ref); LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref); + LLVMValueRef prev_val = gen_maybe_atomic_op(g, LLVMAtomicRMWBinOpXor, g->cur_async_awaiter_ptr, all_ones, LLVMAtomicOrderingAcquire); @@ -2316,7 +2288,6 @@ static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable, LLVMBasicBlockRef resume_them_block = LLVMAppendBasicBlock(g->cur_fn_val, "ResumeThem"); LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, prev_val, resume_them_block, 2); - LLVMBasicBlockRef switch_bb = LLVMGetInsertBlock(g->builder); LLVMAddCase(switch_instr, zero, early_return_block); LLVMAddCase(switch_instr, all_ones, bad_return_block); @@ -2325,90 +2296,63 @@ static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable, LLVMPositionBuilderAtEnd(g->builder, bad_return_block); gen_assertion(g, PanicMsgIdBadReturn, &instruction->base); - // The caller has not done an await yet. So we suspend at the return instruction, until a - // cancel or await is performed. + // There is no awaiter yet, but we're completely done. LLVMPositionBuilderAtEnd(g->builder, early_return_block); LLVMBuildRetVoid(g->builder); - // Add a safety check for when getting resumed by the awaiter. - LLVMPositionBuilderAtEnd(g->builder, resume_bb); - LLVMBasicBlockRef after_resume_block = LLVMGetInsertBlock(g->builder); - gen_assert_resume_id(g, &instruction->base, ResumeIdAwaitEarlyReturn, PanicMsgIdResumedFnPendingAwait, - resume_them_block); - - // We need to resume the caller by tail calling them. - // That will happen when rendering IrInstructionReturn after running the defers/errdefers. - // We either got here from Entry (function call) or from the switch above - g->cur_async_prev_val = LLVMBuildPhi(g->builder, usize_type_ref, ""); - LLVMValueRef incoming_values[] = { LLVMGetParam(g->cur_fn_val, 1), prev_val }; - LLVMBasicBlockRef incoming_blocks[] = { after_resume_block, switch_bb }; - LLVMAddIncoming(g->cur_async_prev_val, incoming_values, incoming_blocks, 2); - - g->cur_is_after_return = true; - LLVMBuildStore(g->builder, g->cur_async_prev_val, g->cur_async_prev_val_field_ptr); - - if (!operand_has_bits) { - return nullptr; - } - - return get_handle_value(g, g->cur_ret_ptr, operand_type, get_pointer_to_type(g, operand_type, true)); -} - -static void set_tail_call_if_appropriate(CodeGen *g, LLVMValueRef call_inst) { - LLVMSetTailCall(call_inst, true); + // We need to resume the caller by tail calling them, + // but first write through the result pointer and possibly + // error return trace pointer. + LLVMPositionBuilderAtEnd(g->builder, resume_them_block); + + if (ret_type_has_bits) { + // If the awaiter result pointer is non-null, we need to copy the result to there. 
+ LLVMBasicBlockRef copy_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResult"); + LLVMBasicBlockRef copy_end_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResultEnd"); + LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, frame_ret_start + 1, ""); + LLVMValueRef awaiter_ret_ptr = LLVMBuildLoad(g->builder, awaiter_ret_ptr_ptr, ""); + LLVMValueRef zero_ptr = LLVMConstNull(LLVMTypeOf(awaiter_ret_ptr)); + LLVMValueRef need_copy_bit = LLVMBuildICmp(g->builder, LLVMIntNE, awaiter_ret_ptr, zero_ptr, ""); + LLVMBuildCondBr(g->builder, need_copy_bit, copy_block, copy_end_block); + + LLVMPositionBuilderAtEnd(g->builder, copy_block); + LLVMTypeRef ptr_u8 = LLVMPointerType(LLVMInt8Type(), 0); + LLVMValueRef dest_ptr_casted = LLVMBuildBitCast(g->builder, awaiter_ret_ptr, ptr_u8, ""); + LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, g->cur_ret_ptr, ptr_u8, ""); + bool is_volatile = false; + uint32_t abi_align = get_abi_alignment(g, ret_type); + LLVMValueRef byte_count_val = LLVMConstInt(usize_type_ref, type_size(g, ret_type), false); + ZigLLVMBuildMemCpy(g->builder, + dest_ptr_casted, abi_align, + src_ptr_casted, abi_align, byte_count_val, is_volatile); + LLVMBuildBr(g->builder, copy_end_block); + + LLVMPositionBuilderAtEnd(g->builder, copy_end_block); + if (codegen_fn_has_err_ret_tracing_arg(g, ret_type)) { + LLVMValueRef awaiter_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, + frame_index_trace_arg(g, ret_type) + 1, ""); + LLVMValueRef dest_trace_ptr = LLVMBuildLoad(g->builder, awaiter_trace_ptr_ptr, ""); + LLVMValueRef my_err_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope); + LLVMValueRef args[] = { dest_trace_ptr, my_err_trace_val }; + ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2, + get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, ""); + } + } + + // Resume the caller by tail calling them. + ZigType *any_frame_type = get_any_frame_type(g, ret_type); + LLVMValueRef their_frame_ptr = LLVMBuildIntToPtr(g->builder, prev_val, get_llvm_type(g, any_frame_type), ""); + LLVMValueRef call_inst = gen_resume(g, nullptr, their_frame_ptr, ResumeIdReturn); + set_tail_call_if_appropriate(g, call_inst); + LLVMBuildRetVoid(g->builder); } static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrInstructionReturn *instruction) { if (fn_is_async(g->cur_fn)) { - LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; - ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type; - bool ret_type_has_bits = type_has_bits(ret_type); - - if (ir_want_runtime_safety(g, &instruction->base)) { - LLVMValueRef new_resume_index = LLVMConstAllOnes(usize_type_ref); - LLVMBuildStore(g->builder, new_resume_index, g->cur_async_resume_index_ptr); - } - - if (ret_type_has_bits) { - // If the awaiter result pointer is non-null, we need to copy the result to there. 
- LLVMBasicBlockRef copy_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResult"); - LLVMBasicBlockRef copy_end_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResultEnd"); - LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, frame_ret_start + 1, ""); - LLVMValueRef awaiter_ret_ptr = LLVMBuildLoad(g->builder, awaiter_ret_ptr_ptr, ""); - LLVMValueRef zero_ptr = LLVMConstNull(LLVMTypeOf(awaiter_ret_ptr)); - LLVMValueRef need_copy_bit = LLVMBuildICmp(g->builder, LLVMIntNE, awaiter_ret_ptr, zero_ptr, ""); - LLVMBuildCondBr(g->builder, need_copy_bit, copy_block, copy_end_block); - - LLVMPositionBuilderAtEnd(g->builder, copy_block); - LLVMTypeRef ptr_u8 = LLVMPointerType(LLVMInt8Type(), 0); - LLVMValueRef dest_ptr_casted = LLVMBuildBitCast(g->builder, awaiter_ret_ptr, ptr_u8, ""); - LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, g->cur_ret_ptr, ptr_u8, ""); - bool is_volatile = false; - uint32_t abi_align = get_abi_alignment(g, ret_type); - LLVMValueRef byte_count_val = LLVMConstInt(usize_type_ref, type_size(g, ret_type), false); - ZigLLVMBuildMemCpy(g->builder, - dest_ptr_casted, abi_align, - src_ptr_casted, abi_align, byte_count_val, is_volatile); - LLVMBuildBr(g->builder, copy_end_block); - - LLVMPositionBuilderAtEnd(g->builder, copy_end_block); - } - - // We need to resume the caller by tail calling them. - ZigType *any_frame_type = get_any_frame_type(g, ret_type); - LLVMValueRef one = LLVMConstInt(usize_type_ref, 1, false); - LLVMValueRef mask_val = LLVMConstNot(one); - LLVMValueRef masked_prev_val = LLVMBuildAnd(g->builder, get_cur_async_prev_val(g), mask_val, ""); - LLVMValueRef their_frame_ptr = LLVMBuildIntToPtr(g->builder, masked_prev_val, - get_llvm_type(g, any_frame_type), ""); - LLVMValueRef call_inst = gen_resume(g, nullptr, their_frame_ptr, ResumeIdReturn, nullptr); - set_tail_call_if_appropriate(g, call_inst); - LLVMBuildRetVoid(g->builder); - - g->cur_is_after_return = false; - + gen_async_return(g, instruction); return nullptr; } + if (want_first_arg_sret(g, &g->cur_fn->type_entry->data.fn.fn_type_id)) { if (instruction->operand == nullptr) { LLVMBuildRetVoid(g->builder); @@ -3893,6 +3837,8 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr // even if prefix_arg_err_ret_stack is true, let the async function do its own // initialization. 
} else { + // async function called as a normal function + frame_result_loc = ir_llvm_value(g, instruction->frame_result_loc); awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, ""); // caller's own frame pointer if (ret_has_bits) { @@ -3912,7 +3858,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr if (prefix_arg_err_ret_stack) { LLVMValueRef err_ret_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, - frame_index_trace_arg(g, src_return_type), ""); + frame_index_trace_arg(g, src_return_type) + 1, ""); LLVMValueRef my_err_ret_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope); LLVMBuildStore(g->builder, my_err_ret_trace_val, err_ret_trace_ptr_ptr); } @@ -4018,7 +3964,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr } if (instruction->is_async) { - gen_resume(g, fn_val, frame_result_loc, ResumeIdCall, nullptr); + gen_resume(g, fn_val, frame_result_loc, ResumeIdCall); if (instruction->new_stack != nullptr) { return frame_result_loc; } @@ -4028,7 +3974,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr LLVMBasicBlockRef call_bb = gen_suspend_begin(g, "CallResume"); - LLVMValueRef call_inst = gen_resume(g, fn_val, frame_result_loc, ResumeIdCall, nullptr); + LLVMValueRef call_inst = gen_resume(g, fn_val, frame_result_loc, ResumeIdCall); set_tail_call_if_appropriate(g, call_inst); LLVMBuildRetVoid(g->builder); @@ -4744,8 +4690,7 @@ static LLVMValueRef ir_render_error_return_trace(CodeGen *g, IrExecutable *execu { LLVMValueRef cur_err_ret_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope); if (cur_err_ret_trace_val == nullptr) { - ZigType *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(g); - return LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type)); + return LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type(g))); } return cur_err_ret_trace_val; } @@ -5505,60 +5450,6 @@ static LLVMValueRef ir_render_suspend_finish(CodeGen *g, IrExecutable *executabl return nullptr; } -static LLVMValueRef ir_render_cancel(CodeGen *g, IrExecutable *executable, IrInstructionCancel *instruction) { - LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; - LLVMValueRef zero = LLVMConstNull(usize_type_ref); - LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref); - LLVMValueRef one = LLVMConstInt(usize_type_ref, 1, false); - src_assert(instruction->frame->value.type->id == ZigTypeIdAnyFrame, instruction->base.source_node); - ZigType *result_type = instruction->frame->value.type->data.any_frame.result_type; - - LLVMValueRef target_frame_ptr = ir_llvm_value(g, instruction->frame); - LLVMBasicBlockRef resume_bb = gen_suspend_begin(g, "CancelResume"); - - // supply null for the awaiter return pointer (no copy needed) - if (type_has_bits(result_type)) { - LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_ret_start + 1, ""); - LLVMBuildStore(g->builder, LLVMConstNull(LLVMGetElementType(LLVMTypeOf(awaiter_ret_ptr_ptr))), - awaiter_ret_ptr_ptr); - } - - // supply null for the error return trace pointer - if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) { - LLVMValueRef err_ret_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, - frame_index_trace_arg(g, result_type), ""); - LLVMBuildStore(g->builder, LLVMConstNull(LLVMGetElementType(LLVMTypeOf(err_ret_trace_ptr_ptr))), - err_ret_trace_ptr_ptr); - } - - LLVMValueRef awaiter_val = 
LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, ""); - LLVMValueRef awaiter_ored_val = LLVMBuildOr(g->builder, awaiter_val, one, ""); - LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_awaiter_index, ""); - - LLVMValueRef prev_val = gen_maybe_atomic_op(g, LLVMAtomicRMWBinOpXchg, awaiter_ptr, awaiter_ored_val, - LLVMAtomicOrderingRelease); - - LLVMBasicBlockRef complete_suspend_block = LLVMAppendBasicBlock(g->cur_fn_val, "CancelSuspend"); - LLVMBasicBlockRef early_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "EarlyReturn"); - - LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, prev_val, resume_bb, 2); - LLVMAddCase(switch_instr, zero, complete_suspend_block); - LLVMAddCase(switch_instr, all_ones, early_return_block); - - LLVMPositionBuilderAtEnd(g->builder, complete_suspend_block); - LLVMBuildRetVoid(g->builder); - - LLVMPositionBuilderAtEnd(g->builder, early_return_block); - LLVMValueRef call_inst = gen_resume(g, nullptr, target_frame_ptr, ResumeIdAwaitEarlyReturn, awaiter_ored_val); - set_tail_call_if_appropriate(g, call_inst); - LLVMBuildRetVoid(g->builder); - - LLVMPositionBuilderAtEnd(g->builder, resume_bb); - gen_assert_resume_id(g, &instruction->base, ResumeIdReturn, PanicMsgIdResumedACancelingFn, nullptr); - - return nullptr; -} - static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInstructionAwaitGen *instruction) { LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type; LLVMValueRef zero = LLVMConstNull(usize_type_ref); @@ -5568,8 +5459,9 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst // Prepare to be suspended LLVMBasicBlockRef resume_bb = gen_suspend_begin(g, "AwaitResume"); + LLVMBasicBlockRef end_bb = LLVMAppendBasicBlock(g->cur_fn_val, "AwaitEnd"); - // At this point resuming the function will do the correct thing. + // At this point resuming the function will continue from resume_bb. // This code is as if it is running inside the suspend block. 
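+ // Hedged summary of the handshake that follows (an inference from the + // code, not authoritative): the awaiter atomically swaps its own frame + // pointer into the target's @awaiter word. If the previous value was zero, + // the callee is still running and will tail-resume us when it returns; if + // it was all-ones, the callee already finished, so we copy the result and + // merge the error return trace ourselves; any other previous value means a + // second await, which is a safety panic.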
// supply the awaiter return pointer @@ -5591,15 +5483,15 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst LLVMValueRef my_err_ret_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope); assert(my_err_ret_trace_val != nullptr); LLVMValueRef err_ret_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, - frame_index_trace_arg(g, result_type), ""); + frame_index_trace_arg(g, result_type) + 1, ""); LLVMBuildStore(g->builder, my_err_ret_trace_val, err_ret_trace_ptr_ptr); } // caller's own frame pointer LLVMValueRef awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, ""); LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_awaiter_index, ""); - LLVMValueRef prev_val = LLVMBuildAtomicRMW(g->builder, LLVMAtomicRMWBinOpXchg, awaiter_ptr, awaiter_init_val, - LLVMAtomicOrderingRelease, g->is_single_threaded); + LLVMValueRef prev_val = gen_maybe_atomic_op(g, LLVMAtomicRMWBinOpXchg, awaiter_ptr, awaiter_init_val, + LLVMAtomicOrderingRelease); LLVMBasicBlockRef bad_await_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadAwait"); LLVMBasicBlockRef complete_suspend_block = LLVMAppendBasicBlock(g->cur_fn_val, "CompleteSuspend"); @@ -5615,20 +5507,42 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst LLVMPositionBuilderAtEnd(g->builder, bad_await_block); gen_assertion(g, PanicMsgIdBadAwait, &instruction->base); - // Early return: The async function has already completed, but it is suspending before setting the result, - // populating the error return trace if applicable, and running the defers. - // Tail resume it now, so that it can complete. - LLVMPositionBuilderAtEnd(g->builder, early_return_block); - LLVMValueRef call_inst = gen_resume(g, nullptr, target_frame_ptr, ResumeIdAwaitEarlyReturn, awaiter_init_val); - set_tail_call_if_appropriate(g, call_inst); - LLVMBuildRetVoid(g->builder); - // Rely on the target to resume us from suspension. LLVMPositionBuilderAtEnd(g->builder, complete_suspend_block); LLVMBuildRetVoid(g->builder); + // Early return: The async function has already completed. We must copy the result and + // the error return trace if applicable. 
+        LLVMPositionBuilderAtEnd(g->builder, early_return_block);
+        if (type_has_bits(result_type) && result_loc != nullptr) {
+            LLVMValueRef their_result_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_ret_start, "");
+            LLVMValueRef their_result_ptr = LLVMBuildLoad(g->builder, their_result_ptr_ptr, "");
+            LLVMTypeRef ptr_u8 = LLVMPointerType(LLVMInt8Type(), 0);
+            LLVMValueRef dest_ptr_casted = LLVMBuildBitCast(g->builder, result_loc, ptr_u8, "");
+            LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, their_result_ptr, ptr_u8, "");
+            bool is_volatile = false;
+            uint32_t abi_align = get_abi_alignment(g, result_type);
+            LLVMValueRef byte_count_val = LLVMConstInt(usize_type_ref, type_size(g, result_type), false);
+            ZigLLVMBuildMemCpy(g->builder,
+                    dest_ptr_casted, abi_align,
+                    src_ptr_casted, abi_align, byte_count_val, is_volatile);
+        }
+        if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) {
+            LLVMValueRef their_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr,
+                    frame_index_trace_arg(g, result_type), "");
+            LLVMValueRef src_trace_ptr = LLVMBuildLoad(g->builder, their_trace_ptr_ptr, "");
+            LLVMValueRef dest_trace_ptr = get_cur_err_ret_trace_val(g, instruction->base.scope);
+            LLVMValueRef args[] = { dest_trace_ptr, src_trace_ptr };
+            ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2,
+                    get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
+        }
+        LLVMBuildBr(g->builder, end_bb);
+
+        LLVMPositionBuilderAtEnd(g->builder, resume_bb);
         gen_assert_resume_id(g, &instruction->base, ResumeIdReturn, PanicMsgIdResumedAnAwaitingFn, nullptr);
+        LLVMBuildBr(g->builder, end_bb);
+
+        LLVMPositionBuilderAtEnd(g->builder, end_bb);
         if (type_has_bits(result_type) && result_loc != nullptr) {
             return get_handle_value(g, result_loc, result_type, ptr_result_type);
         }
@@ -5640,7 +5554,7 @@ static LLVMValueRef ir_render_resume(CodeGen *g, IrExecutable *executable, IrIns
     ZigType *frame_type = instruction->frame->value.type;
     assert(frame_type->id == ZigTypeIdAnyFrame);
 
-    gen_resume(g, nullptr, frame, ResumeIdManual, nullptr);
+    gen_resume(g, nullptr, frame, ResumeIdManual);
     return nullptr;
 }
 
@@ -5651,18 +5565,6 @@ static LLVMValueRef ir_render_frame_size(CodeGen *g, IrExecutable *executable,
     return gen_frame_size(g, fn_val);
 }
 
-static LLVMValueRef ir_render_test_cancel_requested(CodeGen *g, IrExecutable *executable,
-        IrInstructionTestCancelRequested *instruction)
-{
-    if (!fn_is_async(g->cur_fn))
-        return LLVMConstInt(LLVMInt1Type(), 0, false);
-    if (g->cur_is_after_return) {
-        return LLVMBuildTrunc(g->builder, get_cur_async_prev_val(g), LLVMInt1Type(), "");
-    } else {
-        zig_panic("TODO");
-    }
-}
-
 static LLVMValueRef ir_render_spill_begin(CodeGen *g, IrExecutable *executable,
         IrInstructionSpillBegin *instruction)
 {
@@ -5798,8 +5700,6 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
         case IrInstructionIdDeclVarGen:
             return ir_render_decl_var(g, executable, (IrInstructionDeclVarGen *)instruction);
-        case IrInstructionIdReturnBegin:
-            return ir_render_return_begin(g, executable, (IrInstructionReturnBegin *)instruction);
         case IrInstructionIdReturn:
             return ir_render_return(g, executable, (IrInstructionReturn *)instruction);
         case IrInstructionIdBinOp:
@@ -5918,8 +5818,6 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
             return ir_render_align_cast(g, executable, (IrInstructionAlignCast *)instruction);
         case IrInstructionIdErrorReturnTrace:
            return ir_render_error_return_trace(g, executable, (IrInstructionErrorReturnTrace *)instruction);
-        case IrInstructionIdCancel:
-            return ir_render_cancel(g, executable, (IrInstructionCancel *)instruction);
         case IrInstructionIdAtomicRmw:
             return ir_render_atomic_rmw(g, executable, (IrInstructionAtomicRmw *)instruction);
         case IrInstructionIdAtomicLoad:
@@ -5952,8 +5850,6 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
             return ir_render_frame_size(g, executable, (IrInstructionFrameSizeGen *)instruction);
         case IrInstructionIdAwaitGen:
             return ir_render_await(g, executable, (IrInstructionAwaitGen *)instruction);
-        case IrInstructionIdTestCancelRequested:
-            return ir_render_test_cancel_requested(g, executable, (IrInstructionTestCancelRequested *)instruction);
         case IrInstructionIdSpillBegin:
             return ir_render_spill_begin(g, executable, (IrInstructionSpillBegin *)instruction);
         case IrInstructionIdSpillEnd:
@@ -7060,9 +6956,9 @@ static void do_code_gen(CodeGen *g) {
         ZigType *array_type = get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count);
         err_ret_array_val = build_alloca(g, array_type, "error_return_trace_addresses", get_abi_alignment(g, array_type));
 
-        // populate g->stack_trace_type
-        (void)get_ptr_to_stack_trace_type(g);
-        g->cur_err_ret_trace_val_stack = build_alloca(g, g->stack_trace_type, "error_return_trace", get_abi_alignment(g, g->stack_trace_type));
+        (void)get_llvm_type(g, get_stack_trace_type(g));
+        g->cur_err_ret_trace_val_stack = build_alloca(g, get_stack_trace_type(g), "error_return_trace",
+                get_abi_alignment(g, g->stack_trace_type));
     } else {
         g->cur_err_ret_trace_val_stack = nullptr;
     }
@@ -7204,18 +7100,12 @@ static void do_code_gen(CodeGen *g) {
             LLVMValueRef cur_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, frame_ret_start, "");
             g->cur_ret_ptr = LLVMBuildLoad(g->builder, cur_ret_ptr_ptr, "");
         }
-        if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) {
-            uint32_t trace_field_index = frame_index_trace_arg(g, fn_type_id->return_type);
-            g->cur_err_ret_trace_val_arg = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, trace_field_index, "");
-        }
         uint32_t trace_field_index_stack = UINT32_MAX;
         if (codegen_fn_has_err_ret_tracing_stack(g, fn_table_entry, true)) {
             trace_field_index_stack = frame_index_trace_stack(g, fn_type_id);
             g->cur_err_ret_trace_val_stack = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
                     trace_field_index_stack, "");
         }
-        g->cur_async_prev_val_field_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
-                frame_prev_val_index, "");
 
         LLVMValueRef resume_index = LLVMBuildLoad(g->builder, resume_index_ptr, "");
         LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, resume_index, bad_resume_block, 4);
@@ -7227,6 +7117,13 @@ static void do_code_gen(CodeGen *g) {
         g->cur_resume_block_count += 1;
         LLVMPositionBuilderAtEnd(g->builder, entry_block->llvm_block);
         if (trace_field_index_stack != UINT32_MAX) {
+            if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) {
+                LLVMValueRef trace_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
+                        frame_index_trace_arg(g, fn_type_id->return_type), "");
+                LLVMValueRef zero_ptr = LLVMConstNull(LLVMGetElementType(LLVMTypeOf(trace_ptr_ptr)));
+                LLVMBuildStore(g->builder, zero_ptr, trace_ptr_ptr);
+            }
+
             LLVMValueRef trace_field_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
                     trace_field_index_stack, "");
             LLVMValueRef trace_field_addrs = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
@@ -7273,8 +7170,6 @@ static void do_code_gen(CodeGen *g) {
         LLVMDumpModule(g->module);
     }
 
-    // in release mode, we're sooooo confident that we've generated correct ir,
-    // that we skip the verify module step in order to get better performance.
 #ifndef NDEBUG
     char *error = nullptr;
     LLVMVerifyModule(g->module, LLVMAbortProcessAction, &error);
@@ -10157,6 +10052,11 @@ bool codegen_fn_has_err_ret_tracing_arg(CodeGen *g, ZigType *return_type) {
 }
 
 bool codegen_fn_has_err_ret_tracing_stack(CodeGen *g, ZigFn *fn, bool is_async) {
-    return g->have_err_ret_tracing && fn->calls_or_awaits_errorable_fn &&
-        (is_async || !codegen_fn_has_err_ret_tracing_arg(g, fn->type_entry->data.fn.fn_type_id.return_type));
+    if (is_async) {
+        return g->have_err_ret_tracing && (fn->calls_or_awaits_errorable_fn ||
+            codegen_fn_has_err_ret_tracing_arg(g, fn->type_entry->data.fn.fn_type_id.return_type));
+    } else {
+        return g->have_err_ret_tracing && fn->calls_or_awaits_errorable_fn &&
+            !codegen_fn_has_err_ret_tracing_arg(g, fn->type_entry->data.fn.fn_type_id.return_type);
+    }
 }
diff --git a/src/ir.cpp b/src/ir.cpp
index 3564435ddd..3e80fad270 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -526,10 +526,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionReturn *) {
     return IrInstructionIdReturn;
 }
 
-static constexpr IrInstructionId ir_instruction_id(IrInstructionReturnBegin *) {
-    return IrInstructionIdReturnBegin;
-}
-
 static constexpr IrInstructionId ir_instruction_id(IrInstructionCast *) {
     return IrInstructionIdCast;
 }
 
@@ -974,10 +970,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionErrorUnion *) {
     return IrInstructionIdErrorUnion;
 }
 
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCancel *) {
-    return IrInstructionIdCancel;
-}
-
 static constexpr IrInstructionId ir_instruction_id(IrInstructionAtomicRmw *) {
     return IrInstructionIdAtomicRmw;
 }
 
@@ -1062,10 +1054,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionResume *) {
     return IrInstructionIdResume;
 }
 
-static constexpr IrInstructionId ir_instruction_id(IrInstructionTestCancelRequested *) {
-    return IrInstructionIdTestCancelRequested;
-}
-
 static constexpr IrInstructionId ir_instruction_id(IrInstructionSpillBegin *) {
     return IrInstructionIdSpillBegin;
 }
 
@@ -1138,18 +1126,6 @@ static IrInstruction *ir_build_return(IrBuilder *irb, Scope *scope, AstNode *sou
     return &return_instruction->base;
 }
 
-static IrInstruction *ir_build_return_begin(IrBuilder *irb, Scope *scope, AstNode *source_node,
-        IrInstruction *operand)
-{
-    IrInstructionReturnBegin *return_instruction = ir_build_instruction<IrInstructionReturnBegin>(irb, scope, source_node);
-    return_instruction->operand = operand;
-
-    ir_ref_instruction(operand, irb->current_basic_block);
-
-    return &return_instruction->base;
-}
-
-
 static IrInstruction *ir_build_const_void(IrBuilder *irb, Scope *scope, AstNode *source_node) {
     IrInstructionConst *const_instruction = ir_build_instruction<IrInstructionConst>(irb, scope, source_node);
     const_instruction->base.value.type = irb->codegen->builtin_types.entry_void;
@@ -3284,16 +3260,6 @@ static IrInstruction *ir_build_suspend_finish(IrBuilder *irb, Scope *scope, AstN
     return &instruction->base;
 }
 
-static IrInstruction *ir_build_cancel(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *frame) {
-    IrInstructionCancel *instruction = ir_build_instruction<IrInstructionCancel>(irb, scope, source_node);
-    instruction->base.value.type = irb->codegen->builtin_types.entry_void;
-    instruction->frame = frame;
-
-    ir_ref_instruction(frame, irb->current_basic_block);
-
-    return &instruction->base;
-}
-
 static IrInstruction *ir_build_await_src(IrBuilder *irb, Scope *scope, AstNode *source_node,
         IrInstruction *frame, ResultLoc *result_loc)
 {
@@ -3331,13 +3297,6 @@ static IrInstruction *ir_build_resume(IrBuilder *irb, Scope *scope, AstNode *sou
     return &instruction->base;
 }
 
-static IrInstruction *ir_build_test_cancel_requested(IrBuilder *irb, Scope *scope, AstNode *source_node) {
-    IrInstructionTestCancelRequested *instruction = ir_build_instruction<IrInstructionTestCancelRequested>(irb, scope, source_node);
-    instruction->base.value.type = irb->codegen->builtin_types.entry_bool;
-
-    return &instruction->base;
-}
-
 static IrInstructionSpillBegin *ir_build_spill_begin(IrBuilder *irb, Scope *scope, AstNode *source_node,
         IrInstruction *operand, SpillId spill_id)
 {
@@ -3532,7 +3491,6 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
     }
 
     ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, return_value));
-    return_value = ir_build_return_begin(irb, scope, node, return_value);
 
     size_t defer_counts[2];
     ir_count_defers(irb, scope, outer_scope, defer_counts);
@@ -3545,49 +3503,40 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
         return result;
     }
     bool should_inline = ir_should_inline(irb->exec, scope);
-    bool need_test_cancel = !should_inline && have_err_defers;
 
     IrBasicBlock *err_block = ir_create_basic_block(irb, scope, "ErrRetErr");
-    IrBasicBlock *normal_defers_block = ir_create_basic_block(irb, scope, "Defers");
-    IrBasicBlock *ok_block = need_test_cancel ?
-        ir_create_basic_block(irb, scope, "ErrRetOk") : normal_defers_block;
-    IrBasicBlock *all_defers_block = have_err_defers ?
-        ir_create_basic_block(irb, scope, "ErrDefers") : normal_defers_block;
+    IrBasicBlock *ok_block = ir_create_basic_block(irb, scope, "ErrRetOk");
+
+    if (!have_err_defers) {
+        ir_gen_defers_for_block(irb, scope, outer_scope, false);
+    }
 
     IrInstruction *is_err = ir_build_test_err_src(irb, scope, node, return_value, false, true);
-    IrInstruction *force_comptime = ir_build_const_bool(irb, scope, node, should_inline);
-    IrInstruction *err_is_comptime;
+    IrInstruction *is_comptime;
     if (should_inline) {
-        err_is_comptime = force_comptime;
+        is_comptime = ir_build_const_bool(irb, scope, node, should_inline);
    } else {
-        err_is_comptime = ir_build_test_comptime(irb, scope, node, is_err);
+        is_comptime = ir_build_test_comptime(irb, scope, node, is_err);
    }
-    ir_mark_gen(ir_build_cond_br(irb, scope, node, is_err, err_block, ok_block, err_is_comptime));
+    ir_mark_gen(ir_build_cond_br(irb, scope, node, is_err, err_block, ok_block, is_comptime));
 
     IrBasicBlock *ret_stmt_block = ir_create_basic_block(irb, scope, "RetStmt");
 
     ir_set_cursor_at_end_and_append_block(irb, err_block);
+    if (have_err_defers) {
+        ir_gen_defers_for_block(irb, scope, outer_scope, true);
+    }
     if (irb->codegen->have_err_ret_tracing && !should_inline) {
         ir_build_save_err_ret_addr(irb, scope, node);
     }
-    ir_build_br(irb, scope, node, all_defers_block, err_is_comptime);
-
-    if (need_test_cancel) {
-        ir_set_cursor_at_end_and_append_block(irb, ok_block);
-        IrInstruction *is_canceled = ir_build_test_cancel_requested(irb, scope, node);
-        ir_mark_gen(ir_build_cond_br(irb, scope, node, is_canceled,
-                all_defers_block, normal_defers_block, force_comptime));
-    }
+    ir_build_br(irb, scope, node, ret_stmt_block, is_comptime);
 
-    if (all_defers_block != normal_defers_block) {
-        ir_set_cursor_at_end_and_append_block(irb, all_defers_block);
-        ir_gen_defers_for_block(irb, scope, outer_scope, true);
-        ir_build_br(irb, scope, node, ret_stmt_block, force_comptime);
+    ir_set_cursor_at_end_and_append_block(irb, ok_block);
+    if (have_err_defers) {
+        ir_gen_defers_for_block(irb, scope, outer_scope, false);
     }
-
-    ir_set_cursor_at_end_and_append_block(irb, normal_defers_block);
-    ir_gen_defers_for_block(irb, scope, outer_scope, false);
-    ir_build_br(irb, scope, node, ret_stmt_block, force_comptime);
+    ir_build_br(irb, scope, node, ret_stmt_block, is_comptime);
 
     ir_set_cursor_at_end_and_append_block(irb, ret_stmt_block);
     IrInstruction *result = ir_build_return(irb, scope, node, return_value);
@@ -3619,8 +3568,6 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
             ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, err_val));
             IrInstructionSpillBegin *spill_begin = ir_build_spill_begin(irb, scope, node, err_val,
                     SpillIdRetErrCode);
-            ir_build_return_begin(irb, scope, node, err_val);
-            err_val = ir_build_spill_end(irb, scope, node, spill_begin);
             ResultLocReturn *result_loc_ret = allocate<ResultLocReturn>(1);
             result_loc_ret->base.id = ResultLocIdReturn;
             ir_build_reset_result(irb, scope, node, &result_loc_ret->base);
@@ -3629,6 +3576,7 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
             if (irb->codegen->have_err_ret_tracing && !should_inline) {
                 ir_build_save_err_ret_addr(irb, scope, node);
             }
+            err_val = ir_build_spill_end(irb, scope, node, spill_begin);
             IrInstruction *ret_inst = ir_build_return(irb, scope, node, err_val);
             result_loc_ret->base.source_instruction = ret_inst;
         }
@@ -3847,38 +3795,10 @@ static IrInstruction *ir_gen_block(IrBuilder *irb, Scope *parent_scope, AstNode
         return result;
 
     // no need for save_err_ret_addr because this cannot return error
-    // but if it is a canceled async function we do need to run the errdefers
+    // only generate unconditional defers
     ir_mark_gen(ir_build_add_implicit_return_type(irb, child_scope, block_node, result));
-    result = ir_mark_gen(ir_build_return_begin(irb, child_scope, block_node, result));
-
-    size_t defer_counts[2];
-    ir_count_defers(irb, child_scope, outer_block_scope, defer_counts);
-    bool have_err_defers = defer_counts[ReturnKindError] > 0;
-    if (!have_err_defers) {
-        // only generate unconditional defers
-        ir_gen_defers_for_block(irb, child_scope, outer_block_scope, false);
-        return ir_mark_gen(ir_build_return(irb, child_scope, result->source_node, result));
-    }
-    IrInstruction *is_canceled = ir_build_test_cancel_requested(irb, child_scope, block_node);
-    IrBasicBlock *all_defers_block = ir_create_basic_block(irb, child_scope, "ErrDefers");
-    IrBasicBlock *normal_defers_block = ir_create_basic_block(irb, child_scope, "Defers");
-    IrBasicBlock *ret_stmt_block = ir_create_basic_block(irb, child_scope, "RetStmt");
-    bool should_inline = ir_should_inline(irb->exec, child_scope);
-    IrInstruction *errdefers_is_comptime = ir_build_const_bool(irb, child_scope, block_node,
-            should_inline || !have_err_defers);
-    ir_mark_gen(ir_build_cond_br(irb, child_scope, block_node, is_canceled,
-            all_defers_block, normal_defers_block, errdefers_is_comptime));
-
-    ir_set_cursor_at_end_and_append_block(irb, all_defers_block);
-    ir_gen_defers_for_block(irb, child_scope, outer_block_scope, true);
-    ir_build_br(irb, child_scope, block_node, ret_stmt_block, errdefers_is_comptime);
-
-    ir_set_cursor_at_end_and_append_block(irb, normal_defers_block);
     ir_gen_defers_for_block(irb, child_scope, outer_block_scope, false);
-    ir_build_br(irb, child_scope, block_node, ret_stmt_block, errdefers_is_comptime);
-
-    ir_set_cursor_at_end_and_append_block(irb, ret_stmt_block);
     return ir_mark_gen(ir_build_return(irb, child_scope, result->source_node, result));
 }
 
@@ -7930,31 +7850,6 @@ static IrInstruction *ir_gen_fn_proto(IrBuilder *irb, Scope *parent_scope, AstNo
     return ir_build_fn_proto(irb, parent_scope, node, param_types, align_value, return_type, is_var_args);
 }
 
-static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node) {
-    assert(node->type == NodeTypeCancel);
-
-    ZigFn *fn_entry = exec_fn_entry(irb->exec);
-    if (!fn_entry) {
-        add_node_error(irb->codegen, node, buf_sprintf("cancel outside function definition"));
-        return irb->codegen->invalid_instruction;
-    }
-    ScopeSuspend *existing_suspend_scope = get_scope_suspend(scope);
-    if (existing_suspend_scope) {
-        if (!existing_suspend_scope->reported_err) {
-            ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot cancel inside suspend block"));
-            add_error_note(irb->codegen, msg, existing_suspend_scope->base.source_node, buf_sprintf("suspend block here"));
-            existing_suspend_scope->reported_err = true;
-        }
-        return irb->codegen->invalid_instruction;
-    }
-
-    IrInstruction *operand = ir_gen_node_extra(irb, node->data.cancel_expr.expr, scope, LValPtr, nullptr);
-    if (operand == irb->codegen->invalid_instruction)
-        return irb->codegen->invalid_instruction;
-
-    return ir_build_cancel(irb, scope, node, operand);
-}
-
 static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node) {
     assert(node->type == NodeTypeResume);
 
@@ -8149,8 +8044,6 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
             return ir_lval_wrap(irb, scope, ir_gen_fn_proto(irb, scope, node), lval, result_loc);
         case NodeTypeErrorSetDecl:
             return ir_lval_wrap(irb, scope, ir_gen_err_set_decl(irb, scope, node), lval, result_loc);
-        case NodeTypeCancel:
-            return ir_lval_wrap(irb, scope, ir_gen_cancel(irb, scope, node), lval, result_loc);
         case NodeTypeResume:
             return ir_lval_wrap(irb, scope, ir_gen_resume(irb, scope, node), lval, result_loc);
         case NodeTypeAwaitExpr:
@@ -8228,7 +8121,6 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
     if (!instr_is_unreachable(result)) {
         ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, result->source_node, result));
-        result = ir_mark_gen(ir_build_return_begin(irb, scope, node, result));
         // no need for save_err_ret_addr because this cannot return error
         ir_mark_gen(ir_build_return(irb, scope, result->source_node, result));
     }
@@ -8340,7 +8232,6 @@ static ConstExprValue *ir_exec_const_result(CodeGen *codegen, IrExecutable *exec
             switch (instruction->id) {
                 case IrInstructionIdUnwrapErrPayload:
                 case IrInstructionIdUnionFieldPtr:
-                case IrInstructionIdReturnBegin:
                     continue;
                 default:
                     break;
@@ -12745,17 +12636,17 @@ static IrInstruction *ir_analyze_instruction_add_implicit_return_type(IrAnalyze
     return ir_const_void(ira, &instruction->base);
 }
 
-static IrInstruction *ir_analyze_instruction_return_begin(IrAnalyze *ira, IrInstructionReturnBegin *instruction) {
+static IrInstruction *ir_analyze_instruction_return(IrAnalyze *ira, IrInstructionReturn *instruction) {
     IrInstruction *operand = instruction->operand->child;
     if (type_is_invalid(operand->value.type))
-        return ira->codegen->invalid_instruction;
+        return ir_unreach_error(ira);
 
     if (!instr_is_comptime(operand) && handle_is_ptr(ira->explicit_return_type)) {
         // result location mechanism took care of it.
-        IrInstruction *result = ir_build_return_begin(&ira->new_irb, instruction->base.scope,
-                instruction->base.source_node, operand);
-        copy_const_val(&result->value, &operand->value, true);
-        return result;
+        IrInstruction *result = ir_build_return(&ira->new_irb, instruction->base.scope,
+                instruction->base.source_node, nullptr);
+        result->value.type = ira->codegen->builtin_types.entry_unreachable;
+        return ir_finish_anal(ira, result);
     }
 
     IrInstruction *casted_operand = ir_implicit_cast(ira, operand, ira->explicit_return_type);
@@ -12777,38 +12668,6 @@ static IrInstruction *ir_analyze_instruction_return_begin(IrAnalyze *ira, IrInst
         return ir_unreach_error(ira);
     }
 
-    IrInstruction *result = ir_build_return_begin(&ira->new_irb, instruction->base.scope,
-            instruction->base.source_node, casted_operand);
-    copy_const_val(&result->value, &casted_operand->value, true);
-    return result;
-}
-
-static IrInstruction *ir_analyze_instruction_return(IrAnalyze *ira, IrInstructionReturn *instruction) {
-    IrInstruction *operand = instruction->operand->child;
-    if (type_is_invalid(operand->value.type))
-        return ir_unreach_error(ira);
-
-    if (!instr_is_comptime(operand) && handle_is_ptr(ira->explicit_return_type)) {
-        // result location mechanism took care of it.
-        IrInstruction *result = ir_build_return(&ira->new_irb, instruction->base.scope,
-                instruction->base.source_node, nullptr);
-        result->value.type = ira->codegen->builtin_types.entry_unreachable;
-        return ir_finish_anal(ira, result);
-    }
-
-    // This cast might have been already done from IrInstructionReturnBegin but it also
-    // might not have, in the case of `try`.
-    IrInstruction *casted_operand = ir_implicit_cast(ira, operand, ira->explicit_return_type);
-    if (type_is_invalid(casted_operand->value.type)) {
-        AstNode *source_node = ira->explicit_return_type_source_node;
-        if (source_node != nullptr) {
-            ErrorMsg *msg = ira->codegen->errors.last();
-            add_error_note(ira->codegen, msg, source_node,
-                    buf_sprintf("return type declared here"));
-        }
-        return ir_unreach_error(ira);
-    }
-
     IrInstruction *result = ir_build_return(&ira->new_irb, instruction->base.scope,
             instruction->base.source_node, casted_operand);
     result->value.type = ira->codegen->builtin_types.entry_unreachable;
@@ -14540,8 +14399,8 @@ static bool exec_has_err_ret_trace(CodeGen *g, IrExecutable *exec) {
 static IrInstruction *ir_analyze_instruction_error_return_trace(IrAnalyze *ira,
         IrInstructionErrorReturnTrace *instruction)
 {
+    ZigType *ptr_to_stack_trace_type = get_pointer_to_type(ira->codegen, get_stack_trace_type(ira->codegen), false);
     if (instruction->optional == IrInstructionErrorReturnTrace::Null) {
-        ZigType *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(ira->codegen);
         ZigType *optional_type = get_optional_type(ira->codegen, ptr_to_stack_trace_type);
         if (!exec_has_err_ret_trace(ira->codegen, ira->new_irb.exec)) {
             IrInstruction *result = ir_const(ira, &instruction->base, optional_type);
@@ -14559,7 +14418,7 @@ static IrInstruction *ir_analyze_instruction_error_return_trace(IrAnalyze *ira,
         assert(ira->codegen->have_err_ret_tracing);
         IrInstruction *new_instruction = ir_build_error_return_trace(&ira->new_irb, instruction->base.scope,
                 instruction->base.source_node, instruction->optional);
-        new_instruction->value.type = get_ptr_to_stack_trace_type(ira->codegen);
+        new_instruction->value.type = ptr_to_stack_trace_type;
         return new_instruction;
     }
 }
@@ -15800,6 +15659,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
         if (impl_fn_type_id->cc == CallingConventionAsync &&
            parent_fn_entry->inferred_async_node == nullptr)
        {
             parent_fn_entry->inferred_async_node = fn_ref->source_node;
+            parent_fn_entry->inferred_async_fn = impl_fn;
         }
 
         IrInstructionCallGen *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base,
@@ -15923,6 +15783,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
     if (fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) {
         parent_fn_entry->inferred_async_node = fn_ref->source_node;
+        parent_fn_entry->inferred_async_fn = fn_entry;
     }
 
     IrInstruction *result_loc;
@@ -24702,21 +24563,6 @@ static IrInstruction *analyze_frame_ptr_to_anyframe_T(IrAnalyze *ira, IrInstruct
     return casted_frame;
 }
 
-static IrInstruction *ir_analyze_instruction_cancel(IrAnalyze *ira, IrInstructionCancel *instruction) {
-    IrInstruction *frame = analyze_frame_ptr_to_anyframe_T(ira, &instruction->base, instruction->frame->child);
-    if (type_is_invalid(frame->value.type))
-        return ira->codegen->invalid_instruction;
-
-    ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
-    ir_assert(fn_entry != nullptr, &instruction->base);
-
-    if (fn_entry->inferred_async_node == nullptr) {
-        fn_entry->inferred_async_node = instruction->base.source_node;
-    }
-
-    return ir_build_cancel(&ira->new_irb, instruction->base.scope, instruction->base.source_node, frame);
-}
-
 static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstructionAwaitSrc *instruction) {
     IrInstruction *frame = analyze_frame_ptr_to_anyframe_T(ira, &instruction->base, instruction->frame->child);
     if (type_is_invalid(frame->value.type))
@@ -24772,15 +24618,6 @@ static IrInstruction *ir_analyze_instruction_resume(IrAnalyze *ira, IrInstructio
     return ir_build_resume(&ira->new_irb, instruction->base.scope, instruction->base.source_node, casted_frame);
 }
 
-static IrInstruction *ir_analyze_instruction_test_cancel_requested(IrAnalyze *ira,
-        IrInstructionTestCancelRequested *instruction)
-{
-    if (ir_should_inline(ira->new_irb.exec, instruction->base.scope)) {
-        return ir_const_bool(ira, &instruction->base, false);
-    }
-    return ir_build_test_cancel_requested(&ira->new_irb, instruction->base.scope, instruction->base.source_node);
-}
-
 static IrInstruction *ir_analyze_instruction_spill_begin(IrAnalyze *ira, IrInstructionSpillBegin *instruction) {
     if (ir_should_inline(ira->new_irb.exec, instruction->base.scope))
         return ir_const_void(ira, &instruction->base);
@@ -24848,8 +24685,6 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
         case IrInstructionIdAwaitGen:
             zig_unreachable();
 
-        case IrInstructionIdReturnBegin:
-            return ir_analyze_instruction_return_begin(ira, (IrInstructionReturnBegin *)instruction);
         case IrInstructionIdReturn:
             return ir_analyze_instruction_return(ira, (IrInstructionReturn *)instruction);
         case IrInstructionIdConst:
@@ -25070,8 +24905,6 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
             return ir_analyze_instruction_error_return_trace(ira, (IrInstructionErrorReturnTrace *)instruction);
         case IrInstructionIdErrorUnion:
             return ir_analyze_instruction_error_union(ira, (IrInstructionErrorUnion *)instruction);
-        case IrInstructionIdCancel:
-            return ir_analyze_instruction_cancel(ira, (IrInstructionCancel *)instruction);
         case IrInstructionIdAtomicRmw:
             return ir_analyze_instruction_atomic_rmw(ira, (IrInstructionAtomicRmw *)instruction);
         case IrInstructionIdAtomicLoad:
@@ -25114,8 +24947,6 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
             return ir_analyze_instruction_resume(ira, (IrInstructionResume *)instruction);
         case IrInstructionIdAwaitSrc:
             return ir_analyze_instruction_await(ira, (IrInstructionAwaitSrc *)instruction);
-        case IrInstructionIdTestCancelRequested:
-            return ir_analyze_instruction_test_cancel_requested(ira, (IrInstructionTestCancelRequested *)instruction);
         case IrInstructionIdSpillBegin:
             return ir_analyze_instruction_spill_begin(ira, (IrInstructionSpillBegin *)instruction);
         case IrInstructionIdSpillEnd:
@@ -25209,7 +25040,6 @@ bool ir_has_side_effects(IrInstruction *instruction) {
         case IrInstructionIdStorePtr:
         case IrInstructionIdCallSrc:
         case IrInstructionIdCallGen:
-        case IrInstructionIdReturnBegin:
        case IrInstructionIdReturn:
        case IrInstructionIdUnreachable:
        case IrInstructionIdSetCold:
@@ -25235,7 +25065,6 @@ bool ir_has_side_effects(IrInstruction *instruction) {
        case IrInstructionIdPtrType:
        case IrInstructionIdSetAlignStack:
        case IrInstructionIdExport:
-        case IrInstructionIdCancel:
        case IrInstructionIdSaveErrRetAddr:
        case IrInstructionIdAddImplicitReturnType:
        case IrInstructionIdAtomicRmw:
@@ -25355,7 +25184,6 @@ bool ir_has_side_effects(IrInstruction *instruction) {
        case IrInstructionIdHasDecl:
        case IrInstructionIdAllocaSrc:
        case IrInstructionIdAllocaGen:
-        case IrInstructionIdTestCancelRequested:
        case IrInstructionIdSpillEnd:
            return false;
 
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index 63f3711266..7580f19059 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -64,12 +64,6 @@ static void ir_print_other_block(IrPrint *irp, IrBasicBlock *bb) {
     }
 }
 
-static void ir_print_return_begin(IrPrint *irp, IrInstructionReturnBegin *instruction) {
-    fprintf(irp->f, "@returnBegin(");
-    ir_print_other_instruction(irp, instruction->operand);
-    fprintf(irp->f, ")");
-}
-
 static void ir_print_return(IrPrint *irp, IrInstructionReturn *instruction) {
     fprintf(irp->f, "return ");
     ir_print_other_instruction(irp, instruction->operand);
@@ -1394,11 +1388,6 @@ static void ir_print_error_union(IrPrint *irp, IrInstructionErrorUnion *instruct
     ir_print_other_instruction(irp, instruction->payload);
 }
 
-static void ir_print_cancel(IrPrint *irp, IrInstructionCancel *instruction) {
-    fprintf(irp->f, "cancel ");
-    ir_print_other_instruction(irp, instruction->frame);
-}
-
 static void ir_print_atomic_rmw(IrPrint *irp, IrInstructionAtomicRmw *instruction) {
     fprintf(irp->f, "@atomicRmw(");
     if (instruction->operand_type != nullptr) {
@@ -1549,10 +1538,6 @@ static void ir_print_await_gen(IrPrint *irp, IrInstructionAwaitGen *instruction)
     fprintf(irp->f, ")");
 }
 
-static void ir_print_test_cancel_requested(IrPrint *irp, IrInstructionTestCancelRequested *instruction) {
-    fprintf(irp->f, "@testCancelRequested()");
-}
-
 static void ir_print_spill_begin(IrPrint *irp, IrInstructionSpillBegin *instruction) {
     fprintf(irp->f, "@spillBegin(");
     ir_print_other_instruction(irp, instruction->operand);
@@ -1570,9 +1555,6 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
     switch (instruction->id) {
         case IrInstructionIdInvalid:
             zig_unreachable();
-        case IrInstructionIdReturnBegin:
-            ir_print_return_begin(irp, (IrInstructionReturnBegin *)instruction);
-            break;
         case IrInstructionIdReturn:
             ir_print_return(irp, (IrInstructionReturn *)instruction);
             break;
@@ -1966,9 +1948,6 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
         case IrInstructionIdErrorUnion:
             ir_print_error_union(irp, (IrInstructionErrorUnion *)instruction);
             break;
-        case IrInstructionIdCancel:
-            ir_print_cancel(irp, (IrInstructionCancel *)instruction);
-            break;
         case IrInstructionIdAtomicRmw:
             ir_print_atomic_rmw(irp, (IrInstructionAtomicRmw *)instruction);
             break;
@@ -2047,9 +2026,6 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
         case IrInstructionIdAwaitGen:
             ir_print_await_gen(irp, (IrInstructionAwaitGen *)instruction);
             break;
-        case IrInstructionIdTestCancelRequested:
-            ir_print_test_cancel_requested(irp, (IrInstructionTestCancelRequested *)instruction);
-            break;
         case IrInstructionIdSpillBegin:
             ir_print_spill_begin(irp, (IrInstructionSpillBegin *)instruction);
             break;
diff --git a/src/parser.cpp b/src/parser.cpp
index 82312aacf3..afe5735a06 100644
--- a/src/parser.cpp
+++ b/src/parser.cpp
@@ -1167,7 +1167,6 @@ static AstNode *ast_parse_prefix_expr(ParseContext *pc) {
 // <- AsmExpr
 //    / IfExpr
 //    / KEYWORD_break BreakLabel? Expr?
-//    / KEYWORD_cancel Expr
 //    / KEYWORD_comptime Expr
 //    / KEYWORD_continue BreakLabel?
 //    / KEYWORD_resume Expr
@@ -1195,14 +1194,6 @@ static AstNode *ast_parse_primary_expr(ParseContext *pc) {
         return res;
     }
 
-    Token *cancel = eat_token_if(pc, TokenIdKeywordCancel);
-    if (cancel != nullptr) {
-        AstNode *expr = ast_expect(pc, ast_parse_expr);
-        AstNode *res = ast_create_node(pc, NodeTypeCancel, cancel);
-        res->data.cancel_expr.expr = expr;
-        return res;
-    }
-
     Token *comptime = eat_token_if(pc, TokenIdKeywordCompTime);
     if (comptime != nullptr) {
         AstNode *expr = ast_expect(pc, ast_parse_expr);
@@ -3035,9 +3026,6 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont
         case NodeTypeErrorSetDecl:
             visit_node_list(&node->data.err_set_decl.decls, visit, context);
             break;
-        case NodeTypeCancel:
-            visit_field(&node->data.cancel_expr.expr, visit, context);
-            break;
         case NodeTypeResume:
             visit_field(&node->data.resume_expr.expr, visit, context);
             break;
diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp
index 38c6c7153e..84f3f2c0ec 100644
--- a/src/tokenizer.cpp
+++ b/src/tokenizer.cpp
@@ -114,7 +114,6 @@ static const struct ZigKeyword zig_keywords[] = {
     {"async", TokenIdKeywordAsync},
     {"await", TokenIdKeywordAwait},
     {"break", TokenIdKeywordBreak},
-    {"cancel", TokenIdKeywordCancel},
     {"catch", TokenIdKeywordCatch},
     {"comptime", TokenIdKeywordCompTime},
     {"const", TokenIdKeywordConst},
@@ -1531,7 +1530,6 @@ const char * token_name(TokenId id) {
         case TokenIdKeywordAwait: return "await";
         case TokenIdKeywordResume: return "resume";
         case TokenIdKeywordSuspend: return "suspend";
-        case TokenIdKeywordCancel: return "cancel";
         case TokenIdKeywordAlign: return "align";
         case TokenIdKeywordAnd: return "and";
         case TokenIdKeywordAnyFrame: return "anyframe";
diff --git a/src/tokenizer.hpp b/src/tokenizer.hpp
index 98bdfea907..ce62f5dc87 100644
--- a/src/tokenizer.hpp
+++ b/src/tokenizer.hpp
@@ -58,7 +58,6 @@ enum TokenId {
     TokenIdKeywordAsync,
     TokenIdKeywordAwait,
     TokenIdKeywordBreak,
-    TokenIdKeywordCancel,
     TokenIdKeywordCatch,
     TokenIdKeywordCompTime,
     TokenIdKeywordConst,
diff --git a/std/event/fs.zig b/std/event/fs.zig
index 73a296ca3f..d6d8f2faef 100644
--- a/std/event/fs.zig
+++ b/std/event/fs.zig
@@ -1301,7 +1301,7 @@ async fn testFsWatch(loop: *Loop) !void {
     const ev = try async watch.channel.get();
     var ev_consumed = false;
-    defer if (!ev_consumed) cancel ev;
+    defer if (!ev_consumed) await ev;
 
     // overwrite line 2
     const fd = try await try async openReadWrite(loop, file_path, File.default_mode);
diff --git a/std/event/future.zig b/std/event/future.zig
index e5f6d984ce..45bb7759c5 100644
--- a/std/event/future.zig
+++ b/std/event/future.zig
@@ -110,7 +110,7 @@ async fn testFuture(loop: *Loop) void {
     const b_result = await b;
     const result = a_result + b_result;
 
-    cancel c;
+    await c;
 
     testing.expect(result == 12);
 }
diff --git a/std/event/group.zig b/std/event/group.zig
index 1fc4a61e93..f96b938f80 100644
--- a/std/event/group.zig
+++ b/std/event/group.zig
@@ -27,17 +27,6 @@ pub fn Group(comptime ReturnType: type) type {
         };
     }
 
-    /// Cancel all the outstanding frames. Can be called even if wait was already called.
-    pub fn deinit(self: *Self) void {
-        while (self.frame_stack.pop()) |node| {
-            cancel node.data;
-        }
-        while (self.alloc_stack.pop()) |node| {
-            cancel node.data;
-            self.lock.loop.allocator.destroy(node);
-        }
-    }
-
    /// Add a frame to the group. Thread-safe.
    pub fn add(self: *Self, handle: anyframe->ReturnType) (error{OutOfMemory}!void) {
        const node = try self.lock.loop.allocator.create(Stack.Node);
@@ -64,13 +53,14 @@ pub fn Group(comptime ReturnType: type) type {
        const held = self.lock.acquire();
        defer held.release();
 
+        var result: ReturnType = {};
+
        while (self.frame_stack.pop()) |node| {
            if (Error == void) {
                await node.data;
            } else {
                (await node.data) catch |err| {
-                    self.deinit();
-                    return err;
+                    result = err;
                };
            }
        }
@@ -81,11 +71,11 @@ pub fn Group(comptime ReturnType: type) type {
                await handle;
            } else {
                (await handle) catch |err| {
-                    self.deinit();
-                    return err;
+                    result = err;
                };
            }
        }
+        return result;
    }
 };
}
diff --git a/std/event/net.zig b/std/event/net.zig
index 2a28a0ef93..bed665dcdc 100644
--- a/std/event/net.zig
+++ b/std/event/net.zig
@@ -54,7 +54,7 @@ pub const Server = struct {
         self.listen_address = std.net.Address.initPosix(try os.getsockname(sockfd));
 
         self.accept_frame = async Server.handler(self);
-        errdefer cancel self.accept_frame.?;
+        errdefer await self.accept_frame.?;
 
         self.listen_resume_node.handle = self.accept_frame.?;
         try self.loop.linuxAddFd(sockfd, &self.listen_resume_node, os.EPOLLIN | os.EPOLLOUT | os.EPOLLET);
@@ -71,7 +71,7 @@ pub const Server = struct {
     }
 
     pub fn deinit(self: *Server) void {
-        if (self.accept_frame) |accept_frame| cancel accept_frame;
+        if (self.accept_frame) |accept_frame| await accept_frame;
         if (self.sockfd) |sockfd| os.close(sockfd);
     }
 
@@ -274,13 +274,9 @@ test "listen on a port, send bytes, receive bytes" {
             const self = @fieldParentPtr(Self, "tcp_server", tcp_server);
             var socket = _socket; // TODO https://github.com/ziglang/zig/issues/1592
             defer socket.close();
-            // TODO guarantee elision of this allocation
             const next_handler = errorableHandler(self, _addr, socket) catch |err| {
                 std.debug.panic("unable to handle connection: {}\n", err);
             };
-            suspend {
-                cancel @frame();
-            }
         }
         async fn errorableHandler(self: *Self, _addr: *const std.net.Address, _socket: File) !void {
             const addr = _addr.*; // TODO https://github.com/ziglang/zig/issues/1592
diff --git a/std/zig/parse.zig b/std/zig/parse.zig
index 600178cdce..077870a9ca 100644
--- a/std/zig/parse.zig
+++ b/std/zig/parse.zig
@@ -814,7 +814,6 @@ fn parsePrefixExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
/// <- AsmExpr
///    / IfExpr
///    / KEYWORD_break BreakLabel? Expr?
-///    / KEYWORD_cancel Expr
///    / KEYWORD_comptime Expr
///    / KEYWORD_continue BreakLabel?
///    / KEYWORD_resume Expr
@@ -839,20 +838,6 @@ fn parsePrimaryExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node
         return &node.base;
     }
 
-    if (eatToken(it, .Keyword_cancel)) |token| {
-        const expr_node = try expectNode(arena, it, tree, parseExpr, AstError{
-            .ExpectedExpr = AstError.ExpectedExpr{ .token = it.index },
-        });
-        const node = try arena.create(Node.PrefixOp);
-        node.* = Node.PrefixOp{
-            .base = Node{ .id = .PrefixOp },
-            .op_token = token,
-            .op = Node.PrefixOp.Op.Cancel,
-            .rhs = expr_node,
-        };
-        return &node.base;
-    }
-
     if (eatToken(it, .Keyword_comptime)) |token| {
         const expr_node = try expectNode(arena, it, tree, parseExpr, AstError{
             .ExpectedExpr = AstError.ExpectedExpr{ .token = it.index },
diff --git a/std/zig/parser_test.zig b/std/zig/parser_test.zig
index 5f2a3934fd..c5c740353e 100644
--- a/std/zig/parser_test.zig
+++ b/std/zig/parser_test.zig
@@ -2115,10 +2115,10 @@ test "zig fmt: async functions" {
         \\    await p;
         \\}
         \\
-        \\test "suspend, resume, cancel" {
+        \\test "suspend, resume, await" {
         \\    const p: anyframe = async testAsyncSeq();
         \\    resume p;
-        \\    cancel p;
+        \\    await p;
         \\}
         \\
     );
diff --git a/std/zig/tokenizer.zig b/std/zig/tokenizer.zig
index 9de20c39f2..4d4ceb07db 100644
--- a/std/zig/tokenizer.zig
+++ b/std/zig/tokenizer.zig
@@ -21,7 +21,6 @@ pub const Token = struct {
         Keyword{ .bytes = "await", .id = Id.Keyword_await },
         Keyword{ .bytes = "break", .id = Id.Keyword_break },
         Keyword{ .bytes = "catch", .id = Id.Keyword_catch },
-        Keyword{ .bytes = "cancel", .id = Id.Keyword_cancel },
         Keyword{ .bytes = "comptime", .id = Id.Keyword_comptime },
         Keyword{ .bytes = "const", .id = Id.Keyword_const },
         Keyword{ .bytes = "continue", .id = Id.Keyword_continue },
@@ -151,7 +150,6 @@ pub const Token = struct {
         Keyword_async,
         Keyword_await,
         Keyword_break,
-        Keyword_cancel,
         Keyword_catch,
         Keyword_comptime,
         Keyword_const,
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index c07786d462..f53b1c9707 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -61,13 +61,15 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
     cases.add(
         "runtime-known async function called",
         \\export fn entry() void {
+        \\    _ = async amain();
+        \\}
+        \\fn amain() void {
         \\    var ptr = afunc;
         \\    _ = ptr();
         \\}
-        \\
         \\async fn afunc() void {}
     ,
-        "tmp.zig:3:12: error: function is not comptime-known; @asyncCall required",
+        "tmp.zig:6:12: error: function is not comptime-known; @asyncCall required",
     );
 
     cases.add(
@@ -3388,7 +3390,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
         \\
         \\export fn entry() usize { return @sizeOf(@typeOf(Foo)); }
     ,
-        "tmp.zig:5:18: error: unable to evaluate constant expression",
+        "tmp.zig:5:25: error: unable to evaluate constant expression",
         "tmp.zig:2:12: note: called from here",
         "tmp.zig:2:8: note: called from here",
     );
diff --git a/test/stage1/behavior.zig b/test/stage1/behavior.zig
index dba43268e2..6ec1521048 100644
--- a/test/stage1/behavior.zig
+++ b/test/stage1/behavior.zig
@@ -41,7 +41,6 @@ comptime {
     _ = @import("behavior/bugs/920.zig");
     _ = @import("behavior/byteswap.zig");
     _ = @import("behavior/byval_arg_var.zig");
-    _ = @import("behavior/cancel.zig");
     _ = @import("behavior/cast.zig");
     _ = @import("behavior/const_slice_child.zig");
     _ = @import("behavior/defer.zig");
diff --git a/test/stage1/behavior/async_fn.zig b/test/stage1/behavior/async_fn.zig
index 2d76b47244..ec0c9e52a6 100644
--- a/test/stage1/behavior/async_fn.zig
+++ b/test/stage1/behavior/async_fn.zig
@@ -150,7 +150,7 @@ test "coroutine suspend, resume" {
     seq('a');
     var f = async testAsyncSeq();
     seq('c');
-    cancel f;
+    await f;
     seq('g');
 }
 
@@ -271,7 +271,6 @@ test "async function with dot syntax" {
         }
     };
     const p = async S.foo();
-    // can't cancel in tests because they are non-async functions
     expect(S.y == 2);
 }
 
@@ -286,7 +285,7 @@ test "async fn pointer in a struct field" {
     comptime expect(@typeOf(f) == anyframe->void);
     expect(data == 2);
     resume f;
-    expect(data == 2);
+    expect(data == 4);
     _ = async doTheAwait(f);
     expect(data == 4);
 }
@@ -394,7 +393,6 @@ async fn printTrace(p: anyframe->(anyerror!void)) void {
 test "break from suspend" {
     var my_result: i32 = 1;
     const p = async testBreakFromSuspend(&my_result);
-    // can't cancel here
     std.testing.expect(my_result == 2);
 }
 async fn testBreakFromSuspend(my_result: *i32) void {
@@ -530,45 +528,6 @@ test "call async function which has struct return type" {
     S.doTheTest();
 }
 
-test "errdefers in scope get run when canceling async fn call" {
-    const S = struct {
-        var frame: anyframe = undefined;
-        var x: u32 = 0;
-
-        fn doTheTest() void {
-            x = 9;
-            _ = async cancelIt();
-            resume frame;
-            expect(x == 6);
-
-            x = 9;
-            _ = async awaitIt();
-            resume frame;
-            expect(x == 11);
-        }
-
-        fn cancelIt() void {
-            var f = async func();
-            cancel f;
-        }
-
-        fn awaitIt() void {
-            var f = async func();
-            await f;
-        }
-
-        fn func() void {
-            defer x += 1;
-            errdefer x /= 2;
-            defer x += 1;
-            suspend {
-                frame = @frame();
-            }
-        }
-    };
-    S.doTheTest();
-}
-
 test "pass string literal to async function" {
     const S = struct {
         var frame: anyframe = undefined;
@@ -590,7 +549,7 @@ test "pass string literal to async function" {
     S.doTheTest();
 }
 
-test "cancel inside an errdefer" {
+test "await inside an errdefer" {
     const S = struct {
         var frame: anyframe = undefined;
 
@@ -601,7 +560,7 @@
 
         fn amainWrap() !void {
             var foo = async func();
-            errdefer cancel foo;
+            errdefer await foo;
             return error.Bad;
         }
 
@@ -614,35 +573,6 @@
     S.doTheTest();
 }
 
-test "combining try with errdefer cancel" {
-    const S = struct {
-        var frame: anyframe = undefined;
-        var ok = false;
-
-        fn doTheTest() void {
-            _ = async amain();
-            resume frame;
-            expect(ok);
-        }
-
-        fn amain() !void {
-            var f = async func("https://example.com/");
-            errdefer cancel f;
-
-            _ = try await f;
-        }
-
-        fn func(url: []const u8) ![]u8 {
-            errdefer ok = true;
-            frame = @frame();
-            suspend;
-            return error.Bad;
-        }
-
-    };
-    S.doTheTest();
-}
-
 test "try in an async function with error union and non-zero-bit payload" {
     const S = struct {
         var frame: anyframe = undefined;
@@ -730,14 +660,22 @@ fn testAsyncAwaitTypicalUsage(comptime simulate_fail_download: bool, comptime si
         fn amain() !void {
             const allocator = std.heap.direct_allocator; // TODO once we have the debug allocator, use that, so that this can detect leaks
 
             var download_frame = async fetchUrl(allocator, "https://example.com/");
-            errdefer cancel download_frame;
+            var download_awaited = false;
+            errdefer if (!download_awaited) {
+                if (await download_frame) |x| allocator.free(x) else |_| {}
+            };
 
             var file_frame = async readFile(allocator, "something.txt");
-            errdefer cancel file_frame;
+            var file_awaited = false;
+            errdefer if (!file_awaited) {
+                if (await file_frame) |x| allocator.free(x) else |_| {}
+            };
 
+            download_awaited = true;
             const download_text = try await download_frame;
             defer allocator.free(download_text);
 
+            file_awaited = true;
             const file_text = try await file_frame;
             defer allocator.free(file_text);
 
diff --git a/test/stage1/behavior/cancel.zig b/test/stage1/behavior/cancel.zig
deleted file mode 100644
index 5dedb20159..0000000000
--- a/test/stage1/behavior/cancel.zig
+++ /dev/null
@@ -1,115 +0,0 @@
-const std = @import("std");
-const expect = std.testing.expect;
-
-var defer_f1: bool = false;
-var defer_f2: bool = false;
-var defer_f3: bool = false;
-var f3_frame: anyframe = undefined;
-
-test "cancel forwards" {
-    _ = async atest1();
-    resume f3_frame;
-}
-
-fn atest1() void {
-    const p = async f1();
-    cancel &p;
-    expect(defer_f1);
-    expect(defer_f2);
-    expect(defer_f3);
-}
-
-async fn f1() void {
-    defer {
-        defer_f1 = true;
-    }
-    var f2_frame = async f2();
-    await f2_frame;
-}
-
-async fn f2() void {
-    defer {
-        defer_f2 = true;
-    }
-    f3();
-}
-
-async fn f3() void {
-    f3_frame = @frame();
-    defer {
-        defer_f3 = true;
-    }
-    suspend;
-}
-
-var defer_b1: bool = false;
-var defer_b2: bool = false;
-var defer_b3: bool = false;
-var defer_b4: bool = false;
-
-test "cancel backwards" {
-    var b1_frame = async b1();
-    resume b4_handle;
-    _ = async awaitAFrame(&b1_frame);
-    expect(defer_b1);
-    expect(defer_b2);
-    expect(defer_b3);
-    expect(defer_b4);
-}
-
-async fn b1() void {
-    defer {
-        defer_b1 = true;
-    }
-    b2();
-}
-
-var b4_handle: anyframe->void = undefined;
-
-async fn b2() void {
-    const b3_handle = async b3();
-    resume b4_handle;
-    defer {
-        defer_b2 = true;
-    }
-    const value = await b3_handle;
-    expect(value == 1234);
-}
-
-async fn b3() i32 {
-    defer {
-        defer_b3 = true;
-    }
-    b4();
-    return 1234;
-}
-
-async fn b4() void {
-    defer {
-        defer_b4 = true;
-    }
-    suspend {
-        b4_handle = @frame();
-    }
-    suspend;
-}
-
-fn awaitAFrame(f: anyframe->void) void {
-    await f;
-}
-
-test "cancel on a non-pointer" {
-    const S = struct {
-        fn doTheTest() void {
-            _ = async atest();
-        }
-        fn atest() void {
-            var f = async func();
-            cancel f;
-        }
-        fn func() void {
-            suspend;
-        }
-    };
-    S.doTheTest();
-}
-- 
cgit v1.2.3
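The series above removes the `cancel` keyword from the language entirely: frames are now cleaned up by awaiting them, as the std/event and behavior-test hunks show. A minimal sketch of the migration pattern, using illustrative names that do not appear in the patch:

    const std = @import("std");
    const expect = std.testing.expect;

    var cleanup_count: u32 = 0;

    fn work() void {
        defer cleanup_count += 1; // defers still run on normal completion
        suspend;
    }

    test "await instead of cancel" {
        var f = async work();
        resume f; // drive the frame to completion
        await f; // formerly `cancel f`; awaiting a completed frame returns immediately
        expect(cleanup_count == 1);
    }

The cost of the migration is visible in the Group.wait() hunk: without cancel, an error in one frame can no longer abort the others, so the loop now awaits every outstanding frame and remembers an error in `result` instead of returning early.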
From 55f5cee86b39bb2127a316f9b5d0abf532580cac Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 15 Aug 2019 15:06:05 -0400
Subject: fix error return traces for async calls of blocking functions

---
 src/analyze.cpp                   |  5 ++++
 src/codegen.cpp                   | 52 ++++++++++++++++++++++++++++-----------
 src/ir.cpp                        |  4 +++
 test/compile_errors.zig           | 12 +++++++++
 test/stage1/behavior/async_fn.zig | 35 ++++++++++++++++++++------
 5 files changed, 85 insertions(+), 23 deletions(-)

(limited to 'test/compile_errors.zig')

diff --git a/src/analyze.cpp b/src/analyze.cpp
index fc42abaf26..21289f24a8 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -3819,6 +3819,11 @@ static void add_async_error_notes(CodeGen *g, ErrorMsg *msg, ZigFn *fn) {
     } else if (fn->inferred_async_node->type == NodeTypeAwaitExpr) {
         add_error_note(g, msg, fn->inferred_async_node,
                 buf_sprintf("await is a suspend point"));
+    } else if (fn->inferred_async_node->type == NodeTypeFnCallExpr &&
+        fn->inferred_async_node->data.fn_call_expr.is_builtin)
+    {
+        add_error_note(g, msg, fn->inferred_async_node,
+                buf_sprintf("@frame() causes function to be async"));
     } else {
         add_error_note(g, msg, fn->inferred_async_node,
                 buf_sprintf("suspends here"));
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 9bf7b0287b..45e2e4122f 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -3760,6 +3760,23 @@ static LLVMValueRef gen_frame_size(CodeGen *g, LLVMValueRef fn_val) {
     return LLVMBuildLoad(g->builder, prefix_ptr, "");
 }
 
+static void gen_init_stack_trace(CodeGen *g, LLVMValueRef trace_field_ptr, LLVMValueRef addrs_field_ptr) {
+    LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
+    LLVMValueRef zero = LLVMConstNull(usize_type_ref);
+
+    LLVMValueRef index_ptr = LLVMBuildStructGEP(g->builder, trace_field_ptr, 0, "");
+    LLVMBuildStore(g->builder, zero, index_ptr);
+
+    LLVMValueRef addrs_slice_ptr = LLVMBuildStructGEP(g->builder, trace_field_ptr, 1, "");
+    LLVMValueRef addrs_ptr_ptr = LLVMBuildStructGEP(g->builder, addrs_slice_ptr, slice_ptr_index, "");
+    LLVMValueRef indices[] = { LLVMConstNull(usize_type_ref), LLVMConstNull(usize_type_ref) };
+    LLVMValueRef trace_field_addrs_as_ptr = LLVMBuildInBoundsGEP(g->builder, addrs_field_ptr, indices, 2, "");
+    LLVMBuildStore(g->builder, trace_field_addrs_as_ptr, addrs_ptr_ptr);
+
+    LLVMValueRef addrs_len_ptr = LLVMBuildStructGEP(g->builder, addrs_slice_ptr, slice_len_index, "");
+    LLVMBuildStore(g->builder, LLVMConstInt(usize_type_ref, stack_trace_ptr_count, false), addrs_len_ptr);
+}
+
 static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstructionCallGen *instruction) {
     LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
 
@@ -3900,9 +3917,24 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
         if (first_arg_ret) {
             gen_param_values.append(ret_ptr);
         }
-    }
-    if (prefix_arg_err_ret_stack) {
-        gen_param_values.append(get_cur_err_ret_trace_val(g, instruction->base.scope));
+        if (prefix_arg_err_ret_stack) {
+            // Set up the callee stack trace pointer pointing into the frame.
+            // Then we have to wire up the StackTrace pointers.
+            // Await is responsible for merging error return traces.
+            uint32_t trace_field_index_start = frame_index_trace_arg(g, src_return_type);
+            LLVMValueRef callee_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc,
+                    trace_field_index_start, "");
+            LLVMValueRef trace_field_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc,
+                    trace_field_index_start + 2, "");
+            LLVMValueRef addrs_field_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc,
+                    trace_field_index_start + 3, "");
+
+            LLVMBuildStore(g->builder, trace_field_ptr, callee_trace_ptr_ptr);
+
+            gen_init_stack_trace(g, trace_field_ptr, addrs_field_ptr);
+
+            gen_param_values.append(get_cur_err_ret_trace_val(g, instruction->base.scope));
+        }
     } else {
         if (first_arg_ret) {
@@ -7126,20 +7158,10 @@ static void do_code_gen(CodeGen *g) {
 
             LLVMValueRef trace_field_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
                     trace_field_index_stack, "");
-            LLVMValueRef trace_field_addrs = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
+            LLVMValueRef addrs_field_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
                     trace_field_index_stack + 1, "");
 
-            LLVMValueRef index_ptr = LLVMBuildStructGEP(g->builder, trace_field_ptr, 0, "");
-            LLVMBuildStore(g->builder, zero, index_ptr);
-
-            LLVMValueRef addrs_slice_ptr = LLVMBuildStructGEP(g->builder, trace_field_ptr, 1, "");
-            LLVMValueRef addrs_ptr_ptr = LLVMBuildStructGEP(g->builder, addrs_slice_ptr, slice_ptr_index, "");
-            LLVMValueRef indices[] = { LLVMConstNull(usize_type_ref), LLVMConstNull(usize_type_ref) };
-            LLVMValueRef trace_field_addrs_as_ptr = LLVMBuildInBoundsGEP(g->builder, trace_field_addrs, indices, 2, "");
-            LLVMBuildStore(g->builder, trace_field_addrs_as_ptr, addrs_ptr_ptr);
-
-            LLVMValueRef addrs_len_ptr = LLVMBuildStructGEP(g->builder, addrs_slice_ptr, slice_len_index, "");
-            LLVMBuildStore(g->builder, LLVMConstInt(usize_type_ref, stack_trace_ptr_count, false), addrs_len_ptr);
+            gen_init_stack_trace(g, trace_field_ptr, addrs_field_ptr);
         }
         render_async_var_decls(g, entry_block->instruction_list.at(0)->scope);
     } else {
diff --git a/src/ir.cpp b/src/ir.cpp
index 3e80fad270..ddaf82893a 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -22078,6 +22078,10 @@ static IrInstruction *ir_analyze_instruction_frame_handle(IrAnalyze *ira, IrInst
     ZigFn *fn = exec_fn_entry(ira->new_irb.exec);
     ir_assert(fn != nullptr, &instruction->base);
 
+    if (fn->inferred_async_node == nullptr) {
+        fn->inferred_async_node = instruction->base.source_node;
+    }
+
     ZigType *frame_type = get_fn_frame_type(ira->codegen, fn);
     ZigType *ptr_frame_type = get_pointer_to_type(ira->codegen, frame_type, false);
 
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index f53b1c9707..0d579ece95 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -2,6 +2,18 @@ const tests = @import("tests.zig");
 const builtin = @import("builtin");
 
 pub fn addCases(cases: *tests.CompileErrorContext) void {
+    cases.add(
+        "@frame() causes function to be async",
+        \\export fn entry() void {
+        \\    func();
+        \\}
+        \\fn func() void {
+        \\    _ = @frame();
+        \\}
+    ,
+        "tmp.zig:1:1: error: function with calling convention 'ccc' cannot be async",
+        "tmp.zig:5:9: note: @frame() causes function to be async",
+    );
     cases.add(
         "invalid suspend in exported function",
         \\export fn entry() void {
diff --git a/test/stage1/behavior/async_fn.zig b/test/stage1/behavior/async_fn.zig
index ec0c9e52a6..96b7b02137 100644
--- a/test/stage1/behavior/async_fn.zig
+++ b/test/stage1/behavior/async_fn.zig
@@ -634,17 +634,30 @@ test "returning a const error from async function" {
 test "async/await typical usage" {
     inline for ([_]bool{false, true}) |b1| {
         inline for ([_]bool{false, true}) |b2| {
-            testAsyncAwaitTypicalUsage(b1, b2).doTheTest();
+            inline for ([_]bool{false, true}) |b3| {
+                inline for ([_]bool{false, true}) |b4| {
+                    testAsyncAwaitTypicalUsage(b1, b2, b3, b4).doTheTest();
+                }
+            }
         }
     }
 }
 
-fn testAsyncAwaitTypicalUsage(comptime simulate_fail_download: bool, comptime simulate_fail_file: bool) type {
+fn testAsyncAwaitTypicalUsage(
+    comptime simulate_fail_download: bool,
+    comptime simulate_fail_file: bool,
+    comptime suspend_download: bool,
+    comptime suspend_file: bool) type
+{
     return struct {
         fn doTheTest() void {
             _ = async amainWrap();
-            resume global_file_frame;
-            resume global_download_frame;
+            if (suspend_file) {
+                resume global_file_frame;
+            }
+            if (suspend_download) {
+                resume global_download_frame;
+            }
         }
         fn amainWrap() void {
             if (amain()) |_| {
@@ -685,20 +698,26 @@
        var global_download_frame: anyframe = undefined;
 
        fn fetchUrl(allocator: *std.mem.Allocator, url: []const u8) anyerror![]u8 {
-            global_download_frame = @frame();
            const result = try std.mem.dupe(allocator, u8, "expected download text");
            errdefer allocator.free(result);
-            suspend;
+            if (suspend_download) {
+                suspend {
+                    global_download_frame = @frame();
+                }
+            }
            if (simulate_fail_download) return error.NoResponse;
            return result;
        }
 
        var global_file_frame: anyframe = undefined;
 
        fn readFile(allocator: *std.mem.Allocator, filename: []const u8) anyerror![]u8 {
-            global_file_frame = @frame();
            const result = try std.mem.dupe(allocator, u8, "expected file text");
            errdefer allocator.free(result);
-            suspend;
+            if (suspend_file) {
+                suspend {
+                    global_file_frame = @frame();
+                }
+            }
            if (simulate_fail_file) return error.FileNotFound;
            return result;
        }
-- 
cgit v1.2.3
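A sketch of the case this commit fixes, with illustrative names: an `async` call of a function that has no suspend point but can return an error. The call site now reserves StackTrace storage inside the callee's frame and wires up the trace pointer, and the await merges the callee's error return trace back into the caller's.

    const std = @import("std");

    fn fails() anyerror!void {
        return error.Ouch; // blocking: no suspend point, but errorable
    }

    fn amain() void {
        var f = async fails(); // trace storage lives in the frame of fails()
        std.testing.expectError(error.Ouch, await f);
    }

    test "async call of a blocking function that returns an error" {
        _ = async amain();
    }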
From 1254a453b91623849dcb0a655b1212c9a179d29a Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 16 Aug 2019 10:44:42 -0400
Subject: add compile error for @Frame() of generic function

See #3063

---
 src/analyze.cpp         |  2 ++
 src/ir.cpp              |  6 ++++++
 test/compile_errors.zig | 13 +++++++++++++
 3 files changed, 21 insertions(+)

(limited to 'test/compile_errors.zig')

diff --git a/src/analyze.cpp b/src/analyze.cpp
index 21289f24a8..4aff6da8e9 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -5197,6 +5197,8 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) {
         return ErrorNone;
 
     ZigFn *fn = frame_type->data.frame.fn;
+    assert(!fn->type_entry->data.fn.is_generic);
+
     switch (fn->anal_state) {
         case FnAnalStateInvalid:
             return ErrorSemanticAnalyzeFail;
diff --git a/src/ir.cpp b/src/ir.cpp
index ddaf82893a..9589000ab0 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -22095,6 +22095,12 @@ static IrInstruction *ir_analyze_instruction_frame_type(IrAnalyze *ira, IrInstru
     if (fn == nullptr)
         return ira->codegen->invalid_instruction;
 
+    if (fn->type_entry->data.fn.is_generic) {
+        ir_add_error(ira, &instruction->base,
+                buf_sprintf("@Frame() of generic function"));
+        return ira->codegen->invalid_instruction;
+    }
+
     ZigType *ty = get_fn_frame_type(ira->codegen, fn);
     return ir_const_type(ira, &instruction->base, ty);
 }
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 0d579ece95..c4549be405 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -2,6 +2,18 @@ const tests = @import("tests.zig");
 const builtin = @import("builtin");
 
 pub fn addCases(cases: *tests.CompileErrorContext) void {
+    cases.add(
+        "@Frame() of generic function",
+        \\export fn entry() void {
+        \\    var frame: @Frame(func) = undefined;
+        \\}
+        \\fn func(comptime T: type) void {
+        \\    var x: T = undefined;
+        \\}
+    ,
+        "tmp.zig:2:16: error: @Frame() of generic function",
+    );
+
     cases.add(
         "@frame() causes function to be async",
         \\export fn entry() void {
@@ -14,6 +26,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
         "tmp.zig:1:1: error: function with calling convention 'ccc' cannot be async",
         "tmp.zig:5:9: note: @frame() causes function to be async",
     );
+
     cases.add(
         "invalid suspend in exported function",
         \\export fn entry() void {
-- 
cgit v1.2.3

From 5df89dafef1bb410608dae2c3c97daa644e89f75 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 16 Aug 2019 10:49:00 -0400
Subject: add test for wrong frame type used for async call

See #3063

---
 test/compile_errors.zig | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

(limited to 'test/compile_errors.zig')

diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index c4549be405..42cead93ce 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -2,6 +2,22 @@ const tests = @import("tests.zig");
 const builtin = @import("builtin");
 
 pub fn addCases(cases: *tests.CompileErrorContext) void {
+    cases.add(
+        "wrong frame type used for async call",
+        \\export fn entry() void {
+        \\    var frame: @Frame(foo) = undefined;
+        \\    frame = async bar();
+        \\}
+        \\fn foo() void {
+        \\    suspend;
+        \\}
+        \\fn bar() void {
+        \\    suspend;
+        \\}
+    ,
+        "tmp.zig:3:5: error: expected type '*@Frame(bar)', found '*@Frame(foo)'",
+    );
+
     cases.add(
         "@Frame() of generic function",
         \\export fn entry() void {
-- 
cgit v1.2.3
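Taken together, the two commits above pin down how `@Frame` may be used: it names the frame type of one concrete function, so a generic function has no single frame type, and the frame of one function cannot receive the result of an async call to another. A sketch of the usage that remains valid (illustrative names, not from the patches):

    const std = @import("std");
    const expect = std.testing.expect;

    fn add(a: i32, b: i32) i32 {
        return a + b;
    }

    test "@Frame names the frame type of a concrete function" {
        var frame = async add(1, 2); // ok: add is not generic
        comptime expect(@typeOf(frame) == @Frame(add));
        expect(await frame == 3);
    }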
From 7798054b5880860fba410b2cd12626455ef4394b Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 16 Aug 2019 11:00:21 -0400
Subject: add tests for bad implicit casting of anyframe types

See #3063

---
 test/compile_errors.zig | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

(limited to 'test/compile_errors.zig')

diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 42cead93ce..b00f090780 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -2,6 +2,27 @@ const tests = @import("tests.zig");
 const builtin = @import("builtin");
 
 pub fn addCases(cases: *tests.CompileErrorContext) void {
+    cases.add(
+        "prevent bad implicit casting of anyframe types",
+        \\export fn a() void {
+        \\    var x: anyframe = undefined;
+        \\    var y: anyframe->i32 = x;
+        \\}
+        \\export fn b() void {
+        \\    var x: i32 = undefined;
+        \\    var y: anyframe->i32 = x;
+        \\}
+        \\export fn c() void {
+        \\    var x: @Frame(func) = undefined;
+        \\    var y: anyframe->i32 = &x;
+        \\}
+        \\fn func() void {}
+    ,
+        "tmp.zig:3:28: error: expected type 'anyframe->i32', found 'anyframe'",
+        "tmp.zig:7:28: error: expected type 'anyframe->i32', found 'i32'",
+        "tmp.zig:11:29: error: expected type 'anyframe->i32', found '*@Frame(func)'",
+    );
+
     cases.add(
         "wrong frame type used for async call",
         \\export fn entry() void {
-- 
cgit v1.2.3
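For contrast with the three rejected casts in the new test case, these are the coercions that remain allowed, sketched with an illustrative `func`: a non-const frame pointer coerces to the `anyframe->T` whose result type matches, and any `anyframe->T` coerces to the typeless `anyframe`.

    fn func() void {}

    test "anyframe coercions that remain valid" {
        var frame = async func();
        var specific: anyframe->void = &frame; // *@Frame(func) -> anyframe->void: result type matches
        var generic: anyframe = specific; // anyframe->T -> anyframe: drops the result type
    }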
From 13c584d325d042879c8c56a3c41ffbf99a3346c0 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 16 Aug 2019 11:27:29 -0400
Subject: add compile error for casting const frame to anyframe

See #3063

---
 src/ir.cpp                            |  1 +
 std/event/channel.zig                 |  2 +-
 std/event/future.zig                  |  6 +++---
 std/event/lock.zig                    | 10 +++++-----
 std/event/loop.zig                    |  4 ++--
 test/compile_errors.zig               | 18 ++++++++++++++++++
 test/stage1/behavior/async_fn.zig     | 14 +++++++-------
 test/stage1/behavior/await_struct.zig |  4 ++--
 8 files changed, 39 insertions(+), 20 deletions(-)

(limited to 'test/compile_errors.zig')

diff --git a/src/ir.cpp b/src/ir.cpp
index 9589000ab0..d6fba23856 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -12112,6 +12112,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
     // *@Frame(func) to anyframe->T or anyframe
     if (actual_type->id == ZigTypeIdPointer && actual_type->data.pointer.ptr_len == PtrLenSingle &&
+        !actual_type->data.pointer.is_const &&
         actual_type->data.pointer.child_type->id == ZigTypeIdFnFrame && wanted_type->id == ZigTypeIdAnyFrame)
     {
         bool ok = true;
diff --git a/std/event/channel.zig b/std/event/channel.zig
index 91c4650dc1..a397d280de 100644
--- a/std/event/channel.zig
+++ b/std/event/channel.zig
@@ -331,7 +331,7 @@ async fn testChannelGetter(loop: *Loop, channel: *Channel(i32)) void {
     const value3 = channel.getOrNull();
     testing.expect(value3 == null);
 
-    const last_put = async testPut(channel, 4444);
+    var last_put = async testPut(channel, 4444);
     const value4 = channel.getOrNull();
     testing.expect(value4.? == 4444);
     await last_put;
diff --git a/std/event/future.zig b/std/event/future.zig
index 45bb7759c5..70e20819be 100644
--- a/std/event/future.zig
+++ b/std/event/future.zig
@@ -100,9 +100,9 @@ test "std.event.Future" {
 async fn testFuture(loop: *Loop) void {
     var future = Future(i32).init(loop);
 
-    const a = async waitOnFuture(&future);
-    const b = async waitOnFuture(&future);
-    const c = async resolveFuture(&future);
+    var a = async waitOnFuture(&future);
+    var b = async waitOnFuture(&future);
+    var c = async resolveFuture(&future);
 
     // TODO make this work:
     //const result = (await a) + (await b);
diff --git a/std/event/lock.zig b/std/event/lock.zig
index da698d9fb2..0fa65f031d 100644
--- a/std/event/lock.zig
+++ b/std/event/lock.zig
@@ -135,7 +135,7 @@ test "std.event.Lock" {
 }
 
 async fn testLock(loop: *Loop, lock: *Lock) void {
-    const handle1 = async lockRunner(lock);
+    var handle1 = async lockRunner(lock);
     var tick_node1 = Loop.NextTickNode{
         .prev = undefined,
         .next = undefined,
@@ -143,7 +143,7 @@ async fn testLock(loop: *Loop, lock: *Lock) void {
     };
     loop.onNextTick(&tick_node1);
 
-    const handle2 = async lockRunner(lock);
+    var handle2 = async lockRunner(lock);
     var tick_node2 = Loop.NextTickNode{
         .prev = undefined,
         .next = undefined,
@@ -151,7 +151,7 @@ async fn testLock(loop: *Loop, lock: *Lock) void {
     };
     loop.onNextTick(&tick_node2);
 
-    const handle3 = async lockRunner(lock);
+    var handle3 = async lockRunner(lock);
     var tick_node3 = Loop.NextTickNode{
         .prev = undefined,
         .next = undefined,
@@ -172,8 +172,8 @@ async fn lockRunner(lock: *Lock) void {
 
     var i: usize = 0;
     while (i < shared_test_data.len) : (i += 1) {
-        const lock_promise = async lock.acquire();
-        const handle = await lock_promise;
+        var lock_frame = async lock.acquire();
+        const handle = await lock_frame;
         defer handle.release();
 
         shared_test_index = 0;
diff --git a/std/event/loop.zig b/std/event/loop.zig
index 0e02addcb9..a4605c8928 100644
--- a/std/event/loop.zig
+++ b/std/event/loop.zig
@@ -900,8 +900,8 @@ test "std.event.Loop - call" {
     defer loop.deinit();
 
     var did_it = false;
-    const handle = async Loop.call(testEventLoop);
-    const handle2 = async Loop.call(testEventLoop2, &handle, &did_it);
+    var handle = async Loop.call(testEventLoop);
+    var handle2 = async Loop.call(testEventLoop2, &handle, &did_it);
 
     loop.run();
 
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index b00f090780..7c9d8fae51 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -2,6 +2,24 @@ const tests = @import("tests.zig");
 const builtin = @import("builtin");
 
 pub fn addCases(cases: *tests.CompileErrorContext) void {
+    cases.add(
+        "const frame cast to anyframe",
+        \\export fn a() void {
+        \\    const f = async func();
+        \\    resume f;
+        \\}
+        \\export fn b() void {
+        \\    const f = async func();
+        \\    var x: anyframe = &f;
+        \\}
+        \\fn func() void {
+        \\    suspend;
+        \\}
    ,
+        "tmp.zig:3:12: error: expected type 'anyframe', found '*const @Frame(func)'",
+        "tmp.zig:7:24: error: expected type 'anyframe', found '*const @Frame(func)'",
+    );
+
     cases.add(
         "prevent bad implicit casting of anyframe types",
         \\export fn a() void {
diff --git a/test/stage1/behavior/async_fn.zig b/test/stage1/behavior/async_fn.zig
index a6231e4609..b5e1d3a63f 100644
--- a/test/stage1/behavior/async_fn.zig
+++ b/test/stage1/behavior/async_fn.zig
@@ -6,7 +6,7 @@ const expectEqual = std.testing.expectEqual;
 var global_x: i32 = 1;
 
 test "simple coroutine suspend and resume" {
-    const frame = async simpleAsyncFn();
+    var frame = async simpleAsyncFn();
     expect(global_x == 2);
     resume frame;
     expect(global_x == 3);
@@ -25,7 +25,7 @@ fn simpleAsyncFn() void {
 var global_y: i32 = 1;
 
 test "pass parameter to coroutine" {
-    const p = async simpleAsyncFnWithArg(2);
+    var p = async simpleAsyncFnWithArg(2);
     expect(global_y == 3);
     resume p;
     expect(global_y == 5);
@@ -60,7 +60,7 @@ test "local variable in async function" {
 
         fn doTheTest() void {
             expect(x == 0);
-            const p = async add(1, 2);
+            var p = async add(1, 2);
             expect(x == 0);
             resume p;
             expect(x == 0);
@@ -201,7 +201,7 @@ var await_final_result: i32 = 0;
 
 test "coroutine await" {
     await_seq('a');
-    const p = async await_amain();
+    var p = async await_amain();
     await_seq('f');
     resume await_a_promise;
     await_seq('i');
@@ -210,7 +210,7 @@ test "coroutine await" {
 }
 async fn await_amain() void {
     await_seq('b');
-    const p = async await_another();
+    var p = async await_another();
     await_seq('e');
     await_final_result = await p;
     await_seq('h');
@@ -237,14 +237,14 @@ var early_final_result: i32 = 0;
 
 test "coroutine await early return" {
     early_seq('a');
-    const p = async early_amain();
+    var p = async early_amain();
     early_seq('f');
     expect(early_final_result == 1234);
     expect(std.mem.eql(u8, early_points, "abcdef"));
 }
 async fn early_amain() void {
     early_seq('b');
-    const p = async early_another();
+    var p = async early_another();
     early_seq('d');
     early_final_result = await p;
     early_seq('e');
diff --git a/test/stage1/behavior/await_struct.zig b/test/stage1/behavior/await_struct.zig
index a649b0a39b..6e4d330ea3 100644
--- a/test/stage1/behavior/await_struct.zig
+++ b/test/stage1/behavior/await_struct.zig
@@ -11,7 +11,7 @@ var await_final_result = Foo{ .x = 0 };
 
 test "coroutine await struct" {
     await_seq('a');
-    const p = async await_amain();
+    var p = async await_amain();
     await_seq('f');
     resume await_a_promise;
     await_seq('i');
@@ -20,7 +20,7 @@ test "coroutine await struct" {
 }
 async fn await_amain() void {
     await_seq('b');
-    const p = async await_another();
+    var p = async await_another();
     await_seq('e');
     await_final_result = await p;
     await_seq('h');
-- 
cgit v1.2.3
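The std hunks in this last commit show the practical consequence of the new rule: a frame that will later be resumed, awaited, or coerced to `anyframe` must be bound with `var`, because `&f` on a `const` binding has type `*const @Frame(...)`, which no longer coerces. A sketch with illustrative names:

    fn func() void {
        suspend;
    }

    test "frames used after the async call site are declared var" {
        var f = async func(); // with `const f`, the coercion below would be a compile error
        var handle: anyframe = &f; // ok: *@Frame(func) is not const
        resume handle;
    }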