From 54e716afdcb0609cfc42229ad925e6dc9b07a66f Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 27 Jun 2019 23:40:36 -0400
Subject: remove coroutines implementation and promise type
---
src/tokenizer.cpp | 2 --
1 file changed, 2 deletions(-)
(limited to 'src/tokenizer.cpp')
diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp
index 783b6e0e20..0869c3ba9c 100644
--- a/src/tokenizer.cpp
+++ b/src/tokenizer.cpp
@@ -136,7 +136,6 @@ static const struct ZigKeyword zig_keywords[] = {
{"or", TokenIdKeywordOr},
{"orelse", TokenIdKeywordOrElse},
{"packed", TokenIdKeywordPacked},
- {"promise", TokenIdKeywordPromise},
{"pub", TokenIdKeywordPub},
{"resume", TokenIdKeywordResume},
{"return", TokenIdKeywordReturn},
@@ -1558,7 +1557,6 @@ const char * token_name(TokenId id) {
case TokenIdKeywordOr: return "or";
case TokenIdKeywordOrElse: return "orelse";
case TokenIdKeywordPacked: return "packed";
- case TokenIdKeywordPromise: return "promise";
case TokenIdKeywordPub: return "pub";
case TokenIdKeywordReturn: return "return";
case TokenIdKeywordLinkSection: return "linksection";
--
cgit v1.2.3
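
Because this log is filtered to src/tokenizer.cpp, the first patch shows only the tokenizer half of the coroutine removal: `promise` is dropped from the `zig_keywords[]` table and from the `token_name` switch, which is what turns the former keyword back into an ordinary identifier. Both the C++ tokenizer and the self-hosted one in std/zig/tokenizer.zig (touched in the next patch) resolve keywords by scanning a static table. The sketch below shows that lookup shape in Zig; the `getKeyword` helper and the trimmed keyword list are illustrative assumptions, not the exact stage1/stage2 code.

    const std = @import("std");

    const Id = enum {
        Keyword_or,
        Keyword_packed,
        Keyword_pub,
    };

    const Keyword = struct {
        bytes: []const u8,
        id: Id,
    };

    // Trimmed-down table in the same style as the keyword lists in the patches;
    // removing an entry here is all it takes for that word to tokenize as a
    // plain identifier again.
    const keywords = [_]Keyword{
        Keyword{ .bytes = "or", .id = Id.Keyword_or },
        Keyword{ .bytes = "packed", .id = Id.Keyword_packed },
        Keyword{ .bytes = "pub", .id = Id.Keyword_pub },
    };

    // Linear scan over the table; null means "not a keyword".
    fn getKeyword(bytes: []const u8) ?Id {
        for (keywords) |kw| {
            if (std.mem.eql(u8, kw.bytes, bytes)) return kw.id;
        }
        return null;
    }

    test "former keywords fall back to identifiers" {
        std.testing.expect(getKeyword("pub").? == Id.Keyword_pub);
        std.testing.expect(getKeyword("promise") == null);
    }
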
From ee64a22045ccbc39773779d4e386e25f563c8a90 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 26 Jul 2019 19:52:35 -0400
Subject: add the `anyframe` and `anyframe->T` types
---
BRANCH_TODO | 6 +-
src/all_types.hpp | 21 +++++++
src/analyze.cpp | 111 ++++++++++++++++++++++++++++++++++++-
src/analyze.hpp | 1 +
src/ast_render.cpp | 10 ++++
src/codegen.cpp | 16 +++++-
src/ir.cpp | 86 +++++++++++++++++++++++++++-
src/ir_print.cpp | 12 ++++
src/parser.cpp | 22 +++++++-
src/tokenizer.cpp | 2 +
src/tokenizer.hpp | 1 +
std/hash_map.zig | 1 +
std/testing.zig | 1 +
std/zig/ast.zig | 16 +++---
std/zig/parse.zig | 40 ++++++-------
std/zig/parser_test.zig | 4 +-
std/zig/render.zig | 10 ++--
std/zig/tokenizer.zig | 4 +-
test/stage1/behavior/type_info.zig | 23 +++++++-
19 files changed, 337 insertions(+), 50 deletions(-)
(limited to 'src/tokenizer.cpp')
diff --git a/BRANCH_TODO b/BRANCH_TODO
index d10bc704d8..e2c4fec436 100644
--- a/BRANCH_TODO
+++ b/BRANCH_TODO
@@ -1,6 +1,4 @@
- * reimplement @frameSize with Prefix Data
- * reimplement with function splitting rather than switch
- * add the `anyframe` type and `anyframe->T`
+ * make the anyframe type and anyframe->T type work with resume
* await
* await of a non async function
* await in single-threaded mode
@@ -12,3 +10,5 @@
* implicit cast of normal function to async function should be allowed when it is inferred to be async
* go over the commented out tests
* revive std.event.Loop
+ * reimplement with function splitting rather than switch
+ * @typeInfo for @Frame(func)
diff --git a/src/all_types.hpp b/src/all_types.hpp
index c9bdfabb0d..1096feade0 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -479,6 +479,7 @@ enum NodeType {
NodeTypeResume,
NodeTypeAwaitExpr,
NodeTypeSuspend,
+ NodeTypeAnyFrameType,
NodeTypeEnumLiteral,
};
@@ -936,6 +937,10 @@ struct AstNodeSuspend {
AstNode *block;
};
+struct AstNodeAnyFrameType {
+ AstNode *payload_type; // can be NULL
+};
+
struct AstNodeEnumLiteral {
Token *period;
Token *identifier;
@@ -1001,6 +1006,7 @@ struct AstNode {
AstNodeResumeExpr resume_expr;
AstNodeAwaitExpr await_expr;
AstNodeSuspend suspend;
+ AstNodeAnyFrameType anyframe_type;
AstNodeEnumLiteral enum_literal;
} data;
};
@@ -1253,6 +1259,7 @@ enum ZigTypeId {
ZigTypeIdArgTuple,
ZigTypeIdOpaque,
ZigTypeIdCoroFrame,
+ ZigTypeIdAnyFrame,
ZigTypeIdVector,
ZigTypeIdEnumLiteral,
};
@@ -1272,6 +1279,10 @@ struct ZigTypeCoroFrame {
ZigType *locals_struct;
};
+struct ZigTypeAnyFrame {
+ ZigType *result_type; // null if `anyframe` instead of `anyframe->T`
+};
+
struct ZigType {
ZigTypeId id;
Buf name;
@@ -1298,11 +1309,13 @@ struct ZigType {
ZigTypeVector vector;
ZigTypeOpaque opaque;
ZigTypeCoroFrame frame;
+ ZigTypeAnyFrame any_frame;
} data;
// use these fields to make sure we don't duplicate type table entries for the same type
ZigType *pointer_parent[2]; // [0 - mut, 1 - const]
ZigType *optional_parent;
+ ZigType *any_frame_parent;
// If we generate a constant name value for this type, we memoize it here.
// The type of this is array
ConstExprValue *cached_const_name_val;
@@ -1781,6 +1794,7 @@ struct CodeGen {
ZigType *entry_arg_tuple;
ZigType *entry_enum_literal;
ZigType *entry_frame_header;
+ ZigType *entry_any_frame;
} builtin_types;
ZigType *align_amt_type;
ZigType *stack_trace_type;
@@ -2208,6 +2222,7 @@ enum IrInstructionId {
IrInstructionIdSetRuntimeSafety,
IrInstructionIdSetFloatMode,
IrInstructionIdArrayType,
+ IrInstructionIdAnyFrameType,
IrInstructionIdSliceType,
IrInstructionIdGlobalAsm,
IrInstructionIdAsm,
@@ -2709,6 +2724,12 @@ struct IrInstructionPtrType {
bool is_allow_zero;
};
+struct IrInstructionAnyFrameType {
+ IrInstruction base;
+
+ IrInstruction *payload_type;
+};
+
struct IrInstructionSliceType {
IrInstruction base;
diff --git a/src/analyze.cpp b/src/analyze.cpp
index e1fedab7cf..e47be8f14c 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -256,6 +256,7 @@ AstNode *type_decl_node(ZigType *type_entry) {
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdVector:
+ case ZigTypeIdAnyFrame:
return nullptr;
}
zig_unreachable();
@@ -322,6 +323,7 @@ bool type_is_resolved(ZigType *type_entry, ResolveStatus status) {
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdVector:
+ case ZigTypeIdAnyFrame:
return true;
}
zig_unreachable();
@@ -354,6 +356,31 @@ ZigType *get_smallest_unsigned_int_type(CodeGen *g, uint64_t x) {
return get_int_type(g, false, bits_needed_for_unsigned(x));
}
+ZigType *get_any_frame_type(CodeGen *g, ZigType *result_type) {
+ if (result_type != nullptr && result_type->any_frame_parent != nullptr) {
+ return result_type->any_frame_parent;
+ } else if (result_type == nullptr && g->builtin_types.entry_any_frame != nullptr) {
+ return g->builtin_types.entry_any_frame;
+ }
+
+ ZigType *entry = new_type_table_entry(ZigTypeIdAnyFrame);
+ entry->abi_size = g->builtin_types.entry_usize->abi_size;
+ entry->size_in_bits = g->builtin_types.entry_usize->size_in_bits;
+ entry->abi_align = g->builtin_types.entry_usize->abi_align;
+ entry->data.any_frame.result_type = result_type;
+ buf_init_from_str(&entry->name, "anyframe");
+ if (result_type != nullptr) {
+ buf_appendf(&entry->name, "->%s", buf_ptr(&result_type->name));
+ }
+
+ if (result_type != nullptr) {
+ result_type->any_frame_parent = entry;
+ } else if (result_type == nullptr) {
+ g->builtin_types.entry_any_frame = entry;
+ }
+ return entry;
+}
+
static const char *ptr_len_to_star_str(PtrLen ptr_len) {
switch (ptr_len) {
case PtrLenSingle:
@@ -1080,6 +1107,7 @@ static Error emit_error_unless_type_allowed_in_packed_struct(CodeGen *g, ZigType
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
add_node_error(g, source_node,
buf_sprintf("type '%s' not allowed in packed struct; no guaranteed in-memory representation",
buf_ptr(&type_entry->name)));
@@ -1169,6 +1197,7 @@ bool type_allowed_in_extern(CodeGen *g, ZigType *type_entry) {
case ZigTypeIdArgTuple:
case ZigTypeIdVoid:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
return false;
case ZigTypeIdOpaque:
case ZigTypeIdUnreachable:
@@ -1340,6 +1369,7 @@ static ZigType *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_sc
case ZigTypeIdFn:
case ZigTypeIdVector:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
switch (type_requires_comptime(g, type_entry)) {
case ReqCompTimeNo:
break;
@@ -1436,6 +1466,7 @@ static ZigType *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_sc
case ZigTypeIdFn:
case ZigTypeIdVector:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
switch (type_requires_comptime(g, fn_type_id.return_type)) {
case ReqCompTimeInvalid:
return g->builtin_types.entry_invalid;
@@ -2997,6 +3028,7 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) {
case NodeTypeAwaitExpr:
case NodeTypeSuspend:
case NodeTypeEnumLiteral:
+ case NodeTypeAnyFrameType:
zig_unreachable();
}
}
@@ -3049,6 +3081,7 @@ ZigType *validate_var_type(CodeGen *g, AstNode *source_node, ZigType *type_entry
case ZigTypeIdBoundFn:
case ZigTypeIdVector:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
return type_entry;
}
zig_unreachable();
@@ -3550,6 +3583,7 @@ bool is_container(ZigType *type_entry) {
case ZigTypeIdOpaque:
case ZigTypeIdVector:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
return false;
}
zig_unreachable();
@@ -3607,6 +3641,7 @@ Error resolve_container_type(CodeGen *g, ZigType *type_entry) {
case ZigTypeIdOpaque:
case ZigTypeIdVector:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
zig_unreachable();
}
zig_unreachable();
@@ -3615,11 +3650,13 @@ Error resolve_container_type(CodeGen *g, ZigType *type_entry) {
ZigType *get_src_ptr_type(ZigType *type) {
if (type->id == ZigTypeIdPointer) return type;
if (type->id == ZigTypeIdFn) return type;
+ if (type->id == ZigTypeIdAnyFrame) return type;
if (type->id == ZigTypeIdOptional) {
if (type->data.maybe.child_type->id == ZigTypeIdPointer) {
return type->data.maybe.child_type->data.pointer.allow_zero ? nullptr : type->data.maybe.child_type;
}
if (type->data.maybe.child_type->id == ZigTypeIdFn) return type->data.maybe.child_type;
+ if (type->data.maybe.child_type->id == ZigTypeIdAnyFrame) return type->data.maybe.child_type;
}
return nullptr;
}
@@ -3635,6 +3672,13 @@ bool type_is_nonnull_ptr(ZigType *type) {
return get_codegen_ptr_type(type) == type && !ptr_allows_addr_zero(type);
}
+static uint32_t get_coro_frame_align_bytes(CodeGen *g) {
+ uint32_t a = g->pointer_size_bytes * 2;
+ // promises have at least alignment 8 so that we can have 3 extra bits when doing atomicrmw
+ if (a < 8) a = 8;
+ return a;
+}
+
uint32_t get_ptr_align(CodeGen *g, ZigType *type) {
ZigType *ptr_type = get_src_ptr_type(type);
if (ptr_type->id == ZigTypeIdPointer) {
@@ -3646,6 +3690,8 @@ uint32_t get_ptr_align(CodeGen *g, ZigType *type) {
// when getting the alignment of `?extern fn() void`.
// See http://lists.llvm.org/pipermail/llvm-dev/2018-September/126142.html
return (ptr_type->data.fn.fn_type_id.alignment == 0) ? 1 : ptr_type->data.fn.fn_type_id.alignment;
+ } else if (ptr_type->id == ZigTypeIdAnyFrame) {
+ return get_coro_frame_align_bytes(g);
} else {
zig_unreachable();
}
@@ -3657,6 +3703,8 @@ bool get_ptr_const(ZigType *type) {
return ptr_type->data.pointer.is_const;
} else if (ptr_type->id == ZigTypeIdFn) {
return true;
+ } else if (ptr_type->id == ZigTypeIdAnyFrame) {
+ return true;
} else {
zig_unreachable();
}
@@ -4153,6 +4201,7 @@ bool handle_is_ptr(ZigType *type_entry) {
case ZigTypeIdFn:
case ZigTypeIdEnum:
case ZigTypeIdVector:
+ case ZigTypeIdAnyFrame:
return false;
case ZigTypeIdArray:
case ZigTypeIdStruct:
@@ -4404,6 +4453,9 @@ static uint32_t hash_const_val(ConstExprValue *const_val) {
case ZigTypeIdCoroFrame:
// TODO better hashing algorithm
return 675741936;
+ case ZigTypeIdAnyFrame:
+ // TODO better hashing algorithm
+ return 3747294894;
case ZigTypeIdBoundFn:
case ZigTypeIdInvalid:
case ZigTypeIdUnreachable:
@@ -4469,6 +4521,7 @@ static bool can_mutate_comptime_var_state(ConstExprValue *value) {
case ZigTypeIdErrorSet:
case ZigTypeIdEnum:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
return false;
case ZigTypeIdPointer:
@@ -4541,6 +4594,7 @@ static bool return_type_is_cacheable(ZigType *return_type) {
case ZigTypeIdPointer:
case ZigTypeIdVector:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
return true;
case ZigTypeIdArray:
@@ -4673,6 +4727,7 @@ OnePossibleValue type_has_one_possible_value(CodeGen *g, ZigType *type_entry) {
case ZigTypeIdFloat:
case ZigTypeIdErrorUnion:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
return OnePossibleValueNo;
case ZigTypeIdUndefined:
case ZigTypeIdNull:
@@ -4761,6 +4816,7 @@ ReqCompTime type_requires_comptime(CodeGen *g, ZigType *type_entry) {
case ZigTypeIdVoid:
case ZigTypeIdUnreachable:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
return ReqCompTimeNo;
}
zig_unreachable();
@@ -5433,6 +5489,8 @@ bool const_values_equal(CodeGen *g, ConstExprValue *a, ConstExprValue *b) {
return true;
case ZigTypeIdCoroFrame:
zig_panic("TODO");
+ case ZigTypeIdAnyFrame:
+ zig_panic("TODO");
case ZigTypeIdUndefined:
zig_panic("TODO");
case ZigTypeIdNull:
@@ -5786,7 +5844,11 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) {
return;
}
case ZigTypeIdCoroFrame:
- buf_appendf(buf, "(TODO: coroutine frame value)");
+ buf_appendf(buf, "(TODO: async function frame value)");
+ return;
+
+ case ZigTypeIdAnyFrame:
+ buf_appendf(buf, "(TODO: anyframe value)");
return;
}
@@ -5836,6 +5898,7 @@ uint32_t type_id_hash(TypeId x) {
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
zig_unreachable();
case ZigTypeIdErrorUnion:
return hash_ptr(x.data.error_union.err_set_type) ^ hash_ptr(x.data.error_union.payload_type);
@@ -5885,6 +5948,7 @@ bool type_id_eql(TypeId a, TypeId b) {
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
zig_unreachable();
case ZigTypeIdErrorUnion:
return a.data.error_union.err_set_type == b.data.error_union.err_set_type &&
@@ -6051,6 +6115,7 @@ static const ZigTypeId all_type_ids[] = {
ZigTypeIdArgTuple,
ZigTypeIdOpaque,
ZigTypeIdCoroFrame,
+ ZigTypeIdAnyFrame,
ZigTypeIdVector,
ZigTypeIdEnumLiteral,
};
@@ -6116,10 +6181,12 @@ size_t type_id_index(ZigType *entry) {
return 21;
case ZigTypeIdCoroFrame:
return 22;
- case ZigTypeIdVector:
+ case ZigTypeIdAnyFrame:
return 23;
- case ZigTypeIdEnumLiteral:
+ case ZigTypeIdVector:
return 24;
+ case ZigTypeIdEnumLiteral:
+ return 25;
}
zig_unreachable();
}
@@ -6178,6 +6245,8 @@ const char *type_id_name(ZigTypeId id) {
return "Vector";
case ZigTypeIdCoroFrame:
return "Frame";
+ case ZigTypeIdAnyFrame:
+ return "AnyFrame";
}
zig_unreachable();
}
@@ -7398,6 +7467,40 @@ static void resolve_llvm_types_coro_frame(CodeGen *g, ZigType *frame_type, Resol
frame_type->llvm_di_type = frame_type->data.frame.locals_struct->llvm_di_type;
}
+static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, ResolveStatus wanted_resolve_status) {
+ if (any_frame_type->llvm_di_type != nullptr) return;
+
+ ZigType *result_type = any_frame_type->data.any_frame.result_type;
+ Buf *name = buf_sprintf("(%s header)", buf_ptr(&any_frame_type->name));
+
+ ZigType *frame_header_type;
+ if (result_type == nullptr || !type_has_bits(result_type)) {
+ const char *field_names[] = {"resume_index", "fn_ptr", "awaiter"};
+ ZigType *field_types[] = {
+ g->builtin_types.entry_usize,
+ g->builtin_types.entry_usize,
+ g->builtin_types.entry_usize,
+ };
+ frame_header_type = get_struct_type(g, buf_ptr(name), field_names, field_types, 3);
+ } else {
+ ZigType *ptr_result_type = get_pointer_to_type(g, result_type, false);
+
+ const char *field_names[] = {"resume_index", "fn_ptr", "awaiter", "result_ptr", "result"};
+ ZigType *field_types[] = {
+ g->builtin_types.entry_usize,
+ g->builtin_types.entry_usize,
+ g->builtin_types.entry_usize,
+ ptr_result_type,
+ result_type,
+ };
+ frame_header_type = get_struct_type(g, buf_ptr(name), field_names, field_types, 5);
+ }
+
+ ZigType *ptr_type = get_pointer_to_type(g, frame_header_type, false);
+ any_frame_type->llvm_type = get_llvm_type(g, ptr_type);
+ any_frame_type->llvm_di_type = get_llvm_di_type(g, ptr_type);
+}
+
static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_resolve_status) {
assert(type->id == ZigTypeIdOpaque || type_is_resolved(type, ResolveStatusSizeKnown));
assert(wanted_resolve_status > ResolveStatusSizeKnown);
@@ -7460,6 +7563,8 @@ static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_r
}
case ZigTypeIdCoroFrame:
return resolve_llvm_types_coro_frame(g, type, wanted_resolve_status);
+ case ZigTypeIdAnyFrame:
+ return resolve_llvm_types_any_frame(g, type, wanted_resolve_status);
}
zig_unreachable();
}
diff --git a/src/analyze.hpp b/src/analyze.hpp
index 47ff4344ba..3115c79b40 100644
--- a/src/analyze.hpp
+++ b/src/analyze.hpp
@@ -41,6 +41,7 @@ ZigType *get_opaque_type(CodeGen *g, Scope *scope, AstNode *source_node, const c
ZigType *get_struct_type(CodeGen *g, const char *type_name, const char *field_names[],
ZigType *field_types[], size_t field_count);
ZigType *get_test_fn_type(CodeGen *g);
+ZigType *get_any_frame_type(CodeGen *g, ZigType *result_type);
bool handle_is_ptr(ZigType *type_entry);
bool type_has_bits(ZigType *type_entry);
diff --git a/src/ast_render.cpp b/src/ast_render.cpp
index d97f58fdec..4d6bae311b 100644
--- a/src/ast_render.cpp
+++ b/src/ast_render.cpp
@@ -259,6 +259,8 @@ static const char *node_type_str(NodeType node_type) {
return "Suspend";
case NodeTypePointerType:
return "PointerType";
+ case NodeTypeAnyFrameType:
+ return "AnyFrameType";
case NodeTypeEnumLiteral:
return "EnumLiteral";
}
@@ -847,6 +849,14 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
render_node_ungrouped(ar, node->data.inferred_array_type.child_type);
break;
}
+ case NodeTypeAnyFrameType: {
+ fprintf(ar->f, "anyframe");
+ if (node->data.anyframe_type.payload_type != nullptr) {
+ fprintf(ar->f, "->");
+ render_node_grouped(ar, node->data.anyframe_type.payload_type);
+ }
+ break;
+ }
case NodeTypeErrorType:
fprintf(ar->f, "anyerror");
break;
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 63018cb6a3..c666317c17 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -4947,6 +4947,7 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdSetRuntimeSafety:
case IrInstructionIdSetFloatMode:
case IrInstructionIdArrayType:
+ case IrInstructionIdAnyFrameType:
case IrInstructionIdSliceType:
case IrInstructionIdSizeOf:
case IrInstructionIdSwitchTarget:
@@ -5438,7 +5439,9 @@ static LLVMValueRef pack_const_int(CodeGen *g, LLVMTypeRef big_int_type_ref, Con
return val;
}
case ZigTypeIdCoroFrame:
- zig_panic("TODO bit pack a coroutine frame");
+ zig_panic("TODO bit pack an async function frame");
+ case ZigTypeIdAnyFrame:
+ zig_panic("TODO bit pack an anyframe");
}
zig_unreachable();
}
@@ -5961,6 +5964,8 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c
zig_unreachable();
case ZigTypeIdCoroFrame:
zig_panic("TODO");
+ case ZigTypeIdAnyFrame:
+ zig_panic("TODO");
}
zig_unreachable();
}
@@ -7176,6 +7181,7 @@ Buf *codegen_generate_builtin_source(CodeGen *g) {
" ArgTuple: void,\n"
" Opaque: void,\n"
" Frame: void,\n"
+ " AnyFrame: AnyFrame,\n"
" Vector: Vector,\n"
" EnumLiteral: void,\n"
"\n\n"
@@ -7291,6 +7297,10 @@ Buf *codegen_generate_builtin_source(CodeGen *g) {
" args: []FnArg,\n"
" };\n"
"\n"
+ " pub const AnyFrame = struct {\n"
+ " child: ?type,\n"
+ " };\n"
+ "\n"
" pub const Vector = struct {\n"
" len: comptime_int,\n"
" child: type,\n"
@@ -8448,6 +8458,7 @@ static void prepend_c_type_to_decl_list(CodeGen *g, GenH *gen_h, ZigType *type_e
case ZigTypeIdErrorUnion:
case ZigTypeIdErrorSet:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
zig_unreachable();
case ZigTypeIdVoid:
case ZigTypeIdUnreachable:
@@ -8632,6 +8643,7 @@ static void get_c_type(CodeGen *g, GenH *gen_h, ZigType *type_entry, Buf *out_bu
case ZigTypeIdNull:
case ZigTypeIdArgTuple:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
zig_unreachable();
}
}
@@ -8800,7 +8812,9 @@ static void gen_h_file(CodeGen *g) {
case ZigTypeIdFn:
case ZigTypeIdVector:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
zig_unreachable();
+
case ZigTypeIdEnum:
if (type_entry->data.enumeration.layout == ContainerLayoutExtern) {
fprintf(out_h, "enum %s {\n", buf_ptr(type_h_name(type_entry)));
diff --git a/src/ir.cpp b/src/ir.cpp
index 7a5af347b7..e6d987a2ee 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -303,6 +303,7 @@ static bool types_have_same_zig_comptime_repr(ZigType *a, ZigType *b) {
case ZigTypeIdBoundFn:
case ZigTypeIdErrorSet:
case ZigTypeIdOpaque:
+ case ZigTypeIdAnyFrame:
return true;
case ZigTypeIdFloat:
return a->data.floating.bit_count == b->data.floating.bit_count;
@@ -563,6 +564,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionArrayType *) {
return IrInstructionIdArrayType;
}
+static constexpr IrInstructionId ir_instruction_id(IrInstructionAnyFrameType *) {
+ return IrInstructionIdAnyFrameType;
+}
+
static constexpr IrInstructionId ir_instruction_id(IrInstructionSliceType *) {
return IrInstructionIdSliceType;
}
@@ -1696,6 +1701,16 @@ static IrInstruction *ir_build_array_type(IrBuilder *irb, Scope *scope, AstNode
return &instruction->base;
}
+static IrInstruction *ir_build_anyframe_type(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ IrInstruction *payload_type)
+{
+    IrInstructionAnyFrameType *instruction = ir_build_instruction<IrInstructionAnyFrameType>(irb, scope, source_node);
+ instruction->payload_type = payload_type;
+
+ if (payload_type != nullptr) ir_ref_instruction(payload_type, irb->current_basic_block);
+
+ return &instruction->base;
+}
static IrInstruction *ir_build_slice_type(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *child_type, bool is_const, bool is_volatile, IrInstruction *align_value, bool is_allow_zero)
{
@@ -6515,6 +6530,22 @@ static IrInstruction *ir_gen_array_type(IrBuilder *irb, Scope *scope, AstNode *n
}
}
+static IrInstruction *ir_gen_anyframe_type(IrBuilder *irb, Scope *scope, AstNode *node) {
+ assert(node->type == NodeTypeAnyFrameType);
+
+ AstNode *payload_type_node = node->data.anyframe_type.payload_type;
+ IrInstruction *payload_type_value = nullptr;
+
+ if (payload_type_node != nullptr) {
+ payload_type_value = ir_gen_node(irb, payload_type_node, scope);
+ if (payload_type_value == irb->codegen->invalid_instruction)
+ return payload_type_value;
+
+ }
+
+ return ir_build_anyframe_type(irb, scope, node, payload_type_value);
+}
+
static IrInstruction *ir_gen_undefined_literal(IrBuilder *irb, Scope *scope, AstNode *node) {
assert(node->type == NodeTypeUndefinedLiteral);
return ir_build_const_undefined(irb, scope, node);
@@ -7884,6 +7915,8 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
return ir_lval_wrap(irb, scope, ir_gen_array_type(irb, scope, node), lval, result_loc);
case NodeTypePointerType:
return ir_lval_wrap(irb, scope, ir_gen_pointer_type(irb, scope, node), lval, result_loc);
+ case NodeTypeAnyFrameType:
+ return ir_lval_wrap(irb, scope, ir_gen_anyframe_type(irb, scope, node), lval, result_loc);
case NodeTypeStringLiteral:
return ir_lval_wrap(irb, scope, ir_gen_string_literal(irb, scope, node), lval, result_loc);
case NodeTypeUndefinedLiteral:
@@ -12775,6 +12808,7 @@ static IrInstruction *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp *
case ZigTypeIdArgTuple:
case ZigTypeIdEnum:
case ZigTypeIdEnumLiteral:
+ case ZigTypeIdAnyFrame:
operator_allowed = is_equality_cmp;
break;
@@ -14155,6 +14189,7 @@ static IrInstruction *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructio
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
ir_add_error(ira, target,
buf_sprintf("invalid export target '%s'", buf_ptr(&type_value->name)));
break;
@@ -14180,6 +14215,7 @@ static IrInstruction *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructio
case ZigTypeIdOpaque:
case ZigTypeIdEnumLiteral:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
ir_add_error(ira, target,
buf_sprintf("invalid export target type '%s'", buf_ptr(&target->value.type->name)));
break;
@@ -15720,7 +15756,9 @@ static IrInstruction *ir_analyze_optional_type(IrAnalyze *ira, IrInstructionUnOp
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
return ir_const_type(ira, &un_op_instruction->base, get_optional_type(ira->codegen, type_entry));
+
case ZigTypeIdUnreachable:
case ZigTypeIdOpaque:
ir_add_error_node(ira, un_op_instruction->base.source_node,
@@ -17443,6 +17481,20 @@ static IrInstruction *ir_analyze_instruction_set_float_mode(IrAnalyze *ira,
return ir_const_void(ira, &instruction->base);
}
+static IrInstruction *ir_analyze_instruction_any_frame_type(IrAnalyze *ira,
+ IrInstructionAnyFrameType *instruction)
+{
+ ZigType *payload_type = nullptr;
+ if (instruction->payload_type != nullptr) {
+ payload_type = ir_resolve_type(ira, instruction->payload_type->child);
+ if (type_is_invalid(payload_type))
+ return ira->codegen->invalid_instruction;
+ }
+
+ ZigType *any_frame_type = get_any_frame_type(ira->codegen, payload_type);
+ return ir_const_type(ira, &instruction->base, any_frame_type);
+}
+
static IrInstruction *ir_analyze_instruction_slice_type(IrAnalyze *ira,
IrInstructionSliceType *slice_type_instruction)
{
@@ -17492,6 +17544,7 @@ static IrInstruction *ir_analyze_instruction_slice_type(IrAnalyze *ira,
case ZigTypeIdBoundFn:
case ZigTypeIdVector:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
{
ResolveStatus needed_status = (align_bytes == 0) ?
ResolveStatusZeroBitsKnown : ResolveStatusAlignmentKnown;
@@ -17607,6 +17660,7 @@ static IrInstruction *ir_analyze_instruction_array_type(IrAnalyze *ira,
case ZigTypeIdBoundFn:
case ZigTypeIdVector:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
{
if ((err = ensure_complete_type(ira->codegen, child_type)))
return ira->codegen->invalid_instruction;
@@ -17658,6 +17712,7 @@ static IrInstruction *ir_analyze_instruction_size_of(IrAnalyze *ira,
case ZigTypeIdFn:
case ZigTypeIdVector:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
{
uint64_t size_in_bytes = type_size(ira->codegen, type_entry);
return ir_const_unsigned(ira, &size_of_instruction->base, size_in_bytes);
@@ -18222,6 +18277,7 @@ static IrInstruction *ir_analyze_instruction_switch_target(IrAnalyze *ira,
case ZigTypeIdOpaque:
case ZigTypeIdVector:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
ir_add_error(ira, &switch_target_instruction->base,
buf_sprintf("invalid switch target type '%s'", buf_ptr(&target_type->name)));
return ira->codegen->invalid_instruction;
@@ -19656,6 +19712,22 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInstruction *source_instr
break;
}
+ case ZigTypeIdAnyFrame: {
+ result = create_const_vals(1);
+ result->special = ConstValSpecialStatic;
+ result->type = ir_type_info_get_type(ira, "AnyFrame", nullptr);
+
+ ConstExprValue *fields = create_const_vals(1);
+ result->data.x_struct.fields = fields;
+
+ // child: ?type
+ ensure_field_index(result->type, "child", 0);
+ fields[0].special = ConstValSpecialStatic;
+ fields[0].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type);
+ fields[0].data.x_optional = (type_entry->data.any_frame.result_type == nullptr) ? nullptr :
+ create_const_type(ira->codegen, type_entry->data.any_frame.result_type);
+ break;
+ }
case ZigTypeIdEnum:
{
result = create_const_vals(1);
@@ -20062,7 +20134,7 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInstruction *source_instr
break;
}
case ZigTypeIdCoroFrame:
- zig_panic("TODO @typeInfo for coro frames");
+ zig_panic("TODO @typeInfo for async function frames");
}
assert(result != nullptr);
@@ -21852,6 +21924,7 @@ static IrInstruction *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstruct
case ZigTypeIdFn:
case ZigTypeIdVector:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
{
uint64_t align_in_bytes = get_abi_alignment(ira->codegen, type_entry);
return ir_const_unsigned(ira, &instruction->base, align_in_bytes);
@@ -23004,7 +23077,9 @@ static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue
case ZigTypeIdUnion:
zig_panic("TODO buf_write_value_bytes union type");
case ZigTypeIdCoroFrame:
- zig_panic("TODO buf_write_value_bytes coro frame type");
+ zig_panic("TODO buf_write_value_bytes async fn frame type");
+ case ZigTypeIdAnyFrame:
+ zig_panic("TODO buf_write_value_bytes anyframe type");
}
zig_unreachable();
}
@@ -23185,7 +23260,9 @@ static Error buf_read_value_bytes(IrAnalyze *ira, CodeGen *codegen, AstNode *sou
case ZigTypeIdUnion:
zig_panic("TODO buf_read_value_bytes union type");
case ZigTypeIdCoroFrame:
- zig_panic("TODO buf_read_value_bytes coro frame type");
+ zig_panic("TODO buf_read_value_bytes async fn frame type");
+ case ZigTypeIdAnyFrame:
+ zig_panic("TODO buf_read_value_bytes anyframe type");
}
zig_unreachable();
}
@@ -24327,6 +24404,8 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_set_runtime_safety(ira, (IrInstructionSetRuntimeSafety *)instruction);
case IrInstructionIdSetFloatMode:
return ir_analyze_instruction_set_float_mode(ira, (IrInstructionSetFloatMode *)instruction);
+ case IrInstructionIdAnyFrameType:
+ return ir_analyze_instruction_any_frame_type(ira, (IrInstructionAnyFrameType *)instruction);
case IrInstructionIdSliceType:
return ir_analyze_instruction_slice_type(ira, (IrInstructionSliceType *)instruction);
case IrInstructionIdGlobalAsm:
@@ -24707,6 +24786,7 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdStructFieldPtr:
case IrInstructionIdArrayType:
case IrInstructionIdSliceType:
+ case IrInstructionIdAnyFrameType:
case IrInstructionIdSizeOf:
case IrInstructionIdTestNonNull:
case IrInstructionIdOptionalUnwrapPtr:
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index ae467bdc8c..284ebed2f3 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -471,6 +471,15 @@ static void ir_print_slice_type(IrPrint *irp, IrInstructionSliceType *instructio
ir_print_other_instruction(irp, instruction->child_type);
}
+static void ir_print_any_frame_type(IrPrint *irp, IrInstructionAnyFrameType *instruction) {
+ if (instruction->payload_type == nullptr) {
+ fprintf(irp->f, "anyframe");
+ } else {
+ fprintf(irp->f, "anyframe->");
+ ir_print_other_instruction(irp, instruction->payload_type);
+ }
+}
+
static void ir_print_global_asm(IrPrint *irp, IrInstructionGlobalAsm *instruction) {
fprintf(irp->f, "asm(\"%s\")", buf_ptr(instruction->asm_code));
}
@@ -1629,6 +1638,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdSliceType:
ir_print_slice_type(irp, (IrInstructionSliceType *)instruction);
break;
+ case IrInstructionIdAnyFrameType:
+ ir_print_any_frame_type(irp, (IrInstructionAnyFrameType *)instruction);
+ break;
case IrInstructionIdGlobalAsm:
ir_print_global_asm(irp, (IrInstructionGlobalAsm *)instruction);
break;
diff --git a/src/parser.cpp b/src/parser.cpp
index b1a593d9c9..82312aacf3 100644
--- a/src/parser.cpp
+++ b/src/parser.cpp
@@ -282,6 +282,9 @@ static AstNode *ast_parse_prefix_op_expr(
case NodeTypeAwaitExpr:
right = &prefix->data.await_expr.expr;
break;
+ case NodeTypeAnyFrameType:
+ right = &prefix->data.anyframe_type.payload_type;
+ break;
case NodeTypeArrayType:
right = &prefix->data.array_type.child_type;
break;
@@ -1640,6 +1643,10 @@ static AstNode *ast_parse_primary_type_expr(ParseContext *pc) {
if (null != nullptr)
return ast_create_node(pc, NodeTypeNullLiteral, null);
+ Token *anyframe = eat_token_if(pc, TokenIdKeywordAnyFrame);
+ if (anyframe != nullptr)
+ return ast_create_node(pc, NodeTypeAnyFrameType, anyframe);
+
Token *true_token = eat_token_if(pc, TokenIdKeywordTrue);
if (true_token != nullptr) {
AstNode *res = ast_create_node(pc, NodeTypeBoolLiteral, true_token);
@@ -2510,7 +2517,7 @@ static AstNode *ast_parse_prefix_op(ParseContext *pc) {
// PrefixTypeOp
// <- QUESTIONMARK
-// / KEYWORD_promise MINUSRARROW
+// / KEYWORD_anyframe MINUSRARROW
// / ArrayTypeStart (ByteAlign / KEYWORD_const / KEYWORD_volatile)*
// / PtrTypeStart (KEYWORD_align LPAREN Expr (COLON INTEGER COLON INTEGER)? RPAREN / KEYWORD_const / KEYWORD_volatile)*
static AstNode *ast_parse_prefix_type_op(ParseContext *pc) {
@@ -2521,6 +2528,16 @@ static AstNode *ast_parse_prefix_type_op(ParseContext *pc) {
return res;
}
+ Token *anyframe = eat_token_if(pc, TokenIdKeywordAnyFrame);
+ if (anyframe != nullptr) {
+ if (eat_token_if(pc, TokenIdArrow) != nullptr) {
+ AstNode *res = ast_create_node(pc, NodeTypeAnyFrameType, anyframe);
+ return res;
+ }
+
+ put_back_token(pc);
+ }
+
AstNode *array = ast_parse_array_type_start(pc);
if (array != nullptr) {
assert(array->type == NodeTypeArrayType);
@@ -3005,6 +3022,9 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont
case NodeTypeInferredArrayType:
visit_field(&node->data.array_type.child_type, visit, context);
break;
+ case NodeTypeAnyFrameType:
+ visit_field(&node->data.anyframe_type.payload_type, visit, context);
+ break;
case NodeTypeErrorType:
// none
break;
diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp
index 0869c3ba9c..38c6c7153e 100644
--- a/src/tokenizer.cpp
+++ b/src/tokenizer.cpp
@@ -109,6 +109,7 @@ static const struct ZigKeyword zig_keywords[] = {
{"align", TokenIdKeywordAlign},
{"allowzero", TokenIdKeywordAllowZero},
{"and", TokenIdKeywordAnd},
+ {"anyframe", TokenIdKeywordAnyFrame},
{"asm", TokenIdKeywordAsm},
{"async", TokenIdKeywordAsync},
{"await", TokenIdKeywordAwait},
@@ -1533,6 +1534,7 @@ const char * token_name(TokenId id) {
case TokenIdKeywordCancel: return "cancel";
case TokenIdKeywordAlign: return "align";
case TokenIdKeywordAnd: return "and";
+ case TokenIdKeywordAnyFrame: return "anyframe";
case TokenIdKeywordAsm: return "asm";
case TokenIdKeywordBreak: return "break";
case TokenIdKeywordCatch: return "catch";
diff --git a/src/tokenizer.hpp b/src/tokenizer.hpp
index 253e0bd1e5..98bdfea907 100644
--- a/src/tokenizer.hpp
+++ b/src/tokenizer.hpp
@@ -53,6 +53,7 @@ enum TokenId {
TokenIdKeywordAlign,
TokenIdKeywordAllowZero,
TokenIdKeywordAnd,
+ TokenIdKeywordAnyFrame,
TokenIdKeywordAsm,
TokenIdKeywordAsync,
TokenIdKeywordAwait,
diff --git a/std/hash_map.zig b/std/hash_map.zig
index bdd6cc7519..431fbb35ab 100644
--- a/std/hash_map.zig
+++ b/std/hash_map.zig
@@ -540,6 +540,7 @@ pub fn autoHash(key: var, comptime rng: *std.rand.Random, comptime HashInt: type
.Undefined,
.ArgTuple,
.Frame,
+ .AnyFrame,
=> @compileError("cannot hash this type"),
.Void,
diff --git a/std/testing.zig b/std/testing.zig
index 3c4772cf37..7f347b0c24 100644
--- a/std/testing.zig
+++ b/std/testing.zig
@@ -30,6 +30,7 @@ pub fn expectEqual(expected: var, actual: @typeOf(expected)) void {
.ArgTuple,
.Opaque,
.Frame,
+ .AnyFrame,
=> @compileError("value of type " ++ @typeName(@typeOf(actual)) ++ " encountered"),
.Undefined,
diff --git a/std/zig/ast.zig b/std/zig/ast.zig
index 38bd94339f..475a0e4e13 100644
--- a/std/zig/ast.zig
+++ b/std/zig/ast.zig
@@ -400,7 +400,7 @@ pub const Node = struct {
VarType,
ErrorType,
FnProto,
- PromiseType,
+ AnyFrameType,
// Primary expressions
IntegerLiteral,
@@ -952,9 +952,9 @@ pub const Node = struct {
}
};
- pub const PromiseType = struct {
+ pub const AnyFrameType = struct {
base: Node,
- promise_token: TokenIndex,
+ anyframe_token: TokenIndex,
result: ?Result,
pub const Result = struct {
@@ -962,7 +962,7 @@ pub const Node = struct {
return_type: *Node,
};
- pub fn iterate(self: *PromiseType, index: usize) ?*Node {
+ pub fn iterate(self: *AnyFrameType, index: usize) ?*Node {
var i = index;
if (self.result) |result| {
@@ -973,13 +973,13 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *const PromiseType) TokenIndex {
- return self.promise_token;
+ pub fn firstToken(self: *const AnyFrameType) TokenIndex {
+ return self.anyframe_token;
}
- pub fn lastToken(self: *const PromiseType) TokenIndex {
+ pub fn lastToken(self: *const AnyFrameType) TokenIndex {
if (self.result) |result| return result.return_type.lastToken();
- return self.promise_token;
+ return self.anyframe_token;
}
};
diff --git a/std/zig/parse.zig b/std/zig/parse.zig
index 59acf99890..600178cdce 100644
--- a/std/zig/parse.zig
+++ b/std/zig/parse.zig
@@ -1201,7 +1201,7 @@ fn parseSuffixExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
/// / KEYWORD_error DOT IDENTIFIER
/// / KEYWORD_false
/// / KEYWORD_null
-/// / KEYWORD_promise
+/// / KEYWORD_anyframe
/// / KEYWORD_true
/// / KEYWORD_undefined
/// / KEYWORD_unreachable
@@ -1256,11 +1256,11 @@ fn parsePrimaryTypeExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*N
}
if (eatToken(it, .Keyword_false)) |token| return createLiteral(arena, Node.BoolLiteral, token);
if (eatToken(it, .Keyword_null)) |token| return createLiteral(arena, Node.NullLiteral, token);
- if (eatToken(it, .Keyword_promise)) |token| {
- const node = try arena.create(Node.PromiseType);
- node.* = Node.PromiseType{
- .base = Node{ .id = .PromiseType },
- .promise_token = token,
+ if (eatToken(it, .Keyword_anyframe)) |token| {
+ const node = try arena.create(Node.AnyFrameType);
+ node.* = Node.AnyFrameType{
+ .base = Node{ .id = .AnyFrameType },
+ .anyframe_token = token,
.result = null,
};
return &node.base;
@@ -2194,7 +2194,7 @@ fn parsePrefixOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
/// PrefixTypeOp
/// <- QUESTIONMARK
-/// / KEYWORD_promise MINUSRARROW
+/// / KEYWORD_anyframe MINUSRARROW
/// / ArrayTypeStart (ByteAlign / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
/// / PtrTypeStart (KEYWORD_align LPAREN Expr (COLON INTEGER COLON INTEGER)? RPAREN / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
fn parsePrefixTypeOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
@@ -2209,20 +2209,20 @@ fn parsePrefixTypeOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node
return &node.base;
}
- // TODO: Returning a PromiseType instead of PrefixOp makes casting and setting .rhs or
+ // TODO: Returning a AnyFrameType instead of PrefixOp makes casting and setting .rhs or
// .return_type more difficult for the caller (see parsePrefixOpExpr helper).
- // Consider making the PromiseType a member of PrefixOp and add a
- // PrefixOp.PromiseType variant?
- if (eatToken(it, .Keyword_promise)) |token| {
+ // Consider making the AnyFrameType a member of PrefixOp and add a
+ // PrefixOp.AnyFrameType variant?
+ if (eatToken(it, .Keyword_anyframe)) |token| {
const arrow = eatToken(it, .Arrow) orelse {
putBackToken(it, token);
return null;
};
- const node = try arena.create(Node.PromiseType);
- node.* = Node.PromiseType{
- .base = Node{ .id = .PromiseType },
- .promise_token = token,
- .result = Node.PromiseType.Result{
+ const node = try arena.create(Node.AnyFrameType);
+ node.* = Node.AnyFrameType{
+ .base = Node{ .id = .AnyFrameType },
+ .anyframe_token = token,
+ .result = Node.AnyFrameType.Result{
.arrow_token = arrow,
.return_type = undefined, // set by caller
},
@@ -2903,8 +2903,8 @@ fn parsePrefixOpExpr(
rightmost_op = rhs;
} else break;
},
- .PromiseType => {
- const prom = rightmost_op.cast(Node.PromiseType).?;
+ .AnyFrameType => {
+ const prom = rightmost_op.cast(Node.AnyFrameType).?;
if (try opParseFn(arena, it, tree)) |rhs| {
prom.result.?.return_type = rhs;
rightmost_op = rhs;
@@ -2922,8 +2922,8 @@ fn parsePrefixOpExpr(
.InvalidToken = AstError.InvalidToken{ .token = it.index },
});
},
- .PromiseType => {
- const prom = rightmost_op.cast(Node.PromiseType).?;
+ .AnyFrameType => {
+ const prom = rightmost_op.cast(Node.AnyFrameType).?;
prom.result.?.return_type = try expectNode(arena, it, tree, childParseFn, AstError{
.InvalidToken = AstError.InvalidToken{ .token = it.index },
});
diff --git a/std/zig/parser_test.zig b/std/zig/parser_test.zig
index f6f3363bf6..28cde6de01 100644
--- a/std/zig/parser_test.zig
+++ b/std/zig/parser_test.zig
@@ -2111,12 +2111,12 @@ test "zig fmt: coroutines" {
\\ suspend;
\\ x += 1;
\\ suspend;
- \\ const p: promise->void = async simpleAsyncFn() catch unreachable;
+ \\ const p: anyframe->void = async simpleAsyncFn() catch unreachable;
\\ await p;
\\}
\\
\\test "coroutine suspend, resume, cancel" {
- \\ const p: promise = try async testAsyncSeq();
+ \\ const p: anyframe = try async testAsyncSeq();
\\ resume p;
\\ cancel p;
\\}
diff --git a/std/zig/render.zig b/std/zig/render.zig
index b85c11c6ac..c6bb51267d 100644
--- a/std/zig/render.zig
+++ b/std/zig/render.zig
@@ -1205,15 +1205,15 @@ fn renderExpression(
}
},
- ast.Node.Id.PromiseType => {
- const promise_type = @fieldParentPtr(ast.Node.PromiseType, "base", base);
+ ast.Node.Id.AnyFrameType => {
+ const anyframe_type = @fieldParentPtr(ast.Node.AnyFrameType, "base", base);
- if (promise_type.result) |result| {
- try renderToken(tree, stream, promise_type.promise_token, indent, start_col, Space.None); // promise
+ if (anyframe_type.result) |result| {
+ try renderToken(tree, stream, anyframe_type.anyframe_token, indent, start_col, Space.None); // anyframe
try renderToken(tree, stream, result.arrow_token, indent, start_col, Space.None); // ->
return renderExpression(allocator, stream, tree, indent, start_col, result.return_type, space);
} else {
- return renderToken(tree, stream, promise_type.promise_token, indent, start_col, space); // promise
+ return renderToken(tree, stream, anyframe_type.anyframe_token, indent, start_col, space); // anyframe
}
},
diff --git a/std/zig/tokenizer.zig b/std/zig/tokenizer.zig
index 4539e1e5b2..9de20c39f2 100644
--- a/std/zig/tokenizer.zig
+++ b/std/zig/tokenizer.zig
@@ -15,6 +15,7 @@ pub const Token = struct {
Keyword{ .bytes = "align", .id = Id.Keyword_align },
Keyword{ .bytes = "allowzero", .id = Id.Keyword_allowzero },
Keyword{ .bytes = "and", .id = Id.Keyword_and },
+ Keyword{ .bytes = "anyframe", .id = Id.Keyword_anyframe },
Keyword{ .bytes = "asm", .id = Id.Keyword_asm },
Keyword{ .bytes = "async", .id = Id.Keyword_async },
Keyword{ .bytes = "await", .id = Id.Keyword_await },
@@ -42,7 +43,6 @@ pub const Token = struct {
Keyword{ .bytes = "or", .id = Id.Keyword_or },
Keyword{ .bytes = "orelse", .id = Id.Keyword_orelse },
Keyword{ .bytes = "packed", .id = Id.Keyword_packed },
- Keyword{ .bytes = "promise", .id = Id.Keyword_promise },
Keyword{ .bytes = "pub", .id = Id.Keyword_pub },
Keyword{ .bytes = "resume", .id = Id.Keyword_resume },
Keyword{ .bytes = "return", .id = Id.Keyword_return },
@@ -174,7 +174,7 @@ pub const Token = struct {
Keyword_or,
Keyword_orelse,
Keyword_packed,
- Keyword_promise,
+ Keyword_anyframe,
Keyword_pub,
Keyword_resume,
Keyword_return,
diff --git a/test/stage1/behavior/type_info.zig b/test/stage1/behavior/type_info.zig
index 6a51015124..b86ba27c13 100644
--- a/test/stage1/behavior/type_info.zig
+++ b/test/stage1/behavior/type_info.zig
@@ -177,11 +177,11 @@ fn testUnion() void {
expect(TypeId(typeinfo_info) == TypeId.Union);
expect(typeinfo_info.Union.layout == TypeInfo.ContainerLayout.Auto);
expect(typeinfo_info.Union.tag_type.? == TypeId);
- expect(typeinfo_info.Union.fields.len == 25);
+ expect(typeinfo_info.Union.fields.len == 26);
expect(typeinfo_info.Union.fields[4].enum_field != null);
expect(typeinfo_info.Union.fields[4].enum_field.?.value == 4);
expect(typeinfo_info.Union.fields[4].field_type == @typeOf(@typeInfo(u8).Int));
- expect(typeinfo_info.Union.decls.len == 20);
+ expect(typeinfo_info.Union.decls.len == 21);
const TestNoTagUnion = union {
Foo: void,
@@ -280,6 +280,25 @@ fn testVector() void {
expect(vec_info.Vector.child == i32);
}
+test "type info: anyframe and anyframe->T" {
+ testAnyFrame();
+ comptime testAnyFrame();
+}
+
+fn testAnyFrame() void {
+ {
+ const anyframe_info = @typeInfo(anyframe->i32);
+ expect(TypeId(anyframe_info) == .AnyFrame);
+ expect(anyframe_info.AnyFrame.child.? == i32);
+ }
+
+ {
+ const anyframe_info = @typeInfo(anyframe);
+ expect(TypeId(anyframe_info) == .AnyFrame);
+ expect(anyframe_info.AnyFrame.child == null);
+ }
+}
+
test "type info: optional field unwrapping" {
const Struct = struct {
cdOffset: u32,
--
cgit v1.2.3
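
The behavior test added in the patch above already shows how the new type surfaces through `@typeInfo`: `anyframe->T` reports `.AnyFrame` with `child` set to `T`, while bare `anyframe` reports `child == null`. For quick reference, here is a condensed Zig sketch of the same checks; the variable names and top-level imports are illustrative assumptions, and the assertions mirror `testAnyFrame` from test/stage1/behavior/type_info.zig.

    const std = @import("std");
    const builtin = @import("builtin");
    const expect = std.testing.expect;
    const TypeId = builtin.TypeId;

    test "anyframe and anyframe->T via @typeInfo" {
        // anyframe->T is a frame handle with a known result type, so `child` is set.
        const with_result = @typeInfo(anyframe->i32);
        expect(TypeId(with_result) == .AnyFrame);
        expect(with_result.AnyFrame.child.? == i32);

        // Bare anyframe carries no result type; `child` comes back null.
        const bare = @typeInfo(anyframe);
        expect(TypeId(bare) == .AnyFrame);
        expect(bare.AnyFrame.child == null);
    }

At the language level this is the same handle that `async f()` produces and `await` consumes, as the updated parser_test case in the patch (`const p: anyframe->void = async simpleAsyncFn() catch unreachable;`) illustrates.
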
From 13b5a4bf8ca65c569e6b28ca0e41d101d12d0ff1 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 15 Aug 2019 14:05:12 -0400
Subject: remove `cancel`
---
doc/docgen.zig | 1 -
doc/langref.html.in | 14 +-
src-self-hosted/main.zig | 3 -
src/all_types.hpp | 33 +--
src/analyze.cpp | 53 +++--
src/analyze.hpp | 2 +-
src/ast_render.cpp | 8 -
src/codegen.cpp | 412 +++++++++++++++-----------------------
src/ir.cpp | 230 +++------------------
src/ir_print.cpp | 24 ---
src/parser.cpp | 12 --
src/tokenizer.cpp | 2 -
src/tokenizer.hpp | 1 -
std/event/fs.zig | 2 +-
std/event/future.zig | 2 +-
std/event/group.zig | 20 +-
std/event/net.zig | 8 +-
std/zig/parse.zig | 15 --
std/zig/parser_test.zig | 4 +-
std/zig/tokenizer.zig | 2 -
test/compile_errors.zig | 8 +-
test/stage1/behavior.zig | 1 -
test/stage1/behavior/async_fn.zig | 90 ++-------
test/stage1/behavior/cancel.zig | 115 -----------
24 files changed, 256 insertions(+), 806 deletions(-)
delete mode 100644 test/stage1/behavior/cancel.zig
(limited to 'src/tokenizer.cpp')
diff --git a/doc/docgen.zig b/doc/docgen.zig
index 92764d7642..458b97d2c0 100644
--- a/doc/docgen.zig
+++ b/doc/docgen.zig
@@ -750,7 +750,6 @@ fn tokenizeAndPrintRaw(docgen_tokenizer: *Tokenizer, out: var, source_token: Tok
.Keyword_async,
.Keyword_await,
.Keyword_break,
- .Keyword_cancel,
.Keyword_catch,
.Keyword_comptime,
.Keyword_const,
diff --git a/doc/langref.html.in b/doc/langref.html.in
index 23e4dd194e..0f964373c5 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -5971,7 +5971,7 @@ test "global assembly" {
{#header_open|Async Functions#}
An async function is a function whose callsite is split into an {#syntax#}async{#endsyntax#} initiation,
- followed by an {#syntax#}await{#endsyntax#} completion. They can also be canceled.
+ followed by an {#syntax#}await{#endsyntax#} completion.
When you call a function, it creates a stack frame,
@@ -6013,11 +6013,11 @@ test "global assembly" {
The result of an async function call is a {#syntax#}promise->T{#endsyntax#} type, where {#syntax#}T{#endsyntax#}
is the return type of the async function. Once a promise has been created, it must be
- consumed, either with {#syntax#}cancel{#endsyntax#} or {#syntax#}await{#endsyntax#}:
+ consumed with {#syntax#}await{#endsyntax#}:
Async functions start executing when created, so in the following example, the entire
- async function completes before it is canceled:
+ TODO
{#code_begin|test#}
const std = @import("std");
@@ -6048,7 +6048,7 @@ fn simpleAsyncFn() void {
When an async function suspends itself, it must be sure that it will be
- resumed or canceled somehow, for example by registering its promise handle
+ resumed somehow, for example by registering its promise handle
in an event loop. Use a suspend capture block to gain access to the
promise (TODO this is outdated):
@@ -6134,7 +6134,7 @@ async fn testResumeFromSuspend(my_result: *i32) void {
resumes the awaiter.
- A promise handle must be consumed exactly once after it is created, either by {#syntax#}cancel{#endsyntax#} or {#syntax#}await{#endsyntax#}.
+ A frame handle must be consumed exactly once after it is created with {#syntax#}await{#endsyntax#}.
{#syntax#}await{#endsyntax#} counts as a suspend point, and therefore at every {#syntax#}await{#endsyntax#},
@@ -9764,7 +9764,6 @@ PrimaryExpr
<- AsmExpr
/ IfExpr
/ KEYWORD_break BreakLabel? Expr?
- / KEYWORD_cancel Expr
/ KEYWORD_comptime Expr
/ KEYWORD_continue BreakLabel?
/ KEYWORD_resume Expr
@@ -10120,7 +10119,6 @@ KEYWORD_asm <- 'asm' end_of_word
KEYWORD_async <- 'async' end_of_word
KEYWORD_await <- 'await' end_of_word
KEYWORD_break <- 'break' end_of_word
-KEYWORD_cancel <- 'cancel' end_of_word
KEYWORD_catch <- 'catch' end_of_word
KEYWORD_comptime <- 'comptime' end_of_word
KEYWORD_const <- 'const' end_of_word
@@ -10165,7 +10163,7 @@ KEYWORD_volatile <- 'volatile' end_of_word
KEYWORD_while <- 'while' end_of_word
keyword <- KEYWORD_align / KEYWORD_and / KEYWORD_allowzero / KEYWORD_asm
- / KEYWORD_async / KEYWORD_await / KEYWORD_break / KEYWORD_cancel
+ / KEYWORD_async / KEYWORD_await / KEYWORD_break
/ KEYWORD_catch / KEYWORD_comptime / KEYWORD_const / KEYWORD_continue
/ KEYWORD_defer / KEYWORD_else / KEYWORD_enum / KEYWORD_errdefer
/ KEYWORD_error / KEYWORD_export / KEYWORD_extern / KEYWORD_false
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index bc5d078950..5136b32735 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -467,7 +467,6 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Co
comp.start();
// TODO const process_build_events_handle = try async processBuildEvents(comp, color);
- defer cancel process_build_events_handle;
loop.run();
}
@@ -579,7 +578,6 @@ fn cmdLibC(allocator: *Allocator, args: []const []const u8) !void {
defer zig_compiler.deinit();
// TODO const handle = try async findLibCAsync(&zig_compiler);
- defer cancel handle;
loop.run();
}
@@ -669,7 +667,6 @@ fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void {
// TODO &flags,
// TODO color,
// TODO );
- defer cancel main_handle;
loop.run();
return result;
}
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 22e38b9f0c..f1c699ba10 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -476,7 +476,6 @@ enum NodeType {
NodeTypeIfErrorExpr,
NodeTypeIfOptional,
NodeTypeErrorSetDecl,
- NodeTypeCancel,
NodeTypeResume,
NodeTypeAwaitExpr,
NodeTypeSuspend,
@@ -911,10 +910,6 @@ struct AstNodeBreakExpr {
AstNode *expr; // may be null
};
-struct AstNodeCancelExpr {
- AstNode *expr;
-};
-
struct AstNodeResumeExpr {
AstNode *expr;
};
@@ -1003,7 +998,6 @@ struct AstNode {
AstNodeInferredArrayType inferred_array_type;
AstNodeErrorType error_type;
AstNodeErrorSetDecl err_set_decl;
- AstNodeCancelExpr cancel_expr;
AstNodeResumeExpr resume_expr;
AstNodeAwaitExpr await_expr;
AstNodeSuspend suspend;
@@ -1561,7 +1555,6 @@ enum PanicMsgId {
PanicMsgIdBadAwait,
PanicMsgIdBadReturn,
PanicMsgIdResumedAnAwaitingFn,
- PanicMsgIdResumedACancelingFn,
PanicMsgIdFrameTooSmall,
PanicMsgIdResumedFnPendingAwait,
@@ -1729,8 +1722,6 @@ struct CodeGen {
LLVMValueRef cur_async_switch_instr;
LLVMValueRef cur_async_resume_index_ptr;
LLVMValueRef cur_async_awaiter_ptr;
- LLVMValueRef cur_async_prev_val;
- LLVMValueRef cur_async_prev_val_field_ptr;
LLVMBasicBlockRef cur_preamble_llvm_block;
size_t cur_resume_block_count;
LLVMValueRef cur_err_ret_trace_val_arg;
@@ -1822,7 +1813,6 @@ struct CodeGen {
ZigType *align_amt_type;
ZigType *stack_trace_type;
- ZigType *ptr_to_stack_trace_type;
ZigType *err_tag_type;
ZigType *test_fn_type;
@@ -1892,7 +1882,6 @@ struct CodeGen {
bool system_linker_hack;
bool reported_bad_link_libc_error;
bool is_dynamic; // shared library rather than static library. dynamic musl rather than static musl.
- bool cur_is_after_return;
//////////////////////////// Participates in Input Parameter Cache Hash
/////// Note: there is a separate cache hash for builtin.zig, when adding fields,
@@ -2235,7 +2224,6 @@ enum IrInstructionId {
IrInstructionIdCallGen,
IrInstructionIdConst,
IrInstructionIdReturn,
- IrInstructionIdReturnBegin,
IrInstructionIdCast,
IrInstructionIdResizeSlice,
IrInstructionIdContainerInitList,
@@ -2345,7 +2333,6 @@ enum IrInstructionId {
IrInstructionIdExport,
IrInstructionIdErrorReturnTrace,
IrInstructionIdErrorUnion,
- IrInstructionIdCancel,
IrInstructionIdAtomicRmw,
IrInstructionIdAtomicLoad,
IrInstructionIdSaveErrRetAddr,
@@ -2370,7 +2357,6 @@ enum IrInstructionId {
IrInstructionIdAwaitSrc,
IrInstructionIdAwaitGen,
IrInstructionIdResume,
- IrInstructionIdTestCancelRequested,
IrInstructionIdSpillBegin,
IrInstructionIdSpillEnd,
};
@@ -2649,12 +2635,6 @@ struct IrInstructionReturn {
IrInstruction *operand;
};
-struct IrInstructionReturnBegin {
- IrInstruction base;
-
- IrInstruction *operand;
-};
-
enum CastOp {
CastOpNoCast, // signifies the function call expression is not a cast
CastOpNoop, // fn call expr is a cast, but does nothing
@@ -3440,12 +3420,6 @@ struct IrInstructionErrorUnion {
IrInstruction *payload;
};
-struct IrInstructionCancel {
- IrInstruction base;
-
- IrInstruction *frame;
-};
-
struct IrInstructionAtomicRmw {
IrInstruction base;
@@ -3647,10 +3621,6 @@ struct IrInstructionResume {
IrInstruction *frame;
};
-struct IrInstructionTestCancelRequested {
- IrInstruction base;
-};
-
enum SpillId {
SpillIdInvalid,
SpillIdRetErrCode,
@@ -3756,8 +3726,7 @@ static const size_t err_union_err_index = 1;
static const size_t frame_fn_ptr_index = 0;
static const size_t frame_resume_index = 1;
static const size_t frame_awaiter_index = 2;
-static const size_t frame_prev_val_index = 3;
-static const size_t frame_ret_start = 4;
+static const size_t frame_ret_start = 3;
// TODO https://github.com/ziglang/zig/issues/3056
// We require this to be a power of 2 so that we can use shifting rather than
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 1b6de6e7df..fc42abaf26 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -828,17 +828,15 @@ bool calling_convention_allows_zig_types(CallingConvention cc) {
zig_unreachable();
}
-ZigType *get_ptr_to_stack_trace_type(CodeGen *g) {
+ZigType *get_stack_trace_type(CodeGen *g) {
if (g->stack_trace_type == nullptr) {
ConstExprValue *stack_trace_type_val = get_builtin_value(g, "StackTrace");
assert(stack_trace_type_val->type->id == ZigTypeIdMetaType);
g->stack_trace_type = stack_trace_type_val->data.x_type;
assertNoError(type_resolve(g, g->stack_trace_type, ResolveStatusZeroBitsKnown));
-
- g->ptr_to_stack_trace_type = get_pointer_to_type(g, g->stack_trace_type, false);
}
- return g->ptr_to_stack_trace_type;
+ return g->stack_trace_type;
}
bool want_first_arg_sret(CodeGen *g, FnTypeId *fn_type_id) {
@@ -3035,7 +3033,6 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) {
case NodeTypeIfErrorExpr:
case NodeTypeIfOptional:
case NodeTypeErrorSetDecl:
- case NodeTypeCancel:
case NodeTypeResume:
case NodeTypeAwaitExpr:
case NodeTypeSuspend:
@@ -3822,11 +3819,9 @@ static void add_async_error_notes(CodeGen *g, ErrorMsg *msg, ZigFn *fn) {
} else if (fn->inferred_async_node->type == NodeTypeAwaitExpr) {
add_error_note(g, msg, fn->inferred_async_node,
buf_sprintf("await is a suspend point"));
- } else if (fn->inferred_async_node->type == NodeTypeCancel) {
- add_error_note(g, msg, fn->inferred_async_node,
- buf_sprintf("cancel is a suspend point"));
} else {
- zig_unreachable();
+ add_error_note(g, msg, fn->inferred_async_node,
+ buf_sprintf("suspends here"));
}
}
@@ -5231,12 +5226,21 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) {
fields.append({"@fn_ptr", g->builtin_types.entry_usize, 0});
fields.append({"@resume_index", g->builtin_types.entry_usize, 0});
fields.append({"@awaiter", g->builtin_types.entry_usize, 0});
- fields.append({"@prev_val", g->builtin_types.entry_usize, 0});
fields.append({"@result_ptr_callee", ptr_return_type, 0});
fields.append({"@result_ptr_awaiter", ptr_return_type, 0});
fields.append({"@result", fn_type_id->return_type, 0});
+ if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) {
+ ZigType *ptr_to_stack_trace_type = get_pointer_to_type(g, get_stack_trace_type(g), false);
+ fields.append({"@ptr_stack_trace_callee", ptr_to_stack_trace_type, 0});
+ fields.append({"@ptr_stack_trace_awaiter", ptr_to_stack_trace_type, 0});
+
+ fields.append({"@stack_trace", get_stack_trace_type(g), 0});
+ fields.append({"@instruction_addresses",
+ get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count), 0});
+ }
+
frame_type->data.frame.locals_struct = get_struct_type(g, buf_ptr(&frame_type->name),
fields.items, fields.length, target_fn_align(g->zig_target));
frame_type->abi_size = frame_type->data.frame.locals_struct->abi_size;
@@ -5311,14 +5315,15 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) {
fields.append({"@fn_ptr", fn_type, 0});
fields.append({"@resume_index", g->builtin_types.entry_usize, 0});
fields.append({"@awaiter", g->builtin_types.entry_usize, 0});
- fields.append({"@prev_val", g->builtin_types.entry_usize, 0});
fields.append({"@result_ptr_callee", ptr_return_type, 0});
fields.append({"@result_ptr_awaiter", ptr_return_type, 0});
fields.append({"@result", fn_type_id->return_type, 0});
if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) {
- fields.append({"@ptr_stack_trace", get_ptr_to_stack_trace_type(g), 0});
+ ZigType *ptr_stack_trace_type = get_pointer_to_type(g, get_stack_trace_type(g), false);
+ fields.append({"@ptr_stack_trace_callee", ptr_stack_trace_type, 0});
+ fields.append({"@ptr_stack_trace_awaiter", ptr_stack_trace_type, 0});
}
for (size_t arg_i = 0; arg_i < fn_type_id->param_count; arg_i += 1) {
@@ -5337,9 +5342,7 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) {
}
if (codegen_fn_has_err_ret_tracing_stack(g, fn, true)) {
- (void)get_ptr_to_stack_trace_type(g); // populate g->stack_trace_type
-
- fields.append({"@stack_trace", g->stack_trace_type, 0});
+ fields.append({"@stack_trace", get_stack_trace_type(g), 0});
fields.append({"@instruction_addresses",
get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count), 0});
}
@@ -7553,7 +7556,7 @@ static void resolve_llvm_types_fn_type(CodeGen *g, ZigType *fn_type) {
fn_type->data.fn.gen_return_type = gen_return_type;
if (prefix_arg_error_return_trace && !is_async) {
- ZigType *gen_type = get_ptr_to_stack_trace_type(g);
+ ZigType *gen_type = get_pointer_to_type(g, get_stack_trace_type(g), false);
gen_param_types.append(get_llvm_type(g, gen_type));
param_di_types.append(get_llvm_di_type(g, gen_type));
}
@@ -7727,7 +7730,6 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re
field_types.append(ptr_fn_llvm_type); // fn_ptr
field_types.append(usize_type_ref); // resume_index
field_types.append(usize_type_ref); // awaiter
- field_types.append(usize_type_ref); // prev_val
bool have_result_type = result_type != nullptr && type_has_bits(result_type);
if (have_result_type) {
@@ -7735,7 +7737,9 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re
field_types.append(get_llvm_type(g, ptr_result_type)); // result_ptr_awaiter
field_types.append(get_llvm_type(g, result_type)); // result
if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) {
- field_types.append(get_llvm_type(g, get_ptr_to_stack_trace_type(g))); // ptr_stack_trace
+ ZigType *ptr_stack_trace = get_pointer_to_type(g, get_stack_trace_type(g), false);
+ field_types.append(get_llvm_type(g, ptr_stack_trace)); // ptr_stack_trace_callee
+ field_types.append(get_llvm_type(g, ptr_stack_trace)); // ptr_stack_trace_awaiter
}
}
LLVMStructSetBody(frame_header_type, field_types.items, field_types.length, false);
@@ -7792,14 +7796,23 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re
ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, result_type)));
if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) {
+ ZigType *ptr_stack_trace = get_pointer_to_type(g, get_stack_trace_type(g), false);
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "ptr_stack_trace_callee",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_stack_trace)));
di_element_types.append(
ZigLLVMCreateDebugMemberType(g->dbuilder,
- ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "ptr_stack_trace",
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "ptr_stack_trace_awaiter",
di_file, line,
8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
- ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, get_ptr_to_stack_trace_type(g))));
+ ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_stack_trace)));
}
};
diff --git a/src/analyze.hpp b/src/analyze.hpp
index e6336d3cdc..5752c74751 100644
--- a/src/analyze.hpp
+++ b/src/analyze.hpp
@@ -195,7 +195,7 @@ void add_var_export(CodeGen *g, ZigVar *fn_table_entry, Buf *symbol_name, Global
ConstExprValue *get_builtin_value(CodeGen *codegen, const char *name);
-ZigType *get_ptr_to_stack_trace_type(CodeGen *g);
+ZigType *get_stack_trace_type(CodeGen *g);
bool resolve_inferred_error_set(CodeGen *g, ZigType *err_set_type, AstNode *source_node);
ZigType *get_auto_err_set_type(CodeGen *g, ZigFn *fn_entry);
diff --git a/src/ast_render.cpp b/src/ast_render.cpp
index dd4d9cf646..334dc37b59 100644
--- a/src/ast_render.cpp
+++ b/src/ast_render.cpp
@@ -249,8 +249,6 @@ static const char *node_type_str(NodeType node_type) {
return "IfOptional";
case NodeTypeErrorSetDecl:
return "ErrorSetDecl";
- case NodeTypeCancel:
- return "Cancel";
case NodeTypeResume:
return "Resume";
case NodeTypeAwaitExpr:
@@ -1136,12 +1134,6 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
fprintf(ar->f, "}");
break;
}
- case NodeTypeCancel:
- {
- fprintf(ar->f, "cancel ");
- render_node_grouped(ar, node->data.cancel_expr.expr);
- break;
- }
case NodeTypeResume:
{
fprintf(ar->f, "resume ");
diff --git a/src/codegen.cpp b/src/codegen.cpp
index e9f323dd0d..9bf7b0287b 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -28,8 +28,6 @@ enum ResumeId {
ResumeIdManual,
ResumeIdReturn,
ResumeIdCall,
-
- ResumeIdAwaitEarlyReturn // must be last
};
static void init_darwin_native(CodeGen *g) {
@@ -317,8 +315,9 @@ static uint32_t frame_index_trace_arg(CodeGen *g, ZigType *return_type) {
// label (grep this): [fn_frame_struct_layout]
static uint32_t frame_index_arg(CodeGen *g, ZigType *return_type) {
bool have_stack_trace = codegen_fn_has_err_ret_tracing_arg(g, return_type);
- // [0] *StackTrace
- uint32_t trace_field_count = have_stack_trace ? 1 : 0;
+ // [0] *StackTrace (callee's)
+ // [1] *StackTrace (awaiter's)
+ uint32_t trace_field_count = have_stack_trace ? 2 : 0;
return frame_index_trace_arg(g, return_type) + trace_field_count;
}
@@ -916,8 +915,6 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) {
return buf_create_from_str("async function returned twice");
case PanicMsgIdResumedAnAwaitingFn:
return buf_create_from_str("awaiting function resumed");
- case PanicMsgIdResumedACancelingFn:
- return buf_create_from_str("canceling function resumed");
case PanicMsgIdFrameTooSmall:
return buf_create_from_str("frame too small");
case PanicMsgIdResumedFnPendingAwait:
@@ -946,13 +943,16 @@ static LLVMValueRef get_panic_msg_ptr_val(CodeGen *g, PanicMsgId msg_id) {
return LLVMConstBitCast(val->global_refs->llvm_global, LLVMPointerType(get_llvm_type(g, str_type), 0));
}
+static ZigType *ptr_to_stack_trace_type(CodeGen *g) {
+ return get_pointer_to_type(g, get_stack_trace_type(g), false);
+}
+
static void gen_panic(CodeGen *g, LLVMValueRef msg_arg, LLVMValueRef stack_trace_arg) {
assert(g->panic_fn != nullptr);
LLVMValueRef fn_val = fn_llvm_value(g, g->panic_fn);
LLVMCallConv llvm_cc = get_llvm_cc(g, g->panic_fn->type_entry->data.fn.fn_type_id.cc);
if (stack_trace_arg == nullptr) {
- ZigType *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(g);
- stack_trace_arg = LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type));
+ stack_trace_arg = LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type(g)));
}
LLVMValueRef args[] = {
msg_arg,
@@ -1046,7 +1046,7 @@ static LLVMValueRef get_add_error_return_trace_addr_fn(CodeGen *g) {
return g->add_error_return_trace_addr_fn_val;
LLVMTypeRef arg_types[] = {
- get_llvm_type(g, get_ptr_to_stack_trace_type(g)),
+ get_llvm_type(g, ptr_to_stack_trace_type(g)),
g->builtin_types.entry_usize->llvm_type,
};
LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMVoidType(), arg_types, 2, false);
@@ -1127,7 +1127,7 @@ static LLVMValueRef get_return_err_fn(CodeGen *g) {
LLVMTypeRef arg_types[] = {
// error return trace pointer
- get_llvm_type(g, get_ptr_to_stack_trace_type(g)),
+ get_llvm_type(g, ptr_to_stack_trace_type(g)),
};
LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMVoidType(), arg_types, 1, false);
@@ -1205,7 +1205,7 @@ static LLVMValueRef get_safety_crash_err_fn(CodeGen *g) {
LLVMTypeRef fn_type_ref;
if (g->have_err_ret_tracing) {
LLVMTypeRef arg_types[] = {
- get_llvm_type(g, g->ptr_to_stack_trace_type),
+ get_llvm_type(g, get_pointer_to_type(g, get_stack_trace_type(g), false)),
get_llvm_type(g, g->err_tag_type),
};
fn_type_ref = LLVMFunctionType(LLVMVoidType(), arg_types, 2, false);
@@ -1321,14 +1321,7 @@ static LLVMValueRef get_cur_err_ret_trace_val(CodeGen *g, Scope *scope) {
if (g->cur_err_ret_trace_val_stack != nullptr) {
return g->cur_err_ret_trace_val_stack;
}
- if (g->cur_err_ret_trace_val_arg != nullptr) {
- if (fn_is_async(g->cur_fn)) {
- return LLVMBuildLoad(g->builder, g->cur_err_ret_trace_val_arg, "");
- } else {
- return g->cur_err_ret_trace_val_arg;
- }
- }
- return nullptr;
+ return g->cur_err_ret_trace_val_arg;
}
static void gen_safety_crash_for_err(CodeGen *g, LLVMValueRef err_val, Scope *scope) {
@@ -1337,8 +1330,7 @@ static void gen_safety_crash_for_err(CodeGen *g, LLVMValueRef err_val, Scope *sc
if (g->have_err_ret_tracing) {
LLVMValueRef err_ret_trace_val = get_cur_err_ret_trace_val(g, scope);
if (err_ret_trace_val == nullptr) {
- ZigType *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(g);
- err_ret_trace_val = LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type));
+ err_ret_trace_val = LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type(g)));
}
LLVMValueRef args[] = {
err_ret_trace_val,
@@ -2044,8 +2036,8 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) {
assert(g->stack_trace_type != nullptr);
LLVMTypeRef param_types[] = {
- get_llvm_type(g, get_ptr_to_stack_trace_type(g)),
- get_llvm_type(g, get_ptr_to_stack_trace_type(g)),
+ get_llvm_type(g, ptr_to_stack_trace_type(g)),
+ get_llvm_type(g, ptr_to_stack_trace_type(g)),
};
LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMVoidType(), param_types, 2, false);
@@ -2058,7 +2050,6 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) {
addLLVMArgAttr(fn_val, (unsigned)0, "noalias");
addLLVMArgAttr(fn_val, (unsigned)0, "writeonly");
- addLLVMArgAttr(fn_val, (unsigned)1, "nonnull");
addLLVMArgAttr(fn_val, (unsigned)1, "noalias");
addLLVMArgAttr(fn_val, (unsigned)1, "readonly");
if (g->build_mode == BuildModeDebug) {
@@ -2075,7 +2066,7 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) {
LLVMPositionBuilderAtEnd(g->builder, entry_block);
ZigLLVMClearCurrentDebugLocation(g->builder);
- // if (dest_stack_trace == null) return;
+ // if (dest_stack_trace == null or src_stack_trace == null) return;
// var frame_index: usize = undefined;
// var frames_left: usize = undefined;
// if (src_stack_trace.index < src_stack_trace.instruction_addresses.len) {
@@ -2093,7 +2084,7 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) {
// frame_index = (frame_index + 1) % src_stack_trace.instruction_addresses.len;
// }
LLVMBasicBlockRef return_block = LLVMAppendBasicBlock(fn_val, "Return");
- LLVMBasicBlockRef dest_non_null_block = LLVMAppendBasicBlock(fn_val, "DestNonNull");
+ LLVMBasicBlockRef non_null_block = LLVMAppendBasicBlock(fn_val, "NonNull");
LLVMValueRef frame_index_ptr = LLVMBuildAlloca(g->builder, g->builtin_types.entry_usize->llvm_type, "frame_index");
LLVMValueRef frames_left_ptr = LLVMBuildAlloca(g->builder, g->builtin_types.entry_usize->llvm_type, "frames_left");
@@ -2103,9 +2094,12 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) {
LLVMValueRef null_dest_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, dest_stack_trace_ptr,
LLVMConstNull(LLVMTypeOf(dest_stack_trace_ptr)), "");
- LLVMBuildCondBr(g->builder, null_dest_bit, return_block, dest_non_null_block);
+ LLVMValueRef null_src_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, src_stack_trace_ptr,
+ LLVMConstNull(LLVMTypeOf(src_stack_trace_ptr)), "");
+ LLVMValueRef null_bit = LLVMBuildOr(g->builder, null_dest_bit, null_src_bit, "");
+ LLVMBuildCondBr(g->builder, null_bit, return_block, non_null_block);
- LLVMPositionBuilderAtEnd(g->builder, dest_non_null_block);
+ LLVMPositionBuilderAtEnd(g->builder, non_null_block);
size_t src_index_field_index = g->stack_trace_type->data.structure.fields[0].gen_index;
size_t src_addresses_field_index = g->stack_trace_type->data.structure.fields[1].gen_index;
LLVMValueRef src_index_field_ptr = LLVMBuildStructGEP(g->builder, src_stack_trace_ptr,
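
The pseudocode comment above describes what get_merge_err_ret_traces_fn_val() now emits, including the new null check on the source trace. Below is a hedged, self-contained C++ rendering of that pseudocode; StackTrace, add_addr, and the exact bookkeeping in the not-yet-wrapped branch are illustrative stand-ins, not the compiler's code.

#include <cstddef>
#include <cstdint>

struct StackTrace {
    size_t index;          // total number of addresses ever recorded
    uintptr_t *addresses;  // ring buffer of return addresses
    size_t len;            // ring buffer capacity
};

// Stand-in for __zig_add_err_ret_trace_addr: record one address in dest.
static void add_addr(StackTrace *dest, uintptr_t addr) {
    dest->addresses[dest->index % dest->len] = addr;
    dest->index += 1;
}

// Copy src's recorded addresses, oldest first, into dest.
// The early return when either pointer is null matches the branch added above.
static void merge_err_ret_traces(StackTrace *dest, const StackTrace *src) {
    if (dest == nullptr || src == nullptr)
        return;
    size_t frame_index, frames_left;
    if (src->index < src->len) {
        frame_index = 0;                      // ring buffer never wrapped
        frames_left = src->index;
    } else {
        frame_index = src->index % src->len;  // wrapped: start at the oldest entry
        frames_left = src->len;
    }
    while (frames_left != 0) {
        add_addr(dest, src->addresses[frame_index]);
        frames_left -= 1;
        frame_index = (frame_index + 1) % src->len;
    }
}
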
@@ -2183,13 +2177,11 @@ static LLVMValueRef ir_render_save_err_ret_addr(CodeGen *g, IrExecutable *execut
ZigLLVMBuildCall(g->builder, return_err_fn, &my_err_trace_val, 1,
get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
- if (fn_is_async(g->cur_fn) && g->cur_fn->calls_or_awaits_errorable_fn &&
- codegen_fn_has_err_ret_tracing_arg(g, g->cur_fn->type_entry->data.fn.fn_type_id.return_type))
- {
- LLVMValueRef dest_trace_ptr = LLVMBuildLoad(g->builder, g->cur_err_ret_trace_val_arg, "");
- LLVMValueRef args[] = { dest_trace_ptr, my_err_trace_val };
- ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2,
- get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
+ ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type;
+ if (fn_is_async(g->cur_fn) && codegen_fn_has_err_ret_tracing_arg(g, ret_type)) {
+ LLVMValueRef trace_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
+ frame_index_trace_arg(g, ret_type), "");
+ LLVMBuildStore(g->builder, my_err_trace_val, trace_ptr_ptr);
}
return nullptr;
@@ -2201,16 +2193,9 @@ static void gen_assert_resume_id(CodeGen *g, IrInstruction *source_instr, Resume
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadResume");
if (end_bb == nullptr) end_bb = LLVMAppendBasicBlock(g->cur_fn_val, "OkResume");
- LLVMValueRef ok_bit;
- if (resume_id == ResumeIdAwaitEarlyReturn) {
- LLVMValueRef last_value = LLVMConstSub(LLVMConstAllOnes(usize_type_ref),
- LLVMConstInt(usize_type_ref, ResumeIdAwaitEarlyReturn, false));
- ok_bit = LLVMBuildICmp(g->builder, LLVMIntULT, LLVMGetParam(g->cur_fn_val, 1), last_value, "");
- } else {
- LLVMValueRef expected_value = LLVMConstSub(LLVMConstAllOnes(usize_type_ref),
- LLVMConstInt(usize_type_ref, resume_id, false));
- ok_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, LLVMGetParam(g->cur_fn_val, 1), expected_value, "");
- }
+ LLVMValueRef expected_value = LLVMConstSub(LLVMConstAllOnes(usize_type_ref),
+ LLVMConstInt(usize_type_ref, resume_id, false));
+ LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, LLVMGetParam(g->cur_fn_val, 1), expected_value, "");
LLVMBuildCondBr(g->builder, ok_bit, end_bb, bad_resume_block);
LLVMPositionBuilderAtEnd(g->builder, bad_resume_block);
@@ -2219,36 +2204,19 @@ static void gen_assert_resume_id(CodeGen *g, IrInstruction *source_instr, Resume
LLVMPositionBuilderAtEnd(g->builder, end_bb);
}
-static LLVMValueRef gen_resume(CodeGen *g, LLVMValueRef fn_val, LLVMValueRef target_frame_ptr,
- ResumeId resume_id, LLVMValueRef arg_val)
-{
+static LLVMValueRef gen_resume(CodeGen *g, LLVMValueRef fn_val, LLVMValueRef target_frame_ptr, ResumeId resume_id) {
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
if (fn_val == nullptr) {
LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_fn_ptr_index, "");
fn_val = LLVMBuildLoad(g->builder, fn_ptr_ptr, "");
}
- if (arg_val == nullptr) {
- arg_val = LLVMBuildSub(g->builder, LLVMConstAllOnes(usize_type_ref),
- LLVMConstInt(usize_type_ref, resume_id, false), "");
- } else {
- assert(resume_id == ResumeIdAwaitEarlyReturn);
- }
+ LLVMValueRef arg_val = LLVMBuildSub(g->builder, LLVMConstAllOnes(usize_type_ref),
+ LLVMConstInt(usize_type_ref, resume_id, false), "");
LLVMValueRef args[] = {target_frame_ptr, arg_val};
return ZigLLVMBuildCall(g->builder, fn_val, args, 2, LLVMFastCallConv, ZigLLVM_FnInlineAuto, "");
}
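
With ResumeIdAwaitEarlyReturn gone, gen_resume() always encodes the resume id the same way and gen_assert_resume_id() checks it with a single equality test. A small hedged sketch of that encoding, in plain C++ standing in for the generated LLVM IR:

#include <cassert>
#include <cstdint>

using usize = uintptr_t;

enum ResumeId : usize { ResumeIdManual, ResumeIdReturn, ResumeIdCall };

// The second resume argument is all-ones minus the id, which keeps the
// handful of ids clustered at the very top of the usize range.
static usize encode_resume_arg(ResumeId id) {
    return ~static_cast<usize>(0) - static_cast<usize>(id);
}

// After this commit the safety check is a plain equality comparison;
// the old less-than range test for early returns is no longer needed.
static void assert_resume_id(usize arg, ResumeId expected) {
    assert(arg == encode_resume_arg(expected) && "bad resume id");
}
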
-static LLVMValueRef get_cur_async_prev_val(CodeGen *g) {
- if (g->cur_async_prev_val != nullptr) {
- return g->cur_async_prev_val;
- }
- g->cur_async_prev_val = LLVMBuildLoad(g->builder, g->cur_async_prev_val_field_ptr, "");
- return g->cur_async_prev_val;
-}
-
static LLVMBasicBlockRef gen_suspend_begin(CodeGen *g, const char *name_hint) {
- // This becomes invalid when a suspend happens.
- g->cur_async_prev_val = nullptr;
-
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
LLVMBasicBlockRef resume_bb = LLVMAppendBasicBlock(g->cur_fn_val, name_hint);
size_t new_block_index = g->cur_resume_block_count;
@@ -2259,6 +2227,10 @@ static LLVMBasicBlockRef gen_suspend_begin(CodeGen *g, const char *name_hint) {
return resume_bb;
}
+static void set_tail_call_if_appropriate(CodeGen *g, LLVMValueRef call_inst) {
+ LLVMSetTailCall(call_inst, true);
+}
+
static LLVMValueRef gen_maybe_atomic_op(CodeGen *g, LLVMAtomicRMWBinOp op, LLVMValueRef ptr, LLVMValueRef val,
LLVMAtomicOrdering order)
{
@@ -2282,32 +2254,32 @@ static LLVMValueRef gen_maybe_atomic_op(CodeGen *g, LLVMAtomicRMWBinOp op, LLVMV
}
}
-static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable,
- IrInstructionReturnBegin *instruction)
-{
+static void gen_async_return(CodeGen *g, IrInstructionReturn *instruction) {
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
+
ZigType *operand_type = (instruction->operand != nullptr) ? instruction->operand->value.type : nullptr;
bool operand_has_bits = (operand_type != nullptr) && type_has_bits(operand_type);
- if (!fn_is_async(g->cur_fn)) {
- return operand_has_bits ? ir_llvm_value(g, instruction->operand) : nullptr;
- }
+ ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type;
+ bool ret_type_has_bits = type_has_bits(ret_type);
- LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
if (operand_has_bits && instruction->operand != nullptr) {
- ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type;
bool need_store = instruction->operand->value.special != ConstValSpecialRuntime || !handle_is_ptr(ret_type);
if (need_store) {
- // It didn't get written to the result ptr. We do that now so that we do not have to spill
- // the return operand.
+ // It didn't get written to the result ptr. We do that now.
ZigType *ret_ptr_type = get_pointer_to_type(g, ret_type, true);
gen_assign_raw(g, g->cur_ret_ptr, ret_ptr_type, ir_llvm_value(g, instruction->operand));
}
}
- // Prepare to be suspended. We might end up not having to suspend though.
- LLVMBasicBlockRef resume_bb = gen_suspend_begin(g, "ReturnResume");
+ // Whether we tail resume the awaiter, or do an early return, we are done and will not be resumed.
+ if (ir_want_runtime_safety(g, &instruction->base)) {
+ LLVMValueRef new_resume_index = LLVMConstAllOnes(usize_type_ref);
+ LLVMBuildStore(g->builder, new_resume_index, g->cur_async_resume_index_ptr);
+ }
LLVMValueRef zero = LLVMConstNull(usize_type_ref);
LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref);
+
LLVMValueRef prev_val = gen_maybe_atomic_op(g, LLVMAtomicRMWBinOpXor, g->cur_async_awaiter_ptr,
all_ones, LLVMAtomicOrderingAcquire);
@@ -2316,7 +2288,6 @@ static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable,
LLVMBasicBlockRef resume_them_block = LLVMAppendBasicBlock(g->cur_fn_val, "ResumeThem");
LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, prev_val, resume_them_block, 2);
- LLVMBasicBlockRef switch_bb = LLVMGetInsertBlock(g->builder);
LLVMAddCase(switch_instr, zero, early_return_block);
LLVMAddCase(switch_instr, all_ones, bad_return_block);
@@ -2325,90 +2296,63 @@ static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable,
LLVMPositionBuilderAtEnd(g->builder, bad_return_block);
gen_assertion(g, PanicMsgIdBadReturn, &instruction->base);
- // The caller has not done an await yet. So we suspend at the return instruction, until a
- // cancel or await is performed.
+ // There is no awaiter yet, but we're completely done.
LLVMPositionBuilderAtEnd(g->builder, early_return_block);
LLVMBuildRetVoid(g->builder);
- // Add a safety check for when getting resumed by the awaiter.
- LLVMPositionBuilderAtEnd(g->builder, resume_bb);
- LLVMBasicBlockRef after_resume_block = LLVMGetInsertBlock(g->builder);
- gen_assert_resume_id(g, &instruction->base, ResumeIdAwaitEarlyReturn, PanicMsgIdResumedFnPendingAwait,
- resume_them_block);
-
- // We need to resume the caller by tail calling them.
- // That will happen when rendering IrInstructionReturn after running the defers/errdefers.
- // We either got here from Entry (function call) or from the switch above
- g->cur_async_prev_val = LLVMBuildPhi(g->builder, usize_type_ref, "");
- LLVMValueRef incoming_values[] = { LLVMGetParam(g->cur_fn_val, 1), prev_val };
- LLVMBasicBlockRef incoming_blocks[] = { after_resume_block, switch_bb };
- LLVMAddIncoming(g->cur_async_prev_val, incoming_values, incoming_blocks, 2);
-
- g->cur_is_after_return = true;
- LLVMBuildStore(g->builder, g->cur_async_prev_val, g->cur_async_prev_val_field_ptr);
-
- if (!operand_has_bits) {
- return nullptr;
- }
-
- return get_handle_value(g, g->cur_ret_ptr, operand_type, get_pointer_to_type(g, operand_type, true));
-}
-
-static void set_tail_call_if_appropriate(CodeGen *g, LLVMValueRef call_inst) {
- LLVMSetTailCall(call_inst, true);
+ // We need to resume the caller by tail calling them,
+ // but first write through the result pointer and possibly
+ // error return trace pointer.
+ LLVMPositionBuilderAtEnd(g->builder, resume_them_block);
+
+ if (ret_type_has_bits) {
+ // If the awaiter result pointer is non-null, we need to copy the result to there.
+ LLVMBasicBlockRef copy_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResult");
+ LLVMBasicBlockRef copy_end_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResultEnd");
+ LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, frame_ret_start + 1, "");
+ LLVMValueRef awaiter_ret_ptr = LLVMBuildLoad(g->builder, awaiter_ret_ptr_ptr, "");
+ LLVMValueRef zero_ptr = LLVMConstNull(LLVMTypeOf(awaiter_ret_ptr));
+ LLVMValueRef need_copy_bit = LLVMBuildICmp(g->builder, LLVMIntNE, awaiter_ret_ptr, zero_ptr, "");
+ LLVMBuildCondBr(g->builder, need_copy_bit, copy_block, copy_end_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, copy_block);
+ LLVMTypeRef ptr_u8 = LLVMPointerType(LLVMInt8Type(), 0);
+ LLVMValueRef dest_ptr_casted = LLVMBuildBitCast(g->builder, awaiter_ret_ptr, ptr_u8, "");
+ LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, g->cur_ret_ptr, ptr_u8, "");
+ bool is_volatile = false;
+ uint32_t abi_align = get_abi_alignment(g, ret_type);
+ LLVMValueRef byte_count_val = LLVMConstInt(usize_type_ref, type_size(g, ret_type), false);
+ ZigLLVMBuildMemCpy(g->builder,
+ dest_ptr_casted, abi_align,
+ src_ptr_casted, abi_align, byte_count_val, is_volatile);
+ LLVMBuildBr(g->builder, copy_end_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, copy_end_block);
+ if (codegen_fn_has_err_ret_tracing_arg(g, ret_type)) {
+ LLVMValueRef awaiter_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
+ frame_index_trace_arg(g, ret_type) + 1, "");
+ LLVMValueRef dest_trace_ptr = LLVMBuildLoad(g->builder, awaiter_trace_ptr_ptr, "");
+ LLVMValueRef my_err_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope);
+ LLVMValueRef args[] = { dest_trace_ptr, my_err_trace_val };
+ ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2,
+ get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
+ }
+ }
+
+ // Resume the caller by tail calling them.
+ ZigType *any_frame_type = get_any_frame_type(g, ret_type);
+ LLVMValueRef their_frame_ptr = LLVMBuildIntToPtr(g->builder, prev_val, get_llvm_type(g, any_frame_type), "");
+ LLVMValueRef call_inst = gen_resume(g, nullptr, their_frame_ptr, ResumeIdReturn);
+ set_tail_call_if_appropriate(g, call_inst);
+ LLVMBuildRetVoid(g->builder);
}
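
gen_async_return() above replaces the old suspend-at-return machinery with a single atomic handshake on the @awaiter word. The sketch below models that handshake with std::atomic standing in for gen_maybe_atomic_op(); the names and the panic call are illustrative, but the three-way branch mirrors the switch in the generated IR.

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

using usize = uintptr_t;
constexpr usize NO_AWAITER = 0;
constexpr usize ALL_ONES = ~static_cast<usize>(0);

// Returning async function: XOR the awaiter word with all-ones and branch
// on the previous value, as the switch in gen_async_return() does.
static void async_return(std::atomic<usize> &awaiter_word) {
    usize prev = awaiter_word.fetch_xor(ALL_ONES, std::memory_order_acquire);
    if (prev == NO_AWAITER) {
        // No awaiter yet; the word now reads all-ones, so a later await
        // takes its early-return path. We simply return (EarlyReturn block).
        return;
    }
    if (prev == ALL_ONES) {
        // The word was already flipped: this function returned twice.
        fprintf(stderr, "async function returned twice\n");
        abort();
    }
    // prev holds the awaiter's frame address: copy the result through
    // @result_ptr_awaiter, merge the error trace if present, then
    // tail-resume that frame with ResumeIdReturn (ResumeThem block).
    (void)prev;
}
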
static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrInstructionReturn *instruction) {
if (fn_is_async(g->cur_fn)) {
- LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
- ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type;
- bool ret_type_has_bits = type_has_bits(ret_type);
-
- if (ir_want_runtime_safety(g, &instruction->base)) {
- LLVMValueRef new_resume_index = LLVMConstAllOnes(usize_type_ref);
- LLVMBuildStore(g->builder, new_resume_index, g->cur_async_resume_index_ptr);
- }
-
- if (ret_type_has_bits) {
- // If the awaiter result pointer is non-null, we need to copy the result to there.
- LLVMBasicBlockRef copy_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResult");
- LLVMBasicBlockRef copy_end_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResultEnd");
- LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, frame_ret_start + 1, "");
- LLVMValueRef awaiter_ret_ptr = LLVMBuildLoad(g->builder, awaiter_ret_ptr_ptr, "");
- LLVMValueRef zero_ptr = LLVMConstNull(LLVMTypeOf(awaiter_ret_ptr));
- LLVMValueRef need_copy_bit = LLVMBuildICmp(g->builder, LLVMIntNE, awaiter_ret_ptr, zero_ptr, "");
- LLVMBuildCondBr(g->builder, need_copy_bit, copy_block, copy_end_block);
-
- LLVMPositionBuilderAtEnd(g->builder, copy_block);
- LLVMTypeRef ptr_u8 = LLVMPointerType(LLVMInt8Type(), 0);
- LLVMValueRef dest_ptr_casted = LLVMBuildBitCast(g->builder, awaiter_ret_ptr, ptr_u8, "");
- LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, g->cur_ret_ptr, ptr_u8, "");
- bool is_volatile = false;
- uint32_t abi_align = get_abi_alignment(g, ret_type);
- LLVMValueRef byte_count_val = LLVMConstInt(usize_type_ref, type_size(g, ret_type), false);
- ZigLLVMBuildMemCpy(g->builder,
- dest_ptr_casted, abi_align,
- src_ptr_casted, abi_align, byte_count_val, is_volatile);
- LLVMBuildBr(g->builder, copy_end_block);
-
- LLVMPositionBuilderAtEnd(g->builder, copy_end_block);
- }
-
- // We need to resume the caller by tail calling them.
- ZigType *any_frame_type = get_any_frame_type(g, ret_type);
- LLVMValueRef one = LLVMConstInt(usize_type_ref, 1, false);
- LLVMValueRef mask_val = LLVMConstNot(one);
- LLVMValueRef masked_prev_val = LLVMBuildAnd(g->builder, get_cur_async_prev_val(g), mask_val, "");
- LLVMValueRef their_frame_ptr = LLVMBuildIntToPtr(g->builder, masked_prev_val,
- get_llvm_type(g, any_frame_type), "");
- LLVMValueRef call_inst = gen_resume(g, nullptr, their_frame_ptr, ResumeIdReturn, nullptr);
- set_tail_call_if_appropriate(g, call_inst);
- LLVMBuildRetVoid(g->builder);
-
- g->cur_is_after_return = false;
-
+ gen_async_return(g, instruction);
return nullptr;
}
+
if (want_first_arg_sret(g, &g->cur_fn->type_entry->data.fn.fn_type_id)) {
if (instruction->operand == nullptr) {
LLVMBuildRetVoid(g->builder);
@@ -3893,6 +3837,8 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
// even if prefix_arg_err_ret_stack is true, let the async function do its own
// initialization.
} else {
+ // async function called as a normal function
+
frame_result_loc = ir_llvm_value(g, instruction->frame_result_loc);
awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, ""); // caller's own frame pointer
if (ret_has_bits) {
@@ -3912,7 +3858,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
if (prefix_arg_err_ret_stack) {
LLVMValueRef err_ret_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc,
- frame_index_trace_arg(g, src_return_type), "");
+ frame_index_trace_arg(g, src_return_type) + 1, "");
LLVMValueRef my_err_ret_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope);
LLVMBuildStore(g->builder, my_err_ret_trace_val, err_ret_trace_ptr_ptr);
}
@@ -4018,7 +3964,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
}
if (instruction->is_async) {
- gen_resume(g, fn_val, frame_result_loc, ResumeIdCall, nullptr);
+ gen_resume(g, fn_val, frame_result_loc, ResumeIdCall);
if (instruction->new_stack != nullptr) {
return frame_result_loc;
}
@@ -4028,7 +3974,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
LLVMBasicBlockRef call_bb = gen_suspend_begin(g, "CallResume");
- LLVMValueRef call_inst = gen_resume(g, fn_val, frame_result_loc, ResumeIdCall, nullptr);
+ LLVMValueRef call_inst = gen_resume(g, fn_val, frame_result_loc, ResumeIdCall);
set_tail_call_if_appropriate(g, call_inst);
LLVMBuildRetVoid(g->builder);
@@ -4744,8 +4690,7 @@ static LLVMValueRef ir_render_error_return_trace(CodeGen *g, IrExecutable *execu
{
LLVMValueRef cur_err_ret_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope);
if (cur_err_ret_trace_val == nullptr) {
- ZigType *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(g);
- return LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type));
+ return LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type(g)));
}
return cur_err_ret_trace_val;
}
@@ -5505,60 +5450,6 @@ static LLVMValueRef ir_render_suspend_finish(CodeGen *g, IrExecutable *executabl
return nullptr;
}
-static LLVMValueRef ir_render_cancel(CodeGen *g, IrExecutable *executable, IrInstructionCancel *instruction) {
- LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
- LLVMValueRef zero = LLVMConstNull(usize_type_ref);
- LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref);
- LLVMValueRef one = LLVMConstInt(usize_type_ref, 1, false);
- src_assert(instruction->frame->value.type->id == ZigTypeIdAnyFrame, instruction->base.source_node);
- ZigType *result_type = instruction->frame->value.type->data.any_frame.result_type;
-
- LLVMValueRef target_frame_ptr = ir_llvm_value(g, instruction->frame);
- LLVMBasicBlockRef resume_bb = gen_suspend_begin(g, "CancelResume");
-
- // supply null for the awaiter return pointer (no copy needed)
- if (type_has_bits(result_type)) {
- LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_ret_start + 1, "");
- LLVMBuildStore(g->builder, LLVMConstNull(LLVMGetElementType(LLVMTypeOf(awaiter_ret_ptr_ptr))),
- awaiter_ret_ptr_ptr);
- }
-
- // supply null for the error return trace pointer
- if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) {
- LLVMValueRef err_ret_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr,
- frame_index_trace_arg(g, result_type), "");
- LLVMBuildStore(g->builder, LLVMConstNull(LLVMGetElementType(LLVMTypeOf(err_ret_trace_ptr_ptr))),
- err_ret_trace_ptr_ptr);
- }
-
- LLVMValueRef awaiter_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, "");
- LLVMValueRef awaiter_ored_val = LLVMBuildOr(g->builder, awaiter_val, one, "");
- LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_awaiter_index, "");
-
- LLVMValueRef prev_val = gen_maybe_atomic_op(g, LLVMAtomicRMWBinOpXchg, awaiter_ptr, awaiter_ored_val,
- LLVMAtomicOrderingRelease);
-
- LLVMBasicBlockRef complete_suspend_block = LLVMAppendBasicBlock(g->cur_fn_val, "CancelSuspend");
- LLVMBasicBlockRef early_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "EarlyReturn");
-
- LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, prev_val, resume_bb, 2);
- LLVMAddCase(switch_instr, zero, complete_suspend_block);
- LLVMAddCase(switch_instr, all_ones, early_return_block);
-
- LLVMPositionBuilderAtEnd(g->builder, complete_suspend_block);
- LLVMBuildRetVoid(g->builder);
-
- LLVMPositionBuilderAtEnd(g->builder, early_return_block);
- LLVMValueRef call_inst = gen_resume(g, nullptr, target_frame_ptr, ResumeIdAwaitEarlyReturn, awaiter_ored_val);
- set_tail_call_if_appropriate(g, call_inst);
- LLVMBuildRetVoid(g->builder);
-
- LLVMPositionBuilderAtEnd(g->builder, resume_bb);
- gen_assert_resume_id(g, &instruction->base, ResumeIdReturn, PanicMsgIdResumedACancelingFn, nullptr);
-
- return nullptr;
-}
-
static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInstructionAwaitGen *instruction) {
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
LLVMValueRef zero = LLVMConstNull(usize_type_ref);
@@ -5568,8 +5459,9 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst
// Prepare to be suspended
LLVMBasicBlockRef resume_bb = gen_suspend_begin(g, "AwaitResume");
+ LLVMBasicBlockRef end_bb = LLVMAppendBasicBlock(g->cur_fn_val, "AwaitEnd");
- // At this point resuming the function will do the correct thing.
+ // At this point resuming the function will continue from resume_bb.
// This code is as if it is running inside the suspend block.
// supply the awaiter return pointer
@@ -5591,15 +5483,15 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst
LLVMValueRef my_err_ret_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope);
assert(my_err_ret_trace_val != nullptr);
LLVMValueRef err_ret_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr,
- frame_index_trace_arg(g, result_type), "");
+ frame_index_trace_arg(g, result_type) + 1, "");
LLVMBuildStore(g->builder, my_err_ret_trace_val, err_ret_trace_ptr_ptr);
}
// caller's own frame pointer
LLVMValueRef awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, "");
LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_awaiter_index, "");
- LLVMValueRef prev_val = LLVMBuildAtomicRMW(g->builder, LLVMAtomicRMWBinOpXchg, awaiter_ptr, awaiter_init_val,
- LLVMAtomicOrderingRelease, g->is_single_threaded);
+ LLVMValueRef prev_val = gen_maybe_atomic_op(g, LLVMAtomicRMWBinOpXchg, awaiter_ptr, awaiter_init_val,
+ LLVMAtomicOrderingRelease);
LLVMBasicBlockRef bad_await_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadAwait");
LLVMBasicBlockRef complete_suspend_block = LLVMAppendBasicBlock(g->cur_fn_val, "CompleteSuspend");
@@ -5615,20 +5507,42 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst
LLVMPositionBuilderAtEnd(g->builder, bad_await_block);
gen_assertion(g, PanicMsgIdBadAwait, &instruction->base);
- // Early return: The async function has already completed, but it is suspending before setting the result,
- // populating the error return trace if applicable, and running the defers.
- // Tail resume it now, so that it can complete.
- LLVMPositionBuilderAtEnd(g->builder, early_return_block);
- LLVMValueRef call_inst = gen_resume(g, nullptr, target_frame_ptr, ResumeIdAwaitEarlyReturn, awaiter_init_val);
- set_tail_call_if_appropriate(g, call_inst);
- LLVMBuildRetVoid(g->builder);
-
// Rely on the target to resume us from suspension.
LLVMPositionBuilderAtEnd(g->builder, complete_suspend_block);
LLVMBuildRetVoid(g->builder);
+ // Early return: The async function has already completed. We must copy the result and
+ // the error return trace if applicable.
+ LLVMPositionBuilderAtEnd(g->builder, early_return_block);
+ if (type_has_bits(result_type) && result_loc != nullptr) {
+ LLVMValueRef their_result_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_ret_start, "");
+ LLVMValueRef their_result_ptr = LLVMBuildLoad(g->builder, their_result_ptr_ptr, "");
+ LLVMTypeRef ptr_u8 = LLVMPointerType(LLVMInt8Type(), 0);
+ LLVMValueRef dest_ptr_casted = LLVMBuildBitCast(g->builder, result_loc, ptr_u8, "");
+ LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, their_result_ptr, ptr_u8, "");
+ bool is_volatile = false;
+ uint32_t abi_align = get_abi_alignment(g, result_type);
+ LLVMValueRef byte_count_val = LLVMConstInt(usize_type_ref, type_size(g, result_type), false);
+ ZigLLVMBuildMemCpy(g->builder,
+ dest_ptr_casted, abi_align,
+ src_ptr_casted, abi_align, byte_count_val, is_volatile);
+ }
+ if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) {
+ LLVMValueRef their_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr,
+ frame_index_trace_arg(g, result_type), "");
+ LLVMValueRef src_trace_ptr = LLVMBuildLoad(g->builder, their_trace_ptr_ptr, "");
+ LLVMValueRef dest_trace_ptr = get_cur_err_ret_trace_val(g, instruction->base.scope);
+ LLVMValueRef args[] = { dest_trace_ptr, src_trace_ptr };
+ ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2,
+ get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
+ }
+ LLVMBuildBr(g->builder, end_bb);
+
LLVMPositionBuilderAtEnd(g->builder, resume_bb);
gen_assert_resume_id(g, &instruction->base, ResumeIdReturn, PanicMsgIdResumedAnAwaitingFn, nullptr);
+ LLVMBuildBr(g->builder, end_bb);
+
+ LLVMPositionBuilderAtEnd(g->builder, end_bb);
if (type_has_bits(result_type) && result_loc != nullptr) {
return get_handle_value(g, result_loc, result_type, ptr_result_type);
}
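
The await side above performs the mirror-image exchange: it publishes its own frame address in the target's @awaiter word and branches on the previous value. A hedged sketch follows, with the result copy reduced to a memcpy and the bad-await panic standing in for gen_assertion():

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>

using usize = uintptr_t;
constexpr usize NOT_DONE = 0;
constexpr usize ALL_ONES = ~static_cast<usize>(0);

// Returns true if the result is already available, false if the awaiter
// must suspend and wait to be resumed with ResumeIdReturn.
static bool await_frame(std::atomic<usize> &awaiter_word, usize my_frame_addr,
                        void *my_result, const void *their_result, size_t result_size) {
    usize prev = awaiter_word.exchange(my_frame_addr, std::memory_order_release);
    if (prev == NOT_DONE) {
        // Callee still running (CompleteSuspend): it will write the result
        // through the pointers published before this exchange and resume us.
        return false;
    }
    if (prev != ALL_ONES) {
        // Some other frame already awaited this one (BadAwait).
        fprintf(stderr, "bad await\n");
        abort();
    }
    // Early return: the callee already finished, so copy its result and
    // merge its error return trace ourselves, then continue (AwaitEnd).
    if (my_result != nullptr && result_size != 0)
        memcpy(my_result, their_result, result_size);
    return true;
}
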
@@ -5640,7 +5554,7 @@ static LLVMValueRef ir_render_resume(CodeGen *g, IrExecutable *executable, IrIns
ZigType *frame_type = instruction->frame->value.type;
assert(frame_type->id == ZigTypeIdAnyFrame);
- gen_resume(g, nullptr, frame, ResumeIdManual, nullptr);
+ gen_resume(g, nullptr, frame, ResumeIdManual);
return nullptr;
}
@@ -5651,18 +5565,6 @@ static LLVMValueRef ir_render_frame_size(CodeGen *g, IrExecutable *executable,
return gen_frame_size(g, fn_val);
}
-static LLVMValueRef ir_render_test_cancel_requested(CodeGen *g, IrExecutable *executable,
- IrInstructionTestCancelRequested *instruction)
-{
- if (!fn_is_async(g->cur_fn))
- return LLVMConstInt(LLVMInt1Type(), 0, false);
- if (g->cur_is_after_return) {
- return LLVMBuildTrunc(g->builder, get_cur_async_prev_val(g), LLVMInt1Type(), "");
- } else {
- zig_panic("TODO");
- }
-}
-
static LLVMValueRef ir_render_spill_begin(CodeGen *g, IrExecutable *executable,
IrInstructionSpillBegin *instruction)
{
@@ -5798,8 +5700,6 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdDeclVarGen:
return ir_render_decl_var(g, executable, (IrInstructionDeclVarGen *)instruction);
- case IrInstructionIdReturnBegin:
- return ir_render_return_begin(g, executable, (IrInstructionReturnBegin *)instruction);
case IrInstructionIdReturn:
return ir_render_return(g, executable, (IrInstructionReturn *)instruction);
case IrInstructionIdBinOp:
@@ -5918,8 +5818,6 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_align_cast(g, executable, (IrInstructionAlignCast *)instruction);
case IrInstructionIdErrorReturnTrace:
return ir_render_error_return_trace(g, executable, (IrInstructionErrorReturnTrace *)instruction);
- case IrInstructionIdCancel:
- return ir_render_cancel(g, executable, (IrInstructionCancel *)instruction);
case IrInstructionIdAtomicRmw:
return ir_render_atomic_rmw(g, executable, (IrInstructionAtomicRmw *)instruction);
case IrInstructionIdAtomicLoad:
@@ -5952,8 +5850,6 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_frame_size(g, executable, (IrInstructionFrameSizeGen *)instruction);
case IrInstructionIdAwaitGen:
return ir_render_await(g, executable, (IrInstructionAwaitGen *)instruction);
- case IrInstructionIdTestCancelRequested:
- return ir_render_test_cancel_requested(g, executable, (IrInstructionTestCancelRequested *)instruction);
case IrInstructionIdSpillBegin:
return ir_render_spill_begin(g, executable, (IrInstructionSpillBegin *)instruction);
case IrInstructionIdSpillEnd:
@@ -7060,9 +6956,9 @@ static void do_code_gen(CodeGen *g) {
ZigType *array_type = get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count);
err_ret_array_val = build_alloca(g, array_type, "error_return_trace_addresses", get_abi_alignment(g, array_type));
- // populate g->stack_trace_type
- (void)get_ptr_to_stack_trace_type(g);
- g->cur_err_ret_trace_val_stack = build_alloca(g, g->stack_trace_type, "error_return_trace", get_abi_alignment(g, g->stack_trace_type));
+ (void)get_llvm_type(g, get_stack_trace_type(g));
+ g->cur_err_ret_trace_val_stack = build_alloca(g, get_stack_trace_type(g), "error_return_trace",
+ get_abi_alignment(g, g->stack_trace_type));
} else {
g->cur_err_ret_trace_val_stack = nullptr;
}
@@ -7204,18 +7100,12 @@ static void do_code_gen(CodeGen *g) {
LLVMValueRef cur_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, frame_ret_start, "");
g->cur_ret_ptr = LLVMBuildLoad(g->builder, cur_ret_ptr_ptr, "");
}
- if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) {
- uint32_t trace_field_index = frame_index_trace_arg(g, fn_type_id->return_type);
- g->cur_err_ret_trace_val_arg = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, trace_field_index, "");
- }
uint32_t trace_field_index_stack = UINT32_MAX;
if (codegen_fn_has_err_ret_tracing_stack(g, fn_table_entry, true)) {
trace_field_index_stack = frame_index_trace_stack(g, fn_type_id);
g->cur_err_ret_trace_val_stack = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
trace_field_index_stack, "");
}
- g->cur_async_prev_val_field_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
- frame_prev_val_index, "");
LLVMValueRef resume_index = LLVMBuildLoad(g->builder, resume_index_ptr, "");
LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, resume_index, bad_resume_block, 4);
@@ -7227,6 +7117,13 @@ static void do_code_gen(CodeGen *g) {
g->cur_resume_block_count += 1;
LLVMPositionBuilderAtEnd(g->builder, entry_block->llvm_block);
if (trace_field_index_stack != UINT32_MAX) {
+ if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) {
+ LLVMValueRef trace_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
+ frame_index_trace_arg(g, fn_type_id->return_type), "");
+ LLVMValueRef zero_ptr = LLVMConstNull(LLVMGetElementType(LLVMTypeOf(trace_ptr_ptr)));
+ LLVMBuildStore(g->builder, zero_ptr, trace_ptr_ptr);
+ }
+
LLVMValueRef trace_field_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
trace_field_index_stack, "");
LLVMValueRef trace_field_addrs = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
@@ -7273,8 +7170,6 @@ static void do_code_gen(CodeGen *g) {
LLVMDumpModule(g->module);
}
- // in release mode, we're sooooo confident that we've generated correct ir,
- // that we skip the verify module step in order to get better performance.
#ifndef NDEBUG
char *error = nullptr;
LLVMVerifyModule(g->module, LLVMAbortProcessAction, &error);
@@ -10157,6 +10052,11 @@ bool codegen_fn_has_err_ret_tracing_arg(CodeGen *g, ZigType *return_type) {
}
bool codegen_fn_has_err_ret_tracing_stack(CodeGen *g, ZigFn *fn, bool is_async) {
- return g->have_err_ret_tracing && fn->calls_or_awaits_errorable_fn &&
- (is_async || !codegen_fn_has_err_ret_tracing_arg(g, fn->type_entry->data.fn.fn_type_id.return_type));
+ if (is_async) {
+ return g->have_err_ret_tracing && (fn->calls_or_awaits_errorable_fn ||
+ codegen_fn_has_err_ret_tracing_arg(g, fn->type_entry->data.fn.fn_type_id.return_type));
+ } else {
+ return g->have_err_ret_tracing && fn->calls_or_awaits_errorable_fn &&
+ !codegen_fn_has_err_ret_tracing_arg(g, fn->type_entry->data.fn.fn_type_id.return_type);
+ }
}
diff --git a/src/ir.cpp b/src/ir.cpp
index 3564435ddd..3e80fad270 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -526,10 +526,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionReturn *) {
return IrInstructionIdReturn;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionReturnBegin *) {
- return IrInstructionIdReturnBegin;
-}
-
static constexpr IrInstructionId ir_instruction_id(IrInstructionCast *) {
return IrInstructionIdCast;
}
@@ -974,10 +970,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionErrorUnion *) {
return IrInstructionIdErrorUnion;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCancel *) {
- return IrInstructionIdCancel;
-}
-
static constexpr IrInstructionId ir_instruction_id(IrInstructionAtomicRmw *) {
return IrInstructionIdAtomicRmw;
}
@@ -1062,10 +1054,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionResume *) {
return IrInstructionIdResume;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionTestCancelRequested *) {
- return IrInstructionIdTestCancelRequested;
-}
-
static constexpr IrInstructionId ir_instruction_id(IrInstructionSpillBegin *) {
return IrInstructionIdSpillBegin;
}
@@ -1138,18 +1126,6 @@ static IrInstruction *ir_build_return(IrBuilder *irb, Scope *scope, AstNode *sou
return &return_instruction->base;
}
-static IrInstruction *ir_build_return_begin(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *operand)
-{
- IrInstructionReturnBegin *return_instruction = ir_build_instruction<IrInstructionReturnBegin>(irb, scope, source_node);
- return_instruction->operand = operand;
-
- ir_ref_instruction(operand, irb->current_basic_block);
-
- return &return_instruction->base;
-}
-
-
static IrInstruction *ir_build_const_void(IrBuilder *irb, Scope *scope, AstNode *source_node) {
IrInstructionConst *const_instruction = ir_build_instruction<IrInstructionConst>(irb, scope, source_node);
const_instruction->base.value.type = irb->codegen->builtin_types.entry_void;
@@ -3284,16 +3260,6 @@ static IrInstruction *ir_build_suspend_finish(IrBuilder *irb, Scope *scope, AstN
return &instruction->base;
}
-static IrInstruction *ir_build_cancel(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *frame) {
- IrInstructionCancel *instruction = ir_build_instruction<IrInstructionCancel>(irb, scope, source_node);
- instruction->base.value.type = irb->codegen->builtin_types.entry_void;
- instruction->frame = frame;
-
- ir_ref_instruction(frame, irb->current_basic_block);
-
- return &instruction->base;
-}
-
static IrInstruction *ir_build_await_src(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *frame, ResultLoc *result_loc)
{
@@ -3331,13 +3297,6 @@ static IrInstruction *ir_build_resume(IrBuilder *irb, Scope *scope, AstNode *sou
return &instruction->base;
}
-static IrInstruction *ir_build_test_cancel_requested(IrBuilder *irb, Scope *scope, AstNode *source_node) {
- IrInstructionTestCancelRequested *instruction = ir_build_instruction<IrInstructionTestCancelRequested>(irb, scope, source_node);
- instruction->base.value.type = irb->codegen->builtin_types.entry_bool;
-
- return &instruction->base;
-}
-
static IrInstructionSpillBegin *ir_build_spill_begin(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *operand, SpillId spill_id)
{
@@ -3532,7 +3491,6 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
}
ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, return_value));
- return_value = ir_build_return_begin(irb, scope, node, return_value);
size_t defer_counts[2];
ir_count_defers(irb, scope, outer_scope, defer_counts);
@@ -3545,49 +3503,40 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
return result;
}
bool should_inline = ir_should_inline(irb->exec, scope);
- bool need_test_cancel = !should_inline && have_err_defers;
IrBasicBlock *err_block = ir_create_basic_block(irb, scope, "ErrRetErr");
- IrBasicBlock *normal_defers_block = ir_create_basic_block(irb, scope, "Defers");
- IrBasicBlock *ok_block = need_test_cancel ?
- ir_create_basic_block(irb, scope, "ErrRetOk") : normal_defers_block;
- IrBasicBlock *all_defers_block = have_err_defers ? ir_create_basic_block(irb, scope, "ErrDefers") : normal_defers_block;
+ IrBasicBlock *ok_block = ir_create_basic_block(irb, scope, "ErrRetOk");
+
+ if (!have_err_defers) {
+ ir_gen_defers_for_block(irb, scope, outer_scope, false);
+ }
IrInstruction *is_err = ir_build_test_err_src(irb, scope, node, return_value, false, true);
- IrInstruction *force_comptime = ir_build_const_bool(irb, scope, node, should_inline);
- IrInstruction *err_is_comptime;
+ IrInstruction *is_comptime;
if (should_inline) {
- err_is_comptime = force_comptime;
+ is_comptime = ir_build_const_bool(irb, scope, node, should_inline);
} else {
- err_is_comptime = ir_build_test_comptime(irb, scope, node, is_err);
+ is_comptime = ir_build_test_comptime(irb, scope, node, is_err);
}
- ir_mark_gen(ir_build_cond_br(irb, scope, node, is_err, err_block, ok_block, err_is_comptime));
+ ir_mark_gen(ir_build_cond_br(irb, scope, node, is_err, err_block, ok_block, is_comptime));
IrBasicBlock *ret_stmt_block = ir_create_basic_block(irb, scope, "RetStmt");
ir_set_cursor_at_end_and_append_block(irb, err_block);
+ if (have_err_defers) {
+ ir_gen_defers_for_block(irb, scope, outer_scope, true);
+ }
if (irb->codegen->have_err_ret_tracing && !should_inline) {
ir_build_save_err_ret_addr(irb, scope, node);
}
- ir_build_br(irb, scope, node, all_defers_block, err_is_comptime);
-
- if (need_test_cancel) {
- ir_set_cursor_at_end_and_append_block(irb, ok_block);
- IrInstruction *is_canceled = ir_build_test_cancel_requested(irb, scope, node);
- ir_mark_gen(ir_build_cond_br(irb, scope, node, is_canceled,
- all_defers_block, normal_defers_block, force_comptime));
- }
+ ir_build_br(irb, scope, node, ret_stmt_block, is_comptime);
- if (all_defers_block != normal_defers_block) {
- ir_set_cursor_at_end_and_append_block(irb, all_defers_block);
- ir_gen_defers_for_block(irb, scope, outer_scope, true);
- ir_build_br(irb, scope, node, ret_stmt_block, force_comptime);
+ ir_set_cursor_at_end_and_append_block(irb, ok_block);
+ if (have_err_defers) {
+ ir_gen_defers_for_block(irb, scope, outer_scope, false);
}
-
- ir_set_cursor_at_end_and_append_block(irb, normal_defers_block);
- ir_gen_defers_for_block(irb, scope, outer_scope, false);
- ir_build_br(irb, scope, node, ret_stmt_block, force_comptime);
+ ir_build_br(irb, scope, node, ret_stmt_block, is_comptime);
ir_set_cursor_at_end_and_append_block(irb, ret_stmt_block);
IrInstruction *result = ir_build_return(irb, scope, node, return_value);
@@ -3619,8 +3568,6 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, err_val));
IrInstructionSpillBegin *spill_begin = ir_build_spill_begin(irb, scope, node, err_val,
SpillIdRetErrCode);
- ir_build_return_begin(irb, scope, node, err_val);
- err_val = ir_build_spill_end(irb, scope, node, spill_begin);
ResultLocReturn *result_loc_ret = allocate<ResultLocReturn>(1);
result_loc_ret->base.id = ResultLocIdReturn;
ir_build_reset_result(irb, scope, node, &result_loc_ret->base);
@@ -3629,6 +3576,7 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
if (irb->codegen->have_err_ret_tracing && !should_inline) {
ir_build_save_err_ret_addr(irb, scope, node);
}
+ err_val = ir_build_spill_end(irb, scope, node, spill_begin);
IrInstruction *ret_inst = ir_build_return(irb, scope, node, err_val);
result_loc_ret->base.source_instruction = ret_inst;
}
@@ -3847,38 +3795,10 @@ static IrInstruction *ir_gen_block(IrBuilder *irb, Scope *parent_scope, AstNode
return result;
// no need for save_err_ret_addr because this cannot return error
- // but if it is a canceled async function we do need to run the errdefers
+ // only generate unconditional defers
ir_mark_gen(ir_build_add_implicit_return_type(irb, child_scope, block_node, result));
- result = ir_mark_gen(ir_build_return_begin(irb, child_scope, block_node, result));
-
- size_t defer_counts[2];
- ir_count_defers(irb, child_scope, outer_block_scope, defer_counts);
- bool have_err_defers = defer_counts[ReturnKindError] > 0;
- if (!have_err_defers) {
- // only generate unconditional defers
- ir_gen_defers_for_block(irb, child_scope, outer_block_scope, false);
- return ir_mark_gen(ir_build_return(irb, child_scope, result->source_node, result));
- }
- IrInstruction *is_canceled = ir_build_test_cancel_requested(irb, child_scope, block_node);
- IrBasicBlock *all_defers_block = ir_create_basic_block(irb, child_scope, "ErrDefers");
- IrBasicBlock *normal_defers_block = ir_create_basic_block(irb, child_scope, "Defers");
- IrBasicBlock *ret_stmt_block = ir_create_basic_block(irb, child_scope, "RetStmt");
- bool should_inline = ir_should_inline(irb->exec, child_scope);
- IrInstruction *errdefers_is_comptime = ir_build_const_bool(irb, child_scope, block_node,
- should_inline || !have_err_defers);
- ir_mark_gen(ir_build_cond_br(irb, child_scope, block_node, is_canceled,
- all_defers_block, normal_defers_block, errdefers_is_comptime));
-
- ir_set_cursor_at_end_and_append_block(irb, all_defers_block);
- ir_gen_defers_for_block(irb, child_scope, outer_block_scope, true);
- ir_build_br(irb, child_scope, block_node, ret_stmt_block, errdefers_is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, normal_defers_block);
ir_gen_defers_for_block(irb, child_scope, outer_block_scope, false);
- ir_build_br(irb, child_scope, block_node, ret_stmt_block, errdefers_is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, ret_stmt_block);
return ir_mark_gen(ir_build_return(irb, child_scope, result->source_node, result));
}
@@ -7930,31 +7850,6 @@ static IrInstruction *ir_gen_fn_proto(IrBuilder *irb, Scope *parent_scope, AstNo
return ir_build_fn_proto(irb, parent_scope, node, param_types, align_value, return_type, is_var_args);
}
-static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node) {
- assert(node->type == NodeTypeCancel);
-
- ZigFn *fn_entry = exec_fn_entry(irb->exec);
- if (!fn_entry) {
- add_node_error(irb->codegen, node, buf_sprintf("cancel outside function definition"));
- return irb->codegen->invalid_instruction;
- }
- ScopeSuspend *existing_suspend_scope = get_scope_suspend(scope);
- if (existing_suspend_scope) {
- if (!existing_suspend_scope->reported_err) {
- ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot cancel inside suspend block"));
- add_error_note(irb->codegen, msg, existing_suspend_scope->base.source_node, buf_sprintf("suspend block here"));
- existing_suspend_scope->reported_err = true;
- }
- return irb->codegen->invalid_instruction;
- }
-
- IrInstruction *operand = ir_gen_node_extra(irb, node->data.cancel_expr.expr, scope, LValPtr, nullptr);
- if (operand == irb->codegen->invalid_instruction)
- return irb->codegen->invalid_instruction;
-
- return ir_build_cancel(irb, scope, node, operand);
-}
-
static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node) {
assert(node->type == NodeTypeResume);
@@ -8149,8 +8044,6 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
return ir_lval_wrap(irb, scope, ir_gen_fn_proto(irb, scope, node), lval, result_loc);
case NodeTypeErrorSetDecl:
return ir_lval_wrap(irb, scope, ir_gen_err_set_decl(irb, scope, node), lval, result_loc);
- case NodeTypeCancel:
- return ir_lval_wrap(irb, scope, ir_gen_cancel(irb, scope, node), lval, result_loc);
case NodeTypeResume:
return ir_lval_wrap(irb, scope, ir_gen_resume(irb, scope, node), lval, result_loc);
case NodeTypeAwaitExpr:
@@ -8228,7 +8121,6 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
if (!instr_is_unreachable(result)) {
ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, result->source_node, result));
- result = ir_mark_gen(ir_build_return_begin(irb, scope, node, result));
// no need for save_err_ret_addr because this cannot return error
ir_mark_gen(ir_build_return(irb, scope, result->source_node, result));
}
@@ -8340,7 +8232,6 @@ static ConstExprValue *ir_exec_const_result(CodeGen *codegen, IrExecutable *exec
switch (instruction->id) {
case IrInstructionIdUnwrapErrPayload:
case IrInstructionIdUnionFieldPtr:
- case IrInstructionIdReturnBegin:
continue;
default:
break;
@@ -12745,17 +12636,17 @@ static IrInstruction *ir_analyze_instruction_add_implicit_return_type(IrAnalyze
return ir_const_void(ira, &instruction->base);
}
-static IrInstruction *ir_analyze_instruction_return_begin(IrAnalyze *ira, IrInstructionReturnBegin *instruction) {
+static IrInstruction *ir_analyze_instruction_return(IrAnalyze *ira, IrInstructionReturn *instruction) {
IrInstruction *operand = instruction->operand->child;
if (type_is_invalid(operand->value.type))
- return ira->codegen->invalid_instruction;
+ return ir_unreach_error(ira);
if (!instr_is_comptime(operand) && handle_is_ptr(ira->explicit_return_type)) {
// result location mechanism took care of it.
- IrInstruction *result = ir_build_return_begin(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, operand);
- copy_const_val(&result->value, &operand->value, true);
- return result;
+ IrInstruction *result = ir_build_return(&ira->new_irb, instruction->base.scope,
+ instruction->base.source_node, nullptr);
+ result->value.type = ira->codegen->builtin_types.entry_unreachable;
+ return ir_finish_anal(ira, result);
}
IrInstruction *casted_operand = ir_implicit_cast(ira, operand, ira->explicit_return_type);
@@ -12777,38 +12668,6 @@ static IrInstruction *ir_analyze_instruction_return_begin(IrAnalyze *ira, IrInst
return ir_unreach_error(ira);
}
- IrInstruction *result = ir_build_return_begin(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, casted_operand);
- copy_const_val(&result->value, &casted_operand->value, true);
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_return(IrAnalyze *ira, IrInstructionReturn *instruction) {
- IrInstruction *operand = instruction->operand->child;
- if (type_is_invalid(operand->value.type))
- return ir_unreach_error(ira);
-
- if (!instr_is_comptime(operand) && handle_is_ptr(ira->explicit_return_type)) {
- // result location mechanism took care of it.
- IrInstruction *result = ir_build_return(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, nullptr);
- result->value.type = ira->codegen->builtin_types.entry_unreachable;
- return ir_finish_anal(ira, result);
- }
-
- // This cast might have been already done from IrInstructionReturnBegin but it also
- // might not have, in the case of `try`.
- IrInstruction *casted_operand = ir_implicit_cast(ira, operand, ira->explicit_return_type);
- if (type_is_invalid(casted_operand->value.type)) {
- AstNode *source_node = ira->explicit_return_type_source_node;
- if (source_node != nullptr) {
- ErrorMsg *msg = ira->codegen->errors.last();
- add_error_note(ira->codegen, msg, source_node,
- buf_sprintf("return type declared here"));
- }
- return ir_unreach_error(ira);
- }
-
IrInstruction *result = ir_build_return(&ira->new_irb, instruction->base.scope,
instruction->base.source_node, casted_operand);
result->value.type = ira->codegen->builtin_types.entry_unreachable;
@@ -14540,8 +14399,8 @@ static bool exec_has_err_ret_trace(CodeGen *g, IrExecutable *exec) {
static IrInstruction *ir_analyze_instruction_error_return_trace(IrAnalyze *ira,
IrInstructionErrorReturnTrace *instruction)
{
+ ZigType *ptr_to_stack_trace_type = get_pointer_to_type(ira->codegen, get_stack_trace_type(ira->codegen), false);
if (instruction->optional == IrInstructionErrorReturnTrace::Null) {
- ZigType *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(ira->codegen);
ZigType *optional_type = get_optional_type(ira->codegen, ptr_to_stack_trace_type);
if (!exec_has_err_ret_trace(ira->codegen, ira->new_irb.exec)) {
IrInstruction *result = ir_const(ira, &instruction->base, optional_type);
@@ -14559,7 +14418,7 @@ static IrInstruction *ir_analyze_instruction_error_return_trace(IrAnalyze *ira,
assert(ira->codegen->have_err_ret_tracing);
IrInstruction *new_instruction = ir_build_error_return_trace(&ira->new_irb, instruction->base.scope,
instruction->base.source_node, instruction->optional);
- new_instruction->value.type = get_ptr_to_stack_trace_type(ira->codegen);
+ new_instruction->value.type = ptr_to_stack_trace_type;
return new_instruction;
}
}
@@ -15800,6 +15659,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
if (impl_fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) {
parent_fn_entry->inferred_async_node = fn_ref->source_node;
+ parent_fn_entry->inferred_async_fn = impl_fn;
}
IrInstructionCallGen *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base,
@@ -15923,6 +15783,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
if (fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) {
parent_fn_entry->inferred_async_node = fn_ref->source_node;
+ parent_fn_entry->inferred_async_fn = fn_entry;
}
IrInstruction *result_loc;
@@ -24702,21 +24563,6 @@ static IrInstruction *analyze_frame_ptr_to_anyframe_T(IrAnalyze *ira, IrInstruct
return casted_frame;
}
-static IrInstruction *ir_analyze_instruction_cancel(IrAnalyze *ira, IrInstructionCancel *instruction) {
- IrInstruction *frame = analyze_frame_ptr_to_anyframe_T(ira, &instruction->base, instruction->frame->child);
- if (type_is_invalid(frame->value.type))
- return ira->codegen->invalid_instruction;
-
- ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
- ir_assert(fn_entry != nullptr, &instruction->base);
-
- if (fn_entry->inferred_async_node == nullptr) {
- fn_entry->inferred_async_node = instruction->base.source_node;
- }
-
- return ir_build_cancel(&ira->new_irb, instruction->base.scope, instruction->base.source_node, frame);
-}
-
static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstructionAwaitSrc *instruction) {
IrInstruction *frame = analyze_frame_ptr_to_anyframe_T(ira, &instruction->base, instruction->frame->child);
if (type_is_invalid(frame->value.type))
@@ -24772,15 +24618,6 @@ static IrInstruction *ir_analyze_instruction_resume(IrAnalyze *ira, IrInstructio
return ir_build_resume(&ira->new_irb, instruction->base.scope, instruction->base.source_node, casted_frame);
}
-static IrInstruction *ir_analyze_instruction_test_cancel_requested(IrAnalyze *ira,
- IrInstructionTestCancelRequested *instruction)
-{
- if (ir_should_inline(ira->new_irb.exec, instruction->base.scope)) {
- return ir_const_bool(ira, &instruction->base, false);
- }
- return ir_build_test_cancel_requested(&ira->new_irb, instruction->base.scope, instruction->base.source_node);
-}
-
static IrInstruction *ir_analyze_instruction_spill_begin(IrAnalyze *ira, IrInstructionSpillBegin *instruction) {
if (ir_should_inline(ira->new_irb.exec, instruction->base.scope))
return ir_const_void(ira, &instruction->base);
@@ -24848,8 +24685,6 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
case IrInstructionIdAwaitGen:
zig_unreachable();
- case IrInstructionIdReturnBegin:
- return ir_analyze_instruction_return_begin(ira, (IrInstructionReturnBegin *)instruction);
case IrInstructionIdReturn:
return ir_analyze_instruction_return(ira, (IrInstructionReturn *)instruction);
case IrInstructionIdConst:
@@ -25070,8 +24905,6 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_error_return_trace(ira, (IrInstructionErrorReturnTrace *)instruction);
case IrInstructionIdErrorUnion:
return ir_analyze_instruction_error_union(ira, (IrInstructionErrorUnion *)instruction);
- case IrInstructionIdCancel:
- return ir_analyze_instruction_cancel(ira, (IrInstructionCancel *)instruction);
case IrInstructionIdAtomicRmw:
return ir_analyze_instruction_atomic_rmw(ira, (IrInstructionAtomicRmw *)instruction);
case IrInstructionIdAtomicLoad:
@@ -25114,8 +24947,6 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_resume(ira, (IrInstructionResume *)instruction);
case IrInstructionIdAwaitSrc:
return ir_analyze_instruction_await(ira, (IrInstructionAwaitSrc *)instruction);
- case IrInstructionIdTestCancelRequested:
- return ir_analyze_instruction_test_cancel_requested(ira, (IrInstructionTestCancelRequested *)instruction);
case IrInstructionIdSpillBegin:
return ir_analyze_instruction_spill_begin(ira, (IrInstructionSpillBegin *)instruction);
case IrInstructionIdSpillEnd:
@@ -25209,7 +25040,6 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdStorePtr:
case IrInstructionIdCallSrc:
case IrInstructionIdCallGen:
- case IrInstructionIdReturnBegin:
case IrInstructionIdReturn:
case IrInstructionIdUnreachable:
case IrInstructionIdSetCold:
@@ -25235,7 +25065,6 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdPtrType:
case IrInstructionIdSetAlignStack:
case IrInstructionIdExport:
- case IrInstructionIdCancel:
case IrInstructionIdSaveErrRetAddr:
case IrInstructionIdAddImplicitReturnType:
case IrInstructionIdAtomicRmw:
@@ -25355,7 +25184,6 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdHasDecl:
case IrInstructionIdAllocaSrc:
case IrInstructionIdAllocaGen:
- case IrInstructionIdTestCancelRequested:
case IrInstructionIdSpillEnd:
return false;
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index 63f3711266..7580f19059 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -64,12 +64,6 @@ static void ir_print_other_block(IrPrint *irp, IrBasicBlock *bb) {
}
}
-static void ir_print_return_begin(IrPrint *irp, IrInstructionReturnBegin *instruction) {
- fprintf(irp->f, "@returnBegin(");
- ir_print_other_instruction(irp, instruction->operand);
- fprintf(irp->f, ")");
-}
-
static void ir_print_return(IrPrint *irp, IrInstructionReturn *instruction) {
fprintf(irp->f, "return ");
ir_print_other_instruction(irp, instruction->operand);
@@ -1394,11 +1388,6 @@ static void ir_print_error_union(IrPrint *irp, IrInstructionErrorUnion *instruct
ir_print_other_instruction(irp, instruction->payload);
}
-static void ir_print_cancel(IrPrint *irp, IrInstructionCancel *instruction) {
- fprintf(irp->f, "cancel ");
- ir_print_other_instruction(irp, instruction->frame);
-}
-
static void ir_print_atomic_rmw(IrPrint *irp, IrInstructionAtomicRmw *instruction) {
fprintf(irp->f, "@atomicRmw(");
if (instruction->operand_type != nullptr) {
@@ -1549,10 +1538,6 @@ static void ir_print_await_gen(IrPrint *irp, IrInstructionAwaitGen *instruction)
fprintf(irp->f, ")");
}
-static void ir_print_test_cancel_requested(IrPrint *irp, IrInstructionTestCancelRequested *instruction) {
- fprintf(irp->f, "@testCancelRequested()");
-}
-
static void ir_print_spill_begin(IrPrint *irp, IrInstructionSpillBegin *instruction) {
fprintf(irp->f, "@spillBegin(");
ir_print_other_instruction(irp, instruction->operand);
@@ -1570,9 +1555,6 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
switch (instruction->id) {
case IrInstructionIdInvalid:
zig_unreachable();
- case IrInstructionIdReturnBegin:
- ir_print_return_begin(irp, (IrInstructionReturnBegin *)instruction);
- break;
case IrInstructionIdReturn:
ir_print_return(irp, (IrInstructionReturn *)instruction);
break;
@@ -1966,9 +1948,6 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdErrorUnion:
ir_print_error_union(irp, (IrInstructionErrorUnion *)instruction);
break;
- case IrInstructionIdCancel:
- ir_print_cancel(irp, (IrInstructionCancel *)instruction);
- break;
case IrInstructionIdAtomicRmw:
ir_print_atomic_rmw(irp, (IrInstructionAtomicRmw *)instruction);
break;
@@ -2047,9 +2026,6 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdAwaitGen:
ir_print_await_gen(irp, (IrInstructionAwaitGen *)instruction);
break;
- case IrInstructionIdTestCancelRequested:
- ir_print_test_cancel_requested(irp, (IrInstructionTestCancelRequested *)instruction);
- break;
case IrInstructionIdSpillBegin:
ir_print_spill_begin(irp, (IrInstructionSpillBegin *)instruction);
break;
diff --git a/src/parser.cpp b/src/parser.cpp
index 82312aacf3..afe5735a06 100644
--- a/src/parser.cpp
+++ b/src/parser.cpp
@@ -1167,7 +1167,6 @@ static AstNode *ast_parse_prefix_expr(ParseContext *pc) {
// <- AsmExpr
// / IfExpr
// / KEYWORD_break BreakLabel? Expr?
-// / KEYWORD_cancel Expr
// / KEYWORD_comptime Expr
// / KEYWORD_continue BreakLabel?
// / KEYWORD_resume Expr
@@ -1195,14 +1194,6 @@ static AstNode *ast_parse_primary_expr(ParseContext *pc) {
return res;
}
- Token *cancel = eat_token_if(pc, TokenIdKeywordCancel);
- if (cancel != nullptr) {
- AstNode *expr = ast_expect(pc, ast_parse_expr);
- AstNode *res = ast_create_node(pc, NodeTypeCancel, cancel);
- res->data.cancel_expr.expr = expr;
- return res;
- }
-
Token *comptime = eat_token_if(pc, TokenIdKeywordCompTime);
if (comptime != nullptr) {
AstNode *expr = ast_expect(pc, ast_parse_expr);
@@ -3035,9 +3026,6 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont
case NodeTypeErrorSetDecl:
visit_node_list(&node->data.err_set_decl.decls, visit, context);
break;
- case NodeTypeCancel:
- visit_field(&node->data.cancel_expr.expr, visit, context);
- break;
case NodeTypeResume:
visit_field(&node->data.resume_expr.expr, visit, context);
break;
diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp
index 38c6c7153e..84f3f2c0ec 100644
--- a/src/tokenizer.cpp
+++ b/src/tokenizer.cpp
@@ -114,7 +114,6 @@ static const struct ZigKeyword zig_keywords[] = {
{"async", TokenIdKeywordAsync},
{"await", TokenIdKeywordAwait},
{"break", TokenIdKeywordBreak},
- {"cancel", TokenIdKeywordCancel},
{"catch", TokenIdKeywordCatch},
{"comptime", TokenIdKeywordCompTime},
{"const", TokenIdKeywordConst},
@@ -1531,7 +1530,6 @@ const char * token_name(TokenId id) {
case TokenIdKeywordAwait: return "await";
case TokenIdKeywordResume: return "resume";
case TokenIdKeywordSuspend: return "suspend";
- case TokenIdKeywordCancel: return "cancel";
case TokenIdKeywordAlign: return "align";
case TokenIdKeywordAnd: return "and";
case TokenIdKeywordAnyFrame: return "anyframe";
diff --git a/src/tokenizer.hpp b/src/tokenizer.hpp
index 98bdfea907..ce62f5dc87 100644
--- a/src/tokenizer.hpp
+++ b/src/tokenizer.hpp
@@ -58,7 +58,6 @@ enum TokenId {
TokenIdKeywordAsync,
TokenIdKeywordAwait,
TokenIdKeywordBreak,
- TokenIdKeywordCancel,
TokenIdKeywordCatch,
TokenIdKeywordCompTime,
TokenIdKeywordConst,
diff --git a/std/event/fs.zig b/std/event/fs.zig
index 73a296ca3f..d6d8f2faef 100644
--- a/std/event/fs.zig
+++ b/std/event/fs.zig
@@ -1301,7 +1301,7 @@ async fn testFsWatch(loop: *Loop) !void {
const ev = try async watch.channel.get();
var ev_consumed = false;
- defer if (!ev_consumed) cancel ev;
+ defer if (!ev_consumed) await ev;
// overwrite line 2
const fd = try await try async openReadWrite(loop, file_path, File.default_mode);
diff --git a/std/event/future.zig b/std/event/future.zig
index e5f6d984ce..45bb7759c5 100644
--- a/std/event/future.zig
+++ b/std/event/future.zig
@@ -110,7 +110,7 @@ async fn testFuture(loop: *Loop) void {
const b_result = await b;
const result = a_result + b_result;
- cancel c;
+ await c;
testing.expect(result == 12);
}
diff --git a/std/event/group.zig b/std/event/group.zig
index 1fc4a61e93..f96b938f80 100644
--- a/std/event/group.zig
+++ b/std/event/group.zig
@@ -27,17 +27,6 @@ pub fn Group(comptime ReturnType: type) type {
};
}
- /// Cancel all the outstanding frames. Can be called even if wait was already called.
- pub fn deinit(self: *Self) void {
- while (self.frame_stack.pop()) |node| {
- cancel node.data;
- }
- while (self.alloc_stack.pop()) |node| {
- cancel node.data;
- self.lock.loop.allocator.destroy(node);
- }
- }
-
/// Add a frame to the group. Thread-safe.
pub fn add(self: *Self, handle: anyframe->ReturnType) (error{OutOfMemory}!void) {
const node = try self.lock.loop.allocator.create(Stack.Node);
@@ -64,13 +53,14 @@ pub fn Group(comptime ReturnType: type) type {
const held = self.lock.acquire();
defer held.release();
+ var result: ReturnType = {};
+
while (self.frame_stack.pop()) |node| {
if (Error == void) {
await node.data;
} else {
(await node.data) catch |err| {
- self.deinit();
- return err;
+ result = err;
};
}
}
@@ -81,11 +71,11 @@ pub fn Group(comptime ReturnType: type) type {
await handle;
} else {
(await handle) catch |err| {
- self.deinit();
- return err;
+ result = err;
};
}
}
+ return result;
}
};
}
diff --git a/std/event/net.zig b/std/event/net.zig
index 2a28a0ef93..bed665dcdc 100644
--- a/std/event/net.zig
+++ b/std/event/net.zig
@@ -54,7 +54,7 @@ pub const Server = struct {
self.listen_address = std.net.Address.initPosix(try os.getsockname(sockfd));
self.accept_frame = async Server.handler(self);
- errdefer cancel self.accept_frame.?;
+ errdefer await self.accept_frame.?;
self.listen_resume_node.handle = self.accept_frame.?;
try self.loop.linuxAddFd(sockfd, &self.listen_resume_node, os.EPOLLIN | os.EPOLLOUT | os.EPOLLET);
@@ -71,7 +71,7 @@ pub const Server = struct {
}
pub fn deinit(self: *Server) void {
- if (self.accept_frame) |accept_frame| cancel accept_frame;
+ if (self.accept_frame) |accept_frame| await accept_frame;
if (self.sockfd) |sockfd| os.close(sockfd);
}
@@ -274,13 +274,9 @@ test "listen on a port, send bytes, receive bytes" {
const self = @fieldParentPtr(Self, "tcp_server", tcp_server);
var socket = _socket; // TODO https://github.com/ziglang/zig/issues/1592
defer socket.close();
- // TODO guarantee elision of this allocation
const next_handler = errorableHandler(self, _addr, socket) catch |err| {
std.debug.panic("unable to handle connection: {}\n", err);
};
- suspend {
- cancel @frame();
- }
}
async fn errorableHandler(self: *Self, _addr: *const std.net.Address, _socket: File) !void {
const addr = _addr.*; // TODO https://github.com/ziglang/zig/issues/1592
diff --git a/std/zig/parse.zig b/std/zig/parse.zig
index 600178cdce..077870a9ca 100644
--- a/std/zig/parse.zig
+++ b/std/zig/parse.zig
@@ -814,7 +814,6 @@ fn parsePrefixExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
/// <- AsmExpr
/// / IfExpr
/// / KEYWORD_break BreakLabel? Expr?
-/// / KEYWORD_cancel Expr
/// / KEYWORD_comptime Expr
/// / KEYWORD_continue BreakLabel?
/// / KEYWORD_resume Expr
@@ -839,20 +838,6 @@ fn parsePrimaryExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node
return &node.base;
}
- if (eatToken(it, .Keyword_cancel)) |token| {
- const expr_node = try expectNode(arena, it, tree, parseExpr, AstError{
- .ExpectedExpr = AstError.ExpectedExpr{ .token = it.index },
- });
- const node = try arena.create(Node.PrefixOp);
- node.* = Node.PrefixOp{
- .base = Node{ .id = .PrefixOp },
- .op_token = token,
- .op = Node.PrefixOp.Op.Cancel,
- .rhs = expr_node,
- };
- return &node.base;
- }
-
if (eatToken(it, .Keyword_comptime)) |token| {
const expr_node = try expectNode(arena, it, tree, parseExpr, AstError{
.ExpectedExpr = AstError.ExpectedExpr{ .token = it.index },
diff --git a/std/zig/parser_test.zig b/std/zig/parser_test.zig
index 5f2a3934fd..c5c740353e 100644
--- a/std/zig/parser_test.zig
+++ b/std/zig/parser_test.zig
@@ -2115,10 +2115,10 @@ test "zig fmt: async functions" {
\\ await p;
\\}
\\
- \\test "suspend, resume, cancel" {
+ \\test "suspend, resume, await" {
\\ const p: anyframe = async testAsyncSeq();
\\ resume p;
- \\ cancel p;
+ \\ await p;
\\}
\\
);
diff --git a/std/zig/tokenizer.zig b/std/zig/tokenizer.zig
index 9de20c39f2..4d4ceb07db 100644
--- a/std/zig/tokenizer.zig
+++ b/std/zig/tokenizer.zig
@@ -21,7 +21,6 @@ pub const Token = struct {
Keyword{ .bytes = "await", .id = Id.Keyword_await },
Keyword{ .bytes = "break", .id = Id.Keyword_break },
Keyword{ .bytes = "catch", .id = Id.Keyword_catch },
- Keyword{ .bytes = "cancel", .id = Id.Keyword_cancel },
Keyword{ .bytes = "comptime", .id = Id.Keyword_comptime },
Keyword{ .bytes = "const", .id = Id.Keyword_const },
Keyword{ .bytes = "continue", .id = Id.Keyword_continue },
@@ -151,7 +150,6 @@ pub const Token = struct {
Keyword_async,
Keyword_await,
Keyword_break,
- Keyword_cancel,
Keyword_catch,
Keyword_comptime,
Keyword_const,
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index c07786d462..f53b1c9707 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -61,13 +61,15 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
cases.add(
"runtime-known async function called",
\\export fn entry() void {
+ \\ _ = async amain();
+ \\}
+ \\fn amain() void {
\\ var ptr = afunc;
\\ _ = ptr();
\\}
- \\
\\async fn afunc() void {}
,
- "tmp.zig:3:12: error: function is not comptime-known; @asyncCall required",
+ "tmp.zig:6:12: error: function is not comptime-known; @asyncCall required",
);
cases.add(
@@ -3388,7 +3390,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\
\\export fn entry() usize { return @sizeOf(@typeOf(Foo)); }
,
- "tmp.zig:5:18: error: unable to evaluate constant expression",
+ "tmp.zig:5:25: error: unable to evaluate constant expression",
"tmp.zig:2:12: note: called from here",
"tmp.zig:2:8: note: called from here",
);
diff --git a/test/stage1/behavior.zig b/test/stage1/behavior.zig
index dba43268e2..6ec1521048 100644
--- a/test/stage1/behavior.zig
+++ b/test/stage1/behavior.zig
@@ -41,7 +41,6 @@ comptime {
_ = @import("behavior/bugs/920.zig");
_ = @import("behavior/byteswap.zig");
_ = @import("behavior/byval_arg_var.zig");
- _ = @import("behavior/cancel.zig");
_ = @import("behavior/cast.zig");
_ = @import("behavior/const_slice_child.zig");
_ = @import("behavior/defer.zig");
diff --git a/test/stage1/behavior/async_fn.zig b/test/stage1/behavior/async_fn.zig
index 2d76b47244..ec0c9e52a6 100644
--- a/test/stage1/behavior/async_fn.zig
+++ b/test/stage1/behavior/async_fn.zig
@@ -150,7 +150,7 @@ test "coroutine suspend, resume" {
seq('a');
var f = async testAsyncSeq();
seq('c');
- cancel f;
+ await f;
seq('g');
}
@@ -271,7 +271,6 @@ test "async function with dot syntax" {
}
};
const p = async S.foo();
- // can't cancel in tests because they are non-async functions
expect(S.y == 2);
}
@@ -286,7 +285,7 @@ test "async fn pointer in a struct field" {
comptime expect(@typeOf(f) == anyframe->void);
expect(data == 2);
resume f;
- expect(data == 2);
+ expect(data == 4);
_ = async doTheAwait(f);
expect(data == 4);
}
@@ -394,7 +393,6 @@ async fn printTrace(p: anyframe->(anyerror!void)) void {
test "break from suspend" {
var my_result: i32 = 1;
const p = async testBreakFromSuspend(&my_result);
- // can't cancel here
std.testing.expect(my_result == 2);
}
async fn testBreakFromSuspend(my_result: *i32) void {
@@ -530,45 +528,6 @@ test "call async function which has struct return type" {
S.doTheTest();
}
-test "errdefers in scope get run when canceling async fn call" {
- const S = struct {
- var frame: anyframe = undefined;
- var x: u32 = 0;
-
- fn doTheTest() void {
- x = 9;
- _ = async cancelIt();
- resume frame;
- expect(x == 6);
-
- x = 9;
- _ = async awaitIt();
- resume frame;
- expect(x == 11);
- }
-
- fn cancelIt() void {
- var f = async func();
- cancel f;
- }
-
- fn awaitIt() void {
- var f = async func();
- await f;
- }
-
- fn func() void {
- defer x += 1;
- errdefer x /= 2;
- defer x += 1;
- suspend {
- frame = @frame();
- }
- }
- };
- S.doTheTest();
-}
-
test "pass string literal to async function" {
const S = struct {
var frame: anyframe = undefined;
@@ -590,7 +549,7 @@ test "pass string literal to async function" {
S.doTheTest();
}
-test "cancel inside an errdefer" {
+test "await inside an errdefer" {
const S = struct {
var frame: anyframe = undefined;
@@ -601,7 +560,7 @@ test "cancel inside an errdefer" {
fn amainWrap() !void {
var foo = async func();
- errdefer cancel foo;
+ errdefer await foo;
return error.Bad;
}
@@ -614,35 +573,6 @@ test "cancel inside an errdefer" {
S.doTheTest();
}
-test "combining try with errdefer cancel" {
- const S = struct {
- var frame: anyframe = undefined;
- var ok = false;
-
- fn doTheTest() void {
- _ = async amain();
- resume frame;
- expect(ok);
- }
-
- fn amain() !void {
- var f = async func("https://example.com/");
- errdefer cancel f;
-
- _ = try await f;
- }
-
- fn func(url: []const u8) ![]u8 {
- errdefer ok = true;
- frame = @frame();
- suspend;
- return error.Bad;
- }
-
- };
- S.doTheTest();
-}
-
test "try in an async function with error union and non-zero-bit payload" {
const S = struct {
var frame: anyframe = undefined;
@@ -730,14 +660,22 @@ fn testAsyncAwaitTypicalUsage(comptime simulate_fail_download: bool, comptime si
fn amain() !void {
const allocator = std.heap.direct_allocator; // TODO once we have the debug allocator, use that, so that this can detect leaks
var download_frame = async fetchUrl(allocator, "https://example.com/");
- errdefer cancel download_frame;
+ var download_awaited = false;
+ errdefer if (!download_awaited) {
+ if (await download_frame) |x| allocator.free(x) else |_| {}
+ };
var file_frame = async readFile(allocator, "something.txt");
- errdefer cancel file_frame;
+ var file_awaited = false;
+ errdefer if (!file_awaited) {
+ if (await file_frame) |x| allocator.free(x) else |_| {}
+ };
+ download_awaited = true;
const download_text = try await download_frame;
defer allocator.free(download_text);
+ file_awaited = true;
const file_text = try await file_frame;
defer allocator.free(file_text);
diff --git a/test/stage1/behavior/cancel.zig b/test/stage1/behavior/cancel.zig
deleted file mode 100644
index 5dedb20159..0000000000
--- a/test/stage1/behavior/cancel.zig
+++ /dev/null
@@ -1,115 +0,0 @@
-const std = @import("std");
-const expect = std.testing.expect;
-
-var defer_f1: bool = false;
-var defer_f2: bool = false;
-var defer_f3: bool = false;
-var f3_frame: anyframe = undefined;
-
-test "cancel forwards" {
- _ = async atest1();
- resume f3_frame;
-}
-
-fn atest1() void {
- const p = async f1();
- cancel &p;
- expect(defer_f1);
- expect(defer_f2);
- expect(defer_f3);
-}
-
-async fn f1() void {
- defer {
- defer_f1 = true;
- }
- var f2_frame = async f2();
- await f2_frame;
-}
-
-async fn f2() void {
- defer {
- defer_f2 = true;
- }
- f3();
-}
-
-async fn f3() void {
- f3_frame = @frame();
- defer {
- defer_f3 = true;
- }
- suspend;
-}
-
-var defer_b1: bool = false;
-var defer_b2: bool = false;
-var defer_b3: bool = false;
-var defer_b4: bool = false;
-
-test "cancel backwards" {
- var b1_frame = async b1();
- resume b4_handle;
- _ = async awaitAFrame(&b1_frame);
- expect(defer_b1);
- expect(defer_b2);
- expect(defer_b3);
- expect(defer_b4);
-}
-
-async fn b1() void {
- defer {
- defer_b1 = true;
- }
- b2();
-}
-
-var b4_handle: anyframe->void = undefined;
-
-async fn b2() void {
- const b3_handle = async b3();
- resume b4_handle;
- defer {
- defer_b2 = true;
- }
- const value = await b3_handle;
- expect(value == 1234);
-}
-
-async fn b3() i32 {
- defer {
- defer_b3 = true;
- }
- b4();
- return 1234;
-}
-
-async fn b4() void {
- defer {
- defer_b4 = true;
- }
- suspend {
- b4_handle = @frame();
- }
- suspend;
-}
-
-fn awaitAFrame(f: anyframe->void) void {
- await f;
-}
-
-test "cancel on a non-pointer" {
- const S = struct {
- fn doTheTest() void {
- _ = async atest();
- }
- fn atest() void {
- var f = async func();
- cancel f;
- }
- fn func() void {
- suspend;
- }
- };
- S.doTheTest();
-}
--
cgit v1.2.3