From e5d032982e7a7b76e08341a0a3d9e287c6ef7c94 Mon Sep 17 00:00:00 2001
From: Michael Dusan
Date: Thu, 18 Jul 2019 13:38:11 -0400
Subject: closes #2916
---
src/ir.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'src/ir.cpp')
diff --git a/src/ir.cpp b/src/ir.cpp
index 5193a63ec4..579875dc3c 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -14597,7 +14597,7 @@ static IrInstruction *ir_analyze_array_mult(IrAnalyze *ira, IrInstructionBinOp *
for (uint64_t x = 0; x < mult_amt; x += 1) {
for (uint64_t y = 0; y < old_array_len; y += 1) {
copy_const_val(&out_val->data.x_array.data.s_none.elements[i],
- &array_val->data.x_array.data.s_none.elements[y], true);
+ &array_val->data.x_array.data.s_none.elements[y], false);
i += 1;
}
}
--
cgit v1.2.3
From af8661405b908c0abfc191501a8ad1a59a54e86a Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 19 Jul 2019 16:56:44 -0400
Subject: fix usingnamespace
It used to be that usingnamespace was only allowed at top level. This
made it OK to put the state inside the AST node data structure. However,
now usingnamespace can occur inside any aggregate data structure, and
therefore the state must be in the TopLevelDeclaration rather than in
the AST node.
There were two other problems with the usingnamespace implementation:
* It was passing the wrong destination ScopeDecl, so it could cause an
incorrect error such as "import of file outside package path".
* When doing `usingnamespace` on a file that already had
`pub usingnamespace` in it would "steal" the usingnamespace, causing
incorrect "use of undeclared identifier" errors in the target file.
closes #2632
closes #2580
---
src/all_types.hpp | 16 +-
src/analyze.cpp | 265 +++++++++++++++++---------------
src/analyze.hpp | 2 -
src/ast_render.cpp | 8 +-
src/ir.cpp | 3 +-
src/parser.cpp | 4 +-
test/compile_errors.zig | 2 +-
test/stage1/behavior.zig | 1 +
test/stage1/behavior/usingnamespace.zig | 14 ++
9 files changed, 170 insertions(+), 145 deletions(-)
create mode 100644 test/stage1/behavior/usingnamespace.zig
(limited to 'src/ir.cpp')
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 79ade39ef7..a6b2bc51c3 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -366,6 +366,7 @@ enum TldId {
TldIdFn,
TldIdContainer,
TldIdCompTime,
+ TldIdUsingNamespace,
};
enum TldResolution {
@@ -413,6 +414,12 @@ struct TldCompTime {
Tld base;
};
+struct TldUsingNamespace {
+ Tld base;
+
+ ConstExprValue *using_namespace_value;
+};
+
struct TypeEnumField {
Buf *name;
BigInt value;
@@ -453,7 +460,7 @@ enum NodeType {
NodeTypeFieldAccessExpr,
NodeTypePtrDeref,
NodeTypeUnwrapOptional,
- NodeTypeUse,
+ NodeTypeUsingNamespace,
NodeTypeBoolLiteral,
NodeTypeNullLiteral,
NodeTypeUndefinedLiteral,
@@ -715,9 +722,6 @@ struct AstNodeArrayType {
struct AstNodeUsingNamespace {
VisibMod visib_mod;
AstNode *expr;
-
- TldResolution resolution;
- ConstExprValue *using_namespace_value;
};
struct AstNodeIfBoolExpr {
@@ -1745,8 +1749,6 @@ struct CodeGen {
ZigList<Tld *> resolve_queue;
size_t resolve_queue_index;
- ZigList<AstNode *> use_queue;
- size_t use_queue_index;
ZigList<TimeEvent> timing_events;
ZigList<AstNode *> tld_ref_source_node_stack;
ZigList<ZigFn *> inline_fns;
@@ -2005,7 +2007,7 @@ struct ScopeDecls {
Scope base;
HashMap<Buf *, Tld *, buf_hash, buf_eql_buf> decl_table;
- ZigList<AstNode *> use_decls;
+ ZigList<TldUsingNamespace *> use_decls;
AstNode *safety_set_node;
AstNode *fast_math_set_node;
ZigType *import;
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 797451c8f8..de4d64f5d6 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -28,6 +28,8 @@ static Error ATTRIBUTE_MUST_USE resolve_union_zero_bits(CodeGen *g, ZigType *uni
static Error ATTRIBUTE_MUST_USE resolve_union_alignment(CodeGen *g, ZigType *union_type);
static void analyze_fn_body(CodeGen *g, ZigFn *fn_table_entry);
static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_resolve_status);
+static void preview_use_decl(CodeGen *g, TldUsingNamespace *using_namespace, ScopeDecls *dest_decls_scope);
+static void resolve_use_decl(CodeGen *g, TldUsingNamespace *tld_using_namespace, ScopeDecls *dest_decls_scope);
static bool is_top_level_struct(ZigType *import) {
return import->id == ZigTypeIdStruct && import->data.structure.root_struct != nullptr;
@@ -2854,6 +2856,8 @@ static void add_top_level_decl(CodeGen *g, ScopeDecls *decls_scope, Tld *tld) {
add_node_error(g, tld->source_node, buf_sprintf("non-extern function has no body"));
return;
}
+ } else if (tld->id == TldIdUsingNamespace) {
+ g->resolve_queue.append(tld);
}
if (is_export) {
g->resolve_queue.append(tld);
@@ -2867,7 +2871,7 @@ static void add_top_level_decl(CodeGen *g, ScopeDecls *decls_scope, Tld *tld) {
}
}
- {
+ if (tld->name != nullptr) {
auto entry = decls_scope->decl_table.put_unique(tld->name, tld);
if (entry) {
Tld *other_tld = entry->value;
@@ -2875,9 +2879,7 @@ static void add_top_level_decl(CodeGen *g, ScopeDecls *decls_scope, Tld *tld) {
add_error_note(g, msg, other_tld->source_node, buf_sprintf("previous definition is here"));
return;
}
- }
- {
ZigType *type;
if (get_primitive_type(g, tld->name, &type) != ErrorPrimitiveTypeNotFound) {
add_node_error(g, tld->source_node,
@@ -2977,12 +2979,14 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) {
break;
}
- case NodeTypeUse:
- {
- g->use_queue.append(node);
- decls_scope->use_decls.append(node);
- break;
- }
+ case NodeTypeUsingNamespace: {
+ VisibMod visib_mod = node->data.using_namespace.visib_mod;
+ TldUsingNamespace *tld_using_namespace = allocate<TldUsingNamespace>(1);
+ init_tld(&tld_using_namespace->base, TldIdUsingNamespace, nullptr, visib_mod, node, &decls_scope->base);
+ add_top_level_decl(g, decls_scope, &tld_using_namespace->base);
+ decls_scope->use_decls.append(tld_using_namespace);
+ break;
+ }
case NodeTypeTestDecl:
preview_test_decl(g, node, decls_scope);
break;
@@ -3266,6 +3270,117 @@ static void resolve_decl_var(CodeGen *g, TldVar *tld_var) {
g->global_vars.append(tld_var);
}
+static void add_symbols_from_container(CodeGen *g, TldUsingNamespace *src_using_namespace,
+ TldUsingNamespace *dst_using_namespace, ScopeDecls* dest_decls_scope)
+{
+ if (src_using_namespace->base.resolution == TldResolutionUnresolved ||
+ src_using_namespace->base.resolution == TldResolutionResolving)
+ {
+ assert(src_using_namespace->base.parent_scope->id == ScopeIdDecls);
+ ScopeDecls *src_decls_scope = (ScopeDecls *)src_using_namespace->base.parent_scope;
+ preview_use_decl(g, src_using_namespace, src_decls_scope);
+ if (src_using_namespace != dst_using_namespace) {
+ resolve_use_decl(g, src_using_namespace, src_decls_scope);
+ }
+ }
+
+ ConstExprValue *use_expr = src_using_namespace->using_namespace_value;
+ if (type_is_invalid(use_expr->type)) {
+ dest_decls_scope->any_imports_failed = true;
+ return;
+ }
+
+ dst_using_namespace->base.resolution = TldResolutionOk;
+
+ assert(use_expr->special != ConstValSpecialRuntime);
+
+ // The source scope for the imported symbols
+ ScopeDecls *src_scope = get_container_scope(use_expr->data.x_type);
+ // The top-level container where the symbols are defined, it's used in the
+ // loop below in order to exclude the ones coming from an import statement
+ ZigType *src_import = get_scope_import(&src_scope->base);
+ assert(src_import != nullptr);
+
+ if (src_scope->any_imports_failed) {
+ dest_decls_scope->any_imports_failed = true;
+ }
+
+ auto it = src_scope->decl_table.entry_iterator();
+ for (;;) {
+ auto *entry = it.next();
+ if (!entry)
+ break;
+
+ Buf *target_tld_name = entry->key;
+ Tld *target_tld = entry->value;
+
+ if (target_tld->visib_mod == VisibModPrivate) {
+ continue;
+ }
+
+ if (target_tld->import != src_import) {
+ continue;
+ }
+
+ auto existing_entry = dest_decls_scope->decl_table.put_unique(target_tld_name, target_tld);
+ if (existing_entry) {
+ Tld *existing_decl = existing_entry->value;
+ if (existing_decl != target_tld) {
+ ErrorMsg *msg = add_node_error(g, dst_using_namespace->base.source_node,
+ buf_sprintf("import of '%s' overrides existing definition",
+ buf_ptr(target_tld_name)));
+ add_error_note(g, msg, existing_decl->source_node, buf_sprintf("previous definition here"));
+ add_error_note(g, msg, target_tld->source_node, buf_sprintf("imported definition here"));
+ }
+ }
+ }
+
+ for (size_t i = 0; i < src_scope->use_decls.length; i += 1) {
+ TldUsingNamespace *tld_using_namespace = src_scope->use_decls.at(i);
+ if (tld_using_namespace->base.visib_mod != VisibModPrivate)
+ add_symbols_from_container(g, tld_using_namespace, dst_using_namespace, dest_decls_scope);
+ }
+}
+
+static void resolve_use_decl(CodeGen *g, TldUsingNamespace *tld_using_namespace, ScopeDecls *dest_decls_scope) {
+ if (tld_using_namespace->base.resolution == TldResolutionOk ||
+ tld_using_namespace->base.resolution == TldResolutionInvalid)
+ {
+ return;
+ }
+ add_symbols_from_container(g, tld_using_namespace, tld_using_namespace, dest_decls_scope);
+}
+
+static void preview_use_decl(CodeGen *g, TldUsingNamespace *using_namespace, ScopeDecls *dest_decls_scope) {
+ if (using_namespace->base.resolution == TldResolutionOk ||
+ using_namespace->base.resolution == TldResolutionInvalid)
+ {
+ return;
+ }
+
+ using_namespace->base.resolution = TldResolutionResolving;
+ assert(using_namespace->base.source_node->type == NodeTypeUsingNamespace);
+ ConstExprValue *result = analyze_const_value(g, &dest_decls_scope->base,
+ using_namespace->base.source_node->data.using_namespace.expr, g->builtin_types.entry_type, nullptr);
+ using_namespace->using_namespace_value = result;
+
+ if (type_is_invalid(result->type)) {
+ dest_decls_scope->any_imports_failed = true;
+ using_namespace->base.resolution = TldResolutionInvalid;
+ using_namespace->using_namespace_value = &g->invalid_instruction->value;
+ return;
+ }
+
+ if (!is_container(result->data.x_type)) {
+ add_node_error(g, using_namespace->base.source_node,
+ buf_sprintf("expected struct, enum, or union; found '%s'", buf_ptr(&result->data.x_type->name)));
+ dest_decls_scope->any_imports_failed = true;
+ using_namespace->base.resolution = TldResolutionInvalid;
+ using_namespace->using_namespace_value = &g->invalid_instruction->value;
+ return;
+ }
+}
+
void resolve_top_level_decl(CodeGen *g, Tld *tld, AstNode *source_node) {
if (tld->resolution != TldResolutionUnresolved)
return;
@@ -3299,6 +3414,14 @@ void resolve_top_level_decl(CodeGen *g, Tld *tld, AstNode *source_node) {
resolve_decl_comptime(g, tld_comptime);
break;
}
+ case TldIdUsingNamespace: {
+ TldUsingNamespace *tld_using_namespace = (TldUsingNamespace *)tld;
+ assert(tld_using_namespace->base.parent_scope->id == ScopeIdDecls);
+ ScopeDecls *dest_decls_scope = (ScopeDecls *)tld_using_namespace->base.parent_scope;
+ preview_use_decl(g, tld_using_namespace, dest_decls_scope);
+ resolve_use_decl(g, tld_using_namespace, dest_decls_scope);
+ break;
+ }
}
tld->resolution = TldResolutionOk;
@@ -3308,10 +3431,10 @@ void resolve_top_level_decl(CodeGen *g, Tld *tld, AstNode *source_node) {
Tld *find_container_decl(CodeGen *g, ScopeDecls *decls_scope, Buf *name) {
// resolve all the using_namespace decls
for (size_t i = 0; i < decls_scope->use_decls.length; i += 1) {
- AstNode *use_decl_node = decls_scope->use_decls.at(i);
- if (use_decl_node->data.using_namespace.resolution == TldResolutionUnresolved) {
- preview_use_decl(g, use_decl_node, decls_scope);
- resolve_use_decl(g, use_decl_node, decls_scope);
+ TldUsingNamespace *tld_using_namespace = decls_scope->use_decls.at(i);
+ if (tld_using_namespace->base.resolution == TldResolutionUnresolved) {
+ preview_use_decl(g, tld_using_namespace, decls_scope);
+ resolve_use_decl(g, tld_using_namespace, decls_scope);
}
}
@@ -3752,110 +3875,6 @@ static void analyze_fn_body(CodeGen *g, ZigFn *fn_table_entry) {
analyze_fn_ir(g, fn_table_entry, return_type_node);
}
-static void add_symbols_from_container(CodeGen *g, AstNode *src_use_node, AstNode *dst_use_node, ScopeDecls* decls_scope) {
- if (src_use_node->data.using_namespace.resolution == TldResolutionUnresolved) {
- preview_use_decl(g, src_use_node, decls_scope);
- }
-
- ConstExprValue *use_expr = src_use_node->data.using_namespace.using_namespace_value;
- if (type_is_invalid(use_expr->type)) {
- decls_scope->any_imports_failed = true;
- return;
- }
-
- dst_use_node->data.using_namespace.resolution = TldResolutionOk;
-
- assert(use_expr->special != ConstValSpecialRuntime);
-
- // The source struct for the imported symbols
- ZigType *src_ty = use_expr->data.x_type;
- assert(src_ty);
-
- if (!is_container(src_ty)) {
- add_node_error(g, dst_use_node,
- buf_sprintf("expected struct, enum, or union; found '%s'", buf_ptr(&src_ty->name)));
- decls_scope->any_imports_failed = true;
- return;
- }
-
- // The source scope for the imported symbols
- ScopeDecls *src_scope = get_container_scope(src_ty);
- // The top-level container where the symbols are defined, it's used in the
- // loop below in order to exclude the ones coming from an import statement
- ZigType *src_import = get_scope_import(&src_scope->base);
- assert(src_import != nullptr);
-
- if (src_scope->any_imports_failed) {
- decls_scope->any_imports_failed = true;
- }
-
- auto it = src_scope->decl_table.entry_iterator();
- for (;;) {
- auto *entry = it.next();
- if (!entry)
- break;
-
- Buf *target_tld_name = entry->key;
- Tld *target_tld = entry->value;
-
- if (target_tld->visib_mod == VisibModPrivate) {
- continue;
- }
-
- if (target_tld->import != src_import) {
- continue;
- }
-
- auto existing_entry = decls_scope->decl_table.put_unique(target_tld_name, target_tld);
- if (existing_entry) {
- Tld *existing_decl = existing_entry->value;
- if (existing_decl != target_tld) {
- ErrorMsg *msg = add_node_error(g, dst_use_node,
- buf_sprintf("import of '%s' overrides existing definition",
- buf_ptr(target_tld_name)));
- add_error_note(g, msg, existing_decl->source_node, buf_sprintf("previous definition here"));
- add_error_note(g, msg, target_tld->source_node, buf_sprintf("imported definition here"));
- }
- }
- }
-
- for (size_t i = 0; i < src_scope->use_decls.length; i += 1) {
- AstNode *use_decl_node = src_scope->use_decls.at(i);
- if (use_decl_node->data.using_namespace.visib_mod != VisibModPrivate)
- add_symbols_from_container(g, use_decl_node, dst_use_node, decls_scope);
- }
-}
-
-void resolve_use_decl(CodeGen *g, AstNode *node, ScopeDecls *decls_scope) {
- assert(node->type == NodeTypeUse);
-
- if (node->data.using_namespace.resolution == TldResolutionOk ||
- node->data.using_namespace.resolution == TldResolutionInvalid)
- {
- return;
- }
- add_symbols_from_container(g, node, node, decls_scope);
-}
-
-void preview_use_decl(CodeGen *g, AstNode *node, ScopeDecls *decls_scope) {
- assert(node->type == NodeTypeUse);
-
- if (node->data.using_namespace.resolution == TldResolutionOk ||
- node->data.using_namespace.resolution == TldResolutionInvalid)
- {
- return;
- }
-
- node->data.using_namespace.resolution = TldResolutionResolving;
- ConstExprValue *result = analyze_const_value(g, &decls_scope->base,
- node->data.using_namespace.expr, g->builtin_types.entry_type, nullptr);
-
- if (type_is_invalid(result->type))
- decls_scope->any_imports_failed = true;
-
- node->data.using_namespace.using_namespace_value = result;
-}
-
ZigType *add_source_file(CodeGen *g, ZigPackage *package, Buf *resolved_path, Buf *source_code,
SourceKind source_kind)
{
@@ -3975,18 +3994,8 @@ ZigType *add_source_file(CodeGen *g, ZigPackage *package, Buf *resolved_path, Bu
void semantic_analyze(CodeGen *g) {
while (g->resolve_queue_index < g->resolve_queue.length ||
- g->fn_defs_index < g->fn_defs.length ||
- g->use_queue_index < g->use_queue.length)
+ g->fn_defs_index < g->fn_defs.length)
{
- for (; g->use_queue_index < g->use_queue.length; g->use_queue_index += 1) {
- AstNode *use_decl_node = g->use_queue.at(g->use_queue_index);
- // Get the top-level scope where `using_namespace` is used
- ScopeDecls *decls_scope = get_container_scope(use_decl_node->owner);
- if (use_decl_node->data.using_namespace.resolution == TldResolutionUnresolved) {
- preview_use_decl(g, use_decl_node, decls_scope);
- resolve_use_decl(g, use_decl_node, decls_scope);
- }
- }
for (; g->resolve_queue_index < g->resolve_queue.length; g->resolve_queue_index += 1) {
Tld *tld = g->resolve_queue.at(g->resolve_queue_index);
AstNode *source_node = nullptr;
diff --git a/src/analyze.hpp b/src/analyze.hpp
index a6ad92110e..b9e9f2df7d 100644
--- a/src/analyze.hpp
+++ b/src/analyze.hpp
@@ -86,8 +86,6 @@ bool is_array_ref(ZigType *type_entry);
bool is_container_ref(ZigType *type_entry);
bool is_valid_vector_elem_type(ZigType *elem_type);
void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node);
-void preview_use_decl(CodeGen *g, AstNode *node, ScopeDecls* decls_scope);
-void resolve_use_decl(CodeGen *g, AstNode *node, ScopeDecls* decls_scope);
ZigFn *scope_fn_entry(Scope *scope);
ZigPackage *scope_package(Scope *scope);
ZigType *get_scope_import(Scope *scope);
diff --git a/src/ast_render.cpp b/src/ast_render.cpp
index 154803f884..fe131ab65f 100644
--- a/src/ast_render.cpp
+++ b/src/ast_render.cpp
@@ -193,8 +193,8 @@ static const char *node_type_str(NodeType node_type) {
return "Symbol";
case NodeTypePrefixOpExpr:
return "PrefixOpExpr";
- case NodeTypeUse:
- return "Use";
+ case NodeTypeUsingNamespace:
+ return "UsingNamespace";
case NodeTypeBoolLiteral:
return "BoolLiteral";
case NodeTypeNullLiteral:
@@ -791,7 +791,7 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
AstNode *decls_node = node->data.container_decl.decls.at(decl_i);
render_node_grouped(ar, decls_node);
- if (decls_node->type == NodeTypeUse ||
+ if (decls_node->type == NodeTypeUsingNamespace ||
decls_node->type == NodeTypeVariableDeclaration ||
decls_node->type == NodeTypeFnProto)
{
@@ -1170,7 +1170,7 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
case NodeTypeParamDecl:
case NodeTypeTestDecl:
case NodeTypeStructField:
- case NodeTypeUse:
+ case NodeTypeUsingNamespace:
zig_panic("TODO more ast rendering");
}
}
diff --git a/src/ir.cpp b/src/ir.cpp
index 579875dc3c..be7a8e2e51 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -8430,7 +8430,7 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
switch (node->type) {
case NodeTypeStructValueField:
case NodeTypeParamDecl:
- case NodeTypeUse:
+ case NodeTypeUsingNamespace:
case NodeTypeSwitchProng:
case NodeTypeSwitchRange:
case NodeTypeStructField:
@@ -17847,6 +17847,7 @@ static IrInstruction *ir_analyze_decl_ref(IrAnalyze *ira, IrInstruction *source_
switch (tld->id) {
case TldIdContainer:
case TldIdCompTime:
+ case TldIdUsingNamespace:
zig_unreachable();
case TldIdVar:
{
diff --git a/src/parser.cpp b/src/parser.cpp
index 0783d4ec10..fe1f89ac92 100644
--- a/src/parser.cpp
+++ b/src/parser.cpp
@@ -676,7 +676,7 @@ static AstNode *ast_parse_top_level_decl(ParseContext *pc, VisibMod visib_mod) {
AstNode *expr = ast_expect(pc, ast_parse_expr);
expect_token(pc, TokenIdSemicolon);
- AstNode *res = ast_create_node(pc, NodeTypeUse, usingnamespace);
+ AstNode *res = ast_create_node(pc, NodeTypeUsingNamespace, usingnamespace);
res->data.using_namespace.visib_mod = visib_mod;
res->data.using_namespace.expr = expr;
return res;
@@ -2938,7 +2938,7 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont
case NodeTypeUnwrapOptional:
visit_field(&node->data.unwrap_optional.expr, visit, context);
break;
- case NodeTypeUse:
+ case NodeTypeUsingNamespace:
visit_field(&node->data.using_namespace.expr, visit, context);
break;
case NodeTypeBoolLiteral:
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index fd365235d8..40ce8d304b 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -234,7 +234,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
cases.add(
"usingnamespace with wrong type",
- \\use void;
+ \\usingnamespace void;
,
"tmp.zig:1:1: error: expected struct, enum, or union; found 'void'",
);
diff --git a/test/stage1/behavior.zig b/test/stage1/behavior.zig
index db8fdcf368..71af5586ed 100644
--- a/test/stage1/behavior.zig
+++ b/test/stage1/behavior.zig
@@ -93,6 +93,7 @@ comptime {
_ = @import("behavior/undefined.zig");
_ = @import("behavior/underscore.zig");
_ = @import("behavior/union.zig");
+ _ = @import("behavior/usingnamespace.zig");
_ = @import("behavior/var_args.zig");
_ = @import("behavior/vector.zig");
_ = @import("behavior/void.zig");
diff --git a/test/stage1/behavior/usingnamespace.zig b/test/stage1/behavior/usingnamespace.zig
new file mode 100644
index 0000000000..fb45a9392d
--- /dev/null
+++ b/test/stage1/behavior/usingnamespace.zig
@@ -0,0 +1,14 @@
+const std = @import("std");
+
+fn Foo(comptime T: type) type {
+ return struct {
+ usingnamespace T;
+ };
+}
+
+test "usingnamespace inside a generic struct" {
+ const std2 = Foo(std);
+ const testing2 = Foo(std.testing);
+ std2.testing.expect(true);
+ testing2.expect(true);
+}
--
cgit v1.2.3
From 54e716afdcb0609cfc42229ad925e6dc9b07a66f Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 27 Jun 2019 23:40:36 -0400
Subject: remove coroutines implementation and promise type
---
src-self-hosted/main.zig | 16 +-
src/all_types.hpp | 191 -----
src/analyze.cpp | 172 +---
src/analyze.hpp | 4 -
src/ast_render.cpp | 19 +-
src/codegen.cpp | 674 +---------------
src/ir.cpp | 1539 +-----------------------------------
src/ir_print.cpp | 190 +----
src/parser.cpp | 32 -
src/tokenizer.cpp | 2 -
src/tokenizer.hpp | 1 -
std/fmt.zig | 3 -
std/hash_map.zig | 2 +-
std/meta.zig | 4 +-
std/testing.zig | 1 -
test/stage1/behavior.zig | 6 +-
test/stage1/behavior/type_info.zig | 20 +-
17 files changed, 62 insertions(+), 2814 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index 8917809533..63ac47147d 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -466,7 +466,7 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Co
comp.link_objects = link_objects;
comp.start();
- const process_build_events_handle = try async processBuildEvents(comp, color);
+ // TODO const process_build_events_handle = try async processBuildEvents(comp, color);
defer cancel process_build_events_handle;
loop.run();
}
@@ -578,7 +578,7 @@ fn cmdLibC(allocator: *Allocator, args: []const []const u8) !void {
var zig_compiler = try ZigCompiler.init(&loop);
defer zig_compiler.deinit();
- const handle = try async findLibCAsync(&zig_compiler);
+ // TODO const handle = try async findLibCAsync(&zig_compiler);
defer cancel handle;
loop.run();
@@ -663,12 +663,12 @@ fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void {
defer loop.deinit();
var result: FmtError!void = undefined;
- const main_handle = try async asyncFmtMainChecked(
- &result,
- &loop,
- &flags,
- color,
- );
+ // TODO const main_handle = try async asyncFmtMainChecked(
+ // TODO &result,
+ // TODO &loop,
+ // TODO &flags,
+ // TODO color,
+ // TODO );
defer cancel main_handle;
loop.run();
return result;
diff --git a/src/all_types.hpp b/src/all_types.hpp
index a6b2bc51c3..7fe035ad1c 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -70,17 +70,6 @@ struct IrExecutable {
Scope *begin_scope;
ZigList<Tld *> tld_list;
- IrInstruction *coro_handle;
- IrInstruction *atomic_state_field_ptr; // this one is shared and in the promise
- IrInstruction *coro_result_ptr_field_ptr;
- IrInstruction *coro_result_field_ptr;
- IrInstruction *await_handle_var_ptr; // this one is where we put the one we extracted from the promise
- IrBasicBlock *coro_early_final;
- IrBasicBlock *coro_normal_final;
- IrBasicBlock *coro_suspend_block;
- IrBasicBlock *coro_final_cleanup_block;
- ZigVar *coro_allocator_var;
-
bool invalid;
bool is_inline;
bool is_generic_instantiation;
@@ -489,7 +478,6 @@ enum NodeType {
NodeTypeResume,
NodeTypeAwaitExpr,
NodeTypeSuspend,
- NodeTypePromiseType,
NodeTypeEnumLiteral,
};
@@ -522,7 +510,6 @@ struct AstNodeFnProto {
AstNode *section_expr;
bool auto_err_set;
- AstNode *async_allocator_type;
};
struct AstNodeFnDef {
@@ -657,7 +644,6 @@ struct AstNodeFnCallExpr {
bool is_builtin;
bool is_async;
bool seen; // used by @compileLog
- AstNode *async_allocator;
};
struct AstNodeArrayAccessExpr {
@@ -949,10 +935,6 @@ struct AstNodeSuspend {
AstNode *block;
};
-struct AstNodePromiseType {
- AstNode *payload_type; // can be NULL
-};
-
struct AstNodeEnumLiteral {
Token *period;
Token *identifier;
@@ -1018,7 +1000,6 @@ struct AstNode {
AstNodeResumeExpr resume_expr;
AstNodeAwaitExpr await_expr;
AstNodeSuspend suspend;
- AstNodePromiseType promise_type;
AstNodeEnumLiteral enum_literal;
} data;
};
@@ -1047,7 +1028,6 @@ struct FnTypeId {
bool is_var_args;
CallingConvention cc;
uint32_t alignment;
- ZigType *async_allocator_type;
};
uint32_t fn_type_id_hash(FnTypeId*);
@@ -1241,11 +1221,6 @@ struct ZigTypeBoundFn {
ZigType *fn_type;
};
-struct ZigTypePromise {
- // null if `promise` instead of `promise->T`
- ZigType *result_type;
-};
-
struct ZigTypeVector {
// The type must be a pointer, integer, or float
ZigType *elem_type;
@@ -1276,7 +1251,6 @@ enum ZigTypeId {
ZigTypeIdBoundFn,
ZigTypeIdArgTuple,
ZigTypeIdOpaque,
- ZigTypeIdPromise,
ZigTypeIdVector,
ZigTypeIdEnumLiteral,
};
@@ -1314,7 +1288,6 @@ struct ZigType {
ZigTypeUnion unionation;
ZigTypeFn fn;
ZigTypeBoundFn bound_fn;
- ZigTypePromise promise;
ZigTypeVector vector;
ZigTypeOpaque opaque;
} data;
@@ -1322,8 +1295,6 @@ struct ZigType {
// use these fields to make sure we don't duplicate type table entries for the same type
ZigType *pointer_parent[2]; // [0 - mut, 1 - const]
ZigType *optional_parent;
- ZigType *promise_parent;
- ZigType *promise_frame_parent;
// If we generate a constant name value for this type, we memoize it here.
// The type of this is array
ConstExprValue *cached_const_name_val;
@@ -1709,20 +1680,6 @@ struct CodeGen {
LLVMValueRef trap_fn_val;
LLVMValueRef return_address_fn_val;
LLVMValueRef frame_address_fn_val;
- LLVMValueRef coro_destroy_fn_val;
- LLVMValueRef coro_id_fn_val;
- LLVMValueRef coro_alloc_fn_val;
- LLVMValueRef coro_size_fn_val;
- LLVMValueRef coro_begin_fn_val;
- LLVMValueRef coro_suspend_fn_val;
- LLVMValueRef coro_end_fn_val;
- LLVMValueRef coro_free_fn_val;
- LLVMValueRef coro_resume_fn_val;
- LLVMValueRef coro_save_fn_val;
- LLVMValueRef coro_promise_fn_val;
- LLVMValueRef coro_alloc_helper_fn_val;
- LLVMValueRef coro_frame_fn_val;
- LLVMValueRef merge_err_ret_traces_fn_val;
LLVMValueRef add_error_return_trace_addr_fn_val;
LLVMValueRef stacksave_fn_val;
LLVMValueRef stackrestore_fn_val;
@@ -1797,7 +1754,6 @@ struct CodeGen {
ZigType *entry_var;
ZigType *entry_global_error_set;
ZigType *entry_arg_tuple;
- ZigType *entry_promise;
ZigType *entry_enum_literal;
} builtin_types;
ZigType *align_amt_type;
@@ -1985,7 +1941,6 @@ enum ScopeId {
ScopeIdSuspend,
ScopeIdFnDef,
ScopeIdCompTime,
- ScopeIdCoroPrelude,
ScopeIdRuntime,
};
@@ -2128,12 +2083,6 @@ struct ScopeFnDef {
ZigFn *fn_entry;
};
-// This scope is created to indicate that the code in the scope
-// is auto-generated coroutine prelude stuff.
-struct ScopeCoroPrelude {
- Scope base;
-};
-
// synchronized with code in define_builtin_compile_vars
enum AtomicOrder {
AtomicOrderUnordered,
@@ -2231,7 +2180,6 @@ enum IrInstructionId {
IrInstructionIdSetRuntimeSafety,
IrInstructionIdSetFloatMode,
IrInstructionIdArrayType,
- IrInstructionIdPromiseType,
IrInstructionIdSliceType,
IrInstructionIdGlobalAsm,
IrInstructionIdAsm,
@@ -2329,26 +2277,10 @@ enum IrInstructionId {
IrInstructionIdErrorReturnTrace,
IrInstructionIdErrorUnion,
IrInstructionIdCancel,
- IrInstructionIdGetImplicitAllocator,
- IrInstructionIdCoroId,
- IrInstructionIdCoroAlloc,
- IrInstructionIdCoroSize,
- IrInstructionIdCoroBegin,
- IrInstructionIdCoroAllocFail,
- IrInstructionIdCoroSuspend,
- IrInstructionIdCoroEnd,
- IrInstructionIdCoroFree,
- IrInstructionIdCoroResume,
- IrInstructionIdCoroSave,
- IrInstructionIdCoroPromise,
- IrInstructionIdCoroAllocHelper,
IrInstructionIdAtomicRmw,
IrInstructionIdAtomicLoad,
- IrInstructionIdPromiseResultType,
- IrInstructionIdAwaitBookkeeping,
IrInstructionIdSaveErrRetAddr,
IrInstructionIdAddImplicitReturnType,
- IrInstructionIdMergeErrRetTraces,
IrInstructionIdMarkErrRetTracePtr,
IrInstructionIdErrSetCast,
IrInstructionIdToBytes,
@@ -2606,7 +2538,6 @@ struct IrInstructionCallSrc {
IrInstruction **args;
ResultLoc *result_loc;
- IrInstruction *async_allocator;
IrInstruction *new_stack;
FnInline fn_inline;
bool is_async;
@@ -2622,7 +2553,6 @@ struct IrInstructionCallGen {
IrInstruction **args;
IrInstruction *result_loc;
- IrInstruction *async_allocator;
IrInstruction *new_stack;
FnInline fn_inline;
bool is_async;
@@ -2743,12 +2673,6 @@ struct IrInstructionPtrType {
bool is_allow_zero;
};
-struct IrInstructionPromiseType {
- IrInstruction base;
-
- IrInstruction *payload_type;
-};
-
struct IrInstructionSliceType {
IrInstruction base;
@@ -3178,7 +3102,6 @@ struct IrInstructionFnProto {
IrInstruction **param_types;
IrInstruction *align_value;
IrInstruction *return_type;
- IrInstruction *async_allocator_type_value;
bool is_var_args;
};
@@ -3414,89 +3337,6 @@ struct IrInstructionCancel {
IrInstruction *target;
};
-enum ImplicitAllocatorId {
- ImplicitAllocatorIdArg,
- ImplicitAllocatorIdLocalVar,
-};
-
-struct IrInstructionGetImplicitAllocator {
- IrInstruction base;
-
- ImplicitAllocatorId id;
-};
-
-struct IrInstructionCoroId {
- IrInstruction base;
-
- IrInstruction *promise_ptr;
-};
-
-struct IrInstructionCoroAlloc {
- IrInstruction base;
-
- IrInstruction *coro_id;
-};
-
-struct IrInstructionCoroSize {
- IrInstruction base;
-};
-
-struct IrInstructionCoroBegin {
- IrInstruction base;
-
- IrInstruction *coro_id;
- IrInstruction *coro_mem_ptr;
-};
-
-struct IrInstructionCoroAllocFail {
- IrInstruction base;
-
- IrInstruction *err_val;
-};
-
-struct IrInstructionCoroSuspend {
- IrInstruction base;
-
- IrInstruction *save_point;
- IrInstruction *is_final;
-};
-
-struct IrInstructionCoroEnd {
- IrInstruction base;
-};
-
-struct IrInstructionCoroFree {
- IrInstruction base;
-
- IrInstruction *coro_id;
- IrInstruction *coro_handle;
-};
-
-struct IrInstructionCoroResume {
- IrInstruction base;
-
- IrInstruction *awaiter_handle;
-};
-
-struct IrInstructionCoroSave {
- IrInstruction base;
-
- IrInstruction *coro_handle;
-};
-
-struct IrInstructionCoroPromise {
- IrInstruction base;
-
- IrInstruction *coro_handle;
-};
-
-struct IrInstructionCoroAllocHelper {
- IrInstruction base;
-
- IrInstruction *realloc_fn;
- IrInstruction *coro_size;
-};
-
struct IrInstructionAtomicRmw {
IrInstruction base;
@@ -3518,18 +3358,6 @@ struct IrInstructionAtomicLoad {
AtomicOrder resolved_ordering;
};
-struct IrInstructionPromiseResultType {
- IrInstruction base;
-
- IrInstruction *promise_type;
-};
-
-struct IrInstructionAwaitBookkeeping {
- IrInstruction base;
-
- IrInstruction *promise_result_type;
-};
-
struct IrInstructionSaveErrRetAddr {
IrInstruction base;
};
@@ -3540,14 +3368,6 @@ struct IrInstructionAddImplicitReturnType {
IrInstruction *value;
};
-struct IrInstructionMergeErrRetTraces {
- IrInstruction base;
-
- IrInstruction *coro_promise_ptr;
- IrInstruction *src_err_ret_trace_ptr;
- IrInstruction *dest_err_ret_trace_ptr;
-};
-
struct IrInstructionMarkErrRetTracePtr {
IrInstruction base;
@@ -3777,17 +3597,6 @@ static const size_t err_union_payload_index = 1;
// MUST BE A POWER OF TWO.
static const size_t stack_trace_ptr_count = 32;
-// these belong to the async function
-#define RETURN_ADDRESSES_FIELD_NAME "return_addresses"
-#define ERR_RET_TRACE_FIELD_NAME "err_ret_trace"
-#define RESULT_FIELD_NAME "result"
-#define ASYNC_REALLOC_FIELD_NAME "reallocFn"
-#define ASYNC_SHRINK_FIELD_NAME "shrinkFn"
-#define ATOMIC_STATE_FIELD_NAME "atomic_state"
-// these point to data belonging to the awaiter
-#define ERR_RET_TRACE_PTR_FIELD_NAME "err_ret_trace_ptr"
-#define RESULT_PTR_FIELD_NAME "result_ptr"
-
#define NAMESPACE_SEP_CHAR '.'
#define NAMESPACE_SEP_STR "."
diff --git a/src/analyze.cpp b/src/analyze.cpp
index de4d64f5d6..15e12caa8d 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -188,12 +188,6 @@ Scope *create_comptime_scope(CodeGen *g, AstNode *node, Scope *parent) {
return &scope->base;
}
-Scope *create_coro_prelude_scope(CodeGen *g, AstNode *node, Scope *parent) {
- ScopeCoroPrelude *scope = allocate(1);
- init_scope(g, &scope->base, ScopeIdCoroPrelude, node, parent);
- return &scope->base;
-}
-
ZigType *get_scope_import(Scope *scope) {
while (scope) {
if (scope->id == ScopeIdDecls) {
@@ -254,7 +248,6 @@ AstNode *type_decl_node(ZigType *type_entry) {
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
return nullptr;
}
@@ -307,7 +300,6 @@ bool type_is_resolved(ZigType *type_entry, ResolveStatus status) {
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
return true;
}
@@ -341,31 +333,6 @@ ZigType *get_smallest_unsigned_int_type(CodeGen *g, uint64_t x) {
return get_int_type(g, false, bits_needed_for_unsigned(x));
}
-ZigType *get_promise_type(CodeGen *g, ZigType *result_type) {
- if (result_type != nullptr && result_type->promise_parent != nullptr) {
- return result_type->promise_parent;
- } else if (result_type == nullptr && g->builtin_types.entry_promise != nullptr) {
- return g->builtin_types.entry_promise;
- }
-
- ZigType *entry = new_type_table_entry(ZigTypeIdPromise);
- entry->abi_size = g->builtin_types.entry_usize->abi_size;
- entry->size_in_bits = g->builtin_types.entry_usize->size_in_bits;
- entry->abi_align = g->builtin_types.entry_usize->abi_align;
- entry->data.promise.result_type = result_type;
- buf_init_from_str(&entry->name, "promise");
- if (result_type != nullptr) {
- buf_appendf(&entry->name, "->%s", buf_ptr(&result_type->name));
- }
-
- if (result_type != nullptr) {
- result_type->promise_parent = entry;
- } else if (result_type == nullptr) {
- g->builtin_types.entry_promise = entry;
- }
- return entry;
-}
-
static const char *ptr_len_to_star_str(PtrLen ptr_len) {
switch (ptr_len) {
case PtrLenSingle:
@@ -490,42 +457,6 @@ ZigType *get_pointer_to_type(CodeGen *g, ZigType *child_type, bool is_const) {
return get_pointer_to_type_extra(g, child_type, is_const, false, PtrLenSingle, 0, 0, 0, false);
}
-ZigType *get_promise_frame_type(CodeGen *g, ZigType *return_type) {
- if (return_type->promise_frame_parent != nullptr) {
- return return_type->promise_frame_parent;
- }
-
- ZigType *atomic_state_type = g->builtin_types.entry_usize;
- ZigType *result_ptr_type = get_pointer_to_type(g, return_type, false);
-
- ZigList field_names = {};
- field_names.append(ATOMIC_STATE_FIELD_NAME);
- field_names.append(RESULT_FIELD_NAME);
- field_names.append(RESULT_PTR_FIELD_NAME);
- if (g->have_err_ret_tracing) {
- field_names.append(ERR_RET_TRACE_PTR_FIELD_NAME);
- field_names.append(ERR_RET_TRACE_FIELD_NAME);
- field_names.append(RETURN_ADDRESSES_FIELD_NAME);
- }
-
- ZigList field_types = {};
- field_types.append(atomic_state_type);
- field_types.append(return_type);
- field_types.append(result_ptr_type);
- if (g->have_err_ret_tracing) {
- field_types.append(get_ptr_to_stack_trace_type(g));
- field_types.append(g->stack_trace_type);
- field_types.append(get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count));
- }
-
- assert(field_names.length == field_types.length);
- Buf *name = buf_sprintf("AsyncFramePromise(%s)", buf_ptr(&return_type->name));
- ZigType *entry = get_struct_type(g, buf_ptr(name), field_names.items, field_types.items, field_names.length);
-
- return_type->promise_frame_parent = entry;
- return entry;
-}
-
ZigType *get_optional_type(CodeGen *g, ZigType *child_type) {
if (child_type->optional_parent != nullptr) {
return child_type->optional_parent;
@@ -879,13 +810,8 @@ ZigType *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) {
// populate the name of the type
buf_resize(&fn_type->name, 0);
- if (fn_type->data.fn.fn_type_id.cc == CallingConventionAsync) {
- assert(fn_type_id->async_allocator_type != nullptr);
- buf_appendf(&fn_type->name, "async<%s> ", buf_ptr(&fn_type_id->async_allocator_type->name));
- } else {
- const char *cc_str = calling_convention_fn_type_str(fn_type->data.fn.fn_type_id.cc);
- buf_appendf(&fn_type->name, "%s", cc_str);
- }
+ const char *cc_str = calling_convention_fn_type_str(fn_type->data.fn.fn_type_id.cc);
+ buf_appendf(&fn_type->name, "%s", cc_str);
buf_appendf(&fn_type->name, "fn(");
for (size_t i = 0; i < fn_type_id->param_count; i += 1) {
FnTypeParamInfo *param_info = &fn_type_id->param_info[i];
@@ -998,14 +924,8 @@ ZigType *analyze_type_expr(CodeGen *g, Scope *scope, AstNode *node) {
ZigType *get_generic_fn_type(CodeGen *g, FnTypeId *fn_type_id) {
ZigType *fn_type = new_type_table_entry(ZigTypeIdFn);
buf_resize(&fn_type->name, 0);
- if (fn_type->data.fn.fn_type_id.cc == CallingConventionAsync) {
- const char *async_allocator_type_str = (fn_type->data.fn.fn_type_id.async_allocator_type == nullptr) ?
- "var" : buf_ptr(&fn_type_id->async_allocator_type->name);
- buf_appendf(&fn_type->name, "async(%s) ", async_allocator_type_str);
- } else {
- const char *cc_str = calling_convention_fn_type_str(fn_type->data.fn.fn_type_id.cc);
- buf_appendf(&fn_type->name, "%s", cc_str);
- }
+ const char *cc_str = calling_convention_fn_type_str(fn_type->data.fn.fn_type_id.cc);
+ buf_appendf(&fn_type->name, "%s", cc_str);
buf_appendf(&fn_type->name, "fn(");
size_t i = 0;
for (; i < fn_type_id->next_param_index; i += 1) {
@@ -1119,7 +1039,6 @@ static Error emit_error_unless_type_allowed_in_packed_struct(CodeGen *g, ZigType
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
- case ZigTypeIdPromise:
add_node_error(g, source_node,
buf_sprintf("type '%s' not allowed in packed struct; no guaranteed in-memory representation",
buf_ptr(&type_entry->name)));
@@ -1207,7 +1126,6 @@ bool type_allowed_in_extern(CodeGen *g, ZigType *type_entry) {
case ZigTypeIdErrorSet:
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
- case ZigTypeIdPromise:
case ZigTypeIdVoid:
return false;
case ZigTypeIdOpaque:
@@ -1378,7 +1296,6 @@ static ZigType *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_sc
case ZigTypeIdEnum:
case ZigTypeIdUnion:
case ZigTypeIdFn:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
switch (type_requires_comptime(g, type_entry)) {
case ReqCompTimeNo:
@@ -1474,7 +1391,6 @@ static ZigType *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_sc
case ZigTypeIdEnum:
case ZigTypeIdUnion:
case ZigTypeIdFn:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
switch (type_requires_comptime(g, fn_type_id.return_type)) {
case ReqCompTimeInvalid:
@@ -1487,16 +1403,6 @@ static ZigType *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_sc
break;
}
- if (fn_type_id.cc == CallingConventionAsync) {
- if (fn_proto->async_allocator_type == nullptr) {
- return get_generic_fn_type(g, &fn_type_id);
- }
- fn_type_id.async_allocator_type = analyze_type_expr(g, child_scope, fn_proto->async_allocator_type);
- if (type_is_invalid(fn_type_id.async_allocator_type)) {
- return g->builtin_types.entry_invalid;
- }
- }
-
return get_fn_type(g, &fn_type_id);
}
@@ -3039,7 +2945,6 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) {
case NodeTypeResume:
case NodeTypeAwaitExpr:
case NodeTypeSuspend:
- case NodeTypePromiseType:
case NodeTypeEnumLiteral:
zig_unreachable();
}
@@ -3091,7 +2996,6 @@ ZigType *validate_var_type(CodeGen *g, AstNode *source_node, ZigType *type_entry
case ZigTypeIdUnion:
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
return type_entry;
}
@@ -3591,7 +3495,6 @@ bool is_container(ZigType *type_entry) {
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
return false;
}
@@ -3648,7 +3551,6 @@ Error resolve_container_type(CodeGen *g, ZigType *type_entry) {
case ZigTypeIdInvalid:
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
zig_unreachable();
}
@@ -3658,13 +3560,11 @@ Error resolve_container_type(CodeGen *g, ZigType *type_entry) {
ZigType *get_src_ptr_type(ZigType *type) {
if (type->id == ZigTypeIdPointer) return type;
if (type->id == ZigTypeIdFn) return type;
- if (type->id == ZigTypeIdPromise) return type;
if (type->id == ZigTypeIdOptional) {
if (type->data.maybe.child_type->id == ZigTypeIdPointer) {
return type->data.maybe.child_type->data.pointer.allow_zero ? nullptr : type->data.maybe.child_type;
}
if (type->data.maybe.child_type->id == ZigTypeIdFn) return type->data.maybe.child_type;
- if (type->data.maybe.child_type->id == ZigTypeIdPromise) return type->data.maybe.child_type;
}
return nullptr;
}
@@ -3691,8 +3591,6 @@ uint32_t get_ptr_align(CodeGen *g, ZigType *type) {
// when getting the alignment of `?extern fn() void`.
// See http://lists.llvm.org/pipermail/llvm-dev/2018-September/126142.html
return (ptr_type->data.fn.fn_type_id.alignment == 0) ? 1 : ptr_type->data.fn.fn_type_id.alignment;
- } else if (ptr_type->id == ZigTypeIdPromise) {
- return get_coro_frame_align_bytes(g);
} else {
zig_unreachable();
}
@@ -3704,8 +3602,6 @@ bool get_ptr_const(ZigType *type) {
return ptr_type->data.pointer.is_const;
} else if (ptr_type->id == ZigTypeIdFn) {
return true;
- } else if (ptr_type->id == ZigTypeIdPromise) {
- return true;
} else {
zig_unreachable();
}
@@ -4102,7 +3998,6 @@ bool handle_is_ptr(ZigType *type_entry) {
case ZigTypeIdErrorSet:
case ZigTypeIdFn:
case ZigTypeIdEnum:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
return false;
case ZigTypeIdArray:
@@ -4142,7 +4037,6 @@ uint32_t fn_type_id_hash(FnTypeId *id) {
result += ((uint32_t)(id->cc)) * (uint32_t)3349388391;
result += id->is_var_args ? (uint32_t)1931444534 : 0;
result += hash_ptr(id->return_type);
- result += hash_ptr(id->async_allocator_type);
result += id->alignment * 0xd3b3f3e2;
for (size_t i = 0; i < id->param_count; i += 1) {
FnTypeParamInfo *info = &id->param_info[i];
@@ -4157,8 +4051,7 @@ bool fn_type_id_eql(FnTypeId *a, FnTypeId *b) {
a->return_type != b->return_type ||
a->is_var_args != b->is_var_args ||
a->param_count != b->param_count ||
- a->alignment != b->alignment ||
- a->async_allocator_type != b->async_allocator_type)
+ a->alignment != b->alignment)
{
return false;
}
@@ -4320,9 +4213,6 @@ static uint32_t hash_const_val(ConstExprValue *const_val) {
return 3677364617 ^ hash_ptr(const_val->data.x_ptr.data.fn.fn_entry);
case ZigTypeIdPointer:
return hash_const_val_ptr(const_val);
- case ZigTypeIdPromise:
- // TODO better hashing algorithm
- return 223048345;
case ZigTypeIdUndefined:
return 162837799;
case ZigTypeIdNull:
@@ -4418,7 +4308,6 @@ static bool can_mutate_comptime_var_state(ConstExprValue *value) {
case ZigTypeIdBoundFn:
case ZigTypeIdFn:
case ZigTypeIdOpaque:
- case ZigTypeIdPromise:
case ZigTypeIdErrorSet:
case ZigTypeIdEnum:
return false;
@@ -4488,7 +4377,6 @@ static bool return_type_is_cacheable(ZigType *return_type) {
case ZigTypeIdBoundFn:
case ZigTypeIdFn:
case ZigTypeIdOpaque:
- case ZigTypeIdPromise:
case ZigTypeIdErrorSet:
case ZigTypeIdEnum:
case ZigTypeIdPointer:
@@ -4623,7 +4511,6 @@ OnePossibleValue type_has_one_possible_value(CodeGen *g, ZigType *type_entry) {
case ZigTypeIdFn:
case ZigTypeIdBool:
case ZigTypeIdFloat:
- case ZigTypeIdPromise:
case ZigTypeIdErrorUnion:
return OnePossibleValueNo;
case ZigTypeIdUndefined:
@@ -4712,7 +4599,6 @@ ReqCompTime type_requires_comptime(CodeGen *g, ZigType *type_entry) {
case ZigTypeIdFloat:
case ZigTypeIdVoid:
case ZigTypeIdUnreachable:
- case ZigTypeIdPromise:
return ReqCompTimeNo;
}
zig_unreachable();
@@ -5278,7 +5164,6 @@ bool const_values_equal(CodeGen *g, ConstExprValue *a, ConstExprValue *b) {
case ZigTypeIdBoundFn:
case ZigTypeIdInvalid:
case ZigTypeIdUnreachable:
- case ZigTypeIdPromise:
zig_unreachable();
}
zig_unreachable();
@@ -5611,8 +5496,6 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) {
buf_appendf(buf, "(args value)");
return;
}
- case ZigTypeIdPromise:
- zig_unreachable();
}
zig_unreachable();
}
@@ -5659,7 +5542,6 @@ uint32_t type_id_hash(TypeId x) {
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
- case ZigTypeIdPromise:
zig_unreachable();
case ZigTypeIdErrorUnion:
return hash_ptr(x.data.error_union.err_set_type) ^ hash_ptr(x.data.error_union.payload_type);
@@ -5701,7 +5583,6 @@ bool type_id_eql(TypeId a, TypeId b) {
case ZigTypeIdUndefined:
case ZigTypeIdNull:
case ZigTypeIdOptional:
- case ZigTypeIdPromise:
case ZigTypeIdErrorSet:
case ZigTypeIdEnum:
case ZigTypeIdUnion:
@@ -5874,7 +5755,6 @@ static const ZigTypeId all_type_ids[] = {
ZigTypeIdBoundFn,
ZigTypeIdArgTuple,
ZigTypeIdOpaque,
- ZigTypeIdPromise,
ZigTypeIdVector,
ZigTypeIdEnumLiteral,
};
@@ -5938,12 +5818,10 @@ size_t type_id_index(ZigType *entry) {
return 20;
case ZigTypeIdOpaque:
return 21;
- case ZigTypeIdPromise:
- return 22;
case ZigTypeIdVector:
- return 23;
+ return 22;
case ZigTypeIdEnumLiteral:
- return 24;
+ return 23;
}
zig_unreachable();
}
@@ -5998,8 +5876,6 @@ const char *type_id_name(ZigTypeId id) {
return "ArgTuple";
case ZigTypeIdOpaque:
return "Opaque";
- case ZigTypeIdPromise:
- return "Promise";
case ZigTypeIdVector:
return "Vector";
}
@@ -6066,13 +5942,6 @@ bool type_is_global_error_set(ZigType *err_set_type) {
return err_set_type->data.error_set.err_count == UINT32_MAX;
}
-uint32_t get_coro_frame_align_bytes(CodeGen *g) {
- uint32_t a = g->pointer_size_bytes * 2;
- // promises have at least alignment 8 so that we can have 3 extra bits when doing atomicrmw
- if (a < 8) a = 8;
- return a;
-}
-
bool type_can_fail(ZigType *type_entry) {
return type_entry->id == ZigTypeIdErrorUnion || type_entry->id == ZigTypeIdErrorSet;
}
@@ -7105,19 +6974,13 @@ static void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type) {
param_di_types.append(get_llvm_di_type(g, gen_type));
}
if (is_async) {
- {
- // async allocator param
- ZigType *gen_type = fn_type_id->async_allocator_type;
- gen_param_types.append(get_llvm_type(g, gen_type));
- param_di_types.append(get_llvm_di_type(g, gen_type));
- }
-
- {
- // error code pointer
- ZigType *gen_type = get_pointer_to_type(g, g->builtin_types.entry_global_error_set, false);
- gen_param_types.append(get_llvm_type(g, gen_type));
- param_di_types.append(get_llvm_di_type(g, gen_type));
- }
+ // coroutine frame pointer
+ // TODO if we can make this typed a little more it will be better for
+ // debug symbols.
+ // TODO do we need to make this aligned more?
+ ZigType *void_star = get_pointer_to_type(g, g->builtin_types.entry_c_void, false);
+ gen_param_types.append(get_llvm_type(g, void_star));
+ param_di_types.append(get_llvm_di_type(g, void_star));
}
fn_type->data.fn.gen_param_info = allocate(fn_type_id->param_count);
@@ -7224,13 +7087,6 @@ static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_r
return resolve_llvm_types_union(g, type, wanted_resolve_status);
case ZigTypeIdPointer:
return resolve_llvm_types_pointer(g, type);
- case ZigTypeIdPromise: {
- if (type->llvm_di_type != nullptr) return;
- ZigType *u8_ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, false);
- type->llvm_type = get_llvm_type(g, u8_ptr_type);
- type->llvm_di_type = get_llvm_di_type(g, u8_ptr_type);
- return;
- }
case ZigTypeIdInt:
return resolve_llvm_types_integer(g, type);
case ZigTypeIdOptional:
diff --git a/src/analyze.hpp b/src/analyze.hpp
index b9e9f2df7d..fbbdece8ba 100644
--- a/src/analyze.hpp
+++ b/src/analyze.hpp
@@ -39,8 +39,6 @@ ZigType *get_bound_fn_type(CodeGen *g, ZigFn *fn_entry);
ZigType *get_opaque_type(CodeGen *g, Scope *scope, AstNode *source_node, const char *full_name, Buf *bare_name);
ZigType *get_struct_type(CodeGen *g, const char *type_name, const char *field_names[],
ZigType *field_types[], size_t field_count);
-ZigType *get_promise_type(CodeGen *g, ZigType *result_type);
-ZigType *get_promise_frame_type(CodeGen *g, ZigType *return_type);
ZigType *get_test_fn_type(CodeGen *g);
bool handle_is_ptr(ZigType *type_entry);
@@ -117,7 +115,6 @@ ScopeLoop *create_loop_scope(CodeGen *g, AstNode *node, Scope *parent);
ScopeSuspend *create_suspend_scope(CodeGen *g, AstNode *node, Scope *parent);
ScopeFnDef *create_fndef_scope(CodeGen *g, AstNode *node, Scope *parent, ZigFn *fn_entry);
Scope *create_comptime_scope(CodeGen *g, AstNode *node, Scope *parent);
-Scope *create_coro_prelude_scope(CodeGen *g, AstNode *node, Scope *parent);
Scope *create_runtime_scope(CodeGen *g, AstNode *node, Scope *parent, IrInstruction *is_comptime);
void init_const_str_lit(CodeGen *g, ConstExprValue *const_val, Buf *str);
@@ -204,7 +201,6 @@ bool resolve_inferred_error_set(CodeGen *g, ZigType *err_set_type, AstNode *sour
ZigType *get_auto_err_set_type(CodeGen *g, ZigFn *fn_entry);
-uint32_t get_coro_frame_align_bytes(CodeGen *g);
bool fn_type_can_fail(FnTypeId *fn_type_id);
bool type_can_fail(ZigType *type_entry);
bool fn_eval_cacheable(Scope *scope, ZigType *return_type);
diff --git a/src/ast_render.cpp b/src/ast_render.cpp
index fe131ab65f..d97f58fdec 100644
--- a/src/ast_render.cpp
+++ b/src/ast_render.cpp
@@ -257,8 +257,6 @@ static const char *node_type_str(NodeType node_type) {
return "AwaitExpr";
case NodeTypeSuspend:
return "Suspend";
- case NodeTypePromiseType:
- return "PromiseType";
case NodeTypePointerType:
return "PointerType";
case NodeTypeEnumLiteral:
@@ -692,13 +690,7 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
fprintf(ar->f, "@");
}
if (node->data.fn_call_expr.is_async) {
- fprintf(ar->f, "async");
- if (node->data.fn_call_expr.async_allocator != nullptr) {
- fprintf(ar->f, "<");
- render_node_extra(ar, node->data.fn_call_expr.async_allocator, true);
- fprintf(ar->f, ">");
- }
- fprintf(ar->f, " ");
+ fprintf(ar->f, "async ");
}
AstNode *fn_ref_node = node->data.fn_call_expr.fn_ref_expr;
bool grouped = (fn_ref_node->type != NodeTypePrefixOpExpr && fn_ref_node->type != NodeTypePointerType);
@@ -855,15 +847,6 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
render_node_ungrouped(ar, node->data.inferred_array_type.child_type);
break;
}
- case NodeTypePromiseType:
- {
- fprintf(ar->f, "promise");
- if (node->data.promise_type.payload_type != nullptr) {
- fprintf(ar->f, "->");
- render_node_grouped(ar, node->data.promise_type.payload_type);
- }
- break;
- }
case NodeTypeErrorType:
fprintf(ar->f, "anyerror");
break;
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 188c5ccc8d..4cc99b39a8 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -721,7 +721,6 @@ static ZigLLVMDIScope *get_di_scope(CodeGen *g, Scope *scope) {
case ScopeIdLoop:
case ScopeIdSuspend:
case ScopeIdCompTime:
- case ScopeIdCoroPrelude:
case ScopeIdRuntime:
return get_di_scope(g, scope->parent);
}
@@ -1083,177 +1082,6 @@ static LLVMValueRef get_write_register_fn_val(CodeGen *g) {
return g->write_register_fn_val;
}
-static LLVMValueRef get_coro_destroy_fn_val(CodeGen *g) {
- if (g->coro_destroy_fn_val)
- return g->coro_destroy_fn_val;
-
- LLVMTypeRef param_types[] = {
- LLVMPointerType(LLVMInt8Type(), 0),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(LLVMVoidType(), param_types, 1, false);
- Buf *name = buf_sprintf("llvm.coro.destroy");
- g->coro_destroy_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_destroy_fn_val));
-
- return g->coro_destroy_fn_val;
-}
-
-static LLVMValueRef get_coro_id_fn_val(CodeGen *g) {
- if (g->coro_id_fn_val)
- return g->coro_id_fn_val;
-
- LLVMTypeRef param_types[] = {
- LLVMInt32Type(),
- LLVMPointerType(LLVMInt8Type(), 0),
- LLVMPointerType(LLVMInt8Type(), 0),
- LLVMPointerType(LLVMInt8Type(), 0),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()), param_types, 4, false);
- Buf *name = buf_sprintf("llvm.coro.id");
- g->coro_id_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_id_fn_val));
-
- return g->coro_id_fn_val;
-}
-
-static LLVMValueRef get_coro_alloc_fn_val(CodeGen *g) {
- if (g->coro_alloc_fn_val)
- return g->coro_alloc_fn_val;
-
- LLVMTypeRef param_types[] = {
- ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(LLVMInt1Type(), param_types, 1, false);
- Buf *name = buf_sprintf("llvm.coro.alloc");
- g->coro_alloc_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_alloc_fn_val));
-
- return g->coro_alloc_fn_val;
-}
-
-static LLVMValueRef get_coro_size_fn_val(CodeGen *g) {
- if (g->coro_size_fn_val)
- return g->coro_size_fn_val;
-
- LLVMTypeRef fn_type = LLVMFunctionType(g->builtin_types.entry_usize->llvm_type, nullptr, 0, false);
- Buf *name = buf_sprintf("llvm.coro.size.i%d", g->pointer_size_bytes * 8);
- g->coro_size_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_size_fn_val));
-
- return g->coro_size_fn_val;
-}
-
-static LLVMValueRef get_coro_begin_fn_val(CodeGen *g) {
- if (g->coro_begin_fn_val)
- return g->coro_begin_fn_val;
-
- LLVMTypeRef param_types[] = {
- ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()),
- LLVMPointerType(LLVMInt8Type(), 0),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(LLVMPointerType(LLVMInt8Type(), 0), param_types, 2, false);
- Buf *name = buf_sprintf("llvm.coro.begin");
- g->coro_begin_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_begin_fn_val));
-
- return g->coro_begin_fn_val;
-}
-
-static LLVMValueRef get_coro_suspend_fn_val(CodeGen *g) {
- if (g->coro_suspend_fn_val)
- return g->coro_suspend_fn_val;
-
- LLVMTypeRef param_types[] = {
- ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()),
- LLVMInt1Type(),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(LLVMInt8Type(), param_types, 2, false);
- Buf *name = buf_sprintf("llvm.coro.suspend");
- g->coro_suspend_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_suspend_fn_val));
-
- return g->coro_suspend_fn_val;
-}
-
-static LLVMValueRef get_coro_end_fn_val(CodeGen *g) {
- if (g->coro_end_fn_val)
- return g->coro_end_fn_val;
-
- LLVMTypeRef param_types[] = {
- LLVMPointerType(LLVMInt8Type(), 0),
- LLVMInt1Type(),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(LLVMInt1Type(), param_types, 2, false);
- Buf *name = buf_sprintf("llvm.coro.end");
- g->coro_end_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_end_fn_val));
-
- return g->coro_end_fn_val;
-}
-
-static LLVMValueRef get_coro_free_fn_val(CodeGen *g) {
- if (g->coro_free_fn_val)
- return g->coro_free_fn_val;
-
- LLVMTypeRef param_types[] = {
- ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()),
- LLVMPointerType(LLVMInt8Type(), 0),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(LLVMPointerType(LLVMInt8Type(), 0), param_types, 2, false);
- Buf *name = buf_sprintf("llvm.coro.free");
- g->coro_free_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_free_fn_val));
-
- return g->coro_free_fn_val;
-}
-
-static LLVMValueRef get_coro_resume_fn_val(CodeGen *g) {
- if (g->coro_resume_fn_val)
- return g->coro_resume_fn_val;
-
- LLVMTypeRef param_types[] = {
- LLVMPointerType(LLVMInt8Type(), 0),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(LLVMVoidType(), param_types, 1, false);
- Buf *name = buf_sprintf("llvm.coro.resume");
- g->coro_resume_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_resume_fn_val));
-
- return g->coro_resume_fn_val;
-}
-
-static LLVMValueRef get_coro_save_fn_val(CodeGen *g) {
- if (g->coro_save_fn_val)
- return g->coro_save_fn_val;
-
- LLVMTypeRef param_types[] = {
- LLVMPointerType(LLVMInt8Type(), 0),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()), param_types, 1, false);
- Buf *name = buf_sprintf("llvm.coro.save");
- g->coro_save_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_save_fn_val));
-
- return g->coro_save_fn_val;
-}
-
-static LLVMValueRef get_coro_promise_fn_val(CodeGen *g) {
- if (g->coro_promise_fn_val)
- return g->coro_promise_fn_val;
-
- LLVMTypeRef param_types[] = {
- LLVMPointerType(LLVMInt8Type(), 0),
- LLVMInt32Type(),
- LLVMInt1Type(),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(LLVMPointerType(LLVMInt8Type(), 0), param_types, 3, false);
- Buf *name = buf_sprintf("llvm.coro.promise");
- g->coro_promise_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_promise_fn_val));
-
- return g->coro_promise_fn_val;
-}
-
static LLVMValueRef get_return_address_fn_val(CodeGen *g) {
if (g->return_address_fn_val)
return g->return_address_fn_val;
@@ -1346,140 +1174,6 @@ static LLVMValueRef get_add_error_return_trace_addr_fn(CodeGen *g) {
return fn_val;
}
-static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) {
- if (g->merge_err_ret_traces_fn_val)
- return g->merge_err_ret_traces_fn_val;
-
- assert(g->stack_trace_type != nullptr);
-
- LLVMTypeRef param_types[] = {
- get_llvm_type(g, get_ptr_to_stack_trace_type(g)),
- get_llvm_type(g, get_ptr_to_stack_trace_type(g)),
- };
- LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMVoidType(), param_types, 2, false);
-
- Buf *fn_name = get_mangled_name(g, buf_create_from_str("__zig_merge_error_return_traces"), false);
- LLVMValueRef fn_val = LLVMAddFunction(g->module, buf_ptr(fn_name), fn_type_ref);
- LLVMSetLinkage(fn_val, LLVMInternalLinkage);
- LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified));
- addLLVMFnAttr(fn_val, "nounwind");
- add_uwtable_attr(g, fn_val);
- // Error return trace memory is in the stack, which is impossible to be at address 0
- // on any architecture.
- addLLVMArgAttr(fn_val, (unsigned)0, "nonnull");
- addLLVMArgAttr(fn_val, (unsigned)0, "noalias");
- addLLVMArgAttr(fn_val, (unsigned)0, "writeonly");
- // Error return trace memory is in the stack, which is impossible to be at address 0
- // on any architecture.
- addLLVMArgAttr(fn_val, (unsigned)1, "nonnull");
- addLLVMArgAttr(fn_val, (unsigned)1, "noalias");
- addLLVMArgAttr(fn_val, (unsigned)1, "readonly");
- if (g->build_mode == BuildModeDebug) {
- ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim", "true");
- ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim-non-leaf", nullptr);
- }
-
- // this is above the ZigLLVMClearCurrentDebugLocation
- LLVMValueRef add_error_return_trace_addr_fn_val = get_add_error_return_trace_addr_fn(g);
-
- LLVMBasicBlockRef entry_block = LLVMAppendBasicBlock(fn_val, "Entry");
- LLVMBasicBlockRef prev_block = LLVMGetInsertBlock(g->builder);
- LLVMValueRef prev_debug_location = LLVMGetCurrentDebugLocation(g->builder);
- LLVMPositionBuilderAtEnd(g->builder, entry_block);
- ZigLLVMClearCurrentDebugLocation(g->builder);
-
- // var frame_index: usize = undefined;
- // var frames_left: usize = undefined;
- // if (src_stack_trace.index < src_stack_trace.instruction_addresses.len) {
- // frame_index = 0;
- // frames_left = src_stack_trace.index;
- // if (frames_left == 0) return;
- // } else {
- // frame_index = (src_stack_trace.index + 1) % src_stack_trace.instruction_addresses.len;
- // frames_left = src_stack_trace.instruction_addresses.len;
- // }
- // while (true) {
- // __zig_add_err_ret_trace_addr(dest_stack_trace, src_stack_trace.instruction_addresses[frame_index]);
- // frames_left -= 1;
- // if (frames_left == 0) return;
- // frame_index = (frame_index + 1) % src_stack_trace.instruction_addresses.len;
- // }
- LLVMBasicBlockRef return_block = LLVMAppendBasicBlock(fn_val, "Return");
-
- LLVMValueRef frame_index_ptr = LLVMBuildAlloca(g->builder, g->builtin_types.entry_usize->llvm_type, "frame_index");
- LLVMValueRef frames_left_ptr = LLVMBuildAlloca(g->builder, g->builtin_types.entry_usize->llvm_type, "frames_left");
-
- LLVMValueRef dest_stack_trace_ptr = LLVMGetParam(fn_val, 0);
- LLVMValueRef src_stack_trace_ptr = LLVMGetParam(fn_val, 1);
-
- size_t src_index_field_index = g->stack_trace_type->data.structure.fields[0].gen_index;
- size_t src_addresses_field_index = g->stack_trace_type->data.structure.fields[1].gen_index;
- LLVMValueRef src_index_field_ptr = LLVMBuildStructGEP(g->builder, src_stack_trace_ptr,
- (unsigned)src_index_field_index, "");
- LLVMValueRef src_addresses_field_ptr = LLVMBuildStructGEP(g->builder, src_stack_trace_ptr,
- (unsigned)src_addresses_field_index, "");
- ZigType *slice_type = g->stack_trace_type->data.structure.fields[1].type_entry;
- size_t ptr_field_index = slice_type->data.structure.fields[slice_ptr_index].gen_index;
- LLVMValueRef src_ptr_field_ptr = LLVMBuildStructGEP(g->builder, src_addresses_field_ptr, (unsigned)ptr_field_index, "");
- size_t len_field_index = slice_type->data.structure.fields[slice_len_index].gen_index;
- LLVMValueRef src_len_field_ptr = LLVMBuildStructGEP(g->builder, src_addresses_field_ptr, (unsigned)len_field_index, "");
- LLVMValueRef src_index_val = LLVMBuildLoad(g->builder, src_index_field_ptr, "");
- LLVMValueRef src_ptr_val = LLVMBuildLoad(g->builder, src_ptr_field_ptr, "");
- LLVMValueRef src_len_val = LLVMBuildLoad(g->builder, src_len_field_ptr, "");
- LLVMValueRef no_wrap_bit = LLVMBuildICmp(g->builder, LLVMIntULT, src_index_val, src_len_val, "");
- LLVMBasicBlockRef no_wrap_block = LLVMAppendBasicBlock(fn_val, "NoWrap");
- LLVMBasicBlockRef yes_wrap_block = LLVMAppendBasicBlock(fn_val, "YesWrap");
- LLVMBasicBlockRef loop_block = LLVMAppendBasicBlock(fn_val, "Loop");
- LLVMBuildCondBr(g->builder, no_wrap_bit, no_wrap_block, yes_wrap_block);
-
- LLVMPositionBuilderAtEnd(g->builder, no_wrap_block);
- LLVMValueRef usize_zero = LLVMConstNull(g->builtin_types.entry_usize->llvm_type);
- LLVMBuildStore(g->builder, usize_zero, frame_index_ptr);
- LLVMBuildStore(g->builder, src_index_val, frames_left_ptr);
- LLVMValueRef frames_left_eq_zero_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, src_index_val, usize_zero, "");
- LLVMBuildCondBr(g->builder, frames_left_eq_zero_bit, return_block, loop_block);
-
- LLVMPositionBuilderAtEnd(g->builder, yes_wrap_block);
- LLVMValueRef usize_one = LLVMConstInt(g->builtin_types.entry_usize->llvm_type, 1, false);
- LLVMValueRef plus_one = LLVMBuildNUWAdd(g->builder, src_index_val, usize_one, "");
- LLVMValueRef mod_len = LLVMBuildURem(g->builder, plus_one, src_len_val, "");
- LLVMBuildStore(g->builder, mod_len, frame_index_ptr);
- LLVMBuildStore(g->builder, src_len_val, frames_left_ptr);
- LLVMBuildBr(g->builder, loop_block);
-
- LLVMPositionBuilderAtEnd(g->builder, loop_block);
- LLVMValueRef ptr_index = LLVMBuildLoad(g->builder, frame_index_ptr, "");
- LLVMValueRef addr_ptr = LLVMBuildInBoundsGEP(g->builder, src_ptr_val, &ptr_index, 1, "");
- LLVMValueRef this_addr_val = LLVMBuildLoad(g->builder, addr_ptr, "");
- LLVMValueRef args[] = {dest_stack_trace_ptr, this_addr_val};
- ZigLLVMBuildCall(g->builder, add_error_return_trace_addr_fn_val, args, 2, get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAlways, "");
- LLVMValueRef prev_frames_left = LLVMBuildLoad(g->builder, frames_left_ptr, "");
- LLVMValueRef new_frames_left = LLVMBuildNUWSub(g->builder, prev_frames_left, usize_one, "");
- LLVMValueRef done_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, new_frames_left, usize_zero, "");
- LLVMBasicBlockRef continue_block = LLVMAppendBasicBlock(fn_val, "Continue");
- LLVMBuildCondBr(g->builder, done_bit, return_block, continue_block);
-
- LLVMPositionBuilderAtEnd(g->builder, return_block);
- LLVMBuildRetVoid(g->builder);
-
- LLVMPositionBuilderAtEnd(g->builder, continue_block);
- LLVMBuildStore(g->builder, new_frames_left, frames_left_ptr);
- LLVMValueRef prev_index = LLVMBuildLoad(g->builder, frame_index_ptr, "");
- LLVMValueRef index_plus_one = LLVMBuildNUWAdd(g->builder, prev_index, usize_one, "");
- LLVMValueRef index_mod_len = LLVMBuildURem(g->builder, index_plus_one, src_len_val, "");
- LLVMBuildStore(g->builder, index_mod_len, frame_index_ptr);
- LLVMBuildBr(g->builder, loop_block);
-
- LLVMPositionBuilderAtEnd(g->builder, prev_block);
- if (!g->strip_debug_symbols) {
- LLVMSetCurrentDebugLocation(g->builder, prev_debug_location);
- }
-
- g->merge_err_ret_traces_fn_val = fn_val;
- return fn_val;
-
-}
-
static LLVMValueRef get_return_err_fn(CodeGen *g) {
if (g->return_err_fn != nullptr)
return g->return_err_fn;
@@ -1667,24 +1361,12 @@ static LLVMValueRef get_safety_crash_err_fn(CodeGen *g) {
return fn_val;
}
-static bool is_coro_prelude_scope(Scope *scope) {
- while (scope != nullptr) {
- if (scope->id == ScopeIdCoroPrelude) {
- return true;
- } else if (scope->id == ScopeIdFnDef) {
- break;
- }
- scope = scope->parent;
- }
- return false;
-}
-
static LLVMValueRef get_cur_err_ret_trace_val(CodeGen *g, Scope *scope) {
if (!g->have_err_ret_tracing) {
return nullptr;
}
if (g->cur_fn->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync) {
- return is_coro_prelude_scope(scope) ? g->cur_err_ret_trace_val_arg : g->cur_err_ret_trace_val_stack;
+ return g->cur_err_ret_trace_val_stack;
}
if (g->cur_err_ret_trace_val_stack != nullptr) {
return g->cur_err_ret_trace_val_stack;
@@ -3697,19 +3379,6 @@ static bool get_prefix_arg_err_ret_stack(CodeGen *g, FnTypeId *fn_type_id) {
fn_type_id->cc == CallingConventionAsync);
}
-static size_t get_async_allocator_arg_index(CodeGen *g, FnTypeId *fn_type_id) {
- // 0 1 2 3
- // err_ret_stack allocator_ptr err_code other_args...
- return get_prefix_arg_err_ret_stack(g, fn_type_id) ? 1 : 0;
-}
-
-static size_t get_async_err_code_arg_index(CodeGen *g, FnTypeId *fn_type_id) {
- // 0 1 2 3
- // err_ret_stack allocator_ptr err_code other_args...
- return 1 + get_async_allocator_arg_index(g, fn_type_id);
-}
-
-
static LLVMValueRef get_new_stack_addr(CodeGen *g, LLVMValueRef new_stack) {
LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, new_stack, (unsigned)slice_ptr_index, "");
LLVMValueRef len_field_ptr = LLVMBuildStructGEP(g->builder, new_stack, (unsigned)slice_len_index, "");
@@ -3778,10 +3447,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
gen_param_values.append(get_cur_err_ret_trace_val(g, instruction->base.scope));
}
if (instruction->is_async) {
- gen_param_values.append(ir_llvm_value(g, instruction->async_allocator));
-
- LLVMValueRef err_val_ptr = LLVMBuildStructGEP(g->builder, result_loc, err_union_err_index, "");
- gen_param_values.append(err_val_ptr);
+ zig_panic("TODO codegen async call");
}
FnWalk fn_walk = {};
fn_walk.id = FnWalkIdCall;
@@ -4471,10 +4137,6 @@ static LLVMValueRef ir_render_align_cast(CodeGen *g, IrExecutable *executable, I
{
align_bytes = target_type->data.maybe.child_type->data.fn.fn_type_id.alignment;
ptr_val = target_val;
- } else if (target_type->id == ZigTypeIdOptional &&
- target_type->data.maybe.child_type->id == ZigTypeIdPromise)
- {
- zig_panic("TODO audit this function");
} else if (target_type->id == ZigTypeIdStruct && target_type->data.structure.is_slice) {
ZigType *slice_ptr_type = target_type->data.structure.fields[slice_ptr_index].type_entry;
align_bytes = get_ptr_align(g, slice_ptr_type);
@@ -4519,17 +4181,7 @@ static LLVMValueRef ir_render_error_return_trace(CodeGen *g, IrExecutable *execu
}
static LLVMValueRef ir_render_cancel(CodeGen *g, IrExecutable *executable, IrInstructionCancel *instruction) {
- LLVMValueRef target_handle = ir_llvm_value(g, instruction->target);
- LLVMBuildCall(g->builder, get_coro_destroy_fn_val(g), &target_handle, 1, "");
- return nullptr;
-}
-
-static LLVMValueRef ir_render_get_implicit_allocator(CodeGen *g, IrExecutable *executable,
- IrInstructionGetImplicitAllocator *instruction)
-{
- assert(instruction->id == ImplicitAllocatorIdArg);
- size_t allocator_arg_index = get_async_allocator_arg_index(g, &g->cur_fn->type_entry->data.fn.fn_type_id);
- return LLVMGetParam(g->cur_fn_val, allocator_arg_index);
+ zig_panic("TODO cancel");
}
static LLVMAtomicOrdering to_LLVMAtomicOrdering(AtomicOrder atomic_order) {
@@ -4840,24 +4492,10 @@ static LLVMValueRef ir_render_frame_address(CodeGen *g, IrExecutable *executable
return LLVMBuildPtrToInt(g->builder, ptr_val, g->builtin_types.entry_usize->llvm_type, "");
}
-static LLVMValueRef get_handle_fn_val(CodeGen *g) {
- if (g->coro_frame_fn_val)
- return g->coro_frame_fn_val;
-
- LLVMTypeRef fn_type = LLVMFunctionType( LLVMPointerType(LLVMInt8Type(), 0)
- , nullptr, 0, false);
- Buf *name = buf_sprintf("llvm.coro.frame");
- g->coro_frame_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_frame_fn_val));
-
- return g->coro_frame_fn_val;
-}
-
static LLVMValueRef ir_render_handle(CodeGen *g, IrExecutable *executable,
IrInstructionHandle *instruction)
{
- LLVMValueRef zero = LLVMConstNull(get_llvm_type(g, g->builtin_types.entry_promise));
- return LLVMBuildCall(g->builder, get_handle_fn_val(g), &zero, 0, "");
+ zig_panic("TODO @handle() codegen");
}
static LLVMValueRef render_shl_with_overflow(CodeGen *g, IrInstructionOverflowOp *instruction) {
@@ -5123,248 +4761,6 @@ static LLVMValueRef ir_render_panic(CodeGen *g, IrExecutable *executable, IrInst
return nullptr;
}
-static LLVMValueRef ir_render_coro_id(CodeGen *g, IrExecutable *executable, IrInstructionCoroId *instruction) {
- LLVMValueRef promise_ptr = ir_llvm_value(g, instruction->promise_ptr);
- LLVMValueRef align_val = LLVMConstInt(LLVMInt32Type(), get_coro_frame_align_bytes(g), false);
- LLVMValueRef null = LLVMConstIntToPtr(LLVMConstNull(g->builtin_types.entry_usize->llvm_type),
- LLVMPointerType(LLVMInt8Type(), 0));
- LLVMValueRef params[] = {
- align_val,
- promise_ptr,
- null,
- null,
- };
- return LLVMBuildCall(g->builder, get_coro_id_fn_val(g), params, 4, "");
-}
-
-static LLVMValueRef ir_render_coro_alloc(CodeGen *g, IrExecutable *executable, IrInstructionCoroAlloc *instruction) {
- LLVMValueRef token = ir_llvm_value(g, instruction->coro_id);
- return LLVMBuildCall(g->builder, get_coro_alloc_fn_val(g), &token, 1, "");
-}
-
-static LLVMValueRef ir_render_coro_size(CodeGen *g, IrExecutable *executable, IrInstructionCoroSize *instruction) {
- return LLVMBuildCall(g->builder, get_coro_size_fn_val(g), nullptr, 0, "");
-}
-
-static LLVMValueRef ir_render_coro_begin(CodeGen *g, IrExecutable *executable, IrInstructionCoroBegin *instruction) {
- LLVMValueRef coro_id = ir_llvm_value(g, instruction->coro_id);
- LLVMValueRef coro_mem_ptr = ir_llvm_value(g, instruction->coro_mem_ptr);
- LLVMValueRef params[] = {
- coro_id,
- coro_mem_ptr,
- };
- return LLVMBuildCall(g->builder, get_coro_begin_fn_val(g), params, 2, "");
-}
-
-static LLVMValueRef ir_render_coro_alloc_fail(CodeGen *g, IrExecutable *executable,
- IrInstructionCoroAllocFail *instruction)
-{
- size_t err_code_ptr_arg_index = get_async_err_code_arg_index(g, &g->cur_fn->type_entry->data.fn.fn_type_id);
- LLVMValueRef err_code_ptr_val = LLVMGetParam(g->cur_fn_val, err_code_ptr_arg_index);
- LLVMValueRef err_code = ir_llvm_value(g, instruction->err_val);
- LLVMBuildStore(g->builder, err_code, err_code_ptr_val);
-
- LLVMValueRef return_value;
- if (ir_want_runtime_safety(g, &instruction->base)) {
- return_value = LLVMConstNull(LLVMPointerType(LLVMInt8Type(), 0));
- } else {
- return_value = LLVMGetUndef(LLVMPointerType(LLVMInt8Type(), 0));
- }
- LLVMBuildRet(g->builder, return_value);
- return nullptr;
-}
-
-static LLVMValueRef ir_render_coro_suspend(CodeGen *g, IrExecutable *executable, IrInstructionCoroSuspend *instruction) {
- LLVMValueRef save_point;
- if (instruction->save_point == nullptr) {
- save_point = LLVMConstNull(ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()));
- } else {
- save_point = ir_llvm_value(g, instruction->save_point);
- }
- LLVMValueRef is_final = ir_llvm_value(g, instruction->is_final);
- LLVMValueRef params[] = {
- save_point,
- is_final,
- };
- return LLVMBuildCall(g->builder, get_coro_suspend_fn_val(g), params, 2, "");
-}
-
-static LLVMValueRef ir_render_coro_end(CodeGen *g, IrExecutable *executable, IrInstructionCoroEnd *instruction) {
- LLVMValueRef params[] = {
- LLVMConstNull(LLVMPointerType(LLVMInt8Type(), 0)),
- LLVMConstNull(LLVMInt1Type()),
- };
- return LLVMBuildCall(g->builder, get_coro_end_fn_val(g), params, 2, "");
-}
-
-static LLVMValueRef ir_render_coro_free(CodeGen *g, IrExecutable *executable, IrInstructionCoroFree *instruction) {
- LLVMValueRef coro_id = ir_llvm_value(g, instruction->coro_id);
- LLVMValueRef coro_handle = ir_llvm_value(g, instruction->coro_handle);
- LLVMValueRef params[] = {
- coro_id,
- coro_handle,
- };
- return LLVMBuildCall(g->builder, get_coro_free_fn_val(g), params, 2, "");
-}
-
-static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable, IrInstructionCoroResume *instruction) {
- LLVMValueRef awaiter_handle = ir_llvm_value(g, instruction->awaiter_handle);
- return LLVMBuildCall(g->builder, get_coro_resume_fn_val(g), &awaiter_handle, 1, "");
-}
-
-static LLVMValueRef ir_render_coro_save(CodeGen *g, IrExecutable *executable, IrInstructionCoroSave *instruction) {
- LLVMValueRef coro_handle = ir_llvm_value(g, instruction->coro_handle);
- return LLVMBuildCall(g->builder, get_coro_save_fn_val(g), &coro_handle, 1, "");
-}
-
-static LLVMValueRef ir_render_coro_promise(CodeGen *g, IrExecutable *executable, IrInstructionCoroPromise *instruction) {
- LLVMValueRef coro_handle = ir_llvm_value(g, instruction->coro_handle);
- LLVMValueRef params[] = {
- coro_handle,
- LLVMConstInt(LLVMInt32Type(), get_coro_frame_align_bytes(g), false),
- LLVMConstNull(LLVMInt1Type()),
- };
- LLVMValueRef uncasted_result = LLVMBuildCall(g->builder, get_coro_promise_fn_val(g), params, 3, "");
- return LLVMBuildBitCast(g->builder, uncasted_result, get_llvm_type(g, instruction->base.value.type), "");
-}
-
-static LLVMValueRef get_coro_alloc_helper_fn_val(CodeGen *g, LLVMTypeRef alloc_fn_type_ref, ZigType *fn_type) {
- if (g->coro_alloc_helper_fn_val != nullptr)
- return g->coro_alloc_helper_fn_val;
-
- assert(fn_type->id == ZigTypeIdFn);
-
- ZigType *ptr_to_err_code_type = get_pointer_to_type(g, g->builtin_types.entry_global_error_set, false);
-
- LLVMTypeRef alloc_raw_fn_type_ref = LLVMGetElementType(alloc_fn_type_ref);
- LLVMTypeRef *alloc_fn_arg_types = allocate(LLVMCountParamTypes(alloc_raw_fn_type_ref));
- LLVMGetParamTypes(alloc_raw_fn_type_ref, alloc_fn_arg_types);
-
- ZigList arg_types = {};
- arg_types.append(alloc_fn_type_ref);
- if (g->have_err_ret_tracing) {
- arg_types.append(alloc_fn_arg_types[1]);
- }
- arg_types.append(alloc_fn_arg_types[g->have_err_ret_tracing ? 2 : 1]);
- arg_types.append(get_llvm_type(g, ptr_to_err_code_type));
- arg_types.append(g->builtin_types.entry_usize->llvm_type);
-
- LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMPointerType(LLVMInt8Type(), 0),
- arg_types.items, arg_types.length, false);
-
- Buf *fn_name = get_mangled_name(g, buf_create_from_str("__zig_coro_alloc_helper"), false);
- LLVMValueRef fn_val = LLVMAddFunction(g->module, buf_ptr(fn_name), fn_type_ref);
- LLVMSetLinkage(fn_val, LLVMInternalLinkage);
- LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified));
- addLLVMFnAttr(fn_val, "nounwind");
- addLLVMArgAttr(fn_val, (unsigned)0, "nonnull");
- addLLVMArgAttr(fn_val, (unsigned)1, "nonnull");
-
- LLVMBasicBlockRef prev_block = LLVMGetInsertBlock(g->builder);
- LLVMValueRef prev_debug_location = LLVMGetCurrentDebugLocation(g->builder);
- ZigFn *prev_cur_fn = g->cur_fn;
- LLVMValueRef prev_cur_fn_val = g->cur_fn_val;
-
- LLVMBasicBlockRef entry_block = LLVMAppendBasicBlock(fn_val, "Entry");
- LLVMPositionBuilderAtEnd(g->builder, entry_block);
- ZigLLVMClearCurrentDebugLocation(g->builder);
- g->cur_fn = nullptr;
- g->cur_fn_val = fn_val;
-
- LLVMValueRef sret_ptr = LLVMBuildAlloca(g->builder, LLVMGetElementType(alloc_fn_arg_types[0]), "");
-
- size_t next_arg = 0;
- LLVMValueRef realloc_fn_val = LLVMGetParam(fn_val, next_arg);
- next_arg += 1;
-
- LLVMValueRef stack_trace_val;
- if (g->have_err_ret_tracing) {
- stack_trace_val = LLVMGetParam(fn_val, next_arg);
- next_arg += 1;
- }
-
- LLVMValueRef allocator_val = LLVMGetParam(fn_val, next_arg);
- next_arg += 1;
- LLVMValueRef err_code_ptr = LLVMGetParam(fn_val, next_arg);
- next_arg += 1;
- LLVMValueRef coro_size = LLVMGetParam(fn_val, next_arg);
- next_arg += 1;
- LLVMValueRef alignment_val = LLVMConstInt(g->builtin_types.entry_u29->llvm_type,
- get_coro_frame_align_bytes(g), false);
-
- ConstExprValue *zero_array = create_const_str_lit(g, buf_create_from_str(""));
- ConstExprValue *undef_slice_zero = create_const_slice(g, zero_array, 0, 0, false);
- render_const_val(g, undef_slice_zero, "");
- render_const_val_global(g, undef_slice_zero, "");
-
- ZigList args = {};
- args.append(sret_ptr);
- if (g->have_err_ret_tracing) {
- args.append(stack_trace_val);
- }
- args.append(allocator_val);
- args.append(undef_slice_zero->global_refs->llvm_global);
- args.append(LLVMGetUndef(g->builtin_types.entry_u29->llvm_type));
- args.append(coro_size);
- args.append(alignment_val);
- LLVMValueRef call_instruction = ZigLLVMBuildCall(g->builder, realloc_fn_val, args.items, args.length,
- get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
- set_call_instr_sret(g, call_instruction);
- LLVMValueRef err_val_ptr = LLVMBuildStructGEP(g->builder, sret_ptr, err_union_err_index, "");
- LLVMValueRef err_val = LLVMBuildLoad(g->builder, err_val_ptr, "");
- LLVMBuildStore(g->builder, err_val, err_code_ptr);
- LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, err_val, LLVMConstNull(LLVMTypeOf(err_val)), "");
- LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(fn_val, "AllocOk");
- LLVMBasicBlockRef fail_block = LLVMAppendBasicBlock(fn_val, "AllocFail");
- LLVMBuildCondBr(g->builder, ok_bit, ok_block, fail_block);
-
- LLVMPositionBuilderAtEnd(g->builder, ok_block);
- LLVMValueRef payload_ptr = LLVMBuildStructGEP(g->builder, sret_ptr, err_union_payload_index, "");
- ZigType *u8_ptr_type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, false, false,
- PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0, false);
- ZigType *slice_type = get_slice_type(g, u8_ptr_type);
- size_t ptr_field_index = slice_type->data.structure.fields[slice_ptr_index].gen_index;
- LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, payload_ptr, ptr_field_index, "");
- LLVMValueRef ptr_val = LLVMBuildLoad(g->builder, ptr_field_ptr, "");
- LLVMBuildRet(g->builder, ptr_val);
-
- LLVMPositionBuilderAtEnd(g->builder, fail_block);
- LLVMBuildRet(g->builder, LLVMConstNull(LLVMPointerType(LLVMInt8Type(), 0)));
-
- g->cur_fn = prev_cur_fn;
- g->cur_fn_val = prev_cur_fn_val;
- LLVMPositionBuilderAtEnd(g->builder, prev_block);
- if (!g->strip_debug_symbols) {
- LLVMSetCurrentDebugLocation(g->builder, prev_debug_location);
- }
-
- g->coro_alloc_helper_fn_val = fn_val;
- return fn_val;
-}
-
-static LLVMValueRef ir_render_coro_alloc_helper(CodeGen *g, IrExecutable *executable,
- IrInstructionCoroAllocHelper *instruction)
-{
- LLVMValueRef realloc_fn = ir_llvm_value(g, instruction->realloc_fn);
- LLVMValueRef coro_size = ir_llvm_value(g, instruction->coro_size);
- LLVMValueRef fn_val = get_coro_alloc_helper_fn_val(g, LLVMTypeOf(realloc_fn), instruction->realloc_fn->value.type);
- size_t err_code_ptr_arg_index = get_async_err_code_arg_index(g, &g->cur_fn->type_entry->data.fn.fn_type_id);
- size_t allocator_arg_index = get_async_allocator_arg_index(g, &g->cur_fn->type_entry->data.fn.fn_type_id);
-
- ZigList params = {};
- params.append(realloc_fn);
- uint32_t err_ret_trace_arg_index = get_err_ret_trace_arg_index(g, g->cur_fn);
- if (err_ret_trace_arg_index != UINT32_MAX) {
- params.append(LLVMGetParam(g->cur_fn_val, err_ret_trace_arg_index));
- }
- params.append(LLVMGetParam(g->cur_fn_val, allocator_arg_index));
- params.append(LLVMGetParam(g->cur_fn_val, err_code_ptr_arg_index));
- params.append(coro_size);
-
- return ZigLLVMBuildCall(g->builder, fn_val, params.items, params.length,
- get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
-}
-
static LLVMValueRef ir_render_atomic_rmw(CodeGen *g, IrExecutable *executable,
IrInstructionAtomicRmw *instruction)
{
@@ -5402,19 +4798,6 @@ static LLVMValueRef ir_render_atomic_load(CodeGen *g, IrExecutable *executable,
return load_inst;
}
-static LLVMValueRef ir_render_merge_err_ret_traces(CodeGen *g, IrExecutable *executable,
- IrInstructionMergeErrRetTraces *instruction)
-{
- assert(g->have_err_ret_tracing);
-
- LLVMValueRef src_trace_ptr = ir_llvm_value(g, instruction->src_err_ret_trace_ptr);
- LLVMValueRef dest_trace_ptr = ir_llvm_value(g, instruction->dest_err_ret_trace_ptr);
-
- LLVMValueRef args[] = { dest_trace_ptr, src_trace_ptr };
- ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2, get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
- return nullptr;
-}
-
static LLVMValueRef ir_render_mark_err_ret_trace_ptr(CodeGen *g, IrExecutable *executable,
IrInstructionMarkErrRetTracePtr *instruction)
{
@@ -5559,7 +4942,6 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdSetRuntimeSafety:
case IrInstructionIdSetFloatMode:
case IrInstructionIdArrayType:
- case IrInstructionIdPromiseType:
case IrInstructionIdSliceType:
case IrInstructionIdSizeOf:
case IrInstructionIdSwitchTarget:
@@ -5599,8 +4981,6 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdTagType:
case IrInstructionIdExport:
case IrInstructionIdErrorUnion:
- case IrInstructionIdPromiseResultType:
- case IrInstructionIdAwaitBookkeeping:
case IrInstructionIdAddImplicitReturnType:
case IrInstructionIdIntCast:
case IrInstructionIdFloatCast:
@@ -5757,40 +5137,12 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_error_return_trace(g, executable, (IrInstructionErrorReturnTrace *)instruction);
case IrInstructionIdCancel:
return ir_render_cancel(g, executable, (IrInstructionCancel *)instruction);
- case IrInstructionIdGetImplicitAllocator:
- return ir_render_get_implicit_allocator(g, executable, (IrInstructionGetImplicitAllocator *)instruction);
- case IrInstructionIdCoroId:
- return ir_render_coro_id(g, executable, (IrInstructionCoroId *)instruction);
- case IrInstructionIdCoroAlloc:
- return ir_render_coro_alloc(g, executable, (IrInstructionCoroAlloc *)instruction);
- case IrInstructionIdCoroSize:
- return ir_render_coro_size(g, executable, (IrInstructionCoroSize *)instruction);
- case IrInstructionIdCoroBegin:
- return ir_render_coro_begin(g, executable, (IrInstructionCoroBegin *)instruction);
- case IrInstructionIdCoroAllocFail:
- return ir_render_coro_alloc_fail(g, executable, (IrInstructionCoroAllocFail *)instruction);
- case IrInstructionIdCoroSuspend:
- return ir_render_coro_suspend(g, executable, (IrInstructionCoroSuspend *)instruction);
- case IrInstructionIdCoroEnd:
- return ir_render_coro_end(g, executable, (IrInstructionCoroEnd *)instruction);
- case IrInstructionIdCoroFree:
- return ir_render_coro_free(g, executable, (IrInstructionCoroFree *)instruction);
- case IrInstructionIdCoroResume:
- return ir_render_coro_resume(g, executable, (IrInstructionCoroResume *)instruction);
- case IrInstructionIdCoroSave:
- return ir_render_coro_save(g, executable, (IrInstructionCoroSave *)instruction);
- case IrInstructionIdCoroPromise:
- return ir_render_coro_promise(g, executable, (IrInstructionCoroPromise *)instruction);
- case IrInstructionIdCoroAllocHelper:
- return ir_render_coro_alloc_helper(g, executable, (IrInstructionCoroAllocHelper *)instruction);
case IrInstructionIdAtomicRmw:
return ir_render_atomic_rmw(g, executable, (IrInstructionAtomicRmw *)instruction);
case IrInstructionIdAtomicLoad:
return ir_render_atomic_load(g, executable, (IrInstructionAtomicLoad *)instruction);
case IrInstructionIdSaveErrRetAddr:
return ir_render_save_err_ret_addr(g, executable, (IrInstructionSaveErrRetAddr *)instruction);
- case IrInstructionIdMergeErrRetTraces:
- return ir_render_merge_err_ret_traces(g, executable, (IrInstructionMergeErrRetTraces *)instruction);
case IrInstructionIdMarkErrRetTracePtr:
return ir_render_mark_err_ret_trace_ptr(g, executable, (IrInstructionMarkErrRetTracePtr *)instruction);
case IrInstructionIdFloatOp:
@@ -6008,7 +5360,6 @@ static LLVMValueRef pack_const_int(CodeGen *g, LLVMTypeRef big_int_type_ref, Con
case ZigTypeIdPointer:
case ZigTypeIdFn:
case ZigTypeIdOptional:
- case ZigTypeIdPromise:
{
LLVMValueRef ptr_val = gen_const_val(g, const_val, "");
LLVMValueRef ptr_size_int_val = LLVMConstPtrToInt(ptr_val, g->builtin_types.entry_usize->llvm_type);
@@ -6591,7 +5942,6 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
- case ZigTypeIdPromise:
zig_unreachable();
}
@@ -7294,13 +6644,6 @@ static void define_builtin_types(CodeGen *g) {
g->primitive_type_table.put(&entry->name, entry);
}
- {
- ZigType *entry = get_promise_type(g, nullptr);
- g->primitive_type_table.put(&entry->name, entry);
- entry->size_in_bits = g->builtin_types.entry_usize->size_in_bits;
- entry->abi_align = g->builtin_types.entry_usize->abi_align;
- entry->abi_size = g->builtin_types.entry_usize->abi_size;
- }
}
@@ -7729,7 +7072,6 @@ Buf *codegen_generate_builtin_source(CodeGen *g) {
" BoundFn: Fn,\n"
" ArgTuple: void,\n"
" Opaque: void,\n"
- " Promise: Promise,\n"
" Vector: Vector,\n"
" EnumLiteral: void,\n"
"\n\n"
@@ -7842,14 +7184,9 @@ Buf *codegen_generate_builtin_source(CodeGen *g) {
" is_generic: bool,\n"
" is_var_args: bool,\n"
" return_type: ?type,\n"
- " async_allocator_type: ?type,\n"
" args: []FnArg,\n"
" };\n"
"\n"
- " pub const Promise = struct {\n"
- " child: ?type,\n"
- " };\n"
- "\n"
" pub const Vector = struct {\n"
" len: comptime_int,\n"
" child: type,\n"
@@ -8998,7 +8335,6 @@ static void prepend_c_type_to_decl_list(CodeGen *g, GenH *gen_h, ZigType *type_e
case ZigTypeIdArgTuple:
case ZigTypeIdErrorUnion:
case ZigTypeIdErrorSet:
- case ZigTypeIdPromise:
zig_unreachable();
case ZigTypeIdVoid:
case ZigTypeIdUnreachable:
@@ -9182,7 +8518,6 @@ static void get_c_type(CodeGen *g, GenH *gen_h, ZigType *type_entry, Buf *out_bu
case ZigTypeIdUndefined:
case ZigTypeIdNull:
case ZigTypeIdArgTuple:
- case ZigTypeIdPromise:
zig_unreachable();
}
}
@@ -9349,7 +8684,6 @@ static void gen_h_file(CodeGen *g) {
case ZigTypeIdArgTuple:
case ZigTypeIdOptional:
case ZigTypeIdFn:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
zig_unreachable();
case ZigTypeIdEnum:
diff --git a/src/ir.cpp b/src/ir.cpp
index be7a8e2e51..f23fe1b7d0 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -99,7 +99,6 @@ struct ConstCastOnly {
ConstCastErrUnionErrSetMismatch *error_union_error_set;
ConstCastTypeMismatch *type_mismatch;
ConstCastOnly *return_type;
- ConstCastOnly *async_allocator_type;
ConstCastOnly *null_wrap_ptr_child;
ConstCastArg fn_arg;
ConstCastArgNoAlias arg_no_alias;
@@ -318,7 +317,6 @@ static bool types_have_same_zig_comptime_repr(ZigType *a, ZigType *b) {
case ZigTypeIdUnion:
case ZigTypeIdFn:
case ZigTypeIdArgTuple:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
return false;
}
@@ -564,10 +562,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionArrayType *) {
return IrInstructionIdArrayType;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionPromiseType *) {
- return IrInstructionIdPromiseType;
-}
-
static constexpr IrInstructionId ir_instruction_id(IrInstructionSliceType *) {
return IrInstructionIdSliceType;
}
@@ -964,58 +958,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionCancel *) {
return IrInstructionIdCancel;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionGetImplicitAllocator *) {
- return IrInstructionIdGetImplicitAllocator;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroId *) {
- return IrInstructionIdCoroId;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroAlloc *) {
- return IrInstructionIdCoroAlloc;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroSize *) {
- return IrInstructionIdCoroSize;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroBegin *) {
- return IrInstructionIdCoroBegin;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroAllocFail *) {
- return IrInstructionIdCoroAllocFail;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroSuspend *) {
- return IrInstructionIdCoroSuspend;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroEnd *) {
- return IrInstructionIdCoroEnd;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroFree *) {
- return IrInstructionIdCoroFree;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroResume *) {
- return IrInstructionIdCoroResume;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroSave *) {
- return IrInstructionIdCoroSave;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroPromise *) {
- return IrInstructionIdCoroPromise;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroAllocHelper *) {
- return IrInstructionIdCoroAllocHelper;
-}
-
static constexpr IrInstructionId ir_instruction_id(IrInstructionAtomicRmw *) {
return IrInstructionIdAtomicRmw;
}
@@ -1024,14 +966,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionAtomicLoad *) {
return IrInstructionIdAtomicLoad;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionPromiseResultType *) {
- return IrInstructionIdPromiseResultType;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionAwaitBookkeeping *) {
- return IrInstructionIdAwaitBookkeeping;
-}
-
static constexpr IrInstructionId ir_instruction_id(IrInstructionSaveErrRetAddr *) {
return IrInstructionIdSaveErrRetAddr;
}
@@ -1040,10 +974,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionAddImplicitRetur
return IrInstructionIdAddImplicitReturnType;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionMergeErrRetTraces *) {
- return IrInstructionIdMergeErrRetTraces;
-}
-
static constexpr IrInstructionId ir_instruction_id(IrInstructionMarkErrRetTracePtr *) {
return IrInstructionIdMarkErrRetTracePtr;
}
@@ -1213,14 +1143,6 @@ static IrInstruction *ir_build_const_usize(IrBuilder *irb, Scope *scope, AstNode
return &const_instruction->base;
}
-static IrInstruction *ir_build_const_u8(IrBuilder *irb, Scope *scope, AstNode *source_node, uint8_t value) {
- IrInstructionConst *const_instruction = ir_build_instruction(irb, scope, source_node);
- const_instruction->base.value.type = irb->codegen->builtin_types.entry_u8;
- const_instruction->base.value.special = ConstValSpecialStatic;
- bigint_init_unsigned(&const_instruction->base.value.data.x_bigint, value);
- return &const_instruction->base;
-}
-
static IrInstruction *ir_create_const_type(IrBuilder *irb, Scope *scope, AstNode *source_node,
ZigType *type_entry)
{
@@ -1428,7 +1350,7 @@ static IrInstruction *ir_build_union_field_ptr(IrBuilder *irb, Scope *scope, Ast
static IrInstruction *ir_build_call_src(IrBuilder *irb, Scope *scope, AstNode *source_node,
ZigFn *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args,
- bool is_comptime, FnInline fn_inline, bool is_async, IrInstruction *async_allocator,
+ bool is_comptime, FnInline fn_inline, bool is_async,
IrInstruction *new_stack, ResultLoc *result_loc)
{
IrInstructionCallSrc *call_instruction = ir_build_instruction(irb, scope, source_node);
@@ -1439,14 +1361,12 @@ static IrInstruction *ir_build_call_src(IrBuilder *irb, Scope *scope, AstNode *s
call_instruction->args = args;
call_instruction->arg_count = arg_count;
call_instruction->is_async = is_async;
- call_instruction->async_allocator = async_allocator;
call_instruction->new_stack = new_stack;
call_instruction->result_loc = result_loc;
if (fn_ref != nullptr) ir_ref_instruction(fn_ref, irb->current_basic_block);
for (size_t i = 0; i < arg_count; i += 1)
ir_ref_instruction(args[i], irb->current_basic_block);
- if (async_allocator != nullptr) ir_ref_instruction(async_allocator, irb->current_basic_block);
if (new_stack != nullptr) ir_ref_instruction(new_stack, irb->current_basic_block);
return &call_instruction->base;
@@ -1454,7 +1374,7 @@ static IrInstruction *ir_build_call_src(IrBuilder *irb, Scope *scope, AstNode *s
static IrInstruction *ir_build_call_gen(IrAnalyze *ira, IrInstruction *source_instruction,
ZigFn *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args,
- FnInline fn_inline, bool is_async, IrInstruction *async_allocator, IrInstruction *new_stack,
+ FnInline fn_inline, bool is_async, IrInstruction *new_stack,
IrInstruction *result_loc, ZigType *return_type)
{
IrInstructionCallGen *call_instruction = ir_build_instruction(&ira->new_irb,
@@ -1466,14 +1386,12 @@ static IrInstruction *ir_build_call_gen(IrAnalyze *ira, IrInstruction *source_in
call_instruction->args = args;
call_instruction->arg_count = arg_count;
call_instruction->is_async = is_async;
- call_instruction->async_allocator = async_allocator;
call_instruction->new_stack = new_stack;
call_instruction->result_loc = result_loc;
if (fn_ref != nullptr) ir_ref_instruction(fn_ref, ira->new_irb.current_basic_block);
for (size_t i = 0; i < arg_count; i += 1)
ir_ref_instruction(args[i], ira->new_irb.current_basic_block);
- if (async_allocator != nullptr) ir_ref_instruction(async_allocator, ira->new_irb.current_basic_block);
if (new_stack != nullptr) ir_ref_instruction(new_stack, ira->new_irb.current_basic_block);
if (result_loc != nullptr) ir_ref_instruction(result_loc, ira->new_irb.current_basic_block);
@@ -1753,17 +1671,6 @@ static IrInstruction *ir_build_array_type(IrBuilder *irb, Scope *scope, AstNode
return &instruction->base;
}
-static IrInstruction *ir_build_promise_type(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *payload_type)
-{
- IrInstructionPromiseType *instruction = ir_build_instruction(irb, scope, source_node);
- instruction->payload_type = payload_type;
-
- if (payload_type != nullptr) ir_ref_instruction(payload_type, irb->current_basic_block);
-
- return &instruction->base;
-}
-
static IrInstruction *ir_build_slice_type(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *child_type, bool is_const, bool is_volatile, IrInstruction *align_value, bool is_allow_zero)
{
@@ -2595,13 +2502,12 @@ static IrInstruction *ir_build_unwrap_err_payload(IrBuilder *irb, Scope *scope,
static IrInstruction *ir_build_fn_proto(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction **param_types, IrInstruction *align_value, IrInstruction *return_type,
- IrInstruction *async_allocator_type_value, bool is_var_args)
+ bool is_var_args)
{
IrInstructionFnProto *instruction = ir_build_instruction(irb, scope, source_node);
instruction->param_types = param_types;
instruction->align_value = align_value;
instruction->return_type = return_type;
- instruction->async_allocator_type_value = async_allocator_type_value;
instruction->is_var_args = is_var_args;
assert(source_node->type == NodeTypeFnProto);
@@ -2611,7 +2517,6 @@ static IrInstruction *ir_build_fn_proto(IrBuilder *irb, Scope *scope, AstNode *s
if (param_types[i] != nullptr) ir_ref_instruction(param_types[i], irb->current_basic_block);
}
if (align_value != nullptr) ir_ref_instruction(align_value, irb->current_basic_block);
- if (async_allocator_type_value != nullptr) ir_ref_instruction(async_allocator_type_value, irb->current_basic_block);
ir_ref_instruction(return_type, irb->current_basic_block);
return &instruction->base;
@@ -3055,149 +2960,6 @@ static IrInstruction *ir_build_error_union(IrBuilder *irb, Scope *scope, AstNode
return &instruction->base;
}
-static IrInstruction *ir_build_cancel(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *target)
-{
- IrInstructionCancel *instruction = ir_build_instruction(irb, scope, source_node);
- instruction->target = target;
-
- ir_ref_instruction(target, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_get_implicit_allocator(IrBuilder *irb, Scope *scope, AstNode *source_node,
- ImplicitAllocatorId id)
-{
- IrInstructionGetImplicitAllocator *instruction = ir_build_instruction(irb, scope, source_node);
- instruction->id = id;
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_id(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *promise_ptr) {
- IrInstructionCoroId *instruction = ir_build_instruction(irb, scope, source_node);
- instruction->promise_ptr = promise_ptr;
-
- ir_ref_instruction(promise_ptr, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_alloc(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *coro_id) {
- IrInstructionCoroAlloc *instruction = ir_build_instruction(irb, scope, source_node);
- instruction->coro_id = coro_id;
-
- ir_ref_instruction(coro_id, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_size(IrBuilder *irb, Scope *scope, AstNode *source_node) {
- IrInstructionCoroSize *instruction = ir_build_instruction(irb, scope, source_node);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_begin(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *coro_id, IrInstruction *coro_mem_ptr) {
- IrInstructionCoroBegin *instruction = ir_build_instruction(irb, scope, source_node);
- instruction->coro_id = coro_id;
- instruction->coro_mem_ptr = coro_mem_ptr;
-
- ir_ref_instruction(coro_id, irb->current_basic_block);
- ir_ref_instruction(coro_mem_ptr, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_alloc_fail(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *err_val) {
- IrInstructionCoroAllocFail *instruction = ir_build_instruction(irb, scope, source_node);
- instruction->base.value.type = irb->codegen->builtin_types.entry_unreachable;
- instruction->base.value.special = ConstValSpecialStatic;
- instruction->err_val = err_val;
-
- ir_ref_instruction(err_val, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_suspend(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *save_point, IrInstruction *is_final)
-{
- IrInstructionCoroSuspend *instruction = ir_build_instruction(irb, scope, source_node);
- instruction->save_point = save_point;
- instruction->is_final = is_final;
-
- if (save_point != nullptr) ir_ref_instruction(save_point, irb->current_basic_block);
- ir_ref_instruction(is_final, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_end(IrBuilder *irb, Scope *scope, AstNode *source_node) {
- IrInstructionCoroEnd *instruction = ir_build_instruction(irb, scope, source_node);
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_free(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *coro_id, IrInstruction *coro_handle)
-{
- IrInstructionCoroFree *instruction = ir_build_instruction(irb, scope, source_node);
- instruction->coro_id = coro_id;
- instruction->coro_handle = coro_handle;
-
- ir_ref_instruction(coro_id, irb->current_basic_block);
- ir_ref_instruction(coro_handle, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_resume(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *awaiter_handle)
-{
- IrInstructionCoroResume *instruction = ir_build_instruction(irb, scope, source_node);
- instruction->awaiter_handle = awaiter_handle;
-
- ir_ref_instruction(awaiter_handle, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_save(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *coro_handle)
-{
- IrInstructionCoroSave *instruction = ir_build_instruction(irb, scope, source_node);
- instruction->coro_handle = coro_handle;
-
- ir_ref_instruction(coro_handle, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_promise(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *coro_handle)
-{
- IrInstructionCoroPromise *instruction = ir_build_instruction(irb, scope, source_node);
- instruction->coro_handle = coro_handle;
-
- ir_ref_instruction(coro_handle, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_alloc_helper(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *realloc_fn, IrInstruction *coro_size)
-{
- IrInstructionCoroAllocHelper *instruction = ir_build_instruction(irb, scope, source_node);
- instruction->realloc_fn = realloc_fn;
- instruction->coro_size = coro_size;
-
- ir_ref_instruction(realloc_fn, irb->current_basic_block);
- ir_ref_instruction(coro_size, irb->current_basic_block);
-
- return &instruction->base;
-}
-
static IrInstruction *ir_build_atomic_rmw(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *operand_type, IrInstruction *ptr, IrInstruction *op, IrInstruction *operand,
IrInstruction *ordering, AtomicRmwOp resolved_op, AtomicOrder resolved_ordering)
@@ -3237,28 +2999,6 @@ static IrInstruction *ir_build_atomic_load(IrBuilder *irb, Scope *scope, AstNode
return &instruction->base;
}
-static IrInstruction *ir_build_promise_result_type(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *promise_type)
-{
- IrInstructionPromiseResultType *instruction = ir_build_instruction(irb, scope, source_node);
- instruction->promise_type = promise_type;
-
- ir_ref_instruction(promise_type, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_await_bookkeeping(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *promise_result_type)
-{
- IrInstructionAwaitBookkeeping *instruction = ir_build_instruction(irb, scope, source_node);
- instruction->promise_result_type = promise_result_type;
-
- ir_ref_instruction(promise_result_type, irb->current_basic_block);
-
- return &instruction->base;
-}
-
static IrInstruction *ir_build_save_err_ret_addr(IrBuilder *irb, Scope *scope, AstNode *source_node) {
IrInstructionSaveErrRetAddr *instruction = ir_build_instruction(irb, scope, source_node);
return &instruction->base;
@@ -3275,21 +3015,6 @@ static IrInstruction *ir_build_add_implicit_return_type(IrBuilder *irb, Scope *s
return &instruction->base;
}
-static IrInstruction *ir_build_merge_err_ret_traces(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *coro_promise_ptr, IrInstruction *src_err_ret_trace_ptr, IrInstruction *dest_err_ret_trace_ptr)
-{
- IrInstructionMergeErrRetTraces *instruction = ir_build_instruction(irb, scope, source_node);
- instruction->coro_promise_ptr = coro_promise_ptr;
- instruction->src_err_ret_trace_ptr = src_err_ret_trace_ptr;
- instruction->dest_err_ret_trace_ptr = dest_err_ret_trace_ptr;
-
- ir_ref_instruction(coro_promise_ptr, irb->current_basic_block);
- ir_ref_instruction(src_err_ret_trace_ptr, irb->current_basic_block);
- ir_ref_instruction(dest_err_ret_trace_ptr, irb->current_basic_block);
-
- return &instruction->base;
-}
-
static IrInstruction *ir_build_mark_err_ret_trace_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *err_ret_trace_ptr) {
IrInstructionMarkErrRetTracePtr *instruction = ir_build_instruction(irb, scope, source_node);
instruction->err_ret_trace_ptr = err_ret_trace_ptr;
@@ -3488,7 +3213,6 @@ static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_sco
continue;
case ScopeIdDeferExpr:
case ScopeIdCImport:
- case ScopeIdCoroPrelude:
zig_unreachable();
}
}
@@ -3544,7 +3268,6 @@ static bool ir_gen_defers_for_block(IrBuilder *irb, Scope *inner_scope, Scope *o
continue;
case ScopeIdDeferExpr:
case ScopeIdCImport:
- case ScopeIdCoroPrelude:
zig_unreachable();
}
}
@@ -3563,18 +3286,6 @@ static void ir_set_cursor_at_end_and_append_block(IrBuilder *irb, IrBasicBlock *
ir_set_cursor_at_end(irb, basic_block);
}
-static ScopeSuspend *get_scope_suspend(Scope *scope) {
- while (scope) {
- if (scope->id == ScopeIdSuspend)
- return (ScopeSuspend *)scope;
- if (scope->id == ScopeIdFnDef)
- return nullptr;
-
- scope = scope->parent;
- }
- return nullptr;
-}
-
static ScopeDeferExpr *get_scope_defer_expr(Scope *scope) {
while (scope) {
if (scope->id == ScopeIdDeferExpr)
@@ -3604,47 +3315,7 @@ static IrInstruction *ir_gen_async_return(IrBuilder *irb, Scope *scope, AstNode
return return_inst;
}
- IrBasicBlock *suspended_block = ir_create_basic_block(irb, scope, "Suspended");
- IrBasicBlock *not_suspended_block = ir_create_basic_block(irb, scope, "NotSuspended");
- IrBasicBlock *store_awaiter_block = ir_create_basic_block(irb, scope, "StoreAwaiter");
- IrBasicBlock *check_canceled_block = ir_create_basic_block(irb, scope, "CheckCanceled");
-
- IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, scope, node, 0x7); // 0b111
- IrInstruction *ptr_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
- IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001
- IrInstruction *is_suspended_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010
- IrInstruction *promise_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_promise);
- IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false);
- IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
-
- ir_build_store_ptr(irb, scope, node, irb->exec->coro_result_field_ptr, return_value);
- IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize);
- IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
- usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, ptr_mask, nullptr,
- AtomicRmwOp_or, AtomicOrderSeqCst);
-
- IrInstruction *is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false);
- IrInstruction *is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false);
- ir_build_cond_br(irb, scope, node, is_suspended_bool, suspended_block, not_suspended_block, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, suspended_block);
- ir_build_unreachable(irb, scope, node);
-
- ir_set_cursor_at_end_and_append_block(irb, not_suspended_block);
- IrInstruction *await_handle_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
- // if we ever add null checking safety to the ptrtoint instruction, it needs to be disabled here
- IrInstruction *have_await_handle = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false);
- ir_build_cond_br(irb, scope, node, have_await_handle, store_awaiter_block, check_canceled_block, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, store_awaiter_block);
- IrInstruction *await_handle = ir_build_int_to_ptr(irb, scope, node, promise_type_val, await_handle_addr);
- ir_build_store_ptr(irb, scope, node, irb->exec->await_handle_var_ptr, await_handle);
- ir_build_br(irb, scope, node, irb->exec->coro_normal_final, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, check_canceled_block);
- IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
- IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
- return ir_build_cond_br(irb, scope, node, is_canceled_bool, irb->exec->coro_final_cleanup_block, irb->exec->coro_early_final, is_comptime);
+ zig_panic("TODO async return");
}
static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval, ResultLoc *result_loc) {
@@ -5386,7 +5057,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
FnInline fn_inline = (builtin_fn->id == BuiltinFnIdInlineCall) ? FnInlineAlways : FnInlineNever;
IrInstruction *call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false,
- fn_inline, false, nullptr, nullptr, result_loc);
+ fn_inline, false, nullptr, result_loc);
return ir_lval_wrap(irb, scope, call, lval, result_loc);
}
case BuiltinFnIdNewStackCall:
@@ -5417,7 +5088,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
}
IrInstruction *call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false,
- FnInlineAuto, false, nullptr, new_stack, result_loc);
+ FnInlineAuto, false, new_stack, result_loc);
return ir_lval_wrap(irb, scope, call, lval, result_loc);
}
case BuiltinFnIdTypeId:
@@ -5722,17 +5393,12 @@ static IrInstruction *ir_gen_fn_call(IrBuilder *irb, Scope *scope, AstNode *node
}
bool is_async = node->data.fn_call_expr.is_async;
- IrInstruction *async_allocator = nullptr;
if (is_async) {
- if (node->data.fn_call_expr.async_allocator) {
- async_allocator = ir_gen_node(irb, node->data.fn_call_expr.async_allocator, scope);
- if (async_allocator == irb->codegen->invalid_instruction)
- return async_allocator;
- }
+ zig_panic("TODO async fn call");
}
IrInstruction *fn_call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false, FnInlineAuto,
- is_async, async_allocator, nullptr, result_loc);
+ is_async, nullptr, result_loc);
return ir_lval_wrap(irb, scope, fn_call, lval, result_loc);
}
@@ -6751,22 +6417,6 @@ static IrInstruction *ir_gen_array_type(IrBuilder *irb, Scope *scope, AstNode *n
}
}
-static IrInstruction *ir_gen_promise_type(IrBuilder *irb, Scope *scope, AstNode *node) {
- assert(node->type == NodeTypePromiseType);
-
- AstNode *payload_type_node = node->data.promise_type.payload_type;
- IrInstruction *payload_type_value = nullptr;
-
- if (payload_type_node != nullptr) {
- payload_type_value = ir_gen_node(irb, payload_type_node, scope);
- if (payload_type_value == irb->codegen->invalid_instruction)
- return payload_type_value;
-
- }
-
- return ir_build_promise_type(irb, scope, node, payload_type_value);
-}
-
static IrInstruction *ir_gen_undefined_literal(IrBuilder *irb, Scope *scope, AstNode *node) {
assert(node->type == NodeTypeUndefinedLiteral);
return ir_build_const_undefined(irb, scope, node);
@@ -7969,87 +7619,7 @@ static IrInstruction *ir_gen_fn_proto(IrBuilder *irb, Scope *parent_scope, AstNo
//return_type = nullptr;
}
- IrInstruction *async_allocator_type_value = nullptr;
- if (node->data.fn_proto.async_allocator_type != nullptr) {
- async_allocator_type_value = ir_gen_node(irb, node->data.fn_proto.async_allocator_type, parent_scope);
- if (async_allocator_type_value == irb->codegen->invalid_instruction)
- return irb->codegen->invalid_instruction;
- }
-
- return ir_build_fn_proto(irb, parent_scope, node, param_types, align_value, return_type,
- async_allocator_type_value, is_var_args);
-}
-
-static IrInstruction *ir_gen_cancel_target(IrBuilder *irb, Scope *scope, AstNode *node,
- IrInstruction *target_inst, bool cancel_non_suspended, bool cancel_awaited)
-{
- IrBasicBlock *done_block = ir_create_basic_block(irb, scope, "CancelDone");
- IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled");
- IrBasicBlock *pre_return_block = ir_create_basic_block(irb, scope, "PreReturn");
- IrBasicBlock *post_return_block = ir_create_basic_block(irb, scope, "PostReturn");
- IrBasicBlock *do_cancel_block = ir_create_basic_block(irb, scope, "DoCancel");
-
- IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
- IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize);
- IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false);
- IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001
- IrInstruction *promise_T_type_val = ir_build_const_type(irb, scope, node,
- get_promise_type(irb->codegen, irb->codegen->builtin_types.entry_void));
- IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, scope, node, 0x7); // 0b111
- IrInstruction *ptr_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
- IrInstruction *await_mask = ir_build_const_usize(irb, scope, node, 0x4); // 0b100
- IrInstruction *is_suspended_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010
-
- // TODO relies on Zig not re-ordering fields
- IrInstruction *casted_target_inst = ir_build_ptr_cast_src(irb, scope, node, promise_T_type_val, target_inst,
- false);
- IrInstruction *coro_promise_ptr = ir_build_coro_promise(irb, scope, node, casted_target_inst);
- Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME);
- IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr,
- atomic_state_field_name, false);
-
- // set the is_canceled bit
- IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
- usize_type_val, atomic_state_ptr, nullptr, is_canceled_mask, nullptr,
- AtomicRmwOp_or, AtomicOrderSeqCst);
-
- IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
- IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
- ir_build_cond_br(irb, scope, node, is_canceled_bool, done_block, not_canceled_block, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
- IrInstruction *awaiter_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
- IrInstruction *is_returned_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpEq, awaiter_addr, ptr_mask, false);
- ir_build_cond_br(irb, scope, node, is_returned_bool, post_return_block, pre_return_block, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, post_return_block);
- if (cancel_awaited) {
- ir_build_br(irb, scope, node, do_cancel_block, is_comptime);
- } else {
- IrInstruction *is_awaited_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, await_mask, false);
- IrInstruction *is_awaited_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_awaited_value, zero, false);
- ir_build_cond_br(irb, scope, node, is_awaited_bool, done_block, do_cancel_block, is_comptime);
- }
-
- ir_set_cursor_at_end_and_append_block(irb, pre_return_block);
- if (cancel_awaited) {
- if (cancel_non_suspended) {
- ir_build_br(irb, scope, node, do_cancel_block, is_comptime);
- } else {
- IrInstruction *is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false);
- IrInstruction *is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false);
- ir_build_cond_br(irb, scope, node, is_suspended_bool, do_cancel_block, done_block, is_comptime);
- }
- } else {
- ir_build_br(irb, scope, node, done_block, is_comptime);
- }
-
- ir_set_cursor_at_end_and_append_block(irb, do_cancel_block);
- ir_build_cancel(irb, scope, node, target_inst);
- ir_build_br(irb, scope, node, done_block, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, done_block);
- return ir_build_const_void(irb, scope, node);
+ return ir_build_fn_proto(irb, parent_scope, node, param_types, align_value, return_type, is_var_args);
}
static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node) {
@@ -8059,57 +7629,7 @@ static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node)
if (target_inst == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
- return ir_gen_cancel_target(irb, scope, node, target_inst, false, true);
-}
-
-static IrInstruction *ir_gen_resume_target(IrBuilder *irb, Scope *scope, AstNode *node,
- IrInstruction *target_inst)
-{
- IrBasicBlock *done_block = ir_create_basic_block(irb, scope, "ResumeDone");
- IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled");
- IrBasicBlock *suspended_block = ir_create_basic_block(irb, scope, "IsSuspended");
- IrBasicBlock *not_suspended_block = ir_create_basic_block(irb, scope, "IsNotSuspended");
-
- IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
- IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001
- IrInstruction *is_suspended_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010
- IrInstruction *and_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, is_suspended_mask);
- IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false);
- IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize);
- IrInstruction *promise_T_type_val = ir_build_const_type(irb, scope, node,
- get_promise_type(irb->codegen, irb->codegen->builtin_types.entry_void));
-
- // TODO relies on Zig not re-ordering fields
- IrInstruction *casted_target_inst = ir_build_ptr_cast_src(irb, scope, node, promise_T_type_val, target_inst,
- false);
- IrInstruction *coro_promise_ptr = ir_build_coro_promise(irb, scope, node, casted_target_inst);
- Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME);
- IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr,
- atomic_state_field_name, false);
-
- // clear the is_suspended bit
- IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
- usize_type_val, atomic_state_ptr, nullptr, and_mask, nullptr,
- AtomicRmwOp_and, AtomicOrderSeqCst);
-
- IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
- IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
- ir_build_cond_br(irb, scope, node, is_canceled_bool, done_block, not_canceled_block, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
- IrInstruction *is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false);
- IrInstruction *is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false);
- ir_build_cond_br(irb, scope, node, is_suspended_bool, suspended_block, not_suspended_block, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, not_suspended_block);
- ir_build_unreachable(irb, scope, node);
-
- ir_set_cursor_at_end_and_append_block(irb, suspended_block);
- ir_build_coro_resume(irb, scope, node, target_inst);
- ir_build_br(irb, scope, node, done_block, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, done_block);
- return ir_build_const_void(irb, scope, node);
+ zig_panic("TODO ir_gen_cancel");
}
static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node) {
@@ -8119,7 +7639,7 @@ static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node)
if (target_inst == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
- return ir_gen_resume_target(irb, scope, node, target_inst);
+ zig_panic("TODO ir_gen_resume");
}
static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *node) {
@@ -8129,298 +7649,13 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n
if (target_inst == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
- ZigFn *fn_entry = exec_fn_entry(irb->exec);
- if (!fn_entry) {
- add_node_error(irb->codegen, node, buf_sprintf("await outside function definition"));
- return irb->codegen->invalid_instruction;
- }
- if (fn_entry->type_entry->data.fn.fn_type_id.cc != CallingConventionAsync) {
- add_node_error(irb->codegen, node, buf_sprintf("await in non-async function"));
- return irb->codegen->invalid_instruction;
- }
-
- ScopeDeferExpr *scope_defer_expr = get_scope_defer_expr(scope);
- if (scope_defer_expr) {
- if (!scope_defer_expr->reported_err) {
- add_node_error(irb->codegen, node, buf_sprintf("cannot await inside defer expression"));
- scope_defer_expr->reported_err = true;
- }
- return irb->codegen->invalid_instruction;
- }
-
- Scope *outer_scope = irb->exec->begin_scope;
-
- IrInstruction *coro_promise_ptr = ir_build_coro_promise(irb, scope, node, target_inst);
- Buf *result_ptr_field_name = buf_create_from_str(RESULT_PTR_FIELD_NAME);
- IrInstruction *result_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_ptr_field_name, false);
-
- if (irb->codegen->have_err_ret_tracing) {
- IrInstruction *err_ret_trace_ptr = ir_build_error_return_trace(irb, scope, node, IrInstructionErrorReturnTrace::NonNull);
- Buf *err_ret_trace_ptr_field_name = buf_create_from_str(ERR_RET_TRACE_PTR_FIELD_NAME);
- IrInstruction *err_ret_trace_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_ptr_field_name, false);
- ir_build_store_ptr(irb, scope, node, err_ret_trace_ptr_field_ptr, err_ret_trace_ptr);
- }
-
- IrBasicBlock *already_awaited_block = ir_create_basic_block(irb, scope, "AlreadyAwaited");
- IrBasicBlock *not_awaited_block = ir_create_basic_block(irb, scope, "NotAwaited");
- IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled");
- IrBasicBlock *yes_suspend_block = ir_create_basic_block(irb, scope, "YesSuspend");
- IrBasicBlock *no_suspend_block = ir_create_basic_block(irb, scope, "NoSuspend");
- IrBasicBlock *merge_block = ir_create_basic_block(irb, scope, "MergeSuspend");
- IrBasicBlock *cleanup_block = ir_create_basic_block(irb, scope, "SuspendCleanup");
- IrBasicBlock *resume_block = ir_create_basic_block(irb, scope, "SuspendResume");
- IrBasicBlock *cancel_target_block = ir_create_basic_block(irb, scope, "CancelTarget");
- IrBasicBlock *do_cancel_block = ir_create_basic_block(irb, scope, "DoCancel");
- IrBasicBlock *do_defers_block = ir_create_basic_block(irb, scope, "DoDefers");
- IrBasicBlock *destroy_block = ir_create_basic_block(irb, scope, "DestroyBlock");
- IrBasicBlock *my_suspended_block = ir_create_basic_block(irb, scope, "AlreadySuspended");
- IrBasicBlock *my_not_suspended_block = ir_create_basic_block(irb, scope, "NotAlreadySuspended");
- IrBasicBlock *do_suspend_block = ir_create_basic_block(irb, scope, "DoSuspend");
-
- Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME);
- IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr,
- atomic_state_field_name, false);
-
- IrInstruction *promise_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_promise);
- IrInstruction *const_bool_false = ir_build_const_bool(irb, scope, node, false);
- IrInstruction *undef = ir_build_const_undefined(irb, scope, node);
- IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize);
- IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
- IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, scope, node, 0x7); // 0b111
- IrInstruction *ptr_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
- IrInstruction *await_mask = ir_build_const_usize(irb, scope, node, 0x4); // 0b100
- IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001
- IrInstruction *is_suspended_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010
-
- ZigVar *result_var = ir_create_var(irb, node, scope, nullptr,
- false, false, true, const_bool_false);
- IrInstruction *target_promise_type = ir_build_typeof(irb, scope, node, target_inst);
- IrInstruction *promise_result_type = ir_build_promise_result_type(irb, scope, node, target_promise_type);
- ir_build_await_bookkeeping(irb, scope, node, promise_result_type);
- IrInstruction *undef_promise_result = ir_build_implicit_cast(irb, scope, node, promise_result_type, undef, nullptr);
- build_decl_var_and_init(irb, scope, node, result_var, undef_promise_result, "result", const_bool_false);
- IrInstruction *my_result_var_ptr = ir_build_var_ptr(irb, scope, node, result_var);
- ir_build_store_ptr(irb, scope, node, result_ptr_field_ptr, my_result_var_ptr);
- IrInstruction *save_token = ir_build_coro_save(irb, scope, node, irb->exec->coro_handle);
-
- IrInstruction *coro_handle_addr = ir_build_ptr_to_int(irb, scope, node, irb->exec->coro_handle);
- IrInstruction *mask_bits = ir_build_bin_op(irb, scope, node, IrBinOpBinOr, coro_handle_addr, await_mask, false);
- IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
- usize_type_val, atomic_state_ptr, nullptr, mask_bits, nullptr,
- AtomicRmwOp_or, AtomicOrderSeqCst);
-
- IrInstruction *is_awaited_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, await_mask, false);
- IrInstruction *is_awaited_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_awaited_value, zero, false);
- ir_build_cond_br(irb, scope, node, is_awaited_bool, already_awaited_block, not_awaited_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, already_awaited_block);
- ir_build_unreachable(irb, scope, node);
-
- ir_set_cursor_at_end_and_append_block(irb, not_awaited_block);
- IrInstruction *await_handle_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
- IrInstruction *is_non_null = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false);
- IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
- IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
- ir_build_cond_br(irb, scope, node, is_canceled_bool, cancel_target_block, not_canceled_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
- ir_build_cond_br(irb, scope, node, is_non_null, no_suspend_block, yes_suspend_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, cancel_target_block);
- ir_build_cancel(irb, scope, node, target_inst);
- ir_mark_gen(ir_build_br(irb, scope, node, cleanup_block, const_bool_false));
-
- ir_set_cursor_at_end_and_append_block(irb, no_suspend_block);
- if (irb->codegen->have_err_ret_tracing) {
- Buf *err_ret_trace_field_name = buf_create_from_str(ERR_RET_TRACE_FIELD_NAME);
- IrInstruction *src_err_ret_trace_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_field_name, false);
- IrInstruction *dest_err_ret_trace_ptr = ir_build_error_return_trace(irb, scope, node, IrInstructionErrorReturnTrace::NonNull);
- ir_build_merge_err_ret_traces(irb, scope, node, coro_promise_ptr, src_err_ret_trace_ptr, dest_err_ret_trace_ptr);
- }
- Buf *result_field_name = buf_create_from_str(RESULT_FIELD_NAME);
- IrInstruction *promise_result_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_field_name, false);
- // If the type of the result handle_is_ptr then this does not actually perform a load. But we need it to,
- // because we're about to destroy the memory. So we store it into our result variable.
- IrInstruction *no_suspend_result = ir_build_load_ptr(irb, scope, node, promise_result_ptr);
- ir_build_store_ptr(irb, scope, node, my_result_var_ptr, no_suspend_result);
- ir_build_cancel(irb, scope, node, target_inst);
- ir_build_br(irb, scope, node, merge_block, const_bool_false);
-
-
- ir_set_cursor_at_end_and_append_block(irb, yes_suspend_block);
- IrInstruction *my_prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
- usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, is_suspended_mask, nullptr,
- AtomicRmwOp_or, AtomicOrderSeqCst);
- IrInstruction *my_is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, my_prev_atomic_value, is_suspended_mask, false);
- IrInstruction *my_is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, my_is_suspended_value, zero, false);
- ir_build_cond_br(irb, scope, node, my_is_suspended_bool, my_suspended_block, my_not_suspended_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, my_suspended_block);
- ir_build_unreachable(irb, scope, node);
-
- ir_set_cursor_at_end_and_append_block(irb, my_not_suspended_block);
- IrInstruction *my_is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, my_prev_atomic_value, is_canceled_mask, false);
- IrInstruction *my_is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, my_is_canceled_value, zero, false);
- ir_build_cond_br(irb, scope, node, my_is_canceled_bool, cleanup_block, do_suspend_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, do_suspend_block);
- IrInstruction *suspend_code = ir_build_coro_suspend(irb, scope, node, save_token, const_bool_false);
-
- IrInstructionSwitchBrCase *cases = allocate(2);
- cases[0].value = ir_build_const_u8(irb, scope, node, 0);
- cases[0].block = resume_block;
- cases[1].value = ir_build_const_u8(irb, scope, node, 1);
- cases[1].block = destroy_block;
- ir_build_switch_br(irb, scope, node, suspend_code, irb->exec->coro_suspend_block,
- 2, cases, const_bool_false, nullptr);
-
- ir_set_cursor_at_end_and_append_block(irb, destroy_block);
- ir_gen_cancel_target(irb, scope, node, target_inst, false, true);
- ir_mark_gen(ir_build_br(irb, scope, node, cleanup_block, const_bool_false));
-
- ir_set_cursor_at_end_and_append_block(irb, cleanup_block);
- IrInstruction *my_mask_bits = ir_build_bin_op(irb, scope, node, IrBinOpBinOr, ptr_mask, is_canceled_mask, false);
- IrInstruction *b_my_prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
- usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, my_mask_bits, nullptr,
- AtomicRmwOp_or, AtomicOrderSeqCst);
- IrInstruction *my_await_handle_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, b_my_prev_atomic_value, ptr_mask, false);
- IrInstruction *dont_have_my_await_handle = ir_build_bin_op(irb, scope, node, IrBinOpCmpEq, my_await_handle_addr, zero, false);
- IrInstruction *dont_destroy_ourselves = ir_build_bin_op(irb, scope, node, IrBinOpBoolAnd, dont_have_my_await_handle, is_canceled_bool, false);
- ir_build_cond_br(irb, scope, node, dont_have_my_await_handle, do_defers_block, do_cancel_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, do_cancel_block);
- IrInstruction *my_await_handle = ir_build_int_to_ptr(irb, scope, node, promise_type_val, my_await_handle_addr);
- ir_gen_cancel_target(irb, scope, node, my_await_handle, true, false);
- ir_mark_gen(ir_build_br(irb, scope, node, do_defers_block, const_bool_false));
-
- ir_set_cursor_at_end_and_append_block(irb, do_defers_block);
- ir_gen_defers_for_block(irb, scope, outer_scope, true);
- ir_mark_gen(ir_build_cond_br(irb, scope, node, dont_destroy_ourselves, irb->exec->coro_early_final, irb->exec->coro_final_cleanup_block, const_bool_false));
-
- ir_set_cursor_at_end_and_append_block(irb, resume_block);
- ir_build_br(irb, scope, node, merge_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, merge_block);
- return ir_build_load_ptr(irb, scope, node, my_result_var_ptr);
+ zig_panic("TODO ir_gen_await_expr");
}
static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNode *node) {
assert(node->type == NodeTypeSuspend);
- ZigFn *fn_entry = exec_fn_entry(irb->exec);
- if (!fn_entry) {
- add_node_error(irb->codegen, node, buf_sprintf("suspend outside function definition"));
- return irb->codegen->invalid_instruction;
- }
- if (fn_entry->type_entry->data.fn.fn_type_id.cc != CallingConventionAsync) {
- add_node_error(irb->codegen, node, buf_sprintf("suspend in non-async function"));
- return irb->codegen->invalid_instruction;
- }
-
- ScopeDeferExpr *scope_defer_expr = get_scope_defer_expr(parent_scope);
- if (scope_defer_expr) {
- if (!scope_defer_expr->reported_err) {
- ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot suspend inside defer expression"));
- add_error_note(irb->codegen, msg, scope_defer_expr->base.source_node, buf_sprintf("defer here"));
- scope_defer_expr->reported_err = true;
- }
- return irb->codegen->invalid_instruction;
- }
- ScopeSuspend *existing_suspend_scope = get_scope_suspend(parent_scope);
- if (existing_suspend_scope) {
- if (!existing_suspend_scope->reported_err) {
- ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot suspend inside suspend block"));
- add_error_note(irb->codegen, msg, existing_suspend_scope->base.source_node, buf_sprintf("other suspend block here"));
- existing_suspend_scope->reported_err = true;
- }
- return irb->codegen->invalid_instruction;
- }
-
- Scope *outer_scope = irb->exec->begin_scope;
-
- IrBasicBlock *cleanup_block = ir_create_basic_block(irb, parent_scope, "SuspendCleanup");
- IrBasicBlock *resume_block = ir_create_basic_block(irb, parent_scope, "SuspendResume");
- IrBasicBlock *suspended_block = ir_create_basic_block(irb, parent_scope, "AlreadySuspended");
- IrBasicBlock *canceled_block = ir_create_basic_block(irb, parent_scope, "IsCanceled");
- IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, parent_scope, "NotCanceled");
- IrBasicBlock *not_suspended_block = ir_create_basic_block(irb, parent_scope, "NotAlreadySuspended");
- IrBasicBlock *cancel_awaiter_block = ir_create_basic_block(irb, parent_scope, "CancelAwaiter");
-
- IrInstruction *promise_type_val = ir_build_const_type(irb, parent_scope, node, irb->codegen->builtin_types.entry_promise);
- IrInstruction *const_bool_true = ir_build_const_bool(irb, parent_scope, node, true);
- IrInstruction *const_bool_false = ir_build_const_bool(irb, parent_scope, node, false);
- IrInstruction *usize_type_val = ir_build_const_type(irb, parent_scope, node, irb->codegen->builtin_types.entry_usize);
- IrInstruction *is_canceled_mask = ir_build_const_usize(irb, parent_scope, node, 0x1); // 0b001
- IrInstruction *is_suspended_mask = ir_build_const_usize(irb, parent_scope, node, 0x2); // 0b010
- IrInstruction *zero = ir_build_const_usize(irb, parent_scope, node, 0);
- IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, parent_scope, node, 0x7); // 0b111
- IrInstruction *ptr_mask = ir_build_un_op(irb, parent_scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
-
- IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, parent_scope, node,
- usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, is_suspended_mask, nullptr,
- AtomicRmwOp_or, AtomicOrderSeqCst);
-
- IrInstruction *is_canceled_value = ir_build_bin_op(irb, parent_scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
- IrInstruction *is_canceled_bool = ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
- ir_build_cond_br(irb, parent_scope, node, is_canceled_bool, canceled_block, not_canceled_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, canceled_block);
- IrInstruction *await_handle_addr = ir_build_bin_op(irb, parent_scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
- IrInstruction *have_await_handle = ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false);
- IrBasicBlock *post_canceled_block = irb->current_basic_block;
- ir_build_cond_br(irb, parent_scope, node, have_await_handle, cancel_awaiter_block, cleanup_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, cancel_awaiter_block);
- IrInstruction *await_handle = ir_build_int_to_ptr(irb, parent_scope, node, promise_type_val, await_handle_addr);
- ir_gen_cancel_target(irb, parent_scope, node, await_handle, true, false);
- IrBasicBlock *post_cancel_awaiter_block = irb->current_basic_block;
- ir_build_br(irb, parent_scope, node, cleanup_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
- IrInstruction *is_suspended_value = ir_build_bin_op(irb, parent_scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false);
- IrInstruction *is_suspended_bool = ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false);
- ir_build_cond_br(irb, parent_scope, node, is_suspended_bool, suspended_block, not_suspended_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, suspended_block);
- ir_build_unreachable(irb, parent_scope, node);
-
- ir_set_cursor_at_end_and_append_block(irb, not_suspended_block);
- IrInstruction *suspend_code;
- if (node->data.suspend.block == nullptr) {
- suspend_code = ir_build_coro_suspend(irb, parent_scope, node, nullptr, const_bool_false);
- } else {
- Scope *child_scope;
- ScopeSuspend *suspend_scope = create_suspend_scope(irb->codegen, node, parent_scope);
- suspend_scope->resume_block = resume_block;
- child_scope = &suspend_scope->base;
- IrInstruction *save_token = ir_build_coro_save(irb, child_scope, node, irb->exec->coro_handle);
- ir_gen_node(irb, node->data.suspend.block, child_scope);
- suspend_code = ir_mark_gen(ir_build_coro_suspend(irb, parent_scope, node, save_token, const_bool_false));
- }
-
- IrInstructionSwitchBrCase *cases = allocate(2);
- cases[0].value = ir_mark_gen(ir_build_const_u8(irb, parent_scope, node, 0));
- cases[0].block = resume_block;
- cases[1].value = ir_mark_gen(ir_build_const_u8(irb, parent_scope, node, 1));
- cases[1].block = canceled_block;
- IrInstructionSwitchBr *switch_br = ir_build_switch_br(irb, parent_scope, node, suspend_code,
- irb->exec->coro_suspend_block, 2, cases, const_bool_false, nullptr);
- ir_mark_gen(&switch_br->base);
-
- ir_set_cursor_at_end_and_append_block(irb, cleanup_block);
- IrBasicBlock **incoming_blocks = allocate(2);
- IrInstruction **incoming_values = allocate(2);
- incoming_blocks[0] = post_canceled_block;
- incoming_values[0] = const_bool_true;
- incoming_blocks[1] = post_cancel_awaiter_block;
- incoming_values[1] = const_bool_false;
- IrInstruction *destroy_ourselves = ir_build_phi(irb, parent_scope, node, 2, incoming_blocks, incoming_values,
- nullptr);
- ir_gen_defers_for_block(irb, parent_scope, outer_scope, true);
- ir_mark_gen(ir_build_cond_br(irb, parent_scope, node, destroy_ourselves, irb->exec->coro_final_cleanup_block, irb->exec->coro_early_final, const_bool_false));
-
- ir_set_cursor_at_end_and_append_block(irb, resume_block);
- return ir_mark_gen(ir_build_const_void(irb, parent_scope, node));
+ zig_panic("TODO ir_gen_suspend");
}
static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scope,
@@ -8512,8 +7747,6 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
return ir_lval_wrap(irb, scope, ir_gen_array_type(irb, scope, node), lval, result_loc);
case NodeTypePointerType:
return ir_lval_wrap(irb, scope, ir_gen_pointer_type(irb, scope, node), lval, result_loc);
- case NodeTypePromiseType:
- return ir_lval_wrap(irb, scope, ir_gen_promise_type(irb, scope, node), lval, result_loc);
case NodeTypeStringLiteral:
return ir_lval_wrap(irb, scope, ir_gen_string_literal(irb, scope, node), lval, result_loc);
case NodeTypeUndefinedLiteral:
@@ -8624,105 +7857,8 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
ZigFn *fn_entry = exec_fn_entry(irb->exec);
bool is_async = fn_entry != nullptr && fn_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync;
- IrInstruction *coro_id;
- IrInstruction *u8_ptr_type;
- IrInstruction *const_bool_false;
- IrInstruction *coro_promise_ptr;
- IrInstruction *err_ret_trace_ptr;
- ZigType *return_type;
- Buf *result_ptr_field_name;
- ZigVar *coro_size_var;
if (is_async) {
- // create the coro promise
- Scope *coro_scope = create_coro_prelude_scope(irb->codegen, node, scope);
- const_bool_false = ir_build_const_bool(irb, coro_scope, node, false);
- ZigVar *promise_var = ir_create_var(irb, node, coro_scope, nullptr, false, false, true, const_bool_false);
-
- return_type = fn_entry->type_entry->data.fn.fn_type_id.return_type;
- IrInstruction *undef = ir_build_const_undefined(irb, coro_scope, node);
- // TODO mark this var decl as "no safety" e.g. disable initializing the undef value to 0xaa
- ZigType *coro_frame_type = get_promise_frame_type(irb->codegen, return_type);
- IrInstruction *coro_frame_type_value = ir_build_const_type(irb, coro_scope, node, coro_frame_type);
- IrInstruction *undef_coro_frame = ir_build_implicit_cast(irb, coro_scope, node, coro_frame_type_value, undef, nullptr);
- build_decl_var_and_init(irb, coro_scope, node, promise_var, undef_coro_frame, "promise", const_bool_false);
- coro_promise_ptr = ir_build_var_ptr(irb, coro_scope, node, promise_var);
-
- ZigVar *await_handle_var = ir_create_var(irb, node, coro_scope, nullptr, false, false, true, const_bool_false);
- IrInstruction *null_value = ir_build_const_null(irb, coro_scope, node);
- IrInstruction *await_handle_type_val = ir_build_const_type(irb, coro_scope, node,
- get_optional_type(irb->codegen, irb->codegen->builtin_types.entry_promise));
- IrInstruction *null_await_handle = ir_build_implicit_cast(irb, coro_scope, node, await_handle_type_val, null_value, nullptr);
- build_decl_var_and_init(irb, coro_scope, node, await_handle_var, null_await_handle, "await_handle", const_bool_false);
- irb->exec->await_handle_var_ptr = ir_build_var_ptr(irb, coro_scope, node, await_handle_var);
-
- u8_ptr_type = ir_build_const_type(irb, coro_scope, node,
- get_pointer_to_type(irb->codegen, irb->codegen->builtin_types.entry_u8, false));
- IrInstruction *promise_as_u8_ptr = ir_build_ptr_cast_src(irb, coro_scope, node, u8_ptr_type,
- coro_promise_ptr, false);
- coro_id = ir_build_coro_id(irb, coro_scope, node, promise_as_u8_ptr);
- coro_size_var = ir_create_var(irb, node, coro_scope, nullptr, false, false, true, const_bool_false);
- IrInstruction *coro_size = ir_build_coro_size(irb, coro_scope, node);
- build_decl_var_and_init(irb, coro_scope, node, coro_size_var, coro_size, "coro_size", const_bool_false);
- IrInstruction *implicit_allocator_ptr = ir_build_get_implicit_allocator(irb, coro_scope, node,
- ImplicitAllocatorIdArg);
- irb->exec->coro_allocator_var = ir_create_var(irb, node, coro_scope, nullptr, true, true, true, const_bool_false);
- build_decl_var_and_init(irb, coro_scope, node, irb->exec->coro_allocator_var, implicit_allocator_ptr,
- "allocator", const_bool_false);
- Buf *realloc_field_name = buf_create_from_str(ASYNC_REALLOC_FIELD_NAME);
- IrInstruction *realloc_fn_ptr = ir_build_field_ptr(irb, coro_scope, node, implicit_allocator_ptr, realloc_field_name, false);
- IrInstruction *realloc_fn = ir_build_load_ptr(irb, coro_scope, node, realloc_fn_ptr);
- IrInstruction *maybe_coro_mem_ptr = ir_build_coro_alloc_helper(irb, coro_scope, node, realloc_fn, coro_size);
- IrInstruction *alloc_result_is_ok = ir_build_test_nonnull(irb, coro_scope, node, maybe_coro_mem_ptr);
- IrBasicBlock *alloc_err_block = ir_create_basic_block(irb, coro_scope, "AllocError");
- IrBasicBlock *alloc_ok_block = ir_create_basic_block(irb, coro_scope, "AllocOk");
- ir_build_cond_br(irb, coro_scope, node, alloc_result_is_ok, alloc_ok_block, alloc_err_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, alloc_err_block);
- // we can return undefined here, because the caller passes a pointer to the error struct field
- // in the error union result, and we populate it in case of allocation failure.
- ir_build_return(irb, coro_scope, node, undef);
-
- ir_set_cursor_at_end_and_append_block(irb, alloc_ok_block);
- IrInstruction *coro_mem_ptr = ir_build_ptr_cast_src(irb, coro_scope, node, u8_ptr_type, maybe_coro_mem_ptr,
- false);
- irb->exec->coro_handle = ir_build_coro_begin(irb, coro_scope, node, coro_id, coro_mem_ptr);
-
- Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME);
- irb->exec->atomic_state_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr,
- atomic_state_field_name, false);
- IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
- ir_build_store_ptr(irb, scope, node, irb->exec->atomic_state_field_ptr, zero);
- Buf *result_field_name = buf_create_from_str(RESULT_FIELD_NAME);
- irb->exec->coro_result_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_field_name, false);
- result_ptr_field_name = buf_create_from_str(RESULT_PTR_FIELD_NAME);
- irb->exec->coro_result_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_ptr_field_name, false);
- ir_build_store_ptr(irb, scope, node, irb->exec->coro_result_ptr_field_ptr, irb->exec->coro_result_field_ptr);
- if (irb->codegen->have_err_ret_tracing) {
- // initialize the error return trace
- Buf *return_addresses_field_name = buf_create_from_str(RETURN_ADDRESSES_FIELD_NAME);
- IrInstruction *return_addresses_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, return_addresses_field_name, false);
-
- Buf *err_ret_trace_field_name = buf_create_from_str(ERR_RET_TRACE_FIELD_NAME);
- err_ret_trace_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_field_name, false);
- ir_build_mark_err_ret_trace_ptr(irb, scope, node, err_ret_trace_ptr);
-
- // coordinate with builtin.zig
- Buf *index_name = buf_create_from_str("index");
- IrInstruction *index_ptr = ir_build_field_ptr(irb, scope, node, err_ret_trace_ptr, index_name, false);
- ir_build_store_ptr(irb, scope, node, index_ptr, zero);
-
- Buf *instruction_addresses_name = buf_create_from_str("instruction_addresses");
- IrInstruction *addrs_slice_ptr = ir_build_field_ptr(irb, scope, node, err_ret_trace_ptr, instruction_addresses_name, false);
-
- IrInstruction *slice_value = ir_build_slice_src(irb, scope, node, return_addresses_ptr, zero, nullptr, false, no_result_loc());
- ir_build_store_ptr(irb, scope, node, addrs_slice_ptr, slice_value);
- }
-
-
- irb->exec->coro_early_final = ir_create_basic_block(irb, scope, "CoroEarlyFinal");
- irb->exec->coro_normal_final = ir_create_basic_block(irb, scope, "CoroNormalFinal");
- irb->exec->coro_suspend_block = ir_create_basic_block(irb, scope, "Suspend");
- irb->exec->coro_final_cleanup_block = ir_create_basic_block(irb, scope, "FinalCleanup");
+ zig_panic("ir_gen async fn");
}
IrInstruction *result = ir_gen_node_extra(irb, node, scope, LValNone, nullptr);
@@ -8735,117 +7871,6 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
ir_gen_async_return(irb, scope, result->source_node, result, true);
}
- if (is_async) {
- IrBasicBlock *invalid_resume_block = ir_create_basic_block(irb, scope, "InvalidResume");
- IrBasicBlock *check_free_block = ir_create_basic_block(irb, scope, "CheckFree");
-
- ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_early_final);
- IrInstruction *const_bool_true = ir_build_const_bool(irb, scope, node, true);
- IrInstruction *suspend_code = ir_build_coro_suspend(irb, scope, node, nullptr, const_bool_true);
- IrInstructionSwitchBrCase *cases = allocate(2);
- cases[0].value = ir_build_const_u8(irb, scope, node, 0);
- cases[0].block = invalid_resume_block;
- cases[1].value = ir_build_const_u8(irb, scope, node, 1);
- cases[1].block = irb->exec->coro_final_cleanup_block;
- ir_build_switch_br(irb, scope, node, suspend_code, irb->exec->coro_suspend_block, 2, cases, const_bool_false, nullptr);
-
- ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_suspend_block);
- ir_build_coro_end(irb, scope, node);
- ir_build_return(irb, scope, node, irb->exec->coro_handle);
-
- ir_set_cursor_at_end_and_append_block(irb, invalid_resume_block);
- ir_build_unreachable(irb, scope, node);
-
- ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_normal_final);
- if (type_has_bits(return_type)) {
- IrInstruction *u8_ptr_type_unknown_len = ir_build_const_type(irb, scope, node,
- get_pointer_to_type_extra(irb->codegen, irb->codegen->builtin_types.entry_u8,
- false, false, PtrLenUnknown, 0, 0, 0, false));
- IrInstruction *result_ptr = ir_build_load_ptr(irb, scope, node, irb->exec->coro_result_ptr_field_ptr);
- IrInstruction *result_ptr_as_u8_ptr = ir_build_ptr_cast_src(irb, scope, node, u8_ptr_type_unknown_len,
- result_ptr, false);
- IrInstruction *return_value_ptr_as_u8_ptr = ir_build_ptr_cast_src(irb, scope, node,
- u8_ptr_type_unknown_len, irb->exec->coro_result_field_ptr, false);
- IrInstruction *return_type_inst = ir_build_const_type(irb, scope, node,
- fn_entry->type_entry->data.fn.fn_type_id.return_type);
- IrInstruction *size_of_ret_val = ir_build_size_of(irb, scope, node, return_type_inst);
- ir_build_memcpy(irb, scope, node, result_ptr_as_u8_ptr, return_value_ptr_as_u8_ptr, size_of_ret_val);
- }
- if (irb->codegen->have_err_ret_tracing) {
- Buf *err_ret_trace_ptr_field_name = buf_create_from_str(ERR_RET_TRACE_PTR_FIELD_NAME);
- IrInstruction *err_ret_trace_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_ptr_field_name, false);
- IrInstruction *dest_err_ret_trace_ptr = ir_build_load_ptr(irb, scope, node, err_ret_trace_ptr_field_ptr);
- ir_build_merge_err_ret_traces(irb, scope, node, coro_promise_ptr, err_ret_trace_ptr, dest_err_ret_trace_ptr);
- }
- // Before we destroy the coroutine frame, we need to load the target promise into
- // a register or local variable which does not get spilled into the frame,
- // otherwise llvm tries to access memory inside the destroyed frame.
- IrInstruction *unwrapped_await_handle_ptr = ir_build_optional_unwrap_ptr(irb, scope, node,
- irb->exec->await_handle_var_ptr, false, false);
- IrInstruction *await_handle_in_block = ir_build_load_ptr(irb, scope, node, unwrapped_await_handle_ptr);
- ir_build_br(irb, scope, node, check_free_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_final_cleanup_block);
- ir_build_br(irb, scope, node, check_free_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, check_free_block);
- IrBasicBlock **incoming_blocks = allocate(2);
- IrInstruction **incoming_values = allocate(2);
- incoming_blocks[0] = irb->exec->coro_final_cleanup_block;
- incoming_values[0] = const_bool_false;
- incoming_blocks[1] = irb->exec->coro_normal_final;
- incoming_values[1] = const_bool_true;
- IrInstruction *resume_awaiter = ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values, nullptr);
-
- IrBasicBlock **merge_incoming_blocks = allocate(2);
- IrInstruction **merge_incoming_values = allocate(2);
- merge_incoming_blocks[0] = irb->exec->coro_final_cleanup_block;
- merge_incoming_values[0] = ir_build_const_undefined(irb, scope, node);
- merge_incoming_blocks[1] = irb->exec->coro_normal_final;
- merge_incoming_values[1] = await_handle_in_block;
- IrInstruction *awaiter_handle = ir_build_phi(irb, scope, node, 2, merge_incoming_blocks, merge_incoming_values, nullptr);
-
- Buf *shrink_field_name = buf_create_from_str(ASYNC_SHRINK_FIELD_NAME);
- IrInstruction *implicit_allocator_ptr = ir_build_get_implicit_allocator(irb, scope, node,
- ImplicitAllocatorIdLocalVar);
- IrInstruction *shrink_fn_ptr = ir_build_field_ptr(irb, scope, node, implicit_allocator_ptr, shrink_field_name, false);
- IrInstruction *shrink_fn = ir_build_load_ptr(irb, scope, node, shrink_fn_ptr);
- IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
- IrInstruction *coro_mem_ptr_maybe = ir_build_coro_free(irb, scope, node, coro_id, irb->exec->coro_handle);
- IrInstruction *u8_ptr_type_unknown_len = ir_build_const_type(irb, scope, node,
- get_pointer_to_type_extra(irb->codegen, irb->codegen->builtin_types.entry_u8,
- false, false, PtrLenUnknown, 0, 0, 0, false));
- IrInstruction *coro_mem_ptr = ir_build_ptr_cast_src(irb, scope, node, u8_ptr_type_unknown_len,
- coro_mem_ptr_maybe, false);
- IrInstruction *coro_mem_ptr_ref = ir_build_ref(irb, scope, node, coro_mem_ptr, true, false);
- IrInstruction *coro_size_ptr = ir_build_var_ptr(irb, scope, node, coro_size_var);
- IrInstruction *coro_size = ir_build_load_ptr(irb, scope, node, coro_size_ptr);
- IrInstruction *mem_slice = ir_build_slice_src(irb, scope, node, coro_mem_ptr_ref, zero, coro_size, false,
- no_result_loc());
- size_t arg_count = 5;
- IrInstruction **args = allocate(arg_count);
- args[0] = implicit_allocator_ptr; // self
- args[1] = mem_slice; // old_mem
- args[2] = ir_build_const_usize(irb, scope, node, 8); // old_align
- // TODO: intentional memory leak here. If this is set to 0 then there is an issue where a coroutine
- // calls the function and it frees its own stack frame, but then the return value is a slice, which
- // is implemented as an sret struct. writing to the return pointer causes invalid memory write.
- // We could work around it by having a global helper function which has a void return type
- // and calling that instead. But instead this hack will suffice until I rework coroutines to be
- // non-allocating. Basically coroutines are not supported right now until they are reworked.
- args[3] = ir_build_const_usize(irb, scope, node, 1); // new_size
- args[4] = ir_build_const_usize(irb, scope, node, 1); // new_align
- ir_build_call_src(irb, scope, node, nullptr, shrink_fn, arg_count, args, false, FnInlineAuto, false, nullptr,
- nullptr, no_result_loc());
-
- IrBasicBlock *resume_block = ir_create_basic_block(irb, scope, "Resume");
- ir_build_cond_br(irb, scope, node, resume_awaiter, resume_block, irb->exec->coro_suspend_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, resume_block);
- ir_gen_resume_target(irb, scope, node, awaiter_handle);
- ir_build_br(irb, scope, node, irb->exec->coro_suspend_block, const_bool_false);
- }
-
return true;
}
@@ -10189,12 +9214,6 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, ZigType *wanted
return result;
}
- if (wanted_type == ira->codegen->builtin_types.entry_promise &&
- actual_type->id == ZigTypeIdPromise)
- {
- return result;
- }
-
// fn
if (wanted_type->id == ZigTypeIdFn &&
actual_type->id == ZigTypeIdFn)
@@ -10229,20 +9248,6 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, ZigType *wanted
return result;
}
}
- if (!wanted_type->data.fn.is_generic && wanted_type->data.fn.fn_type_id.cc == CallingConventionAsync) {
- ConstCastOnly child = types_match_const_cast_only(ira,
- actual_type->data.fn.fn_type_id.async_allocator_type,
- wanted_type->data.fn.fn_type_id.async_allocator_type,
- source_node, false);
- if (child.id == ConstCastResultIdInvalid)
- return child;
- if (child.id != ConstCastResultIdOk) {
- result.id = ConstCastResultIdAsyncAllocatorType;
- result.data.async_allocator_type = allocate_nonzero(1);
- *result.data.async_allocator_type = child;
- return result;
- }
- }
if (wanted_type->data.fn.fn_type_id.param_count != actual_type->data.fn.fn_type_id.param_count) {
result.id = ConstCastResultIdFnArgCount;
return result;
@@ -12559,12 +11564,10 @@ static IrInstruction *ir_analyze_int_to_c_ptr(IrAnalyze *ira, IrInstruction *sou
static bool is_pointery_and_elem_is_not_pointery(ZigType *ty) {
if (ty->id == ZigTypeIdPointer) return ty->data.pointer.child_type->id != ZigTypeIdPointer;
if (ty->id == ZigTypeIdFn) return true;
- if (ty->id == ZigTypeIdPromise) return true;
if (ty->id == ZigTypeIdOptional) {
ZigType *ptr_ty = ty->data.maybe.child_type;
if (ptr_ty->id == ZigTypeIdPointer) return ptr_ty->data.pointer.child_type->id != ZigTypeIdPointer;
if (ptr_ty->id == ZigTypeIdFn) return true;
- if (ptr_ty->id == ZigTypeIdPromise) return true;
}
return false;
}
@@ -13640,7 +12643,6 @@ static IrInstruction *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp *
case ZigTypeIdOpaque:
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
- case ZigTypeIdPromise:
case ZigTypeIdEnum:
case ZigTypeIdEnumLiteral:
operator_allowed = is_equality_cmp;
@@ -15021,7 +14023,6 @@ static IrInstruction *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructio
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
- case ZigTypeIdPromise:
ir_add_error(ira, target,
buf_sprintf("invalid export target '%s'", buf_ptr(&type_value->name)));
break;
@@ -15045,7 +14046,6 @@ static IrInstruction *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructio
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
- case ZigTypeIdPromise:
case ZigTypeIdEnumLiteral:
ir_add_error(ira, target,
buf_sprintf("invalid export target type '%s'", buf_ptr(&target->value.type->name)));
@@ -15124,42 +14124,6 @@ static IrInstruction *ir_analyze_instruction_error_union(IrAnalyze *ira,
return ir_const_type(ira, &instruction->base, result_type);
}
-IrInstruction *ir_get_implicit_allocator(IrAnalyze *ira, IrInstruction *source_instr, ImplicitAllocatorId id) {
- ZigFn *parent_fn_entry = exec_fn_entry(ira->new_irb.exec);
- if (parent_fn_entry == nullptr) {
- ir_add_error(ira, source_instr, buf_sprintf("no implicit allocator available"));
- return ira->codegen->invalid_instruction;
- }
-
- FnTypeId *parent_fn_type = &parent_fn_entry->type_entry->data.fn.fn_type_id;
- if (parent_fn_type->cc != CallingConventionAsync) {
- ir_add_error(ira, source_instr, buf_sprintf("async function call from non-async caller requires allocator parameter"));
- return ira->codegen->invalid_instruction;
- }
-
- assert(parent_fn_type->async_allocator_type != nullptr);
-
- switch (id) {
- case ImplicitAllocatorIdArg:
- {
- IrInstruction *result = ir_build_get_implicit_allocator(&ira->new_irb, source_instr->scope,
- source_instr->source_node, ImplicitAllocatorIdArg);
- result->value.type = parent_fn_type->async_allocator_type;
- return result;
- }
- case ImplicitAllocatorIdLocalVar:
- {
- ZigVar *coro_allocator_var = ira->old_irb.exec->coro_allocator_var;
- assert(coro_allocator_var != nullptr);
- IrInstruction *var_ptr_inst = ir_get_var_ptr(ira, source_instr, coro_allocator_var);
- IrInstruction *result = ir_get_deref(ira, source_instr, var_ptr_inst, nullptr);
- assert(result->value.type != nullptr);
- return result;
- }
- }
- zig_unreachable();
-}
-
static IrInstruction *ir_analyze_alloca(IrAnalyze *ira, IrInstruction *source_inst, ZigType *var_type,
uint32_t align, const char *name_hint, bool force_comptime)
{
@@ -15589,50 +14553,6 @@ static IrInstruction *ir_analyze_instruction_reset_result(IrAnalyze *ira, IrInst
return ir_const_void(ira, &instruction->base);
}
-static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc *call_instruction, ZigFn *fn_entry,
- ZigType *fn_type, IrInstruction *fn_ref, IrInstruction **casted_args, size_t arg_count,
- IrInstruction *async_allocator_inst)
-{
- Buf *realloc_field_name = buf_create_from_str(ASYNC_REALLOC_FIELD_NAME);
- ir_assert(async_allocator_inst->value.type->id == ZigTypeIdPointer, &call_instruction->base);
- ZigType *container_type = async_allocator_inst->value.type->data.pointer.child_type;
- IrInstruction *field_ptr_inst = ir_analyze_container_field_ptr(ira, realloc_field_name, &call_instruction->base,
- async_allocator_inst, container_type, false);
- if (type_is_invalid(field_ptr_inst->value.type)) {
- return ira->codegen->invalid_instruction;
- }
- ZigType *ptr_to_realloc_fn_type = field_ptr_inst->value.type;
- ir_assert(ptr_to_realloc_fn_type->id == ZigTypeIdPointer, &call_instruction->base);
-
- ZigType *realloc_fn_type = ptr_to_realloc_fn_type->data.pointer.child_type;
- if (realloc_fn_type->id != ZigTypeIdFn) {
- ir_add_error(ira, &call_instruction->base,
- buf_sprintf("expected reallocation function, found '%s'", buf_ptr(&realloc_fn_type->name)));
- return ira->codegen->invalid_instruction;
- }
-
- ZigType *realloc_fn_return_type = realloc_fn_type->data.fn.fn_type_id.return_type;
- if (realloc_fn_return_type->id != ZigTypeIdErrorUnion) {
- ir_add_error(ira, fn_ref,
- buf_sprintf("expected allocation function to return error union, but it returns '%s'", buf_ptr(&realloc_fn_return_type->name)));
- return ira->codegen->invalid_instruction;
- }
- ZigType *alloc_fn_error_set_type = realloc_fn_return_type->data.error_union.err_set_type;
- ZigType *return_type = fn_type->data.fn.fn_type_id.return_type;
- ZigType *promise_type = get_promise_type(ira->codegen, return_type);
- ZigType *async_return_type = get_error_union_type(ira->codegen, alloc_fn_error_set_type, promise_type);
-
- IrInstruction *result_loc = ir_resolve_result(ira, &call_instruction->base, no_result_loc(),
- async_return_type, nullptr, true, true);
- if (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc)) {
- return result_loc;
- }
-
- return ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref, arg_count,
- casted_args, FnInlineAuto, true, async_allocator_inst, nullptr, result_loc,
- async_return_type);
-}
-
static bool ir_analyze_fn_call_inline_arg(IrAnalyze *ira, AstNode *fn_proto_node,
IrInstruction *arg, Scope **exec_scope, size_t *next_proto_i)
{
@@ -16330,32 +15250,8 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
break;
}
}
- IrInstruction *async_allocator_inst = nullptr;
if (call_instruction->is_async) {
- AstNode *async_allocator_type_node = fn_proto_node->data.fn_proto.async_allocator_type;
- if (async_allocator_type_node != nullptr) {
- ZigType *async_allocator_type = ir_analyze_type_expr(ira, impl_fn->child_scope, async_allocator_type_node);
- if (type_is_invalid(async_allocator_type))
- return ira->codegen->invalid_instruction;
- inst_fn_type_id.async_allocator_type = async_allocator_type;
- }
- IrInstruction *uncasted_async_allocator_inst;
- if (call_instruction->async_allocator == nullptr) {
- uncasted_async_allocator_inst = ir_get_implicit_allocator(ira, &call_instruction->base,
- ImplicitAllocatorIdLocalVar);
- if (type_is_invalid(uncasted_async_allocator_inst->value.type))
- return ira->codegen->invalid_instruction;
- } else {
- uncasted_async_allocator_inst = call_instruction->async_allocator->child;
- if (type_is_invalid(uncasted_async_allocator_inst->value.type))
- return ira->codegen->invalid_instruction;
- }
- if (inst_fn_type_id.async_allocator_type == nullptr) {
- inst_fn_type_id.async_allocator_type = uncasted_async_allocator_inst->value.type;
- }
- async_allocator_inst = ir_implicit_cast(ira, uncasted_async_allocator_inst, inst_fn_type_id.async_allocator_type);
- if (type_is_invalid(async_allocator_inst->value.type))
- return ira->codegen->invalid_instruction;
+ zig_panic("TODO async call");
}
auto existing_entry = ira->codegen->generic_table.put_unique(generic_id, impl_fn);
@@ -16398,15 +15294,12 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
size_t impl_param_count = impl_fn_type_id->param_count;
if (call_instruction->is_async) {
- IrInstruction *result = ir_analyze_async_call(ira, call_instruction, impl_fn, impl_fn->type_entry,
- fn_ref, casted_args, impl_param_count, async_allocator_inst);
- return ir_finish_anal(ira, result);
+ zig_panic("TODO async call");
}
- assert(async_allocator_inst == nullptr);
IrInstruction *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base,
impl_fn, nullptr, impl_param_count, casted_args, fn_inline,
- call_instruction->is_async, nullptr, casted_new_stack, result_loc,
+ call_instruction->is_async, casted_new_stack, result_loc,
impl_fn_type_id->return_type);
return ir_finish_anal(ira, new_call_instruction);
@@ -16474,25 +15367,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
return ira->codegen->invalid_instruction;
if (call_instruction->is_async) {
- IrInstruction *uncasted_async_allocator_inst;
- if (call_instruction->async_allocator == nullptr) {
- uncasted_async_allocator_inst = ir_get_implicit_allocator(ira, &call_instruction->base,
- ImplicitAllocatorIdLocalVar);
- if (type_is_invalid(uncasted_async_allocator_inst->value.type))
- return ira->codegen->invalid_instruction;
- } else {
- uncasted_async_allocator_inst = call_instruction->async_allocator->child;
- if (type_is_invalid(uncasted_async_allocator_inst->value.type))
- return ira->codegen->invalid_instruction;
-
- }
- IrInstruction *async_allocator_inst = ir_implicit_cast(ira, uncasted_async_allocator_inst, fn_type_id->async_allocator_type);
- if (type_is_invalid(async_allocator_inst->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_analyze_async_call(ira, call_instruction, fn_entry, fn_type, fn_ref,
- casted_args, call_param_count, async_allocator_inst);
- return ir_finish_anal(ira, result);
+ zig_panic("TODO async call");
}
if (fn_entry != nullptr && fn_entry->fn_inline == FnInlineAlways && fn_inline == FnInlineNever) {
@@ -16513,7 +15388,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
}
IrInstruction *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref,
- call_param_count, casted_args, fn_inline, false, nullptr, casted_new_stack,
+ call_param_count, casted_args, fn_inline, false, casted_new_stack,
result_loc, return_type);
return ir_finish_anal(ira, new_call_instruction);
}
@@ -16694,7 +15569,6 @@ static IrInstruction *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op_
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
- case ZigTypeIdPromise:
return ir_const_type(ira, &un_op_instruction->base, get_optional_type(ira->codegen, type_entry));
case ZigTypeIdUnreachable:
case ZigTypeIdOpaque:
@@ -18465,7 +17339,6 @@ static IrInstruction *ir_analyze_instruction_slice_type(IrAnalyze *ira,
case ZigTypeIdUnion:
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
{
ResolveStatus needed_status = (align_bytes == 0) ?
@@ -18580,7 +17453,6 @@ static IrInstruction *ir_analyze_instruction_array_type(IrAnalyze *ira,
case ZigTypeIdUnion:
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
{
if ((err = ensure_complete_type(ira->codegen, child_type)))
@@ -18592,22 +17464,6 @@ static IrInstruction *ir_analyze_instruction_array_type(IrAnalyze *ira,
zig_unreachable();
}
-static IrInstruction *ir_analyze_instruction_promise_type(IrAnalyze *ira, IrInstructionPromiseType *instruction) {
- ZigType *promise_type;
-
- if (instruction->payload_type == nullptr) {
- promise_type = ira->codegen->builtin_types.entry_promise;
- } else {
- ZigType *payload_type = ir_resolve_type(ira, instruction->payload_type->child);
- if (type_is_invalid(payload_type))
- return ira->codegen->invalid_instruction;
-
- promise_type = get_promise_type(ira->codegen, payload_type);
- }
-
- return ir_const_type(ira, &instruction->base, promise_type);
-}
-
static IrInstruction *ir_analyze_instruction_size_of(IrAnalyze *ira,
IrInstructionSizeOf *size_of_instruction)
{
@@ -18647,7 +17503,6 @@ static IrInstruction *ir_analyze_instruction_size_of(IrAnalyze *ira,
case ZigTypeIdEnum:
case ZigTypeIdUnion:
case ZigTypeIdFn:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
{
uint64_t size_in_bytes = type_size(ira->codegen, type_entry);
@@ -19134,7 +17989,6 @@ static IrInstruction *ir_analyze_instruction_switch_target(IrAnalyze *ira,
case ZigTypeIdComptimeInt:
case ZigTypeIdEnumLiteral:
case ZigTypeIdPointer:
- case ZigTypeIdPromise:
case ZigTypeIdFn:
case ZigTypeIdErrorSet: {
if (pointee_val) {
@@ -20645,32 +19499,6 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInstruction *source_instr
fields[0].type = ira->codegen->builtin_types.entry_type;
fields[0].data.x_type = type_entry->data.maybe.child_type;
- break;
- }
- case ZigTypeIdPromise:
- {
- result = create_const_vals(1);
- result->special = ConstValSpecialStatic;
- result->type = ir_type_info_get_type(ira, "Promise", nullptr);
-
- ConstExprValue *fields = create_const_vals(1);
- result->data.x_struct.fields = fields;
-
- // child: ?type
- ensure_field_index(result->type, "child", 0);
- fields[0].special = ConstValSpecialStatic;
- fields[0].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type);
-
- if (type_entry->data.promise.result_type == nullptr)
- fields[0].data.x_optional = nullptr;
- else {
- ConstExprValue *child_type = create_const_vals(1);
- child_type->special = ConstValSpecialStatic;
- child_type->type = ira->codegen->builtin_types.entry_type;
- child_type->data.x_type = type_entry->data.promise.result_type;
- fields[0].data.x_optional = child_type;
- }
-
break;
}
case ZigTypeIdEnum:
@@ -20982,7 +19810,7 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInstruction *source_instr
result->special = ConstValSpecialStatic;
result->type = ir_type_info_get_type(ira, "Fn", nullptr);
- ConstExprValue *fields = create_const_vals(6);
+ ConstExprValue *fields = create_const_vals(5);
result->data.x_struct.fields = fields;
// calling_convention: TypeInfo.CallingConvention
@@ -21015,19 +19843,6 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInstruction *source_instr
return_type->data.x_type = type_entry->data.fn.fn_type_id.return_type;
fields[3].data.x_optional = return_type;
}
- // async_allocator_type: type
- ensure_field_index(result->type, "async_allocator_type", 4);
- fields[4].special = ConstValSpecialStatic;
- fields[4].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type);
- if (type_entry->data.fn.fn_type_id.async_allocator_type == nullptr)
- fields[4].data.x_optional = nullptr;
- else {
- ConstExprValue *async_alloc_type = create_const_vals(1);
- async_alloc_type->special = ConstValSpecialStatic;
- async_alloc_type->type = ira->codegen->builtin_types.entry_type;
- async_alloc_type->data.x_type = type_entry->data.fn.fn_type_id.async_allocator_type;
- fields[4].data.x_optional = async_alloc_type;
- }
// args: []TypeInfo.FnArg
ZigType *type_info_fn_arg_type = ir_type_info_get_type(ira, "FnArg", nullptr);
if ((err = type_resolve(ira->codegen, type_info_fn_arg_type, ResolveStatusSizeKnown))) {
@@ -21042,10 +19857,9 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInstruction *source_instr
fn_arg_array->data.x_array.special = ConstArraySpecialNone;
fn_arg_array->data.x_array.data.s_none.elements = create_const_vals(fn_arg_count);
- init_const_slice(ira->codegen, &fields[5], fn_arg_array, 0, fn_arg_count, false);
+ init_const_slice(ira->codegen, &fields[4], fn_arg_array, 0, fn_arg_count, false);
- for (size_t fn_arg_index = 0; fn_arg_index < fn_arg_count; fn_arg_index++)
- {
+ for (size_t fn_arg_index = 0; fn_arg_index < fn_arg_count; fn_arg_index++) {
FnTypeParamInfo *fn_param_info = &type_entry->data.fn.fn_type_id.param_info[fn_arg_index];
ConstExprValue *fn_arg_val = &fn_arg_array->data.x_array.data.s_none.elements[fn_arg_index];
@@ -22803,11 +21617,7 @@ static IrInstruction *ir_analyze_instruction_frame_address(IrAnalyze *ira, IrIns
}
static IrInstruction *ir_analyze_instruction_handle(IrAnalyze *ira, IrInstructionHandle *instruction) {
- IrInstruction *result = ir_build_handle(&ira->new_irb, instruction->base.scope, instruction->base.source_node);
- ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
- assert(fn_entry != nullptr);
- result->value.type = get_promise_type(ira->codegen, fn_entry->type_entry->data.fn.fn_type_id.return_type);
- return result;
+ zig_panic("TODO anlayze @handle()");
}
static IrInstruction *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstructionAlignOf *instruction) {
@@ -22841,7 +21651,6 @@ static IrInstruction *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstruct
case ZigTypeIdInt:
case ZigTypeIdFloat:
case ZigTypeIdPointer:
- case ZigTypeIdPromise:
case ZigTypeIdArray:
case ZigTypeIdStruct:
case ZigTypeIdOptional:
@@ -23401,15 +22210,7 @@ static IrInstruction *ir_analyze_instruction_fn_proto(IrAnalyze *ira, IrInstruct
}
if (fn_type_id.cc == CallingConventionAsync) {
- if (instruction->async_allocator_type_value == nullptr) {
- ir_add_error(ira, &instruction->base,
- buf_sprintf("async fn proto missing allocator type"));
- return ira->codegen->invalid_instruction;
- }
- IrInstruction *async_allocator_type_value = instruction->async_allocator_type_value->child;
- fn_type_id.async_allocator_type = ir_resolve_type(ira, async_allocator_type_value);
- if (type_is_invalid(fn_type_id.async_allocator_type))
- return ira->codegen->invalid_instruction;
+ zig_panic("TODO");
}
return ir_const_type(ira, &instruction->base, get_fn_type(ira->codegen, &fn_type_id));
@@ -23905,7 +22706,6 @@ static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue
case ZigTypeIdEnumLiteral:
case ZigTypeIdUndefined:
case ZigTypeIdNull:
- case ZigTypeIdPromise:
case ZigTypeIdErrorUnion:
case ZigTypeIdErrorSet:
zig_unreachable();
@@ -24059,7 +22859,6 @@ static Error buf_read_value_bytes(IrAnalyze *ira, CodeGen *codegen, AstNode *sou
case ZigTypeIdEnumLiteral:
case ZigTypeIdUndefined:
case ZigTypeIdNull:
- case ZigTypeIdPromise:
zig_unreachable();
case ZigTypeIdVoid:
return ErrorNone;
@@ -24546,181 +23345,7 @@ static IrInstruction *ir_analyze_instruction_tag_type(IrAnalyze *ira, IrInstruct
}
static IrInstruction *ir_analyze_instruction_cancel(IrAnalyze *ira, IrInstructionCancel *instruction) {
- IrInstruction *target_inst = instruction->target->child;
- if (type_is_invalid(target_inst->value.type))
- return ira->codegen->invalid_instruction;
- IrInstruction *casted_target = ir_implicit_cast(ira, target_inst, ira->codegen->builtin_types.entry_promise);
- if (type_is_invalid(casted_target->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_cancel(&ira->new_irb, instruction->base.scope, instruction->base.source_node, casted_target);
- result->value.type = ira->codegen->builtin_types.entry_void;
- result->value.special = ConstValSpecialStatic;
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_id(IrAnalyze *ira, IrInstructionCoroId *instruction) {
- IrInstruction *promise_ptr = instruction->promise_ptr->child;
- if (type_is_invalid(promise_ptr->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_coro_id(&ira->new_irb, instruction->base.scope, instruction->base.source_node,
- promise_ptr);
- result->value.type = ira->codegen->builtin_types.entry_usize;
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_alloc(IrAnalyze *ira, IrInstructionCoroAlloc *instruction) {
- IrInstruction *coro_id = instruction->coro_id->child;
- if (type_is_invalid(coro_id->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_coro_alloc(&ira->new_irb, instruction->base.scope, instruction->base.source_node,
- coro_id);
- result->value.type = ira->codegen->builtin_types.entry_bool;
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_size(IrAnalyze *ira, IrInstructionCoroSize *instruction) {
- IrInstruction *result = ir_build_coro_size(&ira->new_irb, instruction->base.scope, instruction->base.source_node);
- result->value.type = ira->codegen->builtin_types.entry_usize;
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_begin(IrAnalyze *ira, IrInstructionCoroBegin *instruction) {
- IrInstruction *coro_id = instruction->coro_id->child;
- if (type_is_invalid(coro_id->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *coro_mem_ptr = instruction->coro_mem_ptr->child;
- if (type_is_invalid(coro_mem_ptr->value.type))
- return ira->codegen->invalid_instruction;
-
- ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
- ir_assert(fn_entry != nullptr, &instruction->base);
- IrInstruction *result = ir_build_coro_begin(&ira->new_irb, instruction->base.scope, instruction->base.source_node,
- coro_id, coro_mem_ptr);
- result->value.type = get_promise_type(ira->codegen, fn_entry->type_entry->data.fn.fn_type_id.return_type);
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_get_implicit_allocator(IrAnalyze *ira, IrInstructionGetImplicitAllocator *instruction) {
- return ir_get_implicit_allocator(ira, &instruction->base, instruction->id);
-}
-
-static IrInstruction *ir_analyze_instruction_coro_alloc_fail(IrAnalyze *ira, IrInstructionCoroAllocFail *instruction) {
- IrInstruction *err_val = instruction->err_val->child;
- if (type_is_invalid(err_val->value.type))
- return ir_unreach_error(ira);
-
- IrInstruction *result = ir_build_coro_alloc_fail(&ira->new_irb, instruction->base.scope, instruction->base.source_node, err_val);
- result->value.type = ira->codegen->builtin_types.entry_unreachable;
- return ir_finish_anal(ira, result);
-}
-
-static IrInstruction *ir_analyze_instruction_coro_suspend(IrAnalyze *ira, IrInstructionCoroSuspend *instruction) {
- IrInstruction *save_point = nullptr;
- if (instruction->save_point != nullptr) {
- save_point = instruction->save_point->child;
- if (type_is_invalid(save_point->value.type))
- return ira->codegen->invalid_instruction;
- }
-
- IrInstruction *is_final = instruction->is_final->child;
- if (type_is_invalid(is_final->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_coro_suspend(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, save_point, is_final);
- result->value.type = ira->codegen->builtin_types.entry_u8;
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_end(IrAnalyze *ira, IrInstructionCoroEnd *instruction) {
- IrInstruction *result = ir_build_coro_end(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node);
- result->value.type = ira->codegen->builtin_types.entry_void;
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_free(IrAnalyze *ira, IrInstructionCoroFree *instruction) {
- IrInstruction *coro_id = instruction->coro_id->child;
- if (type_is_invalid(coro_id->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *coro_handle = instruction->coro_handle->child;
- if (type_is_invalid(coro_handle->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_coro_free(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, coro_id, coro_handle);
- ZigType *ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, false);
- result->value.type = get_optional_type(ira->codegen, ptr_type);
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_resume(IrAnalyze *ira, IrInstructionCoroResume *instruction) {
- IrInstruction *awaiter_handle = instruction->awaiter_handle->child;
- if (type_is_invalid(awaiter_handle->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *casted_target = ir_implicit_cast(ira, awaiter_handle, ira->codegen->builtin_types.entry_promise);
- if (type_is_invalid(casted_target->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_coro_resume(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, casted_target);
- result->value.type = ira->codegen->builtin_types.entry_void;
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_save(IrAnalyze *ira, IrInstructionCoroSave *instruction) {
- IrInstruction *coro_handle = instruction->coro_handle->child;
- if (type_is_invalid(coro_handle->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_coro_save(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, coro_handle);
- result->value.type = ira->codegen->builtin_types.entry_usize;
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_promise(IrAnalyze *ira, IrInstructionCoroPromise *instruction) {
- IrInstruction *coro_handle = instruction->coro_handle->child;
- if (type_is_invalid(coro_handle->value.type))
- return ira->codegen->invalid_instruction;
-
- if (coro_handle->value.type->id != ZigTypeIdPromise ||
- coro_handle->value.type->data.promise.result_type == nullptr)
- {
- ir_add_error(ira, &instruction->base, buf_sprintf("expected promise->T, found '%s'",
- buf_ptr(&coro_handle->value.type->name)));
- return ira->codegen->invalid_instruction;
- }
-
- ZigType *coro_frame_type = get_promise_frame_type(ira->codegen,
- coro_handle->value.type->data.promise.result_type);
-
- IrInstruction *result = ir_build_coro_promise(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, coro_handle);
- result->value.type = get_pointer_to_type(ira->codegen, coro_frame_type, false);
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_alloc_helper(IrAnalyze *ira, IrInstructionCoroAllocHelper *instruction) {
- IrInstruction *realloc_fn = instruction->realloc_fn->child;
- if (type_is_invalid(realloc_fn->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *coro_size = instruction->coro_size->child;
- if (type_is_invalid(coro_size->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_coro_alloc_helper(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, realloc_fn, coro_size);
- ZigType *u8_ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, false);
- result->value.type = get_optional_type(ira->codegen, u8_ptr_type);
- return result;
+ zig_panic("TODO analyze cancel");
}
static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstruction *op) {
@@ -24853,65 +23478,6 @@ static IrInstruction *ir_analyze_instruction_atomic_load(IrAnalyze *ira, IrInstr
return result;
}
-static IrInstruction *ir_analyze_instruction_promise_result_type(IrAnalyze *ira, IrInstructionPromiseResultType *instruction) {
- ZigType *promise_type = ir_resolve_type(ira, instruction->promise_type->child);
- if (type_is_invalid(promise_type))
- return ira->codegen->invalid_instruction;
-
- if (promise_type->id != ZigTypeIdPromise || promise_type->data.promise.result_type == nullptr) {
- ir_add_error(ira, &instruction->base, buf_sprintf("expected promise->T, found '%s'",
- buf_ptr(&promise_type->name)));
- return ira->codegen->invalid_instruction;
- }
-
- return ir_const_type(ira, &instruction->base, promise_type->data.promise.result_type);
-}
-
-static IrInstruction *ir_analyze_instruction_await_bookkeeping(IrAnalyze *ira, IrInstructionAwaitBookkeeping *instruction) {
- ZigType *promise_result_type = ir_resolve_type(ira, instruction->promise_result_type->child);
- if (type_is_invalid(promise_result_type))
- return ira->codegen->invalid_instruction;
-
- ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
- ir_assert(fn_entry != nullptr, &instruction->base);
-
- if (type_can_fail(promise_result_type)) {
- fn_entry->calls_or_awaits_errorable_fn = true;
- }
-
- return ir_const_void(ira, &instruction->base);
-}
-
-static IrInstruction *ir_analyze_instruction_merge_err_ret_traces(IrAnalyze *ira,
- IrInstructionMergeErrRetTraces *instruction)
-{
- IrInstruction *coro_promise_ptr = instruction->coro_promise_ptr->child;
- if (type_is_invalid(coro_promise_ptr->value.type))
- return ira->codegen->invalid_instruction;
-
- ir_assert(coro_promise_ptr->value.type->id == ZigTypeIdPointer, &instruction->base);
- ZigType *promise_frame_type = coro_promise_ptr->value.type->data.pointer.child_type;
- ir_assert(promise_frame_type->id == ZigTypeIdStruct, &instruction->base);
- ZigType *promise_result_type = promise_frame_type->data.structure.fields[1].type_entry;
-
- if (!type_can_fail(promise_result_type)) {
- return ir_const_void(ira, &instruction->base);
- }
-
- IrInstruction *src_err_ret_trace_ptr = instruction->src_err_ret_trace_ptr->child;
- if (type_is_invalid(src_err_ret_trace_ptr->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *dest_err_ret_trace_ptr = instruction->dest_err_ret_trace_ptr->child;
- if (type_is_invalid(dest_err_ret_trace_ptr->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_merge_err_ret_traces(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, coro_promise_ptr, src_err_ret_trace_ptr, dest_err_ret_trace_ptr);
- result->value.type = ira->codegen->builtin_types.entry_void;
- return result;
-}
-
static IrInstruction *ir_analyze_instruction_save_err_ret_addr(IrAnalyze *ira, IrInstructionSaveErrRetAddr *instruction) {
IrInstruction *result = ir_build_save_err_ret_addr(&ira->new_irb, instruction->base.scope,
instruction->base.source_node);
@@ -25530,8 +24096,6 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_asm(ira, (IrInstructionAsm *)instruction);
case IrInstructionIdArrayType:
return ir_analyze_instruction_array_type(ira, (IrInstructionArrayType *)instruction);
- case IrInstructionIdPromiseType:
- return ir_analyze_instruction_promise_type(ira, (IrInstructionPromiseType *)instruction);
case IrInstructionIdSizeOf:
return ir_analyze_instruction_size_of(ira, (IrInstructionSizeOf *)instruction);
case IrInstructionIdTestNonNull:
@@ -25704,46 +24268,14 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_error_union(ira, (IrInstructionErrorUnion *)instruction);
case IrInstructionIdCancel:
return ir_analyze_instruction_cancel(ira, (IrInstructionCancel *)instruction);
- case IrInstructionIdCoroId:
- return ir_analyze_instruction_coro_id(ira, (IrInstructionCoroId *)instruction);
- case IrInstructionIdCoroAlloc:
- return ir_analyze_instruction_coro_alloc(ira, (IrInstructionCoroAlloc *)instruction);
- case IrInstructionIdCoroSize:
- return ir_analyze_instruction_coro_size(ira, (IrInstructionCoroSize *)instruction);
- case IrInstructionIdCoroBegin:
- return ir_analyze_instruction_coro_begin(ira, (IrInstructionCoroBegin *)instruction);
- case IrInstructionIdGetImplicitAllocator:
- return ir_analyze_instruction_get_implicit_allocator(ira, (IrInstructionGetImplicitAllocator *)instruction);
- case IrInstructionIdCoroAllocFail:
- return ir_analyze_instruction_coro_alloc_fail(ira, (IrInstructionCoroAllocFail *)instruction);
- case IrInstructionIdCoroSuspend:
- return ir_analyze_instruction_coro_suspend(ira, (IrInstructionCoroSuspend *)instruction);
- case IrInstructionIdCoroEnd:
- return ir_analyze_instruction_coro_end(ira, (IrInstructionCoroEnd *)instruction);
- case IrInstructionIdCoroFree:
- return ir_analyze_instruction_coro_free(ira, (IrInstructionCoroFree *)instruction);
- case IrInstructionIdCoroResume:
- return ir_analyze_instruction_coro_resume(ira, (IrInstructionCoroResume *)instruction);
- case IrInstructionIdCoroSave:
- return ir_analyze_instruction_coro_save(ira, (IrInstructionCoroSave *)instruction);
- case IrInstructionIdCoroPromise:
- return ir_analyze_instruction_coro_promise(ira, (IrInstructionCoroPromise *)instruction);
- case IrInstructionIdCoroAllocHelper:
- return ir_analyze_instruction_coro_alloc_helper(ira, (IrInstructionCoroAllocHelper *)instruction);
case IrInstructionIdAtomicRmw:
return ir_analyze_instruction_atomic_rmw(ira, (IrInstructionAtomicRmw *)instruction);
case IrInstructionIdAtomicLoad:
return ir_analyze_instruction_atomic_load(ira, (IrInstructionAtomicLoad *)instruction);
- case IrInstructionIdPromiseResultType:
- return ir_analyze_instruction_promise_result_type(ira, (IrInstructionPromiseResultType *)instruction);
- case IrInstructionIdAwaitBookkeeping:
- return ir_analyze_instruction_await_bookkeeping(ira, (IrInstructionAwaitBookkeeping *)instruction);
case IrInstructionIdSaveErrRetAddr:
return ir_analyze_instruction_save_err_ret_addr(ira, (IrInstructionSaveErrRetAddr *)instruction);
case IrInstructionIdAddImplicitReturnType:
return ir_analyze_instruction_add_implicit_return_type(ira, (IrInstructionAddImplicitReturnType *)instruction);
- case IrInstructionIdMergeErrRetTraces:
- return ir_analyze_instruction_merge_err_ret_traces(ira, (IrInstructionMergeErrRetTraces *)instruction);
case IrInstructionIdMarkErrRetTracePtr:
return ir_analyze_instruction_mark_err_ret_trace_ptr(ira, (IrInstructionMarkErrRetTracePtr *)instruction);
case IrInstructionIdFloatOp:
@@ -25788,9 +24320,7 @@ ZigType *ir_analyze(CodeGen *codegen, IrExecutable *old_exec, IrExecutable *new_
old_exec->analysis = ira;
ira->codegen = codegen;
- ZigFn *fn_entry = exec_fn_entry(old_exec);
- bool is_async = fn_entry != nullptr && fn_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync;
- ira->explicit_return_type = is_async ? get_promise_type(codegen, expected_type) : expected_type;
+ ira->explicit_return_type = expected_type;
ira->explicit_return_type_source_node = expected_type_source_node;
ira->old_irb.codegen = codegen;
@@ -25889,17 +24419,8 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdSetAlignStack:
case IrInstructionIdExport:
case IrInstructionIdCancel:
- case IrInstructionIdCoroId:
- case IrInstructionIdCoroBegin:
- case IrInstructionIdCoroAllocFail:
- case IrInstructionIdCoroEnd:
- case IrInstructionIdCoroResume:
- case IrInstructionIdCoroSave:
- case IrInstructionIdCoroAllocHelper:
- case IrInstructionIdAwaitBookkeeping:
case IrInstructionIdSaveErrRetAddr:
case IrInstructionIdAddImplicitReturnType:
- case IrInstructionIdMergeErrRetTraces:
case IrInstructionIdMarkErrRetTracePtr:
case IrInstructionIdAtomicRmw:
case IrInstructionIdCmpxchgGen:
@@ -25933,7 +24454,6 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdTypeOf:
case IrInstructionIdStructFieldPtr:
case IrInstructionIdArrayType:
- case IrInstructionIdPromiseType:
case IrInstructionIdSliceType:
case IrInstructionIdSizeOf:
case IrInstructionIdTestNonNull:
@@ -25993,13 +24513,6 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdTagType:
case IrInstructionIdErrorReturnTrace:
case IrInstructionIdErrorUnion:
- case IrInstructionIdGetImplicitAllocator:
- case IrInstructionIdCoroAlloc:
- case IrInstructionIdCoroSize:
- case IrInstructionIdCoroSuspend:
- case IrInstructionIdCoroFree:
- case IrInstructionIdCoroPromise:
- case IrInstructionIdPromiseResultType:
case IrInstructionIdFloatOp:
case IrInstructionIdMulAdd:
case IrInstructionIdAtomicLoad:
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index 588a9b2882..9ea70ba7ab 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -257,13 +257,7 @@ static void ir_print_result_loc(IrPrint *irp, ResultLoc *result_loc) {
static void ir_print_call_src(IrPrint *irp, IrInstructionCallSrc *call_instruction) {
if (call_instruction->is_async) {
- fprintf(irp->f, "async");
- if (call_instruction->async_allocator != nullptr) {
- fprintf(irp->f, "<");
- ir_print_other_instruction(irp, call_instruction->async_allocator);
- fprintf(irp->f, ">");
- }
- fprintf(irp->f, " ");
+ fprintf(irp->f, "async ");
}
if (call_instruction->fn_entry) {
fprintf(irp->f, "%s", buf_ptr(&call_instruction->fn_entry->symbol_name));
@@ -284,13 +278,7 @@ static void ir_print_call_src(IrPrint *irp, IrInstructionCallSrc *call_instructi
static void ir_print_call_gen(IrPrint *irp, IrInstructionCallGen *call_instruction) {
if (call_instruction->is_async) {
- fprintf(irp->f, "async");
- if (call_instruction->async_allocator != nullptr) {
- fprintf(irp->f, "<");
- ir_print_other_instruction(irp, call_instruction->async_allocator);
- fprintf(irp->f, ">");
- }
- fprintf(irp->f, " ");
+ fprintf(irp->f, "async ");
}
if (call_instruction->fn_entry) {
fprintf(irp->f, "%s", buf_ptr(&call_instruction->fn_entry->symbol_name));
@@ -477,14 +465,6 @@ static void ir_print_array_type(IrPrint *irp, IrInstructionArrayType *instructio
ir_print_other_instruction(irp, instruction->child_type);
}
-static void ir_print_promise_type(IrPrint *irp, IrInstructionPromiseType *instruction) {
- fprintf(irp->f, "promise");
- if (instruction->payload_type != nullptr) {
- fprintf(irp->f, "->");
- ir_print_other_instruction(irp, instruction->payload_type);
- }
-}
-
static void ir_print_slice_type(IrPrint *irp, IrInstructionSliceType *instruction) {
const char *const_kw = instruction->is_const ? "const " : "";
fprintf(irp->f, "[]%s", const_kw);
@@ -1396,105 +1376,6 @@ static void ir_print_cancel(IrPrint *irp, IrInstructionCancel *instruction) {
ir_print_other_instruction(irp, instruction->target);
}
-static void ir_print_get_implicit_allocator(IrPrint *irp, IrInstructionGetImplicitAllocator *instruction) {
- fprintf(irp->f, "@getImplicitAllocator(");
- switch (instruction->id) {
- case ImplicitAllocatorIdArg:
- fprintf(irp->f, "Arg");
- break;
- case ImplicitAllocatorIdLocalVar:
- fprintf(irp->f, "LocalVar");
- break;
- }
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_id(IrPrint *irp, IrInstructionCoroId *instruction) {
- fprintf(irp->f, "@coroId(");
- ir_print_other_instruction(irp, instruction->promise_ptr);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_alloc(IrPrint *irp, IrInstructionCoroAlloc *instruction) {
- fprintf(irp->f, "@coroAlloc(");
- ir_print_other_instruction(irp, instruction->coro_id);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_size(IrPrint *irp, IrInstructionCoroSize *instruction) {
- fprintf(irp->f, "@coroSize()");
-}
-
-static void ir_print_coro_begin(IrPrint *irp, IrInstructionCoroBegin *instruction) {
- fprintf(irp->f, "@coroBegin(");
- ir_print_other_instruction(irp, instruction->coro_id);
- fprintf(irp->f, ",");
- ir_print_other_instruction(irp, instruction->coro_mem_ptr);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_alloc_fail(IrPrint *irp, IrInstructionCoroAllocFail *instruction) {
- fprintf(irp->f, "@coroAllocFail(");
- ir_print_other_instruction(irp, instruction->err_val);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_suspend(IrPrint *irp, IrInstructionCoroSuspend *instruction) {
- fprintf(irp->f, "@coroSuspend(");
- if (instruction->save_point != nullptr) {
- ir_print_other_instruction(irp, instruction->save_point);
- } else {
- fprintf(irp->f, "null");
- }
- fprintf(irp->f, ",");
- ir_print_other_instruction(irp, instruction->is_final);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_end(IrPrint *irp, IrInstructionCoroEnd *instruction) {
- fprintf(irp->f, "@coroEnd()");
-}
-
-static void ir_print_coro_free(IrPrint *irp, IrInstructionCoroFree *instruction) {
- fprintf(irp->f, "@coroFree(");
- ir_print_other_instruction(irp, instruction->coro_id);
- fprintf(irp->f, ",");
- ir_print_other_instruction(irp, instruction->coro_handle);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_resume(IrPrint *irp, IrInstructionCoroResume *instruction) {
- fprintf(irp->f, "@coroResume(");
- ir_print_other_instruction(irp, instruction->awaiter_handle);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_save(IrPrint *irp, IrInstructionCoroSave *instruction) {
- fprintf(irp->f, "@coroSave(");
- ir_print_other_instruction(irp, instruction->coro_handle);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_promise(IrPrint *irp, IrInstructionCoroPromise *instruction) {
- fprintf(irp->f, "@coroPromise(");
- ir_print_other_instruction(irp, instruction->coro_handle);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_promise_result_type(IrPrint *irp, IrInstructionPromiseResultType *instruction) {
- fprintf(irp->f, "@PromiseResultType(");
- ir_print_other_instruction(irp, instruction->promise_type);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_alloc_helper(IrPrint *irp, IrInstructionCoroAllocHelper *instruction) {
- fprintf(irp->f, "@coroAllocHelper(");
- ir_print_other_instruction(irp, instruction->realloc_fn);
- fprintf(irp->f, ",");
- ir_print_other_instruction(irp, instruction->coro_size);
- fprintf(irp->f, ")");
-}
-
static void ir_print_atomic_rmw(IrPrint *irp, IrInstructionAtomicRmw *instruction) {
fprintf(irp->f, "@atomicRmw(");
if (instruction->operand_type != nullptr) {
@@ -1539,12 +1420,6 @@ static void ir_print_atomic_load(IrPrint *irp, IrInstructionAtomicLoad *instruct
fprintf(irp->f, ")");
}
-static void ir_print_await_bookkeeping(IrPrint *irp, IrInstructionAwaitBookkeeping *instruction) {
- fprintf(irp->f, "@awaitBookkeeping(");
- ir_print_other_instruction(irp, instruction->promise_result_type);
- fprintf(irp->f, ")");
-}
-
static void ir_print_save_err_ret_addr(IrPrint *irp, IrInstructionSaveErrRetAddr *instruction) {
fprintf(irp->f, "@saveErrRetAddr()");
}
@@ -1555,16 +1430,6 @@ static void ir_print_add_implicit_return_type(IrPrint *irp, IrInstructionAddImpl
fprintf(irp->f, ")");
}
-static void ir_print_merge_err_ret_traces(IrPrint *irp, IrInstructionMergeErrRetTraces *instruction) {
- fprintf(irp->f, "@mergeErrRetTraces(");
- ir_print_other_instruction(irp, instruction->coro_promise_ptr);
- fprintf(irp->f, ",");
- ir_print_other_instruction(irp, instruction->src_err_ret_trace_ptr);
- fprintf(irp->f, ",");
- ir_print_other_instruction(irp, instruction->dest_err_ret_trace_ptr);
- fprintf(irp->f, ")");
-}
-
static void ir_print_mark_err_ret_trace_ptr(IrPrint *irp, IrInstructionMarkErrRetTracePtr *instruction) {
fprintf(irp->f, "@markErrRetTracePtr(");
ir_print_other_instruction(irp, instruction->err_ret_trace_ptr);
@@ -1727,9 +1592,6 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdArrayType:
ir_print_array_type(irp, (IrInstructionArrayType *)instruction);
break;
- case IrInstructionIdPromiseType:
- ir_print_promise_type(irp, (IrInstructionPromiseType *)instruction);
- break;
case IrInstructionIdSliceType:
ir_print_slice_type(irp, (IrInstructionSliceType *)instruction);
break;
@@ -2033,63 +1895,15 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdCancel:
ir_print_cancel(irp, (IrInstructionCancel *)instruction);
break;
- case IrInstructionIdGetImplicitAllocator:
- ir_print_get_implicit_allocator(irp, (IrInstructionGetImplicitAllocator *)instruction);
- break;
- case IrInstructionIdCoroId:
- ir_print_coro_id(irp, (IrInstructionCoroId *)instruction);
- break;
- case IrInstructionIdCoroAlloc:
- ir_print_coro_alloc(irp, (IrInstructionCoroAlloc *)instruction);
- break;
- case IrInstructionIdCoroSize:
- ir_print_coro_size(irp, (IrInstructionCoroSize *)instruction);
- break;
- case IrInstructionIdCoroBegin:
- ir_print_coro_begin(irp, (IrInstructionCoroBegin *)instruction);
- break;
- case IrInstructionIdCoroAllocFail:
- ir_print_coro_alloc_fail(irp, (IrInstructionCoroAllocFail *)instruction);
- break;
- case IrInstructionIdCoroSuspend:
- ir_print_coro_suspend(irp, (IrInstructionCoroSuspend *)instruction);
- break;
- case IrInstructionIdCoroEnd:
- ir_print_coro_end(irp, (IrInstructionCoroEnd *)instruction);
- break;
- case IrInstructionIdCoroFree:
- ir_print_coro_free(irp, (IrInstructionCoroFree *)instruction);
- break;
- case IrInstructionIdCoroResume:
- ir_print_coro_resume(irp, (IrInstructionCoroResume *)instruction);
- break;
- case IrInstructionIdCoroSave:
- ir_print_coro_save(irp, (IrInstructionCoroSave *)instruction);
- break;
- case IrInstructionIdCoroAllocHelper:
- ir_print_coro_alloc_helper(irp, (IrInstructionCoroAllocHelper *)instruction);
- break;
case IrInstructionIdAtomicRmw:
ir_print_atomic_rmw(irp, (IrInstructionAtomicRmw *)instruction);
break;
- case IrInstructionIdCoroPromise:
- ir_print_coro_promise(irp, (IrInstructionCoroPromise *)instruction);
- break;
- case IrInstructionIdPromiseResultType:
- ir_print_promise_result_type(irp, (IrInstructionPromiseResultType *)instruction);
- break;
- case IrInstructionIdAwaitBookkeeping:
- ir_print_await_bookkeeping(irp, (IrInstructionAwaitBookkeeping *)instruction);
- break;
case IrInstructionIdSaveErrRetAddr:
ir_print_save_err_ret_addr(irp, (IrInstructionSaveErrRetAddr *)instruction);
break;
case IrInstructionIdAddImplicitReturnType:
ir_print_add_implicit_return_type(irp, (IrInstructionAddImplicitReturnType *)instruction);
break;
- case IrInstructionIdMergeErrRetTraces:
- ir_print_merge_err_ret_traces(irp, (IrInstructionMergeErrRetTraces *)instruction);
- break;
case IrInstructionIdMarkErrRetTracePtr:
ir_print_mark_err_ret_trace_ptr(irp, (IrInstructionMarkErrRetTracePtr *)instruction);
break;
diff --git a/src/parser.cpp b/src/parser.cpp
index fe1f89ac92..b1a593d9c9 100644
--- a/src/parser.cpp
+++ b/src/parser.cpp
@@ -282,9 +282,6 @@ static AstNode *ast_parse_prefix_op_expr(
case NodeTypeAwaitExpr:
right = &prefix->data.await_expr.expr;
break;
- case NodeTypePromiseType:
- right = &prefix->data.promise_type.payload_type;
- break;
case NodeTypeArrayType:
right = &prefix->data.array_type.child_type;
break;
@@ -1643,10 +1640,6 @@ static AstNode *ast_parse_primary_type_expr(ParseContext *pc) {
if (null != nullptr)
return ast_create_node(pc, NodeTypeNullLiteral, null);
- Token *promise = eat_token_if(pc, TokenIdKeywordPromise);
- if (promise != nullptr)
- return ast_create_node(pc, NodeTypePromiseType, promise);
-
Token *true_token = eat_token_if(pc, TokenIdKeywordTrue);
if (true_token != nullptr) {
AstNode *res = ast_create_node(pc, NodeTypeBoolLiteral, true_token);
@@ -2042,11 +2035,6 @@ static Optional ast_parse_fn_cc(ParseContext *pc) {
}
if (eat_token_if(pc, TokenIdKeywordAsync) != nullptr) {
res.cc = CallingConventionAsync;
- if (eat_token_if(pc, TokenIdCmpLessThan) == nullptr)
- return Optional::some(res);
-
- res.async_allocator_type = ast_expect(pc, ast_parse_type_expr);
- expect_token(pc, TokenIdCmpGreaterThan);
return Optional::some(res);
}
@@ -2533,16 +2521,6 @@ static AstNode *ast_parse_prefix_type_op(ParseContext *pc) {
return res;
}
- Token *promise = eat_token_if(pc, TokenIdKeywordPromise);
- if (promise != nullptr) {
- if (eat_token_if(pc, TokenIdArrow) != nullptr) {
- AstNode *res = ast_create_node(pc, NodeTypePromiseType, promise);
- return res;
- }
-
- put_back_token(pc);
- }
-
AstNode *array = ast_parse_array_type_start(pc);
if (array != nullptr) {
assert(array->type == NodeTypeArrayType);
@@ -2680,11 +2658,6 @@ static AstNode *ast_parse_async_prefix(ParseContext *pc) {
AstNode *res = ast_create_node(pc, NodeTypeFnCallExpr, async);
res->data.fn_call_expr.is_async = true;
res->data.fn_call_expr.seen = false;
- if (eat_token_if(pc, TokenIdCmpLessThan) != nullptr) {
- AstNode *prefix_expr = ast_expect(pc, ast_parse_prefix_expr);
- expect_token(pc, TokenIdCmpGreaterThan);
- res->data.fn_call_expr.async_allocator = prefix_expr;
- }
return res;
}
@@ -2858,7 +2831,6 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont
visit_node_list(&node->data.fn_proto.params, visit, context);
visit_field(&node->data.fn_proto.align_expr, visit, context);
visit_field(&node->data.fn_proto.section_expr, visit, context);
- visit_field(&node->data.fn_proto.async_allocator_type, visit, context);
break;
case NodeTypeFnDef:
visit_field(&node->data.fn_def.fn_proto, visit, context);
@@ -2918,7 +2890,6 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont
case NodeTypeFnCallExpr:
visit_field(&node->data.fn_call_expr.fn_ref_expr, visit, context);
visit_node_list(&node->data.fn_call_expr.params, visit, context);
- visit_field(&node->data.fn_call_expr.async_allocator, visit, context);
break;
case NodeTypeArrayAccessExpr:
visit_field(&node->data.array_access_expr.array_ref_expr, visit, context);
@@ -3034,9 +3005,6 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont
case NodeTypeInferredArrayType:
visit_field(&node->data.array_type.child_type, visit, context);
break;
- case NodeTypePromiseType:
- visit_field(&node->data.promise_type.payload_type, visit, context);
- break;
case NodeTypeErrorType:
// none
break;
diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp
index 783b6e0e20..0869c3ba9c 100644
--- a/src/tokenizer.cpp
+++ b/src/tokenizer.cpp
@@ -136,7 +136,6 @@ static const struct ZigKeyword zig_keywords[] = {
{"or", TokenIdKeywordOr},
{"orelse", TokenIdKeywordOrElse},
{"packed", TokenIdKeywordPacked},
- {"promise", TokenIdKeywordPromise},
{"pub", TokenIdKeywordPub},
{"resume", TokenIdKeywordResume},
{"return", TokenIdKeywordReturn},
@@ -1558,7 +1557,6 @@ const char * token_name(TokenId id) {
case TokenIdKeywordOr: return "or";
case TokenIdKeywordOrElse: return "orelse";
case TokenIdKeywordPacked: return "packed";
- case TokenIdKeywordPromise: return "promise";
case TokenIdKeywordPub: return "pub";
case TokenIdKeywordReturn: return "return";
case TokenIdKeywordLinkSection: return "linksection";
diff --git a/src/tokenizer.hpp b/src/tokenizer.hpp
index 83dbe99471..253e0bd1e5 100644
--- a/src/tokenizer.hpp
+++ b/src/tokenizer.hpp
@@ -81,7 +81,6 @@ enum TokenId {
TokenIdKeywordOr,
TokenIdKeywordOrElse,
TokenIdKeywordPacked,
- TokenIdKeywordPromise,
TokenIdKeywordPub,
TokenIdKeywordResume,
TokenIdKeywordReturn,
diff --git a/std/fmt.zig b/std/fmt.zig
index 2e9527f4ca..961c7279a2 100644
--- a/std/fmt.zig
+++ b/std/fmt.zig
@@ -328,9 +328,6 @@ pub fn formatType(
try output(context, "error.");
return output(context, @errorName(value));
},
- .Promise => {
- return format(context, Errors, output, "promise@{x}", @ptrToInt(value));
- },
.Enum => {
if (comptime std.meta.trait.hasFn("format")(T)) {
return value.format(fmt, options, context, Errors, output);
diff --git a/std/hash_map.zig b/std/hash_map.zig
index c99d1d2490..4327bfdddb 100644
--- a/std/hash_map.zig
+++ b/std/hash_map.zig
@@ -560,7 +560,7 @@ pub fn autoHash(key: var, comptime rng: *std.rand.Random, comptime HashInt: type
builtin.TypeId.Bool => return autoHash(@boolToInt(key), rng, HashInt),
builtin.TypeId.Enum => return autoHash(@enumToInt(key), rng, HashInt),
builtin.TypeId.ErrorSet => return autoHash(@errorToInt(key), rng, HashInt),
- builtin.TypeId.Promise, builtin.TypeId.Fn => return autoHash(@ptrToInt(key), rng, HashInt),
+ builtin.TypeId.Fn => return autoHash(@ptrToInt(key), rng, HashInt),
builtin.TypeId.BoundFn,
builtin.TypeId.ComptimeFloat,
diff --git a/std/meta.zig b/std/meta.zig
index 0db76ce774..6b90727737 100644
--- a/std/meta.zig
+++ b/std/meta.zig
@@ -104,8 +104,7 @@ pub fn Child(comptime T: type) type {
TypeId.Array => |info| info.child,
TypeId.Pointer => |info| info.child,
TypeId.Optional => |info| info.child,
- TypeId.Promise => |info| if (info.child) |child| child else null,
- else => @compileError("Expected promise, pointer, optional, or array type, " ++ "found '" ++ @typeName(T) ++ "'"),
+ else => @compileError("Expected pointer, optional, or array type, " ++ "found '" ++ @typeName(T) ++ "'"),
};
}
@@ -114,7 +113,6 @@ test "std.meta.Child" {
testing.expect(Child(*u8) == u8);
testing.expect(Child([]u8) == u8);
testing.expect(Child(?u8) == u8);
- testing.expect(Child(promise->u8) == u8);
}
pub fn containerLayout(comptime T: type) TypeInfo.ContainerLayout {
diff --git a/std/testing.zig b/std/testing.zig
index 4568e024e2..84f6cff5d8 100644
--- a/std/testing.zig
+++ b/std/testing.zig
@@ -45,7 +45,6 @@ pub fn expectEqual(expected: var, actual: @typeOf(expected)) void {
TypeId.EnumLiteral,
TypeId.Enum,
TypeId.Fn,
- TypeId.Promise,
TypeId.Vector,
TypeId.ErrorSet,
=> {
diff --git a/test/stage1/behavior.zig b/test/stage1/behavior.zig
index 71af5586ed..7950088348 100644
--- a/test/stage1/behavior.zig
+++ b/test/stage1/behavior.zig
@@ -39,11 +39,11 @@ comptime {
_ = @import("behavior/bugs/828.zig");
_ = @import("behavior/bugs/920.zig");
_ = @import("behavior/byval_arg_var.zig");
- _ = @import("behavior/cancel.zig");
+ //_ = @import("behavior/cancel.zig");
_ = @import("behavior/cast.zig");
_ = @import("behavior/const_slice_child.zig");
- _ = @import("behavior/coroutine_await_struct.zig");
- _ = @import("behavior/coroutines.zig");
+ //_ = @import("behavior/coroutine_await_struct.zig");
+ //_ = @import("behavior/coroutines.zig");
_ = @import("behavior/defer.zig");
_ = @import("behavior/enum.zig");
_ = @import("behavior/enum_with_members.zig");
diff --git a/test/stage1/behavior/type_info.zig b/test/stage1/behavior/type_info.zig
index 4ae81aff20..2decf0c329 100644
--- a/test/stage1/behavior/type_info.zig
+++ b/test/stage1/behavior/type_info.zig
@@ -116,21 +116,6 @@ fn testOptional() void {
expect(null_info.Optional.child == void);
}
-test "type info: promise info" {
- testPromise();
- comptime testPromise();
-}
-
-fn testPromise() void {
- const null_promise_info = @typeInfo(promise);
- expect(TypeId(null_promise_info) == TypeId.Promise);
- expect(null_promise_info.Promise.child == null);
-
- const promise_info = @typeInfo(promise->usize);
- expect(TypeId(promise_info) == TypeId.Promise);
- expect(promise_info.Promise.child.? == usize);
-}
-
test "type info: error set, error union info" {
testErrorSet();
comptime testErrorSet();
@@ -192,11 +177,11 @@ fn testUnion() void {
expect(TypeId(typeinfo_info) == TypeId.Union);
expect(typeinfo_info.Union.layout == TypeInfo.ContainerLayout.Auto);
expect(typeinfo_info.Union.tag_type.? == TypeId);
- expect(typeinfo_info.Union.fields.len == 25);
+ expect(typeinfo_info.Union.fields.len == 24);
expect(typeinfo_info.Union.fields[4].enum_field != null);
expect(typeinfo_info.Union.fields[4].enum_field.?.value == 4);
expect(typeinfo_info.Union.fields[4].field_type == @typeOf(@typeInfo(u8).Int));
- expect(typeinfo_info.Union.decls.len == 21);
+ expect(typeinfo_info.Union.decls.len == 20);
const TestNoTagUnion = union {
Foo: void,
@@ -265,7 +250,6 @@ fn testFunction() void {
expect(fn_info.Fn.args.len == 2);
expect(fn_info.Fn.is_var_args);
expect(fn_info.Fn.return_type == null);
- expect(fn_info.Fn.async_allocator_type == null);
const test_instance: TestStruct = undefined;
const bound_fn_info = @typeInfo(@typeOf(test_instance.foo));
--
cgit v1.2.3
From 72e983670e65eac0b89da5564432988862828b30 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sun, 21 Jul 2019 16:21:16 -0400
Subject: simple async function call working
---
CMakeLists.txt | 2 +-
src/all_types.hpp | 30 +++++++++
src/analyze.cpp | 137 +++++++++++++++++++++++++++++++++++----
src/analyze.hpp | 3 +
src/codegen.cpp | 139 +++++++++++++++++++++++++++++++++-------
src/ir.cpp | 187 ++++++++++++++++++++++++++++++++++++++++++++++--------
src/ir_print.cpp | 16 +++++
7 files changed, 448 insertions(+), 66 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/CMakeLists.txt b/CMakeLists.txt
index d8cf0c507d..aa3cfbfeac 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -426,7 +426,6 @@ set(ZIG_MAIN_SRC "${CMAKE_SOURCE_DIR}/src/main.cpp")
set(ZIG0_SHIM_SRC "${CMAKE_SOURCE_DIR}/src/userland.cpp")
set(ZIG_SOURCES
- "${CMAKE_SOURCE_DIR}/src/glibc.cpp"
"${CMAKE_SOURCE_DIR}/src/analyze.cpp"
"${CMAKE_SOURCE_DIR}/src/ast_render.cpp"
"${CMAKE_SOURCE_DIR}/src/bigfloat.cpp"
@@ -438,6 +437,7 @@ set(ZIG_SOURCES
"${CMAKE_SOURCE_DIR}/src/compiler.cpp"
"${CMAKE_SOURCE_DIR}/src/errmsg.cpp"
"${CMAKE_SOURCE_DIR}/src/error.cpp"
+ "${CMAKE_SOURCE_DIR}/src/glibc.cpp"
"${CMAKE_SOURCE_DIR}/src/ir.cpp"
"${CMAKE_SOURCE_DIR}/src/ir_print.cpp"
"${CMAKE_SOURCE_DIR}/src/libc_installation.cpp"
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 7fe035ad1c..3f61e77f66 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -1251,6 +1251,7 @@ enum ZigTypeId {
ZigTypeIdBoundFn,
ZigTypeIdArgTuple,
ZigTypeIdOpaque,
+ ZigTypeIdCoroFrame,
ZigTypeIdVector,
ZigTypeIdEnumLiteral,
};
@@ -1265,6 +1266,11 @@ struct ZigTypeOpaque {
Buf *bare_name;
};
+struct ZigTypeCoroFrame {
+ ZigFn *fn;
+ ZigType *locals_struct;
+};
+
struct ZigType {
ZigTypeId id;
Buf name;
@@ -1290,6 +1296,7 @@ struct ZigType {
ZigTypeBoundFn bound_fn;
ZigTypeVector vector;
ZigTypeOpaque opaque;
+ ZigTypeCoroFrame frame;
} data;
// use these fields to make sure we don't duplicate type table entries for the same type
@@ -1340,6 +1347,7 @@ struct ZigFn {
ScopeBlock *def_scope; // parent is child_scope
Buf symbol_name;
ZigType *type_entry; // function type
+ ZigType *frame_type; // coro frame type
// in the case of normal functions this is the implicit return type
// in the case of async functions this is the implicit return type according to the
// zig source code, not according to zig ir
@@ -1356,6 +1364,7 @@ struct ZigFn {
ZigList alloca_gen_list;
ZigList variable_list;
+ ZigList resume_blocks;
Buf *section_name;
AstNode *set_alignstack_node;
@@ -1365,6 +1374,7 @@ struct ZigFn {
ZigList export_list;
LLVMValueRef valgrind_client_request_array;
+ LLVMBasicBlockRef preamble_llvm_block;
FnInline fn_inline;
FnAnalState anal_state;
@@ -1512,6 +1522,7 @@ enum PanicMsgId {
PanicMsgIdBadEnumValue,
PanicMsgIdFloatToInt,
PanicMsgIdPtrCastNull,
+ PanicMsgIdBadResume,
PanicMsgIdCount,
};
@@ -1755,6 +1766,7 @@ struct CodeGen {
ZigType *entry_global_error_set;
ZigType *entry_arg_tuple;
ZigType *entry_enum_literal;
+ ZigType *entry_frame_header;
} builtin_types;
ZigType *align_amt_type;
ZigType *stack_trace_type;
@@ -2119,6 +2131,8 @@ struct IrBasicBlock {
size_t ref_count;
// index into the basic block list
size_t index;
+ // for coroutines, the resume_index which corresponds to this block
+ size_t resume_index;
LLVMBasicBlockRef llvm_block;
LLVMBasicBlockRef llvm_exit_block;
// The instruction that referenced this basic block and caused us to
@@ -2297,6 +2311,8 @@ enum IrInstructionId {
IrInstructionIdEndExpr,
IrInstructionIdPtrOfArrayToSlice,
IrInstructionIdUnionInitNamedField,
+ IrInstructionIdSuspendBegin,
+ IrInstructionIdSuspendBr,
};
struct IrInstruction {
@@ -3511,6 +3527,18 @@ struct IrInstructionPtrOfArrayToSlice {
IrInstruction *result_loc;
};
+struct IrInstructionSuspendBegin {
+ IrInstruction base;
+
+ IrBasicBlock *resume_block;
+};
+
+struct IrInstructionSuspendBr {
+ IrInstruction base;
+
+ IrBasicBlock *resume_block;
+};
+
enum ResultLocId {
ResultLocIdInvalid,
ResultLocIdNone,
@@ -3593,6 +3621,8 @@ static const size_t maybe_null_index = 1;
static const size_t err_union_err_index = 0;
static const size_t err_union_payload_index = 1;
+static const size_t coro_resume_index_index = 0;
+
// TODO call graph analysis to find out what this number needs to be for every function
// MUST BE A POWER OF TWO.
static const size_t stack_trace_ptr_count = 32;
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 15e12caa8d..2b93c390e0 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -228,6 +228,8 @@ AstNode *type_decl_node(ZigType *type_entry) {
return type_entry->data.enumeration.decl_node;
case ZigTypeIdUnion:
return type_entry->data.unionation.decl_node;
+ case ZigTypeIdCoroFrame:
+ return type_entry->data.frame.fn->proto_node;
case ZigTypeIdOpaque:
case ZigTypeIdMetaType:
case ZigTypeIdVoid:
@@ -262,6 +264,20 @@ bool type_is_resolved(ZigType *type_entry, ResolveStatus status) {
return type_entry->data.structure.resolve_status >= status;
case ZigTypeIdUnion:
return type_entry->data.unionation.resolve_status >= status;
+ case ZigTypeIdCoroFrame:
+ switch (status) {
+ case ResolveStatusInvalid:
+ zig_unreachable();
+ case ResolveStatusUnstarted:
+ case ResolveStatusZeroBitsKnown:
+ return true;
+ case ResolveStatusAlignmentKnown:
+ case ResolveStatusSizeKnown:
+ return type_entry->data.frame.locals_struct != nullptr;
+ case ResolveStatusLLVMFwdDecl:
+ case ResolveStatusLLVMFull:
+ return type_entry->llvm_type != nullptr;
+ }
case ZigTypeIdEnum:
switch (status) {
case ResolveStatusUnstarted:
@@ -345,6 +361,25 @@ static const char *ptr_len_to_star_str(PtrLen ptr_len) {
zig_unreachable();
}
+ZigType *get_coro_frame_type(CodeGen *g, ZigFn *fn) {
+ if (fn->frame_type != nullptr) {
+ return fn->frame_type;
+ }
+
+ ZigType *entry = new_type_table_entry(ZigTypeIdCoroFrame);
+ buf_resize(&entry->name, 0);
+ buf_appendf(&entry->name, "@Frame(%s)", buf_ptr(&fn->symbol_name));
+
+ entry->data.frame.fn = fn;
+
+ // Coroutine frames are always non-zero bits because they always have a resume index.
+ entry->abi_size = SIZE_MAX;
+ entry->size_in_bits = SIZE_MAX;
+
+ fn->frame_type = entry;
+ return entry;
+}
+
ZigType *get_pointer_to_type_extra(CodeGen *g, ZigType *child_type, bool is_const,
bool is_volatile, PtrLen ptr_len, uint32_t byte_alignment,
uint32_t bit_offset_in_host, uint32_t host_int_bytes, bool allow_zero)
@@ -1039,6 +1074,7 @@ static Error emit_error_unless_type_allowed_in_packed_struct(CodeGen *g, ZigType
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
+ case ZigTypeIdCoroFrame:
add_node_error(g, source_node,
buf_sprintf("type '%s' not allowed in packed struct; no guaranteed in-memory representation",
buf_ptr(&type_entry->name)));
@@ -1127,6 +1163,7 @@ bool type_allowed_in_extern(CodeGen *g, ZigType *type_entry) {
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdVoid:
+ case ZigTypeIdCoroFrame:
return false;
case ZigTypeIdOpaque:
case ZigTypeIdUnreachable:
@@ -1297,6 +1334,7 @@ static ZigType *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_sc
case ZigTypeIdUnion:
case ZigTypeIdFn:
case ZigTypeIdVector:
+ case ZigTypeIdCoroFrame:
switch (type_requires_comptime(g, type_entry)) {
case ReqCompTimeNo:
break;
@@ -1392,6 +1430,7 @@ static ZigType *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_sc
case ZigTypeIdUnion:
case ZigTypeIdFn:
case ZigTypeIdVector:
+ case ZigTypeIdCoroFrame:
switch (type_requires_comptime(g, fn_type_id.return_type)) {
case ReqCompTimeInvalid:
return g->builtin_types.entry_invalid;
@@ -1825,6 +1864,39 @@ static Error resolve_union_type(CodeGen *g, ZigType *union_type) {
return ErrorNone;
}
+static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) {
+ assert(frame_type->data.frame.locals_struct == nullptr);
+
+ ZigFn *fn = frame_type->data.frame.fn;
+ switch (fn->anal_state) {
+ case FnAnalStateInvalid:
+ return ErrorSemanticAnalyzeFail;
+ case FnAnalStateComplete:
+ break;
+ case FnAnalStateReady:
+ analyze_fn_body(g, fn);
+ if (fn->anal_state == FnAnalStateInvalid)
+ return ErrorSemanticAnalyzeFail;
+ break;
+ case FnAnalStateProbing:
+ add_node_error(g, fn->proto_node,
+ buf_sprintf("cannot resolve '%s': function not fully analyzed yet",
+ buf_ptr(&frame_type->name)));
+ return ErrorSemanticAnalyzeFail;
+ }
+ // TODO iterate over fn->alloca_gen_list
+ ZigList field_types = {};
+ ZigList field_names = {};
+
+ field_names.append("resume_index");
+ field_types.append(g->builtin_types.entry_usize);
+
+ assert(field_names.length == field_types.length);
+ frame_type->data.frame.locals_struct = get_struct_type(g, buf_ptr(&frame_type->name),
+ field_names.items, field_types.items, field_names.length);
+ return ErrorNone;
+}
+
static bool type_is_valid_extern_enum_tag(CodeGen *g, ZigType *ty) {
// Only integer types are allowed by the C ABI
if(ty->id != ZigTypeIdInt)
@@ -2997,6 +3069,7 @@ ZigType *validate_var_type(CodeGen *g, AstNode *source_node, ZigType *type_entry
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
case ZigTypeIdVector:
+ case ZigTypeIdCoroFrame:
return type_entry;
}
zig_unreachable();
@@ -3496,6 +3569,7 @@ bool is_container(ZigType *type_entry) {
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
case ZigTypeIdVector:
+ case ZigTypeIdCoroFrame:
return false;
}
zig_unreachable();
@@ -3552,6 +3626,7 @@ Error resolve_container_type(CodeGen *g, ZigType *type_entry) {
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
case ZigTypeIdVector:
+ case ZigTypeIdCoroFrame:
zig_unreachable();
}
zig_unreachable();
@@ -4002,6 +4077,7 @@ bool handle_is_ptr(ZigType *type_entry) {
return false;
case ZigTypeIdArray:
case ZigTypeIdStruct:
+ case ZigTypeIdCoroFrame:
return type_has_bits(type_entry);
case ZigTypeIdErrorUnion:
return type_has_bits(type_entry->data.error_union.payload_type);
@@ -4246,6 +4322,9 @@ static uint32_t hash_const_val(ConstExprValue *const_val) {
case ZigTypeIdVector:
// TODO better hashing algorithm
return 3647867726;
+ case ZigTypeIdCoroFrame:
+ // TODO better hashing algorithm
+ return 675741936;
case ZigTypeIdBoundFn:
case ZigTypeIdInvalid:
case ZigTypeIdUnreachable:
@@ -4310,6 +4389,7 @@ static bool can_mutate_comptime_var_state(ConstExprValue *value) {
case ZigTypeIdOpaque:
case ZigTypeIdErrorSet:
case ZigTypeIdEnum:
+ case ZigTypeIdCoroFrame:
return false;
case ZigTypeIdPointer:
@@ -4381,6 +4461,7 @@ static bool return_type_is_cacheable(ZigType *return_type) {
case ZigTypeIdEnum:
case ZigTypeIdPointer:
case ZigTypeIdVector:
+ case ZigTypeIdCoroFrame:
return true;
case ZigTypeIdArray:
@@ -4512,6 +4593,7 @@ OnePossibleValue type_has_one_possible_value(CodeGen *g, ZigType *type_entry) {
case ZigTypeIdBool:
case ZigTypeIdFloat:
case ZigTypeIdErrorUnion:
+ case ZigTypeIdCoroFrame:
return OnePossibleValueNo;
case ZigTypeIdUndefined:
case ZigTypeIdNull:
@@ -4599,6 +4681,7 @@ ReqCompTime type_requires_comptime(CodeGen *g, ZigType *type_entry) {
case ZigTypeIdFloat:
case ZigTypeIdVoid:
case ZigTypeIdUnreachable:
+ case ZigTypeIdCoroFrame:
return ReqCompTimeNo;
}
zig_unreachable();
@@ -4941,6 +5024,8 @@ Error type_resolve(CodeGen *g, ZigType *ty, ResolveStatus status) {
return resolve_enum_zero_bits(g, ty);
} else if (ty->id == ZigTypeIdUnion) {
return resolve_union_alignment(g, ty);
+ } else if (ty->id == ZigTypeIdCoroFrame) {
+ return resolve_coro_frame(g, ty);
}
return ErrorNone;
case ResolveStatusSizeKnown:
@@ -4950,6 +5035,8 @@ Error type_resolve(CodeGen *g, ZigType *ty, ResolveStatus status) {
return resolve_enum_zero_bits(g, ty);
} else if (ty->id == ZigTypeIdUnion) {
return resolve_union_type(g, ty);
+ } else if (ty->id == ZigTypeIdCoroFrame) {
+ return resolve_coro_frame(g, ty);
}
return ErrorNone;
case ResolveStatusLLVMFwdDecl:
@@ -5144,6 +5231,8 @@ bool const_values_equal(CodeGen *g, ConstExprValue *a, ConstExprValue *b) {
return false;
}
return true;
+ case ZigTypeIdCoroFrame:
+ zig_panic("TODO");
case ZigTypeIdUndefined:
zig_panic("TODO");
case ZigTypeIdNull:
@@ -5496,6 +5585,10 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) {
buf_appendf(buf, "(args value)");
return;
}
+ case ZigTypeIdCoroFrame:
+ buf_appendf(buf, "(TODO: coroutine frame value)");
+ return;
+
}
zig_unreachable();
}
@@ -5542,6 +5635,7 @@ uint32_t type_id_hash(TypeId x) {
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
+ case ZigTypeIdCoroFrame:
zig_unreachable();
case ZigTypeIdErrorUnion:
return hash_ptr(x.data.error_union.err_set_type) ^ hash_ptr(x.data.error_union.payload_type);
@@ -5590,6 +5684,7 @@ bool type_id_eql(TypeId a, TypeId b) {
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
+ case ZigTypeIdCoroFrame:
zig_unreachable();
case ZigTypeIdErrorUnion:
return a.data.error_union.err_set_type == b.data.error_union.err_set_type &&
@@ -5818,10 +5913,12 @@ size_t type_id_index(ZigType *entry) {
return 20;
case ZigTypeIdOpaque:
return 21;
- case ZigTypeIdVector:
+ case ZigTypeIdCoroFrame:
return 22;
- case ZigTypeIdEnumLiteral:
+ case ZigTypeIdVector:
return 23;
+ case ZigTypeIdEnumLiteral:
+ return 24;
}
zig_unreachable();
}
@@ -5878,6 +5975,8 @@ const char *type_id_name(ZigTypeId id) {
return "Opaque";
case ZigTypeIdVector:
return "Vector";
+ case ZigTypeIdCoroFrame:
+ return "Frame";
}
zig_unreachable();
}
@@ -5947,7 +6046,7 @@ bool type_can_fail(ZigType *type_entry) {
}
bool fn_type_can_fail(FnTypeId *fn_type_id) {
- return type_can_fail(fn_type_id->return_type) || fn_type_id->cc == CallingConventionAsync;
+ return type_can_fail(fn_type_id->return_type);
}
// ErrorNone - result pointer has the type
@@ -6935,12 +7034,12 @@ static void resolve_llvm_types_array(CodeGen *g, ZigType *type) {
debug_align_in_bits, get_llvm_di_type(g, elem_type), (int)type->data.array.len);
}
-static void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type) {
+void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type, ZigFn *fn) {
if (fn_type->llvm_di_type != nullptr) return;
FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
bool first_arg_return = want_first_arg_sret(g, fn_type_id);
- bool is_async = fn_type_id->cc == CallingConventionAsync;
+ bool is_async = fn_type_id->cc == CallingConventionAsync || (fn != nullptr && fn->resume_blocks.length != 0);
bool is_c_abi = fn_type_id->cc == CallingConventionC;
bool prefix_arg_error_return_trace = g->have_err_ret_tracing && fn_type_can_fail(fn_type_id);
// +1 for maybe making the first argument the return value
@@ -6955,7 +7054,7 @@ static void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type) {
param_di_types.append(get_llvm_di_type(g, fn_type_id->return_type));
ZigType *gen_return_type;
if (is_async) {
- gen_return_type = get_pointer_to_type(g, g->builtin_types.entry_u8, false);
+ gen_return_type = g->builtin_types.entry_usize;
} else if (!type_has_bits(fn_type_id->return_type)) {
gen_return_type = g->builtin_types.entry_void;
} else if (first_arg_return) {
@@ -6974,13 +7073,10 @@ static void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type) {
param_di_types.append(get_llvm_di_type(g, gen_type));
}
if (is_async) {
- // coroutine frame pointer
- // TODO if we can make this typed a little more it will be better for
- // debug symbols.
- // TODO do we need to make this aligned more?
- ZigType *void_star = get_pointer_to_type(g, g->builtin_types.entry_c_void, false);
- gen_param_types.append(get_llvm_type(g, void_star));
- param_di_types.append(get_llvm_di_type(g, void_star));
+ ZigType *frame_type = (fn == nullptr) ? g->builtin_types.entry_frame_header : get_coro_frame_type(g, fn);
+ ZigType *ptr_type = get_pointer_to_type(g, frame_type, false);
+ gen_param_types.append(get_llvm_type(g, ptr_type));
+ param_di_types.append(get_llvm_di_type(g, ptr_type));
}
fn_type->data.fn.gen_param_info = allocate(fn_type_id->param_count);
@@ -7055,6 +7151,17 @@ static void resolve_llvm_types_anyerror(CodeGen *g) {
get_llvm_di_type(g, g->err_tag_type), "");
}
+static void resolve_llvm_types_coro_frame(CodeGen *g, ZigType *frame_type, ResolveStatus wanted_resolve_status) {
+ if (frame_type->llvm_di_type != nullptr) return;
+
+ resolve_llvm_types_struct(g, frame_type->data.frame.locals_struct, wanted_resolve_status);
+ frame_type->llvm_type = frame_type->data.frame.locals_struct->llvm_type;
+ frame_type->llvm_di_type = frame_type->data.frame.locals_struct->llvm_di_type;
+ frame_type->abi_size = frame_type->data.frame.locals_struct->abi_size;
+ frame_type->abi_align = frame_type->data.frame.locals_struct->abi_align;
+ frame_type->size_in_bits = frame_type->data.frame.locals_struct->size_in_bits;
+}
+
static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_resolve_status) {
assert(type->id == ZigTypeIdOpaque || type_is_resolved(type, ResolveStatusSizeKnown));
assert(wanted_resolve_status > ResolveStatusSizeKnown);
@@ -7096,7 +7203,7 @@ static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_r
case ZigTypeIdArray:
return resolve_llvm_types_array(g, type);
case ZigTypeIdFn:
- return resolve_llvm_types_fn(g, type);
+ return resolve_llvm_types_fn(g, type, nullptr);
case ZigTypeIdErrorSet: {
if (type->llvm_di_type != nullptr) return;
@@ -7115,6 +7222,8 @@ static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_r
type->abi_align, get_llvm_di_type(g, type->data.vector.elem_type), type->data.vector.len);
return;
}
+ case ZigTypeIdCoroFrame:
+ return resolve_llvm_types_coro_frame(g, type, wanted_resolve_status);
}
zig_unreachable();
}
diff --git a/src/analyze.hpp b/src/analyze.hpp
index fbbdece8ba..286ff5e043 100644
--- a/src/analyze.hpp
+++ b/src/analyze.hpp
@@ -16,6 +16,7 @@ ErrorMsg *add_token_error(CodeGen *g, ZigType *owner, Token *token, Buf *msg);
ErrorMsg *add_error_note(CodeGen *g, ErrorMsg *parent_msg, AstNode *node, Buf *msg);
void emit_error_notes_for_ref_stack(CodeGen *g, ErrorMsg *msg);
ZigType *new_type_table_entry(ZigTypeId id);
+ZigType *get_coro_frame_type(CodeGen *g, ZigFn *fn);
ZigType *get_pointer_to_type(CodeGen *g, ZigType *child_type, bool is_const);
ZigType *get_pointer_to_type_extra(CodeGen *g, ZigType *child_type, bool is_const,
bool is_volatile, PtrLen ptr_len,
@@ -247,4 +248,6 @@ void src_assert(bool ok, AstNode *source_node);
bool is_container(ZigType *type_entry);
ConstExprValue *analyze_const_value(CodeGen *g, Scope *scope, AstNode *node, ZigType *type_entry, Buf *type_name);
+void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type, ZigFn *fn);
+
#endif
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 4cc99b39a8..85784e5ac5 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -498,7 +498,7 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, ZigFn *fn_table_entry) {
ZigType *fn_type = fn_table_entry->type_entry;
// Make the raw_type_ref populated
- (void)get_llvm_type(g, fn_type);
+ resolve_llvm_types_fn(g, fn_type, fn_table_entry);
LLVMTypeRef fn_llvm_type = fn_type->data.fn.raw_type_ref;
if (fn_table_entry->body_node == nullptr) {
LLVMValueRef existing_llvm_fn = LLVMGetNamedFunction(g->module, buf_ptr(symbol_name));
@@ -921,9 +921,8 @@ static bool ir_want_fast_math(CodeGen *g, IrInstruction *instruction) {
return false;
}
-static bool ir_want_runtime_safety(CodeGen *g, IrInstruction *instruction) {
+static bool ir_want_runtime_safety_scope(CodeGen *g, Scope *scope) {
// TODO memoize
- Scope *scope = instruction->scope;
while (scope) {
if (scope->id == ScopeIdBlock) {
ScopeBlock *block_scope = (ScopeBlock *)scope;
@@ -941,6 +940,10 @@ static bool ir_want_runtime_safety(CodeGen *g, IrInstruction *instruction) {
g->build_mode != BuildModeSmallRelease);
}
+static bool ir_want_runtime_safety(CodeGen *g, IrInstruction *instruction) {
+ return ir_want_runtime_safety_scope(g, instruction->scope);
+}
+
static Buf *panic_msg_buf(PanicMsgId msg_id) {
switch (msg_id) {
case PanicMsgIdCount:
@@ -981,6 +984,8 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) {
return buf_create_from_str("integer part of floating point value out of bounds");
case PanicMsgIdPtrCastNull:
return buf_create_from_str("cast causes pointer to be null");
+ case PanicMsgIdBadResume:
+ return buf_create_from_str("invalid resume of async function");
}
zig_unreachable();
}
@@ -1027,14 +1032,18 @@ static void gen_safety_crash(CodeGen *g, PanicMsgId msg_id) {
gen_panic(g, get_panic_msg_ptr_val(g, msg_id), nullptr);
}
-static void gen_assertion(CodeGen *g, PanicMsgId msg_id, IrInstruction *source_instruction) {
- if (ir_want_runtime_safety(g, source_instruction)) {
+static void gen_assertion_scope(CodeGen *g, PanicMsgId msg_id, Scope *source_scope) {
+ if (ir_want_runtime_safety_scope(g, source_scope)) {
gen_safety_crash(g, msg_id);
} else {
LLVMBuildUnreachable(g->builder);
}
}
+static void gen_assertion(CodeGen *g, PanicMsgId msg_id, IrInstruction *source_instruction) {
+ return gen_assertion_scope(g, msg_id, source_instruction->scope);
+}
+
static LLVMValueRef get_stacksave_fn_val(CodeGen *g) {
if (g->stacksave_fn_val)
return g->stacksave_fn_val;
@@ -2092,6 +2101,10 @@ static LLVMValueRef ir_render_save_err_ret_addr(CodeGen *g, IrExecutable *execut
}
static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrInstructionReturn *return_instruction) {
+ if (g->cur_fn->resume_blocks.length != 0) {
+ LLVMBuildRet(g->builder, LLVMGetUndef(g->builtin_types.entry_usize->llvm_type));
+ return nullptr;
+ }
if (want_first_arg_sret(g, &g->cur_fn->type_entry->data.fn.fn_type_id)) {
if (return_instruction->value == nullptr) {
LLVMBuildRetVoid(g->builder);
@@ -3375,8 +3388,7 @@ static LLVMValueRef ir_render_elem_ptr(CodeGen *g, IrExecutable *executable, IrI
static bool get_prefix_arg_err_ret_stack(CodeGen *g, FnTypeId *fn_type_id) {
return g->have_err_ret_tracing &&
(fn_type_id->return_type->id == ZigTypeIdErrorUnion ||
- fn_type_id->return_type->id == ZigTypeIdErrorSet ||
- fn_type_id->cc == CallingConventionAsync);
+ fn_type_id->return_type->id == ZigTypeIdErrorSet);
}
static LLVMValueRef get_new_stack_addr(CodeGen *g, LLVMValueRef new_stack) {
@@ -3440,15 +3452,23 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
bool is_var_args = fn_type_id->is_var_args;
ZigList gen_param_values = {};
LLVMValueRef result_loc = instruction->result_loc ? ir_llvm_value(g, instruction->result_loc) : nullptr;
- if (first_arg_ret) {
+ if (instruction->is_async) {
+ assert(result_loc != nullptr);
+ assert(instruction->fn_entry != nullptr);
+ LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, result_loc, coro_resume_index_index, "");
+ LLVMValueRef zero = LLVMConstNull(g->builtin_types.entry_usize->llvm_type);
+ LLVMBuildStore(g->builder, zero, resume_index_ptr);
+
+ if (prefix_arg_err_ret_stack) {
+ zig_panic("TODO");
+ }
+
gen_param_values.append(result_loc);
- }
- if (prefix_arg_err_ret_stack) {
+ } else if (first_arg_ret) {
+ gen_param_values.append(result_loc);
+ } else if (prefix_arg_err_ret_stack) {
gen_param_values.append(get_cur_err_ret_trace_val(g, instruction->base.scope));
}
- if (instruction->is_async) {
- zig_panic("TODO codegen async call");
- }
FnWalk fn_walk = {};
fn_walk.id = FnWalkIdCall;
fn_walk.data.call.inst = instruction;
@@ -3489,9 +3509,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
if (instruction->is_async) {
- LLVMValueRef payload_ptr = LLVMBuildStructGEP(g->builder, result_loc, err_union_payload_index, "");
- LLVMBuildStore(g->builder, result, payload_ptr);
- return result_loc;
+ return nullptr;
}
if (src_return_type->id == ZigTypeIdUnreachable) {
@@ -4921,6 +4939,24 @@ static LLVMValueRef ir_render_assert_non_null(CodeGen *g, IrExecutable *executab
return nullptr;
}
+static LLVMValueRef ir_render_suspend_begin(CodeGen *g, IrExecutable *executable,
+ IrInstructionSuspendBegin *instruction)
+{
+ LLVMValueRef locals_ptr = g->cur_ret_ptr;
+ LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, locals_ptr, coro_resume_index_index, "");
+ LLVMValueRef new_resume_index = LLVMConstInt(g->builtin_types.entry_usize->llvm_type,
+ instruction->resume_block->resume_index, false);
+ LLVMBuildStore(g->builder, new_resume_index, resume_index_ptr);
+ return nullptr;
+}
+
+static LLVMValueRef ir_render_suspend_br(CodeGen *g, IrExecutable *executable,
+ IrInstructionSuspendBr *instruction)
+{
+ LLVMBuildRet(g->builder, LLVMGetUndef(g->builtin_types.entry_usize->llvm_type));
+ return nullptr;
+}
+
static void set_debug_location(CodeGen *g, IrInstruction *instruction) {
AstNode *source_node = instruction->source_node;
Scope *scope = instruction->scope;
@@ -5161,6 +5197,10 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_resize_slice(g, executable, (IrInstructionResizeSlice *)instruction);
case IrInstructionIdPtrOfArrayToSlice:
return ir_render_ptr_of_array_to_slice(g, executable, (IrInstructionPtrOfArrayToSlice *)instruction);
+ case IrInstructionIdSuspendBegin:
+ return ir_render_suspend_begin(g, executable, (IrInstructionSuspendBegin *)instruction);
+ case IrInstructionIdSuspendBr:
+ return ir_render_suspend_br(g, executable, (IrInstructionSuspendBr *)instruction);
}
zig_unreachable();
}
@@ -5422,7 +5462,8 @@ static LLVMValueRef pack_const_int(CodeGen *g, LLVMTypeRef big_int_type_ref, Con
}
return val;
}
-
+ case ZigTypeIdCoroFrame:
+ zig_panic("TODO bit pack a coroutine frame");
}
zig_unreachable();
}
@@ -5943,7 +5984,8 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
zig_unreachable();
-
+ case ZigTypeIdCoroFrame:
+ zig_panic("TODO");
}
zig_unreachable();
}
@@ -6027,12 +6069,20 @@ static void generate_error_name_table(CodeGen *g) {
static void build_all_basic_blocks(CodeGen *g, ZigFn *fn) {
IrExecutable *executable = &fn->analyzed_executable;
assert(executable->basic_block_list.length > 0);
+ LLVMValueRef fn_val = fn_llvm_value(g, fn);
+ LLVMBasicBlockRef first_bb = nullptr;
+ if (fn->resume_blocks.length != 0) {
+ first_bb = LLVMAppendBasicBlock(fn_val, "AsyncSwitch");
+ fn->preamble_llvm_block = first_bb;
+ }
for (size_t block_i = 0; block_i < executable->basic_block_list.length; block_i += 1) {
IrBasicBlock *bb = executable->basic_block_list.at(block_i);
- bb->llvm_block = LLVMAppendBasicBlock(fn_llvm_value(g, fn), bb->name_hint);
+ bb->llvm_block = LLVMAppendBasicBlock(fn_val, bb->name_hint);
+ }
+ if (first_bb == nullptr) {
+ first_bb = executable->basic_block_list.at(0)->llvm_block;
}
- IrBasicBlock *entry_bb = executable->basic_block_list.at(0);
- LLVMPositionBuilderAtEnd(g->builder, entry_bb->llvm_block);
+ LLVMPositionBuilderAtEnd(g->builder, first_bb);
}
static void gen_global_var(CodeGen *g, ZigVar *var, LLVMValueRef init_val,
@@ -6209,7 +6259,7 @@ static void do_code_gen(CodeGen *g) {
build_all_basic_blocks(g, fn_table_entry);
clear_debug_source_node(g);
- if (want_sret) {
+ if (want_sret || fn_table_entry->resume_blocks.length != 0) {
g->cur_ret_ptr = LLVMGetParam(fn, 0);
} else if (handle_is_ptr(fn_type_id->return_type)) {
g->cur_ret_ptr = build_alloca(g, fn_type_id->return_type, "result", 0);
@@ -6357,6 +6407,41 @@ static void do_code_gen(CodeGen *g) {
fn_walk_init.data.inits.gen_i = gen_i_init;
walk_function_params(g, fn_table_entry->type_entry, &fn_walk_init);
+ if (fn_table_entry->resume_blocks.length != 0) {
+ if (!g->strip_debug_symbols) {
+ AstNode *source_node = fn_table_entry->proto_node;
+ ZigLLVMSetCurrentDebugLocation(g->builder, (int)source_node->line + 1,
+ (int)source_node->column + 1, get_di_scope(g, fn_table_entry->child_scope));
+ }
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
+ LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadResume");
+ LLVMPositionBuilderAtEnd(g->builder, bad_resume_block);
+ gen_assertion_scope(g, PanicMsgIdBadResume, fn_table_entry->child_scope);
+
+ LLVMBasicBlockRef get_size_block = LLVMAppendBasicBlock(g->cur_fn_val, "GetSize");
+ LLVMPositionBuilderAtEnd(g->builder, get_size_block);
+ assert(fn_table_entry->frame_type->abi_size != 0);
+ assert(fn_table_entry->frame_type->abi_size != SIZE_MAX);
+ LLVMValueRef size_val = LLVMConstInt(usize_type_ref, fn_table_entry->frame_type->abi_size, false);
+ LLVMBuildRet(g->builder, size_val);
+
+ LLVMPositionBuilderAtEnd(g->builder, fn_table_entry->preamble_llvm_block);
+ LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr,
+ coro_resume_index_index, "");
+ LLVMValueRef resume_index = LLVMBuildLoad(g->builder, resume_index_ptr, "");
+ // The +1 is because index 0 is reserved for getting the size.
+ LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, resume_index, bad_resume_block,
+ fn_table_entry->resume_blocks.length + 1);
+
+ LLVMValueRef zero = LLVMConstNull(usize_type_ref);
+ LLVMAddCase(switch_instr, zero, get_size_block);
+
+ for (size_t resume_i = 0; resume_i < fn_table_entry->resume_blocks.length; resume_i += 1) {
+ LLVMValueRef case_value = LLVMConstInt(usize_type_ref, resume_i + 1, false);
+ LLVMAddCase(switch_instr, case_value, fn_table_entry->resume_blocks.at(resume_i)->llvm_block);
+ }
+ }
+
ir_render(g, fn_table_entry);
}
@@ -6644,9 +6729,13 @@ static void define_builtin_types(CodeGen *g) {
g->primitive_type_table.put(&entry->name, entry);
}
+ {
+ const char *field_names[] = {"resume_index"};
+ ZigType *field_types[] = {g->builtin_types.entry_usize};
+ g->builtin_types.entry_frame_header = get_struct_type(g, "(frame header)", field_names, field_types, 1);
+ }
}
-
static BuiltinFnEntry *create_builtin_fn(CodeGen *g, BuiltinFnId id, const char *name, size_t count) {
BuiltinFnEntry *builtin_fn = allocate(1);
buf_init_from_str(&builtin_fn->name, name);
@@ -7072,6 +7161,7 @@ Buf *codegen_generate_builtin_source(CodeGen *g) {
" BoundFn: Fn,\n"
" ArgTuple: void,\n"
" Opaque: void,\n"
+ " Frame: void,\n"
" Vector: Vector,\n"
" EnumLiteral: void,\n"
"\n\n"
@@ -8335,6 +8425,7 @@ static void prepend_c_type_to_decl_list(CodeGen *g, GenH *gen_h, ZigType *type_e
case ZigTypeIdArgTuple:
case ZigTypeIdErrorUnion:
case ZigTypeIdErrorSet:
+ case ZigTypeIdCoroFrame:
zig_unreachable();
case ZigTypeIdVoid:
case ZigTypeIdUnreachable:
@@ -8518,6 +8609,7 @@ static void get_c_type(CodeGen *g, GenH *gen_h, ZigType *type_entry, Buf *out_bu
case ZigTypeIdUndefined:
case ZigTypeIdNull:
case ZigTypeIdArgTuple:
+ case ZigTypeIdCoroFrame:
zig_unreachable();
}
}
@@ -8685,6 +8777,7 @@ static void gen_h_file(CodeGen *g) {
case ZigTypeIdOptional:
case ZigTypeIdFn:
case ZigTypeIdVector:
+ case ZigTypeIdCoroFrame:
zig_unreachable();
case ZigTypeIdEnum:
if (type_entry->data.enumeration.layout == ContainerLayoutExtern) {
diff --git a/src/ir.cpp b/src/ir.cpp
index f23fe1b7d0..2332e28c84 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -318,6 +318,7 @@ static bool types_have_same_zig_comptime_repr(ZigType *a, ZigType *b) {
case ZigTypeIdFn:
case ZigTypeIdArgTuple:
case ZigTypeIdVector:
+ case ZigTypeIdCoroFrame:
return false;
}
zig_unreachable();
@@ -1026,6 +1027,14 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionUnionInitNamedFi
return IrInstructionIdUnionInitNamedField;
}
+static constexpr IrInstructionId ir_instruction_id(IrInstructionSuspendBegin *) {
+ return IrInstructionIdSuspendBegin;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionSuspendBr *) {
+ return IrInstructionIdSuspendBr;
+}
+
template
static T *ir_create_instruction(IrBuilder *irb, Scope *scope, AstNode *source_node) {
T *special_instruction = allocate(1);
@@ -3183,6 +3192,30 @@ static IrInstruction *ir_build_end_expr(IrBuilder *irb, Scope *scope, AstNode *s
return &instruction->base;
}
+static IrInstruction *ir_build_suspend_begin(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ IrBasicBlock *resume_block)
+{
+ IrInstructionSuspendBegin *instruction = ir_build_instruction(irb, scope, source_node);
+ instruction->base.value.type = irb->codegen->builtin_types.entry_void;
+ instruction->resume_block = resume_block;
+
+ ir_ref_bb(resume_block);
+
+ return &instruction->base;
+}
+
+static IrInstruction *ir_build_suspend_br(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ IrBasicBlock *resume_block)
+{
+ IrInstructionSuspendBr *instruction = ir_build_instruction(irb, scope, source_node);
+ instruction->base.value.type = irb->codegen->builtin_types.entry_unreachable;
+ instruction->resume_block = resume_block;
+
+ ir_ref_bb(resume_block);
+
+ return &instruction->base;
+}
+
static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_scope, size_t *results) {
results[ReturnKindUnconditional] = 0;
results[ReturnKindError] = 0;
@@ -3286,6 +3319,18 @@ static void ir_set_cursor_at_end_and_append_block(IrBuilder *irb, IrBasicBlock *
ir_set_cursor_at_end(irb, basic_block);
}
+static ScopeSuspend *get_scope_suspend(Scope *scope) {
+ while (scope) {
+ if (scope->id == ScopeIdSuspend)
+ return (ScopeSuspend *)scope;
+ if (scope->id == ScopeIdFnDef)
+ return nullptr;
+
+ scope = scope->parent;
+ }
+ return nullptr;
+}
+
static ScopeDeferExpr *get_scope_defer_expr(Scope *scope) {
while (scope) {
if (scope->id == ScopeIdDeferExpr)
@@ -3308,14 +3353,9 @@ static IrInstruction *ir_gen_async_return(IrBuilder *irb, Scope *scope, AstNode
{
ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, return_value));
- bool is_async = exec_is_async(irb->exec);
- if (!is_async) {
- IrInstruction *return_inst = ir_build_return(irb, scope, node, return_value);
- return_inst->is_gen = is_generated_code;
- return return_inst;
- }
-
- zig_panic("TODO async return");
+ IrInstruction *return_inst = ir_build_return(irb, scope, node, return_value);
+ return_inst->is_gen = is_generated_code;
+ return return_inst;
}
static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval, ResultLoc *result_loc) {
@@ -5393,12 +5433,8 @@ static IrInstruction *ir_gen_fn_call(IrBuilder *irb, Scope *scope, AstNode *node
}
bool is_async = node->data.fn_call_expr.is_async;
- if (is_async) {
- zig_panic("TODO async fn call");
- }
-
- IrInstruction *fn_call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false, FnInlineAuto,
- is_async, nullptr, result_loc);
+ IrInstruction *fn_call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false,
+ FnInlineAuto, is_async, nullptr, result_loc);
return ir_lval_wrap(irb, scope, fn_call, lval, result_loc);
}
@@ -7655,7 +7691,45 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n
static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNode *node) {
assert(node->type == NodeTypeSuspend);
- zig_panic("TODO ir_gen_suspend");
+ ZigFn *fn_entry = exec_fn_entry(irb->exec);
+ if (!fn_entry) {
+ add_node_error(irb->codegen, node, buf_sprintf("suspend outside function definition"));
+ return irb->codegen->invalid_instruction;
+ }
+ ScopeDeferExpr *scope_defer_expr = get_scope_defer_expr(parent_scope);
+ if (scope_defer_expr) {
+ if (!scope_defer_expr->reported_err) {
+ ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot suspend inside defer expression"));
+ add_error_note(irb->codegen, msg, scope_defer_expr->base.source_node, buf_sprintf("defer here"));
+ scope_defer_expr->reported_err = true;
+ }
+ return irb->codegen->invalid_instruction;
+ }
+ ScopeSuspend *existing_suspend_scope = get_scope_suspend(parent_scope);
+ if (existing_suspend_scope) {
+ if (!existing_suspend_scope->reported_err) {
+ ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot suspend inside suspend block"));
+ add_error_note(irb->codegen, msg, existing_suspend_scope->base.source_node, buf_sprintf("other suspend block here"));
+ existing_suspend_scope->reported_err = true;
+ }
+ return irb->codegen->invalid_instruction;
+ }
+
+ IrBasicBlock *resume_block = ir_create_basic_block(irb, parent_scope, "Resume");
+
+ ir_build_suspend_begin(irb, parent_scope, node, resume_block);
+ if (node->data.suspend.block != nullptr) {
+ Scope *child_scope;
+ ScopeSuspend *suspend_scope = create_suspend_scope(irb->codegen, node, parent_scope);
+ suspend_scope->resume_block = resume_block;
+ child_scope = &suspend_scope->base;
+ IrInstruction *susp_res = ir_gen_node(irb, node->data.suspend.block, child_scope);
+ ir_mark_gen(ir_build_check_statement_is_void(irb, child_scope, node->data.suspend.block, susp_res));
+ }
+
+ IrInstruction *result = ir_build_suspend_br(irb, parent_scope, node, resume_block);
+ ir_set_cursor_at_end_and_append_block(irb, resume_block);
+ return result;
}
static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scope,
@@ -7854,13 +7928,6 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
// Entry block gets a reference because we enter it to begin.
ir_ref_bb(irb->current_basic_block);
- ZigFn *fn_entry = exec_fn_entry(irb->exec);
-
- bool is_async = fn_entry != nullptr && fn_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync;
- if (is_async) {
- zig_panic("ir_gen async fn");
- }
-
IrInstruction *result = ir_gen_node_extra(irb, node, scope, LValNone, nullptr);
assert(result);
if (irb->exec->invalid)
@@ -12659,6 +12726,7 @@ static IrInstruction *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp *
case ZigTypeIdNull:
case ZigTypeIdErrorUnion:
case ZigTypeIdUnion:
+ case ZigTypeIdCoroFrame:
operator_allowed = false;
break;
case ZigTypeIdOptional:
@@ -14023,6 +14091,7 @@ static IrInstruction *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructio
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
+ case ZigTypeIdCoroFrame:
ir_add_error(ira, target,
buf_sprintf("invalid export target '%s'", buf_ptr(&type_value->name)));
break;
@@ -14047,6 +14116,7 @@ static IrInstruction *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructio
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
case ZigTypeIdEnumLiteral:
+ case ZigTypeIdCoroFrame:
ir_add_error(ira, target,
buf_sprintf("invalid export target type '%s'", buf_ptr(&target->value.type->name)));
break;
@@ -14553,6 +14623,20 @@ static IrInstruction *ir_analyze_instruction_reset_result(IrAnalyze *ira, IrInst
return ir_const_void(ira, &instruction->base);
}
+static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc *call_instruction, ZigFn *fn_entry,
+ ZigType *fn_type, IrInstruction *fn_ref, IrInstruction **casted_args, size_t arg_count)
+{
+ ir_assert(fn_entry != nullptr, &call_instruction->base);
+
+ ZigType *frame_type = get_coro_frame_type(ira->codegen, fn_entry);
+ IrInstruction *result_loc = ir_resolve_result(ira, &call_instruction->base, call_instruction->result_loc,
+ frame_type, nullptr, true, true);
+ if (result_loc != nullptr && (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc))) {
+ return result_loc;
+ }
+ return ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref, arg_count,
+ casted_args, FnInlineAuto, true, nullptr, result_loc, frame_type);
+}
static bool ir_analyze_fn_call_inline_arg(IrAnalyze *ira, AstNode *fn_proto_node,
IrInstruction *arg, Scope **exec_scope, size_t *next_proto_i)
{
@@ -15366,16 +15450,18 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
if (type_is_invalid(return_type))
return ira->codegen->invalid_instruction;
- if (call_instruction->is_async) {
- zig_panic("TODO async call");
- }
-
if (fn_entry != nullptr && fn_entry->fn_inline == FnInlineAlways && fn_inline == FnInlineNever) {
ir_add_error(ira, &call_instruction->base,
buf_sprintf("no-inline call of inline function"));
return ira->codegen->invalid_instruction;
}
+ if (call_instruction->is_async) {
+ IrInstruction *result = ir_analyze_async_call(ira, call_instruction, fn_entry, fn_type, fn_ref,
+ casted_args, call_param_count);
+ return ir_finish_anal(ira, result);
+ }
+
IrInstruction *result_loc;
if (handle_is_ptr(return_type)) {
result_loc = ir_resolve_result(ira, &call_instruction->base, call_instruction->result_loc,
@@ -15535,7 +15621,7 @@ static Error ir_read_const_ptr(IrAnalyze *ira, CodeGen *codegen, AstNode *source
zig_unreachable();
}
-static IrInstruction *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op_instruction) {
+static IrInstruction *ir_analyze_optional_type(IrAnalyze *ira, IrInstructionUnOp *un_op_instruction) {
Error err;
IrInstruction *value = un_op_instruction->value->child;
ZigType *type_entry = ir_resolve_type(ira, value);
@@ -15569,6 +15655,7 @@ static IrInstruction *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op_
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
+ case ZigTypeIdCoroFrame:
return ir_const_type(ira, &un_op_instruction->base, get_optional_type(ira->codegen, type_entry));
case ZigTypeIdUnreachable:
case ZigTypeIdOpaque:
@@ -15733,7 +15820,7 @@ static IrInstruction *ir_analyze_instruction_un_op(IrAnalyze *ira, IrInstruction
return result;
}
case IrUnOpOptional:
- return ir_analyze_maybe(ira, instruction);
+ return ir_analyze_optional_type(ira, instruction);
}
zig_unreachable();
}
@@ -17340,6 +17427,7 @@ static IrInstruction *ir_analyze_instruction_slice_type(IrAnalyze *ira,
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
case ZigTypeIdVector:
+ case ZigTypeIdCoroFrame:
{
ResolveStatus needed_status = (align_bytes == 0) ?
ResolveStatusZeroBitsKnown : ResolveStatusAlignmentKnown;
@@ -17454,6 +17542,7 @@ static IrInstruction *ir_analyze_instruction_array_type(IrAnalyze *ira,
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
case ZigTypeIdVector:
+ case ZigTypeIdCoroFrame:
{
if ((err = ensure_complete_type(ira->codegen, child_type)))
return ira->codegen->invalid_instruction;
@@ -17504,6 +17593,7 @@ static IrInstruction *ir_analyze_instruction_size_of(IrAnalyze *ira,
case ZigTypeIdUnion:
case ZigTypeIdFn:
case ZigTypeIdVector:
+ case ZigTypeIdCoroFrame:
{
uint64_t size_in_bytes = type_size(ira->codegen, type_entry);
return ir_const_unsigned(ira, &size_of_instruction->base, size_in_bytes);
@@ -18067,6 +18157,7 @@ static IrInstruction *ir_analyze_instruction_switch_target(IrAnalyze *ira,
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
case ZigTypeIdVector:
+ case ZigTypeIdCoroFrame:
ir_add_error(ira, &switch_target_instruction->base,
buf_sprintf("invalid switch target type '%s'", buf_ptr(&target_type->name)));
return ira->codegen->invalid_instruction;
@@ -19906,6 +19997,8 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInstruction *source_instr
break;
}
+ case ZigTypeIdCoroFrame:
+ zig_panic("TODO @typeInfo for coro frames");
}
assert(result != nullptr);
@@ -21660,6 +21753,7 @@ static IrInstruction *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstruct
case ZigTypeIdUnion:
case ZigTypeIdFn:
case ZigTypeIdVector:
+ case ZigTypeIdCoroFrame:
{
uint64_t align_in_bytes = get_abi_alignment(ira->codegen, type_entry);
return ir_const_unsigned(ira, &instruction->base, align_in_bytes);
@@ -22815,6 +22909,8 @@ static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue
zig_panic("TODO buf_write_value_bytes fn type");
case ZigTypeIdUnion:
zig_panic("TODO buf_write_value_bytes union type");
+ case ZigTypeIdCoroFrame:
+ zig_panic("TODO buf_write_value_bytes coro frame type");
}
zig_unreachable();
}
@@ -22994,6 +23090,8 @@ static Error buf_read_value_bytes(IrAnalyze *ira, CodeGen *codegen, AstNode *sou
zig_panic("TODO buf_read_value_bytes fn type");
case ZigTypeIdUnion:
zig_panic("TODO buf_read_value_bytes union type");
+ case ZigTypeIdCoroFrame:
+ zig_panic("TODO buf_read_value_bytes coro frame type");
}
zig_unreachable();
}
@@ -24021,6 +24119,33 @@ static IrInstruction *ir_analyze_instruction_union_init_named_field(IrAnalyze *i
union_type, field_name, field_result_loc, result_loc);
}
+static IrInstruction *ir_analyze_instruction_suspend_begin(IrAnalyze *ira, IrInstructionSuspendBegin *instruction) {
+ IrBasicBlock *new_bb = ir_get_new_bb_runtime(ira, instruction->resume_block, &instruction->base);
+ if (new_bb == nullptr)
+ return ir_unreach_error(ira);
+ return ir_build_suspend_begin(&ira->new_irb, instruction->base.scope, instruction->base.source_node, new_bb);
+}
+
+static IrInstruction *ir_analyze_instruction_suspend_br(IrAnalyze *ira, IrInstructionSuspendBr *instruction) {
+ IrBasicBlock *old_dest_block = instruction->resume_block;
+
+ IrBasicBlock *new_bb = ir_get_new_bb_runtime(ira, old_dest_block, &instruction->base);
+ if (new_bb == nullptr)
+ return ir_unreach_error(ira);
+
+ ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
+ ir_assert(fn_entry != nullptr, &instruction->base);
+ fn_entry->resume_blocks.append(new_bb);
+ // This is done after appending the block because resume_index 0 is reserved for querying the size.
+ new_bb->resume_index = fn_entry->resume_blocks.length;
+
+ ir_push_resume_block(ira, old_dest_block);
+
+ IrInstruction *result = ir_build_suspend_br(&ira->new_irb,
+ instruction->base.scope, instruction->base.source_node, new_bb);
+ return ir_finish_anal(ira, result);
+}
+
static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction *instruction) {
switch (instruction->id) {
case IrInstructionIdInvalid:
@@ -24304,6 +24429,10 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_bit_cast_src(ira, (IrInstructionBitCastSrc *)instruction);
case IrInstructionIdUnionInitNamedField:
return ir_analyze_instruction_union_init_named_field(ira, (IrInstructionUnionInitNamedField *)instruction);
+ case IrInstructionIdSuspendBegin:
+ return ir_analyze_instruction_suspend_begin(ira, (IrInstructionSuspendBegin *)instruction);
+ case IrInstructionIdSuspendBr:
+ return ir_analyze_instruction_suspend_br(ira, (IrInstructionSuspendBr *)instruction);
}
zig_unreachable();
}
@@ -24436,6 +24565,8 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdOptionalWrap:
case IrInstructionIdVectorToArray:
case IrInstructionIdResetResult:
+ case IrInstructionIdSuspendBegin:
+ case IrInstructionIdSuspendBr:
return true;
case IrInstructionIdPhi:
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index 9ea70ba7ab..3a77e92bc7 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -1503,6 +1503,16 @@ static void ir_print_union_init_named_field(IrPrint *irp, IrInstructionUnionInit
fprintf(irp->f, ")");
}
+static void ir_print_suspend_begin(IrPrint *irp, IrInstructionSuspendBegin *instruction) {
+ fprintf(irp->f, "@suspendBegin()");
+}
+
+static void ir_print_suspend_br(IrPrint *irp, IrInstructionSuspendBr *instruction) {
+ fprintf(irp->f, "@suspendBr(");
+ ir_print_other_block(irp, instruction->resume_block);
+ fprintf(irp->f, ")");
+}
+
static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
ir_print_prefix(irp, instruction);
switch (instruction->id) {
@@ -1961,6 +1971,12 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdUnionInitNamedField:
ir_print_union_init_named_field(irp, (IrInstructionUnionInitNamedField *)instruction);
break;
+ case IrInstructionIdSuspendBegin:
+ ir_print_suspend_begin(irp, (IrInstructionSuspendBegin *)instruction);
+ break;
+ case IrInstructionIdSuspendBr:
+ ir_print_suspend_br(irp, (IrInstructionSuspendBr *)instruction);
+ break;
}
fprintf(irp->f, "\n");
}
--
cgit v1.2.3
From 27a5f2c4fa9cfa104faa4cc2b15cd21cc5a5501f Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sun, 21 Jul 2019 16:43:43 -0400
Subject: remove errors for async calling convention
---
src/ir.cpp | 14 --------------
1 file changed, 14 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/src/ir.cpp b/src/ir.cpp
index 2332e28c84..96d355bc95 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -14984,20 +14984,6 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
}
return ira->codegen->invalid_instruction;
}
- if (fn_type_id->cc == CallingConventionAsync && !call_instruction->is_async) {
- ErrorMsg *msg = ir_add_error(ira, fn_ref, buf_sprintf("must use async keyword to call async function"));
- if (fn_proto_node) {
- add_error_note(ira->codegen, msg, fn_proto_node, buf_sprintf("declared here"));
- }
- return ira->codegen->invalid_instruction;
- }
- if (fn_type_id->cc != CallingConventionAsync && call_instruction->is_async) {
- ErrorMsg *msg = ir_add_error(ira, fn_ref, buf_sprintf("cannot use async keyword to call non-async function"));
- if (fn_proto_node) {
- add_error_note(ira->codegen, msg, fn_proto_node, buf_sprintf("declared here"));
- }
- return ira->codegen->invalid_instruction;
- }
if (fn_type_id->is_var_args) {
--
cgit v1.2.3
From 6053ca4f69e490c744384bd02e89df595ff7b085 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sun, 21 Jul 2019 17:10:16 -0400
Subject: fix not jumping to entry
---
src/codegen.cpp | 13 +++++++++----
src/ir.cpp | 6 ++++--
2 files changed, 13 insertions(+), 6 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 85784e5ac5..47804d91f6 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -6413,6 +6413,7 @@ static void do_code_gen(CodeGen *g) {
ZigLLVMSetCurrentDebugLocation(g->builder, (int)source_node->line + 1,
(int)source_node->column + 1, get_di_scope(g, fn_table_entry->child_scope));
}
+ IrExecutable *executable = &fn_table_entry->analyzed_executable;
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadResume");
LLVMPositionBuilderAtEnd(g->builder, bad_resume_block);
@@ -6429,15 +6430,19 @@ static void do_code_gen(CodeGen *g) {
LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr,
coro_resume_index_index, "");
LLVMValueRef resume_index = LLVMBuildLoad(g->builder, resume_index_ptr, "");
- // The +1 is because index 0 is reserved for getting the size.
+ // +1 - index 0 is reserved for the entry block
+ // +1 - index 1 is reserved for getting the size.
LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, resume_index, bad_resume_block,
- fn_table_entry->resume_blocks.length + 1);
+ fn_table_entry->resume_blocks.length + 2);
LLVMValueRef zero = LLVMConstNull(usize_type_ref);
- LLVMAddCase(switch_instr, zero, get_size_block);
+ LLVMAddCase(switch_instr, zero, executable->basic_block_list.at(0)->llvm_block);
+
+ LLVMValueRef one = LLVMConstInt(usize_type_ref, 1, false);
+ LLVMAddCase(switch_instr, one, get_size_block);
for (size_t resume_i = 0; resume_i < fn_table_entry->resume_blocks.length; resume_i += 1) {
- LLVMValueRef case_value = LLVMConstInt(usize_type_ref, resume_i + 1, false);
+ LLVMValueRef case_value = LLVMConstInt(usize_type_ref, resume_i + 2, false);
LLVMAddCase(switch_instr, case_value, fn_table_entry->resume_blocks.at(resume_i)->llvm_block);
}
}
diff --git a/src/ir.cpp b/src/ir.cpp
index 96d355bc95..2b3462772f 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -24121,9 +24121,11 @@ static IrInstruction *ir_analyze_instruction_suspend_br(IrAnalyze *ira, IrInstru
ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
ir_assert(fn_entry != nullptr, &instruction->base);
+
+ // +2 - one for the GetSize block, one for the Entry block, resume blocks are indexed after that.
+ new_bb->resume_index = fn_entry->resume_blocks.length + 2;
+
fn_entry->resume_blocks.append(new_bb);
- // This is done after appending the block because resume_index 0 is reserved for querying the size.
- new_bb->resume_index = fn_entry->resume_blocks.length;
ir_push_resume_block(ira, old_dest_block);
--
cgit v1.2.3
From 11bd50f2b2a74ce25d841a15ba67d042d41b71c2 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sun, 21 Jul 2019 20:54:08 -0400
Subject: implement coroutine resume
---
src/all_types.hpp | 7 +++++++
src/codegen.cpp | 14 ++++++++++++++
src/ir.cpp | 35 ++++++++++++++++++++++++++++++++++-
src/ir_print.cpp | 9 +++++++++
test/stage1/behavior/coroutines.zig | 4 +++-
5 files changed, 67 insertions(+), 2 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/src/all_types.hpp b/src/all_types.hpp
index a2b569898c..82d2e2cddb 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -2322,6 +2322,7 @@ enum IrInstructionId {
IrInstructionIdUnionInitNamedField,
IrInstructionIdSuspendBegin,
IrInstructionIdSuspendBr,
+ IrInstructionIdCoroResume,
};
struct IrInstruction {
@@ -3548,6 +3549,12 @@ struct IrInstructionSuspendBr {
IrBasicBlock *resume_block;
};
+struct IrInstructionCoroResume {
+ IrInstruction base;
+
+ IrInstruction *frame;
+};
+
enum ResultLocId {
ResultLocIdInvalid,
ResultLocIdNone,
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 4a9e5fd629..fa5f3ef8ee 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -4969,6 +4969,18 @@ static LLVMValueRef ir_render_suspend_br(CodeGen *g, IrExecutable *executable,
return nullptr;
}
+static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable,
+ IrInstructionCoroResume *instruction)
+{
+ LLVMValueRef frame = ir_llvm_value(g, instruction->frame);
+ ZigType *frame_type = instruction->frame->value.type;
+ assert(frame_type->id == ZigTypeIdCoroFrame);
+ ZigFn *fn = frame_type->data.frame.fn;
+ LLVMValueRef fn_val = fn_llvm_value(g, fn);
+ LLVMBuildCall(g->builder, fn_val, &frame, 1, "");
+ return nullptr;
+}
+
static void set_debug_location(CodeGen *g, IrInstruction *instruction) {
AstNode *source_node = instruction->source_node;
Scope *scope = instruction->scope;
@@ -5213,6 +5225,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_suspend_begin(g, executable, (IrInstructionSuspendBegin *)instruction);
case IrInstructionIdSuspendBr:
return ir_render_suspend_br(g, executable, (IrInstructionSuspendBr *)instruction);
+ case IrInstructionIdCoroResume:
+ return ir_render_coro_resume(g, executable, (IrInstructionCoroResume *)instruction);
}
zig_unreachable();
}
diff --git a/src/ir.cpp b/src/ir.cpp
index 2b3462772f..d0a11c2f1e 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -1035,6 +1035,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionSuspendBr *) {
return IrInstructionIdSuspendBr;
}
+static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroResume *) {
+ return IrInstructionIdCoroResume;
+}
+
template
static T *ir_create_instruction(IrBuilder *irb, Scope *scope, AstNode *source_node) {
T *special_instruction = allocate(1);
@@ -3216,6 +3220,18 @@ static IrInstruction *ir_build_suspend_br(IrBuilder *irb, Scope *scope, AstNode
return &instruction->base;
}
+static IrInstruction *ir_build_coro_resume(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ IrInstruction *frame)
+{
+ IrInstructionCoroResume *instruction = ir_build_instruction(irb, scope, source_node);
+ instruction->base.value.type = irb->codegen->builtin_types.entry_void;
+ instruction->frame = frame;
+
+ ir_ref_instruction(frame, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_scope, size_t *results) {
results[ReturnKindUnconditional] = 0;
results[ReturnKindError] = 0;
@@ -7675,7 +7691,7 @@ static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node)
if (target_inst == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
- zig_panic("TODO ir_gen_resume");
+ return ir_build_coro_resume(irb, scope, node, target_inst);
}
static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *node) {
@@ -24134,6 +24150,20 @@ static IrInstruction *ir_analyze_instruction_suspend_br(IrAnalyze *ira, IrInstru
return ir_finish_anal(ira, result);
}
+static IrInstruction *ir_analyze_instruction_coro_resume(IrAnalyze *ira, IrInstructionCoroResume *instruction) {
+ IrInstruction *frame = instruction->frame->child;
+ if (type_is_invalid(frame->value.type))
+ return ira->codegen->invalid_instruction;
+
+ if (frame->value.type->id != ZigTypeIdCoroFrame) {
+ ir_add_error(ira, instruction->frame,
+ buf_sprintf("expected frame, found '%s'", buf_ptr(&frame->value.type->name)));
+ return ira->codegen->invalid_instruction;
+ }
+
+ return ir_build_coro_resume(&ira->new_irb, instruction->base.scope, instruction->base.source_node, frame);
+}
+
static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction *instruction) {
switch (instruction->id) {
case IrInstructionIdInvalid:
@@ -24421,6 +24451,8 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_suspend_begin(ira, (IrInstructionSuspendBegin *)instruction);
case IrInstructionIdSuspendBr:
return ir_analyze_instruction_suspend_br(ira, (IrInstructionSuspendBr *)instruction);
+ case IrInstructionIdCoroResume:
+ return ir_analyze_instruction_coro_resume(ira, (IrInstructionCoroResume *)instruction);
}
zig_unreachable();
}
@@ -24555,6 +24587,7 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdResetResult:
case IrInstructionIdSuspendBegin:
case IrInstructionIdSuspendBr:
+ case IrInstructionIdCoroResume:
return true;
case IrInstructionIdPhi:
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index 3a77e92bc7..e14647ea82 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -1513,6 +1513,12 @@ static void ir_print_suspend_br(IrPrint *irp, IrInstructionSuspendBr *instructio
fprintf(irp->f, ")");
}
+static void ir_print_coro_resume(IrPrint *irp, IrInstructionCoroResume *instruction) {
+ fprintf(irp->f, "@coroResume(");
+ ir_print_other_instruction(irp, instruction->frame);
+ fprintf(irp->f, ")");
+}
+
static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
ir_print_prefix(irp, instruction);
switch (instruction->id) {
@@ -1977,6 +1983,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdSuspendBr:
ir_print_suspend_br(irp, (IrInstructionSuspendBr *)instruction);
break;
+ case IrInstructionIdCoroResume:
+ ir_print_coro_resume(irp, (IrInstructionCoroResume *)instruction);
+ break;
}
fprintf(irp->f, "\n");
}
diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig
index cdab411fb1..fd07790e7f 100644
--- a/test/stage1/behavior/coroutines.zig
+++ b/test/stage1/behavior/coroutines.zig
@@ -4,9 +4,11 @@ const expect = std.testing.expect;
var x: i32 = 1;
-test "simple coroutine suspend" {
+test "simple coroutine suspend and resume" {
const p = async simpleAsyncFn();
expect(x == 2);
+ resume p;
+ expect(x == 3);
}
fn simpleAsyncFn() void {
x += 1;
--
cgit v1.2.3
From 650e07ebd96d6c476cadc1f7c19856e950ceef9c Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 22 Jul 2019 13:04:22 -0400
Subject: fix suspend at end of function
---
src/ir.cpp | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/src/ir.cpp b/src/ir.cpp
index d0a11c2f1e..6f9f582c6f 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -3212,7 +3212,6 @@ static IrInstruction *ir_build_suspend_br(IrBuilder *irb, Scope *scope, AstNode
IrBasicBlock *resume_block)
{
IrInstructionSuspendBr *instruction = ir_build_instruction(irb, scope, source_node);
- instruction->base.value.type = irb->codegen->builtin_types.entry_unreachable;
instruction->resume_block = resume_block;
ir_ref_bb(resume_block);
@@ -7744,6 +7743,7 @@ static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNod
}
IrInstruction *result = ir_build_suspend_br(irb, parent_scope, node, resume_block);
+ result->value.type = irb->codegen->builtin_types.entry_void;
ir_set_cursor_at_end_and_append_block(irb, resume_block);
return result;
}
@@ -10279,7 +10279,7 @@ static IrBasicBlock *ir_get_new_bb_runtime(IrAnalyze *ira, IrBasicBlock *old_bb,
}
static void ir_start_bb(IrAnalyze *ira, IrBasicBlock *old_bb, IrBasicBlock *const_predecessor_bb) {
- ir_assert(!old_bb->suspended, old_bb->instruction_list.at(0));
+ ir_assert(!old_bb->suspended, (old_bb->instruction_list.length != 0) ? old_bb->instruction_list.at(0) : nullptr);
ira->instruction_index = 0;
ira->old_irb.current_basic_block = old_bb;
ira->const_predecessor_bb = const_predecessor_bb;
@@ -22547,7 +22547,7 @@ static IrInstruction *ir_analyze_instruction_check_statement_is_void(IrAnalyze *
if (type_is_invalid(statement_type))
return ira->codegen->invalid_instruction;
- if (statement_type->id != ZigTypeIdVoid) {
+ if (statement_type->id != ZigTypeIdVoid && statement_type->id != ZigTypeIdUnreachable) {
ir_add_error(ira, &instruction->base, buf_sprintf("expression value is ignored"));
}
@@ -24147,6 +24147,7 @@ static IrInstruction *ir_analyze_instruction_suspend_br(IrAnalyze *ira, IrInstru
IrInstruction *result = ir_build_suspend_br(&ira->new_irb,
instruction->base.scope, instruction->base.source_node, new_bb);
+ result->value.type = ira->codegen->builtin_types.entry_unreachable;
return ir_finish_anal(ira, result);
}
--
cgit v1.2.3
From 7e9760de10e05a4c2a7bae4c4bb945351b9ae0cb Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 23 Jul 2019 18:54:45 -0400
Subject: inferring async from async calls
---
build.zig | 4 ++-
src/all_types.hpp | 7 ++++
src/analyze.cpp | 100 +++++++++++++++++++++++++++++++++++++++++++-----------
src/analyze.hpp | 1 +
src/codegen.cpp | 20 ++++++-----
src/ir.cpp | 19 +++++++++++
6 files changed, 123 insertions(+), 28 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/build.zig b/build.zig
index 011f742d82..45758d4075 100644
--- a/build.zig
+++ b/build.zig
@@ -375,7 +375,9 @@ fn addLibUserlandStep(b: *Builder) void {
artifact.bundle_compiler_rt = true;
artifact.setTarget(builtin.arch, builtin.os, builtin.abi);
artifact.linkSystemLibrary("c");
- artifact.linkSystemLibrary("ntdll");
+ if (builtin.os == .windows) {
+ artifact.linkSystemLibrary("ntdll");
+ }
const libuserland_step = b.step("libuserland", "Build the userland compiler library for use in stage1");
libuserland_step.dependOn(&artifact.step);
diff --git a/src/all_types.hpp b/src/all_types.hpp
index aa7ff06ce9..8991b53e64 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -1336,6 +1336,11 @@ struct GlobalExport {
GlobalLinkageId linkage;
};
+struct FnCall {
+ AstNode *source_node;
+ ZigFn *callee;
+};
+
struct ZigFn {
CodeGen *codegen;
LLVMValueRef llvm_value;
@@ -1379,8 +1384,10 @@ struct ZigFn {
AstNode *set_alignstack_node;
AstNode *set_cold_node;
+ const AstNode *inferred_async_node;
ZigList export_list;
+ ZigList call_list;
LLVMValueRef valgrind_client_request_array;
LLVMBasicBlockRef preamble_llvm_block;
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 80e22c6c62..3da13dcc02 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -31,6 +31,11 @@ static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_r
static void preview_use_decl(CodeGen *g, TldUsingNamespace *using_namespace, ScopeDecls *dest_decls_scope);
static void resolve_use_decl(CodeGen *g, TldUsingNamespace *tld_using_namespace, ScopeDecls *dest_decls_scope);
+// nullptr means not analyzed yet; this one means currently being analyzed
+static const AstNode *inferred_async_checking = reinterpret_cast(0x1);
+// this one means analyzed and it's not async
+static const AstNode *inferred_async_none = reinterpret_cast(0x2);
+
static bool is_top_level_struct(ZigType *import) {
return import->id == ZigTypeIdStruct && import->data.structure.root_struct != nullptr;
}
@@ -1892,8 +1897,12 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) {
field_names.append("resume_index");
field_types.append(g->builtin_types.entry_usize);
- for (size_t arg_i = 0; arg_i < fn->type_entry->data.fn.fn_type_id.param_count; arg_i += 1) {
- FnTypeParamInfo *param_info = &fn->type_entry->data.fn.fn_type_id.param_info[arg_i];
+ FnTypeId *fn_type_id = &fn->type_entry->data.fn.fn_type_id;
+ field_names.append("result");
+ field_types.append(fn_type_id->return_type);
+
+ for (size_t arg_i = 0; arg_i < fn_type_id->param_count; arg_i += 1) {
+ FnTypeParamInfo *param_info = &fn_type_id->param_info[arg_i];
AstNode *param_decl_node = get_param_decl_node(fn, arg_i);
Buf *param_name;
bool is_var_args = param_decl_node && param_decl_node->data.param_decl.is_var_args;
@@ -2796,6 +2805,16 @@ static void resolve_decl_fn(CodeGen *g, TldFn *tld_fn) {
g->fn_defs.append(fn_table_entry);
}
+ switch (fn_table_entry->type_entry->data.fn.fn_type_id.cc) {
+ case CallingConventionAsync:
+ fn_table_entry->inferred_async_node = fn_table_entry->proto_node;
+ break;
+ case CallingConventionUnspecified:
+ break;
+ default:
+ fn_table_entry->inferred_async_node = inferred_async_none;
+ }
+
if (scope_is_root_decls(tld_fn->base.parent_scope) &&
(import == g->root_import || import->data.structure.root_struct->package == g->panic_package))
{
@@ -3767,6 +3786,55 @@ bool resolve_inferred_error_set(CodeGen *g, ZigType *err_set_type, AstNode *sour
return true;
}
+static void resolve_async_fn_frame(CodeGen *g, ZigFn *fn) {
+ ZigType *frame_type = get_coro_frame_type(g, fn);
+ Error err;
+ if ((err = type_resolve(g, frame_type, ResolveStatusSizeKnown))) {
+ fn->anal_state = FnAnalStateInvalid;
+ return;
+ }
+}
+
+bool fn_is_async(ZigFn *fn) {
+ assert(fn->inferred_async_node != nullptr);
+ assert(fn->inferred_async_node != inferred_async_checking);
+ return fn->inferred_async_node != inferred_async_none;
+}
+
+// This function resolves functions being inferred async.
+static void analyze_fn_async(CodeGen *g, ZigFn *fn) {
+ if (fn->inferred_async_node == inferred_async_checking) {
+ // TODO call graph cycle detected, disallow the recursion
+ fn->inferred_async_node = inferred_async_none;
+ return;
+ }
+ if (fn->inferred_async_node == inferred_async_none) {
+ return;
+ }
+ if (fn->inferred_async_node != nullptr) {
+ resolve_async_fn_frame(g, fn);
+ return;
+ }
+ fn->inferred_async_node = inferred_async_checking;
+ for (size_t i = 0; i < fn->call_list.length; i += 1) {
+ FnCall *call = &fn->call_list.at(i);
+ if (call->callee->type_entry->data.fn.fn_type_id.cc != CallingConventionUnspecified)
+ continue;
+ assert(call->callee->anal_state == FnAnalStateComplete);
+ analyze_fn_async(g, call->callee);
+ if (call->callee->anal_state == FnAnalStateInvalid) {
+ fn->anal_state = FnAnalStateInvalid;
+ return;
+ }
+ if (fn_is_async(call->callee)) {
+ fn->inferred_async_node = call->source_node;
+ resolve_async_fn_frame(g, fn);
+ return;
+ }
+ }
+ fn->inferred_async_node = inferred_async_none;
+}
+
static void analyze_fn_ir(CodeGen *g, ZigFn *fn_table_entry, AstNode *return_type_node) {
ZigType *fn_type = fn_table_entry->type_entry;
assert(!fn_type->data.fn.is_generic);
@@ -3824,17 +3892,7 @@ static void analyze_fn_ir(CodeGen *g, ZigFn *fn_table_entry, AstNode *return_typ
ir_print(g, stderr, &fn_table_entry->analyzed_executable, 4);
fprintf(stderr, "}\n");
}
-
fn_table_entry->anal_state = FnAnalStateComplete;
-
- if (fn_table_entry->resume_blocks.length != 0) {
- ZigType *frame_type = get_coro_frame_type(g, fn_table_entry);
- Error err;
- if ((err = type_resolve(g, frame_type, ResolveStatusSizeKnown))) {
- fn_table_entry->anal_state = FnAnalStateInvalid;
- return;
- }
- }
}
static void analyze_fn_body(CodeGen *g, ZigFn *fn_table_entry) {
@@ -4004,6 +4062,16 @@ void semantic_analyze(CodeGen *g) {
analyze_fn_body(g, fn_entry);
}
}
+
+ if (g->errors.length != 0) {
+ return;
+ }
+
+ // second pass over functions for detecting async
+ for (g->fn_defs_index = 0; g->fn_defs_index < g->fn_defs.length; g->fn_defs_index += 1) {
+ ZigFn *fn_entry = g->fn_defs.at(g->fn_defs_index);
+ analyze_fn_async(g, fn_entry);
+ }
}
ZigType *get_int_type(CodeGen *g, bool is_signed, uint32_t size_in_bits) {
@@ -7173,11 +7241,7 @@ void resolve_llvm_types_fn(CodeGen *g, ZigFn *fn) {
if (fn->raw_di_type != nullptr) return;
ZigType *fn_type = fn->type_entry;
- FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
- bool cc_async = fn_type_id->cc == CallingConventionAsync;
- bool inferred_async = fn->resume_blocks.length != 0;
- bool is_async = cc_async || inferred_async;
- if (!is_async) {
+ if (!fn_is_async(fn)) {
resolve_llvm_types_fn_type(g, fn_type);
fn->raw_type_ref = fn_type->data.fn.raw_type_ref;
fn->raw_di_type = fn_type->data.fn.raw_di_type;
@@ -7223,8 +7287,6 @@ static void resolve_llvm_types_anyerror(CodeGen *g) {
}
static void resolve_llvm_types_coro_frame(CodeGen *g, ZigType *frame_type, ResolveStatus wanted_resolve_status) {
- if (frame_type->llvm_di_type != nullptr) return;
-
resolve_llvm_types_struct(g, frame_type->data.frame.locals_struct, wanted_resolve_status);
frame_type->llvm_type = frame_type->data.frame.locals_struct->llvm_type;
frame_type->llvm_di_type = frame_type->data.frame.locals_struct->llvm_di_type;
diff --git a/src/analyze.hpp b/src/analyze.hpp
index 57f4452104..50e7b72309 100644
--- a/src/analyze.hpp
+++ b/src/analyze.hpp
@@ -248,5 +248,6 @@ bool is_container(ZigType *type_entry);
ConstExprValue *analyze_const_value(CodeGen *g, Scope *scope, AstNode *node, ZigType *type_entry, Buf *type_name);
void resolve_llvm_types_fn(CodeGen *g, ZigFn *fn);
+bool fn_is_async(ZigFn *fn);
#endif
diff --git a/src/codegen.cpp b/src/codegen.cpp
index f3519ea72d..6c0bedee2d 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -371,7 +371,7 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, ZigFn *fn_table_entry) {
symbol_name = buf_sprintf("\x01_%s", buf_ptr(symbol_name));
}
- bool is_async = fn_table_entry->resume_blocks.length != 0 || cc == CallingConventionAsync;
+ bool is_async = fn_is_async(fn_table_entry);
ZigType *fn_type = fn_table_entry->type_entry;
@@ -1847,7 +1847,7 @@ static bool iter_function_params_c_abi(CodeGen *g, ZigType *fn_type, FnWalk *fn_
}
case FnWalkIdInits: {
clear_debug_source_node(g);
- if (fn_walk->data.inits.fn->resume_blocks.length == 0) {
+ if (!fn_is_async(fn_walk->data.inits.fn)) {
LLVMValueRef arg = LLVMGetParam(llvm_fn, fn_walk->data.inits.gen_i);
LLVMTypeRef ptr_to_int_type_ref = LLVMPointerType(LLVMIntType((unsigned)ty_size * 8), 0);
LLVMValueRef bitcasted = LLVMBuildBitCast(g->builder, var->value_ref, ptr_to_int_type_ref, "");
@@ -1945,7 +1945,7 @@ void walk_function_params(CodeGen *g, ZigType *fn_type, FnWalk *fn_walk) {
assert(variable);
assert(variable->value_ref);
- if (!handle_is_ptr(variable->var_type) && fn_walk->data.inits.fn->resume_blocks.length == 0) {
+ if (!handle_is_ptr(variable->var_type) && !fn_is_async(fn_walk->data.inits.fn)) {
clear_debug_source_node(g);
ZigType *fn_type = fn_table_entry->type_entry;
unsigned gen_arg_index = fn_type->data.fn.gen_param_info[variable->src_arg_index].gen_index;
@@ -1986,7 +1986,7 @@ static LLVMValueRef ir_render_save_err_ret_addr(CodeGen *g, IrExecutable *execut
}
static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrInstructionReturn *return_instruction) {
- if (g->cur_fn->resume_blocks.length != 0) {
+ if (fn_is_async(g->cur_fn)) {
if (ir_want_runtime_safety(g, &return_instruction->base)) {
LLVMValueRef locals_ptr = g->cur_ret_ptr;
LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, locals_ptr, coro_resume_index_index, "");
@@ -3387,8 +3387,10 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
LLVMValueRef result;
if (instruction->is_async) {
+ size_t ret_1_or_0 = type_has_bits(fn_type->data.fn.fn_type_id.return_type) ? 1 : 0;
for (size_t arg_i = 0; arg_i < gen_param_values.length; arg_i += 1) {
- LLVMValueRef arg_ptr = LLVMBuildStructGEP(g->builder, result_loc, coro_arg_start + arg_i, "");
+ LLVMValueRef arg_ptr = LLVMBuildStructGEP(g->builder, result_loc,
+ coro_arg_start + ret_1_or_0 + arg_i, "");
LLVMBuildStore(g->builder, gen_param_values.at(arg_i), arg_ptr);
}
ZigLLVMBuildCall(g->builder, fn_val, &result_loc, 1, llvm_cc, fn_inline, "");
@@ -5983,7 +5985,7 @@ static void build_all_basic_blocks(CodeGen *g, ZigFn *fn) {
assert(executable->basic_block_list.length > 0);
LLVMValueRef fn_val = fn_llvm_value(g, fn);
LLVMBasicBlockRef first_bb = nullptr;
- if (fn->resume_blocks.length != 0) {
+ if (fn_is_async(fn)) {
first_bb = LLVMAppendBasicBlock(fn_val, "AsyncSwitch");
fn->preamble_llvm_block = first_bb;
}
@@ -6171,7 +6173,7 @@ static void do_code_gen(CodeGen *g) {
build_all_basic_blocks(g, fn_table_entry);
clear_debug_source_node(g);
- bool is_async = cc == CallingConventionAsync || fn_table_entry->resume_blocks.length != 0;
+ bool is_async = fn_is_async(fn_table_entry);
if (want_sret || is_async) {
g->cur_ret_ptr = LLVMGetParam(fn, 0);
@@ -6261,7 +6263,9 @@ static void do_code_gen(CodeGen *g) {
fn_walk_var.data.vars.var = var;
iter_function_params_c_abi(g, fn_table_entry->type_entry, &fn_walk_var, var->src_arg_index);
} else if (is_async) {
- var->value_ref = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_arg_start + var_i, "");
+ size_t ret_1_or_0 = type_has_bits(fn_type_id->return_type) ? 1 : 0;
+ var->value_ref = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr,
+ coro_arg_start + ret_1_or_0 + var_i, "");
if (var->decl_node) {
var->di_loc_var = ZigLLVMCreateAutoVariable(g->dbuilder, get_di_scope(g, var->parent_scope),
buf_ptr(&var->name), import->data.structure.root_struct->di_file,
diff --git a/src/ir.cpp b/src/ir.cpp
index 6f9f582c6f..0cc68eaa55 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -15383,6 +15383,13 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
zig_panic("TODO async call");
}
+ if (!call_instruction->is_async) {
+ if (impl_fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) {
+ parent_fn_entry->inferred_async_node = fn_ref->source_node;
+ }
+ parent_fn_entry->call_list.append({call_instruction->base.source_node, impl_fn});
+ }
+
IrInstruction *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base,
impl_fn, nullptr, impl_param_count, casted_args, fn_inline,
call_instruction->is_async, casted_new_stack, result_loc,
@@ -15458,6 +15465,15 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
return ira->codegen->invalid_instruction;
}
+ if (!call_instruction->is_async) {
+ if (fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) {
+ parent_fn_entry->inferred_async_node = fn_ref->source_node;
+ }
+ if (fn_entry != nullptr) {
+ parent_fn_entry->call_list.append({call_instruction->base.source_node, fn_entry});
+ }
+ }
+
if (call_instruction->is_async) {
IrInstruction *result = ir_analyze_async_call(ira, call_instruction, fn_entry, fn_type, fn_ref,
casted_args, call_param_count);
@@ -24142,6 +24158,9 @@ static IrInstruction *ir_analyze_instruction_suspend_br(IrAnalyze *ira, IrInstru
new_bb->resume_index = fn_entry->resume_blocks.length + 2;
fn_entry->resume_blocks.append(new_bb);
+ if (fn_entry->inferred_async_node == nullptr) {
+ fn_entry->inferred_async_node = instruction->base.source_node;
+ }
ir_push_resume_block(ira, old_dest_block);
--
cgit v1.2.3
From ead2d32be871411685f846e604ec7e4253b9f25a Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 25 Jul 2019 00:03:06 -0400
Subject: calling an inferred async function
---
src/all_types.hpp | 17 +--
src/analyze.cpp | 220 ++++++++++++++++++++++--------------
src/codegen.cpp | 72 ++++++++++--
src/ir.cpp | 26 ++---
src/zig_llvm.cpp | 4 +
src/zig_llvm.h | 1 +
test/stage1/behavior/coroutines.zig | 16 +++
7 files changed, 238 insertions(+), 118 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/src/all_types.hpp b/src/all_types.hpp
index a68f19a877..d67356b178 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -35,6 +35,7 @@ struct ConstExprValue;
struct IrInstruction;
struct IrInstructionCast;
struct IrInstructionAllocaGen;
+struct IrInstructionCallGen;
struct IrBasicBlock;
struct ScopeDecls;
struct ZigWindowsSDK;
@@ -1336,11 +1337,6 @@ struct GlobalExport {
GlobalLinkageId linkage;
};
-struct FnCall {
- AstNode *source_node;
- ZigFn *callee;
-};
-
struct ZigFn {
LLVMValueRef llvm_value;
const char *llvm_name;
@@ -1387,7 +1383,7 @@ struct ZigFn {
ZigFn *inferred_async_fn;
ZigList export_list;
- ZigList call_list;
+ ZigList call_list;
LLVMValueRef valgrind_client_request_array;
LLVMBasicBlockRef preamble_llvm_block;
@@ -2585,6 +2581,8 @@ struct IrInstructionCallGen {
size_t arg_count;
IrInstruction **args;
IrInstruction *result_loc;
+ IrInstruction *frame_result_loc;
+ IrBasicBlock *resume_block;
IrInstruction *new_stack;
FnInline fn_inline;
@@ -3645,7 +3643,12 @@ static const size_t err_union_err_index = 0;
static const size_t err_union_payload_index = 1;
static const size_t coro_resume_index_index = 0;
-static const size_t coro_arg_start = 1;
+static const size_t coro_fn_ptr_index = 1;
+static const size_t coro_awaiter_index = 2;
+static const size_t coro_arg_start = 3;
+
+// one for the GetSize block, one for the Entry block, resume blocks are indexed after that.
+static const size_t coro_extra_resume_block_count = 2;
// TODO call graph analysis to find out what this number needs to be for every function
// MUST BE A POWER OF TWO.
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 957e61b198..c8e02a4771 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -1869,80 +1869,6 @@ static Error resolve_union_type(CodeGen *g, ZigType *union_type) {
return ErrorNone;
}
-static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) {
- if (frame_type->data.frame.locals_struct != nullptr)
- return ErrorNone;
-
- ZigFn *fn = frame_type->data.frame.fn;
- switch (fn->anal_state) {
- case FnAnalStateInvalid:
- return ErrorSemanticAnalyzeFail;
- case FnAnalStateComplete:
- break;
- case FnAnalStateReady:
- analyze_fn_body(g, fn);
- if (fn->anal_state == FnAnalStateInvalid)
- return ErrorSemanticAnalyzeFail;
- break;
- case FnAnalStateProbing:
- add_node_error(g, fn->proto_node,
- buf_sprintf("cannot resolve '%s': function not fully analyzed yet",
- buf_ptr(&frame_type->name)));
- return ErrorSemanticAnalyzeFail;
- }
- // TODO iterate over fn->alloca_gen_list
- ZigList field_types = {};
- ZigList field_names = {};
-
- field_names.append("resume_index");
- field_types.append(g->builtin_types.entry_usize);
-
- FnTypeId *fn_type_id = &fn->type_entry->data.fn.fn_type_id;
- field_names.append("result");
- field_types.append(fn_type_id->return_type);
-
- for (size_t arg_i = 0; arg_i < fn_type_id->param_count; arg_i += 1) {
- FnTypeParamInfo *param_info = &fn_type_id->param_info[arg_i];
- AstNode *param_decl_node = get_param_decl_node(fn, arg_i);
- Buf *param_name;
- bool is_var_args = param_decl_node && param_decl_node->data.param_decl.is_var_args;
- if (param_decl_node && !is_var_args) {
- param_name = param_decl_node->data.param_decl.name;
- } else {
- param_name = buf_sprintf("arg%" ZIG_PRI_usize "", arg_i);
- }
- ZigType *param_type = param_info->type;
- field_names.append(buf_ptr(param_name));
- field_types.append(param_type);
- }
-
- for (size_t alloca_i = 0; alloca_i < fn->alloca_gen_list.length; alloca_i += 1) {
- IrInstructionAllocaGen *instruction = fn->alloca_gen_list.at(alloca_i);
- ZigType *ptr_type = instruction->base.value.type;
- assert(ptr_type->id == ZigTypeIdPointer);
- ZigType *child_type = ptr_type->data.pointer.child_type;
- if (!type_has_bits(child_type))
- continue;
- if (instruction->base.ref_count == 0)
- continue;
- if (instruction->base.value.special != ConstValSpecialRuntime) {
- if (const_ptr_pointee(nullptr, g, &instruction->base.value, nullptr)->special !=
- ConstValSpecialRuntime)
- {
- continue;
- }
- }
- field_names.append(instruction->name_hint);
- field_types.append(child_type);
- }
-
-
- assert(field_names.length == field_types.length);
- frame_type->data.frame.locals_struct = get_struct_type(g, buf_ptr(&frame_type->name),
- field_names.items, field_types.items, field_names.length);
- return ErrorNone;
-}
-
static bool type_is_valid_extern_enum_tag(CodeGen *g, ZigType *ty) {
// Only integer types are allowed by the C ABI
if(ty->id != ZigTypeIdInt)
@@ -3861,18 +3787,24 @@ static void analyze_fn_async(CodeGen *g, ZigFn *fn) {
}
for (size_t i = 0; i < fn->call_list.length; i += 1) {
- FnCall *call = &fn->call_list.at(i);
- if (call->callee->type_entry->data.fn.fn_type_id.cc != CallingConventionUnspecified)
+ IrInstructionCallGen *call = fn->call_list.at(i);
+ ZigFn *callee = call->fn_entry;
+ if (callee == nullptr) {
+ // TODO function pointer call here, could be anything
continue;
- assert(call->callee->anal_state == FnAnalStateComplete);
- analyze_fn_async(g, call->callee);
- if (call->callee->anal_state == FnAnalStateInvalid) {
+ }
+
+ if (callee->type_entry->data.fn.fn_type_id.cc != CallingConventionUnspecified)
+ continue;
+ assert(callee->anal_state == FnAnalStateComplete);
+ analyze_fn_async(g, callee);
+ if (callee->anal_state == FnAnalStateInvalid) {
fn->anal_state = FnAnalStateInvalid;
return;
}
- if (fn_is_async(call->callee)) {
- fn->inferred_async_node = call->source_node;
- fn->inferred_async_fn = call->callee;
+ if (fn_is_async(callee)) {
+ fn->inferred_async_node = call->base.source_node;
+ fn->inferred_async_fn = callee;
if (must_not_be_async) {
ErrorMsg *msg = add_node_error(g, fn->proto_node,
buf_sprintf("function with calling convention '%s' cannot be async",
@@ -5147,6 +5079,127 @@ Error ensure_complete_type(CodeGen *g, ZigType *type_entry) {
return type_resolve(g, type_entry, ResolveStatusSizeKnown);
}
+static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) {
+ if (frame_type->data.frame.locals_struct != nullptr)
+ return ErrorNone;
+
+ ZigFn *fn = frame_type->data.frame.fn;
+ switch (fn->anal_state) {
+ case FnAnalStateInvalid:
+ return ErrorSemanticAnalyzeFail;
+ case FnAnalStateComplete:
+ break;
+ case FnAnalStateReady:
+ analyze_fn_body(g, fn);
+ if (fn->anal_state == FnAnalStateInvalid)
+ return ErrorSemanticAnalyzeFail;
+ break;
+ case FnAnalStateProbing:
+ add_node_error(g, fn->proto_node,
+ buf_sprintf("cannot resolve '%s': function not fully analyzed yet",
+ buf_ptr(&frame_type->name)));
+ return ErrorSemanticAnalyzeFail;
+ }
+
+ for (size_t i = 0; i < fn->call_list.length; i += 1) {
+ IrInstructionCallGen *call = fn->call_list.at(i);
+ ZigFn *callee = call->fn_entry;
+ assert(callee != nullptr);
+
+ analyze_fn_body(g, callee);
+ if (callee->anal_state == FnAnalStateInvalid) {
+ frame_type->data.frame.locals_struct = g->builtin_types.entry_invalid;
+ return ErrorSemanticAnalyzeFail;
+ }
+ analyze_fn_async(g, callee);
+ if (!fn_is_async(callee))
+ continue;
+
+ IrBasicBlock *new_resume_block = allocate(1);
+ new_resume_block->name_hint = "CallResume";
+ new_resume_block->resume_index = fn->resume_blocks.length + coro_extra_resume_block_count;
+ fn->resume_blocks.append(new_resume_block);
+ call->resume_block = new_resume_block;
+ fn->analyzed_executable.basic_block_list.append(new_resume_block);
+
+ ZigType *callee_frame_type = get_coro_frame_type(g, callee);
+
+ IrInstructionAllocaGen *alloca_gen = allocate(1);
+ alloca_gen->base.id = IrInstructionIdAllocaGen;
+ alloca_gen->base.source_node = call->base.source_node;
+ alloca_gen->base.scope = call->base.scope;
+ alloca_gen->base.value.type = get_pointer_to_type(g, callee_frame_type, false);
+ alloca_gen->base.ref_count = 1;
+ alloca_gen->name_hint = "";
+ fn->alloca_gen_list.append(alloca_gen);
+ call->frame_result_loc = &alloca_gen->base;
+ }
+
+ ZigList field_types = {};
+ ZigList field_names = {};
+
+ field_names.append("resume_index");
+ field_types.append(g->builtin_types.entry_usize);
+
+ field_names.append("fn_ptr");
+ field_types.append(fn->type_entry);
+
+ field_names.append("awaiter");
+ field_types.append(g->builtin_types.entry_usize);
+
+ FnTypeId *fn_type_id = &fn->type_entry->data.fn.fn_type_id;
+ ZigType *ptr_return_type = get_pointer_to_type(g, fn_type_id->return_type, false);
+ field_names.append("result_ptr");
+ field_types.append(ptr_return_type);
+
+ field_names.append("result");
+ field_types.append(fn_type_id->return_type);
+
+ for (size_t arg_i = 0; arg_i < fn_type_id->param_count; arg_i += 1) {
+ FnTypeParamInfo *param_info = &fn_type_id->param_info[arg_i];
+ AstNode *param_decl_node = get_param_decl_node(fn, arg_i);
+ Buf *param_name;
+ bool is_var_args = param_decl_node && param_decl_node->data.param_decl.is_var_args;
+ if (param_decl_node && !is_var_args) {
+ param_name = param_decl_node->data.param_decl.name;
+ } else {
+ param_name = buf_sprintf("arg%" ZIG_PRI_usize "", arg_i);
+ }
+ ZigType *param_type = param_info->type;
+ field_names.append(buf_ptr(param_name));
+ field_types.append(param_type);
+ }
+
+ for (size_t alloca_i = 0; alloca_i < fn->alloca_gen_list.length; alloca_i += 1) {
+ IrInstructionAllocaGen *instruction = fn->alloca_gen_list.at(alloca_i);
+ ZigType *ptr_type = instruction->base.value.type;
+ assert(ptr_type->id == ZigTypeIdPointer);
+ ZigType *child_type = ptr_type->data.pointer.child_type;
+ if (!type_has_bits(child_type))
+ continue;
+ if (instruction->base.ref_count == 0)
+ continue;
+ if (instruction->base.value.special != ConstValSpecialRuntime) {
+ if (const_ptr_pointee(nullptr, g, &instruction->base.value, nullptr)->special !=
+ ConstValSpecialRuntime)
+ {
+ continue;
+ }
+ }
+ field_names.append(instruction->name_hint);
+ field_types.append(child_type);
+ }
+
+
+ assert(field_names.length == field_types.length);
+ frame_type->data.frame.locals_struct = get_struct_type(g, buf_ptr(&frame_type->name),
+ field_names.items, field_types.items, field_names.length);
+ frame_type->abi_size = frame_type->data.frame.locals_struct->abi_size;
+ frame_type->abi_align = frame_type->data.frame.locals_struct->abi_align;
+ frame_type->size_in_bits = frame_type->data.frame.locals_struct->size_in_bits;
+ return ErrorNone;
+}
+
Error type_resolve(CodeGen *g, ZigType *ty, ResolveStatus status) {
if (type_is_invalid(ty))
return ErrorSemanticAnalyzeFail;
@@ -7343,9 +7396,6 @@ static void resolve_llvm_types_coro_frame(CodeGen *g, ZigType *frame_type, Resol
resolve_llvm_types_struct(g, frame_type->data.frame.locals_struct, wanted_resolve_status);
frame_type->llvm_type = frame_type->data.frame.locals_struct->llvm_type;
frame_type->llvm_di_type = frame_type->data.frame.locals_struct->llvm_di_type;
- frame_type->abi_size = frame_type->data.frame.locals_struct->abi_size;
- frame_type->abi_align = frame_type->data.frame.locals_struct->abi_align;
- frame_type->size_in_bits = frame_type->data.frame.locals_struct->size_in_bits;
}
static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_resolve_status) {
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 534b97232e..34f4aa1cc4 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -3324,13 +3324,16 @@ static void set_call_instr_sret(CodeGen *g, LLVMValueRef call_instr) {
static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstructionCallGen *instruction) {
LLVMValueRef fn_val;
ZigType *fn_type;
+ bool callee_is_async;
if (instruction->fn_entry) {
fn_val = fn_llvm_value(g, instruction->fn_entry);
fn_type = instruction->fn_entry->type_entry;
+ callee_is_async = fn_is_async(instruction->fn_entry);
} else {
assert(instruction->fn_ref);
fn_val = ir_llvm_value(g, instruction->fn_ref);
fn_type = instruction->fn_ref->value.type;
+ callee_is_async = fn_type->data.fn.fn_type_id.cc == CallingConventionAsync;
}
FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
@@ -3345,17 +3348,47 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
bool is_var_args = fn_type_id->is_var_args;
ZigList gen_param_values = {};
LLVMValueRef result_loc = instruction->result_loc ? ir_llvm_value(g, instruction->result_loc) : nullptr;
+ LLVMValueRef zero = LLVMConstNull(g->builtin_types.entry_usize->llvm_type);
+ LLVMValueRef frame_result_loc;
+ LLVMValueRef awaiter_init_val;
+ LLVMValueRef ret_ptr;
if (instruction->is_async) {
- assert(result_loc != nullptr);
+ frame_result_loc = result_loc;
+ awaiter_init_val = zero;
+ if (ret_has_bits) {
+ ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_arg_start + 1, "");
+ }
+ } else if (callee_is_async) {
+ frame_result_loc = ir_llvm_value(g, instruction->frame_result_loc);
+ awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_ret_ptr,
+ g->builtin_types.entry_usize->llvm_type, ""); // caller's own frame pointer
+ if (ret_has_bits) {
+ ret_ptr = result_loc;
+ }
+ }
+ if (instruction->is_async || callee_is_async) {
+ assert(frame_result_loc != nullptr);
assert(instruction->fn_entry != nullptr);
- LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, result_loc, coro_resume_index_index, "");
- LLVMValueRef zero = LLVMConstNull(g->builtin_types.entry_usize->llvm_type);
+ LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_resume_index_index, "");
LLVMBuildStore(g->builder, zero, resume_index_ptr);
+ LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_fn_ptr_index, "");
+ LLVMValueRef bitcasted_fn_val = LLVMBuildBitCast(g->builder, fn_val,
+ LLVMGetElementType(LLVMTypeOf(fn_ptr_ptr)), "");
+ LLVMBuildStore(g->builder, bitcasted_fn_val, fn_ptr_ptr);
if (prefix_arg_err_ret_stack) {
zig_panic("TODO");
}
- } else {
+
+ LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_awaiter_index, "");
+ LLVMBuildStore(g->builder, awaiter_init_val, awaiter_ptr);
+
+ if (ret_has_bits) {
+ LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_arg_start, "");
+ LLVMBuildStore(g->builder, ret_ptr, ret_ptr_ptr);
+ }
+ }
+ if (!instruction->is_async && !callee_is_async) {
if (first_arg_ret) {
gen_param_values.append(result_loc);
}
@@ -3386,14 +3419,28 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
LLVMCallConv llvm_cc = get_llvm_cc(g, cc);
LLVMValueRef result;
- if (instruction->is_async) {
- size_t ret_1_or_0 = type_has_bits(fn_type->data.fn.fn_type_id.return_type) ? 1 : 0;
+ if (instruction->is_async || callee_is_async) {
+ size_t ret_2_or_0 = type_has_bits(fn_type->data.fn.fn_type_id.return_type) ? 2 : 0;
for (size_t arg_i = 0; arg_i < gen_param_values.length; arg_i += 1) {
- LLVMValueRef arg_ptr = LLVMBuildStructGEP(g->builder, result_loc,
- coro_arg_start + ret_1_or_0 + arg_i, "");
+ LLVMValueRef arg_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc,
+ coro_arg_start + ret_2_or_0 + arg_i, "");
LLVMBuildStore(g->builder, gen_param_values.at(arg_i), arg_ptr);
}
- ZigLLVMBuildCall(g->builder, fn_val, &result_loc, 1, llvm_cc, fn_inline, "");
+ }
+ if (instruction->is_async) {
+ ZigLLVMBuildCall(g->builder, fn_val, &frame_result_loc, 1, llvm_cc, fn_inline, "");
+ return nullptr;
+ } else if (callee_is_async) {
+ LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_resume_index_index, "");
+ LLVMValueRef new_resume_index = LLVMConstInt(g->builtin_types.entry_usize->llvm_type,
+ instruction->resume_block->resume_index, false);
+ LLVMBuildStore(g->builder, new_resume_index, resume_index_ptr);
+
+ LLVMValueRef call_inst = ZigLLVMBuildCall(g->builder, fn_val, &frame_result_loc, 1, llvm_cc, fn_inline, "");
+ ZigLLVMSetTailCall(call_inst);
+ LLVMBuildRet(g->builder, call_inst);
+
+ LLVMPositionBuilderAtEnd(g->builder, instruction->resume_block->llvm_block);
return nullptr;
}
@@ -6174,7 +6221,7 @@ static void do_code_gen(CodeGen *g) {
clear_debug_source_node(g);
bool is_async = fn_is_async(fn_table_entry);
- size_t async_var_index = coro_arg_start + (type_has_bits(fn_type_id->return_type) ? 1 : 0);
+ size_t async_var_index = coro_arg_start + (type_has_bits(fn_type_id->return_type) ? 2 : 0);
if (want_sret || is_async) {
g->cur_ret_ptr = LLVMGetParam(fn, 0);
@@ -6385,8 +6432,9 @@ static void do_code_gen(CodeGen *g) {
LLVMAddCase(switch_instr, one, get_size_block);
for (size_t resume_i = 0; resume_i < fn_table_entry->resume_blocks.length; resume_i += 1) {
- LLVMValueRef case_value = LLVMConstInt(usize_type_ref, resume_i + 2, false);
- LLVMAddCase(switch_instr, case_value, fn_table_entry->resume_blocks.at(resume_i)->llvm_block);
+ IrBasicBlock *resume_block = fn_table_entry->resume_blocks.at(resume_i);
+ LLVMValueRef case_value = LLVMConstInt(usize_type_ref, resume_block->resume_index, false);
+ LLVMAddCase(switch_instr, case_value, resume_block->llvm_block);
}
} else {
// create debug variable declarations for parameters
diff --git a/src/ir.cpp b/src/ir.cpp
index 0cc68eaa55..cb4a90c310 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -1385,7 +1385,7 @@ static IrInstruction *ir_build_call_src(IrBuilder *irb, Scope *scope, AstNode *s
return &call_instruction->base;
}
-static IrInstruction *ir_build_call_gen(IrAnalyze *ira, IrInstruction *source_instruction,
+static IrInstructionCallGen *ir_build_call_gen(IrAnalyze *ira, IrInstruction *source_instruction,
ZigFn *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args,
FnInline fn_inline, bool is_async, IrInstruction *new_stack,
IrInstruction *result_loc, ZigType *return_type)
@@ -1408,7 +1408,7 @@ static IrInstruction *ir_build_call_gen(IrAnalyze *ira, IrInstruction *source_in
if (new_stack != nullptr) ir_ref_instruction(new_stack, ira->new_irb.current_basic_block);
if (result_loc != nullptr) ir_ref_instruction(result_loc, ira->new_irb.current_basic_block);
- return &call_instruction->base;
+ return call_instruction;
}
static IrInstruction *ir_build_phi(IrBuilder *irb, Scope *scope, AstNode *source_node,
@@ -14650,8 +14650,8 @@ static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc
if (result_loc != nullptr && (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc))) {
return result_loc;
}
- return ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref, arg_count,
- casted_args, FnInlineAuto, true, nullptr, result_loc, frame_type);
+ return &ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref, arg_count,
+ casted_args, FnInlineAuto, true, nullptr, result_loc, frame_type)->base;
}
static bool ir_analyze_fn_call_inline_arg(IrAnalyze *ira, AstNode *fn_proto_node,
IrInstruction *arg, Scope **exec_scope, size_t *next_proto_i)
@@ -15387,15 +15387,16 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
if (impl_fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) {
parent_fn_entry->inferred_async_node = fn_ref->source_node;
}
- parent_fn_entry->call_list.append({call_instruction->base.source_node, impl_fn});
}
- IrInstruction *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base,
+ IrInstructionCallGen *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base,
impl_fn, nullptr, impl_param_count, casted_args, fn_inline,
call_instruction->is_async, casted_new_stack, result_loc,
impl_fn_type_id->return_type);
- return ir_finish_anal(ira, new_call_instruction);
+ parent_fn_entry->call_list.append(new_call_instruction);
+
+ return ir_finish_anal(ira, &new_call_instruction->base);
}
ZigFn *parent_fn_entry = exec_fn_entry(ira->new_irb.exec);
@@ -15469,9 +15470,6 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
if (fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) {
parent_fn_entry->inferred_async_node = fn_ref->source_node;
}
- if (fn_entry != nullptr) {
- parent_fn_entry->call_list.append({call_instruction->base.source_node, fn_entry});
- }
}
if (call_instruction->is_async) {
@@ -15491,10 +15489,11 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
result_loc = nullptr;
}
- IrInstruction *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref,
+ IrInstructionCallGen *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref,
call_param_count, casted_args, fn_inline, false, casted_new_stack,
result_loc, return_type);
- return ir_finish_anal(ira, new_call_instruction);
+ parent_fn_entry->call_list.append(new_call_instruction);
+ return ir_finish_anal(ira, &new_call_instruction->base);
}
static IrInstruction *ir_analyze_instruction_call(IrAnalyze *ira, IrInstructionCallSrc *call_instruction) {
@@ -24154,8 +24153,7 @@ static IrInstruction *ir_analyze_instruction_suspend_br(IrAnalyze *ira, IrInstru
ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
ir_assert(fn_entry != nullptr, &instruction->base);
- // +2 - one for the GetSize block, one for the Entry block, resume blocks are indexed after that.
- new_bb->resume_index = fn_entry->resume_blocks.length + 2;
+ new_bb->resume_index = fn_entry->resume_blocks.length + coro_extra_resume_block_count;
fn_entry->resume_blocks.append(new_bb);
if (fn_entry->inferred_async_node == nullptr) {
diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp
index c51c9e1a50..b52edabe65 100644
--- a/src/zig_llvm.cpp
+++ b/src/zig_llvm.cpp
@@ -898,6 +898,10 @@ LLVMValueRef ZigLLVMBuildAShrExact(LLVMBuilderRef builder, LLVMValueRef LHS, LLV
return wrap(unwrap(builder)->CreateAShr(unwrap(LHS), unwrap(RHS), name, true));
}
+void ZigLLVMSetTailCall(LLVMValueRef Call) {
+ unwrap(Call)->setTailCallKind(CallInst::TCK_MustTail);
+}
+
class MyOStream: public raw_ostream {
public:
diff --git a/src/zig_llvm.h b/src/zig_llvm.h
index 8b7b0775f7..2a2ab567a6 100644
--- a/src/zig_llvm.h
+++ b/src/zig_llvm.h
@@ -211,6 +211,7 @@ ZIG_EXTERN_C LLVMValueRef ZigLLVMInsertDeclare(struct ZigLLVMDIBuilder *dibuilde
ZIG_EXTERN_C struct ZigLLVMDILocation *ZigLLVMGetDebugLoc(unsigned line, unsigned col, struct ZigLLVMDIScope *scope);
ZIG_EXTERN_C void ZigLLVMSetFastMath(LLVMBuilderRef builder_wrapped, bool on_state);
+ZIG_EXTERN_C void ZigLLVMSetTailCall(LLVMValueRef Call);
ZIG_EXTERN_C void ZigLLVMAddFunctionAttr(LLVMValueRef fn, const char *attr_name, const char *attr_value);
ZIG_EXTERN_C void ZigLLVMAddFunctionAttrCold(LLVMValueRef fn);
diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig
index 4f1cc84064..7188e7af8c 100644
--- a/test/stage1/behavior/coroutines.zig
+++ b/test/stage1/behavior/coroutines.zig
@@ -77,6 +77,22 @@ test "local variable in async function" {
S.doTheTest();
}
+test "calling an inferred async function" {
+ const S = struct {
+ fn doTheTest() void {
+ const p = async first();
+ }
+
+ fn first() void {
+ other();
+ }
+ fn other() void {
+ suspend;
+ }
+ };
+ S.doTheTest();
+}
+
//test "coroutine suspend, resume" {
// seq('a');
// const p = try async testAsyncSeq();
--
cgit v1.2.3
From 70bced5dcffccc2f8029d8c3d7f2d18b48d993f5 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 25 Jul 2019 01:47:56 -0400
Subject: implement `@frame` and `@Frame`
---
BRANCH_TODO | 4 +++
src/all_types.hpp | 16 ++++++---
src/codegen.cpp | 16 ++++-----
src/ir.cpp | 71 ++++++++++++++++++++++++++-----------
src/ir_print.cpp | 17 ++++++---
test/stage1/behavior/coroutines.zig | 8 +++++
6 files changed, 96 insertions(+), 36 deletions(-)
create mode 100644 BRANCH_TODO
(limited to 'src/ir.cpp')
diff --git a/BRANCH_TODO b/BRANCH_TODO
new file mode 100644
index 0000000000..a22620a626
--- /dev/null
+++ b/BRANCH_TODO
@@ -0,0 +1,4 @@
+ * await
+ * await of a non async function
+ * async call on a non async function
+ * safety for resuming when it is awaiting
diff --git a/src/all_types.hpp b/src/all_types.hpp
index d67356b178..ebdde4642e 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -1435,8 +1435,6 @@ enum BuiltinFnId {
BuiltinFnIdErrName,
BuiltinFnIdBreakpoint,
BuiltinFnIdReturnAddress,
- BuiltinFnIdFrameAddress,
- BuiltinFnIdHandle,
BuiltinFnIdEmbedFile,
BuiltinFnIdCmpxchgWeak,
BuiltinFnIdCmpxchgStrong,
@@ -1507,6 +1505,9 @@ enum BuiltinFnId {
BuiltinFnIdAtomicLoad,
BuiltinFnIdHasDecl,
BuiltinFnIdUnionInit,
+ BuiltinFnIdFrameAddress,
+ BuiltinFnIdFrameType,
+ BuiltinFnIdFrameHandle,
};
struct BuiltinFnEntry {
@@ -2252,7 +2253,8 @@ enum IrInstructionId {
IrInstructionIdBreakpoint,
IrInstructionIdReturnAddress,
IrInstructionIdFrameAddress,
- IrInstructionIdHandle,
+ IrInstructionIdFrameHandle,
+ IrInstructionIdFrameType,
IrInstructionIdAlignOf,
IrInstructionIdOverflowOp,
IrInstructionIdTestErrSrc,
@@ -3038,8 +3040,14 @@ struct IrInstructionFrameAddress {
IrInstruction base;
};
-struct IrInstructionHandle {
+struct IrInstructionFrameHandle {
+ IrInstruction base;
+};
+
+struct IrInstructionFrameType {
IrInstruction base;
+
+ IrInstruction *fn;
};
enum IrOverflowOp {
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 34f4aa1cc4..d6f19d6a43 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -4457,10 +4457,8 @@ static LLVMValueRef ir_render_frame_address(CodeGen *g, IrExecutable *executable
return LLVMBuildPtrToInt(g->builder, ptr_val, g->builtin_types.entry_usize->llvm_type, "");
}
-static LLVMValueRef ir_render_handle(CodeGen *g, IrExecutable *executable,
- IrInstructionHandle *instruction)
-{
- zig_panic("TODO @handle() codegen");
+static LLVMValueRef ir_render_handle(CodeGen *g, IrExecutable *executable, IrInstructionFrameHandle *instruction) {
+ return g->cur_ret_ptr;
}
static LLVMValueRef render_shl_with_overflow(CodeGen *g, IrInstructionOverflowOp *instruction) {
@@ -5008,6 +5006,7 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdBitCastSrc:
case IrInstructionIdTestErrSrc:
case IrInstructionIdUnionInitNamedField:
+ case IrInstructionIdFrameType:
zig_unreachable();
case IrInstructionIdDeclVarGen:
@@ -5086,8 +5085,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_return_address(g, executable, (IrInstructionReturnAddress *)instruction);
case IrInstructionIdFrameAddress:
return ir_render_frame_address(g, executable, (IrInstructionFrameAddress *)instruction);
- case IrInstructionIdHandle:
- return ir_render_handle(g, executable, (IrInstructionHandle *)instruction);
+ case IrInstructionIdFrameHandle:
+ return ir_render_handle(g, executable, (IrInstructionFrameHandle *)instruction);
case IrInstructionIdOverflowOp:
return ir_render_overflow_op(g, executable, (IrInstructionOverflowOp *)instruction);
case IrInstructionIdTestErrGen:
@@ -6754,8 +6753,6 @@ static BuiltinFnEntry *create_builtin_fn(CodeGen *g, BuiltinFnId id, const char
static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdBreakpoint, "breakpoint", 0);
create_builtin_fn(g, BuiltinFnIdReturnAddress, "returnAddress", 0);
- create_builtin_fn(g, BuiltinFnIdFrameAddress, "frameAddress", 0);
- create_builtin_fn(g, BuiltinFnIdHandle, "handle", 0);
create_builtin_fn(g, BuiltinFnIdMemcpy, "memcpy", 3);
create_builtin_fn(g, BuiltinFnIdMemset, "memset", 3);
create_builtin_fn(g, BuiltinFnIdSizeof, "sizeOf", 1);
@@ -6856,6 +6853,9 @@ static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdThis, "This", 0);
create_builtin_fn(g, BuiltinFnIdHasDecl, "hasDecl", 2);
create_builtin_fn(g, BuiltinFnIdUnionInit, "unionInit", 3);
+ create_builtin_fn(g, BuiltinFnIdFrameHandle, "frame", 0);
+ create_builtin_fn(g, BuiltinFnIdFrameType, "Frame", 1);
+ create_builtin_fn(g, BuiltinFnIdFrameAddress, "frameAddress", 0);
}
static const char *bool_to_str(bool b) {
diff --git a/src/ir.cpp b/src/ir.cpp
index cb4a90c310..93d559b446 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -755,8 +755,12 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionFrameAddress *)
return IrInstructionIdFrameAddress;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionHandle *) {
- return IrInstructionIdHandle;
+static constexpr IrInstructionId ir_instruction_id(IrInstructionFrameHandle *) {
+ return IrInstructionIdFrameHandle;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionFrameType *) {
+ return IrInstructionIdFrameType;
}
static constexpr IrInstructionId ir_instruction_id(IrInstructionAlignOf *) {
@@ -2362,7 +2366,16 @@ static IrInstruction *ir_build_frame_address(IrBuilder *irb, Scope *scope, AstNo
}
static IrInstruction *ir_build_handle(IrBuilder *irb, Scope *scope, AstNode *source_node) {
- IrInstructionHandle *instruction = ir_build_instruction(irb, scope, source_node);
+ IrInstructionFrameHandle *instruction = ir_build_instruction(irb, scope, source_node);
+ return &instruction->base;
+}
+
+static IrInstruction *ir_build_frame_type(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *fn) {
+ IrInstructionFrameType *instruction = ir_build_instruction(irb, scope, source_node);
+ instruction->fn = fn;
+
+ ir_ref_instruction(fn, irb->current_basic_block);
+
return &instruction->base;
}
@@ -3358,11 +3371,6 @@ static ScopeDeferExpr *get_scope_defer_expr(Scope *scope) {
return nullptr;
}
-static bool exec_is_async(IrExecutable *exec) {
- ZigFn *fn_entry = exec_fn_entry(exec);
- return fn_entry != nullptr && fn_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync;
-}
-
static IrInstruction *ir_gen_async_return(IrBuilder *irb, Scope *scope, AstNode *node, IrInstruction *return_value,
bool is_generated_code)
{
@@ -4278,8 +4286,6 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
return irb->codegen->invalid_instruction;
}
- bool is_async = exec_is_async(irb->exec);
-
switch (builtin_fn->id) {
case BuiltinFnIdInvalid:
zig_unreachable();
@@ -4902,16 +4908,21 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
return ir_lval_wrap(irb, scope, ir_build_return_address(irb, scope, node), lval, result_loc);
case BuiltinFnIdFrameAddress:
return ir_lval_wrap(irb, scope, ir_build_frame_address(irb, scope, node), lval, result_loc);
- case BuiltinFnIdHandle:
+ case BuiltinFnIdFrameHandle:
if (!irb->exec->fn_entry) {
add_node_error(irb->codegen, node, buf_sprintf("@handle() called outside of function definition"));
return irb->codegen->invalid_instruction;
}
- if (!is_async) {
- add_node_error(irb->codegen, node, buf_sprintf("@handle() in non-async function"));
- return irb->codegen->invalid_instruction;
- }
return ir_lval_wrap(irb, scope, ir_build_handle(irb, scope, node), lval, result_loc);
+ case BuiltinFnIdFrameType: {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ IrInstruction *frame_type = ir_build_frame_type(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, frame_type, lval, result_loc);
+ }
case BuiltinFnIdAlignOf:
{
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
@@ -21726,8 +21737,25 @@ static IrInstruction *ir_analyze_instruction_frame_address(IrAnalyze *ira, IrIns
return result;
}
-static IrInstruction *ir_analyze_instruction_handle(IrAnalyze *ira, IrInstructionHandle *instruction) {
- zig_panic("TODO anlayze @handle()");
+static IrInstruction *ir_analyze_instruction_frame_handle(IrAnalyze *ira, IrInstructionFrameHandle *instruction) {
+ ZigFn *fn = exec_fn_entry(ira->new_irb.exec);
+ ir_assert(fn != nullptr, &instruction->base);
+
+ ZigType *frame_type = get_coro_frame_type(ira->codegen, fn);
+ ZigType *ptr_frame_type = get_pointer_to_type(ira->codegen, frame_type, false);
+
+ IrInstruction *result = ir_build_handle(&ira->new_irb, instruction->base.scope, instruction->base.source_node);
+ result->value.type = ptr_frame_type;
+ return result;
+}
+
+static IrInstruction *ir_analyze_instruction_frame_type(IrAnalyze *ira, IrInstructionFrameType *instruction) {
+ ZigFn *fn = ir_resolve_fn(ira, instruction->fn->child);
+ if (fn == nullptr)
+ return ira->codegen->invalid_instruction;
+
+ ZigType *ty = get_coro_frame_type(ira->codegen, fn);
+ return ir_const_type(ira, &instruction->base, ty);
}
static IrInstruction *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstructionAlignOf *instruction) {
@@ -24355,8 +24383,10 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_return_address(ira, (IrInstructionReturnAddress *)instruction);
case IrInstructionIdFrameAddress:
return ir_analyze_instruction_frame_address(ira, (IrInstructionFrameAddress *)instruction);
- case IrInstructionIdHandle:
- return ir_analyze_instruction_handle(ira, (IrInstructionHandle *)instruction);
+ case IrInstructionIdFrameHandle:
+ return ir_analyze_instruction_frame_handle(ira, (IrInstructionFrameHandle *)instruction);
+ case IrInstructionIdFrameType:
+ return ir_analyze_instruction_frame_type(ira, (IrInstructionFrameType *)instruction);
case IrInstructionIdAlignOf:
return ir_analyze_instruction_align_of(ira, (IrInstructionAlignOf *)instruction);
case IrInstructionIdOverflowOp:
@@ -24650,7 +24680,8 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdAlignOf:
case IrInstructionIdReturnAddress:
case IrInstructionIdFrameAddress:
- case IrInstructionIdHandle:
+ case IrInstructionIdFrameHandle:
+ case IrInstructionIdFrameType:
case IrInstructionIdTestErrSrc:
case IrInstructionIdTestErrGen:
case IrInstructionIdFnProto:
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index e14647ea82..5b3bba2271 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -906,8 +906,14 @@ static void ir_print_frame_address(IrPrint *irp, IrInstructionFrameAddress *inst
fprintf(irp->f, "@frameAddress()");
}
-static void ir_print_handle(IrPrint *irp, IrInstructionHandle *instruction) {
- fprintf(irp->f, "@handle()");
+static void ir_print_handle(IrPrint *irp, IrInstructionFrameHandle *instruction) {
+ fprintf(irp->f, "@frame()");
+}
+
+static void ir_print_frame_type(IrPrint *irp, IrInstructionFrameType *instruction) {
+ fprintf(irp->f, "@Frame(");
+ ir_print_other_instruction(irp, instruction->fn);
+ fprintf(irp->f, ")");
}
static void ir_print_return_address(IrPrint *irp, IrInstructionReturnAddress *instruction) {
@@ -1764,8 +1770,11 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdFrameAddress:
ir_print_frame_address(irp, (IrInstructionFrameAddress *)instruction);
break;
- case IrInstructionIdHandle:
- ir_print_handle(irp, (IrInstructionHandle *)instruction);
+ case IrInstructionIdFrameHandle:
+ ir_print_handle(irp, (IrInstructionFrameHandle *)instruction);
+ break;
+ case IrInstructionIdFrameType:
+ ir_print_frame_type(irp, (IrInstructionFrameType *)instruction);
break;
case IrInstructionIdAlignOf:
ir_print_align_of(irp, (IrInstructionAlignOf *)instruction);
diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig
index 7188e7af8c..33246f761f 100644
--- a/test/stage1/behavior/coroutines.zig
+++ b/test/stage1/behavior/coroutines.zig
@@ -79,15 +79,23 @@ test "local variable in async function" {
test "calling an inferred async function" {
const S = struct {
+ var x: i32 = 1;
+ var other_frame: *@Frame(other) = undefined;
+
fn doTheTest() void {
const p = async first();
+ expect(x == 1);
+ resume other_frame.*;
+ expect(x == 2);
}
fn first() void {
other();
}
fn other() void {
+ other_frame = @frame();
suspend;
+ x += 1;
}
};
S.doTheTest();
--
cgit v1.2.3
From 538c0cd2250e08aad07784355b402cfae6145507 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 25 Jul 2019 15:05:55 -0400
Subject: implement `@frameSize`
---
BRANCH_TODO | 1 +
src/all_types.hpp | 16 +++++++++
src/codegen.cpp | 13 +++++++
src/ir.cpp | 70 ++++++++++++++++++++++++++++++++++---
src/ir_print.cpp | 20 +++++++++++
test/stage1/behavior/coroutines.zig | 26 ++++++++++++++
6 files changed, 142 insertions(+), 4 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/BRANCH_TODO b/BRANCH_TODO
index a22620a626..6ea57d2173 100644
--- a/BRANCH_TODO
+++ b/BRANCH_TODO
@@ -2,3 +2,4 @@
* await of a non async function
* async call on a non async function
* safety for resuming when it is awaiting
+ * implicit cast of normal function to async function should be allowed when it is inferred to be async
diff --git a/src/all_types.hpp b/src/all_types.hpp
index ebdde4642e..d30b3b8a80 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -1508,6 +1508,7 @@ enum BuiltinFnId {
BuiltinFnIdFrameAddress,
BuiltinFnIdFrameType,
BuiltinFnIdFrameHandle,
+ BuiltinFnIdFrameSize,
};
struct BuiltinFnEntry {
@@ -2255,6 +2256,8 @@ enum IrInstructionId {
IrInstructionIdFrameAddress,
IrInstructionIdFrameHandle,
IrInstructionIdFrameType,
+ IrInstructionIdFrameSizeSrc,
+ IrInstructionIdFrameSizeGen,
IrInstructionIdAlignOf,
IrInstructionIdOverflowOp,
IrInstructionIdTestErrSrc,
@@ -3050,6 +3053,19 @@ struct IrInstructionFrameType {
IrInstruction *fn;
};
+struct IrInstructionFrameSizeSrc {
+ IrInstruction base;
+
+ IrInstruction *fn;
+};
+
+struct IrInstructionFrameSizeGen {
+ IrInstruction base;
+
+ IrInstruction *fn;
+ IrInstruction *frame_ptr;
+};
+
enum IrOverflowOp {
IrOverflowOpAdd,
IrOverflowOpSub,
diff --git a/src/codegen.cpp b/src/codegen.cpp
index d6f19d6a43..6fc152ad3e 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -4914,6 +4914,15 @@ static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable,
return nullptr;
}
+static LLVMValueRef ir_render_frame_size(CodeGen *g, IrExecutable *executable, IrInstructionFrameSizeGen *instruction) {
+ LLVMValueRef fn_val = ir_llvm_value(g, instruction->fn);
+ LLVMValueRef frame_ptr = ir_llvm_value(g, instruction->frame_ptr);
+ LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, frame_ptr, coro_resume_index_index, "");
+ LLVMValueRef one = LLVMConstInt(g->builtin_types.entry_usize->llvm_type, 1, false);
+ LLVMBuildStore(g->builder, one, resume_index_ptr);
+ return ZigLLVMBuildCall(g->builder, fn_val, &frame_ptr, 1, LLVMFastCallConv, ZigLLVM_FnInlineAuto, "");
+}
+
static void set_debug_location(CodeGen *g, IrInstruction *instruction) {
AstNode *source_node = instruction->source_node;
Scope *scope = instruction->scope;
@@ -5007,6 +5016,7 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdTestErrSrc:
case IrInstructionIdUnionInitNamedField:
case IrInstructionIdFrameType:
+ case IrInstructionIdFrameSizeSrc:
zig_unreachable();
case IrInstructionIdDeclVarGen:
@@ -5161,6 +5171,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_suspend_br(g, executable, (IrInstructionSuspendBr *)instruction);
case IrInstructionIdCoroResume:
return ir_render_coro_resume(g, executable, (IrInstructionCoroResume *)instruction);
+ case IrInstructionIdFrameSizeGen:
+ return ir_render_frame_size(g, executable, (IrInstructionFrameSizeGen *)instruction);
}
zig_unreachable();
}
@@ -6856,6 +6868,7 @@ static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdFrameHandle, "frame", 0);
create_builtin_fn(g, BuiltinFnIdFrameType, "Frame", 1);
create_builtin_fn(g, BuiltinFnIdFrameAddress, "frameAddress", 0);
+ create_builtin_fn(g, BuiltinFnIdFrameSize, "frameSize", 1);
}
static const char *bool_to_str(bool b) {
diff --git a/src/ir.cpp b/src/ir.cpp
index 93d559b446..1a62af8ce4 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -763,6 +763,14 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionFrameType *) {
return IrInstructionIdFrameType;
}
+static constexpr IrInstructionId ir_instruction_id(IrInstructionFrameSizeSrc *) {
+ return IrInstructionIdFrameSizeSrc;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionFrameSizeGen *) {
+ return IrInstructionIdFrameSizeGen;
+}
+
static constexpr IrInstructionId ir_instruction_id(IrInstructionAlignOf *) {
return IrInstructionIdAlignOf;
}
@@ -2379,6 +2387,28 @@ static IrInstruction *ir_build_frame_type(IrBuilder *irb, Scope *scope, AstNode
return &instruction->base;
}
+static IrInstruction *ir_build_frame_size_src(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *fn) {
+ IrInstructionFrameSizeSrc *instruction = ir_build_instruction(irb, scope, source_node);
+ instruction->fn = fn;
+
+ ir_ref_instruction(fn, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
+static IrInstruction *ir_build_frame_size_gen(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *fn,
+ IrInstruction *frame_ptr)
+{
+ IrInstructionFrameSizeGen *instruction = ir_build_instruction(irb, scope, source_node);
+ instruction->fn = fn;
+ instruction->frame_ptr = frame_ptr;
+
+ ir_ref_instruction(fn, irb->current_basic_block);
+ ir_ref_instruction(frame_ptr, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
static IrInstruction *ir_build_overflow_op(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrOverflowOp op, IrInstruction *type_value, IrInstruction *op1, IrInstruction *op2,
IrInstruction *result_ptr, ZigType *result_ptr_type)
@@ -4923,6 +4953,15 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
IrInstruction *frame_type = ir_build_frame_type(irb, scope, node, arg0_value);
return ir_lval_wrap(irb, scope, frame_type, lval, result_loc);
}
+ case BuiltinFnIdFrameSize: {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ IrInstruction *frame_size = ir_build_frame_size_src(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, frame_size, lval, result_loc);
+ }
case BuiltinFnIdAlignOf:
{
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
@@ -21758,6 +21797,28 @@ static IrInstruction *ir_analyze_instruction_frame_type(IrAnalyze *ira, IrInstru
return ir_const_type(ira, &instruction->base, ty);
}
+static IrInstruction *ir_analyze_instruction_frame_size(IrAnalyze *ira, IrInstructionFrameSizeSrc *instruction) {
+ IrInstruction *fn = instruction->fn->child;
+ if (type_is_invalid(fn->value.type))
+ return ira->codegen->invalid_instruction;
+
+ if (fn->value.type->id != ZigTypeIdFn) {
+ ir_add_error(ira, fn,
+ buf_sprintf("expected function, found '%s'", buf_ptr(&fn->value.type->name)));
+ return ira->codegen->invalid_instruction;
+ }
+
+ IrInstruction *frame_ptr = ir_resolve_result(ira, &instruction->base, no_result_loc(),
+ ira->codegen->builtin_types.entry_frame_header, nullptr, true, false);
+ if (frame_ptr != nullptr && (type_is_invalid(frame_ptr->value.type) || instr_is_unreachable(frame_ptr)))
+ return frame_ptr;
+
+ IrInstruction *result = ir_build_frame_size_gen(&ira->new_irb, instruction->base.scope,
+ instruction->base.source_node, fn, frame_ptr);
+ result->value.type = ira->codegen->builtin_types.entry_usize;
+ return result;
+}
+
static IrInstruction *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstructionAlignOf *instruction) {
Error err;
IrInstruction *type_value = instruction->type_value->child;
@@ -22348,10 +22409,6 @@ static IrInstruction *ir_analyze_instruction_fn_proto(IrAnalyze *ira, IrInstruct
return ira->codegen->invalid_instruction;
}
- if (fn_type_id.cc == CallingConventionAsync) {
- zig_panic("TODO");
- }
-
return ir_const_type(ira, &instruction->base, get_fn_type(ira->codegen, &fn_type_id));
}
@@ -24237,6 +24294,7 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
case IrInstructionIdSliceGen:
case IrInstructionIdRefGen:
case IrInstructionIdTestErrGen:
+ case IrInstructionIdFrameSizeGen:
zig_unreachable();
case IrInstructionIdReturn:
@@ -24387,6 +24445,8 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_frame_handle(ira, (IrInstructionFrameHandle *)instruction);
case IrInstructionIdFrameType:
return ir_analyze_instruction_frame_type(ira, (IrInstructionFrameType *)instruction);
+ case IrInstructionIdFrameSizeSrc:
+ return ir_analyze_instruction_frame_size(ira, (IrInstructionFrameSizeSrc *)instruction);
case IrInstructionIdAlignOf:
return ir_analyze_instruction_align_of(ira, (IrInstructionAlignOf *)instruction);
case IrInstructionIdOverflowOp:
@@ -24682,6 +24742,8 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdFrameAddress:
case IrInstructionIdFrameHandle:
case IrInstructionIdFrameType:
+ case IrInstructionIdFrameSizeSrc:
+ case IrInstructionIdFrameSizeGen:
case IrInstructionIdTestErrSrc:
case IrInstructionIdTestErrGen:
case IrInstructionIdFnProto:
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index 5b3bba2271..7e903ed662 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -916,6 +916,20 @@ static void ir_print_frame_type(IrPrint *irp, IrInstructionFrameType *instructio
fprintf(irp->f, ")");
}
+static void ir_print_frame_size_src(IrPrint *irp, IrInstructionFrameSizeSrc *instruction) {
+ fprintf(irp->f, "@frameSize(");
+ ir_print_other_instruction(irp, instruction->fn);
+ fprintf(irp->f, ")");
+}
+
+static void ir_print_frame_size_gen(IrPrint *irp, IrInstructionFrameSizeGen *instruction) {
+ fprintf(irp->f, "@frameSize(");
+ ir_print_other_instruction(irp, instruction->fn);
+ fprintf(irp->f, ",");
+ ir_print_other_instruction(irp, instruction->frame_ptr);
+ fprintf(irp->f, ")");
+}
+
static void ir_print_return_address(IrPrint *irp, IrInstructionReturnAddress *instruction) {
fprintf(irp->f, "@returnAddress()");
}
@@ -1776,6 +1790,12 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdFrameType:
ir_print_frame_type(irp, (IrInstructionFrameType *)instruction);
break;
+ case IrInstructionIdFrameSizeSrc:
+ ir_print_frame_size_src(irp, (IrInstructionFrameSizeSrc *)instruction);
+ break;
+ case IrInstructionIdFrameSizeGen:
+ ir_print_frame_size_gen(irp, (IrInstructionFrameSizeGen *)instruction);
+ break;
case IrInstructionIdAlignOf:
ir_print_align_of(irp, (IrInstructionAlignOf *)instruction);
break;
diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig
index 33246f761f..7af04d37c9 100644
--- a/test/stage1/behavior/coroutines.zig
+++ b/test/stage1/behavior/coroutines.zig
@@ -101,6 +101,32 @@ test "calling an inferred async function" {
S.doTheTest();
}
+test "@frameSize" {
+ const S = struct {
+ fn doTheTest() void {
+ {
+ var ptr = @ptrCast(async fn(i32) void, other);
+ const size = @frameSize(ptr);
+ expect(size == @sizeOf(@Frame(other)));
+ }
+ {
+ var ptr = @ptrCast(async fn() void, first);
+ const size = @frameSize(ptr);
+ expect(size == @sizeOf(@Frame(first)));
+ }
+ }
+
+ fn first() void {
+ other(1);
+ }
+ fn other(param: i32) void {
+ var local: i32 = undefined;
+ suspend;
+ }
+ };
+ S.doTheTest();
+}
+
//test "coroutine suspend, resume" {
// seq('a');
// const p = try async testAsyncSeq();
--
cgit v1.2.3
From 7b3686861f87d006da817db98f7d3b13fada9815 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 25 Jul 2019 22:24:01 -0400
Subject: `@frameSize` works via PrefixData
---
BRANCH_TODO | 9 +++++++++
src/all_types.hpp | 1 -
src/codegen.cpp | 21 +++++++++++++--------
src/ir.cpp | 12 ++----------
src/ir_print.cpp | 2 --
src/zig_llvm.cpp | 6 +++++-
src/zig_llvm.h | 1 +
7 files changed, 30 insertions(+), 22 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/BRANCH_TODO b/BRANCH_TODO
index 6ea57d2173..d10bc704d8 100644
--- a/BRANCH_TODO
+++ b/BRANCH_TODO
@@ -1,5 +1,14 @@
+ * reimplement @frameSize with Prefix Data
+ * reimplement with function splitting rather than switch
+ * add the `anyframe` type and `anyframe->T`
* await
* await of a non async function
+ * await in single-threaded mode
* async call on a non async function
+ * @asyncCall with an async function pointer
+ * cancel
+ * defer and errdefer
* safety for resuming when it is awaiting
* implicit cast of normal function to async function should be allowed when it is inferred to be async
+ * go over the commented out tests
+ * revive std.event.Loop
diff --git a/src/all_types.hpp b/src/all_types.hpp
index d30b3b8a80..6ee3a6b937 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -3063,7 +3063,6 @@ struct IrInstructionFrameSizeGen {
IrInstruction base;
IrInstruction *fn;
- IrInstruction *frame_ptr;
};
enum IrOverflowOp {
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 6fc152ad3e..4343006b17 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -4914,13 +4914,16 @@ static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable,
return nullptr;
}
-static LLVMValueRef ir_render_frame_size(CodeGen *g, IrExecutable *executable, IrInstructionFrameSizeGen *instruction) {
+static LLVMValueRef ir_render_frame_size(CodeGen *g, IrExecutable *executable,
+ IrInstructionFrameSizeGen *instruction)
+{
+ LLVMTypeRef usize_llvm_type = g->builtin_types.entry_usize->llvm_type;
+ LLVMTypeRef ptr_usize_llvm_type = LLVMPointerType(usize_llvm_type, 0);
LLVMValueRef fn_val = ir_llvm_value(g, instruction->fn);
- LLVMValueRef frame_ptr = ir_llvm_value(g, instruction->frame_ptr);
- LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, frame_ptr, coro_resume_index_index, "");
- LLVMValueRef one = LLVMConstInt(g->builtin_types.entry_usize->llvm_type, 1, false);
- LLVMBuildStore(g->builder, one, resume_index_ptr);
- return ZigLLVMBuildCall(g->builder, fn_val, &frame_ptr, 1, LLVMFastCallConv, ZigLLVM_FnInlineAuto, "");
+ LLVMValueRef casted_fn_val = LLVMBuildBitCast(g->builder, fn_val, ptr_usize_llvm_type, "");
+ LLVMValueRef negative_one = LLVMConstInt(LLVMInt32Type(), -1, true);
+ LLVMValueRef prefix_ptr = LLVMBuildInBoundsGEP(g->builder, casted_fn_val, &negative_one, 1, "");
+ return LLVMBuildLoad(g->builder, prefix_ptr, "");
}
static void set_debug_location(CodeGen *g, IrInstruction *instruction) {
@@ -6409,13 +6412,16 @@ static void do_code_gen(CodeGen *g) {
}
if (is_async) {
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
+ LLVMValueRef size_val = LLVMConstInt(usize_type_ref, fn_table_entry->frame_type->abi_size, false);
+ ZigLLVMFunctionSetPrefixData(fn_table_entry->llvm_value, size_val);
+
if (!g->strip_debug_symbols) {
AstNode *source_node = fn_table_entry->proto_node;
ZigLLVMSetCurrentDebugLocation(g->builder, (int)source_node->line + 1,
(int)source_node->column + 1, get_di_scope(g, fn_table_entry->child_scope));
}
IrExecutable *executable = &fn_table_entry->analyzed_executable;
- LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadResume");
LLVMPositionBuilderAtEnd(g->builder, bad_resume_block);
gen_assertion_scope(g, PanicMsgIdBadResume, fn_table_entry->child_scope);
@@ -6424,7 +6430,6 @@ static void do_code_gen(CodeGen *g) {
LLVMPositionBuilderAtEnd(g->builder, get_size_block);
assert(fn_table_entry->frame_type->abi_size != 0);
assert(fn_table_entry->frame_type->abi_size != SIZE_MAX);
- LLVMValueRef size_val = LLVMConstInt(usize_type_ref, fn_table_entry->frame_type->abi_size, false);
LLVMBuildRet(g->builder, size_val);
LLVMPositionBuilderAtEnd(g->builder, fn_table_entry->preamble_llvm_block);
diff --git a/src/ir.cpp b/src/ir.cpp
index 1a62af8ce4..7a5af347b7 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -2396,15 +2396,12 @@ static IrInstruction *ir_build_frame_size_src(IrBuilder *irb, Scope *scope, AstN
return &instruction->base;
}
-static IrInstruction *ir_build_frame_size_gen(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *fn,
- IrInstruction *frame_ptr)
+static IrInstruction *ir_build_frame_size_gen(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *fn)
{
IrInstructionFrameSizeGen *instruction = ir_build_instruction(irb, scope, source_node);
instruction->fn = fn;
- instruction->frame_ptr = frame_ptr;
ir_ref_instruction(fn, irb->current_basic_block);
- ir_ref_instruction(frame_ptr, irb->current_basic_block);
return &instruction->base;
}
@@ -21808,13 +21805,8 @@ static IrInstruction *ir_analyze_instruction_frame_size(IrAnalyze *ira, IrInstru
return ira->codegen->invalid_instruction;
}
- IrInstruction *frame_ptr = ir_resolve_result(ira, &instruction->base, no_result_loc(),
- ira->codegen->builtin_types.entry_frame_header, nullptr, true, false);
- if (frame_ptr != nullptr && (type_is_invalid(frame_ptr->value.type) || instr_is_unreachable(frame_ptr)))
- return frame_ptr;
-
IrInstruction *result = ir_build_frame_size_gen(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, fn, frame_ptr);
+ instruction->base.source_node, fn);
result->value.type = ira->codegen->builtin_types.entry_usize;
return result;
}
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index 7e903ed662..ae467bdc8c 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -925,8 +925,6 @@ static void ir_print_frame_size_src(IrPrint *irp, IrInstructionFrameSizeSrc *ins
static void ir_print_frame_size_gen(IrPrint *irp, IrInstructionFrameSizeGen *instruction) {
fprintf(irp->f, "@frameSize(");
ir_print_other_instruction(irp, instruction->fn);
- fprintf(irp->f, ",");
- ir_print_other_instruction(irp, instruction->frame_ptr);
fprintf(irp->f, ")");
}
diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp
index b52edabe65..906b278b21 100644
--- a/src/zig_llvm.cpp
+++ b/src/zig_llvm.cpp
@@ -899,9 +899,13 @@ LLVMValueRef ZigLLVMBuildAShrExact(LLVMBuilderRef builder, LLVMValueRef LHS, LLV
}
void ZigLLVMSetTailCall(LLVMValueRef Call) {
- unwrap(Call)->setTailCallKind(CallInst::TCK_MustTail);
+ unwrap(Call)->setTailCallKind(CallInst::TCK_MustTail);
}
+void ZigLLVMFunctionSetPrefixData(LLVMValueRef function, LLVMValueRef data) {
+ unwrap(function)->setPrefixData(unwrap(data));
+}
+
class MyOStream: public raw_ostream {
public:
diff --git a/src/zig_llvm.h b/src/zig_llvm.h
index 2a2ab567a6..2be119ba0c 100644
--- a/src/zig_llvm.h
+++ b/src/zig_llvm.h
@@ -212,6 +212,7 @@ ZIG_EXTERN_C struct ZigLLVMDILocation *ZigLLVMGetDebugLoc(unsigned line, unsigne
ZIG_EXTERN_C void ZigLLVMSetFastMath(LLVMBuilderRef builder_wrapped, bool on_state);
ZIG_EXTERN_C void ZigLLVMSetTailCall(LLVMValueRef Call);
+ZIG_EXTERN_C void ZigLLVMFunctionSetPrefixData(LLVMValueRef fn, LLVMValueRef data);
ZIG_EXTERN_C void ZigLLVMAddFunctionAttr(LLVMValueRef fn, const char *attr_name, const char *attr_value);
ZIG_EXTERN_C void ZigLLVMAddFunctionAttrCold(LLVMValueRef fn);
--
cgit v1.2.3
From ee64a22045ccbc39773779d4e386e25f563c8a90 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 26 Jul 2019 19:52:35 -0400
Subject: add the `anyframe` and `anyframe->T` types
---
BRANCH_TODO | 6 +-
src/all_types.hpp | 21 +++++++
src/analyze.cpp | 111 ++++++++++++++++++++++++++++++++++++-
src/analyze.hpp | 1 +
src/ast_render.cpp | 10 ++++
src/codegen.cpp | 16 +++++-
src/ir.cpp | 86 +++++++++++++++++++++++++++-
src/ir_print.cpp | 12 ++++
src/parser.cpp | 22 +++++++-
src/tokenizer.cpp | 2 +
src/tokenizer.hpp | 1 +
std/hash_map.zig | 1 +
std/testing.zig | 1 +
std/zig/ast.zig | 16 +++---
std/zig/parse.zig | 40 ++++++-------
std/zig/parser_test.zig | 4 +-
std/zig/render.zig | 10 ++--
std/zig/tokenizer.zig | 4 +-
test/stage1/behavior/type_info.zig | 23 +++++++-
19 files changed, 337 insertions(+), 50 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/BRANCH_TODO b/BRANCH_TODO
index d10bc704d8..e2c4fec436 100644
--- a/BRANCH_TODO
+++ b/BRANCH_TODO
@@ -1,6 +1,4 @@
- * reimplement @frameSize with Prefix Data
- * reimplement with function splitting rather than switch
- * add the `anyframe` type and `anyframe->T`
+ * make the anyframe type and anyframe->T type work with resume
* await
* await of a non async function
* await in single-threaded mode
@@ -12,3 +10,5 @@
* implicit cast of normal function to async function should be allowed when it is inferred to be async
* go over the commented out tests
* revive std.event.Loop
+ * reimplement with function splitting rather than switch
+ * @typeInfo for @Frame(func)
diff --git a/src/all_types.hpp b/src/all_types.hpp
index c9bdfabb0d..1096feade0 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -479,6 +479,7 @@ enum NodeType {
NodeTypeResume,
NodeTypeAwaitExpr,
NodeTypeSuspend,
+ NodeTypeAnyFrameType,
NodeTypeEnumLiteral,
};
@@ -936,6 +937,10 @@ struct AstNodeSuspend {
AstNode *block;
};
+struct AstNodeAnyFrameType {
+ AstNode *payload_type; // can be NULL
+};
+
struct AstNodeEnumLiteral {
Token *period;
Token *identifier;
@@ -1001,6 +1006,7 @@ struct AstNode {
AstNodeResumeExpr resume_expr;
AstNodeAwaitExpr await_expr;
AstNodeSuspend suspend;
+ AstNodeAnyFrameType anyframe_type;
AstNodeEnumLiteral enum_literal;
} data;
};
@@ -1253,6 +1259,7 @@ enum ZigTypeId {
ZigTypeIdArgTuple,
ZigTypeIdOpaque,
ZigTypeIdCoroFrame,
+ ZigTypeIdAnyFrame,
ZigTypeIdVector,
ZigTypeIdEnumLiteral,
};
@@ -1272,6 +1279,10 @@ struct ZigTypeCoroFrame {
ZigType *locals_struct;
};
+struct ZigTypeAnyFrame {
+ ZigType *result_type; // null if `anyframe` instead of `anyframe->T`
+};
+
struct ZigType {
ZigTypeId id;
Buf name;
@@ -1298,11 +1309,13 @@ struct ZigType {
ZigTypeVector vector;
ZigTypeOpaque opaque;
ZigTypeCoroFrame frame;
+ ZigTypeAnyFrame any_frame;
} data;
// use these fields to make sure we don't duplicate type table entries for the same type
ZigType *pointer_parent[2]; // [0 - mut, 1 - const]
ZigType *optional_parent;
+ ZigType *any_frame_parent;
// If we generate a constant name value for this type, we memoize it here.
// The type of this is array
ConstExprValue *cached_const_name_val;
@@ -1781,6 +1794,7 @@ struct CodeGen {
ZigType *entry_arg_tuple;
ZigType *entry_enum_literal;
ZigType *entry_frame_header;
+ ZigType *entry_any_frame;
} builtin_types;
ZigType *align_amt_type;
ZigType *stack_trace_type;
@@ -2208,6 +2222,7 @@ enum IrInstructionId {
IrInstructionIdSetRuntimeSafety,
IrInstructionIdSetFloatMode,
IrInstructionIdArrayType,
+ IrInstructionIdAnyFrameType,
IrInstructionIdSliceType,
IrInstructionIdGlobalAsm,
IrInstructionIdAsm,
@@ -2709,6 +2724,12 @@ struct IrInstructionPtrType {
bool is_allow_zero;
};
+struct IrInstructionAnyFrameType {
+ IrInstruction base;
+
+ IrInstruction *payload_type;
+};
+
struct IrInstructionSliceType {
IrInstruction base;
diff --git a/src/analyze.cpp b/src/analyze.cpp
index e1fedab7cf..e47be8f14c 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -256,6 +256,7 @@ AstNode *type_decl_node(ZigType *type_entry) {
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdVector:
+ case ZigTypeIdAnyFrame:
return nullptr;
}
zig_unreachable();
@@ -322,6 +323,7 @@ bool type_is_resolved(ZigType *type_entry, ResolveStatus status) {
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdVector:
+ case ZigTypeIdAnyFrame:
return true;
}
zig_unreachable();
@@ -354,6 +356,31 @@ ZigType *get_smallest_unsigned_int_type(CodeGen *g, uint64_t x) {
return get_int_type(g, false, bits_needed_for_unsigned(x));
}
+ZigType *get_any_frame_type(CodeGen *g, ZigType *result_type) {
+ if (result_type != nullptr && result_type->any_frame_parent != nullptr) {
+ return result_type->any_frame_parent;
+ } else if (result_type == nullptr && g->builtin_types.entry_any_frame != nullptr) {
+ return g->builtin_types.entry_any_frame;
+ }
+
+ ZigType *entry = new_type_table_entry(ZigTypeIdAnyFrame);
+ entry->abi_size = g->builtin_types.entry_usize->abi_size;
+ entry->size_in_bits = g->builtin_types.entry_usize->size_in_bits;
+ entry->abi_align = g->builtin_types.entry_usize->abi_align;
+ entry->data.any_frame.result_type = result_type;
+ buf_init_from_str(&entry->name, "anyframe");
+ if (result_type != nullptr) {
+ buf_appendf(&entry->name, "->%s", buf_ptr(&result_type->name));
+ }
+
+ if (result_type != nullptr) {
+ result_type->any_frame_parent = entry;
+ } else if (result_type == nullptr) {
+ g->builtin_types.entry_any_frame = entry;
+ }
+ return entry;
+}
+
static const char *ptr_len_to_star_str(PtrLen ptr_len) {
switch (ptr_len) {
case PtrLenSingle:
@@ -1080,6 +1107,7 @@ static Error emit_error_unless_type_allowed_in_packed_struct(CodeGen *g, ZigType
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
add_node_error(g, source_node,
buf_sprintf("type '%s' not allowed in packed struct; no guaranteed in-memory representation",
buf_ptr(&type_entry->name)));
@@ -1169,6 +1197,7 @@ bool type_allowed_in_extern(CodeGen *g, ZigType *type_entry) {
case ZigTypeIdArgTuple:
case ZigTypeIdVoid:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
return false;
case ZigTypeIdOpaque:
case ZigTypeIdUnreachable:
@@ -1340,6 +1369,7 @@ static ZigType *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_sc
case ZigTypeIdFn:
case ZigTypeIdVector:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
switch (type_requires_comptime(g, type_entry)) {
case ReqCompTimeNo:
break;
@@ -1436,6 +1466,7 @@ static ZigType *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_sc
case ZigTypeIdFn:
case ZigTypeIdVector:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
switch (type_requires_comptime(g, fn_type_id.return_type)) {
case ReqCompTimeInvalid:
return g->builtin_types.entry_invalid;
@@ -2997,6 +3028,7 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) {
case NodeTypeAwaitExpr:
case NodeTypeSuspend:
case NodeTypeEnumLiteral:
+ case NodeTypeAnyFrameType:
zig_unreachable();
}
}
@@ -3049,6 +3081,7 @@ ZigType *validate_var_type(CodeGen *g, AstNode *source_node, ZigType *type_entry
case ZigTypeIdBoundFn:
case ZigTypeIdVector:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
return type_entry;
}
zig_unreachable();
@@ -3550,6 +3583,7 @@ bool is_container(ZigType *type_entry) {
case ZigTypeIdOpaque:
case ZigTypeIdVector:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
return false;
}
zig_unreachable();
@@ -3607,6 +3641,7 @@ Error resolve_container_type(CodeGen *g, ZigType *type_entry) {
case ZigTypeIdOpaque:
case ZigTypeIdVector:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
zig_unreachable();
}
zig_unreachable();
@@ -3615,11 +3650,13 @@ Error resolve_container_type(CodeGen *g, ZigType *type_entry) {
ZigType *get_src_ptr_type(ZigType *type) {
if (type->id == ZigTypeIdPointer) return type;
if (type->id == ZigTypeIdFn) return type;
+ if (type->id == ZigTypeIdAnyFrame) return type;
if (type->id == ZigTypeIdOptional) {
if (type->data.maybe.child_type->id == ZigTypeIdPointer) {
return type->data.maybe.child_type->data.pointer.allow_zero ? nullptr : type->data.maybe.child_type;
}
if (type->data.maybe.child_type->id == ZigTypeIdFn) return type->data.maybe.child_type;
+ if (type->data.maybe.child_type->id == ZigTypeIdAnyFrame) return type->data.maybe.child_type;
}
return nullptr;
}
@@ -3635,6 +3672,13 @@ bool type_is_nonnull_ptr(ZigType *type) {
return get_codegen_ptr_type(type) == type && !ptr_allows_addr_zero(type);
}
+static uint32_t get_coro_frame_align_bytes(CodeGen *g) {
+ uint32_t a = g->pointer_size_bytes * 2;
+ // promises have at least alignment 8 so that we can have 3 extra bits when doing atomicrmw
+ if (a < 8) a = 8;
+ return a;
+}
+
uint32_t get_ptr_align(CodeGen *g, ZigType *type) {
ZigType *ptr_type = get_src_ptr_type(type);
if (ptr_type->id == ZigTypeIdPointer) {
@@ -3646,6 +3690,8 @@ uint32_t get_ptr_align(CodeGen *g, ZigType *type) {
// when getting the alignment of `?extern fn() void`.
// See http://lists.llvm.org/pipermail/llvm-dev/2018-September/126142.html
return (ptr_type->data.fn.fn_type_id.alignment == 0) ? 1 : ptr_type->data.fn.fn_type_id.alignment;
+ } else if (ptr_type->id == ZigTypeIdAnyFrame) {
+ return get_coro_frame_align_bytes(g);
} else {
zig_unreachable();
}
@@ -3657,6 +3703,8 @@ bool get_ptr_const(ZigType *type) {
return ptr_type->data.pointer.is_const;
} else if (ptr_type->id == ZigTypeIdFn) {
return true;
+ } else if (ptr_type->id == ZigTypeIdAnyFrame) {
+ return true;
} else {
zig_unreachable();
}
@@ -4153,6 +4201,7 @@ bool handle_is_ptr(ZigType *type_entry) {
case ZigTypeIdFn:
case ZigTypeIdEnum:
case ZigTypeIdVector:
+ case ZigTypeIdAnyFrame:
return false;
case ZigTypeIdArray:
case ZigTypeIdStruct:
@@ -4404,6 +4453,9 @@ static uint32_t hash_const_val(ConstExprValue *const_val) {
case ZigTypeIdCoroFrame:
// TODO better hashing algorithm
return 675741936;
+ case ZigTypeIdAnyFrame:
+ // TODO better hashing algorithm
+ return 3747294894;
case ZigTypeIdBoundFn:
case ZigTypeIdInvalid:
case ZigTypeIdUnreachable:
@@ -4469,6 +4521,7 @@ static bool can_mutate_comptime_var_state(ConstExprValue *value) {
case ZigTypeIdErrorSet:
case ZigTypeIdEnum:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
return false;
case ZigTypeIdPointer:
@@ -4541,6 +4594,7 @@ static bool return_type_is_cacheable(ZigType *return_type) {
case ZigTypeIdPointer:
case ZigTypeIdVector:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
return true;
case ZigTypeIdArray:
@@ -4673,6 +4727,7 @@ OnePossibleValue type_has_one_possible_value(CodeGen *g, ZigType *type_entry) {
case ZigTypeIdFloat:
case ZigTypeIdErrorUnion:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
return OnePossibleValueNo;
case ZigTypeIdUndefined:
case ZigTypeIdNull:
@@ -4761,6 +4816,7 @@ ReqCompTime type_requires_comptime(CodeGen *g, ZigType *type_entry) {
case ZigTypeIdVoid:
case ZigTypeIdUnreachable:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
return ReqCompTimeNo;
}
zig_unreachable();
@@ -5433,6 +5489,8 @@ bool const_values_equal(CodeGen *g, ConstExprValue *a, ConstExprValue *b) {
return true;
case ZigTypeIdCoroFrame:
zig_panic("TODO");
+ case ZigTypeIdAnyFrame:
+ zig_panic("TODO");
case ZigTypeIdUndefined:
zig_panic("TODO");
case ZigTypeIdNull:
@@ -5786,7 +5844,11 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) {
return;
}
case ZigTypeIdCoroFrame:
- buf_appendf(buf, "(TODO: coroutine frame value)");
+ buf_appendf(buf, "(TODO: async function frame value)");
+ return;
+
+ case ZigTypeIdAnyFrame:
+ buf_appendf(buf, "(TODO: anyframe value)");
return;
}
@@ -5836,6 +5898,7 @@ uint32_t type_id_hash(TypeId x) {
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
zig_unreachable();
case ZigTypeIdErrorUnion:
return hash_ptr(x.data.error_union.err_set_type) ^ hash_ptr(x.data.error_union.payload_type);
@@ -5885,6 +5948,7 @@ bool type_id_eql(TypeId a, TypeId b) {
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
zig_unreachable();
case ZigTypeIdErrorUnion:
return a.data.error_union.err_set_type == b.data.error_union.err_set_type &&
@@ -6051,6 +6115,7 @@ static const ZigTypeId all_type_ids[] = {
ZigTypeIdArgTuple,
ZigTypeIdOpaque,
ZigTypeIdCoroFrame,
+ ZigTypeIdAnyFrame,
ZigTypeIdVector,
ZigTypeIdEnumLiteral,
};
@@ -6116,10 +6181,12 @@ size_t type_id_index(ZigType *entry) {
return 21;
case ZigTypeIdCoroFrame:
return 22;
- case ZigTypeIdVector:
+ case ZigTypeIdAnyFrame:
return 23;
- case ZigTypeIdEnumLiteral:
+ case ZigTypeIdVector:
return 24;
+ case ZigTypeIdEnumLiteral:
+ return 25;
}
zig_unreachable();
}
@@ -6178,6 +6245,8 @@ const char *type_id_name(ZigTypeId id) {
return "Vector";
case ZigTypeIdCoroFrame:
return "Frame";
+ case ZigTypeIdAnyFrame:
+ return "AnyFrame";
}
zig_unreachable();
}
@@ -7398,6 +7467,40 @@ static void resolve_llvm_types_coro_frame(CodeGen *g, ZigType *frame_type, Resol
frame_type->llvm_di_type = frame_type->data.frame.locals_struct->llvm_di_type;
}
+static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, ResolveStatus wanted_resolve_status) {
+ if (any_frame_type->llvm_di_type != nullptr) return;
+
+ ZigType *result_type = any_frame_type->data.any_frame.result_type;
+ Buf *name = buf_sprintf("(%s header)", buf_ptr(&any_frame_type->name));
+
+ ZigType *frame_header_type;
+ if (result_type == nullptr || !type_has_bits(result_type)) {
+ const char *field_names[] = {"resume_index", "fn_ptr", "awaiter"};
+ ZigType *field_types[] = {
+ g->builtin_types.entry_usize,
+ g->builtin_types.entry_usize,
+ g->builtin_types.entry_usize,
+ };
+ frame_header_type = get_struct_type(g, buf_ptr(name), field_names, field_types, 3);
+ } else {
+ ZigType *ptr_result_type = get_pointer_to_type(g, result_type, false);
+
+ const char *field_names[] = {"resume_index", "fn_ptr", "awaiter", "result_ptr", "result"};
+ ZigType *field_types[] = {
+ g->builtin_types.entry_usize,
+ g->builtin_types.entry_usize,
+ g->builtin_types.entry_usize,
+ ptr_result_type,
+ result_type,
+ };
+ frame_header_type = get_struct_type(g, buf_ptr(name), field_names, field_types, 5);
+ }
+
+ ZigType *ptr_type = get_pointer_to_type(g, frame_header_type, false);
+ any_frame_type->llvm_type = get_llvm_type(g, ptr_type);
+ any_frame_type->llvm_di_type = get_llvm_di_type(g, ptr_type);
+}
+
static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_resolve_status) {
assert(type->id == ZigTypeIdOpaque || type_is_resolved(type, ResolveStatusSizeKnown));
assert(wanted_resolve_status > ResolveStatusSizeKnown);
@@ -7460,6 +7563,8 @@ static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_r
}
case ZigTypeIdCoroFrame:
return resolve_llvm_types_coro_frame(g, type, wanted_resolve_status);
+ case ZigTypeIdAnyFrame:
+ return resolve_llvm_types_any_frame(g, type, wanted_resolve_status);
}
zig_unreachable();
}
diff --git a/src/analyze.hpp b/src/analyze.hpp
index 47ff4344ba..3115c79b40 100644
--- a/src/analyze.hpp
+++ b/src/analyze.hpp
@@ -41,6 +41,7 @@ ZigType *get_opaque_type(CodeGen *g, Scope *scope, AstNode *source_node, const c
ZigType *get_struct_type(CodeGen *g, const char *type_name, const char *field_names[],
ZigType *field_types[], size_t field_count);
ZigType *get_test_fn_type(CodeGen *g);
+ZigType *get_any_frame_type(CodeGen *g, ZigType *result_type);
bool handle_is_ptr(ZigType *type_entry);
bool type_has_bits(ZigType *type_entry);
diff --git a/src/ast_render.cpp b/src/ast_render.cpp
index d97f58fdec..4d6bae311b 100644
--- a/src/ast_render.cpp
+++ b/src/ast_render.cpp
@@ -259,6 +259,8 @@ static const char *node_type_str(NodeType node_type) {
return "Suspend";
case NodeTypePointerType:
return "PointerType";
+ case NodeTypeAnyFrameType:
+ return "AnyFrameType";
case NodeTypeEnumLiteral:
return "EnumLiteral";
}
@@ -847,6 +849,14 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
render_node_ungrouped(ar, node->data.inferred_array_type.child_type);
break;
}
+ case NodeTypeAnyFrameType: {
+ fprintf(ar->f, "anyframe");
+ if (node->data.anyframe_type.payload_type != nullptr) {
+ fprintf(ar->f, "->");
+ render_node_grouped(ar, node->data.anyframe_type.payload_type);
+ }
+ break;
+ }
case NodeTypeErrorType:
fprintf(ar->f, "anyerror");
break;
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 63018cb6a3..c666317c17 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -4947,6 +4947,7 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdSetRuntimeSafety:
case IrInstructionIdSetFloatMode:
case IrInstructionIdArrayType:
+ case IrInstructionIdAnyFrameType:
case IrInstructionIdSliceType:
case IrInstructionIdSizeOf:
case IrInstructionIdSwitchTarget:
@@ -5438,7 +5439,9 @@ static LLVMValueRef pack_const_int(CodeGen *g, LLVMTypeRef big_int_type_ref, Con
return val;
}
case ZigTypeIdCoroFrame:
- zig_panic("TODO bit pack a coroutine frame");
+ zig_panic("TODO bit pack an async function frame");
+ case ZigTypeIdAnyFrame:
+ zig_panic("TODO bit pack an anyframe");
}
zig_unreachable();
}
@@ -5961,6 +5964,8 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c
zig_unreachable();
case ZigTypeIdCoroFrame:
zig_panic("TODO");
+ case ZigTypeIdAnyFrame:
+ zig_panic("TODO");
}
zig_unreachable();
}
@@ -7176,6 +7181,7 @@ Buf *codegen_generate_builtin_source(CodeGen *g) {
" ArgTuple: void,\n"
" Opaque: void,\n"
" Frame: void,\n"
+ " AnyFrame: AnyFrame,\n"
" Vector: Vector,\n"
" EnumLiteral: void,\n"
"\n\n"
@@ -7291,6 +7297,10 @@ Buf *codegen_generate_builtin_source(CodeGen *g) {
" args: []FnArg,\n"
" };\n"
"\n"
+ " pub const AnyFrame = struct {\n"
+ " child: ?type,\n"
+ " };\n"
+ "\n"
" pub const Vector = struct {\n"
" len: comptime_int,\n"
" child: type,\n"
@@ -8448,6 +8458,7 @@ static void prepend_c_type_to_decl_list(CodeGen *g, GenH *gen_h, ZigType *type_e
case ZigTypeIdErrorUnion:
case ZigTypeIdErrorSet:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
zig_unreachable();
case ZigTypeIdVoid:
case ZigTypeIdUnreachable:
@@ -8632,6 +8643,7 @@ static void get_c_type(CodeGen *g, GenH *gen_h, ZigType *type_entry, Buf *out_bu
case ZigTypeIdNull:
case ZigTypeIdArgTuple:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
zig_unreachable();
}
}
@@ -8800,7 +8812,9 @@ static void gen_h_file(CodeGen *g) {
case ZigTypeIdFn:
case ZigTypeIdVector:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
zig_unreachable();
+
case ZigTypeIdEnum:
if (type_entry->data.enumeration.layout == ContainerLayoutExtern) {
fprintf(out_h, "enum %s {\n", buf_ptr(type_h_name(type_entry)));
diff --git a/src/ir.cpp b/src/ir.cpp
index 7a5af347b7..e6d987a2ee 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -303,6 +303,7 @@ static bool types_have_same_zig_comptime_repr(ZigType *a, ZigType *b) {
case ZigTypeIdBoundFn:
case ZigTypeIdErrorSet:
case ZigTypeIdOpaque:
+ case ZigTypeIdAnyFrame:
return true;
case ZigTypeIdFloat:
return a->data.floating.bit_count == b->data.floating.bit_count;
@@ -563,6 +564,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionArrayType *) {
return IrInstructionIdArrayType;
}
+static constexpr IrInstructionId ir_instruction_id(IrInstructionAnyFrameType *) {
+ return IrInstructionIdAnyFrameType;
+}
+
static constexpr IrInstructionId ir_instruction_id(IrInstructionSliceType *) {
return IrInstructionIdSliceType;
}
@@ -1696,6 +1701,16 @@ static IrInstruction *ir_build_array_type(IrBuilder *irb, Scope *scope, AstNode
return &instruction->base;
}
+static IrInstruction *ir_build_anyframe_type(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ IrInstruction *payload_type)
+{
+ IrInstructionAnyFrameType *instruction = ir_build_instruction(irb, scope, source_node);
+ instruction->payload_type = payload_type;
+
+ if (payload_type != nullptr) ir_ref_instruction(payload_type, irb->current_basic_block);
+
+ return &instruction->base;
+}
static IrInstruction *ir_build_slice_type(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *child_type, bool is_const, bool is_volatile, IrInstruction *align_value, bool is_allow_zero)
{
@@ -6515,6 +6530,22 @@ static IrInstruction *ir_gen_array_type(IrBuilder *irb, Scope *scope, AstNode *n
}
}
+static IrInstruction *ir_gen_anyframe_type(IrBuilder *irb, Scope *scope, AstNode *node) {
+ assert(node->type == NodeTypeAnyFrameType);
+
+ AstNode *payload_type_node = node->data.anyframe_type.payload_type;
+ IrInstruction *payload_type_value = nullptr;
+
+ if (payload_type_node != nullptr) {
+ payload_type_value = ir_gen_node(irb, payload_type_node, scope);
+ if (payload_type_value == irb->codegen->invalid_instruction)
+ return payload_type_value;
+
+ }
+
+ return ir_build_anyframe_type(irb, scope, node, payload_type_value);
+}
+
static IrInstruction *ir_gen_undefined_literal(IrBuilder *irb, Scope *scope, AstNode *node) {
assert(node->type == NodeTypeUndefinedLiteral);
return ir_build_const_undefined(irb, scope, node);
@@ -7884,6 +7915,8 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
return ir_lval_wrap(irb, scope, ir_gen_array_type(irb, scope, node), lval, result_loc);
case NodeTypePointerType:
return ir_lval_wrap(irb, scope, ir_gen_pointer_type(irb, scope, node), lval, result_loc);
+ case NodeTypeAnyFrameType:
+ return ir_lval_wrap(irb, scope, ir_gen_anyframe_type(irb, scope, node), lval, result_loc);
case NodeTypeStringLiteral:
return ir_lval_wrap(irb, scope, ir_gen_string_literal(irb, scope, node), lval, result_loc);
case NodeTypeUndefinedLiteral:
@@ -12775,6 +12808,7 @@ static IrInstruction *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp *
case ZigTypeIdArgTuple:
case ZigTypeIdEnum:
case ZigTypeIdEnumLiteral:
+ case ZigTypeIdAnyFrame:
operator_allowed = is_equality_cmp;
break;
@@ -14155,6 +14189,7 @@ static IrInstruction *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructio
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
ir_add_error(ira, target,
buf_sprintf("invalid export target '%s'", buf_ptr(&type_value->name)));
break;
@@ -14180,6 +14215,7 @@ static IrInstruction *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructio
case ZigTypeIdOpaque:
case ZigTypeIdEnumLiteral:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
ir_add_error(ira, target,
buf_sprintf("invalid export target type '%s'", buf_ptr(&target->value.type->name)));
break;
@@ -15720,7 +15756,9 @@ static IrInstruction *ir_analyze_optional_type(IrAnalyze *ira, IrInstructionUnOp
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
return ir_const_type(ira, &un_op_instruction->base, get_optional_type(ira->codegen, type_entry));
+
case ZigTypeIdUnreachable:
case ZigTypeIdOpaque:
ir_add_error_node(ira, un_op_instruction->base.source_node,
@@ -17443,6 +17481,20 @@ static IrInstruction *ir_analyze_instruction_set_float_mode(IrAnalyze *ira,
return ir_const_void(ira, &instruction->base);
}
+static IrInstruction *ir_analyze_instruction_any_frame_type(IrAnalyze *ira,
+ IrInstructionAnyFrameType *instruction)
+{
+ ZigType *payload_type = nullptr;
+ if (instruction->payload_type != nullptr) {
+ payload_type = ir_resolve_type(ira, instruction->payload_type->child);
+ if (type_is_invalid(payload_type))
+ return ira->codegen->invalid_instruction;
+ }
+
+ ZigType *any_frame_type = get_any_frame_type(ira->codegen, payload_type);
+ return ir_const_type(ira, &instruction->base, any_frame_type);
+}
+
static IrInstruction *ir_analyze_instruction_slice_type(IrAnalyze *ira,
IrInstructionSliceType *slice_type_instruction)
{
@@ -17492,6 +17544,7 @@ static IrInstruction *ir_analyze_instruction_slice_type(IrAnalyze *ira,
case ZigTypeIdBoundFn:
case ZigTypeIdVector:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
{
ResolveStatus needed_status = (align_bytes == 0) ?
ResolveStatusZeroBitsKnown : ResolveStatusAlignmentKnown;
@@ -17607,6 +17660,7 @@ static IrInstruction *ir_analyze_instruction_array_type(IrAnalyze *ira,
case ZigTypeIdBoundFn:
case ZigTypeIdVector:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
{
if ((err = ensure_complete_type(ira->codegen, child_type)))
return ira->codegen->invalid_instruction;
@@ -17658,6 +17712,7 @@ static IrInstruction *ir_analyze_instruction_size_of(IrAnalyze *ira,
case ZigTypeIdFn:
case ZigTypeIdVector:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
{
uint64_t size_in_bytes = type_size(ira->codegen, type_entry);
return ir_const_unsigned(ira, &size_of_instruction->base, size_in_bytes);
@@ -18222,6 +18277,7 @@ static IrInstruction *ir_analyze_instruction_switch_target(IrAnalyze *ira,
case ZigTypeIdOpaque:
case ZigTypeIdVector:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
ir_add_error(ira, &switch_target_instruction->base,
buf_sprintf("invalid switch target type '%s'", buf_ptr(&target_type->name)));
return ira->codegen->invalid_instruction;
@@ -19656,6 +19712,22 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInstruction *source_instr
break;
}
+ case ZigTypeIdAnyFrame: {
+ result = create_const_vals(1);
+ result->special = ConstValSpecialStatic;
+ result->type = ir_type_info_get_type(ira, "AnyFrame", nullptr);
+
+ ConstExprValue *fields = create_const_vals(1);
+ result->data.x_struct.fields = fields;
+
+ // child: ?type
+ ensure_field_index(result->type, "child", 0);
+ fields[0].special = ConstValSpecialStatic;
+ fields[0].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type);
+ fields[0].data.x_optional = (type_entry->data.any_frame.result_type == nullptr) ? nullptr :
+ create_const_type(ira->codegen, type_entry->data.any_frame.result_type);
+ break;
+ }
case ZigTypeIdEnum:
{
result = create_const_vals(1);
@@ -20062,7 +20134,7 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInstruction *source_instr
break;
}
case ZigTypeIdCoroFrame:
- zig_panic("TODO @typeInfo for coro frames");
+ zig_panic("TODO @typeInfo for async function frames");
}
assert(result != nullptr);
@@ -21852,6 +21924,7 @@ static IrInstruction *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstruct
case ZigTypeIdFn:
case ZigTypeIdVector:
case ZigTypeIdCoroFrame:
+ case ZigTypeIdAnyFrame:
{
uint64_t align_in_bytes = get_abi_alignment(ira->codegen, type_entry);
return ir_const_unsigned(ira, &instruction->base, align_in_bytes);
@@ -23004,7 +23077,9 @@ static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue
case ZigTypeIdUnion:
zig_panic("TODO buf_write_value_bytes union type");
case ZigTypeIdCoroFrame:
- zig_panic("TODO buf_write_value_bytes coro frame type");
+ zig_panic("TODO buf_write_value_bytes async fn frame type");
+ case ZigTypeIdAnyFrame:
+ zig_panic("TODO buf_write_value_bytes anyframe type");
}
zig_unreachable();
}
@@ -23185,7 +23260,9 @@ static Error buf_read_value_bytes(IrAnalyze *ira, CodeGen *codegen, AstNode *sou
case ZigTypeIdUnion:
zig_panic("TODO buf_read_value_bytes union type");
case ZigTypeIdCoroFrame:
- zig_panic("TODO buf_read_value_bytes coro frame type");
+ zig_panic("TODO buf_read_value_bytes async fn frame type");
+ case ZigTypeIdAnyFrame:
+ zig_panic("TODO buf_read_value_bytes anyframe type");
}
zig_unreachable();
}
@@ -24327,6 +24404,8 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_set_runtime_safety(ira, (IrInstructionSetRuntimeSafety *)instruction);
case IrInstructionIdSetFloatMode:
return ir_analyze_instruction_set_float_mode(ira, (IrInstructionSetFloatMode *)instruction);
+ case IrInstructionIdAnyFrameType:
+ return ir_analyze_instruction_any_frame_type(ira, (IrInstructionAnyFrameType *)instruction);
case IrInstructionIdSliceType:
return ir_analyze_instruction_slice_type(ira, (IrInstructionSliceType *)instruction);
case IrInstructionIdGlobalAsm:
@@ -24707,6 +24786,7 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdStructFieldPtr:
case IrInstructionIdArrayType:
case IrInstructionIdSliceType:
+ case IrInstructionIdAnyFrameType:
case IrInstructionIdSizeOf:
case IrInstructionIdTestNonNull:
case IrInstructionIdOptionalUnwrapPtr:
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index ae467bdc8c..284ebed2f3 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -471,6 +471,15 @@ static void ir_print_slice_type(IrPrint *irp, IrInstructionSliceType *instructio
ir_print_other_instruction(irp, instruction->child_type);
}
+static void ir_print_any_frame_type(IrPrint *irp, IrInstructionAnyFrameType *instruction) {
+ if (instruction->payload_type == nullptr) {
+ fprintf(irp->f, "anyframe");
+ } else {
+ fprintf(irp->f, "anyframe->");
+ ir_print_other_instruction(irp, instruction->payload_type);
+ }
+}
+
static void ir_print_global_asm(IrPrint *irp, IrInstructionGlobalAsm *instruction) {
fprintf(irp->f, "asm(\"%s\")", buf_ptr(instruction->asm_code));
}
@@ -1629,6 +1638,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdSliceType:
ir_print_slice_type(irp, (IrInstructionSliceType *)instruction);
break;
+ case IrInstructionIdAnyFrameType:
+ ir_print_any_frame_type(irp, (IrInstructionAnyFrameType *)instruction);
+ break;
case IrInstructionIdGlobalAsm:
ir_print_global_asm(irp, (IrInstructionGlobalAsm *)instruction);
break;
diff --git a/src/parser.cpp b/src/parser.cpp
index b1a593d9c9..82312aacf3 100644
--- a/src/parser.cpp
+++ b/src/parser.cpp
@@ -282,6 +282,9 @@ static AstNode *ast_parse_prefix_op_expr(
case NodeTypeAwaitExpr:
right = &prefix->data.await_expr.expr;
break;
+ case NodeTypeAnyFrameType:
+ right = &prefix->data.anyframe_type.payload_type;
+ break;
case NodeTypeArrayType:
right = &prefix->data.array_type.child_type;
break;
@@ -1640,6 +1643,10 @@ static AstNode *ast_parse_primary_type_expr(ParseContext *pc) {
if (null != nullptr)
return ast_create_node(pc, NodeTypeNullLiteral, null);
+ Token *anyframe = eat_token_if(pc, TokenIdKeywordAnyFrame);
+ if (anyframe != nullptr)
+ return ast_create_node(pc, NodeTypeAnyFrameType, anyframe);
+
Token *true_token = eat_token_if(pc, TokenIdKeywordTrue);
if (true_token != nullptr) {
AstNode *res = ast_create_node(pc, NodeTypeBoolLiteral, true_token);
@@ -2510,7 +2517,7 @@ static AstNode *ast_parse_prefix_op(ParseContext *pc) {
// PrefixTypeOp
// <- QUESTIONMARK
-// / KEYWORD_promise MINUSRARROW
+// / KEYWORD_anyframe MINUSRARROW
// / ArrayTypeStart (ByteAlign / KEYWORD_const / KEYWORD_volatile)*
// / PtrTypeStart (KEYWORD_align LPAREN Expr (COLON INTEGER COLON INTEGER)? RPAREN / KEYWORD_const / KEYWORD_volatile)*
static AstNode *ast_parse_prefix_type_op(ParseContext *pc) {
@@ -2521,6 +2528,16 @@ static AstNode *ast_parse_prefix_type_op(ParseContext *pc) {
return res;
}
+ Token *anyframe = eat_token_if(pc, TokenIdKeywordAnyFrame);
+ if (anyframe != nullptr) {
+ if (eat_token_if(pc, TokenIdArrow) != nullptr) {
+ AstNode *res = ast_create_node(pc, NodeTypeAnyFrameType, anyframe);
+ return res;
+ }
+
+ put_back_token(pc);
+ }
+
AstNode *array = ast_parse_array_type_start(pc);
if (array != nullptr) {
assert(array->type == NodeTypeArrayType);
@@ -3005,6 +3022,9 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont
case NodeTypeInferredArrayType:
visit_field(&node->data.array_type.child_type, visit, context);
break;
+ case NodeTypeAnyFrameType:
+ visit_field(&node->data.anyframe_type.payload_type, visit, context);
+ break;
case NodeTypeErrorType:
// none
break;
diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp
index 0869c3ba9c..38c6c7153e 100644
--- a/src/tokenizer.cpp
+++ b/src/tokenizer.cpp
@@ -109,6 +109,7 @@ static const struct ZigKeyword zig_keywords[] = {
{"align", TokenIdKeywordAlign},
{"allowzero", TokenIdKeywordAllowZero},
{"and", TokenIdKeywordAnd},
+ {"anyframe", TokenIdKeywordAnyFrame},
{"asm", TokenIdKeywordAsm},
{"async", TokenIdKeywordAsync},
{"await", TokenIdKeywordAwait},
@@ -1533,6 +1534,7 @@ const char * token_name(TokenId id) {
case TokenIdKeywordCancel: return "cancel";
case TokenIdKeywordAlign: return "align";
case TokenIdKeywordAnd: return "and";
+ case TokenIdKeywordAnyFrame: return "anyframe";
case TokenIdKeywordAsm: return "asm";
case TokenIdKeywordBreak: return "break";
case TokenIdKeywordCatch: return "catch";
diff --git a/src/tokenizer.hpp b/src/tokenizer.hpp
index 253e0bd1e5..98bdfea907 100644
--- a/src/tokenizer.hpp
+++ b/src/tokenizer.hpp
@@ -53,6 +53,7 @@ enum TokenId {
TokenIdKeywordAlign,
TokenIdKeywordAllowZero,
TokenIdKeywordAnd,
+ TokenIdKeywordAnyFrame,
TokenIdKeywordAsm,
TokenIdKeywordAsync,
TokenIdKeywordAwait,
diff --git a/std/hash_map.zig b/std/hash_map.zig
index bdd6cc7519..431fbb35ab 100644
--- a/std/hash_map.zig
+++ b/std/hash_map.zig
@@ -540,6 +540,7 @@ pub fn autoHash(key: var, comptime rng: *std.rand.Random, comptime HashInt: type
.Undefined,
.ArgTuple,
.Frame,
+ .AnyFrame,
=> @compileError("cannot hash this type"),
.Void,
diff --git a/std/testing.zig b/std/testing.zig
index 3c4772cf37..7f347b0c24 100644
--- a/std/testing.zig
+++ b/std/testing.zig
@@ -30,6 +30,7 @@ pub fn expectEqual(expected: var, actual: @typeOf(expected)) void {
.ArgTuple,
.Opaque,
.Frame,
+ .AnyFrame,
=> @compileError("value of type " ++ @typeName(@typeOf(actual)) ++ " encountered"),
.Undefined,
diff --git a/std/zig/ast.zig b/std/zig/ast.zig
index 38bd94339f..475a0e4e13 100644
--- a/std/zig/ast.zig
+++ b/std/zig/ast.zig
@@ -400,7 +400,7 @@ pub const Node = struct {
VarType,
ErrorType,
FnProto,
- PromiseType,
+ AnyFrameType,
// Primary expressions
IntegerLiteral,
@@ -952,9 +952,9 @@ pub const Node = struct {
}
};
- pub const PromiseType = struct {
+ pub const AnyFrameType = struct {
base: Node,
- promise_token: TokenIndex,
+ anyframe_token: TokenIndex,
result: ?Result,
pub const Result = struct {
@@ -962,7 +962,7 @@ pub const Node = struct {
return_type: *Node,
};
- pub fn iterate(self: *PromiseType, index: usize) ?*Node {
+ pub fn iterate(self: *AnyFrameType, index: usize) ?*Node {
var i = index;
if (self.result) |result| {
@@ -973,13 +973,13 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *const PromiseType) TokenIndex {
- return self.promise_token;
+ pub fn firstToken(self: *const AnyFrameType) TokenIndex {
+ return self.anyframe_token;
}
- pub fn lastToken(self: *const PromiseType) TokenIndex {
+ pub fn lastToken(self: *const AnyFrameType) TokenIndex {
if (self.result) |result| return result.return_type.lastToken();
- return self.promise_token;
+ return self.anyframe_token;
}
};
diff --git a/std/zig/parse.zig b/std/zig/parse.zig
index 59acf99890..600178cdce 100644
--- a/std/zig/parse.zig
+++ b/std/zig/parse.zig
@@ -1201,7 +1201,7 @@ fn parseSuffixExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
/// / KEYWORD_error DOT IDENTIFIER
/// / KEYWORD_false
/// / KEYWORD_null
-/// / KEYWORD_promise
+/// / KEYWORD_anyframe
/// / KEYWORD_true
/// / KEYWORD_undefined
/// / KEYWORD_unreachable
@@ -1256,11 +1256,11 @@ fn parsePrimaryTypeExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*N
}
if (eatToken(it, .Keyword_false)) |token| return createLiteral(arena, Node.BoolLiteral, token);
if (eatToken(it, .Keyword_null)) |token| return createLiteral(arena, Node.NullLiteral, token);
- if (eatToken(it, .Keyword_promise)) |token| {
- const node = try arena.create(Node.PromiseType);
- node.* = Node.PromiseType{
- .base = Node{ .id = .PromiseType },
- .promise_token = token,
+ if (eatToken(it, .Keyword_anyframe)) |token| {
+ const node = try arena.create(Node.AnyFrameType);
+ node.* = Node.AnyFrameType{
+ .base = Node{ .id = .AnyFrameType },
+ .anyframe_token = token,
.result = null,
};
return &node.base;
@@ -2194,7 +2194,7 @@ fn parsePrefixOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
/// PrefixTypeOp
/// <- QUESTIONMARK
-/// / KEYWORD_promise MINUSRARROW
+/// / KEYWORD_anyframe MINUSRARROW
/// / ArrayTypeStart (ByteAlign / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
/// / PtrTypeStart (KEYWORD_align LPAREN Expr (COLON INTEGER COLON INTEGER)? RPAREN / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
fn parsePrefixTypeOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
@@ -2209,20 +2209,20 @@ fn parsePrefixTypeOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node
return &node.base;
}
- // TODO: Returning a PromiseType instead of PrefixOp makes casting and setting .rhs or
+ // TODO: Returning an AnyFrameType instead of PrefixOp makes casting and setting .rhs or
// .return_type more difficult for the caller (see parsePrefixOpExpr helper).
- // Consider making the PromiseType a member of PrefixOp and add a
- // PrefixOp.PromiseType variant?
- if (eatToken(it, .Keyword_promise)) |token| {
+ // Consider making the AnyFrameType a member of PrefixOp and add a
+ // PrefixOp.AnyFrameType variant?
+ if (eatToken(it, .Keyword_anyframe)) |token| {
const arrow = eatToken(it, .Arrow) orelse {
putBackToken(it, token);
return null;
};
- const node = try arena.create(Node.PromiseType);
- node.* = Node.PromiseType{
- .base = Node{ .id = .PromiseType },
- .promise_token = token,
- .result = Node.PromiseType.Result{
+ const node = try arena.create(Node.AnyFrameType);
+ node.* = Node.AnyFrameType{
+ .base = Node{ .id = .AnyFrameType },
+ .anyframe_token = token,
+ .result = Node.AnyFrameType.Result{
.arrow_token = arrow,
.return_type = undefined, // set by caller
},
@@ -2903,8 +2903,8 @@ fn parsePrefixOpExpr(
rightmost_op = rhs;
} else break;
},
- .PromiseType => {
- const prom = rightmost_op.cast(Node.PromiseType).?;
+ .AnyFrameType => {
+ const prom = rightmost_op.cast(Node.AnyFrameType).?;
if (try opParseFn(arena, it, tree)) |rhs| {
prom.result.?.return_type = rhs;
rightmost_op = rhs;
@@ -2922,8 +2922,8 @@ fn parsePrefixOpExpr(
.InvalidToken = AstError.InvalidToken{ .token = it.index },
});
},
- .PromiseType => {
- const prom = rightmost_op.cast(Node.PromiseType).?;
+ .AnyFrameType => {
+ const prom = rightmost_op.cast(Node.AnyFrameType).?;
prom.result.?.return_type = try expectNode(arena, it, tree, childParseFn, AstError{
.InvalidToken = AstError.InvalidToken{ .token = it.index },
});
diff --git a/std/zig/parser_test.zig b/std/zig/parser_test.zig
index f6f3363bf6..28cde6de01 100644
--- a/std/zig/parser_test.zig
+++ b/std/zig/parser_test.zig
@@ -2111,12 +2111,12 @@ test "zig fmt: coroutines" {
\\ suspend;
\\ x += 1;
\\ suspend;
- \\ const p: promise->void = async simpleAsyncFn() catch unreachable;
+ \\ const p: anyframe->void = async simpleAsyncFn() catch unreachable;
\\ await p;
\\}
\\
\\test "coroutine suspend, resume, cancel" {
- \\ const p: promise = try async testAsyncSeq();
+ \\ const p: anyframe = try async testAsyncSeq();
\\ resume p;
\\ cancel p;
\\}
diff --git a/std/zig/render.zig b/std/zig/render.zig
index b85c11c6ac..c6bb51267d 100644
--- a/std/zig/render.zig
+++ b/std/zig/render.zig
@@ -1205,15 +1205,15 @@ fn renderExpression(
}
},
- ast.Node.Id.PromiseType => {
- const promise_type = @fieldParentPtr(ast.Node.PromiseType, "base", base);
+ ast.Node.Id.AnyFrameType => {
+ const anyframe_type = @fieldParentPtr(ast.Node.AnyFrameType, "base", base);
- if (promise_type.result) |result| {
- try renderToken(tree, stream, promise_type.promise_token, indent, start_col, Space.None); // promise
+ if (anyframe_type.result) |result| {
+ try renderToken(tree, stream, anyframe_type.anyframe_token, indent, start_col, Space.None); // anyframe
try renderToken(tree, stream, result.arrow_token, indent, start_col, Space.None); // ->
return renderExpression(allocator, stream, tree, indent, start_col, result.return_type, space);
} else {
- return renderToken(tree, stream, promise_type.promise_token, indent, start_col, space); // promise
+ return renderToken(tree, stream, anyframe_type.anyframe_token, indent, start_col, space); // anyframe
}
},
diff --git a/std/zig/tokenizer.zig b/std/zig/tokenizer.zig
index 4539e1e5b2..9de20c39f2 100644
--- a/std/zig/tokenizer.zig
+++ b/std/zig/tokenizer.zig
@@ -15,6 +15,7 @@ pub const Token = struct {
Keyword{ .bytes = "align", .id = Id.Keyword_align },
Keyword{ .bytes = "allowzero", .id = Id.Keyword_allowzero },
Keyword{ .bytes = "and", .id = Id.Keyword_and },
+ Keyword{ .bytes = "anyframe", .id = Id.Keyword_anyframe },
Keyword{ .bytes = "asm", .id = Id.Keyword_asm },
Keyword{ .bytes = "async", .id = Id.Keyword_async },
Keyword{ .bytes = "await", .id = Id.Keyword_await },
@@ -42,7 +43,6 @@ pub const Token = struct {
Keyword{ .bytes = "or", .id = Id.Keyword_or },
Keyword{ .bytes = "orelse", .id = Id.Keyword_orelse },
Keyword{ .bytes = "packed", .id = Id.Keyword_packed },
- Keyword{ .bytes = "promise", .id = Id.Keyword_promise },
Keyword{ .bytes = "pub", .id = Id.Keyword_pub },
Keyword{ .bytes = "resume", .id = Id.Keyword_resume },
Keyword{ .bytes = "return", .id = Id.Keyword_return },
@@ -174,7 +174,7 @@ pub const Token = struct {
Keyword_or,
Keyword_orelse,
Keyword_packed,
- Keyword_promise,
+ Keyword_anyframe,
Keyword_pub,
Keyword_resume,
Keyword_return,
diff --git a/test/stage1/behavior/type_info.zig b/test/stage1/behavior/type_info.zig
index 6a51015124..b86ba27c13 100644
--- a/test/stage1/behavior/type_info.zig
+++ b/test/stage1/behavior/type_info.zig
@@ -177,11 +177,11 @@ fn testUnion() void {
expect(TypeId(typeinfo_info) == TypeId.Union);
expect(typeinfo_info.Union.layout == TypeInfo.ContainerLayout.Auto);
expect(typeinfo_info.Union.tag_type.? == TypeId);
- expect(typeinfo_info.Union.fields.len == 25);
+ expect(typeinfo_info.Union.fields.len == 26);
expect(typeinfo_info.Union.fields[4].enum_field != null);
expect(typeinfo_info.Union.fields[4].enum_field.?.value == 4);
expect(typeinfo_info.Union.fields[4].field_type == @typeOf(@typeInfo(u8).Int));
- expect(typeinfo_info.Union.decls.len == 20);
+ expect(typeinfo_info.Union.decls.len == 21);
const TestNoTagUnion = union {
Foo: void,
@@ -280,6 +280,25 @@ fn testVector() void {
expect(vec_info.Vector.child == i32);
}
+test "type info: anyframe and anyframe->T" {
+ testAnyFrame();
+ comptime testAnyFrame();
+}
+
+fn testAnyFrame() void {
+ {
+ const anyframe_info = @typeInfo(anyframe->i32);
+ expect(TypeId(anyframe_info) == .AnyFrame);
+ expect(anyframe_info.AnyFrame.child.? == i32);
+ }
+
+ {
+ const anyframe_info = @typeInfo(anyframe);
+ expect(TypeId(anyframe_info) == .AnyFrame);
+ expect(anyframe_info.AnyFrame.child == null);
+ }
+}
+
test "type info: optional field unwrapping" {
const Struct = struct {
cdOffset: u32,
--
cgit v1.2.3
From dbdc4d62d08c94a967b36afdfa57b126775a4eee Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 29 Jul 2019 19:32:49 -0400
Subject: improve support for anyframe and anyframe->T
* add implicit cast from `*@Frame(func)` to `anyframe->T` or `anyframe`.
* add implicit cast from `anyframe->T` to `anyframe`.
* `resume` works on `anyframe->T` and `anyframe` types.
---
src/all_types.hpp | 2 +-
src/analyze.cpp | 14 ++-----
src/codegen.cpp | 25 ++++++++----
src/ir.cpp | 76 +++++++++++++++++++++++++++++++++----
test/stage1/behavior/coroutines.zig | 9 ++++-
5 files changed, 97 insertions(+), 29 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 1096feade0..cd64c149d9 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -1726,6 +1726,7 @@ struct CodeGen {
LLVMValueRef err_name_table;
LLVMValueRef safety_crash_err_fn;
LLVMValueRef return_err_fn;
+ LLVMTypeRef async_fn_llvm_type;
// reminder: hash tables must be initialized before use
HashMap import_table;
@@ -1793,7 +1794,6 @@ struct CodeGen {
ZigType *entry_global_error_set;
ZigType *entry_arg_tuple;
ZigType *entry_enum_literal;
- ZigType *entry_frame_header;
ZigType *entry_any_frame;
} builtin_types;
ZigType *align_amt_type;
diff --git a/src/analyze.cpp b/src/analyze.cpp
index e47be8f14c..c117409445 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -7348,19 +7348,13 @@ static void resolve_llvm_types_fn_type(CodeGen *g, ZigType *fn_type) {
if (is_async) {
fn_type->data.fn.gen_param_info = allocate(1);
- ZigType *frame_type = g->builtin_types.entry_frame_header;
- Error err;
- if ((err = type_resolve(g, frame_type, ResolveStatusSizeKnown))) {
- zig_unreachable();
- }
- ZigType *ptr_type = get_pointer_to_type(g, frame_type, false);
- gen_param_types.append(get_llvm_type(g, ptr_type));
- param_di_types.append(get_llvm_di_type(g, ptr_type));
+ ZigType *frame_type = get_any_frame_type(g, fn_type_id->return_type);
+ gen_param_types.append(get_llvm_type(g, frame_type));
+ param_di_types.append(get_llvm_di_type(g, frame_type));
fn_type->data.fn.gen_param_info[0].src_index = 0;
fn_type->data.fn.gen_param_info[0].gen_index = 0;
- fn_type->data.fn.gen_param_info[0].type = ptr_type;
-
+ fn_type->data.fn.gen_param_info[0].type = frame_type;
} else {
fn_type->data.fn.gen_param_info = allocate(fn_type_id->param_count);
for (size_t i = 0; i < fn_type_id->param_count; i += 1) {
diff --git a/src/codegen.cpp b/src/codegen.cpp
index c666317c17..0ee902b537 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -4902,14 +4902,28 @@ static LLVMValueRef ir_render_suspend_br(CodeGen *g, IrExecutable *executable,
return nullptr;
}
+static LLVMTypeRef async_fn_llvm_type(CodeGen *g) {
+ if (g->async_fn_llvm_type != nullptr)
+ return g->async_fn_llvm_type;
+
+ ZigType *anyframe_type = get_any_frame_type(g, nullptr);
+ LLVMTypeRef param_type = get_llvm_type(g, anyframe_type);
+ LLVMTypeRef return_type = LLVMVoidType();
+ LLVMTypeRef fn_type = LLVMFunctionType(return_type, &param_type, 1, false);
+ g->async_fn_llvm_type = LLVMPointerType(fn_type, 0);
+
+ return g->async_fn_llvm_type;
+}
+
static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable,
IrInstructionCoroResume *instruction)
{
LLVMValueRef frame = ir_llvm_value(g, instruction->frame);
ZigType *frame_type = instruction->frame->value.type;
- assert(frame_type->id == ZigTypeIdCoroFrame);
- ZigFn *fn = frame_type->data.frame.fn;
- LLVMValueRef fn_val = fn_llvm_value(g, fn);
+ assert(frame_type->id == ZigTypeIdAnyFrame);
+ LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, frame, coro_fn_ptr_index, "");
+ LLVMValueRef uncasted_fn_val = LLVMBuildLoad(g->builder, fn_ptr_ptr, "");
+ LLVMValueRef fn_val = LLVMBuildIntToPtr(g->builder, uncasted_fn_val, async_fn_llvm_type(g), "");
ZigLLVMBuildCall(g->builder, fn_val, &frame, 1, LLVMFastCallConv, ZigLLVM_FnInlineAuto, "");
return nullptr;
}
@@ -6746,11 +6760,6 @@ static void define_builtin_types(CodeGen *g) {
g->primitive_type_table.put(&entry->name, entry);
}
- {
- const char *field_names[] = {"resume_index"};
- ZigType *field_types[] = {g->builtin_types.entry_usize};
- g->builtin_types.entry_frame_header = get_struct_type(g, "(frame header)", field_names, field_types, 1);
- }
}
static BuiltinFnEntry *create_builtin_fn(CodeGen *g, BuiltinFnId id, const char *name, size_t count) {
diff --git a/src/ir.cpp b/src/ir.cpp
index e6d987a2ee..98a8f1061e 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -7764,7 +7764,7 @@ static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node)
static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node) {
assert(node->type == NodeTypeResume);
- IrInstruction *target_inst = ir_gen_node(irb, node->data.resume_expr.expr, scope);
+ IrInstruction *target_inst = ir_gen_node_extra(irb, node->data.resume_expr.expr, scope, LValPtr, nullptr);
if (target_inst == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
@@ -10882,6 +10882,33 @@ static IrInstruction *ir_analyze_err_set_cast(IrAnalyze *ira, IrInstruction *sou
return result;
}
+static IrInstruction *ir_analyze_frame_ptr_to_anyframe(IrAnalyze *ira, IrInstruction *source_instr,
+ IrInstruction *value, ZigType *wanted_type)
+{
+ if (instr_is_comptime(value)) {
+ zig_panic("TODO comptime frame pointer");
+ }
+
+ IrInstruction *result = ir_build_cast(&ira->new_irb, source_instr->scope, source_instr->source_node,
+ wanted_type, value, CastOpBitCast);
+ result->value.type = wanted_type;
+ return result;
+}
+
+static IrInstruction *ir_analyze_anyframe_to_anyframe(IrAnalyze *ira, IrInstruction *source_instr,
+ IrInstruction *value, ZigType *wanted_type)
+{
+ if (instr_is_comptime(value)) {
+ zig_panic("TODO comptime anyframe->T to anyframe");
+ }
+
+ IrInstruction *result = ir_build_cast(&ira->new_irb, source_instr->scope, source_instr->source_node,
+ wanted_type, value, CastOpBitCast);
+ result->value.type = wanted_type;
+ return result;
+}
+
+
static IrInstruction *ir_analyze_err_wrap_code(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *value,
ZigType *wanted_type, ResultLoc *result_loc)
{
@@ -11978,6 +12005,29 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
+ // *@Frame(func) to anyframe->T or anyframe
+ if (actual_type->id == ZigTypeIdPointer && actual_type->data.pointer.ptr_len == PtrLenSingle &&
+ actual_type->data.pointer.child_type->id == ZigTypeIdCoroFrame && wanted_type->id == ZigTypeIdAnyFrame)
+ {
+ bool ok = true;
+ if (wanted_type->data.any_frame.result_type != nullptr) {
+ ZigFn *fn = actual_type->data.pointer.child_type->data.frame.fn;
+ ZigType *fn_return_type = fn->type_entry->data.fn.fn_type_id.return_type;
+ if (wanted_type->data.any_frame.result_type != fn_return_type) {
+ ok = false;
+ }
+ }
+ if (ok) {
+ return ir_analyze_frame_ptr_to_anyframe(ira, source_instr, value, wanted_type);
+ }
+ }
+
+ // anyframe->T to anyframe
+ if (actual_type->id == ZigTypeIdAnyFrame && actual_type->data.any_frame.result_type != nullptr &&
+ wanted_type->id == ZigTypeIdAnyFrame && wanted_type->data.any_frame.result_type == nullptr)
+ {
+ return ir_analyze_anyframe_to_anyframe(ira, source_instr, value, wanted_type);
+ }
// cast from null literal to maybe type
if (wanted_type->id == ZigTypeIdOptional &&
@@ -24323,17 +24373,27 @@ static IrInstruction *ir_analyze_instruction_suspend_br(IrAnalyze *ira, IrInstru
}
static IrInstruction *ir_analyze_instruction_coro_resume(IrAnalyze *ira, IrInstructionCoroResume *instruction) {
- IrInstruction *frame = instruction->frame->child;
- if (type_is_invalid(frame->value.type))
+ IrInstruction *frame_ptr = instruction->frame->child;
+ if (type_is_invalid(frame_ptr->value.type))
return ira->codegen->invalid_instruction;
- if (frame->value.type->id != ZigTypeIdCoroFrame) {
- ir_add_error(ira, instruction->frame,
- buf_sprintf("expected frame, found '%s'", buf_ptr(&frame->value.type->name)));
- return ira->codegen->invalid_instruction;
+ IrInstruction *frame;
+ if (frame_ptr->value.type->id == ZigTypeIdPointer &&
+ frame_ptr->value.type->data.pointer.ptr_len == PtrLenSingle &&
+ frame_ptr->value.type->data.pointer.is_const &&
+ frame_ptr->value.type->data.pointer.child_type->id == ZigTypeIdAnyFrame)
+ {
+ frame = ir_get_deref(ira, &instruction->base, frame_ptr, nullptr);
+ } else {
+ frame = frame_ptr;
}
- return ir_build_coro_resume(&ira->new_irb, instruction->base.scope, instruction->base.source_node, frame);
+ ZigType *any_frame_type = get_any_frame_type(ira->codegen, nullptr);
+ IrInstruction *casted_frame = ir_implicit_cast(ira, frame, any_frame_type);
+ if (type_is_invalid(casted_frame->value.type))
+ return ira->codegen->invalid_instruction;
+
+ return ir_build_coro_resume(&ira->new_irb, instruction->base.scope, instruction->base.source_node, casted_frame);
}
static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction *instruction) {
diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig
index 7af04d37c9..fddc912e77 100644
--- a/test/stage1/behavior/coroutines.zig
+++ b/test/stage1/behavior/coroutines.zig
@@ -5,15 +5,20 @@ const expect = std.testing.expect;
var global_x: i32 = 1;
test "simple coroutine suspend and resume" {
- const p = async simpleAsyncFn();
+ const frame = async simpleAsyncFn();
expect(global_x == 2);
- resume p;
+ resume frame;
expect(global_x == 3);
+ const af: anyframe->void = &frame;
+ resume frame;
+ expect(global_x == 4);
}
fn simpleAsyncFn() void {
global_x += 1;
suspend;
global_x += 1;
+ suspend;
+ global_x += 1;
}
var global_y: i32 = 1;
--
cgit v1.2.3
From 38b5812c4895eb0157f99348f51c40bbd17c3b94 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 1 Aug 2019 02:37:22 -0400
Subject: allow 128 bit cmpxchg on x86_64
---
src/ir.cpp | 7 +++--
src/target.cpp | 66 ++++++++++++++++++++++++++++++++++++++++
src/target.hpp | 1 +
test/stage1/behavior/atomics.zig | 20 ++++++++++++
4 files changed, 91 insertions(+), 3 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/src/ir.cpp b/src/ir.cpp
index be7a8e2e51..f34c840496 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -24735,10 +24735,11 @@ static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstruction *op
operand_type->data.integral.bit_count));
return ira->codegen->builtin_types.entry_invalid;
}
- if (operand_type->data.integral.bit_count > ira->codegen->pointer_size_bytes * 8) {
+ uint32_t max_atomic_bits = target_arch_largest_atomic_bits(ira->codegen->zig_target->arch);
+ if (operand_type->data.integral.bit_count > max_atomic_bits) {
ir_add_error(ira, op,
- buf_sprintf("expected integer type pointer size or smaller, found %" PRIu32 "-bit integer type",
- operand_type->data.integral.bit_count));
+ buf_sprintf("expected %" PRIu32 "-bit integer type or smaller, found %" PRIu32 "-bit integer type",
+ max_atomic_bits, operand_type->data.integral.bit_count));
return ira->codegen->builtin_types.entry_invalid;
}
if (!is_power_of_2(operand_type->data.integral.bit_count)) {
diff --git a/src/target.cpp b/src/target.cpp
index 6a949270ae..7bb248a35f 100644
--- a/src/target.cpp
+++ b/src/target.cpp
@@ -863,6 +863,71 @@ uint32_t target_arch_pointer_bit_width(ZigLLVM_ArchType arch) {
zig_unreachable();
}
+uint32_t target_arch_largest_atomic_bits(ZigLLVM_ArchType arch) {
+ switch (arch) {
+ case ZigLLVM_UnknownArch:
+ zig_unreachable();
+
+ case ZigLLVM_avr:
+ case ZigLLVM_msp430:
+ return 16;
+
+ case ZigLLVM_arc:
+ case ZigLLVM_arm:
+ case ZigLLVM_armeb:
+ case ZigLLVM_hexagon:
+ case ZigLLVM_le32:
+ case ZigLLVM_mips:
+ case ZigLLVM_mipsel:
+ case ZigLLVM_nvptx:
+ case ZigLLVM_ppc:
+ case ZigLLVM_r600:
+ case ZigLLVM_riscv32:
+ case ZigLLVM_sparc:
+ case ZigLLVM_sparcel:
+ case ZigLLVM_tce:
+ case ZigLLVM_tcele:
+ case ZigLLVM_thumb:
+ case ZigLLVM_thumbeb:
+ case ZigLLVM_x86:
+ case ZigLLVM_xcore:
+ case ZigLLVM_amdil:
+ case ZigLLVM_hsail:
+ case ZigLLVM_spir:
+ case ZigLLVM_kalimba:
+ case ZigLLVM_lanai:
+ case ZigLLVM_shave:
+ case ZigLLVM_wasm32:
+ case ZigLLVM_renderscript32:
+ return 32;
+
+ case ZigLLVM_aarch64:
+ case ZigLLVM_aarch64_be:
+ case ZigLLVM_amdgcn:
+ case ZigLLVM_bpfel:
+ case ZigLLVM_bpfeb:
+ case ZigLLVM_le64:
+ case ZigLLVM_mips64:
+ case ZigLLVM_mips64el:
+ case ZigLLVM_nvptx64:
+ case ZigLLVM_ppc64:
+ case ZigLLVM_ppc64le:
+ case ZigLLVM_riscv64:
+ case ZigLLVM_sparcv9:
+ case ZigLLVM_systemz:
+ case ZigLLVM_amdil64:
+ case ZigLLVM_hsail64:
+ case ZigLLVM_spir64:
+ case ZigLLVM_wasm64:
+ case ZigLLVM_renderscript64:
+ return 64;
+
+ case ZigLLVM_x86_64:
+ return 128;
+ }
+ zig_unreachable();
+}
+
uint32_t target_c_type_size_in_bits(const ZigTarget *target, CIntType id) {
switch (target->os) {
case OsFreestanding:
@@ -1693,3 +1758,4 @@ bool target_supports_libunwind(const ZigTarget *target) {
}
return true;
}
+
diff --git a/src/target.hpp b/src/target.hpp
index fcda9955b9..985a4c11b4 100644
--- a/src/target.hpp
+++ b/src/target.hpp
@@ -192,6 +192,7 @@ const char *target_arch_musl_name(ZigLLVM_ArchType arch);
bool target_supports_libunwind(const ZigTarget *target);
uint32_t target_arch_pointer_bit_width(ZigLLVM_ArchType arch);
+uint32_t target_arch_largest_atomic_bits(ZigLLVM_ArchType arch);
size_t target_libc_count(void);
void target_libc_enum(size_t index, ZigTarget *out_target);
diff --git a/test/stage1/behavior/atomics.zig b/test/stage1/behavior/atomics.zig
index daa463fd45..3d1caaaa15 100644
--- a/test/stage1/behavior/atomics.zig
+++ b/test/stage1/behavior/atomics.zig
@@ -69,3 +69,23 @@ test "cmpxchg with ptr" {
expect(@cmpxchgStrong(*i32, &x, &data3, &data2, AtomicOrder.SeqCst, AtomicOrder.SeqCst) == null);
expect(x == &data2);
}
+
+test "128-bit cmpxchg" {
+ if (builtin.arch != .x86_64) {
+ return error.SkipZigTest;
+ }
+ var x: u128 align(16) = 1234; // TODO: https://github.com/ziglang/zig/issues/2987
+ if (@cmpxchgWeak(u128, &x, 99, 5678, .SeqCst, .SeqCst)) |x1| {
+ expect(x1 == 1234);
+ } else {
+ @panic("cmpxchg should have failed");
+ }
+
+ while (@cmpxchgWeak(u128, &x, 1234, 5678, .SeqCst, .SeqCst)) |x1| {
+ expect(x1 == 1234);
+ }
+ expect(x == 5678);
+
+ expect(@cmpxchgStrong(u128, &x, 5678, 42, .SeqCst, .SeqCst) == null);
+ expect(x == 42);
+}
--
cgit v1.2.3
From e7ae4e4645a46a216c5913e2f9120cb02c10008c Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 1 Aug 2019 16:08:52 -0400
Subject: reimplement async with function splitting instead of switch
---
BRANCH_TODO | 4 +-
src/all_types.hpp | 20 +-
src/analyze.cpp | 159 ++++++++++++---
src/codegen.cpp | 394 ++++++++++++++++++++----------------
src/ir.cpp | 8 +-
test/stage1/behavior/coroutines.zig | 98 ++++-----
6 files changed, 410 insertions(+), 273 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/BRANCH_TODO b/BRANCH_TODO
index e2c4fec436..7c19147aa8 100644
--- a/BRANCH_TODO
+++ b/BRANCH_TODO
@@ -1,4 +1,5 @@
- * make the anyframe type and anyframe->T type work with resume
+ * fix @frameSize
+ * fix calling an inferred async function
* await
* await of a non async function
* await in single-threaded mode
@@ -10,5 +11,4 @@
* implicit cast of normal function to async function should be allowed when it is inferred to be async
* go over the commented out tests
* revive std.event.Loop
- * reimplement with function splitting rather than switch
* @typeInfo for @Frame(func)
diff --git a/src/all_types.hpp b/src/all_types.hpp
index cd64c149d9..b5b8b06259 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -1726,7 +1726,7 @@ struct CodeGen {
LLVMValueRef err_name_table;
LLVMValueRef safety_crash_err_fn;
LLVMValueRef return_err_fn;
- LLVMTypeRef async_fn_llvm_type;
+ LLVMTypeRef anyframe_fn_type;
// reminder: hash tables must be initialized before use
HashMap import_table;
@@ -1795,7 +1795,9 @@ struct CodeGen {
ZigType *entry_arg_tuple;
ZigType *entry_enum_literal;
ZigType *entry_any_frame;
+ ZigType *entry_async_fn;
} builtin_types;
+
ZigType *align_amt_type;
ZigType *stack_trace_type;
ZigType *ptr_to_stack_trace_type;
@@ -1934,6 +1936,7 @@ struct ZigVar {
ZigType *var_type;
LLVMValueRef value_ref;
IrInstruction *is_comptime;
+ IrInstruction *ptr_instruction;
// which node is the declaration of the variable
AstNode *decl_node;
ZigLLVMDILocalVariable *di_loc_var;
@@ -2159,8 +2162,8 @@ struct IrBasicBlock {
size_t ref_count;
// index into the basic block list
size_t index;
- // for coroutines, the resume_index which corresponds to this block
- size_t resume_index;
+ // for async functions, the split function which corresponds to this block
+ LLVMValueRef split_llvm_fn;
LLVMBasicBlockRef llvm_block;
LLVMBasicBlockRef llvm_exit_block;
// The instruction that referenced this basic block and caused us to
@@ -3686,13 +3689,9 @@ static const size_t maybe_null_index = 1;
static const size_t err_union_err_index = 0;
static const size_t err_union_payload_index = 1;
-static const size_t coro_resume_index_index = 0;
-static const size_t coro_fn_ptr_index = 1;
-static const size_t coro_awaiter_index = 2;
-static const size_t coro_arg_start = 3;
-
-// one for the Entry block, resume blocks are indexed after that.
-static const size_t coro_extra_resume_block_count = 1;
+static const size_t coro_fn_ptr_index = 0;
+static const size_t coro_awaiter_index = 1;
+static const size_t coro_arg_start = 2;
// TODO call graph analysis to find out what this number needs to be for every function
// MUST BE A POWER OF TWO.
@@ -3719,6 +3718,7 @@ enum FnWalkId {
struct FnWalkAttrs {
ZigFn *fn;
+ LLVMValueRef llvm_fn;
unsigned gen_i;
};
diff --git a/src/analyze.cpp b/src/analyze.cpp
index c117409445..5e22358423 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -5135,6 +5135,19 @@ Error ensure_complete_type(CodeGen *g, ZigType *type_entry) {
return type_resolve(g, type_entry, ResolveStatusSizeKnown);
}
+static ZigType *get_async_fn_type(CodeGen *g, ZigType *orig_fn_type) {
+ if (orig_fn_type->data.fn.fn_type_id.cc == CallingConventionAsync)
+ return orig_fn_type;
+
+ ZigType *fn_type = allocate_nonzero(1);
+ *fn_type = *orig_fn_type;
+ fn_type->data.fn.fn_type_id.cc = CallingConventionAsync;
+ fn_type->llvm_type = nullptr;
+ fn_type->llvm_di_type = nullptr;
+
+ return fn_type;
+}
+
static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) {
if (frame_type->data.frame.locals_struct != nullptr)
return ErrorNone;
@@ -5156,6 +5169,7 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) {
buf_ptr(&frame_type->name)));
return ErrorSemanticAnalyzeFail;
}
+ ZigType *fn_type = get_async_fn_type(g, fn->type_entry);
for (size_t i = 0; i < fn->call_list.length; i += 1) {
IrInstructionCallGen *call = fn->call_list.at(i);
@@ -5173,7 +5187,7 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) {
IrBasicBlock *new_resume_block = allocate(1);
new_resume_block->name_hint = "CallResume";
- new_resume_block->resume_index = fn->resume_blocks.length + coro_extra_resume_block_count;
+ new_resume_block->split_llvm_fn = reinterpret_cast(0x1);
fn->resume_blocks.append(new_resume_block);
call->resume_block = new_resume_block;
fn->analyzed_executable.basic_block_list.append(new_resume_block);
@@ -5194,16 +5208,13 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) {
ZigList field_types = {};
ZigList field_names = {};
- field_names.append("resume_index");
- field_types.append(g->builtin_types.entry_usize);
-
field_names.append("fn_ptr");
- field_types.append(fn->type_entry);
+ field_types.append(fn_type);
field_names.append("awaiter");
field_types.append(g->builtin_types.entry_usize);
- FnTypeId *fn_type_id = &fn->type_entry->data.fn.fn_type_id;
+ FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
ZigType *ptr_return_type = get_pointer_to_type(g, fn_type_id->return_type, false);
field_names.append("result_ptr");
field_types.append(ptr_return_type);
@@ -6686,7 +6697,9 @@ static void resolve_llvm_types_slice(CodeGen *g, ZigType *type, ResolveStatus wa
type->data.structure.resolve_status = ResolveStatusLLVMFull;
}
-static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveStatus wanted_resolve_status) {
+static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveStatus wanted_resolve_status,
+ ZigType *coro_frame_type)
+{
assert(struct_type->id == ZigTypeIdStruct);
assert(struct_type->data.structure.resolve_status != ResolveStatusInvalid);
assert(struct_type->data.structure.resolve_status >= ResolveStatusSizeKnown);
@@ -6774,7 +6787,16 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS
}
packed_bits_offset = next_packed_bits_offset;
} else {
- element_types[gen_field_index] = get_llvm_type(g, field_type);
+ LLVMTypeRef llvm_type;
+ if (i == 0 && coro_frame_type != nullptr) {
+ assert(coro_frame_type->id == ZigTypeIdCoroFrame);
+ assert(field_type->id == ZigTypeIdFn);
+ resolve_llvm_types_fn(g, coro_frame_type->data.frame.fn);
+ llvm_type = LLVMPointerType(coro_frame_type->data.frame.fn->raw_type_ref, 0);
+ } else {
+ llvm_type = get_llvm_type(g, field_type);
+ }
+ element_types[gen_field_index] = llvm_type;
gen_field_index += 1;
}
@@ -7456,7 +7478,7 @@ static void resolve_llvm_types_anyerror(CodeGen *g) {
}
static void resolve_llvm_types_coro_frame(CodeGen *g, ZigType *frame_type, ResolveStatus wanted_resolve_status) {
- resolve_llvm_types_struct(g, frame_type->data.frame.locals_struct, wanted_resolve_status);
+ resolve_llvm_types_struct(g, frame_type->data.frame.locals_struct, wanted_resolve_status, frame_type);
frame_type->llvm_type = frame_type->data.frame.locals_struct->llvm_type;
frame_type->llvm_di_type = frame_type->data.frame.locals_struct->llvm_di_type;
}
@@ -7464,35 +7486,112 @@ static void resolve_llvm_types_coro_frame(CodeGen *g, ZigType *frame_type, Resol
static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, ResolveStatus wanted_resolve_status) {
if (any_frame_type->llvm_di_type != nullptr) return;
- ZigType *result_type = any_frame_type->data.any_frame.result_type;
Buf *name = buf_sprintf("(%s header)", buf_ptr(&any_frame_type->name));
+ LLVMTypeRef frame_header_type = LLVMStructCreateNamed(LLVMGetGlobalContext(), buf_ptr(name));
+ any_frame_type->llvm_type = LLVMPointerType(frame_header_type, 0);
+
+ unsigned dwarf_kind = ZigLLVMTag_DW_structure_type();
+ ZigLLVMDIFile *di_file = nullptr;
+ ZigLLVMDIScope *di_scope = ZigLLVMCompileUnitToScope(g->compile_unit);
+ unsigned line = 0;
+ ZigLLVMDIType *frame_header_di_type = ZigLLVMCreateReplaceableCompositeType(g->dbuilder,
+ dwarf_kind, buf_ptr(name), di_scope, di_file, line);
+ any_frame_type->llvm_di_type = ZigLLVMCreateDebugPointerType(g->dbuilder, frame_header_di_type,
+ 8*g->pointer_size_bytes, 8*g->builtin_types.entry_usize->abi_align, buf_ptr(&any_frame_type->name));
+
+ LLVMTypeRef llvm_void = LLVMVoidType();
+ LLVMTypeRef fn_type = LLVMFunctionType(llvm_void, &any_frame_type->llvm_type, 1, false);
+ LLVMTypeRef usize_type_ref = get_llvm_type(g, g->builtin_types.entry_usize);
+ ZigLLVMDIType *usize_di_type = get_llvm_di_type(g, g->builtin_types.entry_usize);
+ ZigLLVMDIScope *compile_unit_scope = ZigLLVMCompileUnitToScope(g->compile_unit);
- ZigType *frame_header_type;
+ ZigType *result_type = any_frame_type->data.any_frame.result_type;
if (result_type == nullptr || !type_has_bits(result_type)) {
- const char *field_names[] = {"resume_index", "fn_ptr", "awaiter"};
- ZigType *field_types[] = {
- g->builtin_types.entry_usize,
- g->builtin_types.entry_usize,
- g->builtin_types.entry_usize,
+ LLVMTypeRef ptr_result_type = LLVMPointerType(fn_type, 0);
+ if (result_type == nullptr) {
+ g->anyframe_fn_type = ptr_result_type;
+ }
+ LLVMTypeRef field_types[] = {
+ ptr_result_type, // fn_ptr
+ usize_type_ref, // awaiter
+ };
+ LLVMStructSetBody(frame_header_type, field_types, 2, false);
+
+ ZigLLVMDIType *di_element_types[] = {
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "fn_ptr",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types[0]),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[0]),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 0),
+ ZigLLVM_DIFlags_Zero, usize_di_type),
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "awaiter",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types[1]),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[1]),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 1),
+ ZigLLVM_DIFlags_Zero, usize_di_type),
};
- frame_header_type = get_struct_type(g, buf_ptr(name), field_names, field_types, 3);
+ ZigLLVMDIType *replacement_di_type = ZigLLVMCreateDebugStructType(g->dbuilder,
+ compile_unit_scope, buf_ptr(name),
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, frame_header_type),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, frame_header_type),
+ ZigLLVM_DIFlags_Zero,
+ nullptr, di_element_types, 2, 0, nullptr, "");
+
+ ZigLLVMReplaceTemporary(g->dbuilder, frame_header_di_type, replacement_di_type);
} else {
ZigType *ptr_result_type = get_pointer_to_type(g, result_type, false);
+ LLVMTypeRef field_types[] = {
+ LLVMPointerType(fn_type, 0), // fn_ptr
+ usize_type_ref, // awaiter
+ get_llvm_type(g, ptr_result_type), // result_ptr
+ get_llvm_type(g, result_type), // result
+ };
+ LLVMStructSetBody(frame_header_type, field_types, 4, false);
- const char *field_names[] = {"resume_index", "fn_ptr", "awaiter", "result_ptr", "result"};
- ZigType *field_types[] = {
- g->builtin_types.entry_usize,
- g->builtin_types.entry_usize,
- g->builtin_types.entry_usize,
- ptr_result_type,
- result_type,
+ ZigLLVMDIType *di_element_types[] = {
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "fn_ptr",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types[0]),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[0]),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 0),
+ ZigLLVM_DIFlags_Zero, usize_di_type),
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "awaiter",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types[1]),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[1]),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 1),
+ ZigLLVM_DIFlags_Zero, usize_di_type),
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "result_ptr",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types[2]),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[2]),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 2),
+ ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_result_type)),
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "result",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types[3]),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[3]),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 3),
+ ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, result_type)),
};
- frame_header_type = get_struct_type(g, buf_ptr(name), field_names, field_types, 5);
- }
+ ZigLLVMDIType *replacement_di_type = ZigLLVMCreateDebugStructType(g->dbuilder,
+ compile_unit_scope, buf_ptr(name),
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, frame_header_type),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, frame_header_type),
+ ZigLLVM_DIFlags_Zero,
+ nullptr, di_element_types, 2, 0, nullptr, "");
- ZigType *ptr_type = get_pointer_to_type(g, frame_header_type, false);
- any_frame_type->llvm_type = get_llvm_type(g, ptr_type);
- any_frame_type->llvm_di_type = get_llvm_di_type(g, ptr_type);
+ ZigLLVMReplaceTemporary(g->dbuilder, frame_header_di_type, replacement_di_type);
+ }
}
static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_resolve_status) {
@@ -7520,7 +7619,7 @@ static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_r
if (type->data.structure.is_slice)
return resolve_llvm_types_slice(g, type, wanted_resolve_status);
else
- return resolve_llvm_types_struct(g, type, wanted_resolve_status);
+ return resolve_llvm_types_struct(g, type, wanted_resolve_status, nullptr);
case ZigTypeIdEnum:
return resolve_llvm_types_enum(g, type);
case ZigTypeIdUnion:
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 0ee902b537..d955736083 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -343,27 +343,24 @@ static bool cc_want_sret_attr(CallingConvention cc) {
zig_unreachable();
}
-static LLVMValueRef fn_llvm_value(CodeGen *g, ZigFn *fn_table_entry) {
- if (fn_table_entry->llvm_value)
- return fn_table_entry->llvm_value;
-
- Buf *unmangled_name = &fn_table_entry->symbol_name;
+static LLVMValueRef make_fn_llvm_value(CodeGen *g, ZigFn *fn) {
+ Buf *unmangled_name = &fn->symbol_name;
Buf *symbol_name;
GlobalLinkageId linkage;
- if (fn_table_entry->body_node == nullptr) {
+ if (fn->body_node == nullptr) {
symbol_name = unmangled_name;
linkage = GlobalLinkageIdStrong;
- } else if (fn_table_entry->export_list.length == 0) {
+ } else if (fn->export_list.length == 0) {
symbol_name = get_mangled_name(g, unmangled_name, false);
linkage = GlobalLinkageIdInternal;
} else {
- GlobalExport *fn_export = &fn_table_entry->export_list.items[0];
+ GlobalExport *fn_export = &fn->export_list.items[0];
symbol_name = &fn_export->name;
linkage = fn_export->linkage;
}
bool external_linkage = linkage != GlobalLinkageIdInternal;
- CallingConvention cc = fn_table_entry->type_entry->data.fn.fn_type_id.cc;
+ CallingConvention cc = fn->type_entry->data.fn.fn_type_id.cc;
if (cc == CallingConventionStdcall && external_linkage &&
g->zig_target->arch == ZigLLVM_x86)
{
@@ -371,28 +368,28 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, ZigFn *fn_table_entry) {
symbol_name = buf_sprintf("\x01_%s", buf_ptr(symbol_name));
}
- bool is_async = fn_is_async(fn_table_entry);
+ bool is_async = fn_is_async(fn);
- ZigType *fn_type = fn_table_entry->type_entry;
+ ZigType *fn_type = fn->type_entry;
// Make the raw_type_ref populated
- resolve_llvm_types_fn(g, fn_table_entry);
- LLVMTypeRef fn_llvm_type = fn_table_entry->raw_type_ref;
- if (fn_table_entry->body_node == nullptr) {
+ resolve_llvm_types_fn(g, fn);
+ LLVMTypeRef fn_llvm_type = fn->raw_type_ref;
+ LLVMValueRef llvm_fn = nullptr;
+ if (fn->body_node == nullptr) {
LLVMValueRef existing_llvm_fn = LLVMGetNamedFunction(g->module, buf_ptr(symbol_name));
if (existing_llvm_fn) {
- fn_table_entry->llvm_value = LLVMConstBitCast(existing_llvm_fn, LLVMPointerType(fn_llvm_type, 0));
- return fn_table_entry->llvm_value;
+ return LLVMConstBitCast(existing_llvm_fn, LLVMPointerType(fn_llvm_type, 0));
} else {
auto entry = g->exported_symbol_names.maybe_get(symbol_name);
if (entry == nullptr) {
- fn_table_entry->llvm_value = LLVMAddFunction(g->module, buf_ptr(symbol_name), fn_llvm_type);
+ llvm_fn = LLVMAddFunction(g->module, buf_ptr(symbol_name), fn_llvm_type);
if (target_is_wasm(g->zig_target)) {
- assert(fn_table_entry->proto_node->type == NodeTypeFnProto);
- AstNodeFnProto *fn_proto = &fn_table_entry->proto_node->data.fn_proto;
+ assert(fn->proto_node->type == NodeTypeFnProto);
+ AstNodeFnProto *fn_proto = &fn->proto_node->data.fn_proto;
if (fn_proto-> is_extern && fn_proto->lib_name != nullptr ) {
- addLLVMFnAttrStr(fn_table_entry->llvm_value, "wasm-import-module", buf_ptr(fn_proto->lib_name));
+ addLLVMFnAttrStr(llvm_fn, "wasm-import-module", buf_ptr(fn_proto->lib_name));
}
}
} else {
@@ -402,101 +399,98 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, ZigFn *fn_table_entry) {
resolve_llvm_types_fn(g, tld_fn->fn_entry);
tld_fn->fn_entry->llvm_value = LLVMAddFunction(g->module, buf_ptr(symbol_name),
tld_fn->fn_entry->raw_type_ref);
- fn_table_entry->llvm_value = LLVMConstBitCast(tld_fn->fn_entry->llvm_value,
- LLVMPointerType(fn_llvm_type, 0));
- return fn_table_entry->llvm_value;
+ llvm_fn = LLVMConstBitCast(tld_fn->fn_entry->llvm_value, LLVMPointerType(fn_llvm_type, 0));
+ return llvm_fn;
}
}
} else {
- if (fn_table_entry->llvm_value == nullptr) {
- fn_table_entry->llvm_value = LLVMAddFunction(g->module, buf_ptr(symbol_name), fn_llvm_type);
+ if (llvm_fn == nullptr) {
+ llvm_fn = LLVMAddFunction(g->module, buf_ptr(symbol_name), fn_llvm_type);
}
- for (size_t i = 1; i < fn_table_entry->export_list.length; i += 1) {
- GlobalExport *fn_export = &fn_table_entry->export_list.items[i];
- LLVMAddAlias(g->module, LLVMTypeOf(fn_table_entry->llvm_value),
- fn_table_entry->llvm_value, buf_ptr(&fn_export->name));
+ for (size_t i = 1; i < fn->export_list.length; i += 1) {
+ GlobalExport *fn_export = &fn->export_list.items[i];
+ LLVMAddAlias(g->module, LLVMTypeOf(llvm_fn), llvm_fn, buf_ptr(&fn_export->name));
}
}
- fn_table_entry->llvm_name = strdup(LLVMGetValueName(fn_table_entry->llvm_value));
- switch (fn_table_entry->fn_inline) {
+ switch (fn->fn_inline) {
case FnInlineAlways:
- addLLVMFnAttr(fn_table_entry->llvm_value, "alwaysinline");
- g->inline_fns.append(fn_table_entry);
+ addLLVMFnAttr(llvm_fn, "alwaysinline");
+ g->inline_fns.append(fn);
break;
case FnInlineNever:
- addLLVMFnAttr(fn_table_entry->llvm_value, "noinline");
+ addLLVMFnAttr(llvm_fn, "noinline");
break;
case FnInlineAuto:
- if (fn_table_entry->alignstack_value != 0) {
- addLLVMFnAttr(fn_table_entry->llvm_value, "noinline");
+ if (fn->alignstack_value != 0) {
+ addLLVMFnAttr(llvm_fn, "noinline");
}
break;
}
if (cc == CallingConventionNaked) {
- addLLVMFnAttr(fn_table_entry->llvm_value, "naked");
+ addLLVMFnAttr(llvm_fn, "naked");
} else {
- LLVMSetFunctionCallConv(fn_table_entry->llvm_value, get_llvm_cc(g, fn_type->data.fn.fn_type_id.cc));
+ LLVMSetFunctionCallConv(llvm_fn, get_llvm_cc(g, fn_type->data.fn.fn_type_id.cc));
}
if (cc == CallingConventionAsync) {
- addLLVMFnAttr(fn_table_entry->llvm_value, "optnone");
- addLLVMFnAttr(fn_table_entry->llvm_value, "noinline");
+ addLLVMFnAttr(llvm_fn, "optnone");
+ addLLVMFnAttr(llvm_fn, "noinline");
}
- bool want_cold = fn_table_entry->is_cold || cc == CallingConventionCold;
+ bool want_cold = fn->is_cold || cc == CallingConventionCold;
if (want_cold) {
- ZigLLVMAddFunctionAttrCold(fn_table_entry->llvm_value);
+ ZigLLVMAddFunctionAttrCold(llvm_fn);
}
- LLVMSetLinkage(fn_table_entry->llvm_value, to_llvm_linkage(linkage));
+ LLVMSetLinkage(llvm_fn, to_llvm_linkage(linkage));
if (linkage == GlobalLinkageIdInternal) {
- LLVMSetUnnamedAddr(fn_table_entry->llvm_value, true);
+ LLVMSetUnnamedAddr(llvm_fn, true);
}
ZigType *return_type = fn_type->data.fn.fn_type_id.return_type;
if (return_type->id == ZigTypeIdUnreachable) {
- addLLVMFnAttr(fn_table_entry->llvm_value, "noreturn");
+ addLLVMFnAttr(llvm_fn, "noreturn");
}
- if (fn_table_entry->body_node != nullptr) {
- maybe_export_dll(g, fn_table_entry->llvm_value, linkage);
+ if (fn->body_node != nullptr) {
+ maybe_export_dll(g, llvm_fn, linkage);
bool want_fn_safety = g->build_mode != BuildModeFastRelease &&
g->build_mode != BuildModeSmallRelease &&
- !fn_table_entry->def_scope->safety_off;
+ !fn->def_scope->safety_off;
if (want_fn_safety) {
if (g->libc_link_lib != nullptr) {
- addLLVMFnAttr(fn_table_entry->llvm_value, "sspstrong");
- addLLVMFnAttrStr(fn_table_entry->llvm_value, "stack-protector-buffer-size", "4");
+ addLLVMFnAttr(llvm_fn, "sspstrong");
+ addLLVMFnAttrStr(llvm_fn, "stack-protector-buffer-size", "4");
}
}
- if (g->have_stack_probing && !fn_table_entry->def_scope->safety_off) {
- addLLVMFnAttrStr(fn_table_entry->llvm_value, "probe-stack", "__zig_probe_stack");
+ if (g->have_stack_probing && !fn->def_scope->safety_off) {
+ addLLVMFnAttrStr(llvm_fn, "probe-stack", "__zig_probe_stack");
}
} else {
- maybe_import_dll(g, fn_table_entry->llvm_value, linkage);
+ maybe_import_dll(g, llvm_fn, linkage);
}
- if (fn_table_entry->alignstack_value != 0) {
- addLLVMFnAttrInt(fn_table_entry->llvm_value, "alignstack", fn_table_entry->alignstack_value);
+ if (fn->alignstack_value != 0) {
+ addLLVMFnAttrInt(llvm_fn, "alignstack", fn->alignstack_value);
}
- addLLVMFnAttr(fn_table_entry->llvm_value, "nounwind");
- add_uwtable_attr(g, fn_table_entry->llvm_value);
- addLLVMFnAttr(fn_table_entry->llvm_value, "nobuiltin");
- if (g->build_mode == BuildModeDebug && fn_table_entry->fn_inline != FnInlineAlways) {
- ZigLLVMAddFunctionAttr(fn_table_entry->llvm_value, "no-frame-pointer-elim", "true");
- ZigLLVMAddFunctionAttr(fn_table_entry->llvm_value, "no-frame-pointer-elim-non-leaf", nullptr);
+ addLLVMFnAttr(llvm_fn, "nounwind");
+ add_uwtable_attr(g, llvm_fn);
+ addLLVMFnAttr(llvm_fn, "nobuiltin");
+ if (g->build_mode == BuildModeDebug && fn->fn_inline != FnInlineAlways) {
+ ZigLLVMAddFunctionAttr(llvm_fn, "no-frame-pointer-elim", "true");
+ ZigLLVMAddFunctionAttr(llvm_fn, "no-frame-pointer-elim-non-leaf", nullptr);
}
- if (fn_table_entry->section_name) {
- LLVMSetSection(fn_table_entry->llvm_value, buf_ptr(fn_table_entry->section_name));
+ if (fn->section_name) {
+ LLVMSetSection(llvm_fn, buf_ptr(fn->section_name));
}
- if (fn_table_entry->align_bytes > 0) {
- LLVMSetAlignment(fn_table_entry->llvm_value, (unsigned)fn_table_entry->align_bytes);
+ if (fn->align_bytes > 0) {
+ LLVMSetAlignment(llvm_fn, (unsigned)fn->align_bytes);
} else {
// We'd like to set the best alignment for the function here, but on Darwin LLVM gives
// "Cannot getTypeInfo() on a type that is unsized!" assertion failure when calling
@@ -508,36 +502,46 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, ZigFn *fn_table_entry) {
if (!type_has_bits(return_type)) {
// nothing to do
} else if (type_is_nonnull_ptr(return_type)) {
- addLLVMAttr(fn_table_entry->llvm_value, 0, "nonnull");
+ addLLVMAttr(llvm_fn, 0, "nonnull");
} else if (want_first_arg_sret(g, &fn_type->data.fn.fn_type_id)) {
// Sret pointers must not be address 0
- addLLVMArgAttr(fn_table_entry->llvm_value, 0, "nonnull");
- addLLVMArgAttr(fn_table_entry->llvm_value, 0, "sret");
+ addLLVMArgAttr(llvm_fn, 0, "nonnull");
+ addLLVMArgAttr(llvm_fn, 0, "sret");
if (cc_want_sret_attr(cc)) {
- addLLVMArgAttr(fn_table_entry->llvm_value, 0, "noalias");
+ addLLVMArgAttr(llvm_fn, 0, "noalias");
}
init_gen_i = 1;
}
if (is_async) {
- addLLVMArgAttr(fn_table_entry->llvm_value, 0, "nonnull");
+ addLLVMArgAttr(llvm_fn, 0, "nonnull");
} else {
// set parameter attributes
FnWalk fn_walk = {};
fn_walk.id = FnWalkIdAttrs;
- fn_walk.data.attrs.fn = fn_table_entry;
+ fn_walk.data.attrs.fn = fn;
+ fn_walk.data.attrs.llvm_fn = llvm_fn;
fn_walk.data.attrs.gen_i = init_gen_i;
walk_function_params(g, fn_type, &fn_walk);
- uint32_t err_ret_trace_arg_index = get_err_ret_trace_arg_index(g, fn_table_entry);
+ uint32_t err_ret_trace_arg_index = get_err_ret_trace_arg_index(g, fn);
if (err_ret_trace_arg_index != UINT32_MAX) {
// Error return trace memory is in the stack, which is impossible to be at address 0
// on any architecture.
- addLLVMArgAttr(fn_table_entry->llvm_value, (unsigned)err_ret_trace_arg_index, "nonnull");
+ addLLVMArgAttr(llvm_fn, (unsigned)err_ret_trace_arg_index, "nonnull");
}
}
- return fn_table_entry->llvm_value;
+ return llvm_fn;
+}
+
+static LLVMValueRef fn_llvm_value(CodeGen *g, ZigFn *fn) {
+ if (fn->llvm_value)
+ return fn->llvm_value;
+
+ fn->llvm_value = make_fn_llvm_value(g, fn);
+ fn->llvm_name = strdup(LLVMGetValueName(fn->llvm_value));
+ return fn->llvm_value;
}
static ZigLLVMDIScope *get_di_scope(CodeGen *g, Scope *scope) {
@@ -1665,7 +1669,7 @@ static bool iter_function_params_c_abi(CodeGen *g, ZigType *fn_type, FnWalk *fn_
param_info = &fn_type->data.fn.fn_type_id.param_info[src_i];
ty = param_info->type;
source_node = fn_walk->data.attrs.fn->proto_node;
- llvm_fn = fn_walk->data.attrs.fn->llvm_value;
+ llvm_fn = fn_walk->data.attrs.llvm_fn;
break;
case FnWalkIdCall: {
if (src_i >= fn_walk->data.call.inst->arg_count)
@@ -1916,7 +1920,7 @@ void walk_function_params(CodeGen *g, ZigType *fn_type, FnWalk *fn_walk) {
switch (fn_walk->id) {
case FnWalkIdAttrs: {
- LLVMValueRef llvm_fn = fn_walk->data.attrs.fn->llvm_value;
+ LLVMValueRef llvm_fn = fn_walk->data.attrs.llvm_fn;
bool is_byval = gen_info->is_byval;
FnTypeParamInfo *param_info = &fn_type->data.fn.fn_type_id.param_info[param_i];
@@ -1989,10 +1993,9 @@ static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrIns
if (fn_is_async(g->cur_fn)) {
if (ir_want_runtime_safety(g, &return_instruction->base)) {
LLVMValueRef locals_ptr = g->cur_ret_ptr;
- LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, locals_ptr, coro_resume_index_index, "");
- LLVMValueRef new_resume_index = LLVMConstInt(g->builtin_types.entry_usize->llvm_type,
- g->cur_fn->resume_blocks.length + 2, false);
- LLVMBuildStore(g->builder, new_resume_index, resume_index_ptr);
+ LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, locals_ptr, coro_fn_ptr_index, "");
+ LLVMValueRef new_resume_fn = g->cur_fn->resume_blocks.last()->split_llvm_fn;
+ LLVMBuildStore(g->builder, new_resume_fn, resume_index_ptr);
}
LLVMBuildRetVoid(g->builder);
@@ -2954,14 +2957,17 @@ static LLVMValueRef ir_render_bool_not(CodeGen *g, IrExecutable *executable, IrI
return LLVMBuildICmp(g->builder, LLVMIntEQ, value, zero, "");
}
-static LLVMValueRef ir_render_decl_var(CodeGen *g, IrExecutable *executable, IrInstructionDeclVarGen *instruction) {
- ZigVar *var = instruction->var;
-
+static void render_decl_var(CodeGen *g, ZigVar *var) {
if (!type_has_bits(var->var_type))
- return nullptr;
+ return;
- var->value_ref = ir_llvm_value(g, instruction->var_ptr);
+ var->value_ref = ir_llvm_value(g, var->ptr_instruction);
gen_var_debug_decl(g, var);
+}
+
+static LLVMValueRef ir_render_decl_var(CodeGen *g, IrExecutable *executable, IrInstructionDeclVarGen *instruction) {
+ instruction->var->ptr_instruction = instruction->var_ptr;
+ render_decl_var(g, instruction->var);
return nullptr;
}
@@ -3369,12 +3375,6 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
if (instruction->is_async || callee_is_async) {
assert(frame_result_loc != nullptr);
assert(instruction->fn_entry != nullptr);
- LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_resume_index_index, "");
- LLVMBuildStore(g->builder, zero, resume_index_ptr);
- LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_fn_ptr_index, "");
- LLVMValueRef bitcasted_fn_val = LLVMBuildBitCast(g->builder, fn_val,
- LLVMGetElementType(LLVMTypeOf(fn_ptr_ptr)), "");
- LLVMBuildStore(g->builder, bitcasted_fn_val, fn_ptr_ptr);
if (prefix_arg_err_ret_stack) {
zig_panic("TODO");
@@ -3431,10 +3431,9 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
ZigLLVMBuildCall(g->builder, fn_val, &frame_result_loc, 1, llvm_cc, fn_inline, "");
return nullptr;
} else if (callee_is_async) {
- LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_resume_index_index, "");
- LLVMValueRef new_resume_index = LLVMConstInt(g->builtin_types.entry_usize->llvm_type,
- instruction->resume_block->resume_index, false);
- LLVMBuildStore(g->builder, new_resume_index, resume_index_ptr);
+ LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_fn_ptr_index, "");
+ LLVMValueRef new_fn_ptr = instruction->resume_block->split_llvm_fn;
+ LLVMBuildStore(g->builder, new_fn_ptr, fn_ptr_ptr);
LLVMValueRef call_inst = ZigLLVMBuildCall(g->builder, fn_val, &frame_result_loc, 1, llvm_cc, fn_inline, "");
ZigLLVMSetTailCall(call_inst);
@@ -4888,10 +4887,9 @@ static LLVMValueRef ir_render_suspend_begin(CodeGen *g, IrExecutable *executable
IrInstructionSuspendBegin *instruction)
{
LLVMValueRef locals_ptr = g->cur_ret_ptr;
- LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, locals_ptr, coro_resume_index_index, "");
- LLVMValueRef new_resume_index = LLVMConstInt(g->builtin_types.entry_usize->llvm_type,
- instruction->resume_block->resume_index, false);
- LLVMBuildStore(g->builder, new_resume_index, resume_index_ptr);
+ LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, locals_ptr, coro_fn_ptr_index, "");
+ LLVMValueRef new_fn_ptr = instruction->resume_block->split_llvm_fn;
+ LLVMBuildStore(g->builder, new_fn_ptr, fn_ptr_ptr);
return nullptr;
}
@@ -4902,17 +4900,17 @@ static LLVMValueRef ir_render_suspend_br(CodeGen *g, IrExecutable *executable,
return nullptr;
}
-static LLVMTypeRef async_fn_llvm_type(CodeGen *g) {
- if (g->async_fn_llvm_type != nullptr)
- return g->async_fn_llvm_type;
+static LLVMTypeRef anyframe_fn_type(CodeGen *g) {
+ if (g->anyframe_fn_type != nullptr)
+ return g->anyframe_fn_type;
ZigType *anyframe_type = get_any_frame_type(g, nullptr);
LLVMTypeRef param_type = get_llvm_type(g, anyframe_type);
LLVMTypeRef return_type = LLVMVoidType();
LLVMTypeRef fn_type = LLVMFunctionType(return_type, ¶m_type, 1, false);
- g->async_fn_llvm_type = LLVMPointerType(fn_type, 0);
+ g->anyframe_fn_type = LLVMPointerType(fn_type, 0);
- return g->async_fn_llvm_type;
+ return g->anyframe_fn_type;
}
static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable,
@@ -4923,7 +4921,7 @@ static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable,
assert(frame_type->id == ZigTypeIdAnyFrame);
LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, frame, coro_fn_ptr_index, "");
LLVMValueRef uncasted_fn_val = LLVMBuildLoad(g->builder, fn_ptr_ptr, "");
- LLVMValueRef fn_val = LLVMBuildIntToPtr(g->builder, uncasted_fn_val, async_fn_llvm_type(g), "");
+ LLVMValueRef fn_val = LLVMBuildIntToPtr(g->builder, uncasted_fn_val, anyframe_fn_type(g), "");
ZigLLVMBuildCall(g->builder, fn_val, &frame, 1, LLVMFastCallConv, ZigLLVM_FnInlineAuto, "");
return nullptr;
}
@@ -5022,7 +5020,6 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdCallSrc:
case IrInstructionIdAllocaSrc:
case IrInstructionIdEndExpr:
- case IrInstructionIdAllocaGen:
case IrInstructionIdImplicitCast:
case IrInstructionIdResolveResult:
case IrInstructionIdResetResult:
@@ -5035,6 +5032,7 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdUnionInitNamedField:
case IrInstructionIdFrameType:
case IrInstructionIdFrameSizeSrc:
+ case IrInstructionIdAllocaGen:
zig_unreachable();
case IrInstructionIdDeclVarGen:
@@ -5195,6 +5193,92 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
zig_unreachable();
}
+static void render_async_spills(CodeGen *g) {
+ ZigType *fn_type = g->cur_fn->type_entry;
+ ZigType *import = get_scope_import(&g->cur_fn->fndef_scope->base);
+ size_t async_var_index = coro_arg_start + (type_has_bits(fn_type->data.fn.fn_type_id.return_type) ? 2 : 0);
+ for (size_t var_i = 0; var_i < g->cur_fn->variable_list.length; var_i += 1) {
+ ZigVar *var = g->cur_fn->variable_list.at(var_i);
+
+ if (!type_has_bits(var->var_type)) {
+ continue;
+ }
+ if (ir_get_var_is_comptime(var))
+ continue;
+ switch (type_requires_comptime(g, var->var_type)) {
+ case ReqCompTimeInvalid:
+ zig_unreachable();
+ case ReqCompTimeYes:
+ continue;
+ case ReqCompTimeNo:
+ break;
+ }
+ if (var->src_arg_index == SIZE_MAX) {
+ continue;
+ }
+
+ var->value_ref = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, async_var_index,
+ buf_ptr(&var->name));
+ async_var_index += 1;
+ if (var->decl_node) {
+ var->di_loc_var = ZigLLVMCreateAutoVariable(g->dbuilder, get_di_scope(g, var->parent_scope),
+ buf_ptr(&var->name), import->data.structure.root_struct->di_file,
+ (unsigned)(var->decl_node->line + 1),
+ get_llvm_di_type(g, var->var_type), !g->strip_debug_symbols, 0);
+ gen_var_debug_decl(g, var);
+ }
+ }
+ for (size_t alloca_i = 0; alloca_i < g->cur_fn->alloca_gen_list.length; alloca_i += 1) {
+ IrInstructionAllocaGen *instruction = g->cur_fn->alloca_gen_list.at(alloca_i);
+ ZigType *ptr_type = instruction->base.value.type;
+ assert(ptr_type->id == ZigTypeIdPointer);
+ ZigType *child_type = ptr_type->data.pointer.child_type;
+ if (!type_has_bits(child_type))
+ continue;
+ if (instruction->base.ref_count == 0)
+ continue;
+ if (instruction->base.value.special != ConstValSpecialRuntime) {
+ if (const_ptr_pointee(nullptr, g, &instruction->base.value, nullptr)->special !=
+ ConstValSpecialRuntime)
+ {
+ continue;
+ }
+ }
+ instruction->base.llvm_value = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, async_var_index,
+ instruction->name_hint);
+ async_var_index += 1;
+ }
+}
+
+static void render_async_var_decls(CodeGen *g, Scope *scope) {
+ render_async_spills(g);
+ for (;;) {
+ switch (scope->id) {
+ case ScopeIdCImport:
+ zig_unreachable();
+ case ScopeIdFnDef:
+ return;
+ case ScopeIdVarDecl: {
+ ZigVar *var = reinterpret_cast(scope)->var;
+ if (var->ptr_instruction != nullptr) {
+ render_decl_var(g, var);
+ }
+ // fallthrough
+ }
+ case ScopeIdDecls:
+ case ScopeIdBlock:
+ case ScopeIdDefer:
+ case ScopeIdDeferExpr:
+ case ScopeIdLoop:
+ case ScopeIdSuspend:
+ case ScopeIdCompTime:
+ case ScopeIdRuntime:
+ scope = scope->parent;
+ continue;
+ }
+ }
+}
+
static void ir_render(CodeGen *g, ZigFn *fn_entry) {
assert(fn_entry);
@@ -5204,6 +5288,11 @@ static void ir_render(CodeGen *g, ZigFn *fn_entry) {
IrBasicBlock *current_block = executable->basic_block_list.at(block_i);
assert(current_block->llvm_block);
LLVMPositionBuilderAtEnd(g->builder, current_block->llvm_block);
+ if (current_block->split_llvm_fn != nullptr) {
+ g->cur_fn_val = current_block->split_llvm_fn;
+ g->cur_ret_ptr = LLVMGetParam(g->cur_fn_val, 0);
+ render_async_var_decls(g, current_block->instruction_list.at(0)->scope);
+ }
for (size_t instr_i = 0; instr_i < current_block->instruction_list.length; instr_i += 1) {
IrInstruction *instruction = current_block->instruction_list.at(instr_i);
if (instruction->ref_count == 0 && !ir_has_side_effects(instruction))
@@ -6064,19 +6153,17 @@ static void build_all_basic_blocks(CodeGen *g, ZigFn *fn) {
IrExecutable *executable = &fn->analyzed_executable;
assert(executable->basic_block_list.length > 0);
LLVMValueRef fn_val = fn_llvm_value(g, fn);
- LLVMBasicBlockRef first_bb = nullptr;
- if (fn_is_async(fn)) {
- first_bb = LLVMAppendBasicBlock(fn_val, "AsyncSwitch");
- fn->preamble_llvm_block = first_bb;
- }
for (size_t block_i = 0; block_i < executable->basic_block_list.length; block_i += 1) {
IrBasicBlock *bb = executable->basic_block_list.at(block_i);
+ if (bb->split_llvm_fn != nullptr) {
+ assert(bb->split_llvm_fn == reinterpret_cast(0x1));
+ fn_val = make_fn_llvm_value(g, fn);
+ bb->split_llvm_fn = fn_val;
+ }
bb->llvm_block = LLVMAppendBasicBlock(fn_val, bb->name_hint);
}
- if (first_bb == nullptr) {
- first_bb = executable->basic_block_list.at(0)->llvm_block;
- }
- LLVMPositionBuilderAtEnd(g->builder, first_bb);
+ IrBasicBlock *entry_bb = executable->basic_block_list.at(0);
+ LLVMPositionBuilderAtEnd(g->builder, entry_bb->llvm_block);
}
static void gen_global_var(CodeGen *g, ZigVar *var, LLVMValueRef init_val,
@@ -6254,7 +6341,6 @@ static void do_code_gen(CodeGen *g) {
clear_debug_source_node(g);
bool is_async = fn_is_async(fn_table_entry);
- size_t async_var_index = coro_arg_start + (type_has_bits(fn_type_id->return_type) ? 2 : 0);
if (want_sret || is_async) {
g->cur_ret_ptr = LLVMGetParam(fn, 0);
@@ -6287,7 +6373,9 @@ static void do_code_gen(CodeGen *g) {
g->cur_err_ret_trace_val_stack = nullptr;
}
- if (!is_async) {
+ if (is_async) {
+ render_async_spills(g);
+ } else {
// allocate temporary stack data
for (size_t alloca_i = 0; alloca_i < fn_table_entry->alloca_gen_list.length; alloca_i += 1) {
IrInstructionAllocaGen *instruction = fn_table_entry->alloca_gen_list.at(alloca_i);
@@ -6345,18 +6433,7 @@ static void do_code_gen(CodeGen *g) {
} else if (is_c_abi) {
fn_walk_var.data.vars.var = var;
iter_function_params_c_abi(g, fn_table_entry->type_entry, &fn_walk_var, var->src_arg_index);
- } else if (is_async) {
- var->value_ref = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, async_var_index,
- buf_ptr(&var->name));
- async_var_index += 1;
- if (var->decl_node) {
- var->di_loc_var = ZigLLVMCreateAutoVariable(g->dbuilder, get_di_scope(g, var->parent_scope),
- buf_ptr(&var->name), import->data.structure.root_struct->di_file,
- (unsigned)(var->decl_node->line + 1),
- get_llvm_di_type(g, var->var_type), !g->strip_debug_symbols, 0);
- gen_var_debug_decl(g, var);
- }
- } else {
+ } else if (!is_async) {
ZigType *gen_type;
FnGenParamInfo *gen_info = &fn_table_entry->type_entry->data.fn.gen_param_info[var->src_arg_index];
assert(gen_info->gen_index != SIZE_MAX);
@@ -6382,29 +6459,6 @@ static void do_code_gen(CodeGen *g) {
}
}
- if (is_async) {
- for (size_t alloca_i = 0; alloca_i < fn_table_entry->alloca_gen_list.length; alloca_i += 1) {
- IrInstructionAllocaGen *instruction = fn_table_entry->alloca_gen_list.at(alloca_i);
- ZigType *ptr_type = instruction->base.value.type;
- assert(ptr_type->id == ZigTypeIdPointer);
- ZigType *child_type = ptr_type->data.pointer.child_type;
- if (!type_has_bits(child_type))
- continue;
- if (instruction->base.ref_count == 0)
- continue;
- if (instruction->base.value.special != ConstValSpecialRuntime) {
- if (const_ptr_pointee(nullptr, g, &instruction->base.value, nullptr)->special !=
- ConstValSpecialRuntime)
- {
- continue;
- }
- }
- instruction->base.llvm_value = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, async_var_index,
- instruction->name_hint);
- async_var_index += 1;
- }
- }
-
// finishing error return trace setup. we have to do this after all the allocas.
if (have_err_ret_trace_stack) {
ZigType *usize = g->builtin_types.entry_usize;
@@ -6435,31 +6489,16 @@ static void do_code_gen(CodeGen *g) {
LLVMValueRef size_val = LLVMConstInt(usize_type_ref, fn_table_entry->frame_type->abi_size, false);
ZigLLVMFunctionSetPrefixData(fn_table_entry->llvm_value, size_val);
- if (!g->strip_debug_symbols) {
- AstNode *source_node = fn_table_entry->proto_node;
- ZigLLVMSetCurrentDebugLocation(g->builder, (int)source_node->line + 1,
- (int)source_node->column + 1, get_di_scope(g, fn_table_entry->child_scope));
- }
- IrExecutable *executable = &fn_table_entry->analyzed_executable;
- LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadResume");
- LLVMPositionBuilderAtEnd(g->builder, bad_resume_block);
- gen_assertion_scope(g, PanicMsgIdBadResume, fn_table_entry->child_scope);
-
- LLVMPositionBuilderAtEnd(g->builder, fn_table_entry->preamble_llvm_block);
- LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr,
- coro_resume_index_index, "");
- LLVMValueRef resume_index = LLVMBuildLoad(g->builder, resume_index_ptr, "");
- // +1 - index 0 is reserved for the entry block
- LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, resume_index, bad_resume_block,
- fn_table_entry->resume_blocks.length + 1);
-
- LLVMValueRef zero = LLVMConstNull(usize_type_ref);
- LLVMAddCase(switch_instr, zero, executable->basic_block_list.at(0)->llvm_block);
-
- for (size_t resume_i = 0; resume_i < fn_table_entry->resume_blocks.length; resume_i += 1) {
- IrBasicBlock *resume_block = fn_table_entry->resume_blocks.at(resume_i);
- LLVMValueRef case_value = LLVMConstInt(usize_type_ref, resume_block->resume_index, false);
- LLVMAddCase(switch_instr, case_value, resume_block->llvm_block);
+ if (ir_want_runtime_safety_scope(g, fn_table_entry->child_scope)) {
+ IrBasicBlock *bad_resume_block = allocate(1);
+ bad_resume_block->name_hint = "BadResume";
+ bad_resume_block->split_llvm_fn = make_fn_llvm_value(g, fn_table_entry);
+
+ LLVMBasicBlockRef llvm_block = LLVMAppendBasicBlock(bad_resume_block->split_llvm_fn, "BadResume");
+ LLVMPositionBuilderAtEnd(g->builder, llvm_block);
+ gen_safety_crash(g, PanicMsgIdBadResume);
+
+ fn_table_entry->resume_blocks.append(bad_resume_block);
}
} else {
// create debug variable declarations for parameters
@@ -6472,7 +6511,6 @@ static void do_code_gen(CodeGen *g) {
walk_function_params(g, fn_table_entry->type_entry, &fn_walk_init);
}
-
ir_render(g, fn_table_entry);
}
diff --git a/src/ir.cpp b/src/ir.cpp
index 98a8f1061e..3d376270f6 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -3227,7 +3227,7 @@ static IrInstruction *ir_build_alloca_src(IrBuilder *irb, Scope *scope, AstNode
return &instruction->base;
}
-static IrInstructionAllocaGen *ir_create_alloca_gen(IrAnalyze *ira, IrInstruction *source_instruction,
+static IrInstructionAllocaGen *ir_build_alloca_gen(IrAnalyze *ira, IrInstruction *source_instruction,
uint32_t align, const char *name_hint)
{
IrInstructionAllocaGen *instruction = ir_create_instruction(&ira->new_irb,
@@ -14351,7 +14351,7 @@ static IrInstruction *ir_analyze_alloca(IrAnalyze *ira, IrInstruction *source_in
ConstExprValue *pointee = create_const_vals(1);
pointee->special = ConstValSpecialUndef;
- IrInstructionAllocaGen *result = ir_create_alloca_gen(ira, source_inst, align, name_hint);
+ IrInstructionAllocaGen *result = ir_build_alloca_gen(ira, source_inst, align, name_hint);
result->base.value.special = ConstValSpecialStatic;
result->base.value.data.x_ptr.special = ConstPtrSpecialRef;
result->base.value.data.x_ptr.mut = force_comptime ? ConstPtrMutComptimeVar : ConstPtrMutInfer;
@@ -14448,7 +14448,7 @@ static IrInstruction *ir_resolve_result_raw(IrAnalyze *ira, IrInstruction *suspe
return nullptr;
}
// need to return a result location and don't have one. use a stack allocation
- IrInstructionAllocaGen *alloca_gen = ir_create_alloca_gen(ira, suspend_source_instr, 0, "");
+ IrInstructionAllocaGen *alloca_gen = ir_build_alloca_gen(ira, suspend_source_instr, 0, "");
if ((err = type_resolve(ira->codegen, value_type, ResolveStatusZeroBitsKnown)))
return ira->codegen->invalid_instruction;
alloca_gen->base.value.type = get_pointer_to_type_extra(ira->codegen, value_type, false, false,
@@ -24357,7 +24357,7 @@ static IrInstruction *ir_analyze_instruction_suspend_br(IrAnalyze *ira, IrInstru
ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
ir_assert(fn_entry != nullptr, &instruction->base);
- new_bb->resume_index = fn_entry->resume_blocks.length + coro_extra_resume_block_count;
+ new_bb->split_llvm_fn = reinterpret_cast(0x1);
fn_entry->resume_blocks.append(new_bb);
if (fn_entry->inferred_async_node == nullptr) {
diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig
index fddc912e77..7a8a4a07df 100644
--- a/test/stage1/behavior/coroutines.zig
+++ b/test/stage1/behavior/coroutines.zig
@@ -82,55 +82,55 @@ test "local variable in async function" {
S.doTheTest();
}
-test "calling an inferred async function" {
- const S = struct {
- var x: i32 = 1;
- var other_frame: *@Frame(other) = undefined;
-
- fn doTheTest() void {
- const p = async first();
- expect(x == 1);
- resume other_frame.*;
- expect(x == 2);
- }
-
- fn first() void {
- other();
- }
- fn other() void {
- other_frame = @frame();
- suspend;
- x += 1;
- }
- };
- S.doTheTest();
-}
-
-test "@frameSize" {
- const S = struct {
- fn doTheTest() void {
- {
- var ptr = @ptrCast(async fn(i32) void, other);
- const size = @frameSize(ptr);
- expect(size == @sizeOf(@Frame(other)));
- }
- {
- var ptr = @ptrCast(async fn() void, first);
- const size = @frameSize(ptr);
- expect(size == @sizeOf(@Frame(first)));
- }
- }
-
- fn first() void {
- other(1);
- }
- fn other(param: i32) void {
- var local: i32 = undefined;
- suspend;
- }
- };
- S.doTheTest();
-}
+//test "calling an inferred async function" {
+// const S = struct {
+// var x: i32 = 1;
+// var other_frame: *@Frame(other) = undefined;
+//
+// fn doTheTest() void {
+// const p = async first();
+// expect(x == 1);
+// resume other_frame.*;
+// expect(x == 2);
+// }
+//
+// fn first() void {
+// other();
+// }
+// fn other() void {
+// other_frame = @frame();
+// suspend;
+// x += 1;
+// }
+// };
+// S.doTheTest();
+//}
+//
+//test "@frameSize" {
+// const S = struct {
+// fn doTheTest() void {
+// {
+// var ptr = @ptrCast(async fn(i32) void, other);
+// const size = @frameSize(ptr);
+// expect(size == @sizeOf(@Frame(other)));
+// }
+// {
+// var ptr = @ptrCast(async fn() void, first);
+// const size = @frameSize(ptr);
+// expect(size == @sizeOf(@Frame(first)));
+// }
+// }
+//
+// fn first() void {
+// other(1);
+// }
+// fn other(param: i32) void {
+// var local: i32 = undefined;
+// suspend;
+// }
+// };
+// S.doTheTest();
+//}
//test "coroutine suspend, resume" {
// seq('a');
--
cgit v1.2.3
From 0f879d02a4c4b1de0e28c2863c1e5f3760eb5b19 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 1 Aug 2019 19:14:48 -0400
Subject: more passing coroutine tests
---
BRANCH_TODO | 3 +
src/ir.cpp | 1 -
test/stage1/behavior/coroutines.zig | 107 ++++++++++++++++++------------------
3 files changed, 56 insertions(+), 55 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/BRANCH_TODO b/BRANCH_TODO
index 4e128b78a1..f5db81a080 100644
--- a/BRANCH_TODO
+++ b/BRANCH_TODO
@@ -6,7 +6,10 @@
* cancel
* defer and errdefer
* safety for resuming when it is awaiting
+ * safety for double await
* implicit cast of normal function to async function should be allowed when it is inferred to be async
* go over the commented out tests
* revive std.event.Loop
* @typeInfo for @Frame(func)
+ * peer type resolution of *@Frame(func) and anyframe
+ * peer type resolution of *@Frame(func) and anyframe->T when the return type matches
diff --git a/src/ir.cpp b/src/ir.cpp
index 3d376270f6..3a6853b034 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -24380,7 +24380,6 @@ static IrInstruction *ir_analyze_instruction_coro_resume(IrAnalyze *ira, IrInstr
IrInstruction *frame;
if (frame_ptr->value.type->id == ZigTypeIdPointer &&
frame_ptr->value.type->data.pointer.ptr_len == PtrLenSingle &&
- frame_ptr->value.type->data.pointer.is_const &&
frame_ptr->value.type->data.pointer.child_type->id == ZigTypeIdAnyFrame)
{
frame = ir_get_deref(ira, &instruction->base, frame_ptr, nullptr);
diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig
index fddc912e77..01237ed1c9 100644
--- a/test/stage1/behavior/coroutines.zig
+++ b/test/stage1/behavior/coroutines.zig
@@ -132,56 +132,55 @@ test "@frameSize" {
S.doTheTest();
}
-//test "coroutine suspend, resume" {
-// seq('a');
-// const p = try async testAsyncSeq();
-// seq('c');
-// resume p;
-// seq('f');
-// cancel p;
-// seq('g');
-//
-// expect(std.mem.eql(u8, points, "abcdefg"));
-//}
-//async fn testAsyncSeq() void {
-// defer seq('e');
-//
-// seq('b');
-// suspend;
-// seq('d');
-//}
-//var points = [_]u8{0} ** "abcdefg".len;
-//var index: usize = 0;
-//
-//fn seq(c: u8) void {
-// points[index] = c;
-// index += 1;
-//}
-//
-//test "coroutine suspend with block" {
-// const p = try async testSuspendBlock();
-// std.testing.expect(!result);
-// resume a_promise;
-// std.testing.expect(result);
-// cancel p;
-//}
-//
-//var a_promise: promise = undefined;
-//var result = false;
-//async fn testSuspendBlock() void {
-// suspend {
-// comptime expect(@typeOf(@handle()) == promise->void);
-// a_promise = @handle();
-// }
-//
-// //Test to make sure that @handle() works as advertised (issue #1296)
-// //var our_handle: promise = @handle();
-// expect(a_promise == @handle());
-//
-// result = true;
-//}
-//
-//var await_a_promise: promise = undefined;
+test "coroutine suspend, resume" {
+ seq('a');
+ const p = async testAsyncSeq();
+ seq('c');
+ resume p;
+ seq('f');
+ // `cancel` is now a suspend point so it cannot be done here
+ seq('g');
+
+ expect(std.mem.eql(u8, points, "abcdefg"));
+}
+async fn testAsyncSeq() void {
+ defer seq('e');
+
+ seq('b');
+ suspend;
+ seq('d');
+}
+var points = [_]u8{0} ** "abcdefg".len;
+var index: usize = 0;
+
+fn seq(c: u8) void {
+ points[index] = c;
+ index += 1;
+}
+
+test "coroutine suspend with block" {
+ const p = async testSuspendBlock();
+ expect(!result);
+ resume a_promise;
+ expect(result);
+}
+
+var a_promise: anyframe = undefined;
+var result = false;
+async fn testSuspendBlock() void {
+ suspend {
+ comptime expect(@typeOf(@frame()) == *@Frame(testSuspendBlock));
+ a_promise = @frame();
+ }
+
+ // Test to make sure that @frame() works as advertised (issue #1296)
+ // var our_handle: anyframe = @frame();
+ expect(a_promise == anyframe(@frame()));
+
+ result = true;
+}
+
+//var await_a_promise: anyframe = undefined;
//var await_final_result: i32 = 0;
//
//test "coroutine await" {
@@ -204,7 +203,7 @@ test "@frameSize" {
// await_seq('c');
// suspend {
// await_seq('d');
-// await_a_promise = @handle();
+// await_a_promise = @frame();
// }
// await_seq('g');
// return 1234;
@@ -314,14 +313,14 @@ test "@frameSize" {
// cancel p2;
//}
//
-//fn nonFailing() (promise->anyerror!void) {
+//fn nonFailing() (anyframe->anyerror!void) {
// return async suspendThenFail() catch unreachable;
//}
//async fn suspendThenFail() anyerror!void {
// suspend;
// return error.Fail;
//}
-//async fn printTrace(p: promise->(anyerror!void)) void {
+//async fn printTrace(p: anyframe->(anyerror!void)) void {
// (await p) catch |e| {
// std.testing.expect(e == error.Fail);
// if (@errorReturnTrace()) |trace| {
@@ -343,7 +342,7 @@ test "@frameSize" {
//}
//async fn testBreakFromSuspend(my_result: *i32) void {
// suspend {
-// resume @handle();
+// resume @frame();
// }
// my_result.* += 1;
// suspend;
--
cgit v1.2.3
From 056c4e2c988c0a2ff6f1be8fe18a0a056d848271 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 2 Aug 2019 01:05:34 -0400
Subject: implement async await and return
---
BRANCH_TODO | 6 +-
src/all_types.hpp | 10 +-
src/analyze.cpp | 25 ++-
src/ast_render.cpp | 4 +-
src/codegen.cpp | 162 +++++++++++++++++-
src/ir.cpp | 97 +++++++++--
src/ir_print.cpp | 9 +
test/stage1/behavior/coroutine_await_struct.zig | 8 +-
test/stage1/behavior/coroutines.zig | 209 +++++++++++-------------
9 files changed, 384 insertions(+), 146 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/BRANCH_TODO b/BRANCH_TODO
index f5db81a080..6d6ae42529 100644
--- a/BRANCH_TODO
+++ b/BRANCH_TODO
@@ -1,4 +1,5 @@
- * await
+ * compile error for error: expected anyframe->T, found 'anyframe'
+ * compile error for error: expected anyframe->T, found 'i32'
* await of a non async function
* await in single-threaded mode
* async call on a non async function
@@ -13,3 +14,6 @@
* @typeInfo for @Frame(func)
* peer type resolution of *@Frame(func) and anyframe
* peer type resolution of *@Frame(func) and anyframe->T when the return type matches
+ * returning a value from within a suspend block
+ * struct types as the return type of an async function. make sure it works with return result locations.
+ * make resuming inside a suspend block, with nothing after it, a must-tail call.
diff --git a/src/all_types.hpp b/src/all_types.hpp
index e66c9aebff..9ab90b2785 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -1550,6 +1550,8 @@ enum PanicMsgId {
PanicMsgIdFloatToInt,
PanicMsgIdPtrCastNull,
PanicMsgIdBadResume,
+ PanicMsgIdBadAwait,
+ PanicMsgIdBadReturn,
PanicMsgIdCount,
};
@@ -1795,7 +1797,6 @@ struct CodeGen {
ZigType *entry_arg_tuple;
ZigType *entry_enum_literal;
ZigType *entry_any_frame;
- ZigType *entry_async_fn;
} builtin_types;
ZigType *align_amt_type;
@@ -2348,6 +2349,7 @@ enum IrInstructionId {
IrInstructionIdUnionInitNamedField,
IrInstructionIdSuspendBegin,
IrInstructionIdSuspendBr,
+ IrInstructionIdAwait,
IrInstructionIdCoroResume,
};
@@ -3600,6 +3602,12 @@ struct IrInstructionSuspendBr {
IrBasicBlock *resume_block;
};
+struct IrInstructionAwait {
+ IrInstruction base;
+
+ IrInstruction *frame;
+};
+
struct IrInstructionCoroResume {
IrInstruction base;
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 99caf9688b..5af9698dd1 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -3807,6 +3807,9 @@ static void add_async_error_notes(CodeGen *g, ErrorMsg *msg, ZigFn *fn) {
} else if (fn->inferred_async_node->type == NodeTypeSuspend) {
add_error_note(g, msg, fn->inferred_async_node,
buf_sprintf("suspends here"));
+ } else if (fn->inferred_async_node->type == NodeTypeAwaitExpr) {
+ add_error_note(g, msg, fn->inferred_async_node,
+ buf_sprintf("await is a suspend point"));
} else {
zig_unreachable();
}
@@ -7361,7 +7364,7 @@ static void resolve_llvm_types_fn_type(CodeGen *g, ZigType *fn_type) {
param_di_types.append(get_llvm_di_type(g, gen_type));
}
if (is_async) {
- fn_type->data.fn.gen_param_info = allocate(1);
+ fn_type->data.fn.gen_param_info = allocate(2);
ZigType *frame_type = get_any_frame_type(g, fn_type_id->return_type);
gen_param_types.append(get_llvm_type(g, frame_type));
@@ -7370,6 +7373,13 @@ static void resolve_llvm_types_fn_type(CodeGen *g, ZigType *fn_type) {
fn_type->data.fn.gen_param_info[0].src_index = 0;
fn_type->data.fn.gen_param_info[0].gen_index = 0;
fn_type->data.fn.gen_param_info[0].type = frame_type;
+
+ gen_param_types.append(get_llvm_type(g, g->builtin_types.entry_usize));
+ param_di_types.append(get_llvm_di_type(g, g->builtin_types.entry_usize));
+
+ fn_type->data.fn.gen_param_info[1].src_index = 1;
+ fn_type->data.fn.gen_param_info[1].gen_index = 1;
+ fn_type->data.fn.gen_param_info[1].type = g->builtin_types.entry_usize;
} else {
fn_type->data.fn.gen_param_info = allocate(fn_type_id->param_count);
for (size_t i = 0; i < fn_type_id->param_count; i += 1) {
@@ -7434,15 +7444,21 @@ void resolve_llvm_types_fn(CodeGen *g, ZigFn *fn) {
ZigType *gen_return_type = g->builtin_types.entry_void;
ZigList param_di_types = {};
+ ZigList gen_param_types = {};
// first "parameter" is return value
param_di_types.append(get_llvm_di_type(g, gen_return_type));
ZigType *frame_type = get_coro_frame_type(g, fn);
ZigType *ptr_type = get_pointer_to_type(g, frame_type, false);
- LLVMTypeRef gen_param_type = get_llvm_type(g, ptr_type);
+ gen_param_types.append(get_llvm_type(g, ptr_type));
param_di_types.append(get_llvm_di_type(g, ptr_type));
- fn->raw_type_ref = LLVMFunctionType(get_llvm_type(g, gen_return_type), &gen_param_type, 1, false);
+ // this parameter is used to pass the result pointer when await completes
+ gen_param_types.append(get_llvm_type(g, g->builtin_types.entry_usize));
+ param_di_types.append(get_llvm_di_type(g, g->builtin_types.entry_usize));
+
+ fn->raw_type_ref = LLVMFunctionType(get_llvm_type(g, gen_return_type),
+ gen_param_types.items, gen_param_types.length, false);
fn->raw_di_type = ZigLLVMCreateSubroutineType(g->dbuilder, param_di_types.items, (int)param_di_types.length, 0);
}
@@ -7493,7 +7509,8 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re
8*g->pointer_size_bytes, 8*g->builtin_types.entry_usize->abi_align, buf_ptr(&any_frame_type->name));
LLVMTypeRef llvm_void = LLVMVoidType();
- LLVMTypeRef fn_type = LLVMFunctionType(llvm_void, &any_frame_type->llvm_type, 1, false);
+ LLVMTypeRef arg_types[] = {any_frame_type->llvm_type, g->builtin_types.entry_usize->llvm_type};
+ LLVMTypeRef fn_type = LLVMFunctionType(llvm_void, arg_types, 2, false);
LLVMTypeRef usize_type_ref = get_llvm_type(g, g->builtin_types.entry_usize);
ZigLLVMDIType *usize_di_type = get_llvm_di_type(g, g->builtin_types.entry_usize);
ZigLLVMDIScope *compile_unit_scope = ZigLLVMCompileUnitToScope(g->compile_unit);
diff --git a/src/ast_render.cpp b/src/ast_render.cpp
index 4d6bae311b..98e11e24c9 100644
--- a/src/ast_render.cpp
+++ b/src/ast_render.cpp
@@ -1149,9 +1149,11 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
}
case NodeTypeSuspend:
{
- fprintf(ar->f, "suspend");
if (node->data.suspend.block != nullptr) {
+ fprintf(ar->f, "suspend ");
render_node_grouped(ar, node->data.suspend.block);
+ } else {
+ fprintf(ar->f, "suspend\n");
}
break;
}
diff --git a/src/codegen.cpp b/src/codegen.cpp
index d0aadaabe1..6fe46acbbf 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -873,6 +873,10 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) {
return buf_create_from_str("cast causes pointer to be null");
case PanicMsgIdBadResume:
return buf_create_from_str("invalid resume of async function");
+ case PanicMsgIdBadAwait:
+ return buf_create_from_str("async function awaited twice");
+ case PanicMsgIdBadReturn:
+ return buf_create_from_str("async function returned twice");
}
zig_unreachable();
}
@@ -1991,14 +1995,66 @@ static LLVMValueRef ir_render_save_err_ret_addr(CodeGen *g, IrExecutable *execut
static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrInstructionReturn *return_instruction) {
if (fn_is_async(g->cur_fn)) {
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
+ LLVMValueRef locals_ptr = g->cur_ret_ptr;
+ bool ret_type_has_bits = return_instruction->value != nullptr &&
+ type_has_bits(return_instruction->value->value.type);
+ ZigType *ret_type = ret_type_has_bits ? return_instruction->value->value.type : nullptr;
+
if (ir_want_runtime_safety(g, &return_instruction->base)) {
- LLVMValueRef locals_ptr = g->cur_ret_ptr;
LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, locals_ptr, coro_fn_ptr_index, "");
LLVMValueRef new_resume_fn = g->cur_fn->resume_blocks.last()->split_llvm_fn;
LLVMBuildStore(g->builder, new_resume_fn, resume_index_ptr);
}
+ LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, locals_ptr, coro_awaiter_index, "");
+ LLVMValueRef result_ptr_as_usize;
+ if (ret_type_has_bits) {
+ LLVMValueRef result_ptr_ptr = LLVMBuildStructGEP(g->builder, locals_ptr, coro_arg_start, "");
+ LLVMValueRef result_ptr = LLVMBuildLoad(g->builder, result_ptr_ptr, "");
+ if (!handle_is_ptr(ret_type)) {
+ // It's a scalar, so it didn't get written to the result ptr. Do that now.
+ LLVMBuildStore(g->builder, ir_llvm_value(g, return_instruction->value), result_ptr);
+ }
+ result_ptr_as_usize = LLVMBuildPtrToInt(g->builder, result_ptr, usize_type_ref, "");
+ } else {
+ result_ptr_as_usize = LLVMGetUndef(usize_type_ref);
+ }
+ LLVMValueRef zero = LLVMConstNull(usize_type_ref);
+ LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref);
+ LLVMValueRef prev_val = LLVMBuildAtomicRMW(g->builder, LLVMAtomicRMWBinOpXchg, awaiter_ptr,
+ all_ones, LLVMAtomicOrderingSequentiallyConsistent, g->is_single_threaded);
+
+ LLVMBasicBlockRef bad_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadReturn");
+ LLVMBasicBlockRef early_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "EarlyReturn");
+ LLVMBasicBlockRef resume_them_block = LLVMAppendBasicBlock(g->cur_fn_val, "ResumeThem");
+
+ LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, prev_val, resume_them_block, 2);
+
+ LLVMAddCase(switch_instr, zero, early_return_block);
+ LLVMAddCase(switch_instr, all_ones, bad_return_block);
+
+ // Something has gone horribly wrong, and this is an invalid second return.
+ LLVMPositionBuilderAtEnd(g->builder, bad_return_block);
+ gen_assertion(g, PanicMsgIdBadReturn, &return_instruction->base);
+
+ // The caller will deal with fetching the result - we're done.
+ LLVMPositionBuilderAtEnd(g->builder, early_return_block);
LLVMBuildRetVoid(g->builder);
+
+ // We need to resume the caller by tail calling them.
+ LLVMPositionBuilderAtEnd(g->builder, resume_them_block);
+ ZigType *any_frame_type = get_any_frame_type(g, ret_type);
+ LLVMValueRef their_frame_ptr = LLVMBuildIntToPtr(g->builder, prev_val,
+ get_llvm_type(g, any_frame_type), "");
+ LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, their_frame_ptr, coro_fn_ptr_index, "");
+ LLVMValueRef awaiter_fn = LLVMBuildLoad(g->builder, fn_ptr_ptr, "");
+ LLVMValueRef args[] = {their_frame_ptr, result_ptr_as_usize};
+ LLVMValueRef call_inst = ZigLLVMBuildCall(g->builder, awaiter_fn, args, 2, LLVMFastCallConv,
+ ZigLLVM_FnInlineAuto, "");
+ ZigLLVMSetTailCall(call_inst);
+ LLVMBuildRetVoid(g->builder);
+
return nullptr;
}
if (want_first_arg_sret(g, &g->cur_fn->type_entry->data.fn.fn_type_id)) {
@@ -3514,14 +3570,17 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
}
}
if (instruction->is_async) {
- ZigLLVMBuildCall(g->builder, fn_val, &frame_result_loc, 1, llvm_cc, fn_inline, "");
+ LLVMValueRef args[] = {frame_result_loc, LLVMGetUndef(g->builtin_types.entry_usize->llvm_type)};
+ ZigLLVMBuildCall(g->builder, fn_val, args, 2, llvm_cc, fn_inline, "");
return nullptr;
} else if (callee_is_async) {
+ ZigType *ptr_result_type = get_pointer_to_type(g, src_return_type, true);
LLVMValueRef split_llvm_fn = make_fn_llvm_value(g, g->cur_fn);
LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_fn_ptr_index, "");
LLVMBuildStore(g->builder, split_llvm_fn, fn_ptr_ptr);
- LLVMValueRef call_inst = ZigLLVMBuildCall(g->builder, fn_val, &frame_result_loc, 1, llvm_cc, fn_inline, "");
+ LLVMValueRef args[] = {frame_result_loc, LLVMGetUndef(g->builtin_types.entry_usize->llvm_type)};
+ LLVMValueRef call_inst = ZigLLVMBuildCall(g->builder, fn_val, args, 2, llvm_cc, fn_inline, "");
ZigLLVMSetTailCall(call_inst);
LLVMBuildRetVoid(g->builder);
@@ -3530,7 +3589,15 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
LLVMBasicBlockRef call_bb = LLVMAppendBasicBlock(split_llvm_fn, "CallResume");
LLVMPositionBuilderAtEnd(g->builder, call_bb);
render_async_var_decls(g, instruction->base.scope);
- return nullptr;
+
+ if (type_has_bits(src_return_type)) {
+ LLVMValueRef spilled_result_ptr = LLVMGetParam(g->cur_fn_val, 1);
+ LLVMValueRef casted_spilled_result_ptr = LLVMBuildIntToPtr(g->builder, spilled_result_ptr,
+ get_llvm_type(g, ptr_result_type), "");
+ return get_handle_value(g, casted_spilled_result_ptr, src_return_type, ptr_result_type);
+ } else {
+ return nullptr;
+ }
}
if (instruction->new_stack == nullptr) {
@@ -4829,7 +4896,7 @@ static LLVMValueRef ir_render_atomic_rmw(CodeGen *g, IrExecutable *executable,
LLVMValueRef operand = ir_llvm_value(g, instruction->operand);
if (get_codegen_ptr_type(operand_type) == nullptr) {
- return LLVMBuildAtomicRMW(g->builder, op, ptr, operand, ordering, false);
+ return LLVMBuildAtomicRMW(g->builder, op, ptr, operand, ordering, g->is_single_threaded);
}
// it's a pointer but we need to treat it as an int
@@ -4990,14 +5057,89 @@ static LLVMValueRef ir_render_suspend_br(CodeGen *g, IrExecutable *executable,
return nullptr;
}
+static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInstructionAwait *instruction) {
+ LLVMValueRef target_frame_ptr = ir_llvm_value(g, instruction->frame);
+ ZigType *result_type = instruction->base.value.type;
+ ZigType *ptr_result_type = get_pointer_to_type(g, result_type, true);
+
+ // Prepare to be suspended
+ LLVMValueRef split_llvm_fn = make_fn_llvm_value(g, g->cur_fn);
+ LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_fn_ptr_index, "");
+ LLVMBuildStore(g->builder, split_llvm_fn, fn_ptr_ptr);
+
+ // At this point resuming the function will do the correct thing.
+ // This code is as if it is running inside the suspend block.
+
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
+ // caller's own frame pointer
+ LLVMValueRef awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_ret_ptr, usize_type_ref, "");
+ LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_awaiter_index, "");
+ LLVMValueRef result_ptr_as_usize;
+ if (type_has_bits(result_type)) {
+ LLVMValueRef result_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_arg_start, "");
+ LLVMValueRef result_ptr = LLVMBuildLoad(g->builder, result_ptr_ptr, "");
+ result_ptr_as_usize = LLVMBuildPtrToInt(g->builder, result_ptr, usize_type_ref, "");
+ } else {
+ result_ptr_as_usize = LLVMGetUndef(usize_type_ref);
+ }
+ LLVMValueRef prev_val = LLVMBuildAtomicRMW(g->builder, LLVMAtomicRMWBinOpXchg, awaiter_ptr, awaiter_init_val,
+ LLVMAtomicOrderingSequentiallyConsistent, g->is_single_threaded);
+
+ LLVMBasicBlockRef bad_await_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadAwait");
+ LLVMBasicBlockRef complete_suspend_block = LLVMAppendBasicBlock(g->cur_fn_val, "CompleteSuspend");
+ LLVMBasicBlockRef early_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "EarlyReturn");
+
+ LLVMValueRef zero = LLVMConstNull(usize_type_ref);
+ LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref);
+ LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, prev_val, bad_await_block, 2);
+
+ LLVMAddCase(switch_instr, zero, complete_suspend_block);
+ LLVMAddCase(switch_instr, all_ones, early_return_block);
+
+ // We discovered that another awaiter was already here.
+ LLVMPositionBuilderAtEnd(g->builder, bad_await_block);
+ gen_assertion(g, PanicMsgIdBadAwait, &instruction->base);
+
+ // Rely on the target to resume us from suspension.
+ LLVMPositionBuilderAtEnd(g->builder, complete_suspend_block);
+ LLVMBuildRetVoid(g->builder);
+
+ // The async function has already completed. So we use a tail call to resume ourselves.
+ LLVMPositionBuilderAtEnd(g->builder, early_return_block);
+ LLVMValueRef args[] = {g->cur_ret_ptr, result_ptr_as_usize};
+ LLVMValueRef call_inst = ZigLLVMBuildCall(g->builder, split_llvm_fn, args, 2, LLVMFastCallConv,
+ ZigLLVM_FnInlineAuto, "");
+ ZigLLVMSetTailCall(call_inst);
+ LLVMBuildRetVoid(g->builder);
+
+ g->cur_fn_val = split_llvm_fn;
+ g->cur_ret_ptr = LLVMGetParam(split_llvm_fn, 0);
+ LLVMBasicBlockRef call_bb = LLVMAppendBasicBlock(split_llvm_fn, "AwaitResume");
+ LLVMPositionBuilderAtEnd(g->builder, call_bb);
+ render_async_var_decls(g, instruction->base.scope);
+
+ if (type_has_bits(result_type)) {
+ LLVMValueRef spilled_result_ptr = LLVMGetParam(g->cur_fn_val, 1);
+ LLVMValueRef casted_spilled_result_ptr = LLVMBuildIntToPtr(g->builder, spilled_result_ptr,
+ get_llvm_type(g, ptr_result_type), "");
+ return get_handle_value(g, casted_spilled_result_ptr, result_type, ptr_result_type);
+ } else {
+ return nullptr;
+ }
+}
+
static LLVMTypeRef anyframe_fn_type(CodeGen *g) {
if (g->anyframe_fn_type != nullptr)
return g->anyframe_fn_type;
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
ZigType *anyframe_type = get_any_frame_type(g, nullptr);
- LLVMTypeRef param_type = get_llvm_type(g, anyframe_type);
LLVMTypeRef return_type = LLVMVoidType();
- LLVMTypeRef fn_type = LLVMFunctionType(return_type, ¶m_type, 1, false);
+ LLVMTypeRef param_types[] = {
+ get_llvm_type(g, anyframe_type),
+ usize_type_ref,
+ };
+ LLVMTypeRef fn_type = LLVMFunctionType(return_type, param_types, 2, false);
g->anyframe_fn_type = LLVMPointerType(fn_type, 0);
return g->anyframe_fn_type;
@@ -5006,13 +5148,15 @@ static LLVMTypeRef anyframe_fn_type(CodeGen *g) {
static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable,
IrInstructionCoroResume *instruction)
{
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
LLVMValueRef frame = ir_llvm_value(g, instruction->frame);
ZigType *frame_type = instruction->frame->value.type;
assert(frame_type->id == ZigTypeIdAnyFrame);
LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, frame, coro_fn_ptr_index, "");
LLVMValueRef uncasted_fn_val = LLVMBuildLoad(g->builder, fn_ptr_ptr, "");
LLVMValueRef fn_val = LLVMBuildIntToPtr(g->builder, uncasted_fn_val, anyframe_fn_type(g), "");
- ZigLLVMBuildCall(g->builder, fn_val, &frame, 1, LLVMFastCallConv, ZigLLVM_FnInlineAuto, "");
+ LLVMValueRef args[] = {frame, LLVMGetUndef(usize_type_ref)};
+ ZigLLVMBuildCall(g->builder, fn_val, args, 2, LLVMFastCallConv, ZigLLVM_FnInlineAuto, "");
return nullptr;
}
@@ -5279,6 +5423,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_coro_resume(g, executable, (IrInstructionCoroResume *)instruction);
case IrInstructionIdFrameSizeGen:
return ir_render_frame_size(g, executable, (IrInstructionFrameSizeGen *)instruction);
+ case IrInstructionIdAwait:
+ return ir_render_await(g, executable, (IrInstructionAwait *)instruction);
}
zig_unreachable();
}
diff --git a/src/ir.cpp b/src/ir.cpp
index 3a6853b034..ecd2cd6f87 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -1052,6 +1052,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionSuspendBr *) {
return IrInstructionIdSuspendBr;
}
+static constexpr IrInstructionId ir_instruction_id(IrInstructionAwait *) {
+ return IrInstructionIdAwait;
+}
+
static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroResume *) {
return IrInstructionIdCoroResume;
}
@@ -3274,6 +3278,17 @@ static IrInstruction *ir_build_suspend_br(IrBuilder *irb, Scope *scope, AstNode
return &instruction->base;
}
+static IrInstruction *ir_build_await(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ IrInstruction *frame)
+{
+ IrInstructionAwait *instruction = ir_build_instruction(irb, scope, source_node);
+ instruction->frame = frame;
+
+ ir_ref_instruction(frame, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
static IrInstruction *ir_build_coro_resume(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *frame)
{
@@ -7774,11 +7789,26 @@ static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node)
static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *node) {
assert(node->type == NodeTypeAwaitExpr);
- IrInstruction *target_inst = ir_gen_node(irb, node->data.await_expr.expr, scope);
+ ZigFn *fn_entry = exec_fn_entry(irb->exec);
+ if (!fn_entry) {
+ add_node_error(irb->codegen, node, buf_sprintf("await outside function definition"));
+ return irb->codegen->invalid_instruction;
+ }
+ ScopeSuspend *existing_suspend_scope = get_scope_suspend(scope);
+ if (existing_suspend_scope) {
+ if (!existing_suspend_scope->reported_err) {
+ ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot await inside suspend block"));
+ add_error_note(irb->codegen, msg, existing_suspend_scope->base.source_node, buf_sprintf("suspend block here"));
+ existing_suspend_scope->reported_err = true;
+ }
+ return irb->codegen->invalid_instruction;
+ }
+
+ IrInstruction *target_inst = ir_gen_node_extra(irb, node->data.await_expr.expr, scope, LValPtr, nullptr);
if (target_inst == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
- zig_panic("TODO ir_gen_await_expr");
+ return ir_build_await(irb, scope, node, target_inst);
}
static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNode *node) {
@@ -7789,15 +7819,6 @@ static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNod
add_node_error(irb->codegen, node, buf_sprintf("suspend outside function definition"));
return irb->codegen->invalid_instruction;
}
- ScopeDeferExpr *scope_defer_expr = get_scope_defer_expr(parent_scope);
- if (scope_defer_expr) {
- if (!scope_defer_expr->reported_err) {
- ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot suspend inside defer expression"));
- add_error_note(irb->codegen, msg, scope_defer_expr->base.source_node, buf_sprintf("defer here"));
- scope_defer_expr->reported_err = true;
- }
- return irb->codegen->invalid_instruction;
- }
ScopeSuspend *existing_suspend_scope = get_scope_suspend(parent_scope);
if (existing_suspend_scope) {
if (!existing_suspend_scope->reported_err) {
@@ -7808,7 +7829,7 @@ static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNod
return irb->codegen->invalid_instruction;
}
- IrBasicBlock *resume_block = ir_create_basic_block(irb, parent_scope, "Resume");
+ IrBasicBlock *resume_block = ir_create_basic_block(irb, parent_scope, "SuspendResume");
ir_build_suspend_begin(irb, parent_scope, node, resume_block);
if (node->data.suspend.block != nullptr) {
@@ -24372,19 +24393,62 @@ static IrInstruction *ir_analyze_instruction_suspend_br(IrAnalyze *ira, IrInstru
return ir_finish_anal(ira, result);
}
-static IrInstruction *ir_analyze_instruction_coro_resume(IrAnalyze *ira, IrInstructionCoroResume *instruction) {
+static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstructionAwait *instruction) {
IrInstruction *frame_ptr = instruction->frame->child;
if (type_is_invalid(frame_ptr->value.type))
return ira->codegen->invalid_instruction;
+ ZigType *result_type;
IrInstruction *frame;
if (frame_ptr->value.type->id == ZigTypeIdPointer &&
frame_ptr->value.type->data.pointer.ptr_len == PtrLenSingle &&
- frame_ptr->value.type->data.pointer.child_type->id == ZigTypeIdAnyFrame)
+ frame_ptr->value.type->data.pointer.child_type->id == ZigTypeIdCoroFrame)
{
- frame = ir_get_deref(ira, &instruction->base, frame_ptr, nullptr);
+ result_type = frame_ptr->value.type->data.pointer.child_type->data.frame.fn->type_entry->data.fn.fn_type_id.return_type;
+ frame = frame_ptr;
} else {
+ frame = ir_get_deref(ira, &instruction->base, frame_ptr, nullptr);
+ if (frame->value.type->id != ZigTypeIdAnyFrame ||
+ frame->value.type->data.any_frame.result_type == nullptr)
+ {
+ ir_add_error(ira, &instruction->base,
+ buf_sprintf("expected anyframe->T, found '%s'", buf_ptr(&frame->value.type->name)));
+ return ira->codegen->invalid_instruction;
+ }
+ result_type = frame->value.type->data.any_frame.result_type;
+ }
+
+ ZigType *any_frame_type = get_any_frame_type(ira->codegen, result_type);
+ IrInstruction *casted_frame = ir_implicit_cast(ira, frame, any_frame_type);
+ if (type_is_invalid(casted_frame->value.type))
+ return ira->codegen->invalid_instruction;
+
+ ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
+ ir_assert(fn_entry != nullptr, &instruction->base);
+
+ if (fn_entry->inferred_async_node == nullptr) {
+ fn_entry->inferred_async_node = instruction->base.source_node;
+ }
+
+ IrInstruction *result = ir_build_await(&ira->new_irb,
+ instruction->base.scope, instruction->base.source_node, frame);
+ result->value.type = result_type;
+ return ir_finish_anal(ira, result);
+}
+
+static IrInstruction *ir_analyze_instruction_coro_resume(IrAnalyze *ira, IrInstructionCoroResume *instruction) {
+ IrInstruction *frame_ptr = instruction->frame->child;
+ if (type_is_invalid(frame_ptr->value.type))
+ return ira->codegen->invalid_instruction;
+
+ IrInstruction *frame;
+ if (frame_ptr->value.type->id == ZigTypeIdPointer &&
+ frame_ptr->value.type->data.pointer.ptr_len == PtrLenSingle &&
+ frame_ptr->value.type->data.pointer.child_type->id == ZigTypeIdCoroFrame)
+ {
frame = frame_ptr;
+ } else {
+ frame = ir_get_deref(ira, &instruction->base, frame_ptr, nullptr);
}
ZigType *any_frame_type = get_any_frame_type(ira->codegen, nullptr);
@@ -24691,6 +24755,8 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_suspend_br(ira, (IrInstructionSuspendBr *)instruction);
case IrInstructionIdCoroResume:
return ir_analyze_instruction_coro_resume(ira, (IrInstructionCoroResume *)instruction);
+ case IrInstructionIdAwait:
+ return ir_analyze_instruction_await(ira, (IrInstructionAwait *)instruction);
}
zig_unreachable();
}
@@ -24826,6 +24892,7 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdSuspendBegin:
case IrInstructionIdSuspendBr:
case IrInstructionIdCoroResume:
+ case IrInstructionIdAwait:
return true;
case IrInstructionIdPhi:
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index 284ebed2f3..46d2906d30 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -1546,6 +1546,12 @@ static void ir_print_coro_resume(IrPrint *irp, IrInstructionCoroResume *instruct
fprintf(irp->f, ")");
}
+static void ir_print_await(IrPrint *irp, IrInstructionAwait *instruction) {
+ fprintf(irp->f, "@await(");
+ ir_print_other_instruction(irp, instruction->frame);
+ fprintf(irp->f, ")");
+}
+
static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
ir_print_prefix(irp, instruction);
switch (instruction->id) {
@@ -2025,6 +2031,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdCoroResume:
ir_print_coro_resume(irp, (IrInstructionCoroResume *)instruction);
break;
+ case IrInstructionIdAwait:
+ ir_print_await(irp, (IrInstructionAwait *)instruction);
+ break;
}
fprintf(irp->f, "\n");
}
diff --git a/test/stage1/behavior/coroutine_await_struct.zig b/test/stage1/behavior/coroutine_await_struct.zig
index 66ff8bb492..a649b0a39b 100644
--- a/test/stage1/behavior/coroutine_await_struct.zig
+++ b/test/stage1/behavior/coroutine_await_struct.zig
@@ -6,12 +6,12 @@ const Foo = struct {
x: i32,
};
-var await_a_promise: promise = undefined;
+var await_a_promise: anyframe = undefined;
var await_final_result = Foo{ .x = 0 };
test "coroutine await struct" {
await_seq('a');
- const p = async await_amain() catch unreachable;
+ const p = async await_amain();
await_seq('f');
resume await_a_promise;
await_seq('i');
@@ -20,7 +20,7 @@ test "coroutine await struct" {
}
async fn await_amain() void {
await_seq('b');
- const p = async await_another() catch unreachable;
+ const p = async await_another();
await_seq('e');
await_final_result = await p;
await_seq('h');
@@ -29,7 +29,7 @@ async fn await_another() Foo {
await_seq('c');
suspend {
await_seq('d');
- await_a_promise = @handle();
+ await_a_promise = @frame();
}
await_seq('g');
return Foo{ .x = 1234 };
diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig
index 01237ed1c9..28dd834bfe 100644
--- a/test/stage1/behavior/coroutines.zig
+++ b/test/stage1/behavior/coroutines.zig
@@ -180,97 +180,85 @@ async fn testSuspendBlock() void {
result = true;
}
-//var await_a_promise: anyframe = undefined;
-//var await_final_result: i32 = 0;
-//
-//test "coroutine await" {
-// await_seq('a');
-// const p = async await_amain() catch unreachable;
-// await_seq('f');
-// resume await_a_promise;
-// await_seq('i');
-// expect(await_final_result == 1234);
-// expect(std.mem.eql(u8, await_points, "abcdefghi"));
-//}
-//async fn await_amain() void {
-// await_seq('b');
-// const p = async await_another() catch unreachable;
-// await_seq('e');
-// await_final_result = await p;
-// await_seq('h');
-//}
-//async fn await_another() i32 {
-// await_seq('c');
-// suspend {
-// await_seq('d');
-// await_a_promise = @frame();
-// }
-// await_seq('g');
-// return 1234;
-//}
-//
-//var await_points = [_]u8{0} ** "abcdefghi".len;
-//var await_seq_index: usize = 0;
-//
-//fn await_seq(c: u8) void {
-// await_points[await_seq_index] = c;
-// await_seq_index += 1;
-//}
-//
-//var early_final_result: i32 = 0;
-//
-//test "coroutine await early return" {
-// early_seq('a');
-// const p = async early_amain() catch @panic("out of memory");
-// early_seq('f');
-// expect(early_final_result == 1234);
-// expect(std.mem.eql(u8, early_points, "abcdef"));
-//}
-//async fn early_amain() void {
-// early_seq('b');
-// const p = async early_another() catch @panic("out of memory");
-// early_seq('d');
-// early_final_result = await p;
-// early_seq('e');
-//}
-//async fn early_another() i32 {
-// early_seq('c');
-// return 1234;
-//}
-//
-//var early_points = [_]u8{0} ** "abcdef".len;
-//var early_seq_index: usize = 0;
-//
-//fn early_seq(c: u8) void {
-// early_points[early_seq_index] = c;
-// early_seq_index += 1;
-//}
-//
-//test "coro allocation failure" {
-// var failing_allocator = std.debug.FailingAllocator.init(std.debug.global_allocator, 0);
-// if (async<&failing_allocator.allocator> asyncFuncThatNeverGetsRun()) {
-// @panic("expected allocation failure");
-// } else |err| switch (err) {
-// error.OutOfMemory => {},
-// }
-//}
-//async fn asyncFuncThatNeverGetsRun() void {
-// @panic("coro frame allocation should fail");
-//}
-//
-//test "async function with dot syntax" {
-// const S = struct {
-// var y: i32 = 1;
-// async fn foo() void {
-// y += 1;
-// suspend;
-// }
-// };
-// const p = try async S.foo();
-// cancel p;
-// expect(S.y == 2);
-//}
-//
+var await_a_promise: anyframe = undefined;
+var await_final_result: i32 = 0;
+
+test "coroutine await" {
+ await_seq('a');
+ const p = async await_amain();
+ await_seq('f');
+ resume await_a_promise;
+ await_seq('i');
+ expect(await_final_result == 1234);
+ expect(std.mem.eql(u8, await_points, "abcdefghi"));
+}
+async fn await_amain() void {
+ await_seq('b');
+ const p = async await_another();
+ await_seq('e');
+ await_final_result = await p;
+ await_seq('h');
+}
+async fn await_another() i32 {
+ await_seq('c');
+ suspend {
+ await_seq('d');
+ await_a_promise = @frame();
+ }
+ await_seq('g');
+ return 1234;
+}
+
+var await_points = [_]u8{0} ** "abcdefghi".len;
+var await_seq_index: usize = 0;
+
+fn await_seq(c: u8) void {
+ await_points[await_seq_index] = c;
+ await_seq_index += 1;
+}
+
+var early_final_result: i32 = 0;
+
+test "coroutine await early return" {
+ early_seq('a');
+ const p = async early_amain();
+ early_seq('f');
+ expect(early_final_result == 1234);
+ expect(std.mem.eql(u8, early_points, "abcdef"));
+}
+async fn early_amain() void {
+ early_seq('b');
+ const p = async early_another();
+ early_seq('d');
+ early_final_result = await p;
+ early_seq('e');
+}
+async fn early_another() i32 {
+ early_seq('c');
+ return 1234;
+}
+
+var early_points = [_]u8{0} ** "abcdef".len;
+var early_seq_index: usize = 0;
+
+fn early_seq(c: u8) void {
+ early_points[early_seq_index] = c;
+ early_seq_index += 1;
+}
+
+test "async function with dot syntax" {
+ const S = struct {
+ var y: i32 = 1;
+ async fn foo() void {
+ y += 1;
+ suspend;
+ }
+ };
+ const p = async S.foo();
+ // can't cancel in tests because they are non-async functions
+ expect(S.y == 2);
+}
+
//test "async fn pointer in a struct field" {
// var data: i32 = 1;
// const Foo = struct {
@@ -287,18 +275,17 @@ async fn testSuspendBlock() void {
// y.* += 1;
// suspend;
//}
-//
+
//test "async fn with inferred error set" {
-// const p = (async failing()) catch unreachable;
+// const p = async failing();
// resume p;
-// cancel p;
//}
//
//async fn failing() !void {
// suspend;
// return error.Fail;
//}
-//
+
//test "error return trace across suspend points - early return" {
// const p = nonFailing();
// resume p;
@@ -331,20 +318,18 @@ async fn testSuspendBlock() void {
// }
// };
//}
-//
-//test "break from suspend" {
-// var buf: [500]u8 = undefined;
-// var a = &std.heap.FixedBufferAllocator.init(buf[0..]).allocator;
-// var my_result: i32 = 1;
-// const p = try async testBreakFromSuspend(&my_result);
-// cancel p;
-// std.testing.expect(my_result == 2);
-//}
-//async fn testBreakFromSuspend(my_result: *i32) void {
-// suspend {
-// resume @frame();
-// }
-// my_result.* += 1;
-// suspend;
-// my_result.* += 1;
-//}
+
+test "break from suspend" {
+ var my_result: i32 = 1;
+ const p = async testBreakFromSuspend(&my_result);
+ // can't cancel here
+ std.testing.expect(my_result == 2);
+}
+async fn testBreakFromSuspend(my_result: *i32) void {
+ suspend {
+ resume @frame();
+ }
+ my_result.* += 1;
+ suspend;
+ my_result.* += 1;
+}
--
cgit v1.2.3
From a5cb0f77d11bdcc504fe3e6afa928c88de821518 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 2 Aug 2019 13:54:58 -0400
Subject: assignment participates in result location
fix one regression with optionals but there are more
---
src/ir.cpp | 21 +++++++++++++++------
test/stage1/behavior/eval.zig | 10 ++++++++++
2 files changed, 25 insertions(+), 6 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/src/ir.cpp b/src/ir.cpp
index f34c840496..8a46fec7c9 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -4001,12 +4001,20 @@ static IrInstruction *ir_gen_bin_op_id(IrBuilder *irb, Scope *scope, AstNode *no
static IrInstruction *ir_gen_assign(IrBuilder *irb, Scope *scope, AstNode *node) {
IrInstruction *lvalue = ir_gen_node_extra(irb, node->data.bin_op_expr.op1, scope, LValPtr, nullptr);
- IrInstruction *rvalue = ir_gen_node(irb, node->data.bin_op_expr.op2, scope);
+ if (lvalue == irb->codegen->invalid_instruction)
+ return irb->codegen->invalid_instruction;
- if (lvalue == irb->codegen->invalid_instruction || rvalue == irb->codegen->invalid_instruction)
+ ResultLocInstruction *result_loc_inst = allocate(1);
+ result_loc_inst->base.id = ResultLocIdInstruction;
+ result_loc_inst->base.source_instruction = lvalue;
+ ir_ref_instruction(lvalue, irb->current_basic_block);
+ ir_build_reset_result(irb, scope, node, &result_loc_inst->base);
+
+ IrInstruction *rvalue = ir_gen_node_extra(irb, node->data.bin_op_expr.op2, scope, LValNone,
+ &result_loc_inst->base);
+ if (rvalue == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
- ir_build_store_ptr(irb, scope, node, lvalue, rvalue);
return ir_build_const_void(irb, scope, node);
}
@@ -17477,6 +17485,7 @@ static IrInstruction *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruct
return result;
} else if (is_slice(array_type)) {
ConstExprValue *ptr_field = &array_ptr_val->data.x_struct.fields[slice_ptr_index];
+ ir_assert(ptr_field != nullptr, &elem_ptr_instruction->base);
if (ptr_field->data.x_ptr.special == ConstPtrSpecialHardCodedAddr) {
IrInstruction *result = ir_build_elem_ptr(&ira->new_irb, elem_ptr_instruction->base.scope,
elem_ptr_instruction->base.source_node, array_ptr, casted_elem_index, false,
@@ -17663,7 +17672,7 @@ static IrInstruction *ir_analyze_struct_field_ptr(IrAnalyze *ira, IrInstruction
return ira->codegen->invalid_instruction;
if (type_is_invalid(struct_val->type))
return ira->codegen->invalid_instruction;
- if (struct_val->special == ConstValSpecialUndef && initializing) {
+ if (initializing && struct_val->special == ConstValSpecialUndef) {
struct_val->data.x_struct.fields = create_const_vals(struct_type->data.structure.src_field_count);
struct_val->special = ConstValSpecialStatic;
for (size_t i = 0; i < struct_type->data.structure.src_field_count; i += 1) {
@@ -18764,7 +18773,7 @@ static IrInstruction *ir_analyze_unwrap_optional_payload(IrAnalyze *ira, IrInstr
if (optional_val == nullptr)
return ira->codegen->invalid_instruction;
- if (initializing && optional_val->special == ConstValSpecialUndef) {
+ if (initializing) {
switch (type_has_one_possible_value(ira->codegen, child_type)) {
case OnePossibleValueInvalid:
return ira->codegen->invalid_instruction;
@@ -23260,7 +23269,7 @@ static IrInstruction *ir_analyze_unwrap_error_payload(IrAnalyze *ira, IrInstruct
ConstExprValue *err_union_val = const_ptr_pointee(ira, ira->codegen, ptr_val, source_instr->source_node);
if (err_union_val == nullptr)
return ira->codegen->invalid_instruction;
- if (err_union_val->special == ConstValSpecialUndef && initializing) {
+ if (initializing && err_union_val->special == ConstValSpecialUndef) {
ConstExprValue *vals = create_const_vals(2);
ConstExprValue *err_set_val = &vals[0];
ConstExprValue *payload_val = &vals[1];
diff --git a/test/stage1/behavior/eval.zig b/test/stage1/behavior/eval.zig
index 97d3a269cc..58d662d768 100644
--- a/test/stage1/behavior/eval.zig
+++ b/test/stage1/behavior/eval.zig
@@ -1,5 +1,6 @@
const std = @import("std");
const expect = std.testing.expect;
+const expectEqual = std.testing.expectEqual;
const builtin = @import("builtin");
test "compile time recursion" {
@@ -794,3 +795,12 @@ test "no undeclared identifier error in unanalyzed branches" {
lol_this_doesnt_exist = nonsense;
}
}
+
+test "comptime assign int to optional int" {
+ comptime {
+ var x: ?i32 = null;
+ x = 2;
+ x.? *= 10;
+ expectEqual(20, x.?);
+ }
+}
--
cgit v1.2.3
From 9069ee957cb8c9069028b325af5b862bbf8f66af Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 2 Aug 2019 15:17:02 -0400
Subject: fix discarding function call results
---
src/ir.cpp | 65 +++++++++++++++++++++++++++------------------
test/stage1/behavior/fn.zig | 19 +++++++++++++
2 files changed, 58 insertions(+), 26 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/src/ir.cpp b/src/ir.cpp
index 8a46fec7c9..de2e4e1654 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -189,7 +189,8 @@ static IrInstruction *ir_analyze_bit_cast(IrAnalyze *ira, IrInstruction *source_
static IrInstruction *ir_resolve_result_raw(IrAnalyze *ira, IrInstruction *suspend_source_instr,
ResultLoc *result_loc, ZigType *value_type, IrInstruction *value, bool force_runtime, bool non_null_comptime);
static IrInstruction *ir_resolve_result(IrAnalyze *ira, IrInstruction *suspend_source_instr,
- ResultLoc *result_loc, ZigType *value_type, IrInstruction *value, bool force_runtime, bool non_null_comptime);
+ ResultLoc *result_loc, ZigType *value_type, IrInstruction *value, bool force_runtime,
+ bool non_null_comptime, bool allow_discard);
static IrInstruction *ir_analyze_unwrap_optional_payload(IrAnalyze *ira, IrInstruction *source_instr,
IrInstruction *base_ptr, bool safety_check_on, bool initializing);
static IrInstruction *ir_analyze_unwrap_error_payload(IrAnalyze *ira, IrInstruction *source_instr,
@@ -11163,7 +11164,8 @@ static IrInstruction *ir_resolve_ptr_of_array_to_slice(IrAnalyze *ira, IrInstruc
}
if (result_loc == nullptr) result_loc = no_result_loc();
- IrInstruction *result_loc_inst = ir_resolve_result(ira, source_instr, result_loc, wanted_type, nullptr, true, false);
+ IrInstruction *result_loc_inst = ir_resolve_result(ira, source_instr, result_loc, wanted_type, nullptr, true,
+ false, true);
if (type_is_invalid(result_loc_inst->value.type) || instr_is_unreachable(result_loc_inst)) {
return result_loc_inst;
}
@@ -11623,7 +11625,7 @@ static IrInstruction *ir_analyze_optional_wrap(IrAnalyze *ira, IrInstruction *so
}
IrInstruction *result_loc_inst = nullptr;
if (result_loc != nullptr) {
- result_loc_inst = ir_resolve_result(ira, source_instr, result_loc, wanted_type, nullptr, true, false);
+ result_loc_inst = ir_resolve_result(ira, source_instr, result_loc, wanted_type, nullptr, true, false, true);
if (type_is_invalid(result_loc_inst->value.type) || instr_is_unreachable(result_loc_inst)) {
return result_loc_inst;
}
@@ -11666,7 +11668,7 @@ static IrInstruction *ir_analyze_err_wrap_payload(IrAnalyze *ira, IrInstruction
IrInstruction *result_loc_inst;
if (handle_is_ptr(wanted_type)) {
if (result_loc == nullptr) result_loc = no_result_loc();
- result_loc_inst = ir_resolve_result(ira, source_instr, result_loc, wanted_type, nullptr, true, false);
+ result_loc_inst = ir_resolve_result(ira, source_instr, result_loc, wanted_type, nullptr, true, false, true);
if (type_is_invalid(result_loc_inst->value.type) || instr_is_unreachable(result_loc_inst)) {
return result_loc_inst;
}
@@ -11751,7 +11753,7 @@ static IrInstruction *ir_analyze_err_wrap_code(IrAnalyze *ira, IrInstruction *so
IrInstruction *result_loc_inst;
if (handle_is_ptr(wanted_type)) {
if (result_loc == nullptr) result_loc = no_result_loc();
- result_loc_inst = ir_resolve_result(ira, source_instr, result_loc, wanted_type, nullptr, true, false);
+ result_loc_inst = ir_resolve_result(ira, source_instr, result_loc, wanted_type, nullptr, true, false, true);
if (type_is_invalid(result_loc_inst->value.type) || instr_is_unreachable(result_loc_inst)) {
return result_loc_inst;
}
@@ -11824,7 +11826,8 @@ static IrInstruction *ir_get_ref(IrAnalyze *ira, IrInstruction *source_instructi
IrInstruction *result_loc;
if (type_has_bits(ptr_type) && !handle_is_ptr(value->value.type)) {
- result_loc = ir_resolve_result(ira, source_instruction, no_result_loc(), value->value.type, nullptr, true, false);
+ result_loc = ir_resolve_result(ira, source_instruction, no_result_loc(), value->value.type, nullptr, true,
+ false, true);
} else {
result_loc = nullptr;
}
@@ -11868,7 +11871,8 @@ static IrInstruction *ir_analyze_array_to_slice(IrAnalyze *ira, IrInstruction *s
if (!array_ptr) array_ptr = ir_get_ref(ira, source_instr, array, true, false);
if (result_loc == nullptr) result_loc = no_result_loc();
- IrInstruction *result_loc_inst = ir_resolve_result(ira, source_instr, result_loc, wanted_type, nullptr, true, false);
+ IrInstruction *result_loc_inst = ir_resolve_result(ira, source_instr, result_loc, wanted_type, nullptr,
+ true, false, true);
if (type_is_invalid(result_loc_inst->value.type) || instr_is_unreachable(result_loc_inst)) {
return result_loc_inst;
}
@@ -12524,7 +12528,8 @@ static IrInstruction *ir_analyze_vector_to_array(IrAnalyze *ira, IrInstruction *
if (result_loc == nullptr) {
result_loc = no_result_loc();
}
- IrInstruction *result_loc_inst = ir_resolve_result(ira, source_instr, result_loc, array_type, nullptr, true, false);
+ IrInstruction *result_loc_inst = ir_resolve_result(ira, source_instr, result_loc, array_type, nullptr,
+ true, false, true);
if (type_is_invalid(result_loc_inst->value.type) || instr_is_unreachable(result_loc_inst)) {
return result_loc_inst;
}
@@ -13105,7 +13110,8 @@ static IrInstruction *ir_get_deref(IrAnalyze *ira, IrInstruction *source_instruc
IrInstruction *result_loc_inst;
if (type_entry->data.pointer.host_int_bytes != 0 && handle_is_ptr(child_type)) {
if (result_loc == nullptr) result_loc = no_result_loc();
- result_loc_inst = ir_resolve_result(ira, source_instruction, result_loc, child_type, nullptr, true, false);
+ result_loc_inst = ir_resolve_result(ira, source_instruction, result_loc, child_type, nullptr,
+ true, false, true);
if (type_is_invalid(result_loc_inst->value.type) || instr_is_unreachable(result_loc_inst)) {
return result_loc_inst;
}
@@ -15360,7 +15366,7 @@ static IrInstruction *ir_resolve_result_raw(IrAnalyze *ira, IrInstruction *suspe
if (peer_parent->peers.length == 1) {
IrInstruction *parent_result_loc = ir_resolve_result(ira, suspend_source_instr, peer_parent->parent,
- value_type, value, force_runtime, non_null_comptime);
+ value_type, value, force_runtime, non_null_comptime, true);
result_peer->suspend_pos.basic_block_index = SIZE_MAX;
result_peer->suspend_pos.instruction_index = SIZE_MAX;
if (parent_result_loc == nullptr || type_is_invalid(parent_result_loc->value.type) ||
@@ -15380,7 +15386,7 @@ static IrInstruction *ir_resolve_result_raw(IrAnalyze *ira, IrInstruction *suspe
if (peer_parent->skipped) {
if (non_null_comptime) {
return ir_resolve_result(ira, suspend_source_instr, peer_parent->parent,
- value_type, value, force_runtime, non_null_comptime);
+ value_type, value, force_runtime, non_null_comptime, true);
}
return nullptr;
}
@@ -15398,7 +15404,7 @@ static IrInstruction *ir_resolve_result_raw(IrAnalyze *ira, IrInstruction *suspe
}
IrInstruction *parent_result_loc = ir_resolve_result(ira, suspend_source_instr, peer_parent->parent,
- peer_parent->resolved_type, nullptr, force_runtime, non_null_comptime);
+ peer_parent->resolved_type, nullptr, force_runtime, non_null_comptime, true);
if (parent_result_loc == nullptr || type_is_invalid(parent_result_loc->value.type) ||
parent_result_loc->value.type->id == ZigTypeIdUnreachable)
{
@@ -15448,7 +15454,7 @@ static IrInstruction *ir_resolve_result_raw(IrAnalyze *ira, IrInstruction *suspe
}
IrInstruction *parent_result_loc = ir_resolve_result(ira, suspend_source_instr, result_bit_cast->parent,
- dest_type, bitcasted_value, force_runtime, non_null_comptime);
+ dest_type, bitcasted_value, force_runtime, non_null_comptime, true);
if (parent_result_loc == nullptr || type_is_invalid(parent_result_loc->value.type) ||
parent_result_loc->value.type->id == ZigTypeIdUnreachable)
{
@@ -15477,8 +15483,15 @@ static IrInstruction *ir_resolve_result_raw(IrAnalyze *ira, IrInstruction *suspe
static IrInstruction *ir_resolve_result(IrAnalyze *ira, IrInstruction *suspend_source_instr,
ResultLoc *result_loc_pass1, ZigType *value_type, IrInstruction *value, bool force_runtime,
- bool non_null_comptime)
+ bool non_null_comptime, bool allow_discard)
{
+ if (!allow_discard && result_loc_pass1->id == ResultLocIdInstruction &&
+ instr_is_comptime(result_loc_pass1->source_instruction) &&
+ result_loc_pass1->source_instruction->value.type->id == ZigTypeIdPointer &&
+ result_loc_pass1->source_instruction->value.data.x_ptr.special == ConstPtrSpecialDiscard)
+ {
+ result_loc_pass1 = no_result_loc();
+ }
IrInstruction *result_loc = ir_resolve_result_raw(ira, suspend_source_instr, result_loc_pass1, value_type,
value, force_runtime, non_null_comptime);
if (result_loc == nullptr || (instr_is_unreachable(result_loc) || type_is_invalid(result_loc->value.type)))
@@ -15533,7 +15546,7 @@ static IrInstruction *ir_analyze_instruction_resolve_result(IrAnalyze *ira, IrIn
if (type_is_invalid(implicit_elem_type))
return ira->codegen->invalid_instruction;
IrInstruction *result_loc = ir_resolve_result(ira, &instruction->base, instruction->result_loc,
- implicit_elem_type, nullptr, false, true);
+ implicit_elem_type, nullptr, false, true, true);
if (result_loc != nullptr)
return result_loc;
@@ -15542,7 +15555,7 @@ static IrInstruction *ir_analyze_instruction_resolve_result(IrAnalyze *ira, IrIn
instruction->result_loc->id == ResultLocIdReturn)
{
result_loc = ir_resolve_result(ira, &instruction->base, no_result_loc(),
- implicit_elem_type, nullptr, false, true);
+ implicit_elem_type, nullptr, false, true, true);
if (result_loc != nullptr &&
(type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc)))
{
@@ -15631,7 +15644,7 @@ static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc
ZigType *async_return_type = get_error_union_type(ira->codegen, alloc_fn_error_set_type, promise_type);
IrInstruction *result_loc = ir_resolve_result(ira, &call_instruction->base, no_result_loc(),
- async_return_type, nullptr, true, true);
+ async_return_type, nullptr, true, true, false);
if (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc)) {
return result_loc;
}
@@ -16390,7 +16403,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
IrInstruction *result_loc;
if (handle_is_ptr(impl_fn_type_id->return_type)) {
result_loc = ir_resolve_result(ira, &call_instruction->base, call_instruction->result_loc,
- impl_fn_type_id->return_type, nullptr, true, true);
+ impl_fn_type_id->return_type, nullptr, true, true, false);
if (result_loc != nullptr && (type_is_invalid(result_loc->value.type) ||
instr_is_unreachable(result_loc)))
{
@@ -16512,7 +16525,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
IrInstruction *result_loc;
if (handle_is_ptr(return_type)) {
result_loc = ir_resolve_result(ira, &call_instruction->base, call_instruction->result_loc,
- return_type, nullptr, true, true);
+ return_type, nullptr, true, true, false);
if (result_loc != nullptr && (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc))) {
return result_loc;
}
@@ -17028,7 +17041,7 @@ static IrInstruction *ir_analyze_instruction_phi(IrAnalyze *ira, IrInstructionPh
// In case resolving the parent activates a suspend, do it now
IrInstruction *parent_result_loc = ir_resolve_result(ira, &phi_instruction->base, peer_parent->parent,
- peer_parent->resolved_type, nullptr, false, false);
+ peer_parent->resolved_type, nullptr, false, false, true);
if (parent_result_loc != nullptr &&
(type_is_invalid(parent_result_loc->value.type) || instr_is_unreachable(parent_result_loc)))
{
@@ -21541,7 +21554,7 @@ static IrInstruction *ir_analyze_instruction_cmpxchg(IrAnalyze *ira, IrInstructi
IrInstruction *result_loc;
if (handle_is_ptr(result_type)) {
result_loc = ir_resolve_result(ira, &instruction->base, instruction->result_loc,
- result_type, nullptr, true, false);
+ result_type, nullptr, true, false, true);
if (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc)) {
return result_loc;
}
@@ -21798,7 +21811,7 @@ static IrInstruction *ir_analyze_instruction_from_bytes(IrAnalyze *ira, IrInstru
}
IrInstruction *result_loc = ir_resolve_result(ira, &instruction->base, instruction->result_loc,
- dest_slice_type, nullptr, true, false);
+ dest_slice_type, nullptr, true, false, true);
if (result_loc != nullptr && (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc))) {
return result_loc;
}
@@ -21875,7 +21888,7 @@ static IrInstruction *ir_analyze_instruction_to_bytes(IrAnalyze *ira, IrInstruct
}
IrInstruction *result_loc = ir_resolve_result(ira, &instruction->base, instruction->result_loc,
- dest_slice_type, nullptr, true, false);
+ dest_slice_type, nullptr, true, false, true);
if (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc)) {
return result_loc;
}
@@ -22617,7 +22630,7 @@ static IrInstruction *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstruction
}
IrInstruction *result_loc = ir_resolve_result(ira, &instruction->base, instruction->result_loc,
- return_type, nullptr, true, false);
+ return_type, nullptr, true, false, true);
if (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc)) {
return result_loc;
}
@@ -25397,7 +25410,7 @@ static IrInstruction *ir_analyze_instruction_end_expr(IrAnalyze *ira, IrInstruct
bool was_written = instruction->result_loc->written;
IrInstruction *result_loc = ir_resolve_result(ira, &instruction->base, instruction->result_loc,
- value->value.type, value, false, false);
+ value->value.type, value, false, false, true);
if (result_loc != nullptr) {
if (type_is_invalid(result_loc->value.type))
return ira->codegen->invalid_instruction;
@@ -25429,7 +25442,7 @@ static IrInstruction *ir_analyze_instruction_bit_cast_src(IrAnalyze *ira, IrInst
return operand;
IrInstruction *result_loc = ir_resolve_result(ira, &instruction->base,
- &instruction->result_loc_bit_cast->base, operand->value.type, operand, false, false);
+ &instruction->result_loc_bit_cast->base, operand->value.type, operand, false, false, true);
if (result_loc != nullptr && (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc)))
return result_loc;
diff --git a/test/stage1/behavior/fn.zig b/test/stage1/behavior/fn.zig
index d6d670b09b..6b9c8b8fe7 100644
--- a/test/stage1/behavior/fn.zig
+++ b/test/stage1/behavior/fn.zig
@@ -228,3 +228,22 @@ test "implicit cast fn call result to optional in field result" {
S.entry();
comptime S.entry();
}
+
+test "discard the result of a function that returns a struct" {
+ const S = struct {
+ fn entry() void {
+ _ = func();
+ }
+
+ fn func() Foo {
+ return undefined;
+ }
+
+ const Foo = struct {
+ a: u64,
+ b: u64,
+ };
+ };
+ S.entry();
+ comptime S.entry();
+}
--
cgit v1.2.3
From d105769926fd5360a5309be3e202cc65d32ce604 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 2 Aug 2019 16:09:40 -0400
Subject: fix regressions regarding writing through const pointers
---
src/all_types.hpp | 2 ++
src/ir.cpp | 34 ++++++++++++++++++----------------
test/compile_errors.zig | 16 ++++++++--------
3 files changed, 28 insertions(+), 24 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/src/all_types.hpp b/src/all_types.hpp
index a6b2bc51c3..4c3aeade9e 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -2543,6 +2543,7 @@ struct IrInstructionLoadPtrGen {
struct IrInstructionStorePtr {
IrInstruction base;
+ bool allow_write_through_const;
IrInstruction *ptr;
IrInstruction *value;
};
@@ -3707,6 +3708,7 @@ enum ResultLocId {
struct ResultLoc {
ResultLocId id;
bool written;
+ bool allow_write_through_const;
IrInstruction *resolved_loc; // result ptr
IrInstruction *source_instruction;
IrInstruction *gen_instruction; // value to store to the result loc
diff --git a/src/ir.cpp b/src/ir.cpp
index de2e4e1654..65a21a418d 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -198,7 +198,7 @@ static IrInstruction *ir_analyze_unwrap_error_payload(IrAnalyze *ira, IrInstruct
static IrInstruction *ir_analyze_unwrap_err_code(IrAnalyze *ira, IrInstruction *source_instr,
IrInstruction *base_ptr, bool initializing);
static IrInstruction *ir_analyze_store_ptr(IrAnalyze *ira, IrInstruction *source_instr,
- IrInstruction *ptr, IrInstruction *uncasted_value);
+ IrInstruction *ptr, IrInstruction *uncasted_value, bool allow_write_through_const);
static IrInstruction *ir_gen_union_init_expr(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *union_type, IrInstruction *field_name, AstNode *expr_node,
LVal lval, ResultLoc *parent_result_loc);
@@ -1613,7 +1613,7 @@ static IrInstruction *ir_build_unreachable(IrBuilder *irb, Scope *scope, AstNode
return &unreachable_instruction->base;
}
-static IrInstruction *ir_build_store_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node,
+static IrInstructionStorePtr *ir_build_store_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *ptr, IrInstruction *value)
{
IrInstructionStorePtr *instruction = ir_build_instruction(irb, scope, source_node);
@@ -1625,7 +1625,7 @@ static IrInstruction *ir_build_store_ptr(IrBuilder *irb, Scope *scope, AstNode *
ir_ref_instruction(ptr, irb->current_basic_block);
ir_ref_instruction(value, irb->current_basic_block);
- return &instruction->base;
+ return instruction;
}
static IrInstruction *ir_build_var_decl_src(IrBuilder *irb, Scope *scope, AstNode *source_node,
@@ -6051,6 +6051,7 @@ static IrInstruction *ir_gen_container_init_expr(IrBuilder *irb, Scope *scope, A
ResultLocInstruction *result_loc_inst = allocate(1);
result_loc_inst->base.id = ResultLocIdInstruction;
result_loc_inst->base.source_instruction = field_ptr;
+ result_loc_inst->base.allow_write_through_const = true;
ir_ref_instruction(field_ptr, irb->current_basic_block);
ir_build_reset_result(irb, scope, expr_node, &result_loc_inst->base);
@@ -6089,6 +6090,7 @@ static IrInstruction *ir_gen_container_init_expr(IrBuilder *irb, Scope *scope, A
ResultLocInstruction *result_loc_inst = allocate(1);
result_loc_inst->base.id = ResultLocIdInstruction;
result_loc_inst->base.source_instruction = elem_ptr;
+ result_loc_inst->base.allow_write_through_const = true;
ir_ref_instruction(elem_ptr, irb->current_basic_block);
ir_build_reset_result(irb, scope, expr_node, &result_loc_inst->base);
@@ -6646,7 +6648,7 @@ static IrInstruction *ir_gen_for_expr(IrBuilder *irb, Scope *parent_scope, AstNo
ir_set_cursor_at_end_and_append_block(irb, continue_block);
IrInstruction *new_index_val = ir_build_bin_op(irb, child_scope, node, IrBinOpAdd, index_val, one, false);
- ir_mark_gen(ir_build_store_ptr(irb, child_scope, node, index_ptr, new_index_val));
+ ir_build_store_ptr(irb, child_scope, node, index_ptr, new_index_val)->allow_write_through_const = true;
ir_build_br(irb, child_scope, node, cond_block, is_comptime);
IrInstruction *else_result = nullptr;
@@ -14848,7 +14850,7 @@ static IrInstruction *ir_analyze_instruction_decl_var(IrAnalyze *ira,
// instruction.
assert(deref->value.special != ConstValSpecialRuntime);
var_ptr->value.special = ConstValSpecialRuntime;
- ir_analyze_store_ptr(ira, var_ptr, var_ptr, deref);
+ ir_analyze_store_ptr(ira, var_ptr, var_ptr, deref, false);
}
if (var_ptr->value.special == ConstValSpecialStatic && var->mem_slot_index != SIZE_MAX) {
@@ -15862,7 +15864,7 @@ no_mem_slot:
}
static IrInstruction *ir_analyze_store_ptr(IrAnalyze *ira, IrInstruction *source_instr,
- IrInstruction *ptr, IrInstruction *uncasted_value)
+ IrInstruction *ptr, IrInstruction *uncasted_value, bool allow_write_through_const)
{
assert(ptr->value.type->id == ZigTypeIdPointer);
@@ -15878,7 +15880,7 @@ static IrInstruction *ir_analyze_store_ptr(IrAnalyze *ira, IrInstruction *source
ZigType *child_type = ptr->value.type->data.pointer.child_type;
- if (ptr->value.type->data.pointer.is_const && !source_instr->is_gen) {
+ if (ptr->value.type->data.pointer.is_const && !allow_write_through_const) {
ir_add_error(ira, source_instr, buf_sprintf("cannot assign to constant"));
return ira->codegen->invalid_instruction;
}
@@ -15957,10 +15959,9 @@ static IrInstruction *ir_analyze_store_ptr(IrAnalyze *ira, IrInstruction *source
break;
}
- IrInstruction *result = ir_build_store_ptr(&ira->new_irb, source_instr->scope, source_instr->source_node,
- ptr, value);
- result->value.type = ira->codegen->builtin_types.entry_void;
- return result;
+ IrInstructionStorePtr *store_ptr = ir_build_store_ptr(&ira->new_irb, source_instr->scope,
+ source_instr->source_node, ptr, value);
+ return &store_ptr->base;
}
static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *call_instruction,
@@ -18283,7 +18284,7 @@ static IrInstruction *ir_analyze_instruction_store_ptr(IrAnalyze *ira, IrInstruc
if (type_is_invalid(value->value.type))
return ira->codegen->invalid_instruction;
- return ir_analyze_store_ptr(ira, &instruction->base, ptr, value);
+ return ir_analyze_store_ptr(ira, &instruction->base, ptr, value, instruction->allow_write_through_const);
}
static IrInstruction *ir_analyze_instruction_load_ptr(IrAnalyze *ira, IrInstructionLoadPtr *instruction) {
@@ -19691,7 +19692,7 @@ static IrInstruction *ir_analyze_container_init_fields(IrAnalyze *ira, IrInstruc
IrInstruction *field_ptr = ir_analyze_struct_field_ptr(ira, instruction, field, result_loc,
container_type, true);
- ir_analyze_store_ptr(ira, instruction, field_ptr, runtime_inst);
+ ir_analyze_store_ptr(ira, instruction, field_ptr, runtime_inst, false);
if (instr_is_comptime(field_ptr) && field_ptr->value.data.x_ptr.mut != ConstPtrMutRuntimeVar) {
const_ptrs.append(field_ptr);
} else {
@@ -19708,7 +19709,7 @@ static IrInstruction *ir_analyze_container_init_fields(IrAnalyze *ira, IrInstruc
IrInstruction *field_result_loc = const_ptrs.at(i);
IrInstruction *deref = ir_get_deref(ira, field_result_loc, field_result_loc, nullptr);
field_result_loc->value.special = ConstValSpecialRuntime;
- ir_analyze_store_ptr(ira, field_result_loc, field_result_loc, deref);
+ ir_analyze_store_ptr(ira, field_result_loc, field_result_loc, deref, false);
}
}
}
@@ -19835,7 +19836,7 @@ static IrInstruction *ir_analyze_instruction_container_init_list(IrAnalyze *ira,
assert(elem_result_loc->value.special == ConstValSpecialStatic);
IrInstruction *deref = ir_get_deref(ira, elem_result_loc, elem_result_loc, nullptr);
elem_result_loc->value.special = ConstValSpecialRuntime;
- ir_analyze_store_ptr(ira, elem_result_loc, elem_result_loc, deref);
+ ir_analyze_store_ptr(ira, elem_result_loc, elem_result_loc, deref, false);
}
}
}
@@ -25418,7 +25419,8 @@ static IrInstruction *ir_analyze_instruction_end_expr(IrAnalyze *ira, IrInstruct
return result_loc;
if (!was_written) {
- IrInstruction *store_ptr = ir_analyze_store_ptr(ira, &instruction->base, result_loc, value);
+ IrInstruction *store_ptr = ir_analyze_store_ptr(ira, &instruction->base, result_loc, value,
+ instruction->result_loc->allow_write_through_const);
if (type_is_invalid(store_ptr->value.type)) {
return ira->codegen->invalid_instruction;
}
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 40ce8d304b..a4bc2a66f0 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -201,7 +201,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ return error.OutOfMemory;
\\}
,
- "tmp.zig:2:7: error: error is discarded",
+ "tmp.zig:2:12: error: error is discarded",
);
cases.add(
@@ -2740,7 +2740,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ 3 = 3;
\\}
,
- "tmp.zig:2:7: error: cannot assign to constant",
+ "tmp.zig:2:9: error: cannot assign to constant",
);
cases.add(
@@ -2750,7 +2750,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ a = 4;
\\}
,
- "tmp.zig:3:7: error: cannot assign to constant",
+ "tmp.zig:3:9: error: cannot assign to constant",
);
cases.add(
@@ -2820,7 +2820,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\}
\\export fn entry() void { f(); }
,
- "tmp.zig:3:7: error: cannot assign to constant",
+ "tmp.zig:3:9: error: cannot assign to constant",
);
cases.add(
@@ -3883,7 +3883,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\
\\export fn entry() usize { return @sizeOf(@typeOf(a)); }
,
- "tmp.zig:6:24: error: unable to evaluate constant expression",
+ "tmp.zig:6:26: error: unable to evaluate constant expression",
"tmp.zig:4:17: note: called from here",
);
@@ -4133,7 +4133,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ cstr[0] = 'W';
\\}
,
- "tmp.zig:3:11: error: cannot assign to constant",
+ "tmp.zig:3:13: error: cannot assign to constant",
);
cases.add(
@@ -4143,7 +4143,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ cstr[0] = 'W';
\\}
,
- "tmp.zig:3:11: error: cannot assign to constant",
+ "tmp.zig:3:13: error: cannot assign to constant",
);
cases.add(
@@ -4291,7 +4291,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ f.field = 0;
\\}
,
- "tmp.zig:6:13: error: cannot assign to constant",
+ "tmp.zig:6:15: error: cannot assign to constant",
);
cases.add(
--
cgit v1.2.3
From 24d78177eec4d8fc3aa8ca99dd50788e38f9f8b6 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 3 Aug 2019 01:06:14 -0400
Subject: add compile error for async call of function pointer
---
BRANCH_TODO | 2 +-
src/ir.cpp | 5 ++++-
test/compile_errors.zig | 12 ++++++++++++
test/stage1/behavior/coroutines.zig | 8 ++++----
4 files changed, 21 insertions(+), 6 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/BRANCH_TODO b/BRANCH_TODO
index 92390f099f..f3d881f5e5 100644
--- a/BRANCH_TODO
+++ b/BRANCH_TODO
@@ -1,4 +1,3 @@
- * struct types as the return type of an async function. make sure it works with return result locations.
* compile error for error: expected anyframe->T, found 'anyframe'
* compile error for error: expected anyframe->T, found 'i32'
* await of a non async function
@@ -19,3 +18,4 @@
* make resuming inside a suspend block, with nothing after it, a must-tail call.
* make sure there are safety tests for all the new safety features (search the new PanicFnId enum values)
* error return tracing
+ * compile error for casting a function to a non-async function pointer, but then later it gets inferred to be an async function
diff --git a/src/ir.cpp b/src/ir.cpp
index f140cfeabe..b01f43b3e1 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -14819,7 +14819,10 @@ static IrInstruction *ir_analyze_instruction_reset_result(IrAnalyze *ira, IrInst
static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc *call_instruction, ZigFn *fn_entry,
ZigType *fn_type, IrInstruction *fn_ref, IrInstruction **casted_args, size_t arg_count)
{
- ir_assert(fn_entry != nullptr, &call_instruction->base);
+ if (fn_entry == nullptr) {
+ ir_add_error(ira, fn_ref, buf_sprintf("function is not comptime-known; @asyncCall required"));
+ return ira->codegen->invalid_instruction;
+ }
ZigType *frame_type = get_coro_frame_type(ira->codegen, fn_entry);
IrInstruction *result_loc = ir_resolve_result(ira, &call_instruction->base, call_instruction->result_loc,
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 272d99c930..4b1a24c675 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -2,6 +2,18 @@ const tests = @import("tests.zig");
const builtin = @import("builtin");
pub fn addCases(cases: *tests.CompileErrorContext) void {
+ cases.add(
+ "runtime-known function called with async keyword",
+ \\export fn entry() void {
+ \\ var ptr = afunc;
+ \\ _ = async ptr();
+ \\}
+ \\
+ \\async fn afunc() void { }
+ ,
+ "tmp.zig:3:15: error: function is not comptime-known; @asyncCall required",
+ );
+
cases.add(
"function with ccc indirectly calling async function",
\\export fn entry() void {
diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig
index a1c1b7ad61..aa77541d19 100644
--- a/test/stage1/behavior/coroutines.zig
+++ b/test/stage1/behavior/coroutines.zig
@@ -263,15 +263,15 @@ test "async function with dot syntax" {
//test "async fn pointer in a struct field" {
// var data: i32 = 1;
// const Foo = struct {
-// bar: async<*std.mem.Allocator> fn (*i32) void,
+// bar: async fn (*i32) void,
// };
// var foo = Foo{ .bar = simpleAsyncFn2 };
-// const p = (async foo.bar(&data)) catch unreachable;
+// const p = async foo.bar(&data);
// expect(data == 2);
-// cancel p;
+// resume p;
// expect(data == 4);
//}
-//async<*std.mem.Allocator> fn simpleAsyncFn2(y: *i32) void {
+//async fn simpleAsyncFn2(y: *i32) void {
// defer y.* += 2;
// y.* += 1;
// suspend;
--
cgit v1.2.3
From 87710a1cc2c4d0e7ecc309e430f7d33baadc5f02 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 3 Aug 2019 16:14:24 -0400
Subject: implement `@asyncCall` which supports async function pointers
---
BRANCH_TODO | 16 +++++-
src/all_types.hpp | 3 ++
src/analyze.cpp | 3 ++
src/codegen.cpp | 105 +++++++++++++++++++++++++++---------
src/ir.cpp | 102 +++++++++++++++++++++++++++++------
test/compile_errors.zig | 12 +++++
test/runtime_safety.zig | 15 ++++++
test/stage1/behavior/coroutines.zig | 52 ++++++++++++------
8 files changed, 247 insertions(+), 61 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/BRANCH_TODO b/BRANCH_TODO
index a9bc5f3666..0ac1062b43 100644
--- a/BRANCH_TODO
+++ b/BRANCH_TODO
@@ -1,9 +1,8 @@
+ * @asyncCall with an async function pointer
* compile error for error: expected anyframe->T, found 'anyframe'
* compile error for error: expected anyframe->T, found 'i32'
* await of a non async function
- * await in single-threaded mode
* async call on a non async function
- * @asyncCall with an async function pointer
* cancel
* defer and errdefer
* safety for double await
@@ -21,3 +20,16 @@
* compile error for copying a frame
* compile error for resuming a const frame pointer
* runtime safety enabling/disabling scope has to be coordinated across resume/await/calls/return
+ * await in single-threaded mode
+ * calling a generic function which is async
+ * make sure `await @asyncCall` and `await async` are handled correctly.
+ * allow @asyncCall with a real @Frame(func) (the point of this is result pointer)
+ * documentation
+ - @asyncCall
+ - @frame
+ - @Frame
+ - @frameSize
+ - coroutines section
+ - suspend
+ - resume
+ - anyframe, anyframe->T
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 0f8cce1376..87db8edf8d 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -1503,6 +1503,7 @@ enum BuiltinFnId {
BuiltinFnIdInlineCall,
BuiltinFnIdNoInlineCall,
BuiltinFnIdNewStackCall,
+ BuiltinFnIdAsyncCall,
BuiltinFnIdTypeId,
BuiltinFnIdShlExact,
BuiltinFnIdShrExact,
@@ -1553,6 +1554,7 @@ enum PanicMsgId {
PanicMsgIdBadAwait,
PanicMsgIdBadReturn,
PanicMsgIdResumedAnAwaitingFn,
+ PanicMsgIdFrameTooSmall,
PanicMsgIdCount,
};
@@ -3699,6 +3701,7 @@ static const size_t maybe_null_index = 1;
static const size_t err_union_err_index = 0;
static const size_t err_union_payload_index = 1;
+// label (grep this): [coro_frame_struct_layout]
static const size_t coro_fn_ptr_index = 0;
static const size_t coro_awaiter_index = 1;
static const size_t coro_arg_start = 2;
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 5eb70d6717..cd8f981ff3 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -5205,6 +5205,7 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) {
call->frame_result_loc = &alloca_gen->base;
}
+ // label (grep this): [coro_frame_struct_layout]
ZigList field_types = {};
ZigList field_names = {};
@@ -7525,6 +7526,7 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re
if (result_type == nullptr) {
g->anyframe_fn_type = ptr_result_type;
}
+ // label (grep this): [coro_frame_struct_layout]
LLVMTypeRef field_types[] = {
ptr_result_type, // fn_ptr
usize_type_ref, // awaiter
@@ -7558,6 +7560,7 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re
ZigLLVMReplaceTemporary(g->dbuilder, frame_header_di_type, replacement_di_type);
} else {
ZigType *ptr_result_type = get_pointer_to_type(g, result_type, false);
+ // label (grep this): [coro_frame_struct_layout]
LLVMTypeRef field_types[] = {
LLVMPointerType(fn_type, 0), // fn_ptr
usize_type_ref, // awaiter
diff --git a/src/codegen.cpp b/src/codegen.cpp
index db617e636a..ebdd9e6120 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -879,6 +879,8 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) {
return buf_create_from_str("async function returned twice");
case PanicMsgIdResumedAnAwaitingFn:
return buf_create_from_str("awaiting function resumed");
+ case PanicMsgIdFrameTooSmall:
+ return buf_create_from_str("frame too small");
}
zig_unreachable();
}
@@ -3479,7 +3481,18 @@ static void render_async_var_decls(CodeGen *g, Scope *scope) {
}
}
+static LLVMValueRef gen_frame_size(CodeGen *g, LLVMValueRef fn_val) {
+ LLVMTypeRef usize_llvm_type = g->builtin_types.entry_usize->llvm_type;
+ LLVMTypeRef ptr_usize_llvm_type = LLVMPointerType(usize_llvm_type, 0);
+ LLVMValueRef casted_fn_val = LLVMBuildBitCast(g->builder, fn_val, ptr_usize_llvm_type, "");
+ LLVMValueRef negative_one = LLVMConstInt(LLVMInt32Type(), -1, true);
+ LLVMValueRef prefix_ptr = LLVMBuildInBoundsGEP(g->builder, casted_fn_val, &negative_one, 1, "");
+ return LLVMBuildLoad(g->builder, prefix_ptr, "");
+}
+
static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstructionCallGen *instruction) {
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
+
LLVMValueRef fn_val;
ZigType *fn_type;
bool callee_is_async;
@@ -3511,34 +3524,54 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
LLVMValueRef awaiter_init_val;
LLVMValueRef ret_ptr;
if (instruction->is_async) {
- frame_result_loc = result_loc;
awaiter_init_val = zero;
- if (ret_has_bits) {
- ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_arg_start + 1, "");
- }
- // Use the result location which is inside the frame if this is an async call.
- if (ret_has_bits) {
- LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_arg_start, "");
- LLVMBuildStore(g->builder, ret_ptr, ret_ptr_ptr);
+ if (instruction->new_stack == nullptr) {
+ frame_result_loc = result_loc;
+
+ if (ret_has_bits) {
+ // Use the result location which is inside the frame if this is an async call.
+ ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_arg_start + 1, "");
+ }
+ } else {
+ LLVMValueRef frame_slice_ptr = ir_llvm_value(g, instruction->new_stack);
+ if (ir_want_runtime_safety(g, &instruction->base)) {
+ LLVMValueRef given_len_ptr = LLVMBuildStructGEP(g->builder, frame_slice_ptr, slice_len_index, "");
+ LLVMValueRef given_frame_len = LLVMBuildLoad(g->builder, given_len_ptr, "");
+ LLVMValueRef actual_frame_len = gen_frame_size(g, fn_val);
+
+ LLVMBasicBlockRef fail_block = LLVMAppendBasicBlock(g->cur_fn_val, "FrameSizeCheckFail");
+ LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "FrameSizeCheckOk");
+
+ LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntUGE, given_frame_len, actual_frame_len, "");
+ LLVMBuildCondBr(g->builder, ok_bit, ok_block, fail_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, fail_block);
+ gen_safety_crash(g, PanicMsgIdFrameTooSmall);
+
+ LLVMPositionBuilderAtEnd(g->builder, ok_block);
+ }
+ LLVMValueRef frame_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_slice_ptr, slice_ptr_index, "");
+ LLVMValueRef frame_ptr = LLVMBuildLoad(g->builder, frame_ptr_ptr, "");
+ frame_result_loc = LLVMBuildBitCast(g->builder, frame_ptr,
+ get_llvm_type(g, instruction->base.value.type), "");
+
+ if (ret_has_bits) {
+ // Use the result location provided to the @asyncCall builtin
+ ret_ptr = result_loc;
+ }
}
} else if (callee_is_async) {
frame_result_loc = ir_llvm_value(g, instruction->frame_result_loc);
awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_ret_ptr,
g->builtin_types.entry_usize->llvm_type, ""); // caller's own frame pointer
if (ret_has_bits) {
+ // Use the call instruction's result location.
ret_ptr = result_loc;
}
-
- // Use the call instruction's result location.
- if (ret_has_bits) {
- LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_arg_start, "");
- LLVMBuildStore(g->builder, result_loc, ret_ptr_ptr);
- }
}
if (instruction->is_async || callee_is_async) {
assert(frame_result_loc != nullptr);
- assert(instruction->fn_entry != nullptr);
if (prefix_arg_err_ret_stack) {
zig_panic("TODO");
@@ -3547,6 +3580,10 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_awaiter_index, "");
LLVMBuildStore(g->builder, awaiter_init_val, awaiter_ptr);
+ if (ret_has_bits) {
+ LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_arg_start, "");
+ LLVMBuildStore(g->builder, ret_ptr, ret_ptr_ptr);
+ }
}
if (!instruction->is_async && !callee_is_async) {
if (first_arg_ret) {
@@ -3581,16 +3618,37 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
if (instruction->is_async || callee_is_async) {
size_t ret_2_or_0 = type_has_bits(fn_type->data.fn.fn_type_id.return_type) ? 2 : 0;
+ size_t arg_start_i = coro_arg_start + ret_2_or_0;
+
+ LLVMValueRef casted_frame;
+ if (instruction->new_stack != nullptr) {
+ // We need the frame type to be a pointer to a struct that includes the args
+ // label (grep this): [coro_frame_struct_layout]
+ size_t field_count = arg_start_i + gen_param_values.length;
+ LLVMTypeRef *field_types = allocate_nonzero(field_count);
+ LLVMGetStructElementTypes(LLVMGetElementType(LLVMTypeOf(frame_result_loc)), field_types);
+ for (size_t arg_i = 0; arg_i < gen_param_values.length; arg_i += 1) {
+ field_types[arg_start_i + arg_i] = LLVMTypeOf(gen_param_values.at(arg_i));
+ }
+ LLVMTypeRef frame_with_args_type = LLVMStructType(field_types, field_count, false);
+ LLVMTypeRef ptr_frame_with_args_type = LLVMPointerType(frame_with_args_type, 0);
+
+ casted_frame = LLVMBuildBitCast(g->builder, frame_result_loc, ptr_frame_with_args_type, "");
+ } else {
+ casted_frame = frame_result_loc;
+ }
+
for (size_t arg_i = 0; arg_i < gen_param_values.length; arg_i += 1) {
- LLVMValueRef arg_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc,
- coro_arg_start + ret_2_or_0 + arg_i, "");
+ LLVMValueRef arg_ptr = LLVMBuildStructGEP(g->builder, casted_frame, arg_start_i + arg_i, "");
LLVMBuildStore(g->builder, gen_param_values.at(arg_i), arg_ptr);
}
}
- LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
if (instruction->is_async) {
LLVMValueRef args[] = {frame_result_loc, LLVMGetUndef(usize_type_ref)};
ZigLLVMBuildCall(g->builder, fn_val, args, 2, llvm_cc, fn_inline, "");
+ if (instruction->new_stack != nullptr) {
+ return frame_result_loc;
+ }
return nullptr;
} else if (callee_is_async) {
ZigType *ptr_result_type = get_pointer_to_type(g, src_return_type, true);
@@ -5223,13 +5281,8 @@ static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable,
static LLVMValueRef ir_render_frame_size(CodeGen *g, IrExecutable *executable,
IrInstructionFrameSizeGen *instruction)
{
- LLVMTypeRef usize_llvm_type = g->builtin_types.entry_usize->llvm_type;
- LLVMTypeRef ptr_usize_llvm_type = LLVMPointerType(usize_llvm_type, 0);
LLVMValueRef fn_val = ir_llvm_value(g, instruction->fn);
- LLVMValueRef casted_fn_val = LLVMBuildBitCast(g->builder, fn_val, ptr_usize_llvm_type, "");
- LLVMValueRef negative_one = LLVMConstInt(LLVMInt32Type(), -1, true);
- LLVMValueRef prefix_ptr = LLVMBuildInBoundsGEP(g->builder, casted_fn_val, &negative_one, 1, "");
- return LLVMBuildLoad(g->builder, prefix_ptr, "");
+ return gen_frame_size(g, fn_val);
}
static void set_debug_location(CodeGen *g, IrInstruction *instruction) {
@@ -7097,13 +7150,13 @@ static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdFloor, "floor", 2);
create_builtin_fn(g, BuiltinFnIdCeil, "ceil", 2);
create_builtin_fn(g, BuiltinFnIdTrunc, "trunc", 2);
- //Needs library support on Windows
- //create_builtin_fn(g, BuiltinFnIdNearbyInt, "nearbyInt", 2);
+ create_builtin_fn(g, BuiltinFnIdNearbyInt, "nearbyInt", 2);
create_builtin_fn(g, BuiltinFnIdRound, "round", 2);
create_builtin_fn(g, BuiltinFnIdMulAdd, "mulAdd", 4);
create_builtin_fn(g, BuiltinFnIdInlineCall, "inlineCall", SIZE_MAX);
create_builtin_fn(g, BuiltinFnIdNoInlineCall, "noInlineCall", SIZE_MAX);
create_builtin_fn(g, BuiltinFnIdNewStackCall, "newStackCall", SIZE_MAX);
+ create_builtin_fn(g, BuiltinFnIdAsyncCall, "asyncCall", SIZE_MAX);
create_builtin_fn(g, BuiltinFnIdTypeId, "typeId", 1);
create_builtin_fn(g, BuiltinFnIdShlExact, "shlExact", 2);
create_builtin_fn(g, BuiltinFnIdShrExact, "shrExact", 2);
diff --git a/src/ir.cpp b/src/ir.cpp
index b01f43b3e1..fbf9da9656 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -1402,6 +1402,10 @@ static IrInstruction *ir_build_call_src(IrBuilder *irb, Scope *scope, AstNode *s
if (fn_ref != nullptr) ir_ref_instruction(fn_ref, irb->current_basic_block);
for (size_t i = 0; i < arg_count; i += 1)
ir_ref_instruction(args[i], irb->current_basic_block);
+ if (is_async && new_stack != nullptr) {
+ // in this case the arg at the end is the return pointer
+ ir_ref_instruction(args[arg_count], irb->current_basic_block);
+ }
if (new_stack != nullptr) ir_ref_instruction(new_stack, irb->current_basic_block);
return &call_instruction->base;
@@ -5203,8 +5207,10 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
}
case BuiltinFnIdNewStackCall:
{
- if (node->data.fn_call_expr.params.length == 0) {
- add_node_error(irb->codegen, node, buf_sprintf("expected at least 1 argument, found 0"));
+ if (node->data.fn_call_expr.params.length < 2) {
+ add_node_error(irb->codegen, node,
+ buf_sprintf("expected at least 2 arguments, found %" ZIG_PRI_usize,
+ node->data.fn_call_expr.params.length));
return irb->codegen->invalid_instruction;
}
@@ -5232,6 +5238,50 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
FnInlineAuto, false, new_stack, result_loc);
return ir_lval_wrap(irb, scope, call, lval, result_loc);
}
+ case BuiltinFnIdAsyncCall:
+ {
+ size_t arg_offset = 3;
+ if (node->data.fn_call_expr.params.length < arg_offset) {
+ add_node_error(irb->codegen, node,
+ buf_sprintf("expected at least %" ZIG_PRI_usize " arguments, found %" ZIG_PRI_usize,
+ arg_offset, node->data.fn_call_expr.params.length));
+ return irb->codegen->invalid_instruction;
+ }
+
+ AstNode *bytes_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *bytes = ir_gen_node(irb, bytes_node, scope);
+ if (bytes == irb->codegen->invalid_instruction)
+ return bytes;
+
+ AstNode *ret_ptr_node = node->data.fn_call_expr.params.at(1);
+ IrInstruction *ret_ptr = ir_gen_node(irb, ret_ptr_node, scope);
+ if (ret_ptr == irb->codegen->invalid_instruction)
+ return ret_ptr;
+
+ AstNode *fn_ref_node = node->data.fn_call_expr.params.at(2);
+ IrInstruction *fn_ref = ir_gen_node(irb, fn_ref_node, scope);
+ if (fn_ref == irb->codegen->invalid_instruction)
+ return fn_ref;
+
+ size_t arg_count = node->data.fn_call_expr.params.length - arg_offset;
+
+ // last "arg" is return pointer
+ IrInstruction **args = allocate(arg_count + 1);
+
+ for (size_t i = 0; i < arg_count; i += 1) {
+ AstNode *arg_node = node->data.fn_call_expr.params.at(i + arg_offset);
+ IrInstruction *arg = ir_gen_node(irb, arg_node, scope);
+ if (arg == irb->codegen->invalid_instruction)
+ return arg;
+ args[i] = arg;
+ }
+
+ args[arg_count] = ret_ptr;
+
+ IrInstruction *call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false,
+ FnInlineAuto, true, bytes, result_loc);
+ return ir_lval_wrap(irb, scope, call, lval, result_loc);
+ }
case BuiltinFnIdTypeId:
{
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
@@ -14817,11 +14867,31 @@ static IrInstruction *ir_analyze_instruction_reset_result(IrAnalyze *ira, IrInst
}
static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc *call_instruction, ZigFn *fn_entry,
- ZigType *fn_type, IrInstruction *fn_ref, IrInstruction **casted_args, size_t arg_count)
+ ZigType *fn_type, IrInstruction *fn_ref, IrInstruction **casted_args, size_t arg_count,
+ IrInstruction *casted_new_stack)
{
if (fn_entry == nullptr) {
- ir_add_error(ira, fn_ref, buf_sprintf("function is not comptime-known; @asyncCall required"));
- return ira->codegen->invalid_instruction;
+ if (call_instruction->new_stack == nullptr) {
+ ir_add_error(ira, fn_ref, buf_sprintf("function is not comptime-known; @asyncCall required"));
+ return ira->codegen->invalid_instruction;
+ }
+ // this is an @asyncCall
+
+ if (fn_type->data.fn.fn_type_id.cc != CallingConventionAsync) {
+ ir_add_error(ira, fn_ref,
+ buf_sprintf("expected async function, found '%s'", buf_ptr(&fn_type->name)));
+ return ira->codegen->invalid_instruction;
+ }
+
+ IrInstruction *ret_ptr = call_instruction->args[call_instruction->arg_count]->child;
+ if (type_is_invalid(ret_ptr->value.type))
+ return ira->codegen->invalid_instruction;
+
+ ZigType *anyframe_type = get_any_frame_type(ira->codegen, fn_type->data.fn.fn_type_id.return_type);
+
+ IrInstructionCallGen *call_gen = ir_build_call_gen(ira, &call_instruction->base, nullptr, fn_ref,
+ arg_count, casted_args, FnInlineAuto, true, casted_new_stack, ret_ptr, anyframe_type);
+ return &call_gen->base;
}
ZigType *frame_type = get_coro_frame_type(ira->codegen, fn_entry);
@@ -15559,13 +15629,13 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
size_t impl_param_count = impl_fn_type_id->param_count;
if (call_instruction->is_async) {
- zig_panic("TODO async call");
+ IrInstruction *result = ir_analyze_async_call(ira, call_instruction, impl_fn, impl_fn->type_entry,
+ nullptr, casted_args, call_param_count, casted_new_stack);
+ return ir_finish_anal(ira, result);
}
- if (!call_instruction->is_async) {
- if (impl_fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) {
- parent_fn_entry->inferred_async_node = fn_ref->source_node;
- }
+ if (impl_fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) {
+ parent_fn_entry->inferred_async_node = fn_ref->source_node;
}
IrInstructionCallGen *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base,
@@ -15645,18 +15715,16 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
return ira->codegen->invalid_instruction;
}
- if (!call_instruction->is_async) {
- if (fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) {
- parent_fn_entry->inferred_async_node = fn_ref->source_node;
- }
- }
-
if (call_instruction->is_async) {
IrInstruction *result = ir_analyze_async_call(ira, call_instruction, fn_entry, fn_type, fn_ref,
- casted_args, call_param_count);
+ casted_args, call_param_count, casted_new_stack);
return ir_finish_anal(ira, result);
}
+ if (fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) {
+ parent_fn_entry->inferred_async_node = fn_ref->source_node;
+ }
+
IrInstruction *result_loc;
if (handle_is_ptr(return_type)) {
result_loc = ir_resolve_result(ira, &call_instruction->base, call_instruction->result_loc,
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 3245632e37..2941cadcf5 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -2,6 +2,18 @@ const tests = @import("tests.zig");
const builtin = @import("builtin");
pub fn addCases(cases: *tests.CompileErrorContext) void {
+ cases.add(
+ "non async function pointer passed to @asyncCall",
+ \\export fn entry() void {
+ \\ var ptr = afunc;
+ \\ var bytes: [100]u8 = undefined;
+ \\ _ = @asyncCall(&bytes, {}, ptr);
+ \\}
+ \\fn afunc() void { }
+ ,
+ "tmp.zig:4:32: error: expected async function, found 'fn() void'",
+ );
+
cases.add(
"runtime-known async function called",
\\export fn entry() void {
diff --git a/test/runtime_safety.zig b/test/runtime_safety.zig
index 43cf0856c3..ac9037caae 100644
--- a/test/runtime_safety.zig
+++ b/test/runtime_safety.zig
@@ -1,6 +1,20 @@
const tests = @import("tests.zig");
pub fn addCases(cases: *tests.CompareOutputContext) void {
+ cases.addRuntimeSafety("@asyncCall with too small a frame",
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
+ \\ @import("std").os.exit(126);
+ \\}
+ \\pub fn main() void {
+ \\ var bytes: [1]u8 = undefined;
+ \\ var ptr = other;
+ \\ var frame = @asyncCall(&bytes, {}, ptr);
+ \\}
+ \\async fn other() void {
+ \\ suspend;
+ \\}
+ );
+
cases.addRuntimeSafety("resuming a function which is awaiting a frame",
\\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
@@ -17,6 +31,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\ suspend;
\\}
);
+
cases.addRuntimeSafety("resuming a function which is awaiting a call",
\\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig
index 2b82dce707..511568a898 100644
--- a/test/stage1/behavior/coroutines.zig
+++ b/test/stage1/behavior/coroutines.zig
@@ -260,22 +260,42 @@ test "async function with dot syntax" {
expect(S.y == 2);
}
-//test "async fn pointer in a struct field" {
-// var data: i32 = 1;
-// const Foo = struct {
-// bar: async fn (*i32) void,
-// };
-// var foo = Foo{ .bar = simpleAsyncFn2 };
-// const p = async foo.bar(&data);
-// expect(data == 2);
-// resume p;
-// expect(data == 4);
-//}
-//async fn simpleAsyncFn2(y: *i32) void {
-// defer y.* += 2;
-// y.* += 1;
-// suspend;
-//}
+test "async fn pointer in a struct field" {
+ var data: i32 = 1;
+ const Foo = struct {
+ bar: async fn (*i32) void,
+ };
+ var foo = Foo{ .bar = simpleAsyncFn2 };
+ var bytes: [64]u8 = undefined;
+ const p = @asyncCall(&bytes, {}, foo.bar, &data);
+ comptime expect(@typeOf(p) == anyframe->void);
+ expect(data == 2);
+ resume p;
+ expect(data == 4);
+}
+async fn simpleAsyncFn2(y: *i32) void {
+ defer y.* += 2;
+ y.* += 1;
+ suspend;
+}
+
+test "@asyncCall with return type" {
+ const Foo = struct {
+ bar: async fn () i32,
+
+ async fn afunc() i32 {
+ suspend;
+ return 1234;
+ }
+ };
+ var foo = Foo{ .bar = Foo.afunc };
+ var bytes: [64]u8 = undefined;
+ var aresult: i32 = 0;
+ const frame = @asyncCall(&bytes, &aresult, foo.bar);
+ expect(aresult == 0);
+ resume frame;
+ expect(aresult == 1234);
+}
//test "async fn with inferred error set" {
// const p = async failing();
--
cgit v1.2.3
From fa30ebfbe5949fc63aee9853d66932facfd1d168 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sun, 4 Aug 2019 18:24:10 -0400
Subject: suspension points inside branching control flow
---
BRANCH_TODO | 1 -
src/all_types.hpp | 15 +++-
src/analyze.cpp | 35 ++++++--
src/codegen.cpp | 163 +++++++++++++++++++++---------------
src/ir.cpp | 2 +-
test/stage1/behavior/coroutines.zig | 26 ++++++
6 files changed, 160 insertions(+), 82 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/BRANCH_TODO b/BRANCH_TODO
index 62fee38371..f76252d935 100644
--- a/BRANCH_TODO
+++ b/BRANCH_TODO
@@ -1,4 +1,3 @@
- * suspension points inside branching control flow
* go over the commented out tests
* error return tracing
* compile error for error: expected anyframe->T, found 'anyframe'
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 87db8edf8d..8e12e720ef 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -1716,6 +1716,9 @@ struct CodeGen {
ZigLLVMDIFile *dummy_di_file;
LLVMValueRef cur_ret_ptr;
LLVMValueRef cur_fn_val;
+ LLVMValueRef cur_async_switch_instr;
+ LLVMValueRef cur_async_resume_index_ptr;
+ LLVMValueRef cur_async_awaiter_ptr;
LLVMValueRef cur_err_ret_trace_val_arg;
LLVMValueRef cur_err_ret_trace_val_stack;
LLVMValueRef memcpy_fn_val;
@@ -2166,8 +2169,8 @@ struct IrBasicBlock {
size_t ref_count;
// index into the basic block list
size_t index;
- // for async functions, the split function which corresponds to this block
- LLVMValueRef split_llvm_fn;
+ // for async functions, the resume index which corresponds to this block
+ size_t resume_index;
LLVMBasicBlockRef llvm_block;
LLVMBasicBlockRef llvm_exit_block;
// The instruction that referenced this basic block and caused us to
@@ -3703,8 +3706,12 @@ static const size_t err_union_payload_index = 1;
// label (grep this): [coro_frame_struct_layout]
static const size_t coro_fn_ptr_index = 0;
-static const size_t coro_awaiter_index = 1;
-static const size_t coro_arg_start = 2;
+static const size_t coro_resume_index = 1;
+static const size_t coro_awaiter_index = 2;
+static const size_t coro_arg_start = 3;
+
+// one for the Entry block, resume blocks are indexed after that.
+static const size_t coro_extra_resume_block_count = 1;
// TODO call graph analysis to find out what this number needs to be for every function
// MUST BE A POWER OF TWO.
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 009cb2de12..e7480c579b 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -5215,6 +5215,9 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) {
field_names.append("fn_ptr");
field_types.append(fn_type);
+ field_names.append("resume_index");
+ field_types.append(g->builtin_types.entry_usize);
+
field_names.append("awaiter");
field_types.append(g->builtin_types.entry_usize);
@@ -7532,9 +7535,10 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re
// label (grep this): [coro_frame_struct_layout]
LLVMTypeRef field_types[] = {
ptr_result_type, // fn_ptr
+ usize_type_ref, // resume_index
usize_type_ref, // awaiter
};
- LLVMStructSetBody(frame_header_type, field_types, 2, false);
+ LLVMStructSetBody(frame_header_type, field_types, 3, false);
ZigLLVMDIType *di_element_types[] = {
ZigLLVMCreateDebugMemberType(g->dbuilder,
@@ -7545,12 +7549,19 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re
8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 0),
ZigLLVM_DIFlags_Zero, usize_di_type),
ZigLLVMCreateDebugMemberType(g->dbuilder,
- ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "awaiter",
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "resume_index",
di_file, line,
8*LLVMABISizeOfType(g->target_data_ref, field_types[1]),
8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[1]),
8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 1),
ZigLLVM_DIFlags_Zero, usize_di_type),
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "awaiter",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types[2]),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[2]),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 2),
+ ZigLLVM_DIFlags_Zero, usize_di_type),
};
ZigLLVMDIType *replacement_di_type = ZigLLVMCreateDebugStructType(g->dbuilder,
compile_unit_scope, buf_ptr(name),
@@ -7558,7 +7569,7 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re
8*LLVMABISizeOfType(g->target_data_ref, frame_header_type),
8*LLVMABIAlignmentOfType(g->target_data_ref, frame_header_type),
ZigLLVM_DIFlags_Zero,
- nullptr, di_element_types, 2, 0, nullptr, "");
+ nullptr, di_element_types, 3, 0, nullptr, "");
ZigLLVMReplaceTemporary(g->dbuilder, frame_header_di_type, replacement_di_type);
} else {
@@ -7566,11 +7577,12 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re
// label (grep this): [coro_frame_struct_layout]
LLVMTypeRef field_types[] = {
LLVMPointerType(fn_type, 0), // fn_ptr
+ usize_type_ref, // resume_index
usize_type_ref, // awaiter
get_llvm_type(g, ptr_result_type), // result_ptr
get_llvm_type(g, result_type), // result
};
- LLVMStructSetBody(frame_header_type, field_types, 4, false);
+ LLVMStructSetBody(frame_header_type, field_types, 5, false);
ZigLLVMDIType *di_element_types[] = {
ZigLLVMCreateDebugMemberType(g->dbuilder,
@@ -7588,18 +7600,25 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re
8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 1),
ZigLLVM_DIFlags_Zero, usize_di_type),
ZigLLVMCreateDebugMemberType(g->dbuilder,
- ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "result_ptr",
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "awaiter",
di_file, line,
8*LLVMABISizeOfType(g->target_data_ref, field_types[2]),
8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[2]),
8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 2),
- ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_result_type)),
+ ZigLLVM_DIFlags_Zero, usize_di_type),
ZigLLVMCreateDebugMemberType(g->dbuilder,
- ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "result",
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "result_ptr",
di_file, line,
8*LLVMABISizeOfType(g->target_data_ref, field_types[3]),
8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[3]),
8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 3),
+ ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_result_type)),
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "result",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types[4]),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[4]),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 4),
ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, result_type)),
};
ZigLLVMDIType *replacement_di_type = ZigLLVMCreateDebugStructType(g->dbuilder,
@@ -7608,7 +7627,7 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re
8*LLVMABISizeOfType(g->target_data_ref, frame_header_type),
8*LLVMABIAlignmentOfType(g->target_data_ref, frame_header_type),
ZigLLVM_DIFlags_Zero,
- nullptr, di_element_types, 2, 0, nullptr, "");
+ nullptr, di_element_types, 5, 0, nullptr, "");
ZigLLVMReplaceTemporary(g->dbuilder, frame_header_di_type, replacement_di_type);
}
diff --git a/src/codegen.cpp b/src/codegen.cpp
index ebdd9e6120..1943859d41 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -1997,7 +1997,9 @@ static LLVMValueRef ir_render_save_err_ret_addr(CodeGen *g, IrExecutable *execut
return call_instruction;
}
-static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrInstructionReturn *return_instruction) {
+static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable,
+ IrInstructionReturn *return_instruction)
+{
if (fn_is_async(g->cur_fn)) {
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
LLVMValueRef locals_ptr = g->cur_ret_ptr;
@@ -2006,12 +2008,10 @@ static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrIns
ZigType *ret_type = ret_type_has_bits ? return_instruction->value->value.type : nullptr;
if (ir_want_runtime_safety(g, &return_instruction->base)) {
- LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, locals_ptr, coro_fn_ptr_index, "");
- LLVMValueRef new_resume_fn = g->cur_fn->resume_blocks.last()->split_llvm_fn;
- LLVMBuildStore(g->builder, new_resume_fn, resume_index_ptr);
+ LLVMValueRef new_resume_index = LLVMConstAllOnes(usize_type_ref);
+ LLVMBuildStore(g->builder, new_resume_index, g->cur_async_resume_index_ptr);
}
- LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, locals_ptr, coro_awaiter_index, "");
LLVMValueRef result_ptr_as_usize;
if (ret_type_has_bits) {
LLVMValueRef result_ptr_ptr = LLVMBuildStructGEP(g->builder, locals_ptr, coro_arg_start, "");
@@ -2029,8 +2029,8 @@ static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrIns
}
LLVMValueRef zero = LLVMConstNull(usize_type_ref);
LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref);
- LLVMValueRef prev_val = LLVMBuildAtomicRMW(g->builder, LLVMAtomicRMWBinOpXchg, awaiter_ptr,
- all_ones, LLVMAtomicOrderingSequentiallyConsistent, g->is_single_threaded);
+ LLVMValueRef prev_val = LLVMBuildAtomicRMW(g->builder, LLVMAtomicRMWBinOpXchg, g->cur_async_awaiter_ptr,
+ all_ones, LLVMAtomicOrderingMonotonic, g->is_single_threaded);
LLVMBasicBlockRef bad_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadReturn");
LLVMBasicBlockRef early_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "EarlyReturn");
@@ -3453,7 +3453,6 @@ static void render_async_spills(CodeGen *g) {
}
static void render_async_var_decls(CodeGen *g, Scope *scope) {
- render_async_spills(g);
for (;;) {
switch (scope->id) {
case ScopeIdCImport:
@@ -3573,6 +3572,14 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
if (instruction->is_async || callee_is_async) {
assert(frame_result_loc != nullptr);
+ LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_fn_ptr_index, "");
+ LLVMValueRef bitcasted_fn_val = LLVMBuildBitCast(g->builder, fn_val,
+ LLVMGetElementType(LLVMTypeOf(fn_ptr_ptr)), "");
+ LLVMBuildStore(g->builder, bitcasted_fn_val, fn_ptr_ptr);
+
+ LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_resume_index, "");
+ LLVMBuildStore(g->builder, zero, resume_index_ptr);
+
if (prefix_arg_err_ret_stack) {
zig_panic("TODO");
}
@@ -3652,23 +3659,24 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
return nullptr;
} else if (callee_is_async) {
ZigType *ptr_result_type = get_pointer_to_type(g, src_return_type, true);
- LLVMValueRef split_llvm_fn = make_fn_llvm_value(g, g->cur_fn);
- LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_fn_ptr_index, "");
- LLVMBuildStore(g->builder, split_llvm_fn, fn_ptr_ptr);
+
+ LLVMBasicBlockRef call_bb = LLVMAppendBasicBlock(g->cur_fn_val, "CallResume");
+ size_t new_block_index = g->cur_fn->resume_blocks.length + coro_extra_resume_block_count;
+ g->cur_fn->resume_blocks.append(nullptr);
+ LLVMValueRef new_block_index_val = LLVMConstInt(usize_type_ref, new_block_index, false);
+ LLVMAddCase(g->cur_async_switch_instr, new_block_index_val, call_bb);
+
+ LLVMBuildStore(g->builder, new_block_index_val, g->cur_async_resume_index_ptr);
LLVMValueRef args[] = {frame_result_loc, LLVMGetUndef(usize_type_ref)};
LLVMValueRef call_inst = ZigLLVMBuildCall(g->builder, fn_val, args, 2, llvm_cc, fn_inline, "");
ZigLLVMSetTailCall(call_inst);
LLVMBuildRetVoid(g->builder);
- g->cur_fn_val = split_llvm_fn;
- g->cur_ret_ptr = LLVMGetParam(split_llvm_fn, 0);
- LLVMBasicBlockRef call_bb = LLVMAppendBasicBlock(split_llvm_fn, "CallResume");
LLVMPositionBuilderAtEnd(g->builder, call_bb);
-
if (ir_want_runtime_safety(g, &instruction->base)) {
- LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(split_llvm_fn, "BadResume");
- LLVMBasicBlockRef ok_resume_block = LLVMAppendBasicBlock(split_llvm_fn, "OkResume");
- LLVMValueRef arg_val = LLVMGetParam(split_llvm_fn, 1);
+ LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadResume");
+ LLVMBasicBlockRef ok_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "OkResume");
+ LLVMValueRef arg_val = LLVMGetParam(g->cur_fn_val, 1);
LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref);
LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntNE, arg_val, all_ones, "");
LLVMBuildCondBr(g->builder, ok_bit, ok_resume_block, bad_resume_block);
@@ -5144,10 +5152,9 @@ static LLVMValueRef ir_render_assert_non_null(CodeGen *g, IrExecutable *executab
static LLVMValueRef ir_render_suspend_begin(CodeGen *g, IrExecutable *executable,
IrInstructionSuspendBegin *instruction)
{
- LLVMValueRef locals_ptr = g->cur_ret_ptr;
- LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, locals_ptr, coro_fn_ptr_index, "");
- LLVMValueRef new_fn_ptr = instruction->resume_block->split_llvm_fn;
- LLVMBuildStore(g->builder, new_fn_ptr, fn_ptr_ptr);
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
+ LLVMValueRef new_resume_index = LLVMConstInt(usize_type_ref, instruction->resume_block->resume_index, false);
+ LLVMBuildStore(g->builder, new_resume_index, g->cur_async_resume_index_ptr);
return nullptr;
}
@@ -5159,19 +5166,22 @@ static LLVMValueRef ir_render_suspend_br(CodeGen *g, IrExecutable *executable,
}
static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInstructionAwait *instruction) {
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
LLVMValueRef target_frame_ptr = ir_llvm_value(g, instruction->frame);
ZigType *result_type = instruction->base.value.type;
ZigType *ptr_result_type = get_pointer_to_type(g, result_type, true);
// Prepare to be suspended
- LLVMValueRef split_llvm_fn = make_fn_llvm_value(g, g->cur_fn);
- LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_fn_ptr_index, "");
- LLVMBuildStore(g->builder, split_llvm_fn, fn_ptr_ptr);
+ LLVMBasicBlockRef resume_bb = LLVMAppendBasicBlock(g->cur_fn_val, "AwaitResume");
+ size_t new_block_index = g->cur_fn->resume_blocks.length + coro_extra_resume_block_count;
+ g->cur_fn->resume_blocks.append(nullptr);
+ LLVMValueRef new_block_index_val = LLVMConstInt(usize_type_ref, new_block_index, false);
+ LLVMAddCase(g->cur_async_switch_instr, new_block_index_val, resume_bb);
+ LLVMBuildStore(g->builder, new_block_index_val, g->cur_async_resume_index_ptr);
// At this point resuming the function will do the correct thing.
// This code is as if it is running inside the suspend block.
- LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
// caller's own frame pointer
LLVMValueRef awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_ret_ptr, usize_type_ref, "");
LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_awaiter_index, "");
@@ -5184,18 +5194,20 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst
result_ptr_as_usize = LLVMGetUndef(usize_type_ref);
}
LLVMValueRef prev_val = LLVMBuildAtomicRMW(g->builder, LLVMAtomicRMWBinOpXchg, awaiter_ptr, awaiter_init_val,
- LLVMAtomicOrderingSequentiallyConsistent, g->is_single_threaded);
+ LLVMAtomicOrderingMonotonic, g->is_single_threaded);
LLVMBasicBlockRef bad_await_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadAwait");
LLVMBasicBlockRef complete_suspend_block = LLVMAppendBasicBlock(g->cur_fn_val, "CompleteSuspend");
- LLVMBasicBlockRef early_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "EarlyReturn");
LLVMValueRef zero = LLVMConstNull(usize_type_ref);
LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref);
LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, prev_val, bad_await_block, 2);
+ LLVMBasicBlockRef predecessor_bb = LLVMGetInsertBlock(g->builder);
LLVMAddCase(switch_instr, zero, complete_suspend_block);
- LLVMAddCase(switch_instr, all_ones, early_return_block);
+
+ // Early return: The async function has already completed. No need to suspend.
+ LLVMAddCase(switch_instr, all_ones, resume_bb);
// We discovered that another awaiter was already here.
LLVMPositionBuilderAtEnd(g->builder, bad_await_block);
@@ -5205,25 +5217,18 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst
LLVMPositionBuilderAtEnd(g->builder, complete_suspend_block);
LLVMBuildRetVoid(g->builder);
- // The async function has already completed. So we use a tail call to resume ourselves.
- LLVMPositionBuilderAtEnd(g->builder, early_return_block);
- LLVMValueRef args[] = {g->cur_ret_ptr, result_ptr_as_usize};
- LLVMValueRef call_inst = ZigLLVMBuildCall(g->builder, split_llvm_fn, args, 2, LLVMFastCallConv,
- ZigLLVM_FnInlineAuto, "");
- ZigLLVMSetTailCall(call_inst);
- LLVMBuildRetVoid(g->builder);
-
- g->cur_fn_val = split_llvm_fn;
- g->cur_ret_ptr = LLVMGetParam(split_llvm_fn, 0);
- LLVMBasicBlockRef call_bb = LLVMAppendBasicBlock(split_llvm_fn, "AwaitResume");
- LLVMPositionBuilderAtEnd(g->builder, call_bb);
+ LLVMPositionBuilderAtEnd(g->builder, resume_bb);
+ // We either got here from Entry (function call) or from the switch above
+ LLVMValueRef spilled_result_ptr = LLVMBuildPhi(g->builder, usize_type_ref, "");
+ LLVMValueRef incoming_values[] = { LLVMGetParam(g->cur_fn_val, 1), result_ptr_as_usize };
+ LLVMBasicBlockRef incoming_blocks[] = { g->cur_fn->preamble_llvm_block, predecessor_bb };
+ LLVMAddIncoming(spilled_result_ptr, incoming_values, incoming_blocks, 2);
if (ir_want_runtime_safety(g, &instruction->base)) {
- LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(split_llvm_fn, "BadResume");
- LLVMBasicBlockRef ok_resume_block = LLVMAppendBasicBlock(split_llvm_fn, "OkResume");
- LLVMValueRef arg_val = LLVMGetParam(split_llvm_fn, 1);
+ LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadResume");
+ LLVMBasicBlockRef ok_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "OkResume");
LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref);
- LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntNE, arg_val, all_ones, "");
+ LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntNE, spilled_result_ptr, all_ones, "");
LLVMBuildCondBr(g->builder, ok_bit, ok_resume_block, bad_resume_block);
LLVMPositionBuilderAtEnd(g->builder, bad_resume_block);
@@ -5235,7 +5240,6 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst
render_async_var_decls(g, instruction->base.scope);
if (type_has_bits(result_type)) {
- LLVMValueRef spilled_result_ptr = LLVMGetParam(g->cur_fn_val, 1);
LLVMValueRef casted_spilled_result_ptr = LLVMBuildIntToPtr(g->builder, spilled_result_ptr,
get_llvm_type(g, ptr_result_type), "");
return get_handle_value(g, casted_spilled_result_ptr, result_type, ptr_result_type);
@@ -5547,13 +5551,18 @@ static void ir_render(CodeGen *g, ZigFn *fn_entry) {
IrExecutable *executable = &fn_entry->analyzed_executable;
assert(executable->basic_block_list.length > 0);
+
+ if (fn_is_async(fn_entry)) {
+ IrBasicBlock *entry_block = executable->basic_block_list.at(0);
+ LLVMPositionBuilderAtEnd(g->builder, entry_block->llvm_block);
+ render_async_var_decls(g, entry_block->instruction_list.at(0)->scope);
+ }
+
for (size_t block_i = 0; block_i < executable->basic_block_list.length; block_i += 1) {
IrBasicBlock *current_block = executable->basic_block_list.at(block_i);
assert(current_block->llvm_block);
LLVMPositionBuilderAtEnd(g->builder, current_block->llvm_block);
- if (current_block->split_llvm_fn != nullptr) {
- g->cur_fn_val = current_block->split_llvm_fn;
- g->cur_ret_ptr = LLVMGetParam(g->cur_fn_val, 0);
+ if (current_block->resume_index != 0) {
render_async_var_decls(g, current_block->instruction_list.at(0)->scope);
}
for (size_t instr_i = 0; instr_i < current_block->instruction_list.length; instr_i += 1) {
@@ -6416,17 +6425,19 @@ static void build_all_basic_blocks(CodeGen *g, ZigFn *fn) {
IrExecutable *executable = &fn->analyzed_executable;
assert(executable->basic_block_list.length > 0);
LLVMValueRef fn_val = fn_llvm_value(g, fn);
+ LLVMBasicBlockRef first_bb = nullptr;
+ if (fn_is_async(fn)) {
+ first_bb = LLVMAppendBasicBlock(fn_val, "AsyncSwitch");
+ fn->preamble_llvm_block = first_bb;
+ }
for (size_t block_i = 0; block_i < executable->basic_block_list.length; block_i += 1) {
IrBasicBlock *bb = executable->basic_block_list.at(block_i);
- if (bb->split_llvm_fn != nullptr) {
- assert(bb->split_llvm_fn == reinterpret_cast(0x1));
- fn_val = make_fn_llvm_value(g, fn);
- bb->split_llvm_fn = fn_val;
- }
bb->llvm_block = LLVMAppendBasicBlock(fn_val, bb->name_hint);
}
- IrBasicBlock *entry_bb = executable->basic_block_list.at(0);
- LLVMPositionBuilderAtEnd(g->builder, entry_bb->llvm_block);
+ if (first_bb == nullptr) {
+ first_bb = executable->basic_block_list.at(0)->llvm_block;
+ }
+ LLVMPositionBuilderAtEnd(g->builder, first_bb);
}
static void gen_global_var(CodeGen *g, ZigVar *var, LLVMValueRef init_val,
@@ -6636,9 +6647,7 @@ static void do_code_gen(CodeGen *g) {
g->cur_err_ret_trace_val_stack = nullptr;
}
- if (is_async) {
- render_async_spills(g);
- } else {
+ if (!is_async) {
// allocate temporary stack data
for (size_t alloca_i = 0; alloca_i < fn_table_entry->alloca_gen_list.length; alloca_i += 1) {
IrInstructionAllocaGen *instruction = fn_table_entry->alloca_gen_list.at(alloca_i);
@@ -6752,17 +6761,35 @@ static void do_code_gen(CodeGen *g) {
LLVMValueRef size_val = LLVMConstInt(usize_type_ref, fn_table_entry->frame_type->abi_size, false);
ZigLLVMFunctionSetPrefixData(fn_table_entry->llvm_value, size_val);
- if (ir_want_runtime_safety_scope(g, fn_table_entry->child_scope)) {
- IrBasicBlock *bad_resume_block = allocate(1);
- bad_resume_block->name_hint = "BadResume";
- bad_resume_block->split_llvm_fn = make_fn_llvm_value(g, fn_table_entry);
-
- LLVMBasicBlockRef llvm_block = LLVMAppendBasicBlock(bad_resume_block->split_llvm_fn, "BadResume");
- LLVMPositionBuilderAtEnd(g->builder, llvm_block);
- gen_safety_crash(g, PanicMsgIdBadResume);
+ if (!g->strip_debug_symbols) {
+ AstNode *source_node = fn_table_entry->proto_node;
+ ZigLLVMSetCurrentDebugLocation(g->builder, (int)source_node->line + 1,
+ (int)source_node->column + 1, get_di_scope(g, fn_table_entry->child_scope));
+ }
+ IrExecutable *executable = &fn_table_entry->analyzed_executable;
+ LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadResume");
+ LLVMPositionBuilderAtEnd(g->builder, bad_resume_block);
+ gen_assertion_scope(g, PanicMsgIdBadResume, fn_table_entry->child_scope);
- fn_table_entry->resume_blocks.append(bad_resume_block);
+ LLVMPositionBuilderAtEnd(g->builder, fn_table_entry->preamble_llvm_block);
+ render_async_spills(g);
+ g->cur_async_awaiter_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_awaiter_index, "");
+ LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_resume_index, "");
+ g->cur_async_resume_index_ptr = resume_index_ptr;
+ LLVMValueRef resume_index = LLVMBuildLoad(g->builder, resume_index_ptr, "");
+ LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, resume_index, bad_resume_block,
+ fn_table_entry->resume_blocks.length + coro_extra_resume_block_count);
+ g->cur_async_switch_instr = switch_instr;
+
+ LLVMValueRef zero = LLVMConstNull(usize_type_ref);
+ LLVMAddCase(switch_instr, zero, executable->basic_block_list.at(0)->llvm_block);
+
+ for (size_t resume_i = 0; resume_i < fn_table_entry->resume_blocks.length; resume_i += 1) {
+ IrBasicBlock *resume_block = fn_table_entry->resume_blocks.at(resume_i);
+ LLVMValueRef case_value = LLVMConstInt(usize_type_ref, resume_block->resume_index, false);
+ LLVMAddCase(switch_instr, case_value, resume_block->llvm_block);
}
+
} else {
// create debug variable declarations for parameters
// rely on the first variables in the variable_list being parameters.
diff --git a/src/ir.cpp b/src/ir.cpp
index fbf9da9656..c81000573c 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -24474,7 +24474,7 @@ static IrInstruction *ir_analyze_instruction_suspend_br(IrAnalyze *ira, IrInstru
ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
ir_assert(fn_entry != nullptr, &instruction->base);
- new_bb->split_llvm_fn = reinterpret_cast(0x1);
+ new_bb->resume_index = fn_entry->resume_blocks.length + coro_extra_resume_block_count;
fn_entry->resume_blocks.append(new_bb);
if (fn_entry->inferred_async_node == nullptr) {
diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig
index 511568a898..ccf9485b51 100644
--- a/test/stage1/behavior/coroutines.zig
+++ b/test/stage1/behavior/coroutines.zig
@@ -419,3 +419,29 @@ test "async function call return value" {
};
S.doTheTest();
}
+
+test "suspension points inside branching control flow" {
+ const S = struct {
+ var global_result: i32 = 10;
+
+ fn doTheTest() void {
+ expect(10 == global_result);
+ var frame = async func(true);
+ expect(10 == global_result);
+ resume frame;
+ expect(11 == global_result);
+ resume frame;
+ expect(12 == global_result);
+ resume frame;
+ expect(13 == global_result);
+ }
+
+ fn func(b: bool) void {
+ while (b) {
+ suspend;
+ global_result += 1;
+ }
+ }
+ };
+ S.doTheTest();
+}
--
cgit v1.2.3
From fbf21efd24bf812e0fd52a5917708a4c45f05b5e Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sun, 4 Aug 2019 18:57:59 -0400
Subject: simpler, less memory intensive suspend/resume implementation
---
src/all_types.hpp | 16 ++++--------
src/codegen.cpp | 54 +++++++++++++++++++---------------------
src/ir.cpp | 74 +++++++++++++++++++++----------------------------------
src/ir_print.cpp | 10 +++-----
4 files changed, 62 insertions(+), 92 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 653e6b6254..7c903677a8 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -1386,7 +1386,6 @@ struct ZigFn {
ZigList alloca_gen_list;
ZigList variable_list;
- ZigList resume_blocks;
Buf *section_name;
AstNode *set_alignstack_node;
@@ -1719,6 +1718,7 @@ struct CodeGen {
LLVMValueRef cur_async_resume_index_ptr;
LLVMValueRef cur_async_awaiter_ptr;
LLVMBasicBlockRef cur_preamble_llvm_block;
+ size_t cur_resume_block_count;
LLVMValueRef cur_err_ret_trace_val_arg;
LLVMValueRef cur_err_ret_trace_val_stack;
LLVMValueRef memcpy_fn_val;
@@ -2114,7 +2114,6 @@ struct ScopeRuntime {
struct ScopeSuspend {
Scope base;
- IrBasicBlock *resume_block;
bool reported_err;
};
@@ -2169,8 +2168,6 @@ struct IrBasicBlock {
size_t ref_count;
// index into the basic block list
size_t index;
- // for async functions, the resume index which corresponds to this block
- size_t resume_index;
LLVMBasicBlockRef llvm_block;
LLVMBasicBlockRef llvm_exit_block;
// The instruction that referenced this basic block and caused us to
@@ -2354,7 +2351,7 @@ enum IrInstructionId {
IrInstructionIdPtrOfArrayToSlice,
IrInstructionIdUnionInitNamedField,
IrInstructionIdSuspendBegin,
- IrInstructionIdSuspendBr,
+ IrInstructionIdSuspendFinish,
IrInstructionIdAwait,
IrInstructionIdCoroResume,
};
@@ -3600,13 +3597,13 @@ struct IrInstructionPtrOfArrayToSlice {
struct IrInstructionSuspendBegin {
IrInstruction base;
- IrBasicBlock *resume_block;
+ LLVMBasicBlockRef resume_bb;
};
-struct IrInstructionSuspendBr {
+struct IrInstructionSuspendFinish {
IrInstruction base;
- IrBasicBlock *resume_block;
+ IrInstructionSuspendBegin *begin;
};
struct IrInstructionAwait {
@@ -3710,9 +3707,6 @@ static const size_t coro_resume_index = 1;
static const size_t coro_awaiter_index = 2;
static const size_t coro_arg_start = 3;
-// one for the Entry block, resume blocks are indexed after that.
-static const size_t coro_extra_resume_block_count = 1;
-
// TODO call graph analysis to find out what this number needs to be for every function
// MUST BE A POWER OF TWO.
static const size_t stack_trace_ptr_count = 32;
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 13b3ab5073..1b9019ad08 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -3661,8 +3661,8 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
ZigType *ptr_result_type = get_pointer_to_type(g, src_return_type, true);
LLVMBasicBlockRef call_bb = LLVMAppendBasicBlock(g->cur_fn_val, "CallResume");
- size_t new_block_index = g->cur_fn->resume_blocks.length + coro_extra_resume_block_count;
- g->cur_fn->resume_blocks.append(nullptr);
+ size_t new_block_index = g->cur_resume_block_count;
+ g->cur_resume_block_count += 1;
LLVMValueRef new_block_index_val = LLVMConstInt(usize_type_ref, new_block_index, false);
LLVMAddCase(g->cur_async_switch_instr, new_block_index_val, call_bb);
@@ -5153,15 +5153,22 @@ static LLVMValueRef ir_render_suspend_begin(CodeGen *g, IrExecutable *executable
IrInstructionSuspendBegin *instruction)
{
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
- LLVMValueRef new_resume_index = LLVMConstInt(usize_type_ref, instruction->resume_block->resume_index, false);
- LLVMBuildStore(g->builder, new_resume_index, g->cur_async_resume_index_ptr);
+ instruction->resume_bb = LLVMAppendBasicBlock(g->cur_fn_val, "SuspendResume");
+ size_t new_block_index = g->cur_resume_block_count;
+ g->cur_resume_block_count += 1;
+ LLVMValueRef new_block_index_val = LLVMConstInt(usize_type_ref, new_block_index, false);
+ LLVMAddCase(g->cur_async_switch_instr, new_block_index_val, instruction->resume_bb);
+ LLVMBuildStore(g->builder, new_block_index_val, g->cur_async_resume_index_ptr);
return nullptr;
}
-static LLVMValueRef ir_render_suspend_br(CodeGen *g, IrExecutable *executable,
- IrInstructionSuspendBr *instruction)
+static LLVMValueRef ir_render_suspend_finish(CodeGen *g, IrExecutable *executable,
+ IrInstructionSuspendFinish *instruction)
{
LLVMBuildRetVoid(g->builder);
+
+ LLVMPositionBuilderAtEnd(g->builder, instruction->begin->resume_bb);
+ render_async_var_decls(g, instruction->base.scope);
return nullptr;
}
@@ -5173,8 +5180,8 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst
// Prepare to be suspended
LLVMBasicBlockRef resume_bb = LLVMAppendBasicBlock(g->cur_fn_val, "AwaitResume");
- size_t new_block_index = g->cur_fn->resume_blocks.length + coro_extra_resume_block_count;
- g->cur_fn->resume_blocks.append(nullptr);
+ size_t new_block_index = g->cur_resume_block_count;
+ g->cur_resume_block_count += 1;
LLVMValueRef new_block_index_val = LLVMConstInt(usize_type_ref, new_block_index, false);
LLVMAddCase(g->cur_async_switch_instr, new_block_index_val, resume_bb);
LLVMBuildStore(g->builder, new_block_index_val, g->cur_async_resume_index_ptr);
@@ -5534,8 +5541,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_ptr_of_array_to_slice(g, executable, (IrInstructionPtrOfArrayToSlice *)instruction);
case IrInstructionIdSuspendBegin:
return ir_render_suspend_begin(g, executable, (IrInstructionSuspendBegin *)instruction);
- case IrInstructionIdSuspendBr:
- return ir_render_suspend_br(g, executable, (IrInstructionSuspendBr *)instruction);
+ case IrInstructionIdSuspendFinish:
+ return ir_render_suspend_finish(g, executable, (IrInstructionSuspendFinish *)instruction);
case IrInstructionIdCoroResume:
return ir_render_coro_resume(g, executable, (IrInstructionCoroResume *)instruction);
case IrInstructionIdFrameSizeGen:
@@ -5552,19 +5559,10 @@ static void ir_render(CodeGen *g, ZigFn *fn_entry) {
IrExecutable *executable = &fn_entry->analyzed_executable;
assert(executable->basic_block_list.length > 0);
- if (fn_is_async(fn_entry)) {
- IrBasicBlock *entry_block = executable->basic_block_list.at(0);
- LLVMPositionBuilderAtEnd(g->builder, entry_block->llvm_block);
- render_async_var_decls(g, entry_block->instruction_list.at(0)->scope);
- }
-
for (size_t block_i = 0; block_i < executable->basic_block_list.length; block_i += 1) {
IrBasicBlock *current_block = executable->basic_block_list.at(block_i);
assert(current_block->llvm_block);
LLVMPositionBuilderAtEnd(g->builder, current_block->llvm_block);
- if (current_block->resume_index != 0) {
- render_async_var_decls(g, current_block->instruction_list.at(0)->scope);
- }
for (size_t instr_i = 0; instr_i < current_block->instruction_list.length; instr_i += 1) {
IrInstruction *instruction = current_block->instruction_list.at(instr_i);
if (instruction->ref_count == 0 && !ir_has_side_effects(instruction))
@@ -6757,6 +6755,8 @@ static void do_code_gen(CodeGen *g) {
}
if (is_async) {
+ g->cur_resume_block_count = 0;
+
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
LLVMValueRef size_val = LLVMConstInt(usize_type_ref, fn_table_entry->frame_type->abi_size, false);
ZigLLVMFunctionSetPrefixData(fn_table_entry->llvm_value, size_val);
@@ -6777,19 +6777,15 @@ static void do_code_gen(CodeGen *g) {
LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_resume_index, "");
g->cur_async_resume_index_ptr = resume_index_ptr;
LLVMValueRef resume_index = LLVMBuildLoad(g->builder, resume_index_ptr, "");
- LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, resume_index, bad_resume_block,
- fn_table_entry->resume_blocks.length + coro_extra_resume_block_count);
+ LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, resume_index, bad_resume_block, 4);
g->cur_async_switch_instr = switch_instr;
LLVMValueRef zero = LLVMConstNull(usize_type_ref);
- LLVMAddCase(switch_instr, zero, executable->basic_block_list.at(0)->llvm_block);
-
- for (size_t resume_i = 0; resume_i < fn_table_entry->resume_blocks.length; resume_i += 1) {
- IrBasicBlock *resume_block = fn_table_entry->resume_blocks.at(resume_i);
- LLVMValueRef case_value = LLVMConstInt(usize_type_ref, resume_block->resume_index, false);
- LLVMAddCase(switch_instr, case_value, resume_block->llvm_block);
- }
-
+ IrBasicBlock *entry_block = executable->basic_block_list.at(0);
+ LLVMAddCase(switch_instr, zero, entry_block->llvm_block);
+ g->cur_resume_block_count += 1;
+ LLVMPositionBuilderAtEnd(g->builder, entry_block->llvm_block);
+ render_async_var_decls(g, entry_block->instruction_list.at(0)->scope);
} else {
// create debug variable declarations for parameters
// rely on the first variables in the variable_list being parameters.
diff --git a/src/ir.cpp b/src/ir.cpp
index c81000573c..45a48d6f50 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -1049,8 +1049,8 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionSuspendBegin *)
return IrInstructionIdSuspendBegin;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionSuspendBr *) {
- return IrInstructionIdSuspendBr;
+static constexpr IrInstructionId ir_instruction_id(IrInstructionSuspendFinish *) {
+ return IrInstructionIdSuspendFinish;
}
static constexpr IrInstructionId ir_instruction_id(IrInstructionAwait *) {
@@ -3260,25 +3260,21 @@ static IrInstruction *ir_build_end_expr(IrBuilder *irb, Scope *scope, AstNode *s
return &instruction->base;
}
-static IrInstruction *ir_build_suspend_begin(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrBasicBlock *resume_block)
-{
+static IrInstructionSuspendBegin *ir_build_suspend_begin(IrBuilder *irb, Scope *scope, AstNode *source_node) {
IrInstructionSuspendBegin *instruction = ir_build_instruction(irb, scope, source_node);
instruction->base.value.type = irb->codegen->builtin_types.entry_void;
- instruction->resume_block = resume_block;
-
- ir_ref_bb(resume_block);
- return &instruction->base;
+ return instruction;
}
-static IrInstruction *ir_build_suspend_br(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrBasicBlock *resume_block)
+static IrInstruction *ir_build_suspend_finish(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ IrInstructionSuspendBegin *begin)
{
- IrInstructionSuspendBr *instruction = ir_build_instruction(irb, scope, source_node);
- instruction->resume_block = resume_block;
+ IrInstructionSuspendFinish *instruction = ir_build_instruction(irb, scope, source_node);
+ instruction->base.value.type = irb->codegen->builtin_types.entry_void;
+ instruction->begin = begin;
- ir_ref_bb(resume_block);
+ ir_ref_instruction(&begin->base, irb->current_basic_block);
return &instruction->base;
}
@@ -7890,22 +7886,15 @@ static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNod
return irb->codegen->invalid_instruction;
}
- IrBasicBlock *resume_block = ir_create_basic_block(irb, parent_scope, "SuspendResume");
-
- ir_build_suspend_begin(irb, parent_scope, node, resume_block);
+ IrInstructionSuspendBegin *begin = ir_build_suspend_begin(irb, parent_scope, node);
if (node->data.suspend.block != nullptr) {
- Scope *child_scope;
ScopeSuspend *suspend_scope = create_suspend_scope(irb->codegen, node, parent_scope);
- suspend_scope->resume_block = resume_block;
- child_scope = &suspend_scope->base;
+ Scope *child_scope = &suspend_scope->base;
IrInstruction *susp_res = ir_gen_node(irb, node->data.suspend.block, child_scope);
ir_mark_gen(ir_build_check_statement_is_void(irb, child_scope, node->data.suspend.block, susp_res));
}
- IrInstruction *result = ir_build_suspend_br(irb, parent_scope, node, resume_block);
- result->value.type = irb->codegen->builtin_types.entry_void;
- ir_set_cursor_at_end_and_append_block(irb, resume_block);
- return result;
+ return ir_build_suspend_finish(irb, parent_scope, node, begin);
}
static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scope,
@@ -24458,35 +24447,28 @@ static IrInstruction *ir_analyze_instruction_union_init_named_field(IrAnalyze *i
}
static IrInstruction *ir_analyze_instruction_suspend_begin(IrAnalyze *ira, IrInstructionSuspendBegin *instruction) {
- IrBasicBlock *new_bb = ir_get_new_bb_runtime(ira, instruction->resume_block, &instruction->base);
- if (new_bb == nullptr)
- return ir_unreach_error(ira);
- return ir_build_suspend_begin(&ira->new_irb, instruction->base.scope, instruction->base.source_node, new_bb);
+ IrInstructionSuspendBegin *result = ir_build_suspend_begin(&ira->new_irb, instruction->base.scope,
+ instruction->base.source_node);
+ return &result->base;
}
-static IrInstruction *ir_analyze_instruction_suspend_br(IrAnalyze *ira, IrInstructionSuspendBr *instruction) {
- IrBasicBlock *old_dest_block = instruction->resume_block;
-
- IrBasicBlock *new_bb = ir_get_new_bb_runtime(ira, old_dest_block, &instruction->base);
- if (new_bb == nullptr)
- return ir_unreach_error(ira);
+static IrInstruction *ir_analyze_instruction_suspend_finish(IrAnalyze *ira,
+ IrInstructionSuspendFinish *instruction)
+{
+ IrInstruction *begin_base = instruction->begin->base.child;
+ if (type_is_invalid(begin_base->value.type))
+ return ira->codegen->invalid_instruction;
+ ir_assert(begin_base->id == IrInstructionIdSuspendBegin, &instruction->base);
+ IrInstructionSuspendBegin *begin = reinterpret_cast(begin_base);
ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
ir_assert(fn_entry != nullptr, &instruction->base);
- new_bb->resume_index = fn_entry->resume_blocks.length + coro_extra_resume_block_count;
-
- fn_entry->resume_blocks.append(new_bb);
if (fn_entry->inferred_async_node == nullptr) {
fn_entry->inferred_async_node = instruction->base.source_node;
}
- ir_push_resume_block(ira, old_dest_block);
-
- IrInstruction *result = ir_build_suspend_br(&ira->new_irb,
- instruction->base.scope, instruction->base.source_node, new_bb);
- result->value.type = ira->codegen->builtin_types.entry_unreachable;
- return ir_finish_anal(ira, result);
+ return ir_build_suspend_finish(&ira->new_irb, instruction->base.scope, instruction->base.source_node, begin);
}
static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstructionAwait *instruction) {
@@ -24847,8 +24829,8 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_union_init_named_field(ira, (IrInstructionUnionInitNamedField *)instruction);
case IrInstructionIdSuspendBegin:
return ir_analyze_instruction_suspend_begin(ira, (IrInstructionSuspendBegin *)instruction);
- case IrInstructionIdSuspendBr:
- return ir_analyze_instruction_suspend_br(ira, (IrInstructionSuspendBr *)instruction);
+ case IrInstructionIdSuspendFinish:
+ return ir_analyze_instruction_suspend_finish(ira, (IrInstructionSuspendFinish *)instruction);
case IrInstructionIdCoroResume:
return ir_analyze_instruction_coro_resume(ira, (IrInstructionCoroResume *)instruction);
case IrInstructionIdAwait:
@@ -24986,7 +24968,7 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdVectorToArray:
case IrInstructionIdResetResult:
case IrInstructionIdSuspendBegin:
- case IrInstructionIdSuspendBr:
+ case IrInstructionIdSuspendFinish:
case IrInstructionIdCoroResume:
case IrInstructionIdAwait:
return true;
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index 46d2906d30..549da9de19 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -1534,10 +1534,8 @@ static void ir_print_suspend_begin(IrPrint *irp, IrInstructionSuspendBegin *inst
fprintf(irp->f, "@suspendBegin()");
}
-static void ir_print_suspend_br(IrPrint *irp, IrInstructionSuspendBr *instruction) {
- fprintf(irp->f, "@suspendBr(");
- ir_print_other_block(irp, instruction->resume_block);
- fprintf(irp->f, ")");
+static void ir_print_suspend_finish(IrPrint *irp, IrInstructionSuspendFinish *instruction) {
+ fprintf(irp->f, "@suspendFinish()");
}
static void ir_print_coro_resume(IrPrint *irp, IrInstructionCoroResume *instruction) {
@@ -2025,8 +2023,8 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdSuspendBegin:
ir_print_suspend_begin(irp, (IrInstructionSuspendBegin *)instruction);
break;
- case IrInstructionIdSuspendBr:
- ir_print_suspend_br(irp, (IrInstructionSuspendBr *)instruction);
+ case IrInstructionIdSuspendFinish:
+ ir_print_suspend_finish(irp, (IrInstructionSuspendFinish *)instruction);
break;
case IrInstructionIdCoroResume:
ir_print_coro_resume(irp, (IrInstructionCoroResume *)instruction);
--
cgit v1.2.3
From 0d8c9fcb18b399bd2afedbcbcc7736326ef92297 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 5 Aug 2019 00:41:49 -0400
Subject: support async functions with inferred error sets
---
BRANCH_TODO | 2 +
src/all_types.hpp | 2 +-
src/analyze.cpp | 226 ++++++++++++++++++++----------------
src/codegen.cpp | 141 +++++++++++++---------
src/codegen.hpp | 1 +
src/ir.cpp | 9 +-
test/stage1/behavior/coroutines.zig | 77 ++++++++----
7 files changed, 269 insertions(+), 189 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/BRANCH_TODO b/BRANCH_TODO
index f76252d935..fcd98f0f71 100644
--- a/BRANCH_TODO
+++ b/BRANCH_TODO
@@ -1,3 +1,4 @@
+ * delete IrInstructionMarkErrRetTracePtr
* go over the commented out tests
* error return tracing
* compile error for error: expected anyframe->T, found 'anyframe'
@@ -32,3 +33,4 @@
- resume
- anyframe, anyframe->T
* safety for double await
+ * call graph analysis to have fewer stack trace frames
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 7c903677a8..1ea4954dec 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -3705,7 +3705,7 @@ static const size_t err_union_payload_index = 1;
static const size_t coro_fn_ptr_index = 0;
static const size_t coro_resume_index = 1;
static const size_t coro_awaiter_index = 2;
-static const size_t coro_arg_start = 3;
+static const size_t coro_ret_start = 3;
// TODO call graph analysis to find out what this number needs to be for every function
// MUST BE A POWER OF TWO.
diff --git a/src/analyze.cpp b/src/analyze.cpp
index e7480c579b..e1b386d9af 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -7,6 +7,7 @@
#include "analyze.hpp"
#include "ast_render.hpp"
+#include "codegen.hpp"
#include "config.h"
#include "error.hpp"
#include "ir.hpp"
@@ -5212,23 +5213,34 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) {
ZigList field_types = {};
ZigList field_names = {};
- field_names.append("fn_ptr");
+ field_names.append("@fn_ptr");
field_types.append(fn_type);
- field_names.append("resume_index");
+ field_names.append("@resume_index");
field_types.append(g->builtin_types.entry_usize);
- field_names.append("awaiter");
+ field_names.append("@awaiter");
field_types.append(g->builtin_types.entry_usize);
FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
ZigType *ptr_return_type = get_pointer_to_type(g, fn_type_id->return_type, false);
- field_names.append("result_ptr");
+ field_names.append("@ptr_result");
field_types.append(ptr_return_type);
- field_names.append("result");
+ field_names.append("@result");
field_types.append(fn_type_id->return_type);
+ if (codegen_fn_has_err_ret_tracing(g, fn_type_id->return_type)) {
+ field_names.append("@ptr_stack_trace");
+ field_types.append(get_ptr_to_stack_trace_type(g));
+
+ field_names.append("@stack_trace");
+ field_types.append(g->stack_trace_type);
+
+ field_names.append("@instruction_addresses");
+ field_types.append(get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count));
+ }
+
for (size_t arg_i = 0; arg_i < fn_type_id->param_count; arg_i += 1) {
FnTypeParamInfo *param_info = &fn_type_id->param_info[arg_i];
AstNode *param_decl_node = get_param_decl_node(fn, arg_i);
@@ -5237,7 +5249,7 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) {
if (param_decl_node && !is_var_args) {
param_name = param_decl_node->data.param_decl.name;
} else {
- param_name = buf_sprintf("arg%" ZIG_PRI_usize "", arg_i);
+ param_name = buf_sprintf("@arg%" ZIG_PRI_usize, arg_i);
}
ZigType *param_type = param_info->type;
field_names.append(buf_ptr(param_name));
@@ -5260,7 +5272,13 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) {
continue;
}
}
- field_names.append(instruction->name_hint);
+ const char *name;
+ if (*instruction->name_hint == 0) {
+ name = buf_ptr(buf_sprintf("@local%" ZIG_PRI_usize, alloca_i));
+ } else {
+ name = instruction->name_hint;
+ }
+ field_names.append(name);
field_types.append(child_type);
}
@@ -7369,7 +7387,7 @@ static void resolve_llvm_types_fn_type(CodeGen *g, ZigType *fn_type) {
}
fn_type->data.fn.gen_return_type = gen_return_type;
- if (prefix_arg_error_return_trace) {
+ if (prefix_arg_error_return_trace && !is_async) {
ZigType *gen_type = get_ptr_to_stack_trace_type(g);
gen_param_types.append(get_llvm_type(g, gen_type));
param_di_types.append(get_llvm_di_type(g, gen_type));
@@ -7527,110 +7545,112 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re
ZigLLVMDIScope *compile_unit_scope = ZigLLVMCompileUnitToScope(g->compile_unit);
ZigType *result_type = any_frame_type->data.any_frame.result_type;
- if (result_type == nullptr || !type_has_bits(result_type)) {
- LLVMTypeRef ptr_result_type = LLVMPointerType(fn_type, 0);
- if (result_type == nullptr) {
- g->anyframe_fn_type = ptr_result_type;
- }
- // label (grep this): [coro_frame_struct_layout]
- LLVMTypeRef field_types[] = {
- ptr_result_type, // fn_ptr
- usize_type_ref, // resume_index
- usize_type_ref, // awaiter
- };
- LLVMStructSetBody(frame_header_type, field_types, 3, false);
+ ZigType *ptr_result_type = (result_type == nullptr) ? nullptr : get_pointer_to_type(g, result_type, false);
+ LLVMTypeRef ptr_fn_llvm_type = LLVMPointerType(fn_type, 0);
+ if (result_type == nullptr) {
+ g->anyframe_fn_type = ptr_fn_llvm_type;
+ }
- ZigLLVMDIType *di_element_types[] = {
- ZigLLVMCreateDebugMemberType(g->dbuilder,
- ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "fn_ptr",
- di_file, line,
- 8*LLVMABISizeOfType(g->target_data_ref, field_types[0]),
- 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[0]),
- 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 0),
- ZigLLVM_DIFlags_Zero, usize_di_type),
- ZigLLVMCreateDebugMemberType(g->dbuilder,
- ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "resume_index",
- di_file, line,
- 8*LLVMABISizeOfType(g->target_data_ref, field_types[1]),
- 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[1]),
- 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 1),
- ZigLLVM_DIFlags_Zero, usize_di_type),
- ZigLLVMCreateDebugMemberType(g->dbuilder,
- ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "awaiter",
- di_file, line,
- 8*LLVMABISizeOfType(g->target_data_ref, field_types[2]),
- 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[2]),
- 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 2),
- ZigLLVM_DIFlags_Zero, usize_di_type),
- };
- ZigLLVMDIType *replacement_di_type = ZigLLVMCreateDebugStructType(g->dbuilder,
- compile_unit_scope, buf_ptr(name),
- di_file, line,
- 8*LLVMABISizeOfType(g->target_data_ref, frame_header_type),
- 8*LLVMABIAlignmentOfType(g->target_data_ref, frame_header_type),
- ZigLLVM_DIFlags_Zero,
- nullptr, di_element_types, 3, 0, nullptr, "");
+ ZigList field_types = {};
+ ZigList di_element_types = {};
- ZigLLVMReplaceTemporary(g->dbuilder, frame_header_di_type, replacement_di_type);
- } else {
- ZigType *ptr_result_type = get_pointer_to_type(g, result_type, false);
- // label (grep this): [coro_frame_struct_layout]
- LLVMTypeRef field_types[] = {
- LLVMPointerType(fn_type, 0), // fn_ptr
- usize_type_ref, // resume_index
- usize_type_ref, // awaiter
- get_llvm_type(g, ptr_result_type), // result_ptr
- get_llvm_type(g, result_type), // result
- };
- LLVMStructSetBody(frame_header_type, field_types, 5, false);
+ // label (grep this): [coro_frame_struct_layout]
+ field_types.append(ptr_fn_llvm_type); // fn_ptr
+ field_types.append(usize_type_ref); // resume_index
+ field_types.append(usize_type_ref); // awaiter
- ZigLLVMDIType *di_element_types[] = {
- ZigLLVMCreateDebugMemberType(g->dbuilder,
- ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "fn_ptr",
- di_file, line,
- 8*LLVMABISizeOfType(g->target_data_ref, field_types[0]),
- 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[0]),
- 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 0),
- ZigLLVM_DIFlags_Zero, usize_di_type),
- ZigLLVMCreateDebugMemberType(g->dbuilder,
- ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "awaiter",
- di_file, line,
- 8*LLVMABISizeOfType(g->target_data_ref, field_types[1]),
- 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[1]),
- 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 1),
- ZigLLVM_DIFlags_Zero, usize_di_type),
- ZigLLVMCreateDebugMemberType(g->dbuilder,
- ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "awaiter",
- di_file, line,
- 8*LLVMABISizeOfType(g->target_data_ref, field_types[2]),
- 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[2]),
- 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 2),
- ZigLLVM_DIFlags_Zero, usize_di_type),
+ bool have_result_type = result_type != nullptr && type_has_bits(result_type);
+ if (have_result_type) {
+ field_types.append(get_llvm_type(g, ptr_result_type)); // ptr_result
+ field_types.append(get_llvm_type(g, result_type)); // result
+ if (codegen_fn_has_err_ret_tracing(g, result_type)) {
+ field_types.append(get_llvm_type(g, get_ptr_to_stack_trace_type(g))); // ptr_stack_trace
+ field_types.append(get_llvm_type(g, g->stack_trace_type)); // stack_trace
+ field_types.append(get_llvm_type(g, get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count))); // instruction_addresses
+ }
+ }
+ LLVMStructSetBody(frame_header_type, field_types.items, field_types.length, false);
+
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "fn_ptr",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, usize_di_type));
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "resume_index",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, usize_di_type));
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "awaiter",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, usize_di_type));
+
+ if (have_result_type) {
+ di_element_types.append(
ZigLLVMCreateDebugMemberType(g->dbuilder,
- ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "result_ptr",
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "ptr_result",
di_file, line,
- 8*LLVMABISizeOfType(g->target_data_ref, field_types[3]),
- 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[3]),
- 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 3),
- ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_result_type)),
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_result_type)));
+ di_element_types.append(
ZigLLVMCreateDebugMemberType(g->dbuilder,
ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "result",
di_file, line,
- 8*LLVMABISizeOfType(g->target_data_ref, field_types[4]),
- 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types[4]),
- 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, 4),
- ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, result_type)),
- };
- ZigLLVMDIType *replacement_di_type = ZigLLVMCreateDebugStructType(g->dbuilder,
- compile_unit_scope, buf_ptr(name),
- di_file, line,
- 8*LLVMABISizeOfType(g->target_data_ref, frame_header_type),
- 8*LLVMABIAlignmentOfType(g->target_data_ref, frame_header_type),
- ZigLLVM_DIFlags_Zero,
- nullptr, di_element_types, 5, 0, nullptr, "");
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, result_type)));
+
+ if (codegen_fn_has_err_ret_tracing(g, result_type)) {
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "ptr_stack_trace",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, get_ptr_to_stack_trace_type(g))));
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "stack_trace",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, g->stack_trace_type)));
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "instruction_addresses",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count))));
+ }
+ };
- ZigLLVMReplaceTemporary(g->dbuilder, frame_header_di_type, replacement_di_type);
- }
+ ZigLLVMDIType *replacement_di_type = ZigLLVMCreateDebugStructType(g->dbuilder,
+ compile_unit_scope, buf_ptr(name),
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, frame_header_type),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, frame_header_type),
+ ZigLLVM_DIFlags_Zero,
+ nullptr, di_element_types.items, di_element_types.length, 0, nullptr, "");
+
+ ZigLLVMReplaceTemporary(g->dbuilder, frame_header_di_type, replacement_di_type);
}
static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_resolve_status) {
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 4ecdfd3bdd..59289523a7 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -297,12 +297,30 @@ static LLVMLinkage to_llvm_linkage(GlobalLinkageId id) {
zig_unreachable();
}
+// label (grep this): [coro_frame_struct_layout]
+static uint32_t frame_index_trace(CodeGen *g, FnTypeId *fn_type_id) {
+ // [0] *ReturnType
+ // [1] ReturnType
+ uint32_t return_field_count = type_has_bits(fn_type_id->return_type) ? 2 : 0;
+ return coro_ret_start + return_field_count;
+}
+
+// label (grep this): [coro_frame_struct_layout]
+static uint32_t frame_index_arg(CodeGen *g, FnTypeId *fn_type_id) {
+ bool have_stack_trace = g->have_err_ret_tracing && codegen_fn_has_err_ret_tracing(g, fn_type_id->return_type);
+ // [0] *StackTrace
+ // [1] StackTrace
+ // [2] [stack_trace_ptr_count]usize
+ uint32_t trace_field_count = have_stack_trace ? 3 : 0;
+ return frame_index_trace(g, fn_type_id) + trace_field_count;
+}
+
static uint32_t get_err_ret_trace_arg_index(CodeGen *g, ZigFn *fn_table_entry) {
if (!g->have_err_ret_tracing) {
return UINT32_MAX;
}
- if (fn_table_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync) {
- return 0;
+ if (fn_is_async(fn_table_entry)) {
+ return UINT32_MAX;
}
ZigType *fn_type = fn_table_entry->type_entry;
if (!fn_type_can_fail(&fn_type->data.fn.fn_type_id)) {
@@ -438,10 +456,6 @@ static LLVMValueRef make_fn_llvm_value(CodeGen *g, ZigFn *fn) {
} else {
LLVMSetFunctionCallConv(llvm_fn, get_llvm_cc(g, fn_type->data.fn.fn_type_id.cc));
}
- if (cc == CallingConventionAsync) {
- addLLVMFnAttr(llvm_fn, "optnone");
- addLLVMFnAttr(llvm_fn, "noinline");
- }
bool want_cold = fn->is_cold || cc == CallingConventionCold;
if (want_cold) {
@@ -1273,8 +1287,8 @@ static LLVMValueRef get_cur_err_ret_trace_val(CodeGen *g, Scope *scope) {
if (!g->have_err_ret_tracing) {
return nullptr;
}
- if (g->cur_fn->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync) {
- return g->cur_err_ret_trace_val_stack;
+ if (fn_is_async(g->cur_fn)) {
+ return LLVMBuildLoad(g->builder, g->cur_err_ret_trace_val_arg, "");
}
if (g->cur_err_ret_trace_val_stack != nullptr) {
return g->cur_err_ret_trace_val_stack;
@@ -2006,7 +2020,6 @@ static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable,
{
if (fn_is_async(g->cur_fn)) {
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
- LLVMValueRef locals_ptr = g->cur_ret_ptr;
bool ret_type_has_bits = return_instruction->value != nullptr &&
type_has_bits(return_instruction->value->value.type);
ZigType *ret_type = ret_type_has_bits ? return_instruction->value->value.type : nullptr;
@@ -2018,7 +2031,7 @@ static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable,
LLVMValueRef result_ptr_as_usize;
if (ret_type_has_bits) {
- LLVMValueRef result_ptr_ptr = LLVMBuildStructGEP(g->builder, locals_ptr, coro_arg_start, "");
+ LLVMValueRef result_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_ret_start, "");
LLVMValueRef result_ptr = LLVMBuildLoad(g->builder, result_ptr_ptr, "");
if (!handle_is_ptr(ret_type)) {
// It's a scalar, so it didn't get written to the result ptr. Do that now.
@@ -3256,7 +3269,7 @@ static LLVMValueRef ir_render_return_ptr(CodeGen *g, IrExecutable *executable,
return nullptr;
src_assert(g->cur_ret_ptr != nullptr, instruction->base.source_node);
if (fn_is_async(g->cur_fn)) {
- LLVMValueRef ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_arg_start, "");
+ LLVMValueRef ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_ret_start, "");
return LLVMBuildLoad(g->builder, ptr_ptr, "");
}
return g->cur_ret_ptr;
@@ -3356,12 +3369,6 @@ static LLVMValueRef ir_render_elem_ptr(CodeGen *g, IrExecutable *executable, IrI
}
}
-static bool get_prefix_arg_err_ret_stack(CodeGen *g, FnTypeId *fn_type_id) {
- return g->have_err_ret_tracing &&
- (fn_type_id->return_type->id == ZigTypeIdErrorUnion ||
- fn_type_id->return_type->id == ZigTypeIdErrorSet);
-}
-
static LLVMValueRef get_new_stack_addr(CodeGen *g, LLVMValueRef new_stack) {
LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, new_stack, (unsigned)slice_ptr_index, "");
LLVMValueRef len_field_ptr = LLVMBuildStructGEP(g->builder, new_stack, (unsigned)slice_len_index, "");
@@ -3402,7 +3409,7 @@ static void set_call_instr_sret(CodeGen *g, LLVMValueRef call_instr) {
static void render_async_spills(CodeGen *g) {
ZigType *fn_type = g->cur_fn->type_entry;
ZigType *import = get_scope_import(&g->cur_fn->fndef_scope->base);
- size_t async_var_index = coro_arg_start + (type_has_bits(fn_type->data.fn.fn_type_id.return_type) ? 2 : 0);
+ uint32_t async_var_index = frame_index_arg(g, &fn_type->data.fn.fn_type_id);
for (size_t var_i = 0; var_i < g->cur_fn->variable_list.length; var_i += 1) {
ZigVar *var = g->cur_fn->variable_list.at(var_i);
@@ -3518,11 +3525,11 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
CallingConvention cc = fn_type->data.fn.fn_type_id.cc;
bool first_arg_ret = ret_has_bits && want_first_arg_sret(g, fn_type_id);
- bool prefix_arg_err_ret_stack = get_prefix_arg_err_ret_stack(g, fn_type_id);
+ bool prefix_arg_err_ret_stack = codegen_fn_has_err_ret_tracing(g, fn_type_id->return_type);
bool is_var_args = fn_type_id->is_var_args;
ZigList gen_param_values = {};
LLVMValueRef result_loc = instruction->result_loc ? ir_llvm_value(g, instruction->result_loc) : nullptr;
- LLVMValueRef zero = LLVMConstNull(g->builtin_types.entry_usize->llvm_type);
+ LLVMValueRef zero = LLVMConstNull(usize_type_ref);
LLVMValueRef frame_result_loc;
LLVMValueRef awaiter_init_val;
LLVMValueRef ret_ptr;
@@ -3534,7 +3541,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
if (ret_has_bits) {
// Use the result location which is inside the frame if this is an async call.
- ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_arg_start + 1, "");
+ ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_ret_start + 1, "");
}
} else {
LLVMValueRef frame_slice_ptr = ir_llvm_value(g, instruction->new_stack);
@@ -3564,14 +3571,49 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
ret_ptr = result_loc;
}
}
+
+ if (prefix_arg_err_ret_stack) {
+ uint32_t trace_field_index = frame_index_trace(g, fn_type_id);
+ LLVMValueRef trace_field_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc,
+ trace_field_index, "");
+ LLVMValueRef trace_field_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc,
+ trace_field_index + 1, "");
+ LLVMValueRef trace_field_addrs = LLVMBuildStructGEP(g->builder, frame_result_loc,
+ trace_field_index + 2, "");
+ LLVMBuildStore(g->builder, trace_field_ptr, trace_field_ptr_ptr);
+
+ LLVMValueRef index_ptr = LLVMBuildStructGEP(g->builder, trace_field_ptr, 0, "");
+ LLVMBuildStore(g->builder, zero, index_ptr);
+
+ LLVMValueRef addrs_slice_ptr = LLVMBuildStructGEP(g->builder, trace_field_ptr, 1, "");
+ LLVMValueRef addrs_ptr_ptr = LLVMBuildStructGEP(g->builder, addrs_slice_ptr, slice_ptr_index, "");
+ LLVMValueRef indices[] = { LLVMConstNull(usize_type_ref), LLVMConstNull(usize_type_ref) };
+ LLVMValueRef trace_field_addrs_as_ptr = LLVMBuildInBoundsGEP(g->builder, trace_field_addrs, indices, 2, "");
+ LLVMBuildStore(g->builder, trace_field_addrs_as_ptr, addrs_ptr_ptr);
+
+ LLVMValueRef addrs_len_ptr = LLVMBuildStructGEP(g->builder, addrs_slice_ptr, slice_len_index, "");
+ LLVMBuildStore(g->builder, LLVMConstInt(usize_type_ref, stack_trace_ptr_count, false), addrs_len_ptr);
+ }
} else if (callee_is_async) {
frame_result_loc = ir_llvm_value(g, instruction->frame_result_loc);
- awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_ret_ptr,
- g->builtin_types.entry_usize->llvm_type, ""); // caller's own frame pointer
+ awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_ret_ptr, usize_type_ref, ""); // caller's own frame pointer
if (ret_has_bits) {
- // Use the call instruction's result location.
- ret_ptr = result_loc;
+ if (result_loc != nullptr) {
+ // Use the call instruction's result location.
+ ret_ptr = result_loc;
+ } else {
+ // return type is a scalar, but we still need a pointer to it. Use the async fn frame.
+ ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_ret_start + 1, "");
+ }
+ }
+
+ if (prefix_arg_err_ret_stack) {
+ uint32_t trace_field_index = frame_index_trace(g, fn_type_id);
+ LLVMValueRef trace_field_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, trace_field_index, "");
+ LLVMValueRef err_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope);
+ LLVMBuildStore(g->builder, err_trace_val, trace_field_ptr);
}
+
}
if (instruction->is_async || callee_is_async) {
assert(frame_result_loc != nullptr);
@@ -3584,19 +3626,14 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_resume_index, "");
LLVMBuildStore(g->builder, zero, resume_index_ptr);
- if (prefix_arg_err_ret_stack) {
- zig_panic("TODO");
- }
-
LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_awaiter_index, "");
LLVMBuildStore(g->builder, awaiter_init_val, awaiter_ptr);
if (ret_has_bits) {
- LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_arg_start, "");
+ LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_ret_start, "");
LLVMBuildStore(g->builder, ret_ptr, ret_ptr_ptr);
}
- }
- if (!instruction->is_async && !callee_is_async) {
+ } else {
if (first_arg_ret) {
gen_param_values.append(result_loc);
}
@@ -3628,16 +3665,15 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
LLVMValueRef result;
if (instruction->is_async || callee_is_async) {
- size_t ret_2_or_0 = type_has_bits(fn_type->data.fn.fn_type_id.return_type) ? 2 : 0;
- size_t arg_start_i = coro_arg_start + ret_2_or_0;
+ uint32_t arg_start_i = frame_index_arg(g, &fn_type->data.fn.fn_type_id);
LLVMValueRef casted_frame;
if (instruction->new_stack != nullptr) {
// We need the frame type to be a pointer to a struct that includes the args
- // label (grep this): [coro_frame_struct_layout]
size_t field_count = arg_start_i + gen_param_values.length;
LLVMTypeRef *field_types = allocate_nonzero(field_count);
LLVMGetStructElementTypes(LLVMGetElementType(LLVMTypeOf(frame_result_loc)), field_types);
+ assert(LLVMCountStructElementTypes(LLVMGetElementType(LLVMTypeOf(frame_result_loc))) == arg_start_i);
for (size_t arg_i = 0; arg_i < gen_param_values.length; arg_i += 1) {
field_types[arg_start_i + arg_i] = LLVMTypeOf(gen_param_values.at(arg_i));
}
@@ -5198,7 +5234,7 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst
LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_awaiter_index, "");
LLVMValueRef result_ptr_as_usize;
if (type_has_bits(result_type)) {
- LLVMValueRef result_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_arg_start, "");
+ LLVMValueRef result_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_ret_start, "");
LLVMValueRef result_ptr = LLVMBuildLoad(g->builder, result_ptr_ptr, "");
result_ptr_as_usize = LLVMBuildPtrToInt(g->builder, result_ptr, usize_type_ref, "");
} else {
@@ -5259,23 +5295,6 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst
}
}
-static LLVMTypeRef anyframe_fn_type(CodeGen *g) {
- if (g->anyframe_fn_type != nullptr)
- return g->anyframe_fn_type;
-
- LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
- ZigType *anyframe_type = get_any_frame_type(g, nullptr);
- LLVMTypeRef return_type = LLVMVoidType();
- LLVMTypeRef param_types[] = {
- get_llvm_type(g, anyframe_type),
- usize_type_ref,
- };
- LLVMTypeRef fn_type = LLVMFunctionType(return_type, param_types, 2, false);
- g->anyframe_fn_type = LLVMPointerType(fn_type, 0);
-
- return g->anyframe_fn_type;
-}
-
static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable,
IrInstructionCoroResume *instruction)
{
@@ -5285,7 +5304,7 @@ static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable,
assert(frame_type->id == ZigTypeIdAnyFrame);
LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, frame, coro_fn_ptr_index, "");
LLVMValueRef uncasted_fn_val = LLVMBuildLoad(g->builder, fn_ptr_ptr, "");
- LLVMValueRef fn_val = LLVMBuildIntToPtr(g->builder, uncasted_fn_val, anyframe_fn_type(g), "");
+ LLVMValueRef fn_val = LLVMBuildIntToPtr(g->builder, uncasted_fn_val, g->anyframe_fn_type, "");
LLVMValueRef arg_val = ir_want_runtime_safety(g, &instruction->base) ?
LLVMConstAllOnes(usize_type_ref) : LLVMGetUndef(usize_type_ref);
LLVMValueRef args[] = {frame, arg_val};
@@ -6636,7 +6655,8 @@ static void do_code_gen(CodeGen *g) {
}
// error return tracing setup
- bool have_err_ret_trace_stack = g->have_err_ret_tracing && fn_table_entry->calls_or_awaits_errorable_fn && !is_async && !have_err_ret_trace_arg;
+ bool have_err_ret_trace_stack = g->have_err_ret_tracing && fn_table_entry->calls_or_awaits_errorable_fn &&
+ !is_async && !have_err_ret_trace_arg;
LLVMValueRef err_ret_array_val = nullptr;
if (have_err_ret_trace_stack) {
ZigType *array_type = get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count);
@@ -6780,6 +6800,11 @@ static void do_code_gen(CodeGen *g) {
g->cur_async_awaiter_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_awaiter_index, "");
LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_resume_index, "");
g->cur_async_resume_index_ptr = resume_index_ptr;
+ if (codegen_fn_has_err_ret_tracing(g, fn_type_id->return_type)) {
+ uint32_t field_index = frame_index_trace(g, fn_type_id);
+ g->cur_err_ret_trace_val_arg = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, field_index, "");
+ }
+
LLVMValueRef resume_index = LLVMBuildLoad(g->builder, resume_index_ptr, "");
LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, resume_index, bad_resume_block, 4);
g->cur_async_switch_instr = switch_instr;
@@ -9691,3 +9716,9 @@ CodeGen *codegen_create(Buf *main_pkg_path, Buf *root_src_path, const ZigTarget
return g;
}
+
+bool codegen_fn_has_err_ret_tracing(CodeGen *g, ZigType *return_type) {
+ return g->have_err_ret_tracing &&
+ (return_type->id == ZigTypeIdErrorUnion ||
+ return_type->id == ZigTypeIdErrorSet);
+}
diff --git a/src/codegen.hpp b/src/codegen.hpp
index cdff61a26f..0ee4ce837e 100644
--- a/src/codegen.hpp
+++ b/src/codegen.hpp
@@ -61,5 +61,6 @@ Buf *codegen_generate_builtin_source(CodeGen *g);
TargetSubsystem detect_subsystem(CodeGen *g);
void codegen_release_caches(CodeGen *codegen);
+bool codegen_fn_has_err_ret_tracing(CodeGen *g, ZigType *return_type);
#endif
diff --git a/src/ir.cpp b/src/ir.cpp
index 45a48d6f50..e6212fa079 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -14859,11 +14859,7 @@ static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc
ZigType *fn_type, IrInstruction *fn_ref, IrInstruction **casted_args, size_t arg_count,
IrInstruction *casted_new_stack)
{
- if (fn_entry == nullptr) {
- if (call_instruction->new_stack == nullptr) {
- ir_add_error(ira, fn_ref, buf_sprintf("function is not comptime-known; @asyncCall required"));
- return ira->codegen->invalid_instruction;
- }
+ if (casted_new_stack != nullptr) {
// this is an @asyncCall
if (fn_type->data.fn.fn_type_id.cc != CallingConventionAsync) {
@@ -14881,6 +14877,9 @@ static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc
IrInstructionCallGen *call_gen = ir_build_call_gen(ira, &call_instruction->base, nullptr, fn_ref,
arg_count, casted_args, FnInlineAuto, true, casted_new_stack, ret_ptr, anyframe_type);
return &call_gen->base;
+ } else if (fn_entry == nullptr) {
+ ir_add_error(ira, fn_ref, buf_sprintf("function is not comptime-known; @asyncCall required"));
+ return ira->codegen->invalid_instruction;
}
ZigType *frame_type = get_coro_frame_type(ira->codegen, fn_entry);
diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig
index ccf9485b51..c4f4cd3c99 100644
--- a/test/stage1/behavior/coroutines.zig
+++ b/test/stage1/behavior/coroutines.zig
@@ -161,13 +161,13 @@ fn seq(c: u8) void {
test "coroutine suspend with block" {
const p = async testSuspendBlock();
- expect(!result);
+ expect(!global_result);
resume a_promise;
- expect(result);
+ expect(global_result);
}
var a_promise: anyframe = undefined;
-var result = false;
+var global_result = false;
async fn testSuspendBlock() void {
suspend {
comptime expect(@typeOf(@frame()) == *@Frame(testSuspendBlock));
@@ -178,7 +178,7 @@ async fn testSuspendBlock() void {
// var our_handle: anyframe = @frame();
expect(a_promise == anyframe(@frame()));
- result = true;
+ global_result = true;
}
var await_a_promise: anyframe = undefined;
@@ -283,29 +283,56 @@ test "@asyncCall with return type" {
const Foo = struct {
bar: async fn () i32,
- async fn afunc() i32 {
+ var global_frame: anyframe = undefined;
+
+ async fn middle() i32 {
+ return afunc();
+ }
+
+ fn afunc() i32 {
+ global_frame = @frame();
suspend;
return 1234;
}
};
- var foo = Foo{ .bar = Foo.afunc };
- var bytes: [64]u8 = undefined;
+ var foo = Foo{ .bar = Foo.middle };
+ var bytes: [100]u8 = undefined;
var aresult: i32 = 0;
- const frame = @asyncCall(&bytes, &aresult, foo.bar);
+ _ = @asyncCall(&bytes, &aresult, foo.bar);
expect(aresult == 0);
- resume frame;
+ resume Foo.global_frame;
expect(aresult == 1234);
}
-//test "async fn with inferred error set" {
-// const p = async failing();
-// resume p;
-//}
-//
-//async fn failing() !void {
-// suspend;
-// return error.Fail;
-//}
+test "async fn with inferred error set" {
+ const S = struct {
+ var global_frame: anyframe = undefined;
+
+ fn doTheTest() void {
+ var frame: [1]@Frame(middle) = undefined;
+ var result: anyerror!void = undefined;
+ _ = @asyncCall(@sliceToBytes(frame[0..]), &result, middle);
+ resume global_frame;
+ std.testing.expectError(error.Fail, result);
+ }
+
+ async fn middle() !void {
+ var f = async middle2();
+ return await f;
+ }
+
+ fn middle2() !void {
+ return failing();
+ }
+
+ fn failing() !void {
+ global_frame = @frame();
+ suspend;
+ return error.Fail;
+ }
+ };
+ S.doTheTest();
+}
//test "error return trace across suspend points - early return" {
// const p = nonFailing();
@@ -422,24 +449,24 @@ test "async function call return value" {
test "suspension points inside branching control flow" {
const S = struct {
- var global_result: i32 = 10;
+ var result: i32 = 10;
fn doTheTest() void {
- expect(10 == global_result);
+ expect(10 == result);
var frame = async func(true);
- expect(10 == global_result);
+ expect(10 == result);
resume frame;
- expect(11 == global_result);
+ expect(11 == result);
resume frame;
- expect(12 == global_result);
+ expect(12 == result);
resume frame;
- expect(13 == global_result);
+ expect(13 == result);
}
fn func(b: bool) void {
while (b) {
suspend;
- global_result += 1;
+ result += 1;
}
}
};
--
cgit v1.2.3
From a7763c06f9941bdeccc0679abf863b21f7cc33a3 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 5 Aug 2019 00:44:39 -0400
Subject: delete IrInstructionMarkErrRetTracePtr
this IR instruction is no longer needed
---
BRANCH_TODO | 1 -
src/all_types.hpp | 7 -------
src/codegen.cpp | 10 ----------
src/ir.cpp | 27 ---------------------------
src/ir_print.cpp | 9 ---------
5 files changed, 54 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/BRANCH_TODO b/BRANCH_TODO
index fcd98f0f71..85fe81b3b1 100644
--- a/BRANCH_TODO
+++ b/BRANCH_TODO
@@ -1,4 +1,3 @@
- * delete IrInstructionMarkErrRetTracePtr
* go over the commented out tests
* error return tracing
* compile error for error: expected anyframe->T, found 'anyframe'
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 1ea4954dec..85f00a6baf 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -2334,7 +2334,6 @@ enum IrInstructionId {
IrInstructionIdAtomicLoad,
IrInstructionIdSaveErrRetAddr,
IrInstructionIdAddImplicitReturnType,
- IrInstructionIdMarkErrRetTracePtr,
IrInstructionIdErrSetCast,
IrInstructionIdToBytes,
IrInstructionIdFromBytes,
@@ -3451,12 +3450,6 @@ struct IrInstructionAddImplicitReturnType {
IrInstruction *value;
};
-struct IrInstructionMarkErrRetTracePtr {
- IrInstruction base;
-
- IrInstruction *err_ret_trace_ptr;
-};
-
// For float ops which take a single argument
struct IrInstructionFloatOp {
IrInstruction base;
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 59289523a7..f9e0598707 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -5062,14 +5062,6 @@ static LLVMValueRef ir_render_atomic_load(CodeGen *g, IrExecutable *executable,
return load_inst;
}
-static LLVMValueRef ir_render_mark_err_ret_trace_ptr(CodeGen *g, IrExecutable *executable,
- IrInstructionMarkErrRetTracePtr *instruction)
-{
- assert(g->have_err_ret_tracing);
- g->cur_err_ret_trace_val_stack = ir_llvm_value(g, instruction->err_ret_trace_ptr);
- return nullptr;
-}
-
static LLVMValueRef ir_render_float_op(CodeGen *g, IrExecutable *executable, IrInstructionFloatOp *instruction) {
LLVMValueRef op = ir_llvm_value(g, instruction->op1);
assert(instruction->base.value.type->id == ZigTypeIdFloat);
@@ -5544,8 +5536,6 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_atomic_load(g, executable, (IrInstructionAtomicLoad *)instruction);
case IrInstructionIdSaveErrRetAddr:
return ir_render_save_err_ret_addr(g, executable, (IrInstructionSaveErrRetAddr *)instruction);
- case IrInstructionIdMarkErrRetTracePtr:
- return ir_render_mark_err_ret_trace_ptr(g, executable, (IrInstructionMarkErrRetTracePtr *)instruction);
case IrInstructionIdFloatOp:
return ir_render_float_op(g, executable, (IrInstructionFloatOp *)instruction);
case IrInstructionIdMulAdd:
diff --git a/src/ir.cpp b/src/ir.cpp
index e6212fa079..cae2768bef 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -993,10 +993,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionAddImplicitRetur
return IrInstructionIdAddImplicitReturnType;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionMarkErrRetTracePtr *) {
- return IrInstructionIdMarkErrRetTracePtr;
-}
-
static constexpr IrInstructionId ir_instruction_id(IrInstructionFloatOp *) {
return IrInstructionIdFloatOp;
}
@@ -3092,15 +3088,6 @@ static IrInstruction *ir_build_add_implicit_return_type(IrBuilder *irb, Scope *s
return &instruction->base;
}
-static IrInstruction *ir_build_mark_err_ret_trace_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *err_ret_trace_ptr) {
- IrInstructionMarkErrRetTracePtr *instruction = ir_build_instruction(irb, scope, source_node);
- instruction->err_ret_trace_ptr = err_ret_trace_ptr;
-
- ir_ref_instruction(err_ret_trace_ptr, irb->current_basic_block);
-
- return &instruction->base;
-}
-
static IrInstruction *ir_build_has_decl(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *container, IrInstruction *name)
{
@@ -23908,17 +23895,6 @@ static IrInstruction *ir_analyze_instruction_save_err_ret_addr(IrAnalyze *ira, I
return result;
}
-static IrInstruction *ir_analyze_instruction_mark_err_ret_trace_ptr(IrAnalyze *ira, IrInstructionMarkErrRetTracePtr *instruction) {
- IrInstruction *err_ret_trace_ptr = instruction->err_ret_trace_ptr->child;
- if (type_is_invalid(err_ret_trace_ptr->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_mark_err_ret_trace_ptr(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, err_ret_trace_ptr);
- result->value.type = ira->codegen->builtin_types.entry_void;
- return result;
-}
-
static void ir_eval_float_op(IrAnalyze *ira, IrInstructionFloatOp *source_instr, ZigType *float_type,
ConstExprValue *op, ConstExprValue *out_val) {
assert(ira && source_instr && float_type && out_val && op);
@@ -24798,8 +24774,6 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_save_err_ret_addr(ira, (IrInstructionSaveErrRetAddr *)instruction);
case IrInstructionIdAddImplicitReturnType:
return ir_analyze_instruction_add_implicit_return_type(ira, (IrInstructionAddImplicitReturnType *)instruction);
- case IrInstructionIdMarkErrRetTracePtr:
- return ir_analyze_instruction_mark_err_ret_trace_ptr(ira, (IrInstructionMarkErrRetTracePtr *)instruction);
case IrInstructionIdFloatOp:
return ir_analyze_instruction_float_op(ira, (IrInstructionFloatOp *)instruction);
case IrInstructionIdMulAdd:
@@ -24951,7 +24925,6 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdCancel:
case IrInstructionIdSaveErrRetAddr:
case IrInstructionIdAddImplicitReturnType:
- case IrInstructionIdMarkErrRetTracePtr:
case IrInstructionIdAtomicRmw:
case IrInstructionIdCmpxchgGen:
case IrInstructionIdCmpxchgSrc:
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index 549da9de19..bc9d09b30c 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -1457,12 +1457,6 @@ static void ir_print_add_implicit_return_type(IrPrint *irp, IrInstructionAddImpl
fprintf(irp->f, ")");
}
-static void ir_print_mark_err_ret_trace_ptr(IrPrint *irp, IrInstructionMarkErrRetTracePtr *instruction) {
- fprintf(irp->f, "@markErrRetTracePtr(");
- ir_print_other_instruction(irp, instruction->err_ret_trace_ptr);
- fprintf(irp->f, ")");
-}
-
static void ir_print_float_op(IrPrint *irp, IrInstructionFloatOp *instruction) {
fprintf(irp->f, "@%s(", float_op_to_name(instruction->op, false));
@@ -1963,9 +1957,6 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdAddImplicitReturnType:
ir_print_add_implicit_return_type(irp, (IrInstructionAddImplicitReturnType *)instruction);
break;
- case IrInstructionIdMarkErrRetTracePtr:
- ir_print_mark_err_ret_trace_ptr(irp, (IrInstructionMarkErrRetTracePtr *)instruction);
- break;
case IrInstructionIdFloatOp:
ir_print_float_op(irp, (IrInstructionFloatOp *)instruction);
break;
--
cgit v1.2.3
From 20f63e588e62c4a7250bc96c9e5b54c8106ad1af Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 5 Aug 2019 03:10:14 -0400
Subject: async functions have error return traces where appropriate
however the traces are not merged on `await` or async function calls
yet.
When an async function has an error set or error union as its return
type, it has a `StackTrace` before the args in the frame, so that it is
accessible from `anyframe->T` awaiters. However when it does not have an
errorable return type, but it does call or await an errorable, it has a
stack trace just before the locals. This way when doing an `@asyncCall`
on an async function pointer, it can populate the args (which are after
the `StackTrace`) because it knows the offset of the args based only on
the return type.
This sort of matches normal functions, where a stack trace pointer could
be supplied by a parameter, or it could be supplied by the stack of the
function, depending on whether the function itself is errorable.
---
BRANCH_TODO | 2 +-
src/analyze.cpp | 28 +++++-----
src/codegen.cpp | 105 +++++++++++++++++++++---------------
src/codegen.hpp | 3 +-
src/ir.cpp | 7 +--
test/stage1/behavior/coroutines.zig | 16 +++---
6 files changed, 91 insertions(+), 70 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/BRANCH_TODO b/BRANCH_TODO
index 85fe81b3b1..00aab06910 100644
--- a/BRANCH_TODO
+++ b/BRANCH_TODO
@@ -1,5 +1,5 @@
+ * error return tracing - handle `await` and function calls
* go over the commented out tests
- * error return tracing
* compile error for error: expected anyframe->T, found 'anyframe'
* compile error for error: expected anyframe->T, found 'i32'
* await of a non async function
diff --git a/src/analyze.cpp b/src/analyze.cpp
index e1b386d9af..4f301d2355 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -5230,9 +5230,8 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) {
field_names.append("@result");
field_types.append(fn_type_id->return_type);
- if (codegen_fn_has_err_ret_tracing(g, fn_type_id->return_type)) {
- field_names.append("@ptr_stack_trace");
- field_types.append(get_ptr_to_stack_trace_type(g));
+ if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) {
+ (void)get_ptr_to_stack_trace_type(g); // populate g->stack_trace_type
field_names.append("@stack_trace");
field_types.append(g->stack_trace_type);
@@ -5256,6 +5255,16 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) {
field_types.append(param_type);
}
+ if (codegen_fn_has_err_ret_tracing_stack(g, fn)) {
+ (void)get_ptr_to_stack_trace_type(g); // populate g->stack_trace_type
+
+ field_names.append("@stack_trace");
+ field_types.append(g->stack_trace_type);
+
+ field_names.append("@instruction_addresses");
+ field_types.append(get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count));
+ }
+
for (size_t alloca_i = 0; alloca_i < fn->alloca_gen_list.length; alloca_i += 1) {
IrInstructionAllocaGen *instruction = fn->alloca_gen_list.at(alloca_i);
ZigType *ptr_type = instruction->base.value.type;
@@ -7563,8 +7572,7 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re
if (have_result_type) {
field_types.append(get_llvm_type(g, ptr_result_type)); // ptr_result
field_types.append(get_llvm_type(g, result_type)); // result
- if (codegen_fn_has_err_ret_tracing(g, result_type)) {
- field_types.append(get_llvm_type(g, get_ptr_to_stack_trace_type(g))); // ptr_stack_trace
+ if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) {
field_types.append(get_llvm_type(g, g->stack_trace_type)); // stack_trace
field_types.append(get_llvm_type(g, get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count))); // instruction_addresses
}
@@ -7614,15 +7622,7 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re
8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, result_type)));
- if (codegen_fn_has_err_ret_tracing(g, result_type)) {
- di_element_types.append(
- ZigLLVMCreateDebugMemberType(g->dbuilder,
- ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "ptr_stack_trace",
- di_file, line,
- 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
- 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
- 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
- ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, get_ptr_to_stack_trace_type(g))));
+ if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) {
di_element_types.append(
ZigLLVMCreateDebugMemberType(g->dbuilder,
ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "stack_trace",
diff --git a/src/codegen.cpp b/src/codegen.cpp
index f9e0598707..1f8711012f 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -298,7 +298,7 @@ static LLVMLinkage to_llvm_linkage(GlobalLinkageId id) {
}
// label (grep this): [coro_frame_struct_layout]
-static uint32_t frame_index_trace(CodeGen *g, FnTypeId *fn_type_id) {
+static uint32_t frame_index_trace_arg(CodeGen *g, FnTypeId *fn_type_id) {
// [0] *ReturnType
// [1] ReturnType
uint32_t return_field_count = type_has_bits(fn_type_id->return_type) ? 2 : 0;
@@ -307,14 +307,25 @@ static uint32_t frame_index_trace(CodeGen *g, FnTypeId *fn_type_id) {
// label (grep this): [coro_frame_struct_layout]
static uint32_t frame_index_arg(CodeGen *g, FnTypeId *fn_type_id) {
- bool have_stack_trace = g->have_err_ret_tracing && codegen_fn_has_err_ret_tracing(g, fn_type_id->return_type);
- // [0] *StackTrace
- // [1] StackTrace
- // [2] [stack_trace_ptr_count]usize
- uint32_t trace_field_count = have_stack_trace ? 3 : 0;
- return frame_index_trace(g, fn_type_id) + trace_field_count;
+ bool have_stack_trace = codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type);
+ // [0] StackTrace
+ // [1] [stack_trace_ptr_count]usize
+ uint32_t trace_field_count = have_stack_trace ? 2 : 0;
+ return frame_index_trace_arg(g, fn_type_id) + trace_field_count;
}
+// label (grep this): [coro_frame_struct_layout]
+static uint32_t frame_index_trace_stack(CodeGen *g, FnTypeId *fn_type_id) {
+ uint32_t result = frame_index_arg(g, fn_type_id);
+ for (size_t i = 0; i < fn_type_id->param_count; i += 1) {
+ if (type_has_bits(fn_type_id->param_info->type)) {
+ result += 1;
+ }
+ }
+ return result;
+}
+
+
static uint32_t get_err_ret_trace_arg_index(CodeGen *g, ZigFn *fn_table_entry) {
if (!g->have_err_ret_tracing) {
return UINT32_MAX;
@@ -1287,9 +1298,6 @@ static LLVMValueRef get_cur_err_ret_trace_val(CodeGen *g, Scope *scope) {
if (!g->have_err_ret_tracing) {
return nullptr;
}
- if (fn_is_async(g->cur_fn)) {
- return LLVMBuildLoad(g->builder, g->cur_err_ret_trace_val_arg, "");
- }
if (g->cur_err_ret_trace_val_stack != nullptr) {
return g->cur_err_ret_trace_val_stack;
}
@@ -3441,6 +3449,10 @@ static void render_async_spills(CodeGen *g) {
gen_var_debug_decl(g, var);
}
}
+ // label (grep this): [coro_frame_struct_layout]
+ if (codegen_fn_has_err_ret_tracing_stack(g, g->cur_fn)) {
+ async_var_index += 2;
+ }
for (size_t alloca_i = 0; alloca_i < g->cur_fn->alloca_gen_list.length; alloca_i += 1) {
IrInstructionAllocaGen *instruction = g->cur_fn->alloca_gen_list.at(alloca_i);
ZigType *ptr_type = instruction->base.value.type;
@@ -3525,7 +3537,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
CallingConvention cc = fn_type->data.fn.fn_type_id.cc;
bool first_arg_ret = ret_has_bits && want_first_arg_sret(g, fn_type_id);
- bool prefix_arg_err_ret_stack = codegen_fn_has_err_ret_tracing(g, fn_type_id->return_type);
+ bool prefix_arg_err_ret_stack = codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type);
bool is_var_args = fn_type_id->is_var_args;
ZigList gen_param_values = {};
LLVMValueRef result_loc = instruction->result_loc ? ir_llvm_value(g, instruction->result_loc) : nullptr;
@@ -3572,28 +3584,8 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
}
}
- if (prefix_arg_err_ret_stack) {
- uint32_t trace_field_index = frame_index_trace(g, fn_type_id);
- LLVMValueRef trace_field_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc,
- trace_field_index, "");
- LLVMValueRef trace_field_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc,
- trace_field_index + 1, "");
- LLVMValueRef trace_field_addrs = LLVMBuildStructGEP(g->builder, frame_result_loc,
- trace_field_index + 2, "");
- LLVMBuildStore(g->builder, trace_field_ptr, trace_field_ptr_ptr);
-
- LLVMValueRef index_ptr = LLVMBuildStructGEP(g->builder, trace_field_ptr, 0, "");
- LLVMBuildStore(g->builder, zero, index_ptr);
-
- LLVMValueRef addrs_slice_ptr = LLVMBuildStructGEP(g->builder, trace_field_ptr, 1, "");
- LLVMValueRef addrs_ptr_ptr = LLVMBuildStructGEP(g->builder, addrs_slice_ptr, slice_ptr_index, "");
- LLVMValueRef indices[] = { LLVMConstNull(usize_type_ref), LLVMConstNull(usize_type_ref) };
- LLVMValueRef trace_field_addrs_as_ptr = LLVMBuildInBoundsGEP(g->builder, trace_field_addrs, indices, 2, "");
- LLVMBuildStore(g->builder, trace_field_addrs_as_ptr, addrs_ptr_ptr);
-
- LLVMValueRef addrs_len_ptr = LLVMBuildStructGEP(g->builder, addrs_slice_ptr, slice_len_index, "");
- LLVMBuildStore(g->builder, LLVMConstInt(usize_type_ref, stack_trace_ptr_count, false), addrs_len_ptr);
- }
+ // even if prefix_arg_err_ret_stack is true, let the async function do its own
+ // initialization.
} else if (callee_is_async) {
frame_result_loc = ir_llvm_value(g, instruction->frame_result_loc);
awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_ret_ptr, usize_type_ref, ""); // caller's own frame pointer
@@ -3607,13 +3599,8 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
}
}
- if (prefix_arg_err_ret_stack) {
- uint32_t trace_field_index = frame_index_trace(g, fn_type_id);
- LLVMValueRef trace_field_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, trace_field_index, "");
- LLVMValueRef err_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope);
- LLVMBuildStore(g->builder, err_trace_val, trace_field_ptr);
- }
-
+ // even if prefix_arg_err_ret_stack is true, let the async function do its
+ // error return tracing normally, and then we'll invoke merge_error_return_traces like normal.
}
if (instruction->is_async || callee_is_async) {
assert(frame_result_loc != nullptr);
@@ -6790,9 +6777,16 @@ static void do_code_gen(CodeGen *g) {
g->cur_async_awaiter_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_awaiter_index, "");
LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_resume_index, "");
g->cur_async_resume_index_ptr = resume_index_ptr;
- if (codegen_fn_has_err_ret_tracing(g, fn_type_id->return_type)) {
- uint32_t field_index = frame_index_trace(g, fn_type_id);
- g->cur_err_ret_trace_val_arg = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, field_index, "");
+ LLVMValueRef err_ret_trace_val = nullptr;
+ uint32_t trace_field_index;
+ if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) {
+ trace_field_index = frame_index_trace_arg(g, fn_type_id);
+ err_ret_trace_val = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, trace_field_index, "");
+ g->cur_err_ret_trace_val_arg = err_ret_trace_val;
+ } else if (codegen_fn_has_err_ret_tracing_stack(g, fn_table_entry)) {
+ trace_field_index = frame_index_trace_stack(g, fn_type_id);
+ err_ret_trace_val = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, trace_field_index, "");
+ g->cur_err_ret_trace_val_stack = err_ret_trace_val;
}
LLVMValueRef resume_index = LLVMBuildLoad(g->builder, resume_index_ptr, "");
@@ -6804,6 +6798,24 @@ static void do_code_gen(CodeGen *g) {
LLVMAddCase(switch_instr, zero, entry_block->llvm_block);
g->cur_resume_block_count += 1;
LLVMPositionBuilderAtEnd(g->builder, entry_block->llvm_block);
+ if (err_ret_trace_val != nullptr) {
+ LLVMValueRef trace_field_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr,
+ trace_field_index, "");
+ LLVMValueRef trace_field_addrs = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr,
+ trace_field_index + 1, "");
+
+ LLVMValueRef index_ptr = LLVMBuildStructGEP(g->builder, trace_field_ptr, 0, "");
+ LLVMBuildStore(g->builder, zero, index_ptr);
+
+ LLVMValueRef addrs_slice_ptr = LLVMBuildStructGEP(g->builder, trace_field_ptr, 1, "");
+ LLVMValueRef addrs_ptr_ptr = LLVMBuildStructGEP(g->builder, addrs_slice_ptr, slice_ptr_index, "");
+ LLVMValueRef indices[] = { LLVMConstNull(usize_type_ref), LLVMConstNull(usize_type_ref) };
+ LLVMValueRef trace_field_addrs_as_ptr = LLVMBuildInBoundsGEP(g->builder, trace_field_addrs, indices, 2, "");
+ LLVMBuildStore(g->builder, trace_field_addrs_as_ptr, addrs_ptr_ptr);
+
+ LLVMValueRef addrs_len_ptr = LLVMBuildStructGEP(g->builder, addrs_slice_ptr, slice_len_index, "");
+ LLVMBuildStore(g->builder, LLVMConstInt(usize_type_ref, stack_trace_ptr_count, false), addrs_len_ptr);
+ }
render_async_var_decls(g, entry_block->instruction_list.at(0)->scope);
} else {
// create debug variable declarations for parameters
@@ -9707,8 +9719,13 @@ CodeGen *codegen_create(Buf *main_pkg_path, Buf *root_src_path, const ZigTarget
return g;
}
-bool codegen_fn_has_err_ret_tracing(CodeGen *g, ZigType *return_type) {
+bool codegen_fn_has_err_ret_tracing_arg(CodeGen *g, ZigType *return_type) {
return g->have_err_ret_tracing &&
(return_type->id == ZigTypeIdErrorUnion ||
return_type->id == ZigTypeIdErrorSet);
}
+
+bool codegen_fn_has_err_ret_tracing_stack(CodeGen *g, ZigFn *fn) {
+ return g->have_err_ret_tracing && fn->calls_or_awaits_errorable_fn &&
+ !codegen_fn_has_err_ret_tracing_arg(g, fn->type_entry->data.fn.fn_type_id.return_type);
+}
diff --git a/src/codegen.hpp b/src/codegen.hpp
index 0ee4ce837e..c84ef4bc48 100644
--- a/src/codegen.hpp
+++ b/src/codegen.hpp
@@ -61,6 +61,7 @@ Buf *codegen_generate_builtin_source(CodeGen *g);
TargetSubsystem detect_subsystem(CodeGen *g);
void codegen_release_caches(CodeGen *codegen);
-bool codegen_fn_has_err_ret_tracing(CodeGen *g, ZigType *return_type);
+bool codegen_fn_has_err_ret_tracing_arg(CodeGen *g, ZigType *return_type);
+bool codegen_fn_has_err_ret_tracing_stack(CodeGen *g, ZigFn *fn);
#endif
diff --git a/src/ir.cpp b/src/ir.cpp
index cae2768bef..ca54d54c2d 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -15560,9 +15560,6 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
break;
}
}
- if (call_instruction->is_async) {
- zig_panic("TODO async call");
- }
auto existing_entry = ira->codegen->generic_table.put_unique(generic_id, impl_fn);
if (existing_entry) {
@@ -24483,6 +24480,10 @@ static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstruction
fn_entry->inferred_async_node = instruction->base.source_node;
}
+ if (type_can_fail(result_type)) {
+ fn_entry->calls_or_awaits_errorable_fn = true;
+ }
+
IrInstruction *result = ir_build_await(&ira->new_irb,
instruction->base.scope, instruction->base.source_node, frame);
result->value.type = result_type;
diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig
index c4f4cd3c99..4cea8d1507 100644
--- a/test/stage1/behavior/coroutines.zig
+++ b/test/stage1/behavior/coroutines.zig
@@ -337,19 +337,21 @@ test "async fn with inferred error set" {
//test "error return trace across suspend points - early return" {
// const p = nonFailing();
// resume p;
-// const p2 = try async printTrace(p);
-// cancel p2;
+// const p2 = async printTrace(p);
//}
//
//test "error return trace across suspend points - async return" {
// const p = nonFailing();
-// const p2 = try async printTrace(p);
+// const p2 = async printTrace(p);
// resume p;
-// cancel p2;
//}
//
//fn nonFailing() (anyframe->anyerror!void) {
-// return async suspendThenFail() catch unreachable;
+// const Static = struct {
+// var frame: @Frame(suspendThenFail) = undefined;
+// };
+// Static.frame = async suspendThenFail();
+// return &Static.frame;
//}
//async fn suspendThenFail() anyerror!void {
// suspend;
@@ -361,8 +363,8 @@ test "async fn with inferred error set" {
// if (@errorReturnTrace()) |trace| {
// expect(trace.index == 1);
// } else switch (builtin.mode) {
-// builtin.Mode.Debug, builtin.Mode.ReleaseSafe => @panic("expected return trace"),
-// builtin.Mode.ReleaseFast, builtin.Mode.ReleaseSmall => {},
+// .Debug, .ReleaseSafe => @panic("expected return trace"),
+// .ReleaseFast, .ReleaseSmall => {},
// }
// };
//}
--
cgit v1.2.3
From 400500a3afafca8178f13a7e4e1cd0ae7808aff2 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 6 Aug 2019 16:37:25 -0400
Subject: improve async function semantics
* add safety panic for resuming a function which is returning, pending
an await
* remove IrInstructionResultPtr
* add IrInstructionReturnBegin. This does the early return in async
functions; does nothing in normal functions.
* `await` gets a result location
* `analyze_fn_async` will call `analyze_fn_body` if necessary.
* async function frames have a result pointer field for themselves
to access and one for the awaiter to supply before the atomic rmw.
when returning, async functions copy the result to the awaiter result
pointer, if it is non-null.
* async function frames have a stack trace pointer which is supplied by
the awaiter before the atomicrmw. Later in the frame is a stack trace
struct and addresses, which is used for its own calls and awaits.
* when awaiting an async function, if an early return occurred, the
awaiter tail resumes the frame.
* when an async function returns, early return does a suspend
(in IrInstructionReturnBegin) before copying
the error return trace data, result, and running the defers.
After the last defer runs, the frame will no longer be accessed.
* proper acquire/release atomic ordering attributes in async functions.
---
BRANCH_TODO | 3 +
src/all_types.hpp | 27 +++-
src/analyze.cpp | 51 ++++---
src/codegen.cpp | 441 +++++++++++++++++++++++++++++++++---------------------
src/codegen.hpp | 2 +-
src/ir.cpp | 245 +++++++++++++++++-------------
src/ir_print.cpp | 45 +++---
7 files changed, 491 insertions(+), 323 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/BRANCH_TODO b/BRANCH_TODO
index 00aab06910..1efaf1acc4 100644
--- a/BRANCH_TODO
+++ b/BRANCH_TODO
@@ -33,3 +33,6 @@
- anyframe, anyframe->T
* safety for double await
* call graph analysis to have fewer stack trace frames
+ * grep for "coroutine" and "coro" and replace all that nomenclature with "async functions"
+ * when there are multiple calls to async functions in a function, reuse the same frame buffer, so that the
+ needed bytes is equal to the largest callee's frame
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 0098a630d8..bc65948579 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -1557,6 +1557,7 @@ enum PanicMsgId {
PanicMsgIdBadReturn,
PanicMsgIdResumedAnAwaitingFn,
PanicMsgIdFrameTooSmall,
+ PanicMsgIdResumedFnPendingAwait,
PanicMsgIdCount,
};
@@ -1717,10 +1718,12 @@ struct CodeGen {
LLVMTargetMachineRef target_machine;
ZigLLVMDIFile *dummy_di_file;
LLVMValueRef cur_ret_ptr;
+ LLVMValueRef cur_ret_ptr_ptr;
LLVMValueRef cur_fn_val;
LLVMValueRef cur_async_switch_instr;
LLVMValueRef cur_async_resume_index_ptr;
LLVMValueRef cur_async_awaiter_ptr;
+ LLVMValueRef cur_async_prev_val;
LLVMBasicBlockRef cur_preamble_llvm_block;
size_t cur_resume_block_count;
LLVMValueRef cur_err_ret_trace_val_arg;
@@ -2223,6 +2226,7 @@ enum IrInstructionId {
IrInstructionIdCallGen,
IrInstructionIdConst,
IrInstructionIdReturn,
+ IrInstructionIdReturnBegin,
IrInstructionIdCast,
IrInstructionIdResizeSlice,
IrInstructionIdContainerInitList,
@@ -2326,7 +2330,6 @@ enum IrInstructionId {
IrInstructionIdImplicitCast,
IrInstructionIdResolveResult,
IrInstructionIdResetResult,
- IrInstructionIdResultPtr,
IrInstructionIdOpaqueType,
IrInstructionIdSetAlignStack,
IrInstructionIdArgType,
@@ -2355,7 +2358,8 @@ enum IrInstructionId {
IrInstructionIdUnionInitNamedField,
IrInstructionIdSuspendBegin,
IrInstructionIdSuspendFinish,
- IrInstructionIdAwait,
+ IrInstructionIdAwaitSrc,
+ IrInstructionIdAwaitGen,
IrInstructionIdCoroResume,
};
@@ -2630,7 +2634,13 @@ struct IrInstructionConst {
struct IrInstructionReturn {
IrInstruction base;
- IrInstruction *value;
+ IrInstruction *operand;
+};
+
+struct IrInstructionReturnBegin {
+ IrInstruction base;
+
+ IrInstruction *operand;
};
enum CastOp {
@@ -3136,6 +3146,7 @@ struct IrInstructionTestErrSrc {
IrInstruction base;
bool resolve_err_set;
+ bool base_ptr_is_payload;
IrInstruction *base_ptr;
};
@@ -3603,10 +3614,18 @@ struct IrInstructionSuspendFinish {
IrInstructionSuspendBegin *begin;
};
-struct IrInstructionAwait {
+struct IrInstructionAwaitSrc {
IrInstruction base;
IrInstruction *frame;
+ ResultLoc *result_loc;
+};
+
+struct IrInstructionAwaitGen {
+ IrInstruction base;
+
+ IrInstruction *frame;
+ IrInstruction *result_loc;
};
struct IrInstructionCoroResume {
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 4f301d2355..36eeaeac9c 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -3848,6 +3848,13 @@ static void analyze_fn_async(CodeGen *g, ZigFn *fn) {
if (callee->type_entry->data.fn.fn_type_id.cc != CallingConventionUnspecified)
continue;
+ if (callee->anal_state == FnAnalStateReady) {
+ analyze_fn_body(g, callee);
+ if (callee->anal_state == FnAnalStateInvalid) {
+ fn->anal_state = FnAnalStateInvalid;
+ return;
+ }
+ }
assert(callee->anal_state == FnAnalStateComplete);
analyze_fn_async(g, callee);
if (callee->anal_state == FnAnalStateInvalid) {
@@ -5224,20 +5231,18 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) {
FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
ZigType *ptr_return_type = get_pointer_to_type(g, fn_type_id->return_type, false);
- field_names.append("@ptr_result");
+ field_names.append("@result_ptr_callee");
+ field_types.append(ptr_return_type);
+
+ field_names.append("@result_ptr_awaiter");
field_types.append(ptr_return_type);
field_names.append("@result");
field_types.append(fn_type_id->return_type);
if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) {
- (void)get_ptr_to_stack_trace_type(g); // populate g->stack_trace_type
-
- field_names.append("@stack_trace");
- field_types.append(g->stack_trace_type);
-
- field_names.append("@instruction_addresses");
- field_types.append(get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count));
+ field_names.append("@ptr_stack_trace");
+ field_types.append(get_ptr_to_stack_trace_type(g));
}
for (size_t arg_i = 0; arg_i < fn_type_id->param_count; arg_i += 1) {
@@ -5255,7 +5260,7 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) {
field_types.append(param_type);
}
- if (codegen_fn_has_err_ret_tracing_stack(g, fn)) {
+ if (codegen_fn_has_err_ret_tracing_stack(g, fn, true)) {
(void)get_ptr_to_stack_trace_type(g); // populate g->stack_trace_type
field_names.append("@stack_trace");
@@ -7570,11 +7575,11 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re
bool have_result_type = result_type != nullptr && type_has_bits(result_type);
if (have_result_type) {
- field_types.append(get_llvm_type(g, ptr_result_type)); // ptr_result
+ field_types.append(get_llvm_type(g, ptr_result_type)); // result_ptr_callee
+ field_types.append(get_llvm_type(g, ptr_result_type)); // result_ptr_awaiter
field_types.append(get_llvm_type(g, result_type)); // result
if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) {
- field_types.append(get_llvm_type(g, g->stack_trace_type)); // stack_trace
- field_types.append(get_llvm_type(g, get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count))); // instruction_addresses
+ field_types.append(get_llvm_type(g, get_ptr_to_stack_trace_type(g))); // ptr_stack_trace
}
}
LLVMStructSetBody(frame_header_type, field_types.items, field_types.length, false);
@@ -7607,7 +7612,15 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re
if (have_result_type) {
di_element_types.append(
ZigLLVMCreateDebugMemberType(g->dbuilder,
- ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "ptr_result",
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "result_ptr_callee",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_result_type)));
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "result_ptr_awaiter",
di_file, line,
8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
@@ -7625,20 +7638,12 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re
if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) {
di_element_types.append(
ZigLLVMCreateDebugMemberType(g->dbuilder,
- ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "stack_trace",
- di_file, line,
- 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
- 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
- 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
- ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, g->stack_trace_type)));
- di_element_types.append(
- ZigLLVMCreateDebugMemberType(g->dbuilder,
- ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "instruction_addresses",
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "ptr_stack_trace",
di_file, line,
8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
- ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count))));
+ ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, get_ptr_to_stack_trace_type(g))));
}
};
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 1f8711012f..cf846d99e9 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -24,6 +24,14 @@
#include
#include
+enum ResumeId {
+ ResumeIdManual,
+ ResumeIdReturn,
+ ResumeIdCall,
+
+ ResumeIdAwaitEarlyReturn // must be last
+};
+
static void init_darwin_native(CodeGen *g) {
char *osx_target = getenv("MACOSX_DEPLOYMENT_TARGET");
char *ios_target = getenv("IPHONEOS_DEPLOYMENT_TARGET");
@@ -298,25 +306,25 @@ static LLVMLinkage to_llvm_linkage(GlobalLinkageId id) {
}
// label (grep this): [coro_frame_struct_layout]
-static uint32_t frame_index_trace_arg(CodeGen *g, FnTypeId *fn_type_id) {
- // [0] *ReturnType
- // [1] ReturnType
- uint32_t return_field_count = type_has_bits(fn_type_id->return_type) ? 2 : 0;
+static uint32_t frame_index_trace_arg(CodeGen *g, ZigType *return_type) {
+ // [0] *ReturnType (callee's)
+ // [1] *ReturnType (awaiter's)
+ // [2] ReturnType
+ uint32_t return_field_count = type_has_bits(return_type) ? 3 : 0;
return coro_ret_start + return_field_count;
}
// label (grep this): [coro_frame_struct_layout]
-static uint32_t frame_index_arg(CodeGen *g, FnTypeId *fn_type_id) {
- bool have_stack_trace = codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type);
- // [0] StackTrace
- // [1] [stack_trace_ptr_count]usize
- uint32_t trace_field_count = have_stack_trace ? 2 : 0;
- return frame_index_trace_arg(g, fn_type_id) + trace_field_count;
+static uint32_t frame_index_arg(CodeGen *g, ZigType *return_type) {
+ bool have_stack_trace = codegen_fn_has_err_ret_tracing_arg(g, return_type);
+ // [0] *StackTrace
+ uint32_t trace_field_count = have_stack_trace ? 1 : 0;
+ return frame_index_trace_arg(g, return_type) + trace_field_count;
}
// label (grep this): [coro_frame_struct_layout]
static uint32_t frame_index_trace_stack(CodeGen *g, FnTypeId *fn_type_id) {
- uint32_t result = frame_index_arg(g, fn_type_id);
+ uint32_t result = frame_index_arg(g, fn_type_id->return_type);
for (size_t i = 0; i < fn_type_id->param_count; i += 1) {
if (type_has_bits(fn_type_id->param_info->type)) {
result += 1;
@@ -901,7 +909,7 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) {
case PanicMsgIdPtrCastNull:
return buf_create_from_str("cast causes pointer to be null");
case PanicMsgIdBadResume:
- return buf_create_from_str("invalid resume of async function");
+ return buf_create_from_str("resumed an async function which already returned");
case PanicMsgIdBadAwait:
return buf_create_from_str("async function awaited twice");
case PanicMsgIdBadReturn:
@@ -910,6 +918,8 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) {
return buf_create_from_str("awaiting function resumed");
case PanicMsgIdFrameTooSmall:
return buf_create_from_str("frame too small");
+ case PanicMsgIdResumedFnPendingAwait:
+ return buf_create_from_str("resumed an async function which can only be awaited");
}
zig_unreachable();
}
@@ -1301,7 +1311,14 @@ static LLVMValueRef get_cur_err_ret_trace_val(CodeGen *g, Scope *scope) {
if (g->cur_err_ret_trace_val_stack != nullptr) {
return g->cur_err_ret_trace_val_stack;
}
- return g->cur_err_ret_trace_val_arg;
+ if (g->cur_err_ret_trace_val_arg != nullptr) {
+ if (fn_is_async(g->cur_fn)) {
+ return LLVMBuildLoad(g->builder, g->cur_err_ret_trace_val_arg, "");
+ } else {
+ return g->cur_err_ret_trace_val_arg;
+ }
+ }
+ return nullptr;
}
static void gen_safety_crash_for_err(CodeGen *g, LLVMValueRef err_val, Scope *scope) {
@@ -2023,99 +2040,191 @@ static LLVMValueRef ir_render_save_err_ret_addr(CodeGen *g, IrExecutable *execut
return call_instruction;
}
-static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable,
- IrInstructionReturn *return_instruction)
+static void gen_assert_resume_id(CodeGen *g, IrInstruction *source_instr, ResumeId resume_id, PanicMsgId msg_id,
+ LLVMBasicBlockRef end_bb)
+{
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
+ LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadResume");
+ if (end_bb == nullptr) end_bb = LLVMAppendBasicBlock(g->cur_fn_val, "OkResume");
+ LLVMValueRef ok_bit;
+ if (resume_id == ResumeIdAwaitEarlyReturn) {
+ LLVMValueRef last_value = LLVMBuildSub(g->builder, LLVMConstAllOnes(usize_type_ref),
+ LLVMConstInt(usize_type_ref, ResumeIdAwaitEarlyReturn, false), "");
+ ok_bit = LLVMBuildICmp(g->builder, LLVMIntULT, LLVMGetParam(g->cur_fn_val, 1), last_value, "");
+ } else {
+ LLVMValueRef expected_value = LLVMBuildSub(g->builder, LLVMConstAllOnes(usize_type_ref),
+ LLVMConstInt(usize_type_ref, resume_id, false), "");
+ ok_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, LLVMGetParam(g->cur_fn_val, 1), expected_value, "");
+ }
+ LLVMBuildCondBr(g->builder, ok_bit, end_bb, bad_resume_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, bad_resume_block);
+ gen_assertion(g, msg_id, source_instr);
+
+ LLVMPositionBuilderAtEnd(g->builder, end_bb);
+}
+
+static LLVMValueRef gen_resume(CodeGen *g, LLVMValueRef fn_val, LLVMValueRef target_frame_ptr,
+ ResumeId resume_id, LLVMValueRef arg_val)
+{
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
+ if (fn_val == nullptr) {
+ if (g->anyframe_fn_type == nullptr) {
+ (void)get_llvm_type(g, get_any_frame_type(g, nullptr));
+ }
+ LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_fn_ptr_index, "");
+ fn_val = LLVMBuildLoad(g->builder, fn_ptr_ptr, "");
+ }
+ if (arg_val == nullptr) {
+ arg_val = LLVMBuildSub(g->builder, LLVMConstAllOnes(usize_type_ref),
+ LLVMConstInt(usize_type_ref, resume_id, false), "");
+ } else {
+ assert(resume_id == ResumeIdAwaitEarlyReturn);
+ }
+ LLVMValueRef args[] = {target_frame_ptr, arg_val};
+ return ZigLLVMBuildCall(g->builder, fn_val, args, 2, LLVMFastCallConv, ZigLLVM_FnInlineAuto, "");
+}
+
+static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable,
+ IrInstructionReturnBegin *instruction)
{
+ if (!fn_is_async(g->cur_fn)) return nullptr;
+
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
+
+ bool ret_type_has_bits = instruction->operand != nullptr &&
+ type_has_bits(instruction->operand->value.type);
+ ZigType *ret_type = ret_type_has_bits ? instruction->operand->value.type : nullptr;
+ if (ret_type_has_bits && !handle_is_ptr(ret_type)) {
+ // It's a scalar, so it didn't get written to the result ptr. Do that before the atomic rmw.
+ LLVMValueRef result_ptr = LLVMBuildLoad(g->builder, g->cur_ret_ptr_ptr, "");
+ LLVMBuildStore(g->builder, ir_llvm_value(g, instruction->operand), result_ptr);
+ }
+
+ // Prepare to be suspended. We might end up not having to suspend though.
+ LLVMBasicBlockRef resume_bb = LLVMAppendBasicBlock(g->cur_fn_val, "ReturnResume");
+ size_t new_block_index = g->cur_resume_block_count;
+ g->cur_resume_block_count += 1;
+ LLVMValueRef new_block_index_val = LLVMConstInt(usize_type_ref, new_block_index, false);
+ LLVMAddCase(g->cur_async_switch_instr, new_block_index_val, resume_bb);
+ LLVMBuildStore(g->builder, new_block_index_val, g->cur_async_resume_index_ptr);
+
+ LLVMValueRef zero = LLVMConstNull(usize_type_ref);
+ LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref);
+ LLVMValueRef prev_val = LLVMBuildAtomicRMW(g->builder, LLVMAtomicRMWBinOpXchg, g->cur_async_awaiter_ptr,
+ all_ones, LLVMAtomicOrderingAcquire, g->is_single_threaded);
+
+ LLVMBasicBlockRef bad_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadReturn");
+ LLVMBasicBlockRef early_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "EarlyReturn");
+ LLVMBasicBlockRef resume_them_block = LLVMAppendBasicBlock(g->cur_fn_val, "ResumeThem");
+
+ LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, prev_val, resume_them_block, 2);
+ LLVMBasicBlockRef switch_bb = LLVMGetInsertBlock(g->builder);
+
+ LLVMAddCase(switch_instr, zero, early_return_block);
+ LLVMAddCase(switch_instr, all_ones, bad_return_block);
+
+ // Something has gone horribly wrong, and this is an invalid second return.
+ LLVMPositionBuilderAtEnd(g->builder, bad_return_block);
+ gen_assertion(g, PanicMsgIdBadReturn, &instruction->base);
+
+ // The caller has not done an await yet. So we suspend at the return instruction, until a
+ // cancel or await is performed.
+ LLVMPositionBuilderAtEnd(g->builder, early_return_block);
+ LLVMBuildRetVoid(g->builder);
+
+ // Add a safety check for when getting resumed by the awaiter.
+ LLVMPositionBuilderAtEnd(g->builder, resume_bb);
+ LLVMBasicBlockRef after_resume_block = LLVMGetInsertBlock(g->builder);
+ gen_assert_resume_id(g, &instruction->base, ResumeIdAwaitEarlyReturn, PanicMsgIdResumedFnPendingAwait,
+ resume_them_block);
+
+ // We need to resume the caller by tail calling them.
+ // That will happen when rendering IrInstructionReturn after running the defers/errdefers.
+ // We either got here from Entry (function call) or from the switch above
+ g->cur_async_prev_val = LLVMBuildPhi(g->builder, usize_type_ref, "");
+ LLVMValueRef incoming_values[] = { LLVMGetParam(g->cur_fn_val, 1), prev_val };
+ LLVMBasicBlockRef incoming_blocks[] = { after_resume_block, switch_bb };
+ LLVMAddIncoming(g->cur_async_prev_val, incoming_values, incoming_blocks, 2);
+
+ return nullptr;
+}
+
+static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrInstructionReturn *instruction) {
if (fn_is_async(g->cur_fn)) {
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
- bool ret_type_has_bits = return_instruction->value != nullptr &&
- type_has_bits(return_instruction->value->value.type);
- ZigType *ret_type = ret_type_has_bits ? return_instruction->value->value.type : nullptr;
+ bool ret_type_has_bits = instruction->operand != nullptr &&
+ type_has_bits(instruction->operand->value.type);
+ ZigType *ret_type = ret_type_has_bits ? instruction->operand->value.type : nullptr;
- if (ir_want_runtime_safety(g, &return_instruction->base)) {
+ if (ir_want_runtime_safety(g, &instruction->base)) {
LLVMValueRef new_resume_index = LLVMConstAllOnes(usize_type_ref);
LLVMBuildStore(g->builder, new_resume_index, g->cur_async_resume_index_ptr);
}
- LLVMValueRef result_ptr_as_usize;
if (ret_type_has_bits) {
- LLVMValueRef result_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_ret_start, "");
- LLVMValueRef result_ptr = LLVMBuildLoad(g->builder, result_ptr_ptr, "");
- if (!handle_is_ptr(ret_type)) {
- // It's a scalar, so it didn't get written to the result ptr. Do that now.
- LLVMBuildStore(g->builder, ir_llvm_value(g, return_instruction->value), result_ptr);
- }
- result_ptr_as_usize = LLVMBuildPtrToInt(g->builder, result_ptr, usize_type_ref, "");
- } else {
- // For debug safety, this value has to be anything other than all 1's, which signals
- // that it is being resumed. 0 is a bad choice since null pointers are special.
- result_ptr_as_usize = ir_want_runtime_safety(g, &return_instruction->base) ?
- LLVMConstInt(usize_type_ref, 1, false) : LLVMGetUndef(usize_type_ref);
+ // If the awaiter result pointer is non-null, we need to copy the result to there.
+ LLVMBasicBlockRef copy_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResult");
+ LLVMBasicBlockRef copy_end_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResultEnd");
+ LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_ret_start + 1, "");
+ LLVMValueRef awaiter_ret_ptr = LLVMBuildLoad(g->builder, awaiter_ret_ptr_ptr, "");
+ LLVMValueRef zero_ptr = LLVMConstNull(LLVMTypeOf(awaiter_ret_ptr));
+ LLVMValueRef need_copy_bit = LLVMBuildICmp(g->builder, LLVMIntNE, awaiter_ret_ptr, zero_ptr, "");
+ LLVMBuildCondBr(g->builder, need_copy_bit, copy_block, copy_end_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, copy_block);
+ LLVMValueRef ret_ptr = LLVMBuildLoad(g->builder, g->cur_ret_ptr_ptr, "");
+ LLVMTypeRef ptr_u8 = LLVMPointerType(LLVMInt8Type(), 0);
+ LLVMValueRef dest_ptr_casted = LLVMBuildBitCast(g->builder, awaiter_ret_ptr, ptr_u8, "");
+ LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, ret_ptr, ptr_u8, "");
+ bool is_volatile = false;
+ uint32_t abi_align = get_abi_alignment(g, ret_type);
+ LLVMValueRef byte_count_val = LLVMConstInt(usize_type_ref, type_size(g, ret_type), false);
+ ZigLLVMBuildMemCpy(g->builder,
+ dest_ptr_casted, abi_align,
+ src_ptr_casted, abi_align, byte_count_val, is_volatile);
+ LLVMBuildBr(g->builder, copy_end_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, copy_end_block);
}
- LLVMValueRef zero = LLVMConstNull(usize_type_ref);
- LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref);
- LLVMValueRef prev_val = LLVMBuildAtomicRMW(g->builder, LLVMAtomicRMWBinOpXchg, g->cur_async_awaiter_ptr,
- all_ones, LLVMAtomicOrderingMonotonic, g->is_single_threaded);
-
- LLVMBasicBlockRef bad_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadReturn");
- LLVMBasicBlockRef early_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "EarlyReturn");
- LLVMBasicBlockRef resume_them_block = LLVMAppendBasicBlock(g->cur_fn_val, "ResumeThem");
-
- LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, prev_val, resume_them_block, 2);
-
- LLVMAddCase(switch_instr, zero, early_return_block);
- LLVMAddCase(switch_instr, all_ones, bad_return_block);
-
- // Something has gone horribly wrong, and this is an invalid second return.
- LLVMPositionBuilderAtEnd(g->builder, bad_return_block);
- gen_assertion(g, PanicMsgIdBadReturn, &return_instruction->base);
-
- // The caller will deal with fetching the result - we're done.
- LLVMPositionBuilderAtEnd(g->builder, early_return_block);
- LLVMBuildRetVoid(g->builder);
// We need to resume the caller by tail calling them.
- LLVMPositionBuilderAtEnd(g->builder, resume_them_block);
ZigType *any_frame_type = get_any_frame_type(g, ret_type);
- LLVMValueRef their_frame_ptr = LLVMBuildIntToPtr(g->builder, prev_val,
+ LLVMValueRef their_frame_ptr = LLVMBuildIntToPtr(g->builder, g->cur_async_prev_val,
get_llvm_type(g, any_frame_type), "");
- LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, their_frame_ptr, coro_fn_ptr_index, "");
- LLVMValueRef awaiter_fn = LLVMBuildLoad(g->builder, fn_ptr_ptr, "");
- LLVMValueRef args[] = {their_frame_ptr, result_ptr_as_usize};
- LLVMValueRef call_inst = ZigLLVMBuildCall(g->builder, awaiter_fn, args, 2, LLVMFastCallConv,
- ZigLLVM_FnInlineAuto, "");
+ LLVMValueRef call_inst = gen_resume(g, nullptr, their_frame_ptr, ResumeIdReturn, nullptr);
ZigLLVMSetTailCall(call_inst);
LLVMBuildRetVoid(g->builder);
return nullptr;
}
if (want_first_arg_sret(g, &g->cur_fn->type_entry->data.fn.fn_type_id)) {
- if (return_instruction->value == nullptr) {
+ if (instruction->operand == nullptr) {
LLVMBuildRetVoid(g->builder);
return nullptr;
}
assert(g->cur_ret_ptr);
- src_assert(return_instruction->value->value.special != ConstValSpecialRuntime,
- return_instruction->base.source_node);
- LLVMValueRef value = ir_llvm_value(g, return_instruction->value);
- ZigType *return_type = return_instruction->value->value.type;
+ src_assert(instruction->operand->value.special != ConstValSpecialRuntime,
+ instruction->base.source_node);
+ LLVMValueRef value = ir_llvm_value(g, instruction->operand);
+ ZigType *return_type = instruction->operand->value.type;
gen_assign_raw(g, g->cur_ret_ptr, get_pointer_to_type(g, return_type, false), value);
LLVMBuildRetVoid(g->builder);
} else if (g->cur_fn->type_entry->data.fn.fn_type_id.cc != CallingConventionAsync &&
handle_is_ptr(g->cur_fn->type_entry->data.fn.fn_type_id.return_type))
{
- if (return_instruction->value == nullptr) {
+ if (instruction->operand == nullptr) {
LLVMValueRef by_val_value = gen_load_untyped(g, g->cur_ret_ptr, 0, false, "");
LLVMBuildRet(g->builder, by_val_value);
} else {
- LLVMValueRef value = ir_llvm_value(g, return_instruction->value);
+ LLVMValueRef value = ir_llvm_value(g, instruction->operand);
LLVMValueRef by_val_value = gen_load_untyped(g, value, 0, false, "");
LLVMBuildRet(g->builder, by_val_value);
}
- } else if (return_instruction->value == nullptr) {
+ } else if (instruction->operand == nullptr) {
LLVMBuildRetVoid(g->builder);
} else {
- LLVMValueRef value = ir_llvm_value(g, return_instruction->value);
+ LLVMValueRef value = ir_llvm_value(g, instruction->operand);
LLVMBuildRet(g->builder, value);
}
return nullptr;
@@ -3417,7 +3526,7 @@ static void set_call_instr_sret(CodeGen *g, LLVMValueRef call_instr) {
static void render_async_spills(CodeGen *g) {
ZigType *fn_type = g->cur_fn->type_entry;
ZigType *import = get_scope_import(&g->cur_fn->fndef_scope->base);
- uint32_t async_var_index = frame_index_arg(g, &fn_type->data.fn.fn_type_id);
+ uint32_t async_var_index = frame_index_arg(g, fn_type->data.fn.fn_type_id.return_type);
for (size_t var_i = 0; var_i < g->cur_fn->variable_list.length; var_i += 1) {
ZigVar *var = g->cur_fn->variable_list.at(var_i);
@@ -3450,7 +3559,7 @@ static void render_async_spills(CodeGen *g) {
}
}
// label (grep this): [coro_frame_struct_layout]
- if (codegen_fn_has_err_ret_tracing_stack(g, g->cur_fn)) {
+ if (codegen_fn_has_err_ret_tracing_stack(g, g->cur_fn, true)) {
async_var_index += 2;
}
for (size_t alloca_i = 0; alloca_i < g->cur_fn->alloca_gen_list.length; alloca_i += 1) {
@@ -3553,7 +3662,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
if (ret_has_bits) {
// Use the result location which is inside the frame if this is an async call.
- ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_ret_start + 1, "");
+ ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_ret_start + 2, "");
}
} else {
LLVMValueRef frame_slice_ptr = ir_llvm_value(g, instruction->new_stack);
@@ -3590,17 +3699,26 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
frame_result_loc = ir_llvm_value(g, instruction->frame_result_loc);
awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_ret_ptr, usize_type_ref, ""); // caller's own frame pointer
if (ret_has_bits) {
- if (result_loc != nullptr) {
+ if (result_loc == nullptr) {
+ // return type is a scalar, but we still need a pointer to it. Use the async fn frame.
+ ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_ret_start + 2, "");
+ } else {
// Use the call instruction's result location.
ret_ptr = result_loc;
- } else {
- // return type is a scalar, but we still need a pointer to it. Use the async fn frame.
- ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_ret_start + 1, "");
}
+
+ // Store a zero in the awaiter's result ptr to indicate we do not need a copy made.
+ LLVMValueRef awaiter_ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_ret_start + 1, "");
+ LLVMValueRef zero_ptr = LLVMConstNull(LLVMGetElementType(LLVMTypeOf(awaiter_ret_ptr)));
+ LLVMBuildStore(g->builder, zero_ptr, awaiter_ret_ptr);
}
- // even if prefix_arg_err_ret_stack is true, let the async function do its
- // error return tracing normally, and then we'll invoke merge_error_return_traces like normal.
+ if (prefix_arg_err_ret_stack) {
+ LLVMValueRef err_ret_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc,
+ frame_index_trace_arg(g, src_return_type), "");
+ LLVMValueRef my_err_ret_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope);
+ LLVMBuildStore(g->builder, my_err_ret_trace_val, err_ret_trace_ptr_ptr);
+ }
}
if (instruction->is_async || callee_is_async) {
assert(frame_result_loc != nullptr);
@@ -3652,7 +3770,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
LLVMValueRef result;
if (instruction->is_async || callee_is_async) {
- uint32_t arg_start_i = frame_index_arg(g, &fn_type->data.fn.fn_type_id);
+ uint32_t arg_start_i = frame_index_arg(g, fn_type->data.fn.fn_type_id.return_type);
LLVMValueRef casted_frame;
if (instruction->new_stack != nullptr) {
@@ -3678,8 +3796,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
}
}
if (instruction->is_async) {
- LLVMValueRef args[] = {frame_result_loc, LLVMGetUndef(usize_type_ref)};
- ZigLLVMBuildCall(g->builder, fn_val, args, 2, llvm_cc, fn_inline, "");
+ gen_resume(g, fn_val, frame_result_loc, ResumeIdCall, nullptr);
if (instruction->new_stack != nullptr) {
return frame_result_loc;
}
@@ -3694,36 +3811,23 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
LLVMAddCase(g->cur_async_switch_instr, new_block_index_val, call_bb);
LLVMBuildStore(g->builder, new_block_index_val, g->cur_async_resume_index_ptr);
- LLVMValueRef args[] = {frame_result_loc, LLVMGetUndef(usize_type_ref)};
- LLVMValueRef call_inst = ZigLLVMBuildCall(g->builder, fn_val, args, 2, llvm_cc, fn_inline, "");
+
+ LLVMValueRef call_inst = gen_resume(g, fn_val, frame_result_loc, ResumeIdCall, nullptr);
ZigLLVMSetTailCall(call_inst);
LLVMBuildRetVoid(g->builder);
LLVMPositionBuilderAtEnd(g->builder, call_bb);
- if (ir_want_runtime_safety(g, &instruction->base)) {
- LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadResume");
- LLVMBasicBlockRef ok_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "OkResume");
- LLVMValueRef arg_val = LLVMGetParam(g->cur_fn_val, 1);
- LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref);
- LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntNE, arg_val, all_ones, "");
- LLVMBuildCondBr(g->builder, ok_bit, ok_resume_block, bad_resume_block);
-
- LLVMPositionBuilderAtEnd(g->builder, bad_resume_block);
- gen_safety_crash(g, PanicMsgIdResumedAnAwaitingFn);
-
- LLVMPositionBuilderAtEnd(g->builder, ok_resume_block);
- }
-
+ gen_assert_resume_id(g, &instruction->base, ResumeIdReturn, PanicMsgIdResumedAnAwaitingFn, nullptr);
render_async_var_decls(g, instruction->base.scope);
- if (type_has_bits(src_return_type)) {
- LLVMValueRef spilled_result_ptr = LLVMGetParam(g->cur_fn_val, 1);
- LLVMValueRef casted_spilled_result_ptr = LLVMBuildIntToPtr(g->builder, spilled_result_ptr,
- get_llvm_type(g, ptr_result_type), "");
- return get_handle_value(g, casted_spilled_result_ptr, src_return_type, ptr_result_type);
- } else {
+ if (!type_has_bits(src_return_type))
return nullptr;
- }
+
+ if (result_loc != nullptr)
+ return get_handle_value(g, result_loc, src_return_type, ptr_result_type);
+
+ LLVMValueRef result_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, coro_ret_start + 2, "");
+ return LLVMBuildLoad(g->builder, result_ptr, "");
}
if (instruction->new_stack == nullptr) {
@@ -5191,8 +5295,9 @@ static LLVMValueRef ir_render_suspend_finish(CodeGen *g, IrExecutable *executabl
return nullptr;
}
-static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInstructionAwait *instruction) {
+static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInstructionAwaitGen *instruction) {
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
+ LLVMValueRef zero = LLVMConstNull(usize_type_ref);
LLVMValueRef target_frame_ptr = ir_llvm_value(g, instruction->frame);
ZigType *result_type = instruction->base.value.type;
ZigType *ptr_result_type = get_pointer_to_type(g, result_type, true);
@@ -5208,86 +5313,75 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst
// At this point resuming the function will do the correct thing.
// This code is as if it is running inside the suspend block.
+ // supply the awaiter return pointer
+ LLVMValueRef result_loc = (instruction->result_loc == nullptr) ?
+ nullptr : ir_llvm_value(g, instruction->result_loc);
+ if (type_has_bits(result_type)) {
+ LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_ret_start + 1, "");
+ if (result_loc == nullptr) {
+ // no copy needed
+ LLVMBuildStore(g->builder, zero, awaiter_ret_ptr_ptr);
+ } else {
+ LLVMBuildStore(g->builder, result_loc, awaiter_ret_ptr_ptr);
+ }
+ }
+
+ // supply the error return trace pointer
+ LLVMValueRef my_err_ret_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope);
+ if (my_err_ret_trace_val != nullptr) {
+ LLVMValueRef err_ret_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr,
+ frame_index_trace_arg(g, result_type), "");
+ LLVMBuildStore(g->builder, my_err_ret_trace_val, err_ret_trace_ptr_ptr);
+ }
+
// caller's own frame pointer
LLVMValueRef awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_ret_ptr, usize_type_ref, "");
LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_awaiter_index, "");
- LLVMValueRef result_ptr_as_usize;
- if (type_has_bits(result_type)) {
- LLVMValueRef result_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_ret_start, "");
- LLVMValueRef result_ptr = LLVMBuildLoad(g->builder, result_ptr_ptr, "");
- result_ptr_as_usize = LLVMBuildPtrToInt(g->builder, result_ptr, usize_type_ref, "");
- } else {
- result_ptr_as_usize = LLVMGetUndef(usize_type_ref);
- }
LLVMValueRef prev_val = LLVMBuildAtomicRMW(g->builder, LLVMAtomicRMWBinOpXchg, awaiter_ptr, awaiter_init_val,
- LLVMAtomicOrderingMonotonic, g->is_single_threaded);
+ LLVMAtomicOrderingRelease, g->is_single_threaded);
LLVMBasicBlockRef bad_await_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadAwait");
LLVMBasicBlockRef complete_suspend_block = LLVMAppendBasicBlock(g->cur_fn_val, "CompleteSuspend");
+ LLVMBasicBlockRef early_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "EarlyReturn");
- LLVMValueRef zero = LLVMConstNull(usize_type_ref);
LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref);
LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, prev_val, bad_await_block, 2);
- LLVMBasicBlockRef predecessor_bb = LLVMGetInsertBlock(g->builder);
LLVMAddCase(switch_instr, zero, complete_suspend_block);
-
- // Early return: The async function has already completed. No need to suspend.
- LLVMAddCase(switch_instr, all_ones, resume_bb);
+ LLVMAddCase(switch_instr, all_ones, early_return_block);
// We discovered that another awaiter was already here.
LLVMPositionBuilderAtEnd(g->builder, bad_await_block);
gen_assertion(g, PanicMsgIdBadAwait, &instruction->base);
+ // Early return: The async function has already completed, but it is suspending before setting the result,
+ // populating the error return trace if applicable, and running the defers.
+ // Tail resume it now, so that it can complete.
+ LLVMPositionBuilderAtEnd(g->builder, early_return_block);
+ LLVMValueRef call_inst = gen_resume(g, nullptr, target_frame_ptr, ResumeIdAwaitEarlyReturn, awaiter_init_val);
+ ZigLLVMSetTailCall(call_inst);
+ LLVMBuildRetVoid(g->builder);
+
// Rely on the target to resume us from suspension.
LLVMPositionBuilderAtEnd(g->builder, complete_suspend_block);
LLVMBuildRetVoid(g->builder);
LLVMPositionBuilderAtEnd(g->builder, resume_bb);
- // We either got here from Entry (function call) or from the switch above
- LLVMValueRef spilled_result_ptr = LLVMBuildPhi(g->builder, usize_type_ref, "");
- LLVMValueRef incoming_values[] = { LLVMGetParam(g->cur_fn_val, 1), result_ptr_as_usize };
- LLVMBasicBlockRef incoming_blocks[] = { g->cur_preamble_llvm_block, predecessor_bb };
- LLVMAddIncoming(spilled_result_ptr, incoming_values, incoming_blocks, 2);
-
- if (ir_want_runtime_safety(g, &instruction->base)) {
- LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadResume");
- LLVMBasicBlockRef ok_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "OkResume");
- LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref);
- LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntNE, spilled_result_ptr, all_ones, "");
- LLVMBuildCondBr(g->builder, ok_bit, ok_resume_block, bad_resume_block);
-
- LLVMPositionBuilderAtEnd(g->builder, bad_resume_block);
- gen_safety_crash(g, PanicMsgIdResumedAnAwaitingFn);
-
- LLVMPositionBuilderAtEnd(g->builder, ok_resume_block);
- }
-
- render_async_var_decls(g, instruction->base.scope);
-
- if (type_has_bits(result_type)) {
- LLVMValueRef casted_spilled_result_ptr = LLVMBuildIntToPtr(g->builder, spilled_result_ptr,
- get_llvm_type(g, ptr_result_type), "");
- return get_handle_value(g, casted_spilled_result_ptr, result_type, ptr_result_type);
- } else {
- return nullptr;
+ gen_assert_resume_id(g, &instruction->base, ResumeIdReturn, PanicMsgIdResumedAnAwaitingFn, nullptr);
+ if (type_has_bits(result_type) && result_loc != nullptr) {
+ return get_handle_value(g, result_loc, result_type, ptr_result_type);
}
+ return nullptr;
}
static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable,
IrInstructionCoroResume *instruction)
{
- LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
LLVMValueRef frame = ir_llvm_value(g, instruction->frame);
ZigType *frame_type = instruction->frame->value.type;
assert(frame_type->id == ZigTypeIdAnyFrame);
- LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, frame, coro_fn_ptr_index, "");
- LLVMValueRef uncasted_fn_val = LLVMBuildLoad(g->builder, fn_ptr_ptr, "");
- LLVMValueRef fn_val = LLVMBuildIntToPtr(g->builder, uncasted_fn_val, g->anyframe_fn_type, "");
- LLVMValueRef arg_val = ir_want_runtime_safety(g, &instruction->base) ?
- LLVMConstAllOnes(usize_type_ref) : LLVMGetUndef(usize_type_ref);
- LLVMValueRef args[] = {frame, arg_val};
- ZigLLVMBuildCall(g->builder, fn_val, args, 2, LLVMFastCallConv, ZigLLVM_FnInlineAuto, "");
+
+ gen_resume(g, nullptr, frame, ResumeIdManual, nullptr);
return nullptr;
}
@@ -5383,7 +5477,6 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdImplicitCast:
case IrInstructionIdResolveResult:
case IrInstructionIdResetResult:
- case IrInstructionIdResultPtr:
case IrInstructionIdContainerInitList:
case IrInstructionIdSliceSrc:
case IrInstructionIdRef:
@@ -5393,10 +5486,13 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdFrameType:
case IrInstructionIdFrameSizeSrc:
case IrInstructionIdAllocaGen:
+ case IrInstructionIdAwaitSrc:
zig_unreachable();
case IrInstructionIdDeclVarGen:
return ir_render_decl_var(g, executable, (IrInstructionDeclVarGen *)instruction);
+ case IrInstructionIdReturnBegin:
+ return ir_render_return_begin(g, executable, (IrInstructionReturnBegin *)instruction);
case IrInstructionIdReturn:
return ir_render_return(g, executable, (IrInstructionReturn *)instruction);
case IrInstructionIdBinOp:
@@ -5547,8 +5643,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_coro_resume(g, executable, (IrInstructionCoroResume *)instruction);
case IrInstructionIdFrameSizeGen:
return ir_render_frame_size(g, executable, (IrInstructionFrameSizeGen *)instruction);
- case IrInstructionIdAwait:
- return ir_render_await(g, executable, (IrInstructionAwait *)instruction);
+ case IrInstructionIdAwaitGen:
+ return ir_render_await(g, executable, (IrInstructionAwaitGen *)instruction);
}
zig_unreachable();
}
@@ -6777,16 +6873,19 @@ static void do_code_gen(CodeGen *g) {
g->cur_async_awaiter_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_awaiter_index, "");
LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_resume_index, "");
g->cur_async_resume_index_ptr = resume_index_ptr;
- LLVMValueRef err_ret_trace_val = nullptr;
- uint32_t trace_field_index;
+
+ if (type_has_bits(fn_type_id->return_type)) {
+ g->cur_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_ret_start, "");
+ }
if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) {
- trace_field_index = frame_index_trace_arg(g, fn_type_id);
- err_ret_trace_val = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, trace_field_index, "");
- g->cur_err_ret_trace_val_arg = err_ret_trace_val;
- } else if (codegen_fn_has_err_ret_tracing_stack(g, fn_table_entry)) {
- trace_field_index = frame_index_trace_stack(g, fn_type_id);
- err_ret_trace_val = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, trace_field_index, "");
- g->cur_err_ret_trace_val_stack = err_ret_trace_val;
+ uint32_t trace_field_index = frame_index_trace_arg(g, fn_type_id->return_type);
+ g->cur_err_ret_trace_val_arg = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, trace_field_index, "");
+ }
+ uint32_t trace_field_index_stack = UINT32_MAX;
+ if (codegen_fn_has_err_ret_tracing_stack(g, fn_table_entry, true)) {
+ trace_field_index_stack = frame_index_trace_stack(g, fn_type_id);
+ g->cur_err_ret_trace_val_stack = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr,
+ trace_field_index_stack, "");
}
LLVMValueRef resume_index = LLVMBuildLoad(g->builder, resume_index_ptr, "");
@@ -6798,11 +6897,11 @@ static void do_code_gen(CodeGen *g) {
LLVMAddCase(switch_instr, zero, entry_block->llvm_block);
g->cur_resume_block_count += 1;
LLVMPositionBuilderAtEnd(g->builder, entry_block->llvm_block);
- if (err_ret_trace_val != nullptr) {
+ if (trace_field_index_stack != UINT32_MAX) {
LLVMValueRef trace_field_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr,
- trace_field_index, "");
+ trace_field_index_stack, "");
LLVMValueRef trace_field_addrs = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr,
- trace_field_index + 1, "");
+ trace_field_index_stack + 1, "");
LLVMValueRef index_ptr = LLVMBuildStructGEP(g->builder, trace_field_ptr, 0, "");
LLVMBuildStore(g->builder, zero, index_ptr);
@@ -9725,7 +9824,7 @@ bool codegen_fn_has_err_ret_tracing_arg(CodeGen *g, ZigType *return_type) {
return_type->id == ZigTypeIdErrorSet);
}
-bool codegen_fn_has_err_ret_tracing_stack(CodeGen *g, ZigFn *fn) {
+bool codegen_fn_has_err_ret_tracing_stack(CodeGen *g, ZigFn *fn, bool is_async) {
return g->have_err_ret_tracing && fn->calls_or_awaits_errorable_fn &&
- !codegen_fn_has_err_ret_tracing_arg(g, fn->type_entry->data.fn.fn_type_id.return_type);
+ (is_async || !codegen_fn_has_err_ret_tracing_arg(g, fn->type_entry->data.fn.fn_type_id.return_type));
}
diff --git a/src/codegen.hpp b/src/codegen.hpp
index c84ef4bc48..794a0fd5a6 100644
--- a/src/codegen.hpp
+++ b/src/codegen.hpp
@@ -62,6 +62,6 @@ TargetSubsystem detect_subsystem(CodeGen *g);
void codegen_release_caches(CodeGen *codegen);
bool codegen_fn_has_err_ret_tracing_arg(CodeGen *g, ZigType *return_type);
-bool codegen_fn_has_err_ret_tracing_stack(CodeGen *g, ZigFn *fn);
+bool codegen_fn_has_err_ret_tracing_stack(CodeGen *g, ZigFn *fn, bool is_async);
#endif
diff --git a/src/ir.cpp b/src/ir.cpp
index ca54d54c2d..64e5e31a1b 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -525,6 +525,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionReturn *) {
return IrInstructionIdReturn;
}
+static constexpr IrInstructionId ir_instruction_id(IrInstructionReturnBegin *) {
+ return IrInstructionIdReturnBegin;
+}
+
static constexpr IrInstructionId ir_instruction_id(IrInstructionCast *) {
return IrInstructionIdCast;
}
@@ -945,10 +949,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionResetResult *) {
return IrInstructionIdResetResult;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionResultPtr *) {
- return IrInstructionIdResultPtr;
-}
-
static constexpr IrInstructionId ir_instruction_id(IrInstructionPtrOfArrayToSlice *) {
return IrInstructionIdPtrOfArrayToSlice;
}
@@ -1049,8 +1049,12 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionSuspendFinish *)
return IrInstructionIdSuspendFinish;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionAwait *) {
- return IrInstructionIdAwait;
+static constexpr IrInstructionId ir_instruction_id(IrInstructionAwaitSrc *) {
+ return IrInstructionIdAwaitSrc;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionAwaitGen *) {
+ return IrInstructionIdAwaitGen;
}
static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroResume *) {
@@ -1109,18 +1113,32 @@ static IrInstruction *ir_build_cond_br(IrBuilder *irb, Scope *scope, AstNode *so
}
static IrInstruction *ir_build_return(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *return_value)
+ IrInstruction *operand)
{
IrInstructionReturn *return_instruction = ir_build_instruction(irb, scope, source_node);
return_instruction->base.value.type = irb->codegen->builtin_types.entry_unreachable;
return_instruction->base.value.special = ConstValSpecialStatic;
- return_instruction->value = return_value;
+ return_instruction->operand = operand;
+
+ if (operand != nullptr) ir_ref_instruction(operand, irb->current_basic_block);
+
+ return &return_instruction->base;
+}
+
+static IrInstruction *ir_build_return_begin(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ IrInstruction *operand)
+{
+ IrInstructionReturnBegin *return_instruction = ir_build_instruction(irb, scope, source_node);
+ return_instruction->base.value.type = irb->codegen->builtin_types.entry_void;
+ return_instruction->base.value.special = ConstValSpecialStatic;
+ return_instruction->operand = operand;
- if (return_value != nullptr) ir_ref_instruction(return_value, irb->current_basic_block);
+ ir_ref_instruction(operand, irb->current_basic_block);
return &return_instruction->base;
}
+
static IrInstruction *ir_build_const_void(IrBuilder *irb, Scope *scope, AstNode *source_node) {
IrInstructionConst *const_instruction = ir_build_instruction(irb, scope, source_node);
const_instruction->base.value.type = irb->codegen->builtin_types.entry_void;
@@ -2525,11 +2543,12 @@ static IrInstruction *ir_build_align_of(IrBuilder *irb, Scope *scope, AstNode *s
}
static IrInstruction *ir_build_test_err_src(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *base_ptr, bool resolve_err_set)
+ IrInstruction *base_ptr, bool resolve_err_set, bool base_ptr_is_payload)
{
IrInstructionTestErrSrc *instruction = ir_build_instruction(irb, scope, source_node);
instruction->base_ptr = base_ptr;
instruction->resolve_err_set = resolve_err_set;
+ instruction->base_ptr_is_payload = base_ptr_is_payload;
ir_ref_instruction(base_ptr, irb->current_basic_block);
@@ -2971,18 +2990,6 @@ static IrInstruction *ir_build_reset_result(IrBuilder *irb, Scope *scope, AstNod
return &instruction->base;
}
-static IrInstruction *ir_build_result_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node,
- ResultLoc *result_loc, IrInstruction *result)
-{
- IrInstructionResultPtr *instruction = ir_build_instruction(irb, scope, source_node);
- instruction->result_loc = result_loc;
- instruction->result = result;
-
- ir_ref_instruction(result, irb->current_basic_block);
-
- return &instruction->base;
-}
-
static IrInstruction *ir_build_opaque_type(IrBuilder *irb, Scope *scope, AstNode *source_node) {
IrInstructionOpaqueType *instruction = ir_build_instruction(irb, scope, source_node);
@@ -3266,17 +3273,33 @@ static IrInstruction *ir_build_suspend_finish(IrBuilder *irb, Scope *scope, AstN
return &instruction->base;
}
-static IrInstruction *ir_build_await(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *frame)
+static IrInstruction *ir_build_await_src(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ IrInstruction *frame, ResultLoc *result_loc)
{
- IrInstructionAwait *instruction = ir_build_instruction(irb, scope, source_node);
+ IrInstructionAwaitSrc *instruction = ir_build_instruction(irb, scope, source_node);
instruction->frame = frame;
+ instruction->result_loc = result_loc;
ir_ref_instruction(frame, irb->current_basic_block);
return &instruction->base;
}
+static IrInstruction *ir_build_await_gen(IrAnalyze *ira, IrInstruction *source_instruction,
+ IrInstruction *frame, ZigType *result_type, IrInstruction *result_loc)
+{
+ IrInstructionAwaitGen *instruction = ir_build_instruction(&ira->new_irb,
+ source_instruction->scope, source_instruction->source_node);
+ instruction->base.value.type = result_type;
+ instruction->frame = frame;
+ instruction->result_loc = result_loc;
+
+ ir_ref_instruction(frame, ira->new_irb.current_basic_block);
+ if (result_loc != nullptr) ir_ref_instruction(result_loc, ira->new_irb.current_basic_block);
+
+ return &instruction->base;
+}
+
static IrInstruction *ir_build_coro_resume(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *frame)
{
@@ -3416,16 +3439,6 @@ static ScopeDeferExpr *get_scope_defer_expr(Scope *scope) {
return nullptr;
}
-static IrInstruction *ir_gen_async_return(IrBuilder *irb, Scope *scope, AstNode *node, IrInstruction *return_value,
- bool is_generated_code)
-{
- ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, return_value));
-
- IrInstruction *return_inst = ir_build_return(irb, scope, node, return_value);
- return_inst->is_gen = is_generated_code;
- return return_inst;
-}
-
static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval, ResultLoc *result_loc) {
assert(node->type == NodeTypeReturnExpr);
@@ -3467,19 +3480,16 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
return_value = ir_build_const_void(irb, scope, node);
}
+ ir_build_return_begin(irb, scope, node, return_value);
+
size_t defer_counts[2];
ir_count_defers(irb, scope, outer_scope, defer_counts);
bool have_err_defers = defer_counts[ReturnKindError] > 0;
if (have_err_defers || irb->codegen->have_err_ret_tracing) {
IrBasicBlock *err_block = ir_create_basic_block(irb, scope, "ErrRetErr");
IrBasicBlock *ok_block = ir_create_basic_block(irb, scope, "ErrRetOk");
- if (!have_err_defers) {
- ir_gen_defers_for_block(irb, scope, outer_scope, false);
- }
- IrInstruction *ret_ptr = ir_build_result_ptr(irb, scope, node, &result_loc_ret->base,
- return_value);
- IrInstruction *is_err = ir_build_test_err_src(irb, scope, node, ret_ptr, false);
+ IrInstruction *is_err = ir_build_test_err_src(irb, scope, node, return_value, false, true);
bool should_inline = ir_should_inline(irb->exec, scope);
IrInstruction *is_comptime;
@@ -3493,28 +3503,26 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
IrBasicBlock *ret_stmt_block = ir_create_basic_block(irb, scope, "RetStmt");
ir_set_cursor_at_end_and_append_block(irb, err_block);
- if (have_err_defers) {
- ir_gen_defers_for_block(irb, scope, outer_scope, true);
- }
if (irb->codegen->have_err_ret_tracing && !should_inline) {
ir_build_save_err_ret_addr(irb, scope, node);
}
+ ir_gen_defers_for_block(irb, scope, outer_scope, true);
ir_build_br(irb, scope, node, ret_stmt_block, is_comptime);
ir_set_cursor_at_end_and_append_block(irb, ok_block);
- if (have_err_defers) {
- ir_gen_defers_for_block(irb, scope, outer_scope, false);
- }
+ ir_gen_defers_for_block(irb, scope, outer_scope, false);
ir_build_br(irb, scope, node, ret_stmt_block, is_comptime);
ir_set_cursor_at_end_and_append_block(irb, ret_stmt_block);
- IrInstruction *result = ir_gen_async_return(irb, scope, node, return_value, false);
+ ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, return_value));
+ IrInstruction *result = ir_build_return(irb, scope, node, return_value);
result_loc_ret->base.source_instruction = result;
return result;
} else {
// generate unconditional defers
ir_gen_defers_for_block(irb, scope, outer_scope, false);
- IrInstruction *result = ir_gen_async_return(irb, scope, node, return_value, false);
+ ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, return_value));
+ IrInstruction *result = ir_build_return(irb, scope, node, return_value);
result_loc_ret->base.source_instruction = result;
return result;
}
@@ -3525,7 +3533,7 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
IrInstruction *err_union_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr, nullptr);
if (err_union_ptr == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
- IrInstruction *is_err_val = ir_build_test_err_src(irb, scope, node, err_union_ptr, true);
+ IrInstruction *is_err_val = ir_build_test_err_src(irb, scope, node, err_union_ptr, true, false);
IrBasicBlock *return_block = ir_create_basic_block(irb, scope, "ErrRetReturn");
IrBasicBlock *continue_block = ir_create_basic_block(irb, scope, "ErrRetContinue");
@@ -3539,10 +3547,10 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
ir_mark_gen(ir_build_cond_br(irb, scope, node, is_err_val, return_block, continue_block, is_comptime));
ir_set_cursor_at_end_and_append_block(irb, return_block);
+ IrInstruction *err_val_ptr = ir_build_unwrap_err_code(irb, scope, node, err_union_ptr);
+ IrInstruction *err_val = ir_build_load_ptr(irb, scope, node, err_val_ptr);
+ ir_build_return_begin(irb, scope, node, err_val);
if (!ir_gen_defers_for_block(irb, scope, outer_scope, true)) {
- IrInstruction *err_val_ptr = ir_build_unwrap_err_code(irb, scope, node, err_union_ptr);
- IrInstruction *err_val = ir_build_load_ptr(irb, scope, node, err_val_ptr);
-
ResultLocReturn *result_loc_ret = allocate(1);
result_loc_ret->base.id = ResultLocIdReturn;
ir_build_reset_result(irb, scope, node, &result_loc_ret->base);
@@ -3551,7 +3559,8 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
if (irb->codegen->have_err_ret_tracing && !should_inline) {
ir_build_save_err_ret_addr(irb, scope, node);
}
- IrInstruction *ret_inst = ir_gen_async_return(irb, scope, node, err_val, false);
+ ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, err_val));
+ IrInstruction *ret_inst = ir_build_return(irb, scope, node, err_val);
result_loc_ret->base.source_instruction = ret_inst;
}
@@ -6081,7 +6090,8 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n
LValPtr, nullptr);
if (err_val_ptr == irb->codegen->invalid_instruction)
return err_val_ptr;
- IrInstruction *is_err = ir_build_test_err_src(irb, scope, node->data.while_expr.condition, err_val_ptr, true);
+ IrInstruction *is_err = ir_build_test_err_src(irb, scope, node->data.while_expr.condition, err_val_ptr,
+ true, false);
IrBasicBlock *after_cond_block = irb->current_basic_block;
IrInstruction *void_else_result = else_node ? nullptr : ir_mark_gen(ir_build_const_void(irb, scope, node));
IrInstruction *cond_br_inst;
@@ -6897,7 +6907,7 @@ static IrInstruction *ir_gen_if_err_expr(IrBuilder *irb, Scope *scope, AstNode *
return err_val_ptr;
IrInstruction *err_val = ir_build_load_ptr(irb, scope, node, err_val_ptr);
- IrInstruction *is_err = ir_build_test_err_src(irb, scope, node, err_val_ptr, true);
+ IrInstruction *is_err = ir_build_test_err_src(irb, scope, node, err_val_ptr, true, false);
IrBasicBlock *ok_block = ir_create_basic_block(irb, scope, "TryOk");
IrBasicBlock *else_block = ir_create_basic_block(irb, scope, "TryElse");
@@ -7513,7 +7523,7 @@ static IrInstruction *ir_gen_catch(IrBuilder *irb, Scope *parent_scope, AstNode
if (err_union_ptr == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
- IrInstruction *is_err = ir_build_test_err_src(irb, parent_scope, node, err_union_ptr, true);
+ IrInstruction *is_err = ir_build_test_err_src(irb, parent_scope, node, err_union_ptr, true, false);
IrInstruction *is_comptime;
if (ir_should_inline(irb->exec, parent_scope)) {
@@ -7830,7 +7840,9 @@ static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node)
return ir_build_coro_resume(irb, scope, node, target_inst);
}
-static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *node) {
+static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval,
+ ResultLoc *result_loc)
+{
assert(node->type == NodeTypeAwaitExpr);
ZigFn *fn_entry = exec_fn_entry(irb->exec);
@@ -7852,7 +7864,8 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n
if (target_inst == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
- return ir_build_await(irb, scope, node, target_inst);
+ IrInstruction *await_inst = ir_build_await_src(irb, scope, node, target_inst, result_loc);
+ return ir_lval_wrap(irb, scope, await_inst, lval, result_loc);
}
static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNode *node) {
@@ -8016,7 +8029,7 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
case NodeTypeResume:
return ir_lval_wrap(irb, scope, ir_gen_resume(irb, scope, node), lval, result_loc);
case NodeTypeAwaitExpr:
- return ir_lval_wrap(irb, scope, ir_gen_await_expr(irb, scope, node), lval, result_loc);
+ return ir_gen_await_expr(irb, scope, node, lval, result_loc);
case NodeTypeSuspend:
return ir_lval_wrap(irb, scope, ir_gen_suspend(irb, scope, node), lval, result_loc);
case NodeTypeEnumLiteral:
@@ -8088,8 +8101,10 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
return false;
if (!instr_is_unreachable(result)) {
+ ir_mark_gen(ir_build_return_begin(irb, scope, node, result));
// no need for save_err_ret_addr because this cannot return error
- ir_gen_async_return(irb, scope, result->source_node, result, true);
+ ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, result->source_node, result));
+ ir_mark_gen(ir_build_return(irb, scope, result->source_node, result));
}
return true;
@@ -8181,18 +8196,19 @@ static ConstExprValue *ir_exec_const_result(CodeGen *codegen, IrExecutable *exec
IrInstruction *instruction = bb->instruction_list.at(i);
if (instruction->id == IrInstructionIdReturn) {
IrInstructionReturn *ret_inst = (IrInstructionReturn *)instruction;
- IrInstruction *value = ret_inst->value;
- if (value->value.special == ConstValSpecialRuntime) {
- exec_add_error_node(codegen, exec, value->source_node,
+ IrInstruction *operand = ret_inst->operand;
+ if (operand->value.special == ConstValSpecialRuntime) {
+ exec_add_error_node(codegen, exec, operand->source_node,
buf_sprintf("unable to evaluate constant expression"));
return &codegen->invalid_instruction->value;
}
- return &value->value;
+ return &operand->value;
} else if (ir_has_side_effects(instruction)) {
if (instr_is_comptime(instruction)) {
switch (instruction->id) {
case IrInstructionIdUnwrapErrPayload:
case IrInstructionIdUnionFieldPtr:
+ case IrInstructionIdReturnBegin:
continue;
default:
break;
@@ -12593,12 +12609,32 @@ static IrInstruction *ir_analyze_instruction_add_implicit_return_type(IrAnalyze
return ir_const_void(ira, &instruction->base);
}
+static IrInstruction *ir_analyze_instruction_return_begin(IrAnalyze *ira, IrInstructionReturnBegin *instruction) {
+ IrInstruction *operand = instruction->operand->child;
+ if (type_is_invalid(operand->value.type))
+ return ira->codegen->invalid_instruction;
+
+ IrInstruction *casted_operand = ir_implicit_cast(ira, operand, ira->explicit_return_type);
+ if (type_is_invalid(casted_operand->value.type)) {
+ AstNode *source_node = ira->explicit_return_type_source_node;
+ if (source_node != nullptr) {
+ ErrorMsg *msg = ira->codegen->errors.last();
+ add_error_note(ira->codegen, msg, source_node,
+ buf_sprintf("return type declared here"));
+ }
+ return ir_unreach_error(ira);
+ }
+
+ return ir_build_return_begin(&ira->new_irb, instruction->base.scope, instruction->base.source_node,
+ casted_operand);
+}
+
static IrInstruction *ir_analyze_instruction_return(IrAnalyze *ira, IrInstructionReturn *instruction) {
- IrInstruction *value = instruction->value->child;
- if (type_is_invalid(value->value.type))
+ IrInstruction *operand = instruction->operand->child;
+ if (type_is_invalid(operand->value.type))
return ir_unreach_error(ira);
- if (!instr_is_comptime(value) && handle_is_ptr(ira->explicit_return_type)) {
+ if (!instr_is_comptime(operand) && handle_is_ptr(ira->explicit_return_type)) {
// result location mechanism took care of it.
IrInstruction *result = ir_build_return(&ira->new_irb, instruction->base.scope,
instruction->base.source_node, nullptr);
@@ -12606,26 +12642,21 @@ static IrInstruction *ir_analyze_instruction_return(IrAnalyze *ira, IrInstructio
return ir_finish_anal(ira, result);
}
- IrInstruction *casted_value = ir_implicit_cast(ira, value, ira->explicit_return_type);
- if (type_is_invalid(casted_value->value.type)) {
- AstNode *source_node = ira->explicit_return_type_source_node;
- if (source_node != nullptr) {
- ErrorMsg *msg = ira->codegen->errors.last();
- add_error_note(ira->codegen, msg, source_node,
- buf_sprintf("return type declared here"));
- }
+ IrInstruction *casted_operand = ir_implicit_cast(ira, operand, ira->explicit_return_type);
+ if (type_is_invalid(casted_operand->value.type)) {
+ // error already reported by IrInstructionReturnBegin
return ir_unreach_error(ira);
}
- if (casted_value->value.special == ConstValSpecialRuntime &&
- casted_value->value.type->id == ZigTypeIdPointer &&
- casted_value->value.data.rh_ptr == RuntimeHintPtrStack)
+ if (casted_operand->value.special == ConstValSpecialRuntime &&
+ casted_operand->value.type->id == ZigTypeIdPointer &&
+ casted_operand->value.data.rh_ptr == RuntimeHintPtrStack)
{
- ir_add_error(ira, casted_value, buf_sprintf("function returns address of local variable"));
+ ir_add_error(ira, casted_operand, buf_sprintf("function returns address of local variable"));
return ir_unreach_error(ira);
}
IrInstruction *result = ir_build_return(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, casted_value);
+ instruction->base.source_node, casted_operand);
result->value.type = ira->codegen->builtin_types.entry_unreachable;
return ir_finish_anal(ira, result);
}
@@ -22176,19 +22207,6 @@ static IrInstruction *ir_analyze_instruction_overflow_op(IrAnalyze *ira, IrInstr
return result;
}
-static IrInstruction *ir_analyze_instruction_result_ptr(IrAnalyze *ira, IrInstructionResultPtr *instruction) {
- IrInstruction *result = instruction->result->child;
- if (type_is_invalid(result->value.type))
- return result;
-
- if (instruction->result_loc->written && instruction->result_loc->resolved_loc != nullptr &&
- !instr_is_comptime(result))
- {
- return instruction->result_loc->resolved_loc;
- }
- return ir_get_ref(ira, &instruction->base, result, true, false);
-}
-
static void ir_eval_mul_add(IrAnalyze *ira, IrInstructionMulAdd *source_instr, ZigType *float_type,
ConstExprValue *op1, ConstExprValue *op2, ConstExprValue *op3, ConstExprValue *out_val) {
if (float_type->id == ZigTypeIdComptimeFloat) {
@@ -22313,11 +22331,16 @@ static IrInstruction *ir_analyze_instruction_test_err(IrAnalyze *ira, IrInstruct
if (type_is_invalid(base_ptr->value.type))
return ira->codegen->invalid_instruction;
- IrInstruction *value = ir_get_deref(ira, &instruction->base, base_ptr, nullptr);
+ IrInstruction *value;
+ if (instruction->base_ptr_is_payload) {
+ value = base_ptr;
+ } else {
+ value = ir_get_deref(ira, &instruction->base, base_ptr, nullptr);
+ }
+
ZigType *type_entry = value->value.type;
if (type_is_invalid(type_entry))
return ira->codegen->invalid_instruction;
-
if (type_entry->id == ZigTypeIdErrorUnion) {
if (instr_is_comptime(value)) {
ConstExprValue *err_union_val = ir_resolve_const(ira, value, UndefBad);
@@ -24443,7 +24466,7 @@ static IrInstruction *ir_analyze_instruction_suspend_finish(IrAnalyze *ira,
return ir_build_suspend_finish(&ira->new_irb, instruction->base.scope, instruction->base.source_node, begin);
}
-static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstructionAwait *instruction) {
+static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstructionAwaitSrc *instruction) {
IrInstruction *frame_ptr = instruction->frame->child;
if (type_is_invalid(frame_ptr->value.type))
return ira->codegen->invalid_instruction;
@@ -24484,9 +24507,17 @@ static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstruction
fn_entry->calls_or_awaits_errorable_fn = true;
}
- IrInstruction *result = ir_build_await(&ira->new_irb,
- instruction->base.scope, instruction->base.source_node, frame);
- result->value.type = result_type;
+ IrInstruction *result_loc;
+ if (type_has_bits(result_type)) {
+ result_loc = ir_resolve_result(ira, &instruction->base, instruction->result_loc,
+ result_type, nullptr, true, false, true);
+ if (result_loc != nullptr && (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc)))
+ return result_loc;
+ } else {
+ result_loc = nullptr;
+ }
+
+ IrInstruction *result = ir_build_await_gen(ira, &instruction->base, frame, result_type, result_loc);
return ir_finish_anal(ira, result);
}
@@ -24541,8 +24572,11 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
case IrInstructionIdRefGen:
case IrInstructionIdTestErrGen:
case IrInstructionIdFrameSizeGen:
+ case IrInstructionIdAwaitGen:
zig_unreachable();
+ case IrInstructionIdReturnBegin:
+ return ir_analyze_instruction_return_begin(ira, (IrInstructionReturnBegin *)instruction);
case IrInstructionIdReturn:
return ir_analyze_instruction_return(ira, (IrInstructionReturn *)instruction);
case IrInstructionIdConst:
@@ -24749,8 +24783,6 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_resolve_result(ira, (IrInstructionResolveResult *)instruction);
case IrInstructionIdResetResult:
return ir_analyze_instruction_reset_result(ira, (IrInstructionResetResult *)instruction);
- case IrInstructionIdResultPtr:
- return ir_analyze_instruction_result_ptr(ira, (IrInstructionResultPtr *)instruction);
case IrInstructionIdOpaqueType:
return ir_analyze_instruction_opaque_type(ira, (IrInstructionOpaqueType *)instruction);
case IrInstructionIdSetAlignStack:
@@ -24807,8 +24839,8 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_suspend_finish(ira, (IrInstructionSuspendFinish *)instruction);
case IrInstructionIdCoroResume:
return ir_analyze_instruction_coro_resume(ira, (IrInstructionCoroResume *)instruction);
- case IrInstructionIdAwait:
- return ir_analyze_instruction_await(ira, (IrInstructionAwait *)instruction);
+ case IrInstructionIdAwaitSrc:
+ return ir_analyze_instruction_await(ira, (IrInstructionAwaitSrc *)instruction);
}
zig_unreachable();
}
@@ -24898,6 +24930,7 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdStorePtr:
case IrInstructionIdCallSrc:
case IrInstructionIdCallGen:
+ case IrInstructionIdReturnBegin:
case IrInstructionIdReturn:
case IrInstructionIdUnreachable:
case IrInstructionIdSetCold:
@@ -24943,7 +24976,8 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdSuspendBegin:
case IrInstructionIdSuspendFinish:
case IrInstructionIdCoroResume:
- case IrInstructionIdAwait:
+ case IrInstructionIdAwaitSrc:
+ case IrInstructionIdAwaitGen:
return true;
case IrInstructionIdPhi:
@@ -25041,7 +25075,6 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdHasDecl:
case IrInstructionIdAllocaSrc:
case IrInstructionIdAllocaGen:
- case IrInstructionIdResultPtr:
return false;
case IrInstructionIdAsm:
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index bc9d09b30c..c56a660e29 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -64,11 +64,15 @@ static void ir_print_other_block(IrPrint *irp, IrBasicBlock *bb) {
}
}
-static void ir_print_return(IrPrint *irp, IrInstructionReturn *return_instruction) {
+static void ir_print_return_begin(IrPrint *irp, IrInstructionReturnBegin *instruction) {
+ fprintf(irp->f, "@returnBegin(");
+ ir_print_other_instruction(irp, instruction->operand);
+ fprintf(irp->f, ")");
+}
+
+static void ir_print_return(IrPrint *irp, IrInstructionReturn *instruction) {
fprintf(irp->f, "return ");
- if (return_instruction->value != nullptr) {
- ir_print_other_instruction(irp, return_instruction->value);
- }
+ ir_print_other_instruction(irp, instruction->operand);
}
static void ir_print_const(IrPrint *irp, IrInstructionConst *const_instruction) {
@@ -1329,14 +1333,6 @@ static void ir_print_reset_result(IrPrint *irp, IrInstructionResetResult *instru
fprintf(irp->f, ")");
}
-static void ir_print_result_ptr(IrPrint *irp, IrInstructionResultPtr *instruction) {
- fprintf(irp->f, "ResultPtr(");
- ir_print_result_loc(irp, instruction->result_loc);
- fprintf(irp->f, ",");
- ir_print_other_instruction(irp, instruction->result);
- fprintf(irp->f, ")");
-}
-
static void ir_print_opaque_type(IrPrint *irp, IrInstructionOpaqueType *instruction) {
fprintf(irp->f, "@OpaqueType()");
}
@@ -1538,9 +1534,19 @@ static void ir_print_coro_resume(IrPrint *irp, IrInstructionCoroResume *instruct
fprintf(irp->f, ")");
}
-static void ir_print_await(IrPrint *irp, IrInstructionAwait *instruction) {
+static void ir_print_await_src(IrPrint *irp, IrInstructionAwaitSrc *instruction) {
fprintf(irp->f, "@await(");
ir_print_other_instruction(irp, instruction->frame);
+ fprintf(irp->f, ",");
+ ir_print_result_loc(irp, instruction->result_loc);
+ fprintf(irp->f, ")");
+}
+
+static void ir_print_await_gen(IrPrint *irp, IrInstructionAwaitGen *instruction) {
+ fprintf(irp->f, "@await(");
+ ir_print_other_instruction(irp, instruction->frame);
+ fprintf(irp->f, ",");
+ ir_print_other_instruction(irp, instruction->result_loc);
fprintf(irp->f, ")");
}
@@ -1549,6 +1555,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
switch (instruction->id) {
case IrInstructionIdInvalid:
zig_unreachable();
+ case IrInstructionIdReturnBegin:
+ ir_print_return_begin(irp, (IrInstructionReturnBegin *)instruction);
+ break;
case IrInstructionIdReturn:
ir_print_return(irp, (IrInstructionReturn *)instruction);
break;
@@ -1921,9 +1930,6 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdResetResult:
ir_print_reset_result(irp, (IrInstructionResetResult *)instruction);
break;
- case IrInstructionIdResultPtr:
- ir_print_result_ptr(irp, (IrInstructionResultPtr *)instruction);
- break;
case IrInstructionIdOpaqueType:
ir_print_opaque_type(irp, (IrInstructionOpaqueType *)instruction);
break;
@@ -2020,8 +2026,11 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdCoroResume:
ir_print_coro_resume(irp, (IrInstructionCoroResume *)instruction);
break;
- case IrInstructionIdAwait:
- ir_print_await(irp, (IrInstructionAwait *)instruction);
+ case IrInstructionIdAwaitSrc:
+ ir_print_await_src(irp, (IrInstructionAwaitSrc *)instruction);
+ break;
+ case IrInstructionIdAwaitGen:
+ ir_print_await_gen(irp, (IrInstructionAwaitGen *)instruction);
break;
}
fprintf(irp->f, "\n");
--
cgit v1.2.3
From 17199b087915661c935f0970cc1e4eb29968a68d Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 6 Aug 2019 18:29:56 -0400
Subject: passing the error return trace async function test
---
src/all_types.hpp | 2 +-
src/analyze.cpp | 5 +++
src/codegen.cpp | 68 ++++++++++++++++++++-----------------
src/ir.cpp | 66 +++++++++++++++++++----------------
test/stage1/behavior/coroutines.zig | 68 ++++++++++++++++++-------------------
5 files changed, 114 insertions(+), 95 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/src/all_types.hpp b/src/all_types.hpp
index bc65948579..1dad546a7e 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -1718,7 +1718,7 @@ struct CodeGen {
LLVMTargetMachineRef target_machine;
ZigLLVMDIFile *dummy_di_file;
LLVMValueRef cur_ret_ptr;
- LLVMValueRef cur_ret_ptr_ptr;
+ LLVMValueRef cur_frame_ptr;
LLVMValueRef cur_fn_val;
LLVMValueRef cur_async_switch_instr;
LLVMValueRef cur_async_resume_index_ptr;
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 36eeaeac9c..764b28ed45 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -5160,6 +5160,8 @@ static ZigType *get_async_fn_type(CodeGen *g, ZigType *orig_fn_type) {
}
static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) {
+ Error err;
+
if (frame_type->data.frame.locals_struct != nullptr)
return ErrorNone;
@@ -5286,6 +5288,9 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) {
continue;
}
}
+ if ((err = type_resolve(g, child_type, ResolveStatusSizeKnown))) {
+ return err;
+ }
const char *name;
if (*instruction->name_hint == 0) {
name = buf_ptr(buf_sprintf("@local%" ZIG_PRI_usize, alloca_i));
diff --git a/src/codegen.cpp b/src/codegen.cpp
index cf846d99e9..d1b5ebedad 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -2088,17 +2088,19 @@ static LLVMValueRef gen_resume(CodeGen *g, LLVMValueRef fn_val, LLVMValueRef tar
static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable,
IrInstructionReturnBegin *instruction)
{
- if (!fn_is_async(g->cur_fn)) return nullptr;
+ bool ret_type_has_bits = instruction->operand != nullptr &&
+ type_has_bits(instruction->operand->value.type);
+
+ if (!fn_is_async(g->cur_fn)) {
+ return ret_type_has_bits ? ir_llvm_value(g, instruction->operand) : nullptr;
+ }
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
- bool ret_type_has_bits = instruction->operand != nullptr &&
- type_has_bits(instruction->operand->value.type);
ZigType *ret_type = ret_type_has_bits ? instruction->operand->value.type : nullptr;
if (ret_type_has_bits && !handle_is_ptr(ret_type)) {
// It's a scalar, so it didn't get written to the result ptr. Do that before the atomic rmw.
- LLVMValueRef result_ptr = LLVMBuildLoad(g->builder, g->cur_ret_ptr_ptr, "");
- LLVMBuildStore(g->builder, ir_llvm_value(g, instruction->operand), result_ptr);
+ LLVMBuildStore(g->builder, ir_llvm_value(g, instruction->operand), g->cur_ret_ptr);
}
// Prepare to be suspended. We might end up not having to suspend though.
@@ -2147,7 +2149,11 @@ static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable,
LLVMBasicBlockRef incoming_blocks[] = { after_resume_block, switch_bb };
LLVMAddIncoming(g->cur_async_prev_val, incoming_values, incoming_blocks, 2);
- return nullptr;
+ if (!ret_type_has_bits) {
+ return nullptr;
+ }
+
+ return get_handle_value(g, g->cur_ret_ptr, ret_type, get_pointer_to_type(g, ret_type, true));
}
static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrInstructionReturn *instruction) {
@@ -2166,17 +2172,16 @@ static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrIns
// If the awaiter result pointer is non-null, we need to copy the result to there.
LLVMBasicBlockRef copy_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResult");
LLVMBasicBlockRef copy_end_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResultEnd");
- LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_ret_start + 1, "");
+ LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, coro_ret_start + 1, "");
LLVMValueRef awaiter_ret_ptr = LLVMBuildLoad(g->builder, awaiter_ret_ptr_ptr, "");
LLVMValueRef zero_ptr = LLVMConstNull(LLVMTypeOf(awaiter_ret_ptr));
LLVMValueRef need_copy_bit = LLVMBuildICmp(g->builder, LLVMIntNE, awaiter_ret_ptr, zero_ptr, "");
LLVMBuildCondBr(g->builder, need_copy_bit, copy_block, copy_end_block);
LLVMPositionBuilderAtEnd(g->builder, copy_block);
- LLVMValueRef ret_ptr = LLVMBuildLoad(g->builder, g->cur_ret_ptr_ptr, "");
LLVMTypeRef ptr_u8 = LLVMPointerType(LLVMInt8Type(), 0);
LLVMValueRef dest_ptr_casted = LLVMBuildBitCast(g->builder, awaiter_ret_ptr, ptr_u8, "");
- LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, ret_ptr, ptr_u8, "");
+ LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, g->cur_ret_ptr, ptr_u8, "");
bool is_volatile = false;
uint32_t abi_align = get_abi_alignment(g, ret_type);
LLVMValueRef byte_count_val = LLVMConstInt(usize_type_ref, type_size(g, ret_type), false);
@@ -3385,10 +3390,6 @@ static LLVMValueRef ir_render_return_ptr(CodeGen *g, IrExecutable *executable,
if (!type_has_bits(instruction->base.value.type))
return nullptr;
src_assert(g->cur_ret_ptr != nullptr, instruction->base.source_node);
- if (fn_is_async(g->cur_fn)) {
- LLVMValueRef ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_ret_start, "");
- return LLVMBuildLoad(g->builder, ptr_ptr, "");
- }
return g->cur_ret_ptr;
}
@@ -3547,7 +3548,7 @@ static void render_async_spills(CodeGen *g) {
continue;
}
- var->value_ref = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, async_var_index,
+ var->value_ref = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, async_var_index,
buf_ptr(&var->name));
async_var_index += 1;
if (var->decl_node) {
@@ -3578,7 +3579,7 @@ static void render_async_spills(CodeGen *g) {
continue;
}
}
- instruction->base.llvm_value = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, async_var_index,
+ instruction->base.llvm_value = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, async_var_index,
instruction->name_hint);
async_var_index += 1;
}
@@ -3697,7 +3698,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
// initialization.
} else if (callee_is_async) {
frame_result_loc = ir_llvm_value(g, instruction->frame_result_loc);
- awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_ret_ptr, usize_type_ref, ""); // caller's own frame pointer
+ awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, ""); // caller's own frame pointer
if (ret_has_bits) {
if (result_loc == nullptr) {
// return type is a scalar, but we still need a pointer to it. Use the async fn frame.
@@ -4850,7 +4851,7 @@ static LLVMValueRef ir_render_frame_address(CodeGen *g, IrExecutable *executable
}
static LLVMValueRef ir_render_handle(CodeGen *g, IrExecutable *executable, IrInstructionFrameHandle *instruction) {
- return g->cur_ret_ptr;
+ return g->cur_frame_ptr;
}
static LLVMValueRef render_shl_with_overflow(CodeGen *g, IrInstructionOverflowOp *instruction) {
@@ -5335,7 +5336,7 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst
}
// caller's own frame pointer
- LLVMValueRef awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_ret_ptr, usize_type_ref, "");
+ LLVMValueRef awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, "");
LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_awaiter_index, "");
LLVMValueRef prev_val = LLVMBuildAtomicRMW(g->builder, LLVMAtomicRMWBinOpXchg, awaiter_ptr, awaiter_init_val,
LLVMAtomicOrderingRelease, g->is_single_threaded);
@@ -6710,13 +6711,17 @@ static void do_code_gen(CodeGen *g) {
bool is_async = fn_is_async(fn_table_entry);
- if (want_sret || is_async) {
- g->cur_ret_ptr = LLVMGetParam(fn, 0);
- } else if (handle_is_ptr(fn_type_id->return_type)) {
- g->cur_ret_ptr = build_alloca(g, fn_type_id->return_type, "result", 0);
- // TODO add debug info variable for this
+ if (is_async) {
+ g->cur_frame_ptr = LLVMGetParam(fn, 0);
} else {
- g->cur_ret_ptr = nullptr;
+ if (want_sret) {
+ g->cur_ret_ptr = LLVMGetParam(fn, 0);
+ } else if (handle_is_ptr(fn_type_id->return_type)) {
+ g->cur_ret_ptr = build_alloca(g, fn_type_id->return_type, "result", 0);
+ // TODO add debug info variable for this
+ } else {
+ g->cur_ret_ptr = nullptr;
+ }
}
uint32_t err_ret_trace_arg_index = get_err_ret_trace_arg_index(g, fn_table_entry);
@@ -6870,21 +6875,22 @@ static void do_code_gen(CodeGen *g) {
LLVMPositionBuilderAtEnd(g->builder, g->cur_preamble_llvm_block);
render_async_spills(g);
- g->cur_async_awaiter_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_awaiter_index, "");
- LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_resume_index, "");
+ g->cur_async_awaiter_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, coro_awaiter_index, "");
+ LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, coro_resume_index, "");
g->cur_async_resume_index_ptr = resume_index_ptr;
if (type_has_bits(fn_type_id->return_type)) {
- g->cur_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, coro_ret_start, "");
+ LLVMValueRef cur_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, coro_ret_start, "");
+ g->cur_ret_ptr = LLVMBuildLoad(g->builder, cur_ret_ptr_ptr, "");
}
if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) {
uint32_t trace_field_index = frame_index_trace_arg(g, fn_type_id->return_type);
- g->cur_err_ret_trace_val_arg = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr, trace_field_index, "");
+ g->cur_err_ret_trace_val_arg = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, trace_field_index, "");
}
uint32_t trace_field_index_stack = UINT32_MAX;
if (codegen_fn_has_err_ret_tracing_stack(g, fn_table_entry, true)) {
trace_field_index_stack = frame_index_trace_stack(g, fn_type_id);
- g->cur_err_ret_trace_val_stack = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr,
+ g->cur_err_ret_trace_val_stack = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
trace_field_index_stack, "");
}
@@ -6898,9 +6904,9 @@ static void do_code_gen(CodeGen *g) {
g->cur_resume_block_count += 1;
LLVMPositionBuilderAtEnd(g->builder, entry_block->llvm_block);
if (trace_field_index_stack != UINT32_MAX) {
- LLVMValueRef trace_field_ptr = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr,
+ LLVMValueRef trace_field_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
trace_field_index_stack, "");
- LLVMValueRef trace_field_addrs = LLVMBuildStructGEP(g->builder, g->cur_ret_ptr,
+ LLVMValueRef trace_field_addrs = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
trace_field_index_stack + 1, "");
LLVMValueRef index_ptr = LLVMBuildStructGEP(g->builder, trace_field_ptr, 0, "");
diff --git a/src/ir.cpp b/src/ir.cpp
index 64e5e31a1b..7cb868cab2 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -1129,8 +1129,6 @@ static IrInstruction *ir_build_return_begin(IrBuilder *irb, Scope *scope, AstNod
IrInstruction *operand)
{
IrInstructionReturnBegin *return_instruction = ir_build_instruction(irb, scope, source_node);
- return_instruction->base.value.type = irb->codegen->builtin_types.entry_void;
- return_instruction->base.value.special = ConstValSpecialStatic;
return_instruction->operand = operand;
ir_ref_instruction(operand, irb->current_basic_block);
@@ -3480,7 +3478,8 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
return_value = ir_build_const_void(irb, scope, node);
}
- ir_build_return_begin(irb, scope, node, return_value);
+ ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, return_value));
+ return_value = ir_build_return_begin(irb, scope, node, return_value);
size_t defer_counts[2];
ir_count_defers(irb, scope, outer_scope, defer_counts);
@@ -3514,14 +3513,12 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
ir_build_br(irb, scope, node, ret_stmt_block, is_comptime);
ir_set_cursor_at_end_and_append_block(irb, ret_stmt_block);
- ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, return_value));
IrInstruction *result = ir_build_return(irb, scope, node, return_value);
result_loc_ret->base.source_instruction = result;
return result;
} else {
// generate unconditional defers
ir_gen_defers_for_block(irb, scope, outer_scope, false);
- ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, return_value));
IrInstruction *result = ir_build_return(irb, scope, node, return_value);
result_loc_ret->base.source_instruction = result;
return result;
@@ -3549,7 +3546,8 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
ir_set_cursor_at_end_and_append_block(irb, return_block);
IrInstruction *err_val_ptr = ir_build_unwrap_err_code(irb, scope, node, err_union_ptr);
IrInstruction *err_val = ir_build_load_ptr(irb, scope, node, err_val_ptr);
- ir_build_return_begin(irb, scope, node, err_val);
+ ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, err_val));
+ err_val = ir_build_return_begin(irb, scope, node, err_val);
if (!ir_gen_defers_for_block(irb, scope, outer_scope, true)) {
ResultLocReturn *result_loc_ret = allocate(1);
result_loc_ret->base.id = ResultLocIdReturn;
@@ -3559,7 +3557,6 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
if (irb->codegen->have_err_ret_tracing && !should_inline) {
ir_build_save_err_ret_addr(irb, scope, node);
}
- ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, err_val));
IrInstruction *ret_inst = ir_build_return(irb, scope, node, err_val);
result_loc_ret->base.source_instruction = ret_inst;
}
@@ -4972,7 +4969,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
return ir_lval_wrap(irb, scope, ir_build_frame_address(irb, scope, node), lval, result_loc);
case BuiltinFnIdFrameHandle:
if (!irb->exec->fn_entry) {
- add_node_error(irb->codegen, node, buf_sprintf("@handle() called outside of function definition"));
+ add_node_error(irb->codegen, node, buf_sprintf("@frame() called outside of function definition"));
return irb->codegen->invalid_instruction;
}
return ir_lval_wrap(irb, scope, ir_build_handle(irb, scope, node), lval, result_loc);
@@ -8101,9 +8098,9 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
return false;
if (!instr_is_unreachable(result)) {
- ir_mark_gen(ir_build_return_begin(irb, scope, node, result));
- // no need for save_err_ret_addr because this cannot return error
ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, result->source_node, result));
+ result = ir_mark_gen(ir_build_return_begin(irb, scope, node, result));
+ // no need for save_err_ret_addr because this cannot return error
ir_mark_gen(ir_build_return(irb, scope, result->source_node, result));
}
@@ -9789,6 +9786,8 @@ static ZigType *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_node, ZigT
ZigType *prev_err_set_type = (err_set_type == nullptr) ? prev_type->data.error_union.err_set_type : err_set_type;
ZigType *cur_err_set_type = cur_type->data.error_union.err_set_type;
+ if (prev_err_set_type == cur_err_set_type)
+ continue;
if (!resolve_inferred_error_set(ira->codegen, prev_err_set_type, cur_inst->source_node)) {
return ira->codegen->builtin_types.entry_invalid;
@@ -12614,6 +12613,14 @@ static IrInstruction *ir_analyze_instruction_return_begin(IrAnalyze *ira, IrInst
if (type_is_invalid(operand->value.type))
return ira->codegen->invalid_instruction;
+ if (!instr_is_comptime(operand) && handle_is_ptr(ira->explicit_return_type)) {
+ // result location mechanism took care of it.
+ IrInstruction *result = ir_build_return_begin(&ira->new_irb, instruction->base.scope,
+ instruction->base.source_node, operand);
+ copy_const_val(&result->value, &operand->value, true);
+ return result;
+ }
+
IrInstruction *casted_operand = ir_implicit_cast(ira, operand, ira->explicit_return_type);
if (type_is_invalid(casted_operand->value.type)) {
AstNode *source_node = ira->explicit_return_type_source_node;
@@ -12625,8 +12632,18 @@ static IrInstruction *ir_analyze_instruction_return_begin(IrAnalyze *ira, IrInst
return ir_unreach_error(ira);
}
- return ir_build_return_begin(&ira->new_irb, instruction->base.scope, instruction->base.source_node,
- casted_operand);
+ if (casted_operand->value.special == ConstValSpecialRuntime &&
+ casted_operand->value.type->id == ZigTypeIdPointer &&
+ casted_operand->value.data.rh_ptr == RuntimeHintPtrStack)
+ {
+ ir_add_error(ira, casted_operand, buf_sprintf("function returns address of local variable"));
+ return ir_unreach_error(ira);
+ }
+
+ IrInstruction *result = ir_build_return_begin(&ira->new_irb, instruction->base.scope,
+ instruction->base.source_node, casted_operand);
+ copy_const_val(&result->value, &casted_operand->value, true);
+ return result;
}
static IrInstruction *ir_analyze_instruction_return(IrAnalyze *ira, IrInstructionReturn *instruction) {
@@ -12642,21 +12659,8 @@ static IrInstruction *ir_analyze_instruction_return(IrAnalyze *ira, IrInstructio
return ir_finish_anal(ira, result);
}
- IrInstruction *casted_operand = ir_implicit_cast(ira, operand, ira->explicit_return_type);
- if (type_is_invalid(casted_operand->value.type)) {
- // error already reported by IrInstructionReturnBegin
- return ir_unreach_error(ira);
- }
-
- if (casted_operand->value.special == ConstValSpecialRuntime &&
- casted_operand->value.type->id == ZigTypeIdPointer &&
- casted_operand->value.data.rh_ptr == RuntimeHintPtrStack)
- {
- ir_add_error(ira, casted_operand, buf_sprintf("function returns address of local variable"));
- return ir_unreach_error(ira);
- }
IrInstruction *result = ir_build_return(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, casted_operand);
+ instruction->base.source_node, operand);
result->value.type = ira->codegen->builtin_types.entry_unreachable;
return ir_finish_anal(ira, result);
}
@@ -14612,8 +14616,12 @@ static IrInstruction *ir_resolve_result_raw(IrAnalyze *ira, IrInstruction *suspe
if ((err = type_resolve(ira->codegen, ira->explicit_return_type, ResolveStatusZeroBitsKnown))) {
return ira->codegen->invalid_instruction;
}
- if (!type_has_bits(ira->explicit_return_type) || !handle_is_ptr(ira->explicit_return_type))
- return nullptr;
+ if (!type_has_bits(ira->explicit_return_type) || !handle_is_ptr(ira->explicit_return_type)) {
+ ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
+ if (fn_entry == nullptr || fn_entry->inferred_async_node == nullptr) {
+ return nullptr;
+ }
+ }
ZigType *ptr_return_type = get_pointer_to_type(ira->codegen, ira->explicit_return_type, false);
result_loc->written = true;
@@ -24510,7 +24518,7 @@ static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstruction
IrInstruction *result_loc;
if (type_has_bits(result_type)) {
result_loc = ir_resolve_result(ira, &instruction->base, instruction->result_loc,
- result_type, nullptr, true, false, true);
+ result_type, nullptr, true, true, true);
if (result_loc != nullptr && (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc)))
return result_loc;
} else {
diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig
index 4cea8d1507..7a8edd793c 100644
--- a/test/stage1/behavior/coroutines.zig
+++ b/test/stage1/behavior/coroutines.zig
@@ -334,40 +334,40 @@ test "async fn with inferred error set" {
S.doTheTest();
}
-//test "error return trace across suspend points - early return" {
-// const p = nonFailing();
-// resume p;
-// const p2 = async printTrace(p);
-//}
-//
-//test "error return trace across suspend points - async return" {
-// const p = nonFailing();
-// const p2 = async printTrace(p);
-// resume p;
-//}
-//
-//fn nonFailing() (anyframe->anyerror!void) {
-// const Static = struct {
-// var frame: @Frame(suspendThenFail) = undefined;
-// };
-// Static.frame = async suspendThenFail();
-// return &Static.frame;
-//}
-//async fn suspendThenFail() anyerror!void {
-// suspend;
-// return error.Fail;
-//}
-//async fn printTrace(p: anyframe->(anyerror!void)) void {
-// (await p) catch |e| {
-// std.testing.expect(e == error.Fail);
-// if (@errorReturnTrace()) |trace| {
-// expect(trace.index == 1);
-// } else switch (builtin.mode) {
-// .Debug, .ReleaseSafe => @panic("expected return trace"),
-// .ReleaseFast, .ReleaseSmall => {},
-// }
-// };
-//}
+test "error return trace across suspend points - early return" {
+ const p = nonFailing();
+ resume p;
+ const p2 = async printTrace(p);
+}
+
+test "error return trace across suspend points - async return" {
+ const p = nonFailing();
+ const p2 = async printTrace(p);
+ resume p;
+}
+
+fn nonFailing() (anyframe->anyerror!void) {
+ const Static = struct {
+ var frame: @Frame(suspendThenFail) = undefined;
+ };
+ Static.frame = async suspendThenFail();
+ return &Static.frame;
+}
+async fn suspendThenFail() anyerror!void {
+ suspend;
+ return error.Fail;
+}
+async fn printTrace(p: anyframe->(anyerror!void)) void {
+ (await p) catch |e| {
+ std.testing.expect(e == error.Fail);
+ if (@errorReturnTrace()) |trace| {
+ expect(trace.index == 1);
+ } else switch (builtin.mode) {
+ .Debug, .ReleaseSafe => @panic("expected return trace"),
+ .ReleaseFast, .ReleaseSmall => {},
+ }
+ };
+}
test "break from suspend" {
var my_result: i32 = 1;
--
cgit v1.2.3
From 7e1fcb55b3e96524ce6f5620e2e98a3e3cc56608 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 7 Aug 2019 00:52:56 -0400
Subject: implement cancel
all behavior tests passing in this branch
---
BRANCH_TODO | 2 +-
src/all_types.hpp | 3 +-
src/analyze.cpp | 3 +
src/codegen.cpp | 109 ++++++++++++++++---------
src/ir.cpp | 55 +++++++++++--
src/ir_print.cpp | 2 +-
test/stage1/behavior/cancel.zig | 176 +++++++++++++++++++++-------------------
7 files changed, 216 insertions(+), 134 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/BRANCH_TODO b/BRANCH_TODO
index 294bb42d55..ca3888f391 100644
--- a/BRANCH_TODO
+++ b/BRANCH_TODO
@@ -1,4 +1,4 @@
- * go over the commented out tests in cancel.zig
+ * clean up the bitcasting of awaiter fn ptr
* compile error for error: expected anyframe->T, found 'anyframe'
* compile error for error: expected anyframe->T, found 'i32'
* await of a non async function
diff --git a/src/all_types.hpp b/src/all_types.hpp
index e1fff953b4..a7fb542ad3 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -1556,6 +1556,7 @@ enum PanicMsgId {
PanicMsgIdBadAwait,
PanicMsgIdBadReturn,
PanicMsgIdResumedAnAwaitingFn,
+ PanicMsgIdResumedACancelingFn,
PanicMsgIdFrameTooSmall,
PanicMsgIdResumedFnPendingAwait,
@@ -3432,7 +3433,7 @@ struct IrInstructionErrorUnion {
struct IrInstructionCancel {
IrInstruction base;
- IrInstruction *target;
+ IrInstruction *frame;
};
struct IrInstructionAtomicRmw {
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 764b28ed45..cf71bd90f3 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -3811,6 +3811,9 @@ static void add_async_error_notes(CodeGen *g, ErrorMsg *msg, ZigFn *fn) {
} else if (fn->inferred_async_node->type == NodeTypeAwaitExpr) {
add_error_note(g, msg, fn->inferred_async_node,
buf_sprintf("await is a suspend point"));
+ } else if (fn->inferred_async_node->type == NodeTypeCancel) {
+ add_error_note(g, msg, fn->inferred_async_node,
+ buf_sprintf("cancel is a suspend point"));
} else {
zig_unreachable();
}
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 7a27585e45..2a6c5f8b8f 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -911,11 +911,13 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) {
case PanicMsgIdBadResume:
return buf_create_from_str("resumed an async function which already returned");
case PanicMsgIdBadAwait:
- return buf_create_from_str("async function awaited twice");
+ return buf_create_from_str("async function awaited/canceled twice");
case PanicMsgIdBadReturn:
return buf_create_from_str("async function returned twice");
case PanicMsgIdResumedAnAwaitingFn:
return buf_create_from_str("awaiting function resumed");
+ case PanicMsgIdResumedACancelingFn:
+ return buf_create_from_str("canceling function resumed");
case PanicMsgIdFrameTooSmall:
return buf_create_from_str("frame too small");
case PanicMsgIdResumedFnPendingAwait:
@@ -2189,12 +2191,12 @@ static void gen_assert_resume_id(CodeGen *g, IrInstruction *source_instr, Resume
if (end_bb == nullptr) end_bb = LLVMAppendBasicBlock(g->cur_fn_val, "OkResume");
LLVMValueRef ok_bit;
if (resume_id == ResumeIdAwaitEarlyReturn) {
- LLVMValueRef last_value = LLVMBuildSub(g->builder, LLVMConstAllOnes(usize_type_ref),
- LLVMConstInt(usize_type_ref, ResumeIdAwaitEarlyReturn, false), "");
+ LLVMValueRef last_value = LLVMConstSub(LLVMConstAllOnes(usize_type_ref),
+ LLVMConstInt(usize_type_ref, ResumeIdAwaitEarlyReturn, false));
ok_bit = LLVMBuildICmp(g->builder, LLVMIntULT, LLVMGetParam(g->cur_fn_val, 1), last_value, "");
} else {
- LLVMValueRef expected_value = LLVMBuildSub(g->builder, LLVMConstAllOnes(usize_type_ref),
- LLVMConstInt(usize_type_ref, resume_id, false), "");
+ LLVMValueRef expected_value = LLVMConstSub(LLVMConstAllOnes(usize_type_ref),
+ LLVMConstInt(usize_type_ref, resume_id, false));
ok_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, LLVMGetParam(g->cur_fn_val, 1), expected_value, "");
}
LLVMBuildCondBr(g->builder, ok_bit, end_bb, bad_resume_block);
@@ -2210,11 +2212,13 @@ static LLVMValueRef gen_resume(CodeGen *g, LLVMValueRef fn_val, LLVMValueRef tar
{
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
if (fn_val == nullptr) {
- if (g->anyframe_fn_type == nullptr) {
- (void)get_llvm_type(g, get_any_frame_type(g, nullptr));
- }
LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_fn_ptr_index, "");
- fn_val = LLVMBuildLoad(g->builder, fn_ptr_ptr, "");
+ LLVMValueRef fn_val_typed = LLVMBuildLoad(g->builder, fn_ptr_ptr, "");
+ LLVMValueRef as_int = LLVMBuildPtrToInt(g->builder, fn_val_typed, usize_type_ref, "");
+ LLVMValueRef one = LLVMConstInt(usize_type_ref, 1, false);
+ LLVMValueRef mask_val = LLVMConstNot(one);
+ LLVMValueRef as_int_masked = LLVMBuildAnd(g->builder, as_int, mask_val, "");
+ fn_val = LLVMBuildIntToPtr(g->builder, as_int_masked, LLVMTypeOf(fn_val_typed), "");
}
if (arg_val == nullptr) {
arg_val = LLVMBuildSub(g->builder, LLVMConstAllOnes(usize_type_ref),
@@ -2226,6 +2230,17 @@ static LLVMValueRef gen_resume(CodeGen *g, LLVMValueRef fn_val, LLVMValueRef tar
return ZigLLVMBuildCall(g->builder, fn_val, args, 2, LLVMFastCallConv, ZigLLVM_FnInlineAuto, "");
}
+static LLVMBasicBlockRef gen_suspend_begin(CodeGen *g, const char *name_hint) {
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
+ LLVMBasicBlockRef resume_bb = LLVMAppendBasicBlock(g->cur_fn_val, name_hint);
+ size_t new_block_index = g->cur_resume_block_count;
+ g->cur_resume_block_count += 1;
+ LLVMValueRef new_block_index_val = LLVMConstInt(usize_type_ref, new_block_index, false);
+ LLVMAddCase(g->cur_async_switch_instr, new_block_index_val, resume_bb);
+ LLVMBuildStore(g->builder, new_block_index_val, g->cur_async_resume_index_ptr);
+ return resume_bb;
+}
+
static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable,
IrInstructionReturnBegin *instruction)
{
@@ -2245,12 +2260,7 @@ static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable,
}
// Prepare to be suspended. We might end up not having to suspend though.
- LLVMBasicBlockRef resume_bb = LLVMAppendBasicBlock(g->cur_fn_val, "ReturnResume");
- size_t new_block_index = g->cur_resume_block_count;
- g->cur_resume_block_count += 1;
- LLVMValueRef new_block_index_val = LLVMConstInt(usize_type_ref, new_block_index, false);
- LLVMAddCase(g->cur_async_switch_instr, new_block_index_val, resume_bb);
- LLVMBuildStore(g->builder, new_block_index_val, g->cur_async_resume_index_ptr);
+ LLVMBasicBlockRef resume_bb = gen_suspend_begin(g, "ReturnResume");
LLVMValueRef zero = LLVMConstNull(usize_type_ref);
LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref);
@@ -2335,7 +2345,10 @@ static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrIns
// We need to resume the caller by tail calling them.
ZigType *any_frame_type = get_any_frame_type(g, ret_type);
- LLVMValueRef their_frame_ptr = LLVMBuildIntToPtr(g->builder, g->cur_async_prev_val,
+ LLVMValueRef one = LLVMConstInt(usize_type_ref, 1, false);
+ LLVMValueRef mask_val = LLVMConstNot(one);
+ LLVMValueRef masked_prev_val = LLVMBuildAnd(g->builder, g->cur_async_prev_val, mask_val, "");
+ LLVMValueRef their_frame_ptr = LLVMBuildIntToPtr(g->builder, masked_prev_val,
get_llvm_type(g, any_frame_type), "");
LLVMValueRef call_inst = gen_resume(g, nullptr, their_frame_ptr, ResumeIdReturn, nullptr);
ZigLLVMSetTailCall(call_inst);
@@ -3945,13 +3958,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
} else if (callee_is_async) {
ZigType *ptr_result_type = get_pointer_to_type(g, src_return_type, true);
- LLVMBasicBlockRef call_bb = LLVMAppendBasicBlock(g->cur_fn_val, "CallResume");
- size_t new_block_index = g->cur_resume_block_count;
- g->cur_resume_block_count += 1;
- LLVMValueRef new_block_index_val = LLVMConstInt(usize_type_ref, new_block_index, false);
- LLVMAddCase(g->cur_async_switch_instr, new_block_index_val, call_bb);
-
- LLVMBuildStore(g->builder, new_block_index_val, g->cur_async_resume_index_ptr);
+ LLVMBasicBlockRef call_bb = gen_suspend_begin(g, "CallResume");
LLVMValueRef call_inst = gen_resume(g, fn_val, frame_result_loc, ResumeIdCall, nullptr);
ZigLLVMSetTailCall(call_inst);
@@ -4672,10 +4679,6 @@ static LLVMValueRef ir_render_error_return_trace(CodeGen *g, IrExecutable *execu
return cur_err_ret_trace_val;
}
-static LLVMValueRef ir_render_cancel(CodeGen *g, IrExecutable *executable, IrInstructionCancel *instruction) {
- zig_panic("TODO cancel");
-}
-
static LLVMAtomicOrdering to_LLVMAtomicOrdering(AtomicOrder atomic_order) {
switch (atomic_order) {
case AtomicOrderUnordered: return LLVMAtomicOrderingUnordered;
@@ -5416,13 +5419,7 @@ static LLVMValueRef ir_render_assert_non_null(CodeGen *g, IrExecutable *executab
static LLVMValueRef ir_render_suspend_begin(CodeGen *g, IrExecutable *executable,
IrInstructionSuspendBegin *instruction)
{
- LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
- instruction->resume_bb = LLVMAppendBasicBlock(g->cur_fn_val, "SuspendResume");
- size_t new_block_index = g->cur_resume_block_count;
- g->cur_resume_block_count += 1;
- LLVMValueRef new_block_index_val = LLVMConstInt(usize_type_ref, new_block_index, false);
- LLVMAddCase(g->cur_async_switch_instr, new_block_index_val, instruction->resume_bb);
- LLVMBuildStore(g->builder, new_block_index_val, g->cur_async_resume_index_ptr);
+ instruction->resume_bb = gen_suspend_begin(g, "SuspendResume");
return nullptr;
}
@@ -5436,6 +5433,43 @@ static LLVMValueRef ir_render_suspend_finish(CodeGen *g, IrExecutable *executabl
return nullptr;
}
+static LLVMValueRef ir_render_cancel(CodeGen *g, IrExecutable *executable, IrInstructionCancel *instruction) {
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
+ LLVMValueRef zero = LLVMConstNull(usize_type_ref);
+ LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref);
+ LLVMValueRef one = LLVMConstInt(usize_type_ref, 1, false);
+
+ LLVMValueRef target_frame_ptr = ir_llvm_value(g, instruction->frame);
+ LLVMBasicBlockRef resume_bb = gen_suspend_begin(g, "CancelResume");
+
+ LLVMValueRef awaiter_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, "");
+ LLVMValueRef awaiter_ored_val = LLVMBuildOr(g->builder, awaiter_val, one, "");
+ LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_awaiter_index, "");
+
+ LLVMValueRef prev_val = LLVMBuildAtomicRMW(g->builder, LLVMAtomicRMWBinOpXchg, awaiter_ptr, awaiter_ored_val,
+ LLVMAtomicOrderingRelease, g->is_single_threaded);
+
+ LLVMBasicBlockRef complete_suspend_block = LLVMAppendBasicBlock(g->cur_fn_val, "CancelSuspend");
+ LLVMBasicBlockRef early_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "EarlyReturn");
+
+ LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, prev_val, resume_bb, 2);
+ LLVMAddCase(switch_instr, zero, complete_suspend_block);
+ LLVMAddCase(switch_instr, all_ones, early_return_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, complete_suspend_block);
+ LLVMBuildRetVoid(g->builder);
+
+ LLVMPositionBuilderAtEnd(g->builder, early_return_block);
+ LLVMValueRef call_inst = gen_resume(g, nullptr, target_frame_ptr, ResumeIdAwaitEarlyReturn, awaiter_ored_val);
+ ZigLLVMSetTailCall(call_inst);
+ LLVMBuildRetVoid(g->builder);
+
+ LLVMPositionBuilderAtEnd(g->builder, resume_bb);
+ gen_assert_resume_id(g, &instruction->base, ResumeIdReturn, PanicMsgIdResumedACancelingFn, nullptr);
+
+ return nullptr;
+}
+
static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInstructionAwaitGen *instruction) {
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
LLVMValueRef zero = LLVMConstNull(usize_type_ref);
@@ -5444,12 +5478,7 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst
ZigType *ptr_result_type = get_pointer_to_type(g, result_type, true);
// Prepare to be suspended
- LLVMBasicBlockRef resume_bb = LLVMAppendBasicBlock(g->cur_fn_val, "AwaitResume");
- size_t new_block_index = g->cur_resume_block_count;
- g->cur_resume_block_count += 1;
- LLVMValueRef new_block_index_val = LLVMConstInt(usize_type_ref, new_block_index, false);
- LLVMAddCase(g->cur_async_switch_instr, new_block_index_val, resume_bb);
- LLVMBuildStore(g->builder, new_block_index_val, g->cur_async_resume_index_ptr);
+ LLVMBasicBlockRef resume_bb = gen_suspend_begin(g, "AwaitResume");
// At this point resuming the function will do the correct thing.
// This code is as if it is running inside the suspend block.
diff --git a/src/ir.cpp b/src/ir.cpp
index 7cb868cab2..853cf4daa1 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -3271,6 +3271,16 @@ static IrInstruction *ir_build_suspend_finish(IrBuilder *irb, Scope *scope, AstN
return &instruction->base;
}
+static IrInstruction *ir_build_cancel(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *frame) {
+ IrInstructionCancel *instruction = ir_build_instruction(irb, scope, source_node);
+ instruction->base.value.type = irb->codegen->builtin_types.entry_void;
+ instruction->frame = frame;
+
+ ir_ref_instruction(frame, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
static IrInstruction *ir_build_await_src(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *frame, ResultLoc *result_loc)
{
@@ -7820,11 +7830,26 @@ static IrInstruction *ir_gen_fn_proto(IrBuilder *irb, Scope *parent_scope, AstNo
static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node) {
assert(node->type == NodeTypeCancel);
- IrInstruction *target_inst = ir_gen_node(irb, node->data.cancel_expr.expr, scope);
- if (target_inst == irb->codegen->invalid_instruction)
+ ZigFn *fn_entry = exec_fn_entry(irb->exec);
+ if (!fn_entry) {
+ add_node_error(irb->codegen, node, buf_sprintf("cancel outside function definition"));
+ return irb->codegen->invalid_instruction;
+ }
+ ScopeSuspend *existing_suspend_scope = get_scope_suspend(scope);
+ if (existing_suspend_scope) {
+ if (!existing_suspend_scope->reported_err) {
+ ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot cancel inside suspend block"));
+ add_error_note(irb->codegen, msg, existing_suspend_scope->base.source_node, buf_sprintf("suspend block here"));
+ existing_suspend_scope->reported_err = true;
+ }
+ return irb->codegen->invalid_instruction;
+ }
+
+ IrInstruction *operand = ir_gen_node(irb, node->data.cancel_expr.expr, scope);
+ if (operand == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
- zig_panic("TODO ir_gen_cancel");
+ return ir_build_cancel(irb, scope, node, operand);
}
static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node) {
@@ -23781,10 +23806,6 @@ static IrInstruction *ir_analyze_instruction_tag_type(IrAnalyze *ira, IrInstruct
}
}
-static IrInstruction *ir_analyze_instruction_cancel(IrAnalyze *ira, IrInstructionCancel *instruction) {
- zig_panic("TODO analyze cancel");
-}
-
static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstruction *op) {
ZigType *operand_type = ir_resolve_type(ira, op);
if (type_is_invalid(operand_type))
@@ -24474,6 +24495,26 @@ static IrInstruction *ir_analyze_instruction_suspend_finish(IrAnalyze *ira,
return ir_build_suspend_finish(&ira->new_irb, instruction->base.scope, instruction->base.source_node, begin);
}
+static IrInstruction *ir_analyze_instruction_cancel(IrAnalyze *ira, IrInstructionCancel *instruction) {
+ IrInstruction *frame = instruction->frame->child;
+ if (type_is_invalid(frame->value.type))
+ return ira->codegen->invalid_instruction;
+
+ ZigType *any_frame_type = get_any_frame_type(ira->codegen, nullptr);
+ IrInstruction *casted_frame = ir_implicit_cast(ira, frame, any_frame_type);
+ if (type_is_invalid(casted_frame->value.type))
+ return ira->codegen->invalid_instruction;
+
+ ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
+ ir_assert(fn_entry != nullptr, &instruction->base);
+
+ if (fn_entry->inferred_async_node == nullptr) {
+ fn_entry->inferred_async_node = instruction->base.source_node;
+ }
+
+ return ir_build_cancel(&ira->new_irb, instruction->base.scope, instruction->base.source_node, casted_frame);
+}
+
static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstructionAwaitSrc *instruction) {
IrInstruction *frame_ptr = instruction->frame->child;
if (type_is_invalid(frame_ptr->value.type))
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index c56a660e29..0348cfc986 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -1396,7 +1396,7 @@ static void ir_print_error_union(IrPrint *irp, IrInstructionErrorUnion *instruct
static void ir_print_cancel(IrPrint *irp, IrInstructionCancel *instruction) {
fprintf(irp->f, "cancel ");
- ir_print_other_instruction(irp, instruction->target);
+ ir_print_other_instruction(irp, instruction->frame);
}
static void ir_print_atomic_rmw(IrPrint *irp, IrInstructionAtomicRmw *instruction) {
diff --git a/test/stage1/behavior/cancel.zig b/test/stage1/behavior/cancel.zig
index cb8a075279..c8636212b0 100644
--- a/test/stage1/behavior/cancel.zig
+++ b/test/stage1/behavior/cancel.zig
@@ -1,86 +1,94 @@
const std = @import("std");
+const expect = std.testing.expect;
-//var defer_f1: bool = false;
-//var defer_f2: bool = false;
-//var defer_f3: bool = false;
-//
-//test "cancel forwards" {
-// const p = async f1() catch unreachable;
-// cancel p;
-// std.testing.expect(defer_f1);
-// std.testing.expect(defer_f2);
-// std.testing.expect(defer_f3);
-//}
-//
-//async fn f1() void {
-// defer {
-// defer_f1 = true;
-// }
-// await (async f2() catch unreachable);
-//}
-//
-//async fn f2() void {
-// defer {
-// defer_f2 = true;
-// }
-// await (async f3() catch unreachable);
-//}
-//
-//async fn f3() void {
-// defer {
-// defer_f3 = true;
-// }
-// suspend;
-//}
-//
-//var defer_b1: bool = false;
-//var defer_b2: bool = false;
-//var defer_b3: bool = false;
-//var defer_b4: bool = false;
-//
-//test "cancel backwards" {
-// const p = async b1() catch unreachable;
-// cancel p;
-// std.testing.expect(defer_b1);
-// std.testing.expect(defer_b2);
-// std.testing.expect(defer_b3);
-// std.testing.expect(defer_b4);
-//}
-//
-//async fn b1() void {
-// defer {
-// defer_b1 = true;
-// }
-// await (async b2() catch unreachable);
-//}
-//
-//var b4_handle: promise = undefined;
-//
-//async fn b2() void {
-// const b3_handle = async b3() catch unreachable;
-// resume b4_handle;
-// cancel b4_handle;
-// defer {
-// defer_b2 = true;
-// }
-// const value = await b3_handle;
-// @panic("unreachable");
-//}
-//
-//async fn b3() i32 {
-// defer {
-// defer_b3 = true;
-// }
-// await (async b4() catch unreachable);
-// return 1234;
-//}
-//
-//async fn b4() void {
-// defer {
-// defer_b4 = true;
-// }
-// suspend {
-// b4_handle = @handle();
-// }
-// suspend;
-//}
+var defer_f1: bool = false;
+var defer_f2: bool = false;
+var defer_f3: bool = false;
+var f3_frame: anyframe = undefined;
+
+test "cancel forwards" {
+ _ = async atest1();
+ resume f3_frame;
+}
+
+fn atest1() void {
+ const p = async f1();
+ cancel &p;
+ expect(defer_f1);
+ expect(defer_f2);
+ expect(defer_f3);
+}
+
+async fn f1() void {
+ defer {
+ defer_f1 = true;
+ }
+ var f2_frame = async f2();
+ await f2_frame;
+}
+
+async fn f2() void {
+ defer {
+ defer_f2 = true;
+ }
+ f3();
+}
+
+async fn f3() void {
+ f3_frame = @frame();
+ defer {
+ defer_f3 = true;
+ }
+ suspend;
+}
+
+var defer_b1: bool = false;
+var defer_b2: bool = false;
+var defer_b3: bool = false;
+var defer_b4: bool = false;
+
+test "cancel backwards" {
+ _ = async b1();
+ resume b4_handle;
+ expect(defer_b1);
+ expect(defer_b2);
+ expect(defer_b3);
+ expect(defer_b4);
+}
+
+async fn b1() void {
+ defer {
+ defer_b1 = true;
+ }
+ b2();
+}
+
+var b4_handle: anyframe = undefined;
+
+async fn b2() void {
+ const b3_handle = async b3();
+ resume b4_handle;
+ defer {
+ defer_b2 = true;
+ }
+ const value = await b3_handle;
+ expect(value == 1234);
+}
+
+async fn b3() i32 {
+ defer {
+ defer_b3 = true;
+ }
+ b4();
+ return 1234;
+}
+
+async fn b4() void {
+ defer {
+ defer_b4 = true;
+ }
+ suspend {
+ b4_handle = @frame();
+ }
+ suspend;
+}
--
cgit v1.2.3
From e11cafbd4f11fa5eae0cbdf03854291834b4cd77 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 7 Aug 2019 10:56:37 -0400
Subject: cancel works on non-pointers
---
src/ir.cpp | 16 +++++++++++++---
test/stage1/behavior/cancel.zig | 16 ++++++++++++++++
2 files changed, 29 insertions(+), 3 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/src/ir.cpp b/src/ir.cpp
index 853cf4daa1..76e8c91f39 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -7845,7 +7845,7 @@ static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node)
return irb->codegen->invalid_instruction;
}
- IrInstruction *operand = ir_gen_node(irb, node->data.cancel_expr.expr, scope);
+ IrInstruction *operand = ir_gen_node_extra(irb, node->data.cancel_expr.expr, scope, LValPtr, nullptr);
if (operand == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
@@ -24496,10 +24496,20 @@ static IrInstruction *ir_analyze_instruction_suspend_finish(IrAnalyze *ira,
}
static IrInstruction *ir_analyze_instruction_cancel(IrAnalyze *ira, IrInstructionCancel *instruction) {
- IrInstruction *frame = instruction->frame->child;
- if (type_is_invalid(frame->value.type))
+ IrInstruction *frame_ptr = instruction->frame->child;
+ if (type_is_invalid(frame_ptr->value.type))
return ira->codegen->invalid_instruction;
+ IrInstruction *frame;
+ if (frame_ptr->value.type->id == ZigTypeIdPointer &&
+ frame_ptr->value.type->data.pointer.ptr_len == PtrLenSingle &&
+ frame_ptr->value.type->data.pointer.child_type->id == ZigTypeIdCoroFrame)
+ {
+ frame = frame_ptr;
+ } else {
+ frame = ir_get_deref(ira, &instruction->base, frame_ptr, nullptr);
+ }
+
ZigType *any_frame_type = get_any_frame_type(ira->codegen, nullptr);
IrInstruction *casted_frame = ir_implicit_cast(ira, frame, any_frame_type);
if (type_is_invalid(casted_frame->value.type))
diff --git a/test/stage1/behavior/cancel.zig b/test/stage1/behavior/cancel.zig
index c8636212b0..b546857ae3 100644
--- a/test/stage1/behavior/cancel.zig
+++ b/test/stage1/behavior/cancel.zig
@@ -92,3 +92,19 @@ async fn b4() void {
}
suspend;
}
+
+test "cancel on a non-pointer" {
+ const S = struct {
+ fn doTheTest() void {
+ _ = async atest();
+ }
+ fn atest() void {
+ var f = async func();
+ cancel f;
+ }
+ fn func() void {
+ suspend;
+ }
+ };
+ S.doTheTest();
+}
--
cgit v1.2.3
From 8621e3b5bd814005129b58469d93c2499e3d085e Mon Sep 17 00:00:00 2001
From: Sam Tebbs
Date: Wed, 7 Aug 2019 11:51:12 +0100
Subject: Don't emit clang error if source or filename pointer is null
---
src/ir.cpp | 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/src/ir.cpp b/src/ir.cpp
index 65a21a418d..2b096a3383 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -21312,12 +21312,15 @@ static IrInstruction *ir_analyze_instruction_c_import(IrAnalyze *ira, IrInstruct
}
for (size_t i = 0; i < errors_len; i += 1) {
Stage2ErrorMsg *clang_err = &errors_ptr[i];
- ErrorMsg *err_msg = err_msg_create_with_offset(
- clang_err->filename_ptr ?
- buf_create_from_mem(clang_err->filename_ptr, clang_err->filename_len) : buf_alloc(),
- clang_err->line, clang_err->column, clang_err->offset, clang_err->source,
- buf_create_from_mem(clang_err->msg_ptr, clang_err->msg_len));
- err_msg_add_note(parent_err_msg, err_msg);
+ // Clang can emit "too many errors, stopping now", in which case `source` and `filename_ptr` are null
+ if (clang_err->source && clang_err->filename_ptr) {
+ ErrorMsg *err_msg = err_msg_create_with_offset(
+ clang_err->filename_ptr ?
+ buf_create_from_mem(clang_err->filename_ptr, clang_err->filename_len) : buf_alloc(),
+ clang_err->line, clang_err->column, clang_err->offset, clang_err->source,
+ buf_create_from_mem(clang_err->msg_ptr, clang_err->msg_len));
+ err_msg_add_note(parent_err_msg, err_msg);
+ }
}
return ira->codegen->invalid_instruction;
--
cgit v1.2.3
From 34bfdf193aee4cb4fc931c6cc4ee82ef0a3a506f Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 8 Aug 2019 11:37:49 -0400
Subject: cancel, defer, errdefer all working as intended now
---
BRANCH_TODO | 3 +-
src/all_types.hpp | 7 ++
src/codegen.cpp | 14 ++++
src/ir.cpp | 157 ++++++++++++++++++++++++++++--------
src/ir_print.cpp | 8 ++
test/stage1/behavior/cancel.zig | 9 ++-
test/stage1/behavior/coroutines.zig | 107 ++++++++++++++++++------
7 files changed, 243 insertions(+), 62 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/BRANCH_TODO b/BRANCH_TODO
index bf7fc98310..d7f6b31dd5 100644
--- a/BRANCH_TODO
+++ b/BRANCH_TODO
@@ -2,8 +2,7 @@
* compile error for error: expected anyframe->T, found 'i32'
* await of a non async function
* async call on a non async function
- * cancel
- * defer and errdefer
+ * a test where an async function destroys its own frame in a defer
* implicit cast of normal function to async function should be allowed when it is inferred to be async
* revive std.event.Loop
* @typeInfo for @Frame(func)
diff --git a/src/all_types.hpp b/src/all_types.hpp
index a7fb542ad3..e6daa1c726 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -2363,6 +2363,7 @@ enum IrInstructionId {
IrInstructionIdAwaitSrc,
IrInstructionIdAwaitGen,
IrInstructionIdCoroResume,
+ IrInstructionIdTestCancelRequested,
};
struct IrInstruction {
@@ -3636,6 +3637,12 @@ struct IrInstructionCoroResume {
IrInstruction *frame;
};
+struct IrInstructionTestCancelRequested {
+ IrInstruction base;
+
+ bool use_return_begin_prev_value;
+};
+
enum ResultLocId {
ResultLocIdInvalid,
ResultLocIdNone,
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 86bd48c894..00458c7665 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -5557,6 +5557,18 @@ static LLVMValueRef ir_render_frame_size(CodeGen *g, IrExecutable *executable,
return gen_frame_size(g, fn_val);
}
+static LLVMValueRef ir_render_test_cancel_requested(CodeGen *g, IrExecutable *executable,
+ IrInstructionTestCancelRequested *instruction)
+{
+ if (!fn_is_async(g->cur_fn))
+ return LLVMConstInt(LLVMInt1Type(), 0, false);
+ if (instruction->use_return_begin_prev_value) {
+ return LLVMBuildTrunc(g->builder, g->cur_async_prev_val, LLVMInt1Type(), "");
+ } else {
+ zig_panic("TODO");
+ }
+}
+
static void set_debug_location(CodeGen *g, IrInstruction *instruction) {
AstNode *source_node = instruction->source_node;
Scope *scope = instruction->scope;
@@ -5810,6 +5822,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_frame_size(g, executable, (IrInstructionFrameSizeGen *)instruction);
case IrInstructionIdAwaitGen:
return ir_render_await(g, executable, (IrInstructionAwaitGen *)instruction);
+ case IrInstructionIdTestCancelRequested:
+ return ir_render_test_cancel_requested(g, executable, (IrInstructionTestCancelRequested *)instruction);
}
zig_unreachable();
}
diff --git a/src/ir.cpp b/src/ir.cpp
index 76e8c91f39..b2389d1501 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -26,6 +26,7 @@ struct IrBuilder {
CodeGen *codegen;
IrExecutable *exec;
IrBasicBlock *current_basic_block;
+ AstNode *main_block_node;
};
struct IrAnalyze {
@@ -1061,6 +1062,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroResume *) {
return IrInstructionIdCoroResume;
}
+static constexpr IrInstructionId ir_instruction_id(IrInstructionTestCancelRequested *) {
+ return IrInstructionIdTestCancelRequested;
+}
+
template
static T *ir_create_instruction(IrBuilder *irb, Scope *scope, AstNode *source_node) {
T *special_instruction = allocate(1);
@@ -3320,6 +3325,16 @@ static IrInstruction *ir_build_coro_resume(IrBuilder *irb, Scope *scope, AstNode
return &instruction->base;
}
+static IrInstruction *ir_build_test_cancel_requested(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ bool use_return_begin_prev_value)
+{
+ IrInstructionTestCancelRequested *instruction = ir_build_instruction(irb, scope, source_node);
+ instruction->base.value.type = irb->codegen->builtin_types.entry_bool;
+ instruction->use_return_begin_prev_value = use_return_begin_prev_value;
+
+ return &instruction->base;
+}
+
static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_scope, size_t *results) {
results[ReturnKindUnconditional] = 0;
results[ReturnKindError] = 0;
@@ -3494,45 +3509,62 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
size_t defer_counts[2];
ir_count_defers(irb, scope, outer_scope, defer_counts);
bool have_err_defers = defer_counts[ReturnKindError] > 0;
- if (have_err_defers || irb->codegen->have_err_ret_tracing) {
- IrBasicBlock *err_block = ir_create_basic_block(irb, scope, "ErrRetErr");
- IrBasicBlock *ok_block = ir_create_basic_block(irb, scope, "ErrRetOk");
+ if (!have_err_defers && !irb->codegen->have_err_ret_tracing) {
+ // only generate unconditional defers
+ ir_gen_defers_for_block(irb, scope, outer_scope, false);
+ IrInstruction *result = ir_build_return(irb, scope, node, return_value);
+ result_loc_ret->base.source_instruction = result;
+ return result;
+ }
+ bool should_inline = ir_should_inline(irb->exec, scope);
+ bool need_test_cancel = !should_inline && have_err_defers;
- IrInstruction *is_err = ir_build_test_err_src(irb, scope, node, return_value, false, true);
+ IrBasicBlock *err_block = ir_create_basic_block(irb, scope, "ErrRetErr");
+ IrBasicBlock *normal_defers_block = ir_create_basic_block(irb, scope, "Defers");
+ IrBasicBlock *ok_block = need_test_cancel ?
+ ir_create_basic_block(irb, scope, "ErrRetOk") : normal_defers_block;
+ IrBasicBlock *all_defers_block = have_err_defers ? ir_create_basic_block(irb, scope, "ErrDefers") : normal_defers_block;
- bool should_inline = ir_should_inline(irb->exec, scope);
- IrInstruction *is_comptime;
- if (should_inline) {
- is_comptime = ir_build_const_bool(irb, scope, node, true);
- } else {
- is_comptime = ir_build_test_comptime(irb, scope, node, is_err);
- }
+ IrInstruction *is_err = ir_build_test_err_src(irb, scope, node, return_value, false, true);
- ir_mark_gen(ir_build_cond_br(irb, scope, node, is_err, err_block, ok_block, is_comptime));
- IrBasicBlock *ret_stmt_block = ir_create_basic_block(irb, scope, "RetStmt");
+ IrInstruction *force_comptime = ir_build_const_bool(irb, scope, node, should_inline);
+ IrInstruction *err_is_comptime;
+ if (should_inline) {
+ err_is_comptime = force_comptime;
+ } else {
+ err_is_comptime = ir_build_test_comptime(irb, scope, node, is_err);
+ }
- ir_set_cursor_at_end_and_append_block(irb, err_block);
- if (irb->codegen->have_err_ret_tracing && !should_inline) {
- ir_build_save_err_ret_addr(irb, scope, node);
- }
- ir_gen_defers_for_block(irb, scope, outer_scope, true);
- ir_build_br(irb, scope, node, ret_stmt_block, is_comptime);
+ ir_mark_gen(ir_build_cond_br(irb, scope, node, is_err, err_block, ok_block, err_is_comptime));
+ IrBasicBlock *ret_stmt_block = ir_create_basic_block(irb, scope, "RetStmt");
+ ir_set_cursor_at_end_and_append_block(irb, err_block);
+ if (irb->codegen->have_err_ret_tracing && !should_inline) {
+ ir_build_save_err_ret_addr(irb, scope, node);
+ }
+ ir_build_br(irb, scope, node, all_defers_block, err_is_comptime);
+
+ if (need_test_cancel) {
ir_set_cursor_at_end_and_append_block(irb, ok_block);
- ir_gen_defers_for_block(irb, scope, outer_scope, false);
- ir_build_br(irb, scope, node, ret_stmt_block, is_comptime);
+ IrInstruction *is_canceled = ir_build_test_cancel_requested(irb, scope, node, true);
+ ir_mark_gen(ir_build_cond_br(irb, scope, node, is_canceled,
+ all_defers_block, normal_defers_block, force_comptime));
+ }
- ir_set_cursor_at_end_and_append_block(irb, ret_stmt_block);
- IrInstruction *result = ir_build_return(irb, scope, node, return_value);
- result_loc_ret->base.source_instruction = result;
- return result;
- } else {
- // generate unconditional defers
- ir_gen_defers_for_block(irb, scope, outer_scope, false);
- IrInstruction *result = ir_build_return(irb, scope, node, return_value);
- result_loc_ret->base.source_instruction = result;
- return result;
+ if (all_defers_block != normal_defers_block) {
+ ir_set_cursor_at_end_and_append_block(irb, all_defers_block);
+ ir_gen_defers_for_block(irb, scope, outer_scope, true);
+ ir_build_br(irb, scope, node, ret_stmt_block, force_comptime);
}
+
+ ir_set_cursor_at_end_and_append_block(irb, normal_defers_block);
+ ir_gen_defers_for_block(irb, scope, outer_scope, false);
+ ir_build_br(irb, scope, node, ret_stmt_block, force_comptime);
+
+ ir_set_cursor_at_end_and_append_block(irb, ret_stmt_block);
+ IrInstruction *result = ir_build_return(irb, scope, node, return_value);
+ result_loc_ret->base.source_instruction = result;
+ return result;
}
case ReturnKindError:
{
@@ -3765,18 +3797,59 @@ static IrInstruction *ir_gen_block(IrBuilder *irb, Scope *parent_scope, AstNode
incoming_values.append(else_expr_result);
}
- if (block_node->data.block.name != nullptr) {
+ bool is_return_from_fn = block_node == irb->main_block_node;
+ if (!is_return_from_fn) {
ir_gen_defers_for_block(irb, child_scope, outer_block_scope, false);
+ }
+
+ IrInstruction *result;
+ if (block_node->data.block.name != nullptr) {
ir_mark_gen(ir_build_br(irb, parent_scope, block_node, scope_block->end_block, scope_block->is_comptime));
ir_set_cursor_at_end_and_append_block(irb, scope_block->end_block);
IrInstruction *phi = ir_build_phi(irb, parent_scope, block_node, incoming_blocks.length,
incoming_blocks.items, incoming_values.items, scope_block->peer_parent);
- return ir_expr_wrap(irb, parent_scope, phi, result_loc);
+ result = ir_expr_wrap(irb, parent_scope, phi, result_loc);
} else {
- ir_gen_defers_for_block(irb, child_scope, outer_block_scope, false);
IrInstruction *void_inst = ir_mark_gen(ir_build_const_void(irb, child_scope, block_node));
- return ir_lval_wrap(irb, parent_scope, void_inst, lval, result_loc);
+ result = ir_lval_wrap(irb, parent_scope, void_inst, lval, result_loc);
}
+ if (!is_return_from_fn)
+ return result;
+
+ // no need for save_err_ret_addr because this cannot return error
+ // but if it is a canceled async function we do need to run the errdefers
+
+ ir_mark_gen(ir_build_add_implicit_return_type(irb, child_scope, block_node, result));
+ result = ir_mark_gen(ir_build_return_begin(irb, child_scope, block_node, result));
+
+ size_t defer_counts[2];
+ ir_count_defers(irb, child_scope, outer_block_scope, defer_counts);
+ bool have_err_defers = defer_counts[ReturnKindError] > 0;
+ if (!have_err_defers) {
+ // only generate unconditional defers
+ ir_gen_defers_for_block(irb, child_scope, outer_block_scope, false);
+ return ir_mark_gen(ir_build_return(irb, child_scope, result->source_node, result));
+ }
+ IrInstruction *is_canceled = ir_build_test_cancel_requested(irb, child_scope, block_node, true);
+ IrBasicBlock *all_defers_block = ir_create_basic_block(irb, child_scope, "ErrDefers");
+ IrBasicBlock *normal_defers_block = ir_create_basic_block(irb, child_scope, "Defers");
+ IrBasicBlock *ret_stmt_block = ir_create_basic_block(irb, child_scope, "RetStmt");
+ bool should_inline = ir_should_inline(irb->exec, child_scope);
+ IrInstruction *errdefers_is_comptime = ir_build_const_bool(irb, child_scope, block_node,
+ should_inline || !have_err_defers);
+ ir_mark_gen(ir_build_cond_br(irb, child_scope, block_node, is_canceled,
+ all_defers_block, normal_defers_block, errdefers_is_comptime));
+
+ ir_set_cursor_at_end_and_append_block(irb, all_defers_block);
+ ir_gen_defers_for_block(irb, child_scope, outer_block_scope, true);
+ ir_build_br(irb, child_scope, block_node, ret_stmt_block, errdefers_is_comptime);
+
+ ir_set_cursor_at_end_and_append_block(irb, normal_defers_block);
+ ir_gen_defers_for_block(irb, child_scope, outer_block_scope, false);
+ ir_build_br(irb, child_scope, block_node, ret_stmt_block, errdefers_is_comptime);
+
+ ir_set_cursor_at_end_and_append_block(irb, ret_stmt_block);
+ return ir_mark_gen(ir_build_return(irb, child_scope, result->source_node, result));
}
static IrInstruction *ir_gen_bin_op_id(IrBuilder *irb, Scope *scope, AstNode *node, IrBinOp op_id) {
@@ -8111,6 +8184,7 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
irb->codegen = codegen;
irb->exec = ir_executable;
+ irb->main_block_node = node;
IrBasicBlock *entry_block = ir_create_basic_block(irb, scope, "Entry");
ir_set_cursor_at_end_and_append_block(irb, entry_block);
@@ -24603,6 +24677,16 @@ static IrInstruction *ir_analyze_instruction_coro_resume(IrAnalyze *ira, IrInstr
return ir_build_coro_resume(&ira->new_irb, instruction->base.scope, instruction->base.source_node, casted_frame);
}
+static IrInstruction *ir_analyze_instruction_test_cancel_requested(IrAnalyze *ira,
+ IrInstructionTestCancelRequested *instruction)
+{
+ if (ir_should_inline(ira->new_irb.exec, instruction->base.scope)) {
+ return ir_const_bool(ira, &instruction->base, false);
+ }
+ return ir_build_test_cancel_requested(&ira->new_irb, instruction->base.scope, instruction->base.source_node,
+ instruction->use_return_begin_prev_value);
+}
+
static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction *instruction) {
switch (instruction->id) {
case IrInstructionIdInvalid:
@@ -24900,6 +24984,8 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_coro_resume(ira, (IrInstructionCoroResume *)instruction);
case IrInstructionIdAwaitSrc:
return ir_analyze_instruction_await(ira, (IrInstructionAwaitSrc *)instruction);
+ case IrInstructionIdTestCancelRequested:
+ return ir_analyze_instruction_test_cancel_requested(ira, (IrInstructionTestCancelRequested *)instruction);
}
zig_unreachable();
}
@@ -25134,6 +25220,7 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdHasDecl:
case IrInstructionIdAllocaSrc:
case IrInstructionIdAllocaGen:
+ case IrInstructionIdTestCancelRequested:
return false;
case IrInstructionIdAsm:
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index 0348cfc986..8b8445f625 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -1550,6 +1550,11 @@ static void ir_print_await_gen(IrPrint *irp, IrInstructionAwaitGen *instruction)
fprintf(irp->f, ")");
}
+static void ir_print_test_cancel_requested(IrPrint *irp, IrInstructionTestCancelRequested *instruction) {
+ const char *arg = instruction->use_return_begin_prev_value ? "UseReturnBeginPrevValue" : "AdditionalCheck";
+ fprintf(irp->f, "@testCancelRequested(%s)", arg);
+}
+
static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
ir_print_prefix(irp, instruction);
switch (instruction->id) {
@@ -2032,6 +2037,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdAwaitGen:
ir_print_await_gen(irp, (IrInstructionAwaitGen *)instruction);
break;
+ case IrInstructionIdTestCancelRequested:
+ ir_print_test_cancel_requested(irp, (IrInstructionTestCancelRequested *)instruction);
+ break;
}
fprintf(irp->f, "\n");
}
diff --git a/test/stage1/behavior/cancel.zig b/test/stage1/behavior/cancel.zig
index b546857ae3..5dedb20159 100644
--- a/test/stage1/behavior/cancel.zig
+++ b/test/stage1/behavior/cancel.zig
@@ -48,8 +48,9 @@ var defer_b3: bool = false;
var defer_b4: bool = false;
test "cancel backwards" {
- _ = async b1();
+ var b1_frame = async b1();
resume b4_handle;
+ _ = async awaitAFrame(&b1_frame);
expect(defer_b1);
expect(defer_b2);
expect(defer_b3);
@@ -63,7 +64,7 @@ async fn b1() void {
b2();
}
-var b4_handle: anyframe = undefined;
+var b4_handle: anyframe->void = undefined;
async fn b2() void {
const b3_handle = async b3();
@@ -93,6 +94,10 @@ async fn b4() void {
suspend;
}
+fn awaitAFrame(f: anyframe->void) void {
+ await f;
+}
+
test "cancel on a non-pointer" {
const S = struct {
fn doTheTest() void {
diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig
index d11f6831b3..2fd5912aac 100644
--- a/test/stage1/behavior/coroutines.zig
+++ b/test/stage1/behavior/coroutines.zig
@@ -134,29 +134,44 @@ test "@frameSize" {
}
test "coroutine suspend, resume" {
- seq('a');
- const p = async testAsyncSeq();
- seq('c');
- resume p;
- seq('f');
- // `cancel` is now a suspend point so it cannot be done here
- seq('g');
+ const S = struct {
+ var frame: anyframe = undefined;
- expect(std.mem.eql(u8, points, "abcdefg"));
-}
-async fn testAsyncSeq() void {
- defer seq('e');
+ fn doTheTest() void {
+ _ = async amain();
+ seq('d');
+ resume frame;
+ seq('h');
- seq('b');
- suspend;
- seq('d');
-}
-var points = [_]u8{0} ** "abcdefg".len;
-var index: usize = 0;
+ expect(std.mem.eql(u8, points, "abcdefgh"));
+ }
+
+ fn amain() void {
+ seq('a');
+ var f = async testAsyncSeq();
+ seq('c');
+ cancel f;
+ seq('g');
+ }
+
+ fn testAsyncSeq() void {
+ defer seq('f');
-fn seq(c: u8) void {
- points[index] = c;
- index += 1;
+ seq('b');
+ suspend {
+ frame = @frame();
+ }
+ seq('e');
+ }
+ var points = [_]u8{'x'} ** "abcdefgh".len;
+ var index: usize = 0;
+
+ fn seq(c: u8) void {
+ points[index] = c;
+ index += 1;
+ }
+ };
+ S.doTheTest();
}
test "coroutine suspend with block" {
@@ -267,12 +282,19 @@ test "async fn pointer in a struct field" {
};
var foo = Foo{ .bar = simpleAsyncFn2 };
var bytes: [64]u8 = undefined;
- const p = @asyncCall(&bytes, {}, foo.bar, &data);
- comptime expect(@typeOf(p) == anyframe->void);
+ const f = @asyncCall(&bytes, {}, foo.bar, &data);
+ comptime expect(@typeOf(f) == anyframe->void);
expect(data == 2);
- resume p;
+ resume f;
+ expect(data == 2);
+ _ = async doTheAwait(f);
expect(data == 4);
}
+
+fn doTheAwait(f: anyframe->void) void {
+ await f;
+}
+
async fn simpleAsyncFn2(y: *i32) void {
defer y.* += 2;
y.* += 1;
@@ -507,3 +529,42 @@ test "call async function which has struct return type" {
};
S.doTheTest();
}
+
+test "errdefers in scope get run when canceling async fn call" {
+ const S = struct {
+ var frame: anyframe = undefined;
+ var x: u32 = 0;
+
+ fn doTheTest() void {
+ x = 9;
+ _ = async cancelIt();
+ resume frame;
+ expect(x == 6);
+
+ x = 9;
+ _ = async awaitIt();
+ resume frame;
+ expect(x == 11);
+ }
+
+ fn cancelIt() void {
+ var f = async func();
+ cancel f;
+ }
+
+ fn awaitIt() void {
+ var f = async func();
+ await f;
+ }
+
+ fn func() void {
+ defer x += 1;
+ errdefer x /= 2;
+ defer x += 1;
+ suspend {
+ frame = @frame();
+ }
+ }
+ };
+ S.doTheTest();
+}
--
cgit v1.2.3
From bfa1d12fbad2031402fbafe51c3a0c481fe69351 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 8 Aug 2019 13:44:57 -0400
Subject: better compile errors when frame depends on itself
---
src/analyze.cpp | 11 ++++++++---
src/ir.cpp | 12 +++++++++---
src/ir.hpp | 2 ++
test/compile_errors.zig | 30 ++++++++++++++++++++++++++++++
4 files changed, 49 insertions(+), 6 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/src/analyze.cpp b/src/analyze.cpp
index aa5c3c88f7..cc90573f41 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -5179,11 +5179,14 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) {
if (fn->anal_state == FnAnalStateInvalid)
return ErrorSemanticAnalyzeFail;
break;
- case FnAnalStateProbing:
- add_node_error(g, fn->proto_node,
+ case FnAnalStateProbing: {
+ ErrorMsg *msg = add_node_error(g, fn->proto_node,
buf_sprintf("cannot resolve '%s': function not fully analyzed yet",
buf_ptr(&frame_type->name)));
+ ir_add_analysis_trace(fn->ir_executable.analysis, msg,
+ buf_sprintf("depends on its own frame here"));
return ErrorSemanticAnalyzeFail;
+ }
}
ZigType *fn_type = get_async_fn_type(g, fn->type_entry);
@@ -5201,8 +5204,10 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) {
if (callee->anal_state == FnAnalStateProbing) {
ErrorMsg *msg = add_node_error(g, fn->proto_node,
buf_sprintf("unable to determine async function frame of '%s'", buf_ptr(&fn->symbol_name)));
- add_error_note(g, msg, call->base.source_node,
+ ErrorMsg *note = add_error_note(g, msg, call->base.source_node,
buf_sprintf("analysis of function '%s' depends on the frame", buf_ptr(&callee->symbol_name)));
+ ir_add_analysis_trace(callee->ir_executable.analysis, note,
+ buf_sprintf("depends on the frame here"));
return ErrorSemanticAnalyzeFail;
}
diff --git a/src/ir.cpp b/src/ir.cpp
index b2389d1501..f92434bb33 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -8217,18 +8217,24 @@ bool ir_gen_fn(CodeGen *codegen, ZigFn *fn_entry) {
return ir_gen(codegen, body_node, fn_entry->child_scope, ir_executable);
}
-static void add_call_stack_errors(CodeGen *codegen, IrExecutable *exec, ErrorMsg *err_msg, int limit) {
+static void ir_add_call_stack_errors(CodeGen *codegen, IrExecutable *exec, ErrorMsg *err_msg, int limit) {
if (!exec || !exec->source_node || limit < 0) return;
add_error_note(codegen, err_msg, exec->source_node, buf_sprintf("called from here"));
- add_call_stack_errors(codegen, exec->parent_exec, err_msg, limit - 1);
+ ir_add_call_stack_errors(codegen, exec->parent_exec, err_msg, limit - 1);
+}
+
+void ir_add_analysis_trace(IrAnalyze *ira, ErrorMsg *err_msg, Buf *text) {
+ IrInstruction *old_instruction = ira->old_irb.current_basic_block->instruction_list.at(ira->instruction_index);
+ add_error_note(ira->codegen, err_msg, old_instruction->source_node, text);
+ ir_add_call_stack_errors(ira->codegen, ira->new_irb.exec, err_msg, 10);
}
static ErrorMsg *exec_add_error_node(CodeGen *codegen, IrExecutable *exec, AstNode *source_node, Buf *msg) {
invalidate_exec(exec);
ErrorMsg *err_msg = add_node_error(codegen, source_node, msg);
if (exec->parent_exec) {
- add_call_stack_errors(codegen, exec, err_msg, 10);
+ ir_add_call_stack_errors(codegen, exec, err_msg, 10);
}
return err_msg;
}
diff --git a/src/ir.hpp b/src/ir.hpp
index 597624e2e6..3761c5a97d 100644
--- a/src/ir.hpp
+++ b/src/ir.hpp
@@ -28,4 +28,6 @@ ConstExprValue *const_ptr_pointee(IrAnalyze *ira, CodeGen *codegen, ConstExprVal
AstNode *source_node);
const char *float_op_to_name(BuiltinFnId op, bool llvm_name);
+void ir_add_analysis_trace(IrAnalyze *ira, ErrorMsg *err_msg, Buf *text);
+
#endif
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 2941cadcf5..810e40b18b 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -2,6 +2,36 @@ const tests = @import("tests.zig");
const builtin = @import("builtin");
pub fn addCases(cases: *tests.CompileErrorContext) void {
+ cases.add(
+ "async function indirectly depends on its own frame",
+ \\export fn entry() void {
+ \\ _ = async amain();
+ \\}
+ \\async fn amain() void {
+ \\ other();
+ \\}
+ \\fn other() void {
+ \\ var x: [@sizeOf(@Frame(amain))]u8 = undefined;
+ \\}
+ ,
+ "tmp.zig:4:1: error: unable to determine async function frame of 'amain'",
+ "tmp.zig:5:10: note: analysis of function 'other' depends on the frame",
+ "tmp.zig:8:13: note: depends on the frame here",
+ );
+
+ cases.add(
+ "async function depends on its own frame",
+ \\export fn entry() void {
+ \\ _ = async amain();
+ \\}
+ \\async fn amain() void {
+ \\ var x: [@sizeOf(@Frame(amain))]u8 = undefined;
+ \\}
+ ,
+ "tmp.zig:4:1: error: cannot resolve '@Frame(amain)': function not fully analyzed yet",
+ "tmp.zig:5:13: note: depends on its own frame here",
+ );
+
cases.add(
"non async function pointer passed to @asyncCall",
\\export fn entry() void {
--
cgit v1.2.3
From cfe84423c97eb2121138c2de5876c47782cd6dda Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 8 Aug 2019 15:13:05 -0400
Subject: fix segfault with var args
---
src/ir.cpp | 8 ++-
std/event/fs.zig | 120 +++++++++++++++++++--------------------
std/event/loop.zig | 163 +++++++++++++++++++++++++----------------------------
3 files changed, 143 insertions(+), 148 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/src/ir.cpp b/src/ir.cpp
index f92434bb33..20a21bb5c3 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -15746,7 +15746,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
size_t impl_param_count = impl_fn_type_id->param_count;
if (call_instruction->is_async) {
IrInstruction *result = ir_analyze_async_call(ira, call_instruction, impl_fn, impl_fn->type_entry,
- nullptr, casted_args, call_param_count, casted_new_stack);
+ nullptr, casted_args, impl_param_count, casted_new_stack);
return ir_finish_anal(ira, result);
}
@@ -15756,7 +15756,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
IrInstructionCallGen *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base,
impl_fn, nullptr, impl_param_count, casted_args, fn_inline,
- call_instruction->is_async, casted_new_stack, result_loc,
+ false, casted_new_stack, result_loc,
impl_fn_type_id->return_type);
parent_fn_entry->call_list.append(new_call_instruction);
@@ -15799,7 +15799,9 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
casted_args[next_arg_index] = casted_arg;
next_arg_index += 1;
}
- for (size_t call_i = 0; call_i < call_instruction->arg_count; call_i += 1) {
+ size_t iter_count = (call_param_count < call_instruction->arg_count) ?
+ call_param_count : call_instruction->arg_count;
+ for (size_t call_i = 0; call_i < iter_count; call_i += 1) {
IrInstruction *old_arg = call_instruction->args[call_i]->child;
if (type_is_invalid(old_arg->value.type))
return ira->codegen->invalid_instruction;
diff --git a/std/event/fs.zig b/std/event/fs.zig
index c25426b98a..3ead77e949 100644
--- a/std/event/fs.zig
+++ b/std/event/fs.zig
@@ -83,10 +83,10 @@ pub async fn pwritev(loop: *Loop, fd: fd_t, data: []const []const u8, offset: us
resume @handle();
}
switch (builtin.os) {
- builtin.Os.macosx,
- builtin.Os.linux,
- builtin.Os.freebsd,
- builtin.Os.netbsd,
+ .macosx,
+ .linux,
+ .freebsd,
+ .netbsd,
=> {
const iovecs = try loop.allocator.alloc(os.iovec_const, data.len);
defer loop.allocator.free(iovecs);
@@ -100,7 +100,7 @@ pub async fn pwritev(loop: *Loop, fd: fd_t, data: []const []const u8, offset: us
return await (async pwritevPosix(loop, fd, iovecs, offset) catch unreachable);
},
- builtin.Os.windows => {
+ .windows => {
const data_copy = try std.mem.dupe(loop.allocator, []const u8, data);
defer loop.allocator.free(data_copy);
return await (async pwritevWindows(loop, fd, data, offset) catch unreachable);
@@ -220,10 +220,10 @@ pub async fn preadv(loop: *Loop, fd: fd_t, data: []const []u8, offset: usize) PR
assert(data.len != 0);
switch (builtin.os) {
- builtin.Os.macosx,
- builtin.Os.linux,
- builtin.Os.freebsd,
- builtin.Os.netbsd,
+ .macosx,
+ .linux,
+ .freebsd,
+ .netbsd,
=> {
const iovecs = try loop.allocator.alloc(os.iovec, data.len);
defer loop.allocator.free(iovecs);
@@ -237,7 +237,7 @@ pub async fn preadv(loop: *Loop, fd: fd_t, data: []const []u8, offset: usize) PR
return await (async preadvPosix(loop, fd, iovecs, offset) catch unreachable);
},
- builtin.Os.windows => {
+ .windows => {
const data_copy = try std.mem.dupe(loop.allocator, []u8, data);
defer loop.allocator.free(data_copy);
return await (async preadvWindows(loop, fd, data_copy, offset) catch unreachable);
@@ -403,12 +403,12 @@ pub async fn openPosix(
pub async fn openRead(loop: *Loop, path: []const u8) File.OpenError!fd_t {
switch (builtin.os) {
- builtin.Os.macosx, builtin.Os.linux, builtin.Os.freebsd, builtin.Os.netbsd => {
+ .macosx, .linux, .freebsd, .netbsd => {
const flags = os.O_LARGEFILE | os.O_RDONLY | os.O_CLOEXEC;
return await (async openPosix(loop, path, flags, File.default_mode) catch unreachable);
},
- builtin.Os.windows => return windows.CreateFile(
+ .windows => return windows.CreateFile(
path,
windows.GENERIC_READ,
windows.FILE_SHARE_READ,
@@ -431,15 +431,15 @@ pub async fn openWrite(loop: *Loop, path: []const u8) File.OpenError!fd_t {
/// Creates if does not exist. Truncates the file if it exists.
pub async fn openWriteMode(loop: *Loop, path: []const u8, mode: File.Mode) File.OpenError!fd_t {
switch (builtin.os) {
- builtin.Os.macosx,
- builtin.Os.linux,
- builtin.Os.freebsd,
- builtin.Os.netbsd,
+ .macosx,
+ .linux,
+ .freebsd,
+ .netbsd,
=> {
const flags = os.O_LARGEFILE | os.O_WRONLY | os.O_CREAT | os.O_CLOEXEC | os.O_TRUNC;
return await (async openPosix(loop, path, flags, File.default_mode) catch unreachable);
},
- builtin.Os.windows => return windows.CreateFile(
+ .windows => return windows.CreateFile(
path,
windows.GENERIC_WRITE,
windows.FILE_SHARE_WRITE | windows.FILE_SHARE_READ | windows.FILE_SHARE_DELETE,
@@ -459,12 +459,12 @@ pub async fn openReadWrite(
mode: File.Mode,
) File.OpenError!fd_t {
switch (builtin.os) {
- builtin.Os.macosx, builtin.Os.linux, builtin.Os.freebsd, builtin.Os.netbsd => {
+ .macosx, .linux, .freebsd, .netbsd => {
const flags = os.O_LARGEFILE | os.O_RDWR | os.O_CREAT | os.O_CLOEXEC;
return await (async openPosix(loop, path, flags, mode) catch unreachable);
},
- builtin.Os.windows => return windows.CreateFile(
+ .windows => return windows.CreateFile(
path,
windows.GENERIC_WRITE | windows.GENERIC_READ,
windows.FILE_SHARE_WRITE | windows.FILE_SHARE_READ | windows.FILE_SHARE_DELETE,
@@ -489,9 +489,9 @@ pub const CloseOperation = struct {
os_data: OsData,
const OsData = switch (builtin.os) {
- builtin.Os.linux, builtin.Os.macosx, builtin.Os.freebsd, builtin.Os.netbsd => OsDataPosix,
+ .linux, .macosx, .freebsd, .netbsd => OsDataPosix,
- builtin.Os.windows => struct {
+ .windows => struct {
handle: ?fd_t,
},
@@ -508,8 +508,8 @@ pub const CloseOperation = struct {
self.* = CloseOperation{
.loop = loop,
.os_data = switch (builtin.os) {
- builtin.Os.linux, builtin.Os.macosx, builtin.Os.freebsd, builtin.Os.netbsd => initOsDataPosix(self),
- builtin.Os.windows => OsData{ .handle = null },
+ .linux, .macosx, .freebsd, .netbsd => initOsDataPosix(self),
+ .windows => OsData{ .handle = null },
else => @compileError("Unsupported OS"),
},
};
@@ -535,10 +535,10 @@ pub const CloseOperation = struct {
/// Defer this after creating.
pub fn finish(self: *CloseOperation) void {
switch (builtin.os) {
- builtin.Os.linux,
- builtin.Os.macosx,
- builtin.Os.freebsd,
- builtin.Os.netbsd,
+ .linux,
+ .macosx,
+ .freebsd,
+ .netbsd,
=> {
if (self.os_data.have_fd) {
self.loop.posixFsRequest(&self.os_data.close_req_node);
@@ -546,7 +546,7 @@ pub const CloseOperation = struct {
self.loop.allocator.destroy(self);
}
},
- builtin.Os.windows => {
+ .windows => {
if (self.os_data.handle) |handle| {
os.close(handle);
}
@@ -558,15 +558,15 @@ pub const CloseOperation = struct {
pub fn setHandle(self: *CloseOperation, handle: fd_t) void {
switch (builtin.os) {
- builtin.Os.linux,
- builtin.Os.macosx,
- builtin.Os.freebsd,
- builtin.Os.netbsd,
+ .linux,
+ .macosx,
+ .freebsd,
+ .netbsd,
=> {
self.os_data.close_req_node.data.msg.Close.fd = handle;
self.os_data.have_fd = true;
},
- builtin.Os.windows => {
+ .windows => {
self.os_data.handle = handle;
},
else => @compileError("Unsupported OS"),
@@ -576,14 +576,14 @@ pub const CloseOperation = struct {
/// Undo a `setHandle`.
pub fn clearHandle(self: *CloseOperation) void {
switch (builtin.os) {
- builtin.Os.linux,
- builtin.Os.macosx,
- builtin.Os.freebsd,
- builtin.Os.netbsd,
+ .linux,
+ .macosx,
+ .freebsd,
+ .netbsd,
=> {
self.os_data.have_fd = false;
},
- builtin.Os.windows => {
+ .windows => {
self.os_data.handle = null;
},
else => @compileError("Unsupported OS"),
@@ -592,15 +592,15 @@ pub const CloseOperation = struct {
pub fn getHandle(self: *CloseOperation) fd_t {
switch (builtin.os) {
- builtin.Os.linux,
- builtin.Os.macosx,
- builtin.Os.freebsd,
- builtin.Os.netbsd,
+ .linux,
+ .macosx,
+ .freebsd,
+ .netbsd,
=> {
assert(self.os_data.have_fd);
return self.os_data.close_req_node.data.msg.Close.fd;
},
- builtin.Os.windows => {
+ .windows => {
return self.os_data.handle.?;
},
else => @compileError("Unsupported OS"),
@@ -617,12 +617,12 @@ pub async fn writeFile(loop: *Loop, path: []const u8, contents: []const u8) !voi
/// contents must remain alive until writeFile completes.
pub async fn writeFileMode(loop: *Loop, path: []const u8, contents: []const u8, mode: File.Mode) !void {
switch (builtin.os) {
- builtin.Os.linux,
- builtin.Os.macosx,
- builtin.Os.freebsd,
- builtin.Os.netbsd,
+ .linux,
+ .macosx,
+ .freebsd,
+ .netbsd,
=> return await (async writeFileModeThread(loop, path, contents, mode) catch unreachable),
- builtin.Os.windows => return await (async writeFileWindows(loop, path, contents) catch unreachable),
+ .windows => return await (async writeFileWindows(loop, path, contents) catch unreachable),
else => @compileError("Unsupported OS"),
}
}
@@ -728,7 +728,7 @@ pub fn Watch(comptime V: type) type {
os_data: OsData,
const OsData = switch (builtin.os) {
- builtin.Os.macosx, builtin.Os.freebsd, builtin.Os.netbsd => struct {
+ .macosx, .freebsd, .netbsd => struct {
file_table: FileTable,
table_lock: event.Lock,
@@ -739,8 +739,8 @@ pub fn Watch(comptime V: type) type {
};
},
- builtin.Os.linux => LinuxOsData,
- builtin.Os.windows => WindowsOsData,
+ .linux => LinuxOsData,
+ .windows => WindowsOsData,
else => @compileError("Unsupported OS"),
};
@@ -793,7 +793,7 @@ pub fn Watch(comptime V: type) type {
errdefer channel.destroy();
switch (builtin.os) {
- builtin.Os.linux => {
+ .linux => {
const inotify_fd = try os.inotify_init1(os.linux.IN_NONBLOCK | os.linux.IN_CLOEXEC);
errdefer os.close(inotify_fd);
@@ -802,7 +802,7 @@ pub fn Watch(comptime V: type) type {
return result;
},
- builtin.Os.windows => {
+ .windows => {
const self = try loop.allocator.create(Self);
errdefer loop.allocator.destroy(self);
self.* = Self{
@@ -817,7 +817,7 @@ pub fn Watch(comptime V: type) type {
return self;
},
- builtin.Os.macosx, builtin.Os.freebsd, builtin.Os.netbsd => {
+ .macosx, .freebsd, .netbsd => {
const self = try loop.allocator.create(Self);
errdefer loop.allocator.destroy(self);
@@ -837,7 +837,7 @@ pub fn Watch(comptime V: type) type {
/// All addFile calls and removeFile calls must have completed.
pub fn destroy(self: *Self) void {
switch (builtin.os) {
- builtin.Os.macosx, builtin.Os.freebsd, builtin.Os.netbsd => {
+ .macosx, .freebsd, .netbsd => {
// TODO we need to cancel the coroutines before destroying the lock
self.os_data.table_lock.deinit();
var it = self.os_data.file_table.iterator();
@@ -847,8 +847,8 @@ pub fn Watch(comptime V: type) type {
}
self.channel.destroy();
},
- builtin.Os.linux => cancel self.os_data.putter,
- builtin.Os.windows => {
+ .linux => cancel self.os_data.putter,
+ .windows => {
while (self.os_data.all_putters.get()) |putter_node| {
cancel putter_node.data;
}
@@ -879,9 +879,9 @@ pub fn Watch(comptime V: type) type {
pub async fn addFile(self: *Self, file_path: []const u8, value: V) !?V {
switch (builtin.os) {
- builtin.Os.macosx, builtin.Os.freebsd, builtin.Os.netbsd => return await (async addFileKEvent(self, file_path, value) catch unreachable),
- builtin.Os.linux => return await (async addFileLinux(self, file_path, value) catch unreachable),
- builtin.Os.windows => return await (async addFileWindows(self, file_path, value) catch unreachable),
+ .macosx, .freebsd, .netbsd => return await (async addFileKEvent(self, file_path, value) catch unreachable),
+ .linux => return await (async addFileLinux(self, file_path, value) catch unreachable),
+ .windows => return await (async addFileWindows(self, file_path, value) catch unreachable),
else => @compileError("Unsupported OS"),
}
}
diff --git a/std/event/loop.zig b/std/event/loop.zig
index aacd4bd7aa..70cd8d2ab6 100644
--- a/std/event/loop.zig
+++ b/std/event/loop.zig
@@ -13,7 +13,7 @@ const Thread = std.Thread;
pub const Loop = struct {
allocator: *mem.Allocator,
- next_tick_queue: std.atomic.Queue(promise),
+ next_tick_queue: std.atomic.Queue(anyframe),
os_data: OsData,
final_resume_node: ResumeNode,
pending_event_count: usize,
@@ -24,11 +24,11 @@ pub const Loop = struct {
available_eventfd_resume_nodes: std.atomic.Stack(ResumeNode.EventFd),
eventfd_resume_nodes: []std.atomic.Stack(ResumeNode.EventFd).Node,
- pub const NextTickNode = std.atomic.Queue(promise).Node;
+ pub const NextTickNode = std.atomic.Queue(anyframe).Node;
pub const ResumeNode = struct {
id: Id,
- handle: promise,
+ handle: anyframe,
overlapped: Overlapped,
pub const overlapped_init = switch (builtin.os) {
@@ -110,7 +110,7 @@ pub const Loop = struct {
.pending_event_count = 1,
.allocator = allocator,
.os_data = undefined,
- .next_tick_queue = std.atomic.Queue(promise).init(),
+ .next_tick_queue = std.atomic.Queue(anyframe).init(),
.extra_threads = undefined,
.available_eventfd_resume_nodes = std.atomic.Stack(ResumeNode.EventFd).init(),
.eventfd_resume_nodes = undefined,
@@ -148,18 +148,18 @@ pub const Loop = struct {
fn initOsData(self: *Loop, extra_thread_count: usize) InitOsDataError!void {
switch (builtin.os) {
.linux => {
- self.os_data.fs_queue = std.atomic.Queue(fs.Request).init();
- self.os_data.fs_queue_item = 0;
- // we need another thread for the file system because Linux does not have an async
- // file system I/O API.
- self.os_data.fs_end_request = fs.RequestNode{
- .prev = undefined,
- .next = undefined,
- .data = fs.Request{
- .msg = fs.Request.Msg.End,
- .finish = fs.Request.Finish.NoAction,
- },
- };
+ // TODO self.os_data.fs_queue = std.atomic.Queue(fs.Request).init();
+ // TODO self.os_data.fs_queue_item = 0;
+ // TODO // we need another thread for the file system because Linux does not have an async
+ // TODO // file system I/O API.
+ // TODO self.os_data.fs_end_request = fs.RequestNode{
+ // TODO .prev = undefined,
+ // TODO .next = undefined,
+ // TODO .data = fs.Request{
+ // TODO .msg = fs.Request.Msg.End,
+ // TODO .finish = fs.Request.Finish.NoAction,
+ // TODO },
+ // TODO };
errdefer {
while (self.available_eventfd_resume_nodes.pop()) |node| os.close(node.data.eventfd);
@@ -197,10 +197,10 @@ pub const Loop = struct {
&self.os_data.final_eventfd_event,
);
- self.os_data.fs_thread = try Thread.spawn(self, posixFsRun);
+ // TODO self.os_data.fs_thread = try Thread.spawn(self, posixFsRun);
errdefer {
- self.posixFsRequest(&self.os_data.fs_end_request);
- self.os_data.fs_thread.wait();
+ // TODO self.posixFsRequest(&self.os_data.fs_end_request);
+ // TODO self.os_data.fs_thread.wait();
}
if (builtin.single_threaded) {
@@ -302,10 +302,10 @@ pub const Loop = struct {
.udata = undefined,
};
- self.os_data.fs_thread = try Thread.spawn(self, posixFsRun);
+ // TODO self.os_data.fs_thread = try Thread.spawn(self, posixFsRun);
errdefer {
- self.posixFsRequest(&self.os_data.fs_end_request);
- self.os_data.fs_thread.wait();
+ // TODO self.posixFsRequest(&self.os_data.fs_end_request);
+ // TODO self.os_data.fs_thread.wait();
}
if (builtin.single_threaded) {
@@ -397,7 +397,7 @@ pub const Loop = struct {
}
}
- /// resume_node must live longer than the promise that it holds a reference to.
+ /// resume_node must live longer than the anyframe that it holds a reference to.
/// flags must contain EPOLLET
pub fn linuxAddFd(self: *Loop, fd: i32, resume_node: *ResumeNode, flags: u32) !void {
assert(flags & os.EPOLLET == os.EPOLLET);
@@ -460,7 +460,7 @@ pub const Loop = struct {
return resume_node.kev;
}
- /// resume_node must live longer than the promise that it holds a reference to.
+ /// resume_node must live longer than the anyframe that it holds a reference to.
pub fn bsdAddKev(self: *Loop, resume_node: *ResumeNode.Basic, ident: usize, filter: i16, fflags: u32) !void {
self.beginOneEvent();
errdefer self.finishOneEvent();
@@ -561,11 +561,11 @@ pub const Loop = struct {
self.workerRun();
switch (builtin.os) {
- builtin.Os.linux,
- builtin.Os.macosx,
- builtin.Os.freebsd,
- builtin.Os.netbsd,
- => self.os_data.fs_thread.wait(),
+ .linux,
+ .macosx,
+ .freebsd,
+ .netbsd,
+ => {}, // TODO self.os_data.fs_thread.wait(),
else => {},
}
@@ -574,45 +574,39 @@ pub const Loop = struct {
}
}
- /// This is equivalent to an async call, except instead of beginning execution of the async function,
- /// it immediately returns to the caller, and the async function is queued in the event loop. It still
- /// returns a promise to be awaited.
- pub fn call(self: *Loop, comptime func: var, args: ...) !(promise->@typeOf(func).ReturnType) {
- const S = struct {
- async fn asyncFunc(loop: *Loop, handle: *promise->@typeOf(func).ReturnType, args2: ...) @typeOf(func).ReturnType {
- suspend {
- handle.* = @handle();
- var my_tick_node = Loop.NextTickNode{
- .prev = undefined,
- .next = undefined,
- .data = @handle(),
- };
- loop.onNextTick(&my_tick_node);
- }
- // TODO guaranteed allocation elision for await in same func as async
- return await (async func(args2) catch unreachable);
- }
- };
- var handle: promise->@typeOf(func).ReturnType = undefined;
- return async S.asyncFunc(self, &handle, args);
+ /// This is equivalent to function call, except it calls `startCpuBoundOperation` first.
+ pub fn call(comptime func: var, args: ...) @typeOf(func).ReturnType {
+ startCpuBoundOperation();
+ return func(args);
}
- /// Awaiting a yield lets the event loop run, starting any unstarted async operations.
+ /// Yielding lets the event loop run, starting any unstarted async operations.
/// Note that async operations automatically start when a function yields for any other reason,
/// for example, when async I/O is performed. This function is intended to be used only when
/// CPU bound tasks would be waiting in the event loop but never get started because no async I/O
/// is performed.
- pub async fn yield(self: *Loop) void {
+ pub fn yield(self: *Loop) void {
suspend {
- var my_tick_node = Loop.NextTickNode{
+ var my_tick_node = NextTickNode{
.prev = undefined,
.next = undefined,
- .data = @handle(),
+ .data = @frame(),
};
self.onNextTick(&my_tick_node);
}
}
+ /// If the build is multi-threaded and there is an event loop, then it calls `yield`. Otherwise,
+ /// does nothing.
+ pub fn startCpuBoundOperation() void {
+ if (builtin.is_single_threaded) {
+ return;
+ } else if (instance) |event_loop| {
+ event_loop.yield();
+ }
+ }
+
+
/// call finishOneEvent when done
pub fn beginOneEvent(self: *Loop) void {
_ = @atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
@@ -624,7 +618,7 @@ pub const Loop = struct {
// cause all the threads to stop
switch (builtin.os) {
.linux => {
- self.posixFsRequest(&self.os_data.fs_end_request);
+ // TODO self.posixFsRequest(&self.os_data.fs_end_request);
// writing 8 bytes to an eventfd cannot fail
os.write(self.os_data.final_eventfd, wakeup_bytes) catch unreachable;
return;
@@ -672,9 +666,9 @@ pub const Loop = struct {
const handle = resume_node.handle;
const resume_node_id = resume_node.id;
switch (resume_node_id) {
- ResumeNode.Id.Basic => {},
- ResumeNode.Id.Stop => return,
- ResumeNode.Id.EventFd => {
+ .Basic => {},
+ .Stop => return,
+ .EventFd => {
const event_fd_node = @fieldParentPtr(ResumeNode.EventFd, "base", resume_node);
event_fd_node.epoll_op = os.EPOLL_CTL_MOD;
const stack_node = @fieldParentPtr(std.atomic.Stack(ResumeNode.EventFd).Node, "data", event_fd_node);
@@ -696,12 +690,12 @@ pub const Loop = struct {
const handle = resume_node.handle;
const resume_node_id = resume_node.id;
switch (resume_node_id) {
- ResumeNode.Id.Basic => {
+ .Basic => {
const basic_node = @fieldParentPtr(ResumeNode.Basic, "base", resume_node);
basic_node.kev = ev;
},
- ResumeNode.Id.Stop => return,
- ResumeNode.Id.EventFd => {
+ .Stop => return,
+ .EventFd => {
const event_fd_node = @fieldParentPtr(ResumeNode.EventFd, "base", resume_node);
const stack_node = @fieldParentPtr(std.atomic.Stack(ResumeNode.EventFd).Node, "data", event_fd_node);
self.available_eventfd_resume_nodes.push(stack_node);
@@ -730,9 +724,9 @@ pub const Loop = struct {
const handle = resume_node.handle;
const resume_node_id = resume_node.id;
switch (resume_node_id) {
- ResumeNode.Id.Basic => {},
- ResumeNode.Id.Stop => return,
- ResumeNode.Id.EventFd => {
+ .Basic => {},
+ .Stop => return,
+ .EventFd => {
const event_fd_node = @fieldParentPtr(ResumeNode.EventFd, "base", resume_node);
const stack_node = @fieldParentPtr(std.atomic.Stack(ResumeNode.EventFd).Node, "data", event_fd_node);
self.available_eventfd_resume_nodes.push(stack_node);
@@ -750,12 +744,12 @@ pub const Loop = struct {
self.beginOneEvent(); // finished in posixFsRun after processing the msg
self.os_data.fs_queue.put(request_node);
switch (builtin.os) {
- builtin.Os.macosx, builtin.Os.freebsd, builtin.Os.netbsd => {
+ .macosx, .freebsd, .netbsd => {
const fs_kevs = (*const [1]os.Kevent)(&self.os_data.fs_kevent_wake);
const empty_kevs = ([*]os.Kevent)(undefined)[0..0];
_ = os.kevent(self.os_data.fs_kqfd, fs_kevs, empty_kevs, null) catch unreachable;
},
- builtin.Os.linux => {
+ .linux => {
_ = @atomicRmw(i32, &self.os_data.fs_queue_item, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
const rc = os.linux.futex_wake(&self.os_data.fs_queue_item, os.linux.FUTEX_WAKE, 1);
switch (os.linux.getErrno(rc)) {
@@ -781,18 +775,18 @@ pub const Loop = struct {
}
while (self.os_data.fs_queue.get()) |node| {
switch (node.data.msg) {
- @TagType(fs.Request.Msg).End => return,
- @TagType(fs.Request.Msg).PWriteV => |*msg| {
+ .End => return,
+ .PWriteV => |*msg| {
msg.result = os.pwritev(msg.fd, msg.iov, msg.offset);
},
- @TagType(fs.Request.Msg).PReadV => |*msg| {
+ .PReadV => |*msg| {
msg.result = os.preadv(msg.fd, msg.iov, msg.offset);
},
- @TagType(fs.Request.Msg).Open => |*msg| {
+ .Open => |*msg| {
msg.result = os.openC(msg.path.ptr, msg.flags, msg.mode);
},
- @TagType(fs.Request.Msg).Close => |*msg| os.close(msg.fd),
- @TagType(fs.Request.Msg).WriteFile => |*msg| blk: {
+ .Close => |*msg| os.close(msg.fd),
+ .WriteFile => |*msg| blk: {
const flags = os.O_LARGEFILE | os.O_WRONLY | os.O_CREAT |
os.O_CLOEXEC | os.O_TRUNC;
const fd = os.openC(msg.path.ptr, flags, msg.mode) catch |err| {
@@ -804,11 +798,11 @@ pub const Loop = struct {
},
}
switch (node.data.finish) {
- @TagType(fs.Request.Finish).TickNode => |*tick_node| self.onNextTick(tick_node),
- @TagType(fs.Request.Finish).DeallocCloseOperation => |close_op| {
+ .TickNode => |*tick_node| self.onNextTick(tick_node),
+ .DeallocCloseOperation => |close_op| {
self.allocator.destroy(close_op);
},
- @TagType(fs.Request.Finish).NoAction => {},
+ .NoAction => {},
}
self.finishOneEvent();
}
@@ -855,16 +849,16 @@ pub const Loop = struct {
epollfd: i32,
final_eventfd: i32,
final_eventfd_event: os.linux.epoll_event,
- fs_thread: *Thread,
- fs_queue_item: i32,
- fs_queue: std.atomic.Queue(fs.Request),
- fs_end_request: fs.RequestNode,
+ // TODO fs_thread: *Thread,
+ // TODO fs_queue_item: i32,
+ // TODO fs_queue: std.atomic.Queue(fs.Request),
+ // TODO fs_end_request: fs.RequestNode,
};
};
test "std.event.Loop - basic" {
// https://github.com/ziglang/zig/issues/1908
- if (builtin.single_threaded or builtin.os != builtin.Os.linux) return error.SkipZigTest;
+ if (builtin.single_threaded) return error.SkipZigTest;
const allocator = std.heap.direct_allocator;
@@ -877,7 +871,7 @@ test "std.event.Loop - basic" {
test "std.event.Loop - call" {
// https://github.com/ziglang/zig/issues/1908
- if (builtin.single_threaded or builtin.os != builtin.Os.linux) return error.SkipZigTest;
+ if (builtin.single_threaded) return error.SkipZigTest;
const allocator = std.heap.direct_allocator;
@@ -886,9 +880,8 @@ test "std.event.Loop - call" {
defer loop.deinit();
var did_it = false;
- const handle = try loop.call(testEventLoop);
- const handle2 = try loop.call(testEventLoop2, handle, &did_it);
- defer cancel handle2;
+ const handle = async Loop.call(testEventLoop);
+ const handle2 = async Loop.call(testEventLoop2, handle, &did_it);
loop.run();
@@ -899,7 +892,7 @@ async fn testEventLoop() i32 {
return 1234;
}
-async fn testEventLoop2(h: promise->i32, did_it: *bool) void {
+async fn testEventLoop2(h: anyframe->i32, did_it: *bool) void {
const value = await h;
testing.expect(value == 1234);
did_it.* = true;
--
cgit v1.2.3
From 93840f8610974109d129e6940a851c1f7a8c9fce Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 8 Aug 2019 15:34:41 -0400
Subject: fix var args call on non-generic function
---
src/ir.cpp | 64 +++++++++++++++++++++++++++++++++++++++++-------------
std/event/loop.zig | 17 +++++++++++++--
2 files changed, 64 insertions(+), 17 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/src/ir.cpp b/src/ir.cpp
index 20a21bb5c3..5fc31db3ef 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -15799,26 +15799,60 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
casted_args[next_arg_index] = casted_arg;
next_arg_index += 1;
}
- size_t iter_count = (call_param_count < call_instruction->arg_count) ?
- call_param_count : call_instruction->arg_count;
- for (size_t call_i = 0; call_i < iter_count; call_i += 1) {
+ for (size_t call_i = 0; call_i < call_instruction->arg_count; call_i += 1) {
IrInstruction *old_arg = call_instruction->args[call_i]->child;
if (type_is_invalid(old_arg->value.type))
return ira->codegen->invalid_instruction;
- IrInstruction *casted_arg;
- if (next_arg_index < src_param_count) {
- ZigType *param_type = fn_type_id->param_info[next_arg_index].type;
- if (type_is_invalid(param_type))
- return ira->codegen->invalid_instruction;
- casted_arg = ir_implicit_cast(ira, old_arg, param_type);
- if (type_is_invalid(casted_arg->value.type))
- return ira->codegen->invalid_instruction;
+
+ if (old_arg->value.type->id == ZigTypeIdArgTuple) {
+ for (size_t arg_tuple_i = old_arg->value.data.x_arg_tuple.start_index;
+ arg_tuple_i < old_arg->value.data.x_arg_tuple.end_index; arg_tuple_i += 1)
+ {
+ ZigVar *arg_var = get_fn_var_by_index(parent_fn_entry, arg_tuple_i);
+ if (arg_var == nullptr) {
+ ir_add_error(ira, old_arg,
+ buf_sprintf("compiler bug: var args can't handle void. https://github.com/ziglang/zig/issues/557"));
+ return ira->codegen->invalid_instruction;
+ }
+ IrInstruction *arg_var_ptr_inst = ir_get_var_ptr(ira, old_arg, arg_var);
+ if (type_is_invalid(arg_var_ptr_inst->value.type))
+ return ira->codegen->invalid_instruction;
+
+ IrInstruction *arg_tuple_arg = ir_get_deref(ira, old_arg, arg_var_ptr_inst, nullptr);
+ if (type_is_invalid(arg_tuple_arg->value.type))
+ return ira->codegen->invalid_instruction;
+
+ IrInstruction *casted_arg;
+ if (next_arg_index < src_param_count) {
+ ZigType *param_type = fn_type_id->param_info[next_arg_index].type;
+ if (type_is_invalid(param_type))
+ return ira->codegen->invalid_instruction;
+ casted_arg = ir_implicit_cast(ira, arg_tuple_arg, param_type);
+ if (type_is_invalid(casted_arg->value.type))
+ return ira->codegen->invalid_instruction;
+ } else {
+ casted_arg = arg_tuple_arg;
+ }
+
+ casted_args[next_arg_index] = casted_arg;
+ next_arg_index += 1;
+ }
} else {
- casted_arg = old_arg;
- }
+ IrInstruction *casted_arg;
+ if (next_arg_index < src_param_count) {
+ ZigType *param_type = fn_type_id->param_info[next_arg_index].type;
+ if (type_is_invalid(param_type))
+ return ira->codegen->invalid_instruction;
+ casted_arg = ir_implicit_cast(ira, old_arg, param_type);
+ if (type_is_invalid(casted_arg->value.type))
+ return ira->codegen->invalid_instruction;
+ } else {
+ casted_arg = old_arg;
+ }
- casted_args[next_arg_index] = casted_arg;
- next_arg_index += 1;
+ casted_args[next_arg_index] = casted_arg;
+ next_arg_index += 1;
+ }
}
assert(next_arg_index == call_param_count);
diff --git a/std/event/loop.zig b/std/event/loop.zig
index 70cd8d2ab6..f0ae67a3d1 100644
--- a/std/event/loop.zig
+++ b/std/event/loop.zig
@@ -1,5 +1,6 @@
const std = @import("../std.zig");
const builtin = @import("builtin");
+const root = @import("root");
const assert = std.debug.assert;
const testing = std.testing;
const mem = std.mem;
@@ -85,6 +86,18 @@ pub const Loop = struct {
};
};
+ pub const IoMode = enum {
+ blocking,
+ evented,
+ };
+ pub const io_mode: IoMode = if (@hasDecl(root, "io_mode")) root.io_mode else IoMode.blocking;
+ var global_instance_state: Loop = undefined;
+ const default_instance: ?*Loop = switch (io_mode) {
+ .blocking => null,
+ .evented => &global_instance_state,
+ };
+ pub const instance: ?*Loop = if (@hasDecl(root, "event_loop")) root.event_loop else default_instance;
+
/// After initialization, call run().
/// TODO copy elision / named return values so that the threads referencing *Loop
/// have the correct pointer value.
@@ -599,7 +612,7 @@ pub const Loop = struct {
/// If the build is multi-threaded and there is an event loop, then it calls `yield`. Otherwise,
/// does nothing.
pub fn startCpuBoundOperation() void {
- if (builtin.is_single_threaded) {
+ if (builtin.single_threaded) {
return;
} else if (instance) |event_loop| {
event_loop.yield();
@@ -881,7 +894,7 @@ test "std.event.Loop - call" {
var did_it = false;
const handle = async Loop.call(testEventLoop);
- const handle2 = async Loop.call(testEventLoop2, handle, &did_it);
+ const handle2 = async Loop.call(testEventLoop2, &handle, &did_it);
loop.run();
--
cgit v1.2.3
From 2e7f53f1f0d8339b8dc90ad7e0bc9963f1ec471c Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 9 Aug 2019 17:34:06 -0400
Subject: fix cancel inside an errdefer
---
src/all_types.hpp | 7 ++++---
src/analyze.cpp | 4 ++++
src/codegen.cpp | 23 ++++++++++++++++++++---
src/ir.cpp | 12 ++++--------
src/ir_print.cpp | 3 +--
test/stage1/behavior/coroutines.zig | 26 +++++++++++++++++++++++++-
6 files changed, 58 insertions(+), 17 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/src/all_types.hpp b/src/all_types.hpp
index cf41444f0b..0b03388502 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -1725,6 +1725,7 @@ struct CodeGen {
LLVMValueRef cur_async_resume_index_ptr;
LLVMValueRef cur_async_awaiter_ptr;
LLVMValueRef cur_async_prev_val;
+ LLVMValueRef cur_async_prev_val_field_ptr;
LLVMBasicBlockRef cur_preamble_llvm_block;
size_t cur_resume_block_count;
LLVMValueRef cur_err_ret_trace_val_arg;
@@ -1886,6 +1887,7 @@ struct CodeGen {
bool system_linker_hack;
bool reported_bad_link_libc_error;
bool is_dynamic; // shared library rather than static library. dynamic musl rather than static musl.
+ bool cur_is_after_return;
//////////////////////////// Participates in Input Parameter Cache Hash
/////// Note: there is a separate cache hash for builtin.zig, when adding fields,
@@ -3639,8 +3641,6 @@ struct IrInstructionCoroResume {
struct IrInstructionTestCancelRequested {
IrInstruction base;
-
- bool use_return_begin_prev_value;
};
enum ResultLocId {
@@ -3730,7 +3730,8 @@ static const size_t err_union_payload_index = 1;
static const size_t coro_fn_ptr_index = 0;
static const size_t coro_resume_index = 1;
static const size_t coro_awaiter_index = 2;
-static const size_t coro_ret_start = 3;
+static const size_t coro_prev_val_index = 3;
+static const size_t coro_ret_start = 4;
// TODO call graph analysis to find out what this number needs to be for every function
// MUST BE A POWER OF TWO.
diff --git a/src/analyze.cpp b/src/analyze.cpp
index cc90573f41..a09ba582c9 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -5246,6 +5246,9 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) {
field_names.append("@awaiter");
field_types.append(g->builtin_types.entry_usize);
+ field_names.append("@prev_val");
+ field_types.append(g->builtin_types.entry_usize);
+
FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
ZigType *ptr_return_type = get_pointer_to_type(g, fn_type_id->return_type, false);
field_names.append("@result_ptr_callee");
@@ -7592,6 +7595,7 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re
field_types.append(ptr_fn_llvm_type); // fn_ptr
field_types.append(usize_type_ref); // resume_index
field_types.append(usize_type_ref); // awaiter
+ field_types.append(usize_type_ref); // prev_val
bool have_result_type = result_type != nullptr && type_has_bits(result_type);
if (have_result_type) {
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 46cd8e9fcf..5a8fd3e9ca 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -2226,7 +2226,18 @@ static LLVMValueRef gen_resume(CodeGen *g, LLVMValueRef fn_val, LLVMValueRef tar
return ZigLLVMBuildCall(g->builder, fn_val, args, 2, LLVMFastCallConv, ZigLLVM_FnInlineAuto, "");
}
+static LLVMValueRef get_cur_async_prev_val(CodeGen *g) {
+ if (g->cur_async_prev_val != nullptr) {
+ return g->cur_async_prev_val;
+ }
+ g->cur_async_prev_val = LLVMBuildLoad(g->builder, g->cur_async_prev_val_field_ptr, "");
+ return g->cur_async_prev_val;
+}
+
static LLVMBasicBlockRef gen_suspend_begin(CodeGen *g, const char *name_hint) {
+ // This becomes invalid when a suspend happens.
+ g->cur_async_prev_val = nullptr;
+
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
LLVMBasicBlockRef resume_bb = LLVMAppendBasicBlock(g->cur_fn_val, name_hint);
size_t new_block_index = g->cur_resume_block_count;
@@ -2319,6 +2330,9 @@ static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable,
LLVMBasicBlockRef incoming_blocks[] = { after_resume_block, switch_bb };
LLVMAddIncoming(g->cur_async_prev_val, incoming_values, incoming_blocks, 2);
+ g->cur_is_after_return = true;
+ LLVMBuildStore(g->builder, g->cur_async_prev_val, g->cur_async_prev_val_field_ptr);
+
if (!ret_type_has_bits) {
return nullptr;
}
@@ -2366,7 +2380,7 @@ static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrIns
ZigType *any_frame_type = get_any_frame_type(g, ret_type);
LLVMValueRef one = LLVMConstInt(usize_type_ref, 1, false);
LLVMValueRef mask_val = LLVMConstNot(one);
- LLVMValueRef masked_prev_val = LLVMBuildAnd(g->builder, g->cur_async_prev_val, mask_val, "");
+ LLVMValueRef masked_prev_val = LLVMBuildAnd(g->builder, get_cur_async_prev_val(g), mask_val, "");
LLVMValueRef their_frame_ptr = LLVMBuildIntToPtr(g->builder, masked_prev_val,
get_llvm_type(g, any_frame_type), "");
LLVMValueRef call_inst = gen_resume(g, nullptr, their_frame_ptr, ResumeIdReturn, nullptr);
@@ -5590,8 +5604,8 @@ static LLVMValueRef ir_render_test_cancel_requested(CodeGen *g, IrExecutable *ex
{
if (!fn_is_async(g->cur_fn))
return LLVMConstInt(LLVMInt1Type(), 0, false);
- if (instruction->use_return_begin_prev_value) {
- return LLVMBuildTrunc(g->builder, g->cur_async_prev_val, LLVMInt1Type(), "");
+ if (g->cur_is_after_return) {
+ return LLVMBuildTrunc(g->builder, get_cur_async_prev_val(g), LLVMInt1Type(), "");
} else {
zig_panic("TODO");
}
@@ -7063,6 +7077,7 @@ static void do_code_gen(CodeGen *g) {
}
if (is_async) {
+ g->cur_is_after_return = false;
g->cur_resume_block_count = 0;
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
@@ -7099,6 +7114,8 @@ static void do_code_gen(CodeGen *g) {
g->cur_err_ret_trace_val_stack = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
trace_field_index_stack, "");
}
+ g->cur_async_prev_val_field_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
+ coro_prev_val_index, "");
LLVMValueRef resume_index = LLVMBuildLoad(g->builder, resume_index_ptr, "");
LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, resume_index, bad_resume_block, 4);
diff --git a/src/ir.cpp b/src/ir.cpp
index 5fc31db3ef..4dcfaa6cce 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -3325,12 +3325,9 @@ static IrInstruction *ir_build_coro_resume(IrBuilder *irb, Scope *scope, AstNode
return &instruction->base;
}
-static IrInstruction *ir_build_test_cancel_requested(IrBuilder *irb, Scope *scope, AstNode *source_node,
- bool use_return_begin_prev_value)
-{
+static IrInstruction *ir_build_test_cancel_requested(IrBuilder *irb, Scope *scope, AstNode *source_node) {
IrInstructionTestCancelRequested *instruction = ir_build_instruction(irb, scope, source_node);
instruction->base.value.type = irb->codegen->builtin_types.entry_bool;
- instruction->use_return_begin_prev_value = use_return_begin_prev_value;
return &instruction->base;
}
@@ -3546,7 +3543,7 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
if (need_test_cancel) {
ir_set_cursor_at_end_and_append_block(irb, ok_block);
- IrInstruction *is_canceled = ir_build_test_cancel_requested(irb, scope, node, true);
+ IrInstruction *is_canceled = ir_build_test_cancel_requested(irb, scope, node);
ir_mark_gen(ir_build_cond_br(irb, scope, node, is_canceled,
all_defers_block, normal_defers_block, force_comptime));
}
@@ -3830,7 +3827,7 @@ static IrInstruction *ir_gen_block(IrBuilder *irb, Scope *parent_scope, AstNode
ir_gen_defers_for_block(irb, child_scope, outer_block_scope, false);
return ir_mark_gen(ir_build_return(irb, child_scope, result->source_node, result));
}
- IrInstruction *is_canceled = ir_build_test_cancel_requested(irb, child_scope, block_node, true);
+ IrInstruction *is_canceled = ir_build_test_cancel_requested(irb, child_scope, block_node);
IrBasicBlock *all_defers_block = ir_create_basic_block(irb, child_scope, "ErrDefers");
IrBasicBlock *normal_defers_block = ir_create_basic_block(irb, child_scope, "Defers");
IrBasicBlock *ret_stmt_block = ir_create_basic_block(irb, child_scope, "RetStmt");
@@ -24725,8 +24722,7 @@ static IrInstruction *ir_analyze_instruction_test_cancel_requested(IrAnalyze *ir
if (ir_should_inline(ira->new_irb.exec, instruction->base.scope)) {
return ir_const_bool(ira, &instruction->base, false);
}
- return ir_build_test_cancel_requested(&ira->new_irb, instruction->base.scope, instruction->base.source_node,
- instruction->use_return_begin_prev_value);
+ return ir_build_test_cancel_requested(&ira->new_irb, instruction->base.scope, instruction->base.source_node);
}
static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction *instruction) {
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index 8b8445f625..8c90eb02f3 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -1551,8 +1551,7 @@ static void ir_print_await_gen(IrPrint *irp, IrInstructionAwaitGen *instruction)
}
static void ir_print_test_cancel_requested(IrPrint *irp, IrInstructionTestCancelRequested *instruction) {
- const char *arg = instruction->use_return_begin_prev_value ? "UseReturnBeginPrevValue" : "AdditionalCheck";
- fprintf(irp->f, "@testCancelRequested(%s)", arg);
+ fprintf(irp->f, "@testCancelRequested()");
}
static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig
index 57706c2455..c2b95e8559 100644
--- a/test/stage1/behavior/coroutines.zig
+++ b/test/stage1/behavior/coroutines.zig
@@ -318,7 +318,7 @@ test "@asyncCall with return type" {
}
};
var foo = Foo{ .bar = Foo.middle };
- var bytes: [100]u8 = undefined;
+ var bytes: [150]u8 = undefined;
var aresult: i32 = 0;
_ = @asyncCall(&bytes, &aresult, foo.bar);
expect(aresult == 0);
@@ -589,3 +589,27 @@ test "pass string literal to async function" {
};
S.doTheTest();
}
+
+test "cancel inside an errdefer" {
+ const S = struct {
+ var frame: anyframe = undefined;
+
+ fn doTheTest() void {
+ _ = async amainWrap();
+ resume frame;
+ }
+
+ fn amainWrap() !void {
+ var foo = async func();
+ errdefer cancel foo;
+ return error.Bad;
+ }
+
+ fn func() void {
+ frame = @frame();
+ suspend;
+ }
+
+ };
+ S.doTheTest();
+}
--
cgit v1.2.3
From b9d1d45dfd0f704bc762732c23aa2844f1d14e8d Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 9 Aug 2019 21:49:40 -0400
Subject: fix combining try with errdefer cancel
---
src/all_types.hpp | 13 +++++++++++
src/codegen.cpp | 45 +++++++++++++++++++++++++++++--------
src/ir.cpp | 33 +++++++++++++++++++++++++++
src/ir_print.cpp | 9 ++++++++
test/stage1/behavior/coroutines.zig | 29 ++++++++++++++++++++++++
5 files changed, 120 insertions(+), 9 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 0b03388502..45182f3db3 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -2366,6 +2366,7 @@ enum IrInstructionId {
IrInstructionIdAwaitGen,
IrInstructionIdCoroResume,
IrInstructionIdTestCancelRequested,
+ IrInstructionIdSpill,
};
struct IrInstruction {
@@ -3643,6 +3644,18 @@ struct IrInstructionTestCancelRequested {
IrInstruction base;
};
+enum SpillId {
+ SpillIdInvalid,
+ SpillIdRetErrCode,
+};
+
+struct IrInstructionSpill {
+ IrInstruction base;
+
+ SpillId spill_id;
+ IrInstruction *operand;
+};
+
enum ResultLocId {
ResultLocIdInvalid,
ResultLocIdNone,
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 5a8fd3e9ca..976ee4181e 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -5113,17 +5113,9 @@ static LLVMValueRef ir_render_test_err(CodeGen *g, IrExecutable *executable, IrI
return LLVMBuildICmp(g->builder, LLVMIntNE, err_val, zero, "");
}
-static LLVMValueRef ir_render_unwrap_err_code(CodeGen *g, IrExecutable *executable,
- IrInstructionUnwrapErrCode *instruction)
-{
- if (instruction->base.value.special != ConstValSpecialRuntime)
- return nullptr;
-
- ZigType *ptr_type = instruction->err_union_ptr->value.type;
- assert(ptr_type->id == ZigTypeIdPointer);
+static LLVMValueRef gen_unwrap_err_code(CodeGen *g, LLVMValueRef err_union_ptr, ZigType *ptr_type) {
ZigType *err_union_type = ptr_type->data.pointer.child_type;
ZigType *payload_type = err_union_type->data.error_union.payload_type;
- LLVMValueRef err_union_ptr = ir_llvm_value(g, instruction->err_union_ptr);
if (!type_has_bits(payload_type)) {
return err_union_ptr;
} else {
@@ -5133,6 +5125,18 @@ static LLVMValueRef ir_render_unwrap_err_code(CodeGen *g, IrExecutable *executab
}
}
+static LLVMValueRef ir_render_unwrap_err_code(CodeGen *g, IrExecutable *executable,
+ IrInstructionUnwrapErrCode *instruction)
+{
+ if (instruction->base.value.special != ConstValSpecialRuntime)
+ return nullptr;
+
+ ZigType *ptr_type = instruction->err_union_ptr->value.type;
+ assert(ptr_type->id == ZigTypeIdPointer);
+ LLVMValueRef err_union_ptr = ir_llvm_value(g, instruction->err_union_ptr);
+ return gen_unwrap_err_code(g, err_union_ptr, ptr_type);
+}
+
static LLVMValueRef ir_render_unwrap_err_payload(CodeGen *g, IrExecutable *executable,
IrInstructionUnwrapErrPayload *instruction)
{
@@ -5611,6 +5615,27 @@ static LLVMValueRef ir_render_test_cancel_requested(CodeGen *g, IrExecutable *ex
}
}
+static LLVMValueRef ir_render_spill(CodeGen *g, IrExecutable *executable, IrInstructionSpill *instruction) {
+ if (!fn_is_async(g->cur_fn))
+ return ir_llvm_value(g, instruction->operand);
+
+ switch (instruction->spill_id) {
+ case SpillIdInvalid:
+ zig_unreachable();
+ case SpillIdRetErrCode: {
+ LLVMValueRef ret_ptr = LLVMBuildLoad(g->builder, g->cur_ret_ptr, "");
+ ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type;
+ if (ret_type->id == ZigTypeIdErrorUnion) {
+ return gen_unwrap_err_code(g, ret_ptr, get_pointer_to_type(g, ret_type, true));
+ } else {
+ zig_unreachable();
+ }
+ }
+
+ }
+ zig_unreachable();
+}
+
static void set_debug_location(CodeGen *g, IrInstruction *instruction) {
AstNode *source_node = instruction->source_node;
Scope *scope = instruction->scope;
@@ -5866,6 +5891,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_await(g, executable, (IrInstructionAwaitGen *)instruction);
case IrInstructionIdTestCancelRequested:
return ir_render_test_cancel_requested(g, executable, (IrInstructionTestCancelRequested *)instruction);
+ case IrInstructionIdSpill:
+ return ir_render_spill(g, executable, (IrInstructionSpill *)instruction);
}
zig_unreachable();
}
diff --git a/src/ir.cpp b/src/ir.cpp
index 4dcfaa6cce..845ee03757 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -1066,6 +1066,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionTestCancelReques
return IrInstructionIdTestCancelRequested;
}
+static constexpr IrInstructionId ir_instruction_id(IrInstructionSpill *) {
+ return IrInstructionIdSpill;
+}
+
template
static T *ir_create_instruction(IrBuilder *irb, Scope *scope, AstNode *source_node) {
T *special_instruction = allocate(1);
@@ -3332,6 +3336,18 @@ static IrInstruction *ir_build_test_cancel_requested(IrBuilder *irb, Scope *scop
return &instruction->base;
}
+static IrInstruction *ir_build_spill(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ IrInstruction *operand, SpillId spill_id)
+{
+ IrInstructionSpill *instruction = ir_build_instruction(irb, scope, source_node);
+ instruction->operand = operand;
+ instruction->spill_id = spill_id;
+
+ ir_ref_instruction(operand, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_scope, size_t *results) {
results[ReturnKindUnconditional] = 0;
results[ReturnKindError] = 0;
@@ -3591,6 +3607,7 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
ResultLocReturn *result_loc_ret = allocate(1);
result_loc_ret->base.id = ResultLocIdReturn;
ir_build_reset_result(irb, scope, node, &result_loc_ret->base);
+ err_val = ir_build_spill(irb, scope, node, err_val, SpillIdRetErrCode);
ir_build_end_expr(irb, scope, node, err_val, &result_loc_ret->base);
if (irb->codegen->have_err_ret_tracing && !should_inline) {
@@ -24725,6 +24742,19 @@ static IrInstruction *ir_analyze_instruction_test_cancel_requested(IrAnalyze *ir
return ir_build_test_cancel_requested(&ira->new_irb, instruction->base.scope, instruction->base.source_node);
}
+static IrInstruction *ir_analyze_instruction_spill(IrAnalyze *ira, IrInstructionSpill *instruction) {
+ IrInstruction *operand = instruction->operand->child;
+ if (type_is_invalid(operand->value.type))
+ return ira->codegen->invalid_instruction;
+ if (ir_should_inline(ira->new_irb.exec, instruction->base.scope)) {
+ return operand;
+ }
+ IrInstruction *result = ir_build_spill(&ira->new_irb, instruction->base.scope, instruction->base.source_node,
+ operand, instruction->spill_id);
+ result->value.type = operand->value.type;
+ return result;
+}
+
static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction *instruction) {
switch (instruction->id) {
case IrInstructionIdInvalid:
@@ -25024,6 +25054,8 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_await(ira, (IrInstructionAwaitSrc *)instruction);
case IrInstructionIdTestCancelRequested:
return ir_analyze_instruction_test_cancel_requested(ira, (IrInstructionTestCancelRequested *)instruction);
+ case IrInstructionIdSpill:
+ return ir_analyze_instruction_spill(ira, (IrInstructionSpill *)instruction);
}
zig_unreachable();
}
@@ -25259,6 +25291,7 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdAllocaSrc:
case IrInstructionIdAllocaGen:
case IrInstructionIdTestCancelRequested:
+ case IrInstructionIdSpill:
return false;
case IrInstructionIdAsm:
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index 8c90eb02f3..39e781e4f0 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -1554,6 +1554,12 @@ static void ir_print_test_cancel_requested(IrPrint *irp, IrInstructionTestCancel
fprintf(irp->f, "@testCancelRequested()");
}
+static void ir_print_spill(IrPrint *irp, IrInstructionSpill *instruction) {
+ fprintf(irp->f, "@spill(");
+ ir_print_other_instruction(irp, instruction->operand);
+ fprintf(irp->f, ")");
+}
+
static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
ir_print_prefix(irp, instruction);
switch (instruction->id) {
@@ -2039,6 +2045,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdTestCancelRequested:
ir_print_test_cancel_requested(irp, (IrInstructionTestCancelRequested *)instruction);
break;
+ case IrInstructionIdSpill:
+ ir_print_spill(irp, (IrInstructionSpill *)instruction);
+ break;
}
fprintf(irp->f, "\n");
}
diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig
index c2b95e8559..c92cca9573 100644
--- a/test/stage1/behavior/coroutines.zig
+++ b/test/stage1/behavior/coroutines.zig
@@ -613,3 +613,32 @@ test "cancel inside an errdefer" {
};
S.doTheTest();
}
+
+test "combining try with errdefer cancel" {
+ const S = struct {
+ var frame: anyframe = undefined;
+ var ok = false;
+
+ fn doTheTest() void {
+ _ = async amain();
+ resume frame;
+ expect(ok);
+ }
+
+ fn amain() !void {
+ var f = async func("https://example.com/");
+ errdefer cancel f;
+
+ _ = try await f;
+ }
+
+ fn func(url: []const u8) ![]u8 {
+ errdefer ok = true;
+ frame = @frame();
+ suspend;
+ return error.Bad;
+ }
+
+ };
+ S.doTheTest();
+}
--
cgit v1.2.3
From 22428a75462e01877181501801dce4c090a87e9c Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 10 Aug 2019 15:20:08 -0400
Subject: fix try in an async function with error union and non-zero-bit
payload
---
src/all_types.hpp | 13 ++++-
src/analyze.cpp | 12 +++++
src/codegen.cpp | 75 ++++++++++++++++------------
src/ir.cpp | 97 +++++++++++++++++++++++++++++--------
src/ir_print.cpp | 17 +++++--
test/stage1/behavior/coroutines.zig | 30 ++++++++++++
6 files changed, 187 insertions(+), 57 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 45182f3db3..8b4d1e6d70 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -74,6 +74,7 @@ struct IrExecutable {
bool invalid;
bool is_inline;
bool is_generic_instantiation;
+ bool need_err_code_spill;
};
enum OutType {
@@ -1384,6 +1385,7 @@ struct ZigFn {
size_t prealloc_backward_branch_quota;
AstNode **param_source_nodes;
Buf **param_names;
+ IrInstruction *err_code_spill;
AstNode *fn_no_inline_set_node;
AstNode *fn_static_eval_set_node;
@@ -2366,7 +2368,8 @@ enum IrInstructionId {
IrInstructionIdAwaitGen,
IrInstructionIdCoroResume,
IrInstructionIdTestCancelRequested,
- IrInstructionIdSpill,
+ IrInstructionIdSpillBegin,
+ IrInstructionIdSpillEnd,
};
struct IrInstruction {
@@ -3649,13 +3652,19 @@ enum SpillId {
SpillIdRetErrCode,
};
-struct IrInstructionSpill {
+struct IrInstructionSpillBegin {
IrInstruction base;
SpillId spill_id;
IrInstruction *operand;
};
+struct IrInstructionSpillEnd {
+ IrInstruction base;
+
+ IrInstructionSpillBegin *begin;
+};
+
enum ResultLocId {
ResultLocIdInvalid,
ResultLocIdNone,
diff --git a/src/analyze.cpp b/src/analyze.cpp
index a09ba582c9..7482ba92ba 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -5190,6 +5190,18 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) {
}
ZigType *fn_type = get_async_fn_type(g, fn->type_entry);
+ if (fn->analyzed_executable.need_err_code_spill) {
+ IrInstructionAllocaGen *alloca_gen = allocate(1);
+ alloca_gen->base.id = IrInstructionIdAllocaGen;
+ alloca_gen->base.source_node = fn->proto_node;
+ alloca_gen->base.scope = fn->child_scope;
+ alloca_gen->base.value.type = get_pointer_to_type(g, g->builtin_types.entry_global_error_set, false);
+ alloca_gen->base.ref_count = 1;
+ alloca_gen->name_hint = "";
+ fn->alloca_gen_list.append(alloca_gen);
+ fn->err_code_spill = &alloca_gen->base;
+ }
+
for (size_t i = 0; i < fn->call_list.length; i += 1) {
IrInstructionCallGen *call = fn->call_list.at(i);
ZigFn *callee = call->fn_entry;
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 976ee4181e..2f07fcd710 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -2274,16 +2274,16 @@ static LLVMValueRef gen_maybe_atomic_op(CodeGen *g, LLVMAtomicRMWBinOp op, LLVMV
static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable,
IrInstructionReturnBegin *instruction)
{
- bool ret_type_has_bits = instruction->operand != nullptr &&
- type_has_bits(instruction->operand->value.type);
-
+ ZigType *operand_type = (instruction->operand != nullptr) ? instruction->operand->value.type : nullptr;
+ bool operand_has_bits = (operand_type != nullptr) && type_has_bits(operand_type);
if (!fn_is_async(g->cur_fn)) {
- return ret_type_has_bits ? ir_llvm_value(g, instruction->operand) : nullptr;
+ return operand_has_bits ? ir_llvm_value(g, instruction->operand) : nullptr;
}
+ ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type;
+ bool ret_type_has_bits = type_has_bits(ret_type);
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
- ZigType *ret_type = ret_type_has_bits ? instruction->operand->value.type : nullptr;
if (ret_type_has_bits && !handle_is_ptr(ret_type)) {
// It's a scalar, so it didn't get written to the result ptr. Do that before the atomic rmw.
LLVMBuildStore(g->builder, ir_llvm_value(g, instruction->operand), g->cur_ret_ptr);
@@ -2333,11 +2333,11 @@ static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable,
g->cur_is_after_return = true;
LLVMBuildStore(g->builder, g->cur_async_prev_val, g->cur_async_prev_val_field_ptr);
- if (!ret_type_has_bits) {
+ if (!operand_has_bits) {
return nullptr;
}
- return get_handle_value(g, g->cur_ret_ptr, ret_type, get_pointer_to_type(g, ret_type, true));
+ return get_handle_value(g, g->cur_ret_ptr, operand_type, get_pointer_to_type(g, operand_type, true));
}
static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrInstructionReturn *instruction) {
@@ -5113,18 +5113,6 @@ static LLVMValueRef ir_render_test_err(CodeGen *g, IrExecutable *executable, IrI
return LLVMBuildICmp(g->builder, LLVMIntNE, err_val, zero, "");
}
-static LLVMValueRef gen_unwrap_err_code(CodeGen *g, LLVMValueRef err_union_ptr, ZigType *ptr_type) {
- ZigType *err_union_type = ptr_type->data.pointer.child_type;
- ZigType *payload_type = err_union_type->data.error_union.payload_type;
- if (!type_has_bits(payload_type)) {
- return err_union_ptr;
- } else {
- // TODO assign undef to the payload
- LLVMValueRef err_union_handle = get_handle_value(g, err_union_ptr, err_union_type, ptr_type);
- return LLVMBuildStructGEP(g->builder, err_union_handle, err_union_err_index, "");
- }
-}
-
static LLVMValueRef ir_render_unwrap_err_code(CodeGen *g, IrExecutable *executable,
IrInstructionUnwrapErrCode *instruction)
{
@@ -5133,8 +5121,16 @@ static LLVMValueRef ir_render_unwrap_err_code(CodeGen *g, IrExecutable *executab
ZigType *ptr_type = instruction->err_union_ptr->value.type;
assert(ptr_type->id == ZigTypeIdPointer);
+ ZigType *err_union_type = ptr_type->data.pointer.child_type;
+ ZigType *payload_type = err_union_type->data.error_union.payload_type;
LLVMValueRef err_union_ptr = ir_llvm_value(g, instruction->err_union_ptr);
- return gen_unwrap_err_code(g, err_union_ptr, ptr_type);
+ if (!type_has_bits(payload_type)) {
+ return err_union_ptr;
+ } else {
+ // TODO assign undef to the payload
+ LLVMValueRef err_union_handle = get_handle_value(g, err_union_ptr, err_union_type, ptr_type);
+ return LLVMBuildStructGEP(g->builder, err_union_handle, err_union_err_index, "");
+ }
}
static LLVMValueRef ir_render_unwrap_err_payload(CodeGen *g, IrExecutable *executable,
@@ -5615,21 +5611,36 @@ static LLVMValueRef ir_render_test_cancel_requested(CodeGen *g, IrExecutable *ex
}
}
-static LLVMValueRef ir_render_spill(CodeGen *g, IrExecutable *executable, IrInstructionSpill *instruction) {
+static LLVMValueRef ir_render_spill_begin(CodeGen *g, IrExecutable *executable,
+ IrInstructionSpillBegin *instruction)
+{
if (!fn_is_async(g->cur_fn))
- return ir_llvm_value(g, instruction->operand);
+ return nullptr;
switch (instruction->spill_id) {
case SpillIdInvalid:
zig_unreachable();
case SpillIdRetErrCode: {
- LLVMValueRef ret_ptr = LLVMBuildLoad(g->builder, g->cur_ret_ptr, "");
- ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type;
- if (ret_type->id == ZigTypeIdErrorUnion) {
- return gen_unwrap_err_code(g, ret_ptr, get_pointer_to_type(g, ret_type, true));
- } else {
- zig_unreachable();
- }
+ LLVMValueRef operand = ir_llvm_value(g, instruction->operand);
+ LLVMValueRef ptr = ir_llvm_value(g, g->cur_fn->err_code_spill);
+ LLVMBuildStore(g->builder, operand, ptr);
+ return nullptr;
+ }
+
+ }
+ zig_unreachable();
+}
+
+static LLVMValueRef ir_render_spill_end(CodeGen *g, IrExecutable *executable, IrInstructionSpillEnd *instruction) {
+ if (!fn_is_async(g->cur_fn))
+ return ir_llvm_value(g, instruction->begin->operand);
+
+ switch (instruction->begin->spill_id) {
+ case SpillIdInvalid:
+ zig_unreachable();
+ case SpillIdRetErrCode: {
+ LLVMValueRef ptr = ir_llvm_value(g, g->cur_fn->err_code_spill);
+ return LLVMBuildLoad(g->builder, ptr, "");
}
}
@@ -5891,8 +5902,10 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_await(g, executable, (IrInstructionAwaitGen *)instruction);
case IrInstructionIdTestCancelRequested:
return ir_render_test_cancel_requested(g, executable, (IrInstructionTestCancelRequested *)instruction);
- case IrInstructionIdSpill:
- return ir_render_spill(g, executable, (IrInstructionSpill *)instruction);
+ case IrInstructionIdSpillBegin:
+ return ir_render_spill_begin(g, executable, (IrInstructionSpillBegin *)instruction);
+ case IrInstructionIdSpillEnd:
+ return ir_render_spill_end(g, executable, (IrInstructionSpillEnd *)instruction);
}
zig_unreachable();
}
diff --git a/src/ir.cpp b/src/ir.cpp
index 845ee03757..97971efd50 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -1066,8 +1066,12 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionTestCancelReques
return IrInstructionIdTestCancelRequested;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionSpill *) {
- return IrInstructionIdSpill;
+static constexpr IrInstructionId ir_instruction_id(IrInstructionSpillBegin *) {
+ return IrInstructionIdSpillBegin;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionSpillEnd *) {
+ return IrInstructionIdSpillEnd;
}
template
@@ -3336,15 +3340,28 @@ static IrInstruction *ir_build_test_cancel_requested(IrBuilder *irb, Scope *scop
return &instruction->base;
}
-static IrInstruction *ir_build_spill(IrBuilder *irb, Scope *scope, AstNode *source_node,
+static IrInstructionSpillBegin *ir_build_spill_begin(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *operand, SpillId spill_id)
{
- IrInstructionSpill *instruction = ir_build_instruction(irb, scope, source_node);
+ IrInstructionSpillBegin *instruction = ir_build_instruction(irb, scope, source_node);
+ instruction->base.value.special = ConstValSpecialStatic;
+ instruction->base.value.type = irb->codegen->builtin_types.entry_void;
instruction->operand = operand;
instruction->spill_id = spill_id;
ir_ref_instruction(operand, irb->current_basic_block);
+ return instruction;
+}
+
+static IrInstruction *ir_build_spill_end(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ IrInstructionSpillBegin *begin)
+{
+ IrInstructionSpillEnd *instruction = ir_build_instruction(irb, scope, source_node);
+ instruction->begin = begin;
+
+ ir_ref_instruction(&begin->base, irb->current_basic_block);
+
return &instruction->base;
}
@@ -3602,14 +3619,15 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
IrInstruction *err_val_ptr = ir_build_unwrap_err_code(irb, scope, node, err_union_ptr);
IrInstruction *err_val = ir_build_load_ptr(irb, scope, node, err_val_ptr);
ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, err_val));
- err_val = ir_build_return_begin(irb, scope, node, err_val);
+ IrInstructionSpillBegin *spill_begin = ir_build_spill_begin(irb, scope, node, err_val,
+ SpillIdRetErrCode);
+ ir_build_return_begin(irb, scope, node, err_val);
+ err_val = ir_build_spill_end(irb, scope, node, spill_begin);
+ ResultLocReturn *result_loc_ret = allocate(1);
+ result_loc_ret->base.id = ResultLocIdReturn;
+ ir_build_reset_result(irb, scope, node, &result_loc_ret->base);
+ ir_build_end_expr(irb, scope, node, err_val, &result_loc_ret->base);
if (!ir_gen_defers_for_block(irb, scope, outer_scope, true)) {
- ResultLocReturn *result_loc_ret = allocate(1);
- result_loc_ret->base.id = ResultLocIdReturn;
- ir_build_reset_result(irb, scope, node, &result_loc_ret->base);
- err_val = ir_build_spill(irb, scope, node, err_val, SpillIdRetErrCode);
- ir_build_end_expr(irb, scope, node, err_val, &result_loc_ret->base);
-
if (irb->codegen->have_err_ret_tracing && !should_inline) {
ir_build_save_err_ret_addr(irb, scope, node);
}
@@ -12778,8 +12796,21 @@ static IrInstruction *ir_analyze_instruction_return(IrAnalyze *ira, IrInstructio
return ir_finish_anal(ira, result);
}
+ // This cast might have been already done from IrInstructionReturnBegin but it also
+ // might not have, in the case of `try`.
+ IrInstruction *casted_operand = ir_implicit_cast(ira, operand, ira->explicit_return_type);
+ if (type_is_invalid(casted_operand->value.type)) {
+ AstNode *source_node = ira->explicit_return_type_source_node;
+ if (source_node != nullptr) {
+ ErrorMsg *msg = ira->codegen->errors.last();
+ add_error_note(ira->codegen, msg, source_node,
+ buf_sprintf("return type declared here"));
+ }
+ return ir_unreach_error(ira);
+ }
+
IrInstruction *result = ir_build_return(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, operand);
+ instruction->base.source_node, casted_operand);
result->value.type = ira->codegen->builtin_types.entry_unreachable;
return ir_finish_anal(ira, result);
}
@@ -24742,15 +24773,38 @@ static IrInstruction *ir_analyze_instruction_test_cancel_requested(IrAnalyze *ir
return ir_build_test_cancel_requested(&ira->new_irb, instruction->base.scope, instruction->base.source_node);
}
-static IrInstruction *ir_analyze_instruction_spill(IrAnalyze *ira, IrInstructionSpill *instruction) {
+static IrInstruction *ir_analyze_instruction_spill_begin(IrAnalyze *ira, IrInstructionSpillBegin *instruction) {
+ if (ir_should_inline(ira->new_irb.exec, instruction->base.scope))
+ return ir_const_void(ira, &instruction->base);
+
IrInstruction *operand = instruction->operand->child;
if (type_is_invalid(operand->value.type))
return ira->codegen->invalid_instruction;
- if (ir_should_inline(ira->new_irb.exec, instruction->base.scope)) {
+
+ if (!type_has_bits(operand->value.type))
+ return ir_const_void(ira, &instruction->base);
+
+ ir_assert(instruction->spill_id == SpillIdRetErrCode, &instruction->base);
+ ira->new_irb.exec->need_err_code_spill = true;
+
+ IrInstructionSpillBegin *result = ir_build_spill_begin(&ira->new_irb, instruction->base.scope,
+ instruction->base.source_node, operand, instruction->spill_id);
+ return &result->base;
+}
+
+static IrInstruction *ir_analyze_instruction_spill_end(IrAnalyze *ira, IrInstructionSpillEnd *instruction) {
+ IrInstruction *operand = instruction->begin->operand->child;
+ if (type_is_invalid(operand->value.type))
+ return ira->codegen->invalid_instruction;
+
+ if (ir_should_inline(ira->new_irb.exec, instruction->base.scope) || !type_has_bits(operand->value.type))
return operand;
- }
- IrInstruction *result = ir_build_spill(&ira->new_irb, instruction->base.scope, instruction->base.source_node,
- operand, instruction->spill_id);
+
+ ir_assert(instruction->begin->base.child->id == IrInstructionIdSpillBegin, &instruction->base);
+ IrInstructionSpillBegin *begin = reinterpret_cast(instruction->begin->base.child);
+
+ IrInstruction *result = ir_build_spill_end(&ira->new_irb, instruction->base.scope,
+ instruction->base.source_node, begin);
result->value.type = operand->value.type;
return result;
}
@@ -25054,8 +25108,10 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_await(ira, (IrInstructionAwaitSrc *)instruction);
case IrInstructionIdTestCancelRequested:
return ir_analyze_instruction_test_cancel_requested(ira, (IrInstructionTestCancelRequested *)instruction);
- case IrInstructionIdSpill:
- return ir_analyze_instruction_spill(ira, (IrInstructionSpill *)instruction);
+ case IrInstructionIdSpillBegin:
+ return ir_analyze_instruction_spill_begin(ira, (IrInstructionSpillBegin *)instruction);
+ case IrInstructionIdSpillEnd:
+ return ir_analyze_instruction_spill_end(ira, (IrInstructionSpillEnd *)instruction);
}
zig_unreachable();
}
@@ -25193,6 +25249,7 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdCoroResume:
case IrInstructionIdAwaitSrc:
case IrInstructionIdAwaitGen:
+ case IrInstructionIdSpillBegin:
return true;
case IrInstructionIdPhi:
@@ -25291,7 +25348,7 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdAllocaSrc:
case IrInstructionIdAllocaGen:
case IrInstructionIdTestCancelRequested:
- case IrInstructionIdSpill:
+ case IrInstructionIdSpillEnd:
return false;
case IrInstructionIdAsm:
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index 39e781e4f0..9d4570d79a 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -1554,12 +1554,18 @@ static void ir_print_test_cancel_requested(IrPrint *irp, IrInstructionTestCancel
fprintf(irp->f, "@testCancelRequested()");
}
-static void ir_print_spill(IrPrint *irp, IrInstructionSpill *instruction) {
- fprintf(irp->f, "@spill(");
+static void ir_print_spill_begin(IrPrint *irp, IrInstructionSpillBegin *instruction) {
+ fprintf(irp->f, "@spillBegin(");
ir_print_other_instruction(irp, instruction->operand);
fprintf(irp->f, ")");
}
+static void ir_print_spill_end(IrPrint *irp, IrInstructionSpillEnd *instruction) {
+ fprintf(irp->f, "@spillEnd(");
+ ir_print_other_instruction(irp, &instruction->begin->base);
+ fprintf(irp->f, ")");
+}
+
static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
ir_print_prefix(irp, instruction);
switch (instruction->id) {
@@ -2045,8 +2051,11 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdTestCancelRequested:
ir_print_test_cancel_requested(irp, (IrInstructionTestCancelRequested *)instruction);
break;
- case IrInstructionIdSpill:
- ir_print_spill(irp, (IrInstructionSpill *)instruction);
+ case IrInstructionIdSpillBegin:
+ ir_print_spill_begin(irp, (IrInstructionSpillBegin *)instruction);
+ break;
+ case IrInstructionIdSpillEnd:
+ ir_print_spill_end(irp, (IrInstructionSpillEnd *)instruction);
break;
}
fprintf(irp->f, "\n");
diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig
index c92cca9573..a1828a662c 100644
--- a/test/stage1/behavior/coroutines.zig
+++ b/test/stage1/behavior/coroutines.zig
@@ -642,3 +642,33 @@ test "combining try with errdefer cancel" {
};
S.doTheTest();
}
+
+test "try in an async function with error union and non-zero-bit payload" {
+ const S = struct {
+ var frame: anyframe = undefined;
+ var ok = false;
+
+ fn doTheTest() void {
+ _ = async amain();
+ resume frame;
+ expect(ok);
+ }
+
+ fn amain() void {
+ std.testing.expectError(error.Bad, theProblem());
+ ok = true;
+ }
+
+ fn theProblem() ![]u8 {
+ frame = @frame();
+ suspend;
+ const result = try other();
+ return result;
+ }
+
+ fn other() ![]u8 {
+ return error.Bad;
+ }
+ };
+ S.doTheTest();
+}
--
cgit v1.2.3
From 1b83ee78a48a64bef28f12b7b2e263074f88b6b6 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sun, 11 Aug 2019 12:00:32 -0400
Subject: allow comptime_int to implicitly cast to comptime_float
---
src/ir.cpp | 3 +++
std/math.zig | 7 +++++++
test/compile_errors.zig | 8 --------
test/stage1/behavior/cast.zig | 7 ++++++-
4 files changed, 16 insertions(+), 9 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/src/ir.cpp b/src/ir.cpp
index 2b096a3383..13348d28c4 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -9713,6 +9713,9 @@ static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, IrInstruction *instruc
bool const_val_is_float = (const_val->type->id == ZigTypeIdFloat || const_val->type->id == ZigTypeIdComptimeFloat);
assert(const_val_is_int || const_val_is_float);
+ if (const_val_is_int && other_type->id == ZigTypeIdComptimeFloat) {
+ return true;
+ }
if (other_type->id == ZigTypeIdFloat) {
if (const_val->type->id == ZigTypeIdComptimeInt || const_val->type->id == ZigTypeIdComptimeFloat) {
return true;
diff --git a/std/math.zig b/std/math.zig
index e10c9329d9..e47021512e 100644
--- a/std/math.zig
+++ b/std/math.zig
@@ -305,6 +305,13 @@ test "math.min" {
testing.expect(@typeOf(result) == i16);
testing.expect(result == -200);
}
+ {
+ const a = 10.34;
+ var b: f32 = 999.12;
+ var result = min(a, b);
+ testing.expect(@typeOf(result) == f32);
+ testing.expect(result == 10.34);
+ }
}
pub fn max(x: var, y: var) @typeOf(x + y) {
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index a4bc2a66f0..437e40900d 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -3225,14 +3225,6 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
"tmp.zig:3:17: note: value 8 cannot fit into type u3",
);
- cases.add(
- "incompatible number literals",
- \\const x = 2 == 2.0;
- \\export fn entry() usize { return @sizeOf(@typeOf(x)); }
- ,
- "tmp.zig:1:11: error: integer value 2 cannot be implicitly casted to type 'comptime_float'",
- );
-
cases.add(
"missing function call param",
\\const Foo = struct {
diff --git a/test/stage1/behavior/cast.zig b/test/stage1/behavior/cast.zig
index c243f18088..04c7fa606f 100644
--- a/test/stage1/behavior/cast.zig
+++ b/test/stage1/behavior/cast.zig
@@ -508,7 +508,7 @@ test "peer type resolution: unreachable, null, slice" {
}
test "peer type resolution: unreachable, error set, unreachable" {
- const Error = error {
+ const Error = error{
FileDescriptorAlreadyPresentInSet,
OperationCausesCircularLoop,
FileDescriptorNotRegistered,
@@ -529,3 +529,8 @@ test "peer type resolution: unreachable, error set, unreachable" {
};
expect(transformed_err == error.SystemResources);
}
+
+test "implicit cast comptime_int to comptime_float" {
+ comptime expect(comptime_float(10) == f32(10));
+ expect(2 == 2.0);
+}
--
cgit v1.2.3
From b87686dfa094770e96da33fb23a7d011a168157c Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sun, 11 Aug 2019 13:43:44 -0400
Subject: fix enum with one member and custom tag type
---
src/ir.cpp | 1 -
test/stage1/behavior/enum.zig | 11 +++++++++++
2 files changed, 11 insertions(+), 1 deletion(-)
(limited to 'src/ir.cpp')
diff --git a/src/ir.cpp b/src/ir.cpp
index 13348d28c4..fde2b972f8 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -11938,7 +11938,6 @@ static IrInstruction *ir_analyze_enum_to_int(IrAnalyze *ira, IrInstruction *sour
if (enum_type->data.enumeration.layout == ContainerLayoutAuto &&
enum_type->data.enumeration.src_field_count == 1)
{
- assert(tag_type == ira->codegen->builtin_types.entry_num_lit_int);
IrInstruction *result = ir_const(ira, source_instr, tag_type);
init_const_bigint(&result->value, tag_type,
&enum_type->data.enumeration.fields[0].value);
diff --git a/test/stage1/behavior/enum.zig b/test/stage1/behavior/enum.zig
index 51f4f0e196..d7d34aec88 100644
--- a/test/stage1/behavior/enum.zig
+++ b/test/stage1/behavior/enum.zig
@@ -982,3 +982,14 @@ test "enum literal casting to tagged union" {
else => @panic("fail"),
}
}
+
+test "enum with one member and custom tag type" {
+ const E = enum(u2) {
+ One,
+ };
+ expect(@enumToInt(E.One) == 0);
+ const E2 = enum(u2) {
+ One = 2,
+ };
+ expect(@enumToInt(E2.One) == 2);
+}
--
cgit v1.2.3
From af8c6ccb4bcae7baf30f3b1032a98b82f39d9c26 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sun, 11 Aug 2019 14:26:34 -0400
Subject: fix canceling async functions which have error return tracing
---
src/codegen.cpp | 17 +++++++++++++++
src/ir.cpp | 64 +++++++++++++++++++++++++++++----------------------------
2 files changed, 50 insertions(+), 31 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 0db9b37c52..f1a42e321d 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -2071,6 +2071,7 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) {
LLVMPositionBuilderAtEnd(g->builder, entry_block);
ZigLLVMClearCurrentDebugLocation(g->builder);
+ // if (dest_stack_trace == null) return;
// var frame_index: usize = undefined;
// var frames_left: usize = undefined;
// if (src_stack_trace.index < src_stack_trace.instruction_addresses.len) {
@@ -2088,6 +2089,7 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) {
// frame_index = (frame_index + 1) % src_stack_trace.instruction_addresses.len;
// }
LLVMBasicBlockRef return_block = LLVMAppendBasicBlock(fn_val, "Return");
+ LLVMBasicBlockRef dest_non_null_block = LLVMAppendBasicBlock(fn_val, "DestNonNull");
LLVMValueRef frame_index_ptr = LLVMBuildAlloca(g->builder, g->builtin_types.entry_usize->llvm_type, "frame_index");
LLVMValueRef frames_left_ptr = LLVMBuildAlloca(g->builder, g->builtin_types.entry_usize->llvm_type, "frames_left");
@@ -2095,6 +2097,11 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) {
LLVMValueRef dest_stack_trace_ptr = LLVMGetParam(fn_val, 0);
LLVMValueRef src_stack_trace_ptr = LLVMGetParam(fn_val, 1);
+ LLVMValueRef null_dest_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, dest_stack_trace_ptr,
+ LLVMConstNull(LLVMTypeOf(dest_stack_trace_ptr)), "");
+ LLVMBuildCondBr(g->builder, null_dest_bit, return_block, dest_non_null_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, dest_non_null_block);
size_t src_index_field_index = g->stack_trace_type->data.structure.fields[0].gen_index;
size_t src_addresses_field_index = g->stack_trace_type->data.structure.fields[1].gen_index;
LLVMValueRef src_index_field_ptr = LLVMBuildStructGEP(g->builder, src_stack_trace_ptr,
@@ -5480,10 +5487,20 @@ static LLVMValueRef ir_render_cancel(CodeGen *g, IrExecutable *executable, IrIns
LLVMValueRef zero = LLVMConstNull(usize_type_ref);
LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref);
LLVMValueRef one = LLVMConstInt(usize_type_ref, 1, false);
+ src_assert(instruction->frame->value.type->id == ZigTypeIdAnyFrame, instruction->base.source_node);
+ ZigType *result_type = instruction->frame->value.type->data.any_frame.result_type;
LLVMValueRef target_frame_ptr = ir_llvm_value(g, instruction->frame);
LLVMBasicBlockRef resume_bb = gen_suspend_begin(g, "CancelResume");
+ // supply null for the error return trace pointer
+ if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) {
+ LLVMValueRef err_ret_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr,
+ frame_index_trace_arg(g, result_type), "");
+ LLVMBuildStore(g->builder, LLVMConstNull(LLVMGetElementType(LLVMTypeOf(err_ret_trace_ptr_ptr))),
+ err_ret_trace_ptr_ptr);
+ }
+
LLVMValueRef awaiter_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, "");
LLVMValueRef awaiter_ored_val = LLVMBuildOr(g->builder, awaiter_val, one, "");
LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, coro_awaiter_index, "");
diff --git a/src/ir.cpp b/src/ir.cpp
index 97971efd50..f1d4b80a2c 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -24656,26 +24656,51 @@ static IrInstruction *ir_analyze_instruction_suspend_finish(IrAnalyze *ira,
return ir_build_suspend_finish(&ira->new_irb, instruction->base.scope, instruction->base.source_node, begin);
}
-static IrInstruction *ir_analyze_instruction_cancel(IrAnalyze *ira, IrInstructionCancel *instruction) {
- IrInstruction *frame_ptr = instruction->frame->child;
+static IrInstruction *analyze_frame_ptr_to_anyframe_T(IrAnalyze *ira, IrInstruction *source_instr,
+ IrInstruction *frame_ptr)
+{
if (type_is_invalid(frame_ptr->value.type))
return ira->codegen->invalid_instruction;
+ ZigType *result_type;
IrInstruction *frame;
if (frame_ptr->value.type->id == ZigTypeIdPointer &&
frame_ptr->value.type->data.pointer.ptr_len == PtrLenSingle &&
frame_ptr->value.type->data.pointer.child_type->id == ZigTypeIdCoroFrame)
{
+ result_type = frame_ptr->value.type->data.pointer.child_type->data.frame.fn->type_entry->data.fn.fn_type_id.return_type;
frame = frame_ptr;
} else {
- frame = ir_get_deref(ira, &instruction->base, frame_ptr, nullptr);
+ frame = ir_get_deref(ira, source_instr, frame_ptr, nullptr);
+ if (frame->value.type->id == ZigTypeIdPointer &&
+ frame->value.type->data.pointer.ptr_len == PtrLenSingle &&
+ frame->value.type->data.pointer.child_type->id == ZigTypeIdCoroFrame)
+ {
+ result_type = frame->value.type->data.pointer.child_type->data.frame.fn->type_entry->data.fn.fn_type_id.return_type;
+ } else if (frame->value.type->id != ZigTypeIdAnyFrame ||
+ frame->value.type->data.any_frame.result_type == nullptr)
+ {
+ ir_add_error(ira, source_instr,
+ buf_sprintf("expected anyframe->T, found '%s'", buf_ptr(&frame->value.type->name)));
+ return ira->codegen->invalid_instruction;
+ } else {
+ result_type = frame->value.type->data.any_frame.result_type;
+ }
}
- ZigType *any_frame_type = get_any_frame_type(ira->codegen, nullptr);
+ ZigType *any_frame_type = get_any_frame_type(ira->codegen, result_type);
IrInstruction *casted_frame = ir_implicit_cast(ira, frame, any_frame_type);
if (type_is_invalid(casted_frame->value.type))
return ira->codegen->invalid_instruction;
+ return casted_frame;
+}
+
+static IrInstruction *ir_analyze_instruction_cancel(IrAnalyze *ira, IrInstructionCancel *instruction) {
+ IrInstruction *frame = analyze_frame_ptr_to_anyframe_T(ira, &instruction->base, instruction->frame->child);
+ if (type_is_invalid(frame->value.type))
+ return ira->codegen->invalid_instruction;
+
ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
ir_assert(fn_entry != nullptr, &instruction->base);
@@ -24683,38 +24708,15 @@ static IrInstruction *ir_analyze_instruction_cancel(IrAnalyze *ira, IrInstructio
fn_entry->inferred_async_node = instruction->base.source_node;
}
- return ir_build_cancel(&ira->new_irb, instruction->base.scope, instruction->base.source_node, casted_frame);
+ return ir_build_cancel(&ira->new_irb, instruction->base.scope, instruction->base.source_node, frame);
}
static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstructionAwaitSrc *instruction) {
- IrInstruction *frame_ptr = instruction->frame->child;
- if (type_is_invalid(frame_ptr->value.type))
+ IrInstruction *frame = analyze_frame_ptr_to_anyframe_T(ira, &instruction->base, instruction->frame->child);
+ if (type_is_invalid(frame->value.type))
return ira->codegen->invalid_instruction;
- ZigType *result_type;
- IrInstruction *frame;
- if (frame_ptr->value.type->id == ZigTypeIdPointer &&
- frame_ptr->value.type->data.pointer.ptr_len == PtrLenSingle &&
- frame_ptr->value.type->data.pointer.child_type->id == ZigTypeIdCoroFrame)
- {
- result_type = frame_ptr->value.type->data.pointer.child_type->data.frame.fn->type_entry->data.fn.fn_type_id.return_type;
- frame = frame_ptr;
- } else {
- frame = ir_get_deref(ira, &instruction->base, frame_ptr, nullptr);
- if (frame->value.type->id != ZigTypeIdAnyFrame ||
- frame->value.type->data.any_frame.result_type == nullptr)
- {
- ir_add_error(ira, &instruction->base,
- buf_sprintf("expected anyframe->T, found '%s'", buf_ptr(&frame->value.type->name)));
- return ira->codegen->invalid_instruction;
- }
- result_type = frame->value.type->data.any_frame.result_type;
- }
-
- ZigType *any_frame_type = get_any_frame_type(ira->codegen, result_type);
- IrInstruction *casted_frame = ir_implicit_cast(ira, frame, any_frame_type);
- if (type_is_invalid(casted_frame->value.type))
- return ira->codegen->invalid_instruction;
+ ZigType *result_type = frame->value.type->data.any_frame.result_type;
ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
ir_assert(fn_entry != nullptr, &instruction->base);
--
cgit v1.2.3
From 4d8d513e16d308131846d98267bc844bf702e9ce Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sun, 11 Aug 2019 19:53:10 -0400
Subject: all tests passing
---
BRANCH_TODO | 6 ++-
doc/docgen.zig | 2 +-
doc/langref.html.in | 81 ++++++++++++--------------------------
src/analyze.cpp | 2 +-
src/codegen.cpp | 49 ++++++++++++-----------
src/ir.cpp | 3 ++
std/event/channel.zig | 11 +++---
std/event/fs.zig | 102 ++++++++++++------------------------------------
std/event/future.zig | 45 +++++++++------------
std/event/group.zig | 68 ++++++++++----------------------
std/event/io.zig | 19 +++++----
std/event/lock.zig | 54 ++++++++++---------------
std/event/loop.zig | 4 +-
std/event/net.zig | 53 +++++++++++--------------
std/event/rwlock.zig | 85 ++++++++++++++++++++--------------------
std/zig/parser_test.zig | 2 +-
test/compile_errors.zig | 35 +++++------------
17 files changed, 240 insertions(+), 381 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/BRANCH_TODO b/BRANCH_TODO
index bd797a75a8..b2b293aec1 100644
--- a/BRANCH_TODO
+++ b/BRANCH_TODO
@@ -1,10 +1,13 @@
+ * for loops need to spill the index. other payload captures probably also need to spill
+ * compile error (instead of crashing) for trying to get @Frame of generic function
+ * compile error (instead of crashing) for trying to async call and passing @Frame of wrong function
+ * `const result = (await a) + (await b);` this causes "Instruction does not dominate all uses" - need spill
* compile error for error: expected anyframe->T, found 'anyframe'
* compile error for error: expected anyframe->T, found 'i32'
* await of a non async function
* async call on a non async function
* a test where an async function destroys its own frame in a defer
* implicit cast of normal function to async function should be allowed when it is inferred to be async
- * revive std.event.Loop
* @typeInfo for @Frame(func)
* peer type resolution of *@Frame(func) and anyframe
* peer type resolution of *@Frame(func) and anyframe->T when the return type matches
@@ -36,3 +39,4 @@
- it can be assumed that these are always available: the awaiter ptr, return ptr if applicable,
error return trace ptr if applicable.
- it can be assumed that it is never cancelled
+ * fix the debug info for variables of async functions
diff --git a/doc/docgen.zig b/doc/docgen.zig
index 3d3dcba76d..92764d7642 100644
--- a/doc/docgen.zig
+++ b/doc/docgen.zig
@@ -770,7 +770,7 @@ fn tokenizeAndPrintRaw(docgen_tokenizer: *Tokenizer, out: var, source_token: Tok
.Keyword_or,
.Keyword_orelse,
.Keyword_packed,
- .Keyword_promise,
+ .Keyword_anyframe,
.Keyword_pub,
.Keyword_resume,
.Keyword_return,
diff --git a/doc/langref.html.in b/doc/langref.html.in
index ac381e00b2..0cb76a4bdf 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -6024,13 +6024,14 @@ const assert = std.debug.assert;
var x: i32 = 1;
-test "create a coroutine and cancel it" {
- const p = try async simpleAsyncFn();
- comptime assert(@typeOf(p) == promise->void);
- cancel p;
+test "call an async function" {
+ var frame = async simpleAsyncFn();
+ comptime assert(@typeOf(frame) == @Frame(simpleAsyncFn));
assert(x == 2);
}
-async<*std.mem.Allocator> fn simpleAsyncFn() void {
+fn simpleAsyncFn() void {
+ x += 1;
+ suspend;
x += 1;
}
{#code_end#}
@@ -6041,60 +6042,33 @@ async<*std.mem.Allocator> fn simpleAsyncFn() void {
return to the caller or resumer. The following code demonstrates where control flow
goes:
- {#code_begin|test#}
-const std = @import("std");
-const assert = std.debug.assert;
-
-test "coroutine suspend, resume, cancel" {
- seq('a');
- const p = try async testAsyncSeq();
- seq('c');
- resume p;
- seq('f');
- cancel p;
- seq('g');
-
- assert(std.mem.eql(u8, points, "abcdefg"));
-}
-async fn testAsyncSeq() void {
- defer seq('e');
-
- seq('b');
- suspend;
- seq('d');
-}
-var points = [_]u8{0} ** "abcdefg".len;
-var index: usize = 0;
-
-fn seq(c: u8) void {
- points[index] = c;
- index += 1;
-}
- {#code_end#}
+
+ TODO another test example here
+
When an async function suspends itself, it must be sure that it will be
resumed or canceled somehow, for example by registering its promise handle
in an event loop. Use a suspend capture block to gain access to the
- promise:
+ promise (TODO this is outdated):
{#code_begin|test#}
const std = @import("std");
const assert = std.debug.assert;
+var the_frame: anyframe = undefined;
+var result = false;
+
test "coroutine suspend with block" {
- const p = try async testSuspendBlock();
+ _ = async testSuspendBlock();
std.debug.assert(!result);
- resume a_promise;
+ resume the_frame;
std.debug.assert(result);
- cancel p;
}
-var a_promise: promise = undefined;
-var result = false;
-async fn testSuspendBlock() void {
+fn testSuspendBlock() void {
suspend {
- comptime assert(@typeOf(@handle()) == promise->void);
- a_promise = @handle();
+ comptime assert(@typeOf(@frame()) == *@Frame(testSuspendBlock));
+ the_frame = @frame();
}
result = true;
}
@@ -6124,16 +6098,13 @@ const std = @import("std");
const assert = std.debug.assert;
test "resume from suspend" {
- var buf: [500]u8 = undefined;
- var a = &std.heap.FixedBufferAllocator.init(buf[0..]).allocator;
var my_result: i32 = 1;
- const p = try async testResumeFromSuspend(&my_result);
- cancel p;
+ _ = async testResumeFromSuspend(&my_result);
std.debug.assert(my_result == 2);
}
async fn testResumeFromSuspend(my_result: *i32) void {
suspend {
- resume @handle();
+ resume @frame();
}
my_result.* += 1;
suspend;
@@ -6172,30 +6143,30 @@ async fn testResumeFromSuspend(my_result: *i32) void {
const std = @import("std");
const assert = std.debug.assert;
-var a_promise: promise = undefined;
+var the_frame: anyframe = undefined;
var final_result: i32 = 0;
test "coroutine await" {
seq('a');
- const p = async amain() catch unreachable;
+ _ = async amain();
seq('f');
- resume a_promise;
+ resume the_frame;
seq('i');
assert(final_result == 1234);
assert(std.mem.eql(u8, seq_points, "abcdefghi"));
}
async fn amain() void {
seq('b');
- const p = async another() catch unreachable;
+ var f = async another();
seq('e');
- final_result = await p;
+ final_result = await f;
seq('h');
}
async fn another() i32 {
seq('c');
suspend {
seq('d');
- a_promise = @handle();
+ the_frame = @frame();
}
seq('g');
return 1234;
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 7482ba92ba..30aa82a216 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -5325,7 +5325,7 @@ static Error resolve_coro_frame(CodeGen *g, ZigType *frame_type) {
if (*instruction->name_hint == 0) {
name = buf_ptr(buf_sprintf("@local%" ZIG_PRI_usize, alloca_i));
} else {
- name = instruction->name_hint;
+ name = buf_ptr(buf_sprintf("%s.%" ZIG_PRI_usize, instruction->name_hint, alloca_i));
}
field_names.append(name);
field_types.append(child_type);
diff --git a/src/codegen.cpp b/src/codegen.cpp
index f1a42e321d..4510e7156c 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -535,24 +535,24 @@ static LLVMValueRef make_fn_llvm_value(CodeGen *g, ZigFn *fn) {
// use the ABI alignment, which is fine.
}
- unsigned init_gen_i = 0;
- if (!type_has_bits(return_type)) {
- // nothing to do
- } else if (type_is_nonnull_ptr(return_type)) {
- addLLVMAttr(llvm_fn, 0, "nonnull");
- } else if (!is_async && want_first_arg_sret(g, &fn_type->data.fn.fn_type_id)) {
- // Sret pointers must not be address 0
- addLLVMArgAttr(llvm_fn, 0, "nonnull");
- addLLVMArgAttr(llvm_fn, 0, "sret");
- if (cc_want_sret_attr(cc)) {
- addLLVMArgAttr(llvm_fn, 0, "noalias");
- }
- init_gen_i = 1;
- }
-
if (is_async) {
addLLVMArgAttr(llvm_fn, 0, "nonnull");
} else {
+ unsigned init_gen_i = 0;
+ if (!type_has_bits(return_type)) {
+ // nothing to do
+ } else if (type_is_nonnull_ptr(return_type)) {
+ addLLVMAttr(llvm_fn, 0, "nonnull");
+ } else if (want_first_arg_sret(g, &fn_type->data.fn.fn_type_id)) {
+ // Sret pointers must not be address 0
+ addLLVMArgAttr(llvm_fn, 0, "nonnull");
+ addLLVMArgAttr(llvm_fn, 0, "sret");
+ if (cc_want_sret_attr(cc)) {
+ addLLVMArgAttr(llvm_fn, 0, "noalias");
+ }
+ init_gen_i = 1;
+ }
+
// set parameter attributes
FnWalk fn_walk = {};
fn_walk.id = FnWalkIdAttrs;
@@ -911,7 +911,7 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) {
case PanicMsgIdBadResume:
return buf_create_from_str("resumed an async function which already returned");
case PanicMsgIdBadAwait:
- return buf_create_from_str("async function awaited/canceled twice");
+ return buf_create_from_str("async function awaited twice");
case PanicMsgIdBadReturn:
return buf_create_from_str("async function returned twice");
case PanicMsgIdResumedAnAwaitingFn:
@@ -2350,6 +2350,10 @@ static LLVMValueRef ir_render_return_begin(CodeGen *g, IrExecutable *executable,
return get_handle_value(g, g->cur_ret_ptr, operand_type, get_pointer_to_type(g, operand_type, true));
}
+static void set_tail_call_if_appropriate(CodeGen *g, LLVMValueRef call_inst) {
+ LLVMSetTailCall(call_inst, true);
+}
+
static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrInstructionReturn *instruction) {
if (fn_is_async(g->cur_fn)) {
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
@@ -2394,7 +2398,7 @@ static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrIns
LLVMValueRef their_frame_ptr = LLVMBuildIntToPtr(g->builder, masked_prev_val,
get_llvm_type(g, any_frame_type), "");
LLVMValueRef call_inst = gen_resume(g, nullptr, their_frame_ptr, ResumeIdReturn, nullptr);
- LLVMSetTailCall(call_inst, true);
+ set_tail_call_if_appropriate(g, call_inst);
LLVMBuildRetVoid(g->builder);
g->cur_is_after_return = false;
@@ -4009,7 +4013,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
LLVMBasicBlockRef call_bb = gen_suspend_begin(g, "CallResume");
LLVMValueRef call_inst = gen_resume(g, fn_val, frame_result_loc, ResumeIdCall, nullptr);
- LLVMSetTailCall(call_inst, true);
+ set_tail_call_if_appropriate(g, call_inst);
LLVMBuildRetVoid(g->builder);
LLVMPositionBuilderAtEnd(g->builder, call_bb);
@@ -5520,7 +5524,7 @@ static LLVMValueRef ir_render_cancel(CodeGen *g, IrExecutable *executable, IrIns
LLVMPositionBuilderAtEnd(g->builder, early_return_block);
LLVMValueRef call_inst = gen_resume(g, nullptr, target_frame_ptr, ResumeIdAwaitEarlyReturn, awaiter_ored_val);
- LLVMSetTailCall(call_inst, true);
+ set_tail_call_if_appropriate(g, call_inst);
LLVMBuildRetVoid(g->builder);
LLVMPositionBuilderAtEnd(g->builder, resume_bb);
@@ -5556,8 +5560,9 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst
}
// supply the error return trace pointer
- LLVMValueRef my_err_ret_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope);
- if (my_err_ret_trace_val != nullptr) {
+ if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) {
+ LLVMValueRef my_err_ret_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope);
+ assert(my_err_ret_trace_val != nullptr);
LLVMValueRef err_ret_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr,
frame_index_trace_arg(g, result_type), "");
LLVMBuildStore(g->builder, my_err_ret_trace_val, err_ret_trace_ptr_ptr);
@@ -5588,7 +5593,7 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst
// Tail resume it now, so that it can complete.
LLVMPositionBuilderAtEnd(g->builder, early_return_block);
LLVMValueRef call_inst = gen_resume(g, nullptr, target_frame_ptr, ResumeIdAwaitEarlyReturn, awaiter_init_val);
- LLVMSetTailCall(call_inst, true);
+ set_tail_call_if_appropriate(g, call_inst);
LLVMBuildRetVoid(g->builder);
// Rely on the target to resume us from suspension.
diff --git a/src/ir.cpp b/src/ir.cpp
index f1d4b80a2c..57c50db818 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -15064,6 +15064,9 @@ static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc
if (result_loc != nullptr && (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc))) {
return result_loc;
}
+ result_loc = ir_implicit_cast(ira, result_loc, get_pointer_to_type(ira->codegen, frame_type, false));
+ if (type_is_invalid(result_loc->value.type))
+ return ira->codegen->invalid_instruction;
return &ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref, arg_count,
casted_args, FnInlineAuto, true, nullptr, result_loc, frame_type)->base;
}
diff --git a/std/event/channel.zig b/std/event/channel.zig
index c9686e37e9..c4f7dca085 100644
--- a/std/event/channel.zig
+++ b/std/event/channel.zig
@@ -77,18 +77,19 @@ pub fn Channel(comptime T: type) type {
/// must be called when all calls to put and get have suspended and no more calls occur
pub fn destroy(self: *SelfChannel) void {
while (self.getters.get()) |get_node| {
- cancel get_node.data.tick_node.data;
+ resume get_node.data.tick_node.data;
}
while (self.putters.get()) |put_node| {
- cancel put_node.data.tick_node.data;
+ resume put_node.data.tick_node.data;
}
self.loop.allocator.free(self.buffer_nodes);
self.loop.allocator.destroy(self);
}
- /// puts a data item in the channel. The promise completes when the value has been added to the
+ /// puts a data item in the channel. The function returns when the value has been added to the
/// buffer, or in the case of a zero size buffer, when the item has been retrieved by a getter.
- pub async fn put(self: *SelfChannel, data: T) void {
+ /// Or when the channel is destroyed.
+ pub fn put(self: *SelfChannel, data: T) void {
var my_tick_node = Loop.NextTickNode.init(@frame());
var queue_node = std.atomic.Queue(PutNode).Node.init(PutNode{
.tick_node = &my_tick_node,
@@ -114,7 +115,7 @@ pub fn Channel(comptime T: type) type {
}
}
- /// await this function to get an item from the channel. If the buffer is empty, the promise will
+ /// await this function to get an item from the channel. If the buffer is empty, the frame will
/// complete when the next item is put in the channel.
pub async fn get(self: *SelfChannel) T {
// TODO integrate this function with named return values
diff --git a/std/event/fs.zig b/std/event/fs.zig
index 22e9fc38c9..fe2f604ac3 100644
--- a/std/event/fs.zig
+++ b/std/event/fs.zig
@@ -76,12 +76,8 @@ pub const Request = struct {
pub const PWriteVError = error{OutOfMemory} || File.WriteError;
-/// data - just the inner references - must live until pwritev promise completes.
+/// data - just the inner references - must live until pwritev frame completes.
pub async fn pwritev(loop: *Loop, fd: fd_t, data: []const []const u8, offset: usize) PWriteVError!void {
- // workaround for https://github.com/ziglang/zig/issues/1194
- suspend {
- resume @handle();
- }
switch (builtin.os) {
.macosx,
.linux,
@@ -109,7 +105,7 @@ pub async fn pwritev(loop: *Loop, fd: fd_t, data: []const []const u8, offset: us
}
}
-/// data must outlive the returned promise
+/// data must outlive the returned frame
pub async fn pwritevWindows(loop: *Loop, fd: fd_t, data: []const []const u8, offset: usize) os.WindowsWriteError!void {
if (data.len == 0) return;
if (data.len == 1) return await (async pwriteWindows(loop, fd, data[0], offset) catch unreachable);
@@ -123,15 +119,10 @@ pub async fn pwritevWindows(loop: *Loop, fd: fd_t, data: []const []const u8, off
}
pub async fn pwriteWindows(loop: *Loop, fd: fd_t, data: []const u8, offset: u64) os.WindowsWriteError!void {
- // workaround for https://github.com/ziglang/zig/issues/1194
- suspend {
- resume @handle();
- }
-
var resume_node = Loop.ResumeNode.Basic{
.base = Loop.ResumeNode{
.id = Loop.ResumeNode.Id.Basic,
- .handle = @handle(),
+ .handle = @frame(),
.overlapped = windows.OVERLAPPED{
.Internal = 0,
.InternalHigh = 0,
@@ -166,18 +157,13 @@ pub async fn pwriteWindows(loop: *Loop, fd: fd_t, data: []const u8, offset: u64)
}
}
-/// iovecs must live until pwritev promise completes.
+/// iovecs must live until pwritev frame completes.
pub async fn pwritevPosix(
loop: *Loop,
fd: fd_t,
iovecs: []const os.iovec_const,
offset: usize,
) os.WriteError!void {
- // workaround for https://github.com/ziglang/zig/issues/1194
- suspend {
- resume @handle();
- }
-
var req_node = RequestNode{
.prev = null,
.next = null,
@@ -194,7 +180,7 @@ pub async fn pwritevPosix(
.TickNode = Loop.NextTickNode{
.prev = null,
.next = null,
- .data = @handle(),
+ .data = @frame(),
},
},
},
@@ -211,13 +197,8 @@ pub async fn pwritevPosix(
pub const PReadVError = error{OutOfMemory} || File.ReadError;
-/// data - just the inner references - must live until preadv promise completes.
+/// data - just the inner references - must live until preadv frame completes.
pub async fn preadv(loop: *Loop, fd: fd_t, data: []const []u8, offset: usize) PReadVError!usize {
- // workaround for https://github.com/ziglang/zig/issues/1194
- suspend {
- resume @handle();
- }
-
assert(data.len != 0);
switch (builtin.os) {
.macosx,
@@ -246,7 +227,7 @@ pub async fn preadv(loop: *Loop, fd: fd_t, data: []const []u8, offset: usize) PR
}
}
-/// data must outlive the returned promise
+/// data must outlive the returned frame
pub async fn preadvWindows(loop: *Loop, fd: fd_t, data: []const []u8, offset: u64) !usize {
assert(data.len != 0);
if (data.len == 1) return await (async preadWindows(loop, fd, data[0], offset) catch unreachable);
@@ -272,15 +253,10 @@ pub async fn preadvWindows(loop: *Loop, fd: fd_t, data: []const []u8, offset: u6
}
pub async fn preadWindows(loop: *Loop, fd: fd_t, data: []u8, offset: u64) !usize {
- // workaround for https://github.com/ziglang/zig/issues/1194
- suspend {
- resume @handle();
- }
-
var resume_node = Loop.ResumeNode.Basic{
.base = Loop.ResumeNode{
.id = Loop.ResumeNode.Id.Basic,
- .handle = @handle(),
+ .handle = @frame(),
.overlapped = windows.OVERLAPPED{
.Internal = 0,
.InternalHigh = 0,
@@ -314,18 +290,13 @@ pub async fn preadWindows(loop: *Loop, fd: fd_t, data: []u8, offset: u64) !usize
return usize(bytes_transferred);
}
-/// iovecs must live until preadv promise completes
+/// iovecs must live until preadv frame completes
pub async fn preadvPosix(
loop: *Loop,
fd: fd_t,
iovecs: []const os.iovec,
offset: usize,
) os.ReadError!usize {
- // workaround for https://github.com/ziglang/zig/issues/1194
- suspend {
- resume @handle();
- }
-
var req_node = RequestNode{
.prev = null,
.next = null,
@@ -342,7 +313,7 @@ pub async fn preadvPosix(
.TickNode = Loop.NextTickNode{
.prev = null,
.next = null,
- .data = @handle(),
+ .data = @frame(),
},
},
},
@@ -363,11 +334,6 @@ pub async fn openPosix(
flags: u32,
mode: File.Mode,
) File.OpenError!fd_t {
- // workaround for https://github.com/ziglang/zig/issues/1194
- suspend {
- resume @handle();
- }
-
const path_c = try std.os.toPosixPath(path);
var req_node = RequestNode{
@@ -386,7 +352,7 @@ pub async fn openPosix(
.TickNode = Loop.NextTickNode{
.prev = null,
.next = null,
- .data = @handle(),
+ .data = @frame(),
},
},
},
@@ -643,11 +609,6 @@ async fn writeFileWindows(loop: *Loop, path: []const u8, contents: []const u8) !
}
async fn writeFileModeThread(loop: *Loop, path: []const u8, contents: []const u8, mode: File.Mode) !void {
- // workaround for https://github.com/ziglang/zig/issues/1194
- suspend {
- resume @handle();
- }
-
const path_with_null = try std.cstr.addNullByte(loop.allocator, path);
defer loop.allocator.free(path_with_null);
@@ -667,7 +628,7 @@ async fn writeFileModeThread(loop: *Loop, path: []const u8, contents: []const u8
.TickNode = Loop.NextTickNode{
.prev = null,
.next = null,
- .data = @handle(),
+ .data = @frame(),
},
},
},
@@ -682,7 +643,7 @@ async fn writeFileModeThread(loop: *Loop, path: []const u8, contents: []const u8
return req_node.data.msg.WriteFile.result;
}
-/// The promise resumes when the last data has been confirmed written, but before the file handle
+/// The frame resumes when the last data has been confirmed written, but before the file handle
/// is closed.
/// Caller owns returned memory.
pub async fn readFile(loop: *Loop, file_path: []const u8, max_size: usize) ![]u8 {
@@ -734,7 +695,7 @@ pub const WatchEventId = enum {
//
// const FileTable = std.AutoHashMap([]const u8, *Put);
// const Put = struct {
-// putter: promise,
+// putter: anyframe,
// value_ptr: *V,
// };
// },
@@ -748,21 +709,21 @@ pub const WatchEventId = enum {
// const WindowsOsData = struct {
// table_lock: event.Lock,
// dir_table: DirTable,
-// all_putters: std.atomic.Queue(promise),
+// all_putters: std.atomic.Queue(anyframe),
// ref_count: std.atomic.Int(usize),
//
// const DirTable = std.AutoHashMap([]const u8, *Dir);
// const FileTable = std.AutoHashMap([]const u16, V);
//
// const Dir = struct {
-// putter: promise,
+// putter: anyframe,
// file_table: FileTable,
// table_lock: event.Lock,
// };
// };
//
// const LinuxOsData = struct {
-// putter: promise,
+// putter: anyframe,
// inotify_fd: i32,
// wd_table: WdTable,
// table_lock: event.Lock,
@@ -776,7 +737,7 @@ pub const WatchEventId = enum {
// };
// };
//
-// const FileToHandle = std.AutoHashMap([]const u8, promise);
+// const FileToHandle = std.AutoHashMap([]const u8, anyframe);
//
// const Self = @This();
//
@@ -811,7 +772,7 @@ pub const WatchEventId = enum {
// .table_lock = event.Lock.init(loop),
// .dir_table = OsData.DirTable.init(loop.allocator),
// .ref_count = std.atomic.Int(usize).init(1),
-// .all_putters = std.atomic.Queue(promise).init(),
+// .all_putters = std.atomic.Queue(anyframe).init(),
// },
// };
// return self;
@@ -926,14 +887,9 @@ pub const WatchEventId = enum {
// }
//
// async fn kqPutEvents(self: *Self, close_op: *CloseOperation, value: V, out_put: **OsData.Put) void {
-// // TODO https://github.com/ziglang/zig/issues/1194
-// suspend {
-// resume @handle();
-// }
-//
// var value_copy = value;
// var put = OsData.Put{
-// .putter = @handle(),
+// .putter = @frame(),
// .value_ptr = &value_copy,
// };
// out_put.* = &put;
@@ -1091,18 +1047,13 @@ pub const WatchEventId = enum {
// }
//
// async fn windowsDirReader(self: *Self, dir_handle: windows.HANDLE, dir: *OsData.Dir) void {
-// // TODO https://github.com/ziglang/zig/issues/1194
-// suspend {
-// resume @handle();
-// }
-//
// self.ref();
// defer self.deref();
//
// defer os.close(dir_handle);
//
-// var putter_node = std.atomic.Queue(promise).Node{
-// .data = @handle(),
+// var putter_node = std.atomic.Queue(anyframe).Node{
+// .data = @frame(),
// .prev = null,
// .next = null,
// };
@@ -1112,7 +1063,7 @@ pub const WatchEventId = enum {
// var resume_node = Loop.ResumeNode.Basic{
// .base = Loop.ResumeNode{
// .id = Loop.ResumeNode.Id.Basic,
-// .handle = @handle(),
+// .handle = @frame(),
// .overlapped = windows.OVERLAPPED{
// .Internal = 0,
// .InternalHigh = 0,
@@ -1207,17 +1158,12 @@ pub const WatchEventId = enum {
// }
//
// async fn linuxEventPutter(inotify_fd: i32, channel: *event.Channel(Event.Error!Event), out_watch: **Self) void {
-// // TODO https://github.com/ziglang/zig/issues/1194
-// suspend {
-// resume @handle();
-// }
-//
// const loop = channel.loop;
//
// var watch = Self{
// .channel = channel,
// .os_data = OsData{
-// .putter = @handle(),
+// .putter = @frame(),
// .inotify_fd = inotify_fd,
// .wd_table = OsData.WdTable.init(loop.allocator),
// .table_lock = event.Lock.init(loop),
diff --git a/std/event/future.zig b/std/event/future.zig
index 2e62ace978..11a4c82fb0 100644
--- a/std/event/future.zig
+++ b/std/event/future.zig
@@ -2,8 +2,6 @@ const std = @import("../std.zig");
const assert = std.debug.assert;
const testing = std.testing;
const builtin = @import("builtin");
-const AtomicRmwOp = builtin.AtomicRmwOp;
-const AtomicOrder = builtin.AtomicOrder;
const Lock = std.event.Lock;
const Loop = std.event.Loop;
@@ -23,7 +21,7 @@ pub fn Future(comptime T: type) type {
available: u8,
const Self = @This();
- const Queue = std.atomic.Queue(promise);
+ const Queue = std.atomic.Queue(anyframe);
pub fn init(loop: *Loop) Self {
return Self{
@@ -37,10 +35,10 @@ pub fn Future(comptime T: type) type {
/// available.
/// Thread-safe.
pub async fn get(self: *Self) *T {
- if (@atomicLoad(u8, &self.available, AtomicOrder.SeqCst) == 2) {
+ if (@atomicLoad(u8, &self.available, .SeqCst) == 2) {
return &self.data;
}
- const held = await (async self.lock.acquire() catch unreachable);
+ const held = self.lock.acquire();
held.release();
return &self.data;
@@ -49,7 +47,7 @@ pub fn Future(comptime T: type) type {
/// Gets the data without waiting for it. If it's available, a pointer is
/// returned. Otherwise, null is returned.
pub fn getOrNull(self: *Self) ?*T {
- if (@atomicLoad(u8, &self.available, AtomicOrder.SeqCst) == 2) {
+ if (@atomicLoad(u8, &self.available, .SeqCst) == 2) {
return &self.data;
} else {
return null;
@@ -62,10 +60,10 @@ pub fn Future(comptime T: type) type {
/// It's not required to call start() before resolve() but it can be useful since
/// this method is thread-safe.
pub async fn start(self: *Self) ?*T {
- const state = @cmpxchgStrong(u8, &self.available, 0, 1, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse return null;
+ const state = @cmpxchgStrong(u8, &self.available, 0, 1, .SeqCst, .SeqCst) orelse return null;
switch (state) {
1 => {
- const held = await (async self.lock.acquire() catch unreachable);
+ const held = self.lock.acquire();
held.release();
return &self.data;
},
@@ -77,7 +75,7 @@ pub fn Future(comptime T: type) type {
/// Make the data become available. May be called only once.
/// Before calling this, modify the `data` property.
pub fn resolve(self: *Self) void {
- const prev = @atomicRmw(u8, &self.available, AtomicRmwOp.Xchg, 2, AtomicOrder.SeqCst);
+ const prev = @atomicRmw(u8, &self.available, .Xchg, 2, .SeqCst);
assert(prev == 0 or prev == 1); // resolve() called twice
Lock.Held.release(Lock.Held{ .lock = &self.lock });
}
@@ -86,7 +84,7 @@ pub fn Future(comptime T: type) type {
test "std.event.Future" {
// https://github.com/ziglang/zig/issues/1908
- if (builtin.single_threaded or builtin.os != builtin.Os.linux) return error.SkipZigTest;
+ if (builtin.single_threaded) return error.SkipZigTest;
const allocator = std.heap.direct_allocator;
@@ -94,38 +92,33 @@ test "std.event.Future" {
try loop.initMultiThreaded(allocator);
defer loop.deinit();
- const handle = try async testFuture(&loop);
- defer cancel handle;
+ const handle = async testFuture(&loop);
loop.run();
}
async fn testFuture(loop: *Loop) void {
- suspend {
- resume @handle();
- }
var future = Future(i32).init(loop);
- const a = async waitOnFuture(&future) catch @panic("memory");
- const b = async waitOnFuture(&future) catch @panic("memory");
- const c = async resolveFuture(&future) catch @panic("memory");
+ const a = async waitOnFuture(&future);
+ const b = async waitOnFuture(&future);
+ const c = async resolveFuture(&future);
+
+ // TODO make this work:
+ //const result = (await a) + (await b);
+ const a_result = await a;
+ const b_result = await b;
+ const result = a_result + b_result;
- const result = (await a) + (await b);
cancel c;
testing.expect(result == 12);
}
async fn waitOnFuture(future: *Future(i32)) i32 {
- suspend {
- resume @handle();
- }
- return (await (async future.get() catch @panic("memory"))).*;
+ return future.get().*;
}
async fn resolveFuture(future: *Future(i32)) void {
- suspend {
- resume @handle();
- }
future.data = 6;
future.resolve();
}
diff --git a/std/event/group.zig b/std/event/group.zig
index 36235eed74..ab6d592278 100644
--- a/std/event/group.zig
+++ b/std/event/group.zig
@@ -2,8 +2,6 @@ const std = @import("../std.zig");
const builtin = @import("builtin");
const Lock = std.event.Lock;
const Loop = std.event.Loop;
-const AtomicRmwOp = builtin.AtomicRmwOp;
-const AtomicOrder = builtin.AtomicOrder;
const testing = std.testing;
/// ReturnType must be `void` or `E!void`
@@ -16,10 +14,10 @@ pub fn Group(comptime ReturnType: type) type {
const Self = @This();
const Error = switch (@typeInfo(ReturnType)) {
- builtin.TypeId.ErrorUnion => |payload| payload.error_set,
+ .ErrorUnion => |payload| payload.error_set,
else => void,
};
- const Stack = std.atomic.Stack(promise->ReturnType);
+ const Stack = std.atomic.Stack(anyframe->ReturnType);
pub fn init(loop: *Loop) Self {
return Self{
@@ -29,7 +27,7 @@ pub fn Group(comptime ReturnType: type) type {
};
}
- /// Cancel all the outstanding promises. Can be called even if wait was already called.
+ /// Cancel all the outstanding frames. Can be called even if wait was already called.
pub fn deinit(self: *Self) void {
while (self.coro_stack.pop()) |node| {
cancel node.data;
@@ -40,8 +38,8 @@ pub fn Group(comptime ReturnType: type) type {
}
}
- /// Add a promise to the group. Thread-safe.
- pub fn add(self: *Self, handle: promise->ReturnType) (error{OutOfMemory}!void) {
+ /// Add a frame to the group. Thread-safe.
+ pub fn add(self: *Self, handle: anyframe->ReturnType) (error{OutOfMemory}!void) {
const node = try self.lock.loop.allocator.create(Stack.Node);
node.* = Stack.Node{
.next = undefined,
@@ -51,7 +49,7 @@ pub fn Group(comptime ReturnType: type) type {
}
/// Add a node to the group. Thread-safe. Cannot fail.
- /// `node.data` should be the promise handle to add to the group.
+ /// `node.data` should be the frame handle to add to the group.
/// The node's memory should be in the coroutine frame of
/// the handle that is in the node, or somewhere guaranteed to live
/// at least as long.
@@ -59,40 +57,11 @@ pub fn Group(comptime ReturnType: type) type {
self.coro_stack.push(node);
}
- /// This is equivalent to an async call, but the async function is added to the group, instead
- /// of returning a promise. func must be async and have return type ReturnType.
- /// Thread-safe.
- pub fn call(self: *Self, comptime func: var, args: ...) (error{OutOfMemory}!void) {
- const S = struct {
- async fn asyncFunc(node: **Stack.Node, args2: ...) ReturnType {
- // TODO this is a hack to make the memory following be inside the coro frame
- suspend {
- var my_node: Stack.Node = undefined;
- node.* = &my_node;
- resume @handle();
- }
-
- // TODO this allocation elision should be guaranteed because we await it in
- // this coro frame
- return await (async func(args2) catch unreachable);
- }
- };
- var node: *Stack.Node = undefined;
- const handle = try async S.asyncFunc(&node, args);
- node.* = Stack.Node{
- .next = undefined,
- .data = handle,
- };
- self.coro_stack.push(node);
- }
-
/// Wait for all the calls and promises of the group to complete.
/// Thread-safe.
/// Safe to call any number of times.
pub async fn wait(self: *Self) ReturnType {
- // TODO catch unreachable because the allocation can be grouped with
- // the coro frame allocation
- const held = await (async self.lock.acquire() catch unreachable);
+ const held = self.lock.acquire();
defer held.release();
while (self.coro_stack.pop()) |node| {
@@ -131,8 +100,7 @@ test "std.event.Group" {
try loop.initMultiThreaded(allocator);
defer loop.deinit();
- const handle = try async testGroup(&loop);
- defer cancel handle;
+ const handle = async testGroup(&loop);
loop.run();
}
@@ -140,26 +108,30 @@ test "std.event.Group" {
async fn testGroup(loop: *Loop) void {
var count: usize = 0;
var group = Group(void).init(loop);
- group.add(async sleepALittle(&count) catch @panic("memory")) catch @panic("memory");
- group.call(increaseByTen, &count) catch @panic("memory");
- await (async group.wait() catch @panic("memory"));
+ var sleep_a_little_frame = async sleepALittle(&count);
+ group.add(&sleep_a_little_frame) catch @panic("memory");
+ var increase_by_ten_frame = async increaseByTen(&count);
+ group.add(&increase_by_ten_frame) catch @panic("memory");
+ group.wait();
testing.expect(count == 11);
var another = Group(anyerror!void).init(loop);
- another.add(async somethingElse() catch @panic("memory")) catch @panic("memory");
- another.call(doSomethingThatFails) catch @panic("memory");
- testing.expectError(error.ItBroke, await (async another.wait() catch @panic("memory")));
+ var something_else_frame = async somethingElse();
+ another.add(&something_else_frame) catch @panic("memory");
+ var something_that_fails_frame = async doSomethingThatFails();
+ another.add(&something_that_fails_frame) catch @panic("memory");
+ testing.expectError(error.ItBroke, another.wait());
}
async fn sleepALittle(count: *usize) void {
std.time.sleep(1 * std.time.millisecond);
- _ = @atomicRmw(usize, count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ _ = @atomicRmw(usize, count, .Add, 1, .SeqCst);
}
async fn increaseByTen(count: *usize) void {
var i: usize = 0;
while (i < 10) : (i += 1) {
- _ = @atomicRmw(usize, count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ _ = @atomicRmw(usize, count, .Add, 1, .SeqCst);
}
}
diff --git a/std/event/io.zig b/std/event/io.zig
index 29419a792e..4b54822e68 100644
--- a/std/event/io.zig
+++ b/std/event/io.zig
@@ -1,6 +1,5 @@
const std = @import("../std.zig");
const builtin = @import("builtin");
-const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const mem = std.mem;
@@ -12,13 +11,13 @@ pub fn InStream(comptime ReadError: type) type {
/// Return the number of bytes read. It may be less than buffer.len.
/// If the number of bytes read is 0, it means end of stream.
/// End of stream is not an error condition.
- readFn: async<*Allocator> fn (self: *Self, buffer: []u8) Error!usize,
+ readFn: async fn (self: *Self, buffer: []u8) Error!usize,
/// Return the number of bytes read. It may be less than buffer.len.
/// If the number of bytes read is 0, it means end of stream.
/// End of stream is not an error condition.
pub async fn read(self: *Self, buffer: []u8) !usize {
- return await (async self.readFn(self, buffer) catch unreachable);
+ return self.readFn(self, buffer);
}
/// Return the number of bytes read. If it is less than buffer.len
@@ -26,7 +25,7 @@ pub fn InStream(comptime ReadError: type) type {
pub async fn readFull(self: *Self, buffer: []u8) !usize {
var index: usize = 0;
while (index != buf.len) {
- const amt_read = try await (async self.read(buf[index..]) catch unreachable);
+ const amt_read = try self.read(buf[index..]);
if (amt_read == 0) return index;
index += amt_read;
}
@@ -35,25 +34,25 @@ pub fn InStream(comptime ReadError: type) type {
/// Same as `readFull` but end of stream returns `error.EndOfStream`.
pub async fn readNoEof(self: *Self, buf: []u8) !void {
- const amt_read = try await (async self.readFull(buf[index..]) catch unreachable);
+ const amt_read = try self.readFull(buf[index..]);
if (amt_read < buf.len) return error.EndOfStream;
}
pub async fn readIntLittle(self: *Self, comptime T: type) !T {
var bytes: [@sizeOf(T)]u8 = undefined;
- try await (async self.readNoEof(bytes[0..]) catch unreachable);
+ try self.readNoEof(bytes[0..]);
return mem.readIntLittle(T, &bytes);
}
pub async fn readIntBe(self: *Self, comptime T: type) !T {
var bytes: [@sizeOf(T)]u8 = undefined;
- try await (async self.readNoEof(bytes[0..]) catch unreachable);
+ try self.readNoEof(bytes[0..]);
return mem.readIntBig(T, &bytes);
}
pub async fn readInt(self: *Self, comptime T: type, endian: builtin.Endian) !T {
var bytes: [@sizeOf(T)]u8 = undefined;
- try await (async self.readNoEof(bytes[0..]) catch unreachable);
+ try self.readNoEof(bytes[0..]);
return mem.readInt(T, &bytes, endian);
}
@@ -61,7 +60,7 @@ pub fn InStream(comptime ReadError: type) type {
// Only extern and packed structs have defined in-memory layout.
comptime assert(@typeInfo(T).Struct.layout != builtin.TypeInfo.ContainerLayout.Auto);
var res: [1]T = undefined;
- try await (async self.readNoEof(@sliceToBytes(res[0..])) catch unreachable);
+ try self.readNoEof(@sliceToBytes(res[0..]));
return res[0];
}
};
@@ -72,6 +71,6 @@ pub fn OutStream(comptime WriteError: type) type {
const Self = @This();
pub const Error = WriteError;
- writeFn: async<*Allocator> fn (self: *Self, buffer: []u8) Error!void,
+ writeFn: async fn (self: *Self, buffer: []u8) Error!void,
};
}
diff --git a/std/event/lock.zig b/std/event/lock.zig
index d86902cc06..8f2dac008c 100644
--- a/std/event/lock.zig
+++ b/std/event/lock.zig
@@ -3,8 +3,6 @@ const builtin = @import("builtin");
const assert = std.debug.assert;
const testing = std.testing;
const mem = std.mem;
-const AtomicRmwOp = builtin.AtomicRmwOp;
-const AtomicOrder = builtin.AtomicOrder;
const Loop = std.event.Loop;
/// Thread-safe async/await lock.
@@ -17,7 +15,7 @@ pub const Lock = struct {
queue: Queue,
queue_empty_bit: u8, // TODO make this a bool
- const Queue = std.atomic.Queue(promise);
+ const Queue = std.atomic.Queue(anyframe);
pub const Held = struct {
lock: *Lock,
@@ -30,19 +28,19 @@ pub const Lock = struct {
}
// We need to release the lock.
- _ = @atomicRmw(u8, &self.lock.queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
- _ = @atomicRmw(u8, &self.lock.shared_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+ _ = @atomicRmw(u8, &self.lock.queue_empty_bit, .Xchg, 1, .SeqCst);
+ _ = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 0, .SeqCst);
// There might be a queue item. If we know the queue is empty, we can be done,
// because the other actor will try to obtain the lock.
// But if there's a queue item, we are the actor which must loop and attempt
// to grab the lock again.
- if (@atomicLoad(u8, &self.lock.queue_empty_bit, AtomicOrder.SeqCst) == 1) {
+ if (@atomicLoad(u8, &self.lock.queue_empty_bit, .SeqCst) == 1) {
return;
}
while (true) {
- const old_bit = @atomicRmw(u8, &self.lock.shared_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ const old_bit = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 1, .SeqCst);
if (old_bit != 0) {
// We did not obtain the lock. Great, the queue is someone else's problem.
return;
@@ -55,11 +53,11 @@ pub const Lock = struct {
}
// Release the lock again.
- _ = @atomicRmw(u8, &self.lock.queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
- _ = @atomicRmw(u8, &self.lock.shared_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+ _ = @atomicRmw(u8, &self.lock.queue_empty_bit, .Xchg, 1, .SeqCst);
+ _ = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 0, .SeqCst);
// Find out if we can be done.
- if (@atomicLoad(u8, &self.lock.queue_empty_bit, AtomicOrder.SeqCst) == 1) {
+ if (@atomicLoad(u8, &self.lock.queue_empty_bit, .SeqCst) == 1) {
return;
}
}
@@ -88,15 +86,11 @@ pub const Lock = struct {
/// All calls to acquire() and release() must complete before calling deinit().
pub fn deinit(self: *Lock) void {
assert(self.shared_bit == 0);
- while (self.queue.get()) |node| cancel node.data;
+ while (self.queue.get()) |node| resume node.data;
}
pub async fn acquire(self: *Lock) Held {
- // TODO explicitly put this memory in the coroutine frame #1194
- suspend {
- resume @handle();
- }
- var my_tick_node = Loop.NextTickNode.init(@handle());
+ var my_tick_node = Loop.NextTickNode.init(@frame());
errdefer _ = self.queue.remove(&my_tick_node); // TODO test canceling an acquire
suspend {
@@ -107,9 +101,9 @@ pub const Lock = struct {
// We set this bit so that later we can rely on the fact, that if queue_empty_bit is 1, some actor
// will attempt to grab the lock.
- _ = @atomicRmw(u8, &self.queue_empty_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+ _ = @atomicRmw(u8, &self.queue_empty_bit, .Xchg, 0, .SeqCst);
- const old_bit = @atomicRmw(u8, &self.shared_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ const old_bit = @atomicRmw(u8, &self.shared_bit, .Xchg, 1, .SeqCst);
if (old_bit == 0) {
if (self.queue.get()) |node| {
// Whether this node is us or someone else, we tail resume it.
@@ -123,8 +117,7 @@ pub const Lock = struct {
};
test "std.event.Lock" {
- // TODO https://github.com/ziglang/zig/issues/2377
- if (true) return error.SkipZigTest;
+ // TODO https://github.com/ziglang/zig/issues/1908
if (builtin.single_threaded) return error.SkipZigTest;
const allocator = std.heap.direct_allocator;
@@ -136,39 +129,34 @@ test "std.event.Lock" {
var lock = Lock.init(&loop);
defer lock.deinit();
- const handle = try async testLock(&loop, &lock);
- defer cancel handle;
+ _ = async testLock(&loop, &lock);
loop.run();
testing.expectEqualSlices(i32, [1]i32{3 * @intCast(i32, shared_test_data.len)} ** shared_test_data.len, shared_test_data);
}
async fn testLock(loop: *Loop, lock: *Lock) void {
- // TODO explicitly put next tick node memory in the coroutine frame #1194
- suspend {
- resume @handle();
- }
- const handle1 = async lockRunner(lock) catch @panic("out of memory");
+ const handle1 = async lockRunner(lock);
var tick_node1 = Loop.NextTickNode{
.prev = undefined,
.next = undefined,
- .data = handle1,
+ .data = &handle1,
};
loop.onNextTick(&tick_node1);
- const handle2 = async lockRunner(lock) catch @panic("out of memory");
+ const handle2 = async lockRunner(lock);
var tick_node2 = Loop.NextTickNode{
.prev = undefined,
.next = undefined,
- .data = handle2,
+ .data = &handle2,
};
loop.onNextTick(&tick_node2);
- const handle3 = async lockRunner(lock) catch @panic("out of memory");
+ const handle3 = async lockRunner(lock);
var tick_node3 = Loop.NextTickNode{
.prev = undefined,
.next = undefined,
- .data = handle3,
+ .data = &handle3,
};
loop.onNextTick(&tick_node3);
@@ -185,7 +173,7 @@ async fn lockRunner(lock: *Lock) void {
var i: usize = 0;
while (i < shared_test_data.len) : (i += 1) {
- const lock_promise = async lock.acquire() catch @panic("out of memory");
+ const lock_promise = async lock.acquire();
const handle = await lock_promise;
defer handle.release();
diff --git a/std/event/loop.zig b/std/event/loop.zig
index a4a60b5098..f1febd3fdb 100644
--- a/std/event/loop.zig
+++ b/std/event/loop.zig
@@ -457,7 +457,7 @@ pub const Loop = struct {
var resume_node = ResumeNode.Basic{
.base = ResumeNode{
.id = ResumeNode.Id.Basic,
- .handle = @handle(),
+ .handle = @frame(),
.overlapped = ResumeNode.overlapped_init,
},
};
@@ -469,7 +469,7 @@ pub const Loop = struct {
var resume_node = ResumeNode.Basic{
.base = ResumeNode{
.id = ResumeNode.Id.Basic,
- .handle = @handle(),
+ .handle = @frame(),
.overlapped = ResumeNode.overlapped_init,
},
.kev = undefined,
diff --git a/std/event/net.zig b/std/event/net.zig
index 46b724e32e..3752c88e99 100644
--- a/std/event/net.zig
+++ b/std/event/net.zig
@@ -9,17 +9,17 @@ const File = std.fs.File;
const fd_t = os.fd_t;
pub const Server = struct {
- handleRequestFn: async<*mem.Allocator> fn (*Server, *const std.net.Address, File) void,
+ handleRequestFn: async fn (*Server, *const std.net.Address, File) void,
loop: *Loop,
sockfd: ?i32,
- accept_coro: ?promise,
+ accept_coro: ?anyframe,
listen_address: std.net.Address,
waiting_for_emfile_node: PromiseNode,
listen_resume_node: event.Loop.ResumeNode,
- const PromiseNode = std.TailQueue(promise).Node;
+ const PromiseNode = std.TailQueue(anyframe).Node;
pub fn init(loop: *Loop) Server {
// TODO can't initialize handler coroutine here because we need well defined copy elision
@@ -41,7 +41,7 @@ pub const Server = struct {
pub fn listen(
self: *Server,
address: *const std.net.Address,
- handleRequestFn: async<*mem.Allocator> fn (*Server, *const std.net.Address, File) void,
+ handleRequestFn: async fn (*Server, *const std.net.Address, File) void,
) !void {
self.handleRequestFn = handleRequestFn;
@@ -53,7 +53,7 @@ pub const Server = struct {
try os.listen(sockfd, os.SOMAXCONN);
self.listen_address = std.net.Address.initPosix(try os.getsockname(sockfd));
- self.accept_coro = try async Server.handler(self);
+ self.accept_coro = async Server.handler(self);
errdefer cancel self.accept_coro.?;
self.listen_resume_node.handle = self.accept_coro.?;
@@ -86,12 +86,7 @@ pub const Server = struct {
continue;
}
var socket = File.openHandle(accepted_fd);
- _ = async self.handleRequestFn(self, &accepted_addr, socket) catch |err| switch (err) {
- error.OutOfMemory => {
- socket.close();
- continue;
- },
- };
+ self.handleRequestFn(self, &accepted_addr, socket);
} else |err| switch (err) {
error.ProcessFdQuotaExceeded => @panic("TODO handle this error"),
error.ConnectionAborted => continue,
@@ -124,7 +119,7 @@ pub async fn connectUnixSocket(loop: *Loop, path: []const u8) !i32 {
mem.copy(u8, sock_addr.path[0..], path);
const size = @intCast(u32, @sizeOf(os.sa_family_t) + path.len);
try os.connect_async(sockfd, &sock_addr, size);
- try await try async loop.linuxWaitFd(sockfd, os.EPOLLIN | os.EPOLLOUT | os.EPOLLET);
+ try loop.linuxWaitFd(sockfd, os.EPOLLIN | os.EPOLLOUT | os.EPOLLET);
try os.getsockoptError(sockfd);
return sockfd;
@@ -149,7 +144,7 @@ pub async fn read(loop: *std.event.Loop, fd: fd_t, buffer: []u8) ReadError!usize
.iov_len = buffer.len,
};
const iovs: *const [1]os.iovec = &iov;
- return await (async readvPosix(loop, fd, iovs, 1) catch unreachable);
+ return readvPosix(loop, fd, iovs, 1);
}
pub const WriteError = error{};
@@ -160,7 +155,7 @@ pub async fn write(loop: *std.event.Loop, fd: fd_t, buffer: []const u8) WriteErr
.iov_len = buffer.len,
};
const iovs: *const [1]os.iovec_const = &iov;
- return await (async writevPosix(loop, fd, iovs, 1) catch unreachable);
+ return writevPosix(loop, fd, iovs, 1);
}
pub async fn writevPosix(loop: *Loop, fd: i32, iov: [*]const os.iovec_const, count: usize) !void {
@@ -174,7 +169,7 @@ pub async fn writevPosix(loop: *Loop, fd: i32, iov: [*]const os.iovec_const, cou
os.EINVAL => unreachable,
os.EFAULT => unreachable,
os.EAGAIN => {
- try await (async loop.linuxWaitFd(fd, os.EPOLLET | os.EPOLLOUT) catch unreachable);
+ try loop.linuxWaitFd(fd, os.EPOLLET | os.EPOLLOUT);
continue;
},
os.EBADF => unreachable, // always a race condition
@@ -205,7 +200,7 @@ pub async fn readvPosix(loop: *std.event.Loop, fd: i32, iov: [*]os.iovec, count:
os.EINVAL => unreachable,
os.EFAULT => unreachable,
os.EAGAIN => {
- try await (async loop.linuxWaitFd(fd, os.EPOLLET | os.EPOLLIN) catch unreachable);
+ try loop.linuxWaitFd(fd, os.EPOLLET | os.EPOLLIN);
continue;
},
os.EBADF => unreachable, // always a race condition
@@ -232,7 +227,7 @@ pub async fn writev(loop: *Loop, fd: fd_t, data: []const []const u8) !void {
};
}
- return await (async writevPosix(loop, fd, iovecs.ptr, data.len) catch unreachable);
+ return writevPosix(loop, fd, iovecs.ptr, data.len);
}
pub async fn readv(loop: *Loop, fd: fd_t, data: []const []u8) !usize {
@@ -246,7 +241,7 @@ pub async fn readv(loop: *Loop, fd: fd_t, data: []const []u8) !usize {
};
}
- return await (async readvPosix(loop, fd, iovecs.ptr, data.len) catch unreachable);
+ return readvPosix(loop, fd, iovecs.ptr, data.len);
}
pub async fn connect(loop: *Loop, _address: *const std.net.Address) !File {
@@ -256,7 +251,7 @@ pub async fn connect(loop: *Loop, _address: *const std.net.Address) !File {
errdefer os.close(sockfd);
try os.connect_async(sockfd, &address.os_addr, @sizeOf(os.sockaddr_in));
- try await try async loop.linuxWaitFd(sockfd, os.EPOLLIN | os.EPOLLOUT | os.EPOLLET);
+ try loop.linuxWaitFd(sockfd, os.EPOLLIN | os.EPOLLOUT | os.EPOLLET);
try os.getsockoptError(sockfd);
return File.openHandle(sockfd);
@@ -275,17 +270,16 @@ test "listen on a port, send bytes, receive bytes" {
tcp_server: Server,
const Self = @This();
- async<*mem.Allocator> fn handler(tcp_server: *Server, _addr: *const std.net.Address, _socket: File) void {
+ async fn handler(tcp_server: *Server, _addr: *const std.net.Address, _socket: File) void {
const self = @fieldParentPtr(Self, "tcp_server", tcp_server);
var socket = _socket; // TODO https://github.com/ziglang/zig/issues/1592
defer socket.close();
// TODO guarantee elision of this allocation
- const next_handler = async errorableHandler(self, _addr, socket) catch unreachable;
- (await next_handler) catch |err| {
+ const next_handler = errorableHandler(self, _addr, socket) catch |err| {
std.debug.panic("unable to handle connection: {}\n", err);
};
suspend {
- cancel @handle();
+ cancel @frame();
}
}
async fn errorableHandler(self: *Self, _addr: *const std.net.Address, _socket: File) !void {
@@ -306,15 +300,14 @@ test "listen on a port, send bytes, receive bytes" {
defer server.tcp_server.deinit();
try server.tcp_server.listen(&addr, MyServer.handler);
- const p = try async doAsyncTest(&loop, &server.tcp_server.listen_address, &server.tcp_server);
- defer cancel p;
+ _ = async doAsyncTest(&loop, &server.tcp_server.listen_address, &server.tcp_server);
loop.run();
}
async fn doAsyncTest(loop: *Loop, address: *const std.net.Address, server: *Server) void {
errdefer @panic("test failure");
- var socket_file = try await try async connect(loop, address);
+ var socket_file = try connect(loop, address);
defer socket_file.close();
var buf: [512]u8 = undefined;
@@ -340,9 +333,9 @@ pub const OutStream = struct {
};
}
- async<*mem.Allocator> fn writeFn(out_stream: *Stream, bytes: []const u8) Error!void {
+ async fn writeFn(out_stream: *Stream, bytes: []const u8) Error!void {
const self = @fieldParentPtr(OutStream, "stream", out_stream);
- return await (async write(self.loop, self.fd, bytes) catch unreachable);
+ return write(self.loop, self.fd, bytes);
}
};
@@ -362,8 +355,8 @@ pub const InStream = struct {
};
}
- async<*mem.Allocator> fn readFn(in_stream: *Stream, bytes: []u8) Error!usize {
+ async fn readFn(in_stream: *Stream, bytes: []u8) Error!usize {
const self = @fieldParentPtr(InStream, "stream", in_stream);
- return await (async read(self.loop, self.fd, bytes) catch unreachable);
+ return read(self.loop, self.fd, bytes);
}
};
diff --git a/std/event/rwlock.zig b/std/event/rwlock.zig
index 7b97fa24c1..a5768e5b65 100644
--- a/std/event/rwlock.zig
+++ b/std/event/rwlock.zig
@@ -3,8 +3,6 @@ const builtin = @import("builtin");
const assert = std.debug.assert;
const testing = std.testing;
const mem = std.mem;
-const AtomicRmwOp = builtin.AtomicRmwOp;
-const AtomicOrder = builtin.AtomicOrder;
const Loop = std.event.Loop;
/// Thread-safe async/await lock.
@@ -28,19 +26,19 @@ pub const RwLock = struct {
const ReadLock = 2;
};
- const Queue = std.atomic.Queue(promise);
+ const Queue = std.atomic.Queue(anyframe);
pub const HeldRead = struct {
lock: *RwLock,
pub fn release(self: HeldRead) void {
// If other readers still hold the lock, we're done.
- if (@atomicRmw(usize, &self.lock.reader_lock_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst) != 1) {
+ if (@atomicRmw(usize, &self.lock.reader_lock_count, .Sub, 1, .SeqCst) != 1) {
return;
}
- _ = @atomicRmw(u8, &self.lock.reader_queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
- if (@cmpxchgStrong(u8, &self.lock.shared_state, State.ReadLock, State.Unlocked, AtomicOrder.SeqCst, AtomicOrder.SeqCst) != null) {
+ _ = @atomicRmw(u8, &self.lock.reader_queue_empty_bit, .Xchg, 1, .SeqCst);
+ if (@cmpxchgStrong(u8, &self.lock.shared_state, State.ReadLock, State.Unlocked, .SeqCst, .SeqCst) != null) {
// Didn't unlock. Someone else's problem.
return;
}
@@ -61,17 +59,17 @@ pub const RwLock = struct {
}
// We need to release the write lock. Check if any readers are waiting to grab the lock.
- if (@atomicLoad(u8, &self.lock.reader_queue_empty_bit, AtomicOrder.SeqCst) == 0) {
+ if (@atomicLoad(u8, &self.lock.reader_queue_empty_bit, .SeqCst) == 0) {
// Switch to a read lock.
- _ = @atomicRmw(u8, &self.lock.shared_state, AtomicRmwOp.Xchg, State.ReadLock, AtomicOrder.SeqCst);
+ _ = @atomicRmw(u8, &self.lock.shared_state, .Xchg, State.ReadLock, .SeqCst);
while (self.lock.reader_queue.get()) |node| {
self.lock.loop.onNextTick(node);
}
return;
}
- _ = @atomicRmw(u8, &self.lock.writer_queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
- _ = @atomicRmw(u8, &self.lock.shared_state, AtomicRmwOp.Xchg, State.Unlocked, AtomicOrder.SeqCst);
+ _ = @atomicRmw(u8, &self.lock.writer_queue_empty_bit, .Xchg, 1, .SeqCst);
+ _ = @atomicRmw(u8, &self.lock.shared_state, .Xchg, State.Unlocked, .SeqCst);
self.lock.commonPostUnlock();
}
@@ -93,17 +91,16 @@ pub const RwLock = struct {
/// All calls to acquire() and release() must complete before calling deinit().
pub fn deinit(self: *RwLock) void {
assert(self.shared_state == State.Unlocked);
- while (self.writer_queue.get()) |node| cancel node.data;
- while (self.reader_queue.get()) |node| cancel node.data;
+ while (self.writer_queue.get()) |node| resume node.data;
+ while (self.reader_queue.get()) |node| resume node.data;
}
pub async fn acquireRead(self: *RwLock) HeldRead {
- _ = @atomicRmw(usize, &self.reader_lock_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ _ = @atomicRmw(usize, &self.reader_lock_count, .Add, 1, .SeqCst);
suspend {
- // TODO explicitly put this memory in the coroutine frame #1194
var my_tick_node = Loop.NextTickNode{
- .data = @handle(),
+ .data = @frame(),
.prev = undefined,
.next = undefined,
};
@@ -115,10 +112,10 @@ pub const RwLock = struct {
// We set this bit so that later we can rely on the fact, that if reader_queue_empty_bit is 1,
// some actor will attempt to grab the lock.
- _ = @atomicRmw(u8, &self.reader_queue_empty_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+ _ = @atomicRmw(u8, &self.reader_queue_empty_bit, .Xchg, 0, .SeqCst);
// Here we don't care if we are the one to do the locking or if it was already locked for reading.
- const have_read_lock = if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.ReadLock, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |old_state| old_state == State.ReadLock else true;
+ const have_read_lock = if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.ReadLock, .SeqCst, .SeqCst)) |old_state| old_state == State.ReadLock else true;
if (have_read_lock) {
// Give out all the read locks.
if (self.reader_queue.get()) |first_node| {
@@ -134,9 +131,8 @@ pub const RwLock = struct {
pub async fn acquireWrite(self: *RwLock) HeldWrite {
suspend {
- // TODO explicitly put this memory in the coroutine frame #1194
var my_tick_node = Loop.NextTickNode{
- .data = @handle(),
+ .data = @frame(),
.prev = undefined,
.next = undefined,
};
@@ -148,10 +144,10 @@ pub const RwLock = struct {
// We set this bit so that later we can rely on the fact, that if writer_queue_empty_bit is 1,
// some actor will attempt to grab the lock.
- _ = @atomicRmw(u8, &self.writer_queue_empty_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+ _ = @atomicRmw(u8, &self.writer_queue_empty_bit, .Xchg, 0, .SeqCst);
// Here we must be the one to acquire the write lock. It cannot already be locked.
- if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.WriteLock, AtomicOrder.SeqCst, AtomicOrder.SeqCst) == null) {
+ if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.WriteLock, .SeqCst, .SeqCst) == null) {
// We now have a write lock.
if (self.writer_queue.get()) |node| {
// Whether this node is us or someone else, we tail resume it.
@@ -169,8 +165,8 @@ pub const RwLock = struct {
// obtain the lock.
// But if there's a writer_queue item or a reader_queue item,
// we are the actor which must loop and attempt to grab the lock again.
- if (@atomicLoad(u8, &self.writer_queue_empty_bit, AtomicOrder.SeqCst) == 0) {
- if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.WriteLock, AtomicOrder.SeqCst, AtomicOrder.SeqCst) != null) {
+ if (@atomicLoad(u8, &self.writer_queue_empty_bit, .SeqCst) == 0) {
+ if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.WriteLock, .SeqCst, .SeqCst) != null) {
// We did not obtain the lock. Great, the queues are someone else's problem.
return;
}
@@ -180,13 +176,13 @@ pub const RwLock = struct {
return;
}
// Release the lock again.
- _ = @atomicRmw(u8, &self.writer_queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
- _ = @atomicRmw(u8, &self.shared_state, AtomicRmwOp.Xchg, State.Unlocked, AtomicOrder.SeqCst);
+ _ = @atomicRmw(u8, &self.writer_queue_empty_bit, .Xchg, 1, .SeqCst);
+ _ = @atomicRmw(u8, &self.shared_state, .Xchg, State.Unlocked, .SeqCst);
continue;
}
- if (@atomicLoad(u8, &self.reader_queue_empty_bit, AtomicOrder.SeqCst) == 0) {
- if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.ReadLock, AtomicOrder.SeqCst, AtomicOrder.SeqCst) != null) {
+ if (@atomicLoad(u8, &self.reader_queue_empty_bit, .SeqCst) == 0) {
+ if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.ReadLock, .SeqCst, .SeqCst) != null) {
// We did not obtain the lock. Great, the queues are someone else's problem.
return;
}
@@ -199,8 +195,8 @@ pub const RwLock = struct {
return;
}
// Release the lock again.
- _ = @atomicRmw(u8, &self.reader_queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
- if (@cmpxchgStrong(u8, &self.shared_state, State.ReadLock, State.Unlocked, AtomicOrder.SeqCst, AtomicOrder.SeqCst) != null) {
+ _ = @atomicRmw(u8, &self.reader_queue_empty_bit, .Xchg, 1, .SeqCst);
+ if (@cmpxchgStrong(u8, &self.shared_state, State.ReadLock, State.Unlocked, .SeqCst, .SeqCst) != null) {
// Didn't unlock. Someone else's problem.
return;
}
@@ -215,6 +211,9 @@ test "std.event.RwLock" {
// https://github.com/ziglang/zig/issues/2377
if (true) return error.SkipZigTest;
+ // https://github.com/ziglang/zig/issues/1908
+ if (builtin.single_threaded) return error.SkipZigTest;
+
const allocator = std.heap.direct_allocator;
var loop: Loop = undefined;
@@ -224,8 +223,7 @@ test "std.event.RwLock" {
var lock = RwLock.init(&loop);
defer lock.deinit();
- const handle = try async testLock(&loop, &lock);
- defer cancel handle;
+ const handle = testLock(&loop, &lock);
loop.run();
const expected_result = [1]i32{shared_it_count * @intCast(i32, shared_test_data.len)} ** shared_test_data.len;
@@ -233,28 +231,31 @@ test "std.event.RwLock" {
}
async fn testLock(loop: *Loop, lock: *RwLock) void {
- // TODO explicitly put next tick node memory in the coroutine frame #1194
- suspend {
- resume @handle();
- }
-
var read_nodes: [100]Loop.NextTickNode = undefined;
for (read_nodes) |*read_node| {
- read_node.data = async readRunner(lock) catch @panic("out of memory");
+ const frame = loop.allocator.create(@Frame(readRunner)) catch @panic("memory");
+ read_node.data = frame;
+ frame.* = async readRunner(lock);
loop.onNextTick(read_node);
}
var write_nodes: [shared_it_count]Loop.NextTickNode = undefined;
for (write_nodes) |*write_node| {
- write_node.data = async writeRunner(lock) catch @panic("out of memory");
+ const frame = loop.allocator.create(@Frame(writeRunner)) catch @panic("memory");
+ write_node.data = frame;
+ frame.* = async writeRunner(lock);
loop.onNextTick(write_node);
}
for (write_nodes) |*write_node| {
- await @ptrCast(promise->void, write_node.data);
+ const casted = @ptrCast(*const @Frame(writeRunner), write_node.data);
+ await casted;
+ loop.allocator.destroy(casted);
}
for (read_nodes) |*read_node| {
- await @ptrCast(promise->void, read_node.data);
+ const casted = @ptrCast(*const @Frame(readRunner), read_node.data);
+ await casted;
+ loop.allocator.destroy(casted);
}
}
@@ -269,7 +270,7 @@ async fn writeRunner(lock: *RwLock) void {
var i: usize = 0;
while (i < shared_test_data.len) : (i += 1) {
std.time.sleep(100 * std.time.microsecond);
- const lock_promise = async lock.acquireWrite() catch @panic("out of memory");
+ const lock_promise = async lock.acquireWrite();
const handle = await lock_promise;
defer handle.release();
@@ -287,7 +288,7 @@ async fn readRunner(lock: *RwLock) void {
var i: usize = 0;
while (i < shared_test_data.len) : (i += 1) {
- const lock_promise = async lock.acquireRead() catch @panic("out of memory");
+ const lock_promise = async lock.acquireRead();
const handle = await lock_promise;
defer handle.release();
diff --git a/std/zig/parser_test.zig b/std/zig/parser_test.zig
index 7407528bf5..aec1ef96b5 100644
--- a/std/zig/parser_test.zig
+++ b/std/zig/parser_test.zig
@@ -1183,7 +1183,7 @@ test "zig fmt: resume from suspend block" {
try testCanonical(
\\fn foo() void {
\\ suspend {
- \\ resume @handle();
+ \\ resume @frame();
\\ }
\\}
\\
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 810e40b18b..835f968e23 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -1403,24 +1403,14 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
);
cases.add(
- "@handle() called outside of function definition",
- \\var handle_undef: promise = undefined;
- \\var handle_dummy: promise = @handle();
+ "@frame() called outside of function definition",
+ \\var handle_undef: anyframe = undefined;
+ \\var handle_dummy: anyframe = @frame();
\\export fn entry() bool {
\\ return handle_undef == handle_dummy;
\\}
,
- "tmp.zig:2:29: error: @handle() called outside of function definition",
- );
-
- cases.add(
- "@handle() in non-async function",
- \\export fn entry() bool {
- \\ var handle_undef: promise = undefined;
- \\ return handle_undef == @handle();
- \\}
- ,
- "tmp.zig:3:28: error: @handle() in non-async function",
+ "tmp.zig:2:30: error: @frame() called outside of function definition",
);
cases.add(
@@ -1796,15 +1786,9 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
cases.add(
"suspend inside suspend block",
- \\const std = @import("std",);
- \\
\\export fn entry() void {
- \\ var buf: [500]u8 = undefined;
- \\ var a = &std.heap.FixedBufferAllocator.init(buf[0..]).allocator;
- \\ const p = (async foo()) catch unreachable;
- \\ cancel p;
+ \\ _ = async foo();
\\}
- \\
\\async fn foo() void {
\\ suspend {
\\ suspend {
@@ -1812,8 +1796,8 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ }
\\}
,
- "tmp.zig:12:9: error: cannot suspend inside suspend block",
- "tmp.zig:11:5: note: other suspend block here",
+ "tmp.zig:6:9: error: cannot suspend inside suspend block",
+ "tmp.zig:5:5: note: other suspend block here",
);
cases.add(
@@ -1854,15 +1838,14 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
cases.add(
"returning error from void async function",
- \\const std = @import("std",);
\\export fn entry() void {
- \\ const p = async amain() catch unreachable;
+ \\ _ = async amain();
\\}
\\async fn amain() void {
\\ return error.ShouldBeCompileError;
\\}
,
- "tmp.zig:6:17: error: expected type 'void', found 'error{ShouldBeCompileError}'",
+ "tmp.zig:5:17: error: expected type 'void', found 'error{ShouldBeCompileError}'",
);
cases.add(
--
cgit v1.2.3
From 50926341036c3ba377215d1c70c3e97adb07a292 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 13 Aug 2019 14:14:19 -0400
Subject: avoid the word "coroutine", they're "async functions"
---
BRANCH_TODO | 2 +-
doc/langref.html.in | 40 ++++++++++-----------
src-self-hosted/ir.zig | 14 --------
src-self-hosted/link.zig | 2 +-
src-self-hosted/main.zig | 2 +-
src-self-hosted/stage1.zig | 3 +-
src/all_types.hpp | 24 ++++++-------
src/analyze.cpp | 90 +++++++++++++++++++++++-----------------------
src/analyze.hpp | 2 +-
src/codegen.cpp | 62 ++++++++++++++++----------------
src/ir.cpp | 62 ++++++++++++++++----------------
src/ir_print.cpp | 9 +++--
src/zig_llvm.cpp | 3 --
std/event/fs.zig | 6 ++--
std/event/future.zig | 2 +-
std/event/group.zig | 12 +++----
std/event/lock.zig | 5 ++-
std/event/locked.zig | 2 +-
std/event/loop.zig | 2 +-
std/event/net.zig | 14 ++++----
std/event/rwlock.zig | 8 ++---
std/event/rwlocked.zig | 2 +-
std/zig/parser_test.zig | 6 ++--
23 files changed, 175 insertions(+), 199 deletions(-)
(limited to 'src/ir.cpp')
diff --git a/BRANCH_TODO b/BRANCH_TODO
index 77ea14c06f..cac275eb75 100644
--- a/BRANCH_TODO
+++ b/BRANCH_TODO
@@ -1,4 +1,4 @@
- * grep for "coroutine" and "coro" and replace all that nomenclature with "async functions"
+ * zig fmt support for the syntax
* alignment of variables not being respected in async functions
* await of a non async function
* async call on a non async function
diff --git a/doc/langref.html.in b/doc/langref.html.in
index 0cb76a4bdf..23e4dd194e 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -5968,9 +5968,10 @@ test "global assembly" {
TODO: @atomic rmw
TODO: builtin atomic memory ordering enum
{#header_close#}
- {#header_open|Coroutines#}
+ {#header_open|Async Functions#}
- A coroutine is a generalization of a function.
+ An async function is a function whose callsite is split into an {#syntax#}async{#endsyntax#} initiation,
+ followed by an {#syntax#}await{#endsyntax#} completion. They can also be canceled.
When you call a function, it creates a stack frame,
@@ -5980,14 +5981,14 @@ test "global assembly" {
until the function returns.
- A coroutine is like a function, but it can be suspended
+ An async function is like a function, but it can be suspended
and resumed any number of times, and then it must be
- explicitly destroyed. When a coroutine suspends, it
+ explicitly destroyed. When an async function suspends, it
returns to the resumer.
- {#header_open|Minimal Coroutine Example#}
+ {#header_open|Minimal Async Function Example#}
- Declare a coroutine with the {#syntax#}async{#endsyntax#} keyword.
+ Declare an async function with the {#syntax#}async{#endsyntax#} keyword.
The expression in angle brackets must evaluate to a struct
which has these fields:
@@ -6006,8 +6007,8 @@ test "global assembly" {
the function generic. Zig will infer the allocator type when the async function is called.
- Call a coroutine with the {#syntax#}async{#endsyntax#} keyword. Here, the expression in angle brackets
- is a pointer to the allocator struct that the coroutine expects.
+ Call an async function with the {#syntax#}async{#endsyntax#} keyword. Here, the expression in angle brackets
+ is a pointer to the allocator struct that the async function expects.
The result of an async function call is a {#syntax#}promise->T{#endsyntax#} type, where {#syntax#}T{#endsyntax#}
@@ -6058,7 +6059,7 @@ const assert = std.debug.assert;
var the_frame: anyframe = undefined;
var result = false;
-test "coroutine suspend with block" {
+test "async function suspend with block" {
_ = async testSuspendBlock();
std.debug.assert(!result);
resume the_frame;
@@ -6074,7 +6075,7 @@ fn testSuspendBlock() void {
}
{#code_end#}
- Every suspend point in an async function represents a point at which the coroutine
+ Every suspend point in an async function represents a point at which the async function
could be destroyed. If that happens, {#syntax#}defer{#endsyntax#} expressions that are in
scope are run, as well as {#syntax#}errdefer{#endsyntax#} expressions.
@@ -6083,14 +6084,14 @@ fn testSuspendBlock() void {
{#header_open|Resuming from Suspend Blocks#}
- Upon entering a {#syntax#}suspend{#endsyntax#} block, the coroutine is already considered
+ Upon entering a {#syntax#}suspend{#endsyntax#} block, the async function is already considered
suspended, and can be resumed. For example, if you started another kernel thread,
and had that thread call {#syntax#}resume{#endsyntax#} on the promise handle provided by the
{#syntax#}suspend{#endsyntax#} block, the new thread would begin executing after the suspend
block, while the old thread continued executing the suspend block.
- However, the coroutine can be directly resumed from the suspend block, in which case it
+ However, the async function can be directly resumed from the suspend block, in which case it
never returns to its resumer and continues executing.
{#code_begin|test#}
@@ -6127,8 +6128,8 @@ async fn testResumeFromSuspend(my_result: *i32) void {
If the async function associated with the promise handle has already returned,
then {#syntax#}await{#endsyntax#} destroys the target async function, and gives the return value.
Otherwise, {#syntax#}await{#endsyntax#} suspends the current async function, registering its
- promise handle with the target coroutine. It becomes the target coroutine's responsibility
- to have ensured that it will be resumed or destroyed. When the target coroutine reaches
+ promise handle with the target async function. It becomes the target async function's responsibility
+ to have ensured that it will be resumed or destroyed. When the target async function reaches
its return statement, it gives the return value to the awaiter, destroys itself, and then
resumes the awaiter.
@@ -6137,7 +6138,7 @@ async fn testResumeFromSuspend(my_result: *i32) void {
{#syntax#}await{#endsyntax#} counts as a suspend point, and therefore at every {#syntax#}await{#endsyntax#},
- a coroutine can be potentially destroyed, which would run {#syntax#}defer{#endsyntax#} and {#syntax#}errdefer{#endsyntax#} expressions.
+ an async function can be potentially destroyed, which would run {#syntax#}defer{#endsyntax#} and {#syntax#}errdefer{#endsyntax#} expressions.
{#code_begin|test#}
const std = @import("std");
@@ -6146,7 +6147,7 @@ const assert = std.debug.assert;
var the_frame: anyframe = undefined;
var final_result: i32 = 0;
-test "coroutine await" {
+test "async function await" {
seq('a');
_ = async amain();
seq('f');
@@ -6188,7 +6189,7 @@ fn seq(c: u8) void {
{#header_close#}
{#header_open|Open Issues#}
- There are a few issues with coroutines that are considered unresolved. Best be aware of them,
+ There are a few issues with async functions that are considered unresolved. Best be aware of them,
as the situation is likely to change before 1.0.0: