Diffstat (limited to 'src/analyze.cpp')
-rw-r--r--  src/analyze.cpp  1111
1 file changed, 873 insertions, 238 deletions
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 672e75a5ee..21289f24a8 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -7,6 +7,7 @@
#include "analyze.hpp"
#include "ast_render.hpp"
+#include "codegen.hpp"
#include "config.h"
#include "error.hpp"
#include "ir.hpp"
@@ -31,6 +32,11 @@ static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_r
static void preview_use_decl(CodeGen *g, TldUsingNamespace *using_namespace, ScopeDecls *dest_decls_scope);
static void resolve_use_decl(CodeGen *g, TldUsingNamespace *tld_using_namespace, ScopeDecls *dest_decls_scope);
+// nullptr means not analyzed yet; this one means currently being analyzed
+static const AstNode *inferred_async_checking = reinterpret_cast<AstNode *>(0x1);
+// this one means analyzed and it's not async
+static const AstNode *inferred_async_none = reinterpret_cast<AstNode *>(0x2);
+
static bool is_top_level_struct(ZigType *import) {
return import->id == ZigTypeIdStruct && import->data.structure.root_struct != nullptr;
}
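
Note: the two sentinel values added above let the single `inferred_async_node` pointer encode four states: nullptr (not analyzed), 0x1 (currently being analyzed), 0x2 (analyzed, not async), or a real AstNode* (async, and that node is the reason). A minimal standalone sketch of the same tagged-pointer trick, not taken from this diff and with invented names:

    // sketch.cpp -- illustrates the encoding used by inferred_async_node
    #include <cassert>
    #include <cstdio>

    struct AstNode; // opaque: the sentinels are never dereferenced

    static const AstNode *checking = reinterpret_cast<AstNode *>(0x1);
    static const AstNode *none = reinterpret_cast<AstNode *>(0x2);

    static bool is_async(const AstNode *inferred) {
        // the answer must already be decided, mirroring fn_is_async() in this diff
        assert(inferred != nullptr && inferred != checking);
        return inferred != none;
    }

    int main(void) {
        printf("%d\n", is_async(none)); // prints 0: analyzed and not async
        return 0;
    }
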
@@ -56,14 +62,14 @@ ErrorMsg *add_token_error(CodeGen *g, ZigType *owner, Token *token, Buf *msg) {
return err;
}
-ErrorMsg *add_node_error(CodeGen *g, AstNode *node, Buf *msg) {
+ErrorMsg *add_node_error(CodeGen *g, const AstNode *node, Buf *msg) {
Token fake_token;
fake_token.start_line = node->line;
fake_token.start_column = node->column;
return add_token_error(g, node->owner, &fake_token, msg);
}
-ErrorMsg *add_error_note(CodeGen *g, ErrorMsg *parent_msg, AstNode *node, Buf *msg) {
+ErrorMsg *add_error_note(CodeGen *g, ErrorMsg *parent_msg, const AstNode *node, Buf *msg) {
Token fake_token;
fake_token.start_line = node->line;
fake_token.start_column = node->column;
@@ -188,12 +194,6 @@ Scope *create_comptime_scope(CodeGen *g, AstNode *node, Scope *parent) {
return &scope->base;
}
-Scope *create_coro_prelude_scope(CodeGen *g, AstNode *node, Scope *parent) {
- ScopeCoroPrelude *scope = allocate<ScopeCoroPrelude>(1);
- init_scope(g, &scope->base, ScopeIdCoroPrelude, node, parent);
- return &scope->base;
-}
-
ZigType *get_scope_import(Scope *scope) {
while (scope) {
if (scope->id == ScopeIdDecls) {
@@ -234,6 +234,8 @@ AstNode *type_decl_node(ZigType *type_entry) {
return type_entry->data.enumeration.decl_node;
case ZigTypeIdUnion:
return type_entry->data.unionation.decl_node;
+ case ZigTypeIdFnFrame:
+ return type_entry->data.frame.fn->proto_node;
case ZigTypeIdOpaque:
case ZigTypeIdMetaType:
case ZigTypeIdVoid:
@@ -254,8 +256,8 @@ AstNode *type_decl_node(ZigType *type_entry) {
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdAnyFrame:
return nullptr;
}
zig_unreachable();
@@ -269,6 +271,20 @@ bool type_is_resolved(ZigType *type_entry, ResolveStatus status) {
return type_entry->data.structure.resolve_status >= status;
case ZigTypeIdUnion:
return type_entry->data.unionation.resolve_status >= status;
+ case ZigTypeIdFnFrame:
+ switch (status) {
+ case ResolveStatusInvalid:
+ zig_unreachable();
+ case ResolveStatusUnstarted:
+ case ResolveStatusZeroBitsKnown:
+ return true;
+ case ResolveStatusAlignmentKnown:
+ case ResolveStatusSizeKnown:
+ return type_entry->data.frame.locals_struct != nullptr;
+ case ResolveStatusLLVMFwdDecl:
+ case ResolveStatusLLVMFull:
+ return type_entry->llvm_type != nullptr;
+ }
case ZigTypeIdEnum:
switch (status) {
case ResolveStatusUnstarted:
@@ -307,8 +323,8 @@ bool type_is_resolved(ZigType *type_entry, ResolveStatus status) {
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdAnyFrame:
return true;
}
zig_unreachable();
@@ -341,27 +357,27 @@ ZigType *get_smallest_unsigned_int_type(CodeGen *g, uint64_t x) {
return get_int_type(g, false, bits_needed_for_unsigned(x));
}
-ZigType *get_promise_type(CodeGen *g, ZigType *result_type) {
- if (result_type != nullptr && result_type->promise_parent != nullptr) {
- return result_type->promise_parent;
- } else if (result_type == nullptr && g->builtin_types.entry_promise != nullptr) {
- return g->builtin_types.entry_promise;
+ZigType *get_any_frame_type(CodeGen *g, ZigType *result_type) {
+ if (result_type != nullptr && result_type->any_frame_parent != nullptr) {
+ return result_type->any_frame_parent;
+ } else if (result_type == nullptr && g->builtin_types.entry_any_frame != nullptr) {
+ return g->builtin_types.entry_any_frame;
}
- ZigType *entry = new_type_table_entry(ZigTypeIdPromise);
+ ZigType *entry = new_type_table_entry(ZigTypeIdAnyFrame);
entry->abi_size = g->builtin_types.entry_usize->abi_size;
entry->size_in_bits = g->builtin_types.entry_usize->size_in_bits;
entry->abi_align = g->builtin_types.entry_usize->abi_align;
- entry->data.promise.result_type = result_type;
- buf_init_from_str(&entry->name, "promise");
+ entry->data.any_frame.result_type = result_type;
+ buf_init_from_str(&entry->name, "anyframe");
if (result_type != nullptr) {
buf_appendf(&entry->name, "->%s", buf_ptr(&result_type->name));
}
if (result_type != nullptr) {
- result_type->promise_parent = entry;
+ result_type->any_frame_parent = entry;
} else if (result_type == nullptr) {
- g->builtin_types.entry_promise = entry;
+ g->builtin_types.entry_any_frame = entry;
}
return entry;
}
@@ -378,6 +394,25 @@ static const char *ptr_len_to_star_str(PtrLen ptr_len) {
zig_unreachable();
}
+ZigType *get_fn_frame_type(CodeGen *g, ZigFn *fn) {
+ if (fn->frame_type != nullptr) {
+ return fn->frame_type;
+ }
+
+ ZigType *entry = new_type_table_entry(ZigTypeIdFnFrame);
+ buf_resize(&entry->name, 0);
+ buf_appendf(&entry->name, "@Frame(%s)", buf_ptr(&fn->symbol_name));
+
+ entry->data.frame.fn = fn;
+
+ // Async function frames are always non-zero bits because they always have a resume index.
+ entry->abi_size = SIZE_MAX;
+ entry->size_in_bits = SIZE_MAX;
+
+ fn->frame_type = entry;
+ return entry;
+}
+
ZigType *get_pointer_to_type_extra(CodeGen *g, ZigType *child_type, bool is_const,
bool is_volatile, PtrLen ptr_len, uint32_t byte_alignment,
uint32_t bit_offset_in_host, uint32_t host_int_bytes, bool allow_zero)
@@ -490,42 +525,6 @@ ZigType *get_pointer_to_type(CodeGen *g, ZigType *child_type, bool is_const) {
return get_pointer_to_type_extra(g, child_type, is_const, false, PtrLenSingle, 0, 0, 0, false);
}
-ZigType *get_promise_frame_type(CodeGen *g, ZigType *return_type) {
- if (return_type->promise_frame_parent != nullptr) {
- return return_type->promise_frame_parent;
- }
-
- ZigType *atomic_state_type = g->builtin_types.entry_usize;
- ZigType *result_ptr_type = get_pointer_to_type(g, return_type, false);
-
- ZigList<const char *> field_names = {};
- field_names.append(ATOMIC_STATE_FIELD_NAME);
- field_names.append(RESULT_FIELD_NAME);
- field_names.append(RESULT_PTR_FIELD_NAME);
- if (g->have_err_ret_tracing) {
- field_names.append(ERR_RET_TRACE_PTR_FIELD_NAME);
- field_names.append(ERR_RET_TRACE_FIELD_NAME);
- field_names.append(RETURN_ADDRESSES_FIELD_NAME);
- }
-
- ZigList<ZigType *> field_types = {};
- field_types.append(atomic_state_type);
- field_types.append(return_type);
- field_types.append(result_ptr_type);
- if (g->have_err_ret_tracing) {
- field_types.append(get_ptr_to_stack_trace_type(g));
- field_types.append(g->stack_trace_type);
- field_types.append(get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count));
- }
-
- assert(field_names.length == field_types.length);
- Buf *name = buf_sprintf("AsyncFramePromise(%s)", buf_ptr(&return_type->name));
- ZigType *entry = get_struct_type(g, buf_ptr(name), field_names.items, field_types.items, field_names.length);
-
- return_type->promise_frame_parent = entry;
- return entry;
-}
-
ZigType *get_optional_type(CodeGen *g, ZigType *child_type) {
if (child_type->optional_parent != nullptr) {
return child_type->optional_parent;
@@ -631,6 +630,7 @@ ZigType *get_error_union_type(CodeGen *g, ZigType *err_set_type, ZigType *payloa
size_t field2_offset = next_field_offset(0, entry->abi_align, field_sizes[0], field_aligns[1]);
entry->abi_size = next_field_offset(field2_offset, entry->abi_align, field_sizes[1], entry->abi_align);
entry->size_in_bits = entry->abi_size * 8;
+ entry->data.error_union.pad_bytes = entry->abi_size - (field2_offset + field_sizes[1]);
}
g->type_table.put(type_id, entry);
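
Note: the new pad_bytes field records the tail padding an error union carries after its second member; a later hunk in this diff (resolve_llvm_types_error_union) turns it into an explicit i8 array when LLVM's own layout comes up short. A rough worked example, assuming next_field_offset simply rounds the running offset up to the next alignment (its definition is not shown here):

    // sketch.cpp -- tail padding of a two-member error union, hypothetical sizes
    #include <cstdio>
    #include <cstddef>

    static size_t align_forward(size_t addr, size_t align) {
        return (addr + align - 1) / align * align;
    }

    int main(void) {
        size_t size0 = 8;              // first member: 8 bytes, align 8
        size_t size1 = 2, align1 = 2;  // second member: 2 bytes, align 2
        size_t abi_align = 8;          // union alignment = max of both members
        size_t field2_offset = align_forward(size0, align1);               // 8
        size_t abi_size = align_forward(field2_offset + size1, abi_align); // 16
        size_t pad_bytes = abi_size - (field2_offset + size1);             // 6
        printf("offset=%zu size=%zu pad=%zu\n", field2_offset, abi_size, pad_bytes);
        return 0;
    }
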
@@ -828,17 +828,15 @@ bool calling_convention_allows_zig_types(CallingConvention cc) {
zig_unreachable();
}
-ZigType *get_ptr_to_stack_trace_type(CodeGen *g) {
+ZigType *get_stack_trace_type(CodeGen *g) {
if (g->stack_trace_type == nullptr) {
ConstExprValue *stack_trace_type_val = get_builtin_value(g, "StackTrace");
assert(stack_trace_type_val->type->id == ZigTypeIdMetaType);
g->stack_trace_type = stack_trace_type_val->data.x_type;
assertNoError(type_resolve(g, g->stack_trace_type, ResolveStatusZeroBitsKnown));
-
- g->ptr_to_stack_trace_type = get_pointer_to_type(g, g->stack_trace_type, false);
}
- return g->ptr_to_stack_trace_type;
+ return g->stack_trace_type;
}
bool want_first_arg_sret(CodeGen *g, FnTypeId *fn_type_id) {
@@ -879,13 +877,8 @@ ZigType *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) {
// populate the name of the type
buf_resize(&fn_type->name, 0);
- if (fn_type->data.fn.fn_type_id.cc == CallingConventionAsync) {
- assert(fn_type_id->async_allocator_type != nullptr);
- buf_appendf(&fn_type->name, "async<%s> ", buf_ptr(&fn_type_id->async_allocator_type->name));
- } else {
- const char *cc_str = calling_convention_fn_type_str(fn_type->data.fn.fn_type_id.cc);
- buf_appendf(&fn_type->name, "%s", cc_str);
- }
+ const char *cc_str = calling_convention_fn_type_str(fn_type->data.fn.fn_type_id.cc);
+ buf_appendf(&fn_type->name, "%s", cc_str);
buf_appendf(&fn_type->name, "fn(");
for (size_t i = 0; i < fn_type_id->param_count; i += 1) {
FnTypeParamInfo *param_info = &fn_type_id->param_info[i];
@@ -998,14 +991,8 @@ ZigType *analyze_type_expr(CodeGen *g, Scope *scope, AstNode *node) {
ZigType *get_generic_fn_type(CodeGen *g, FnTypeId *fn_type_id) {
ZigType *fn_type = new_type_table_entry(ZigTypeIdFn);
buf_resize(&fn_type->name, 0);
- if (fn_type->data.fn.fn_type_id.cc == CallingConventionAsync) {
- const char *async_allocator_type_str = (fn_type->data.fn.fn_type_id.async_allocator_type == nullptr) ?
- "var" : buf_ptr(&fn_type_id->async_allocator_type->name);
- buf_appendf(&fn_type->name, "async(%s) ", async_allocator_type_str);
- } else {
- const char *cc_str = calling_convention_fn_type_str(fn_type->data.fn.fn_type_id.cc);
- buf_appendf(&fn_type->name, "%s", cc_str);
- }
+ const char *cc_str = calling_convention_fn_type_str(fn_type->data.fn.fn_type_id.cc);
+ buf_appendf(&fn_type->name, "%s", cc_str);
buf_appendf(&fn_type->name, "fn(");
size_t i = 0;
for (; i < fn_type_id->next_param_index; i += 1) {
@@ -1119,7 +1106,8 @@ static Error emit_error_unless_type_allowed_in_packed_struct(CodeGen *g, ZigType
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
- case ZigTypeIdPromise:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
add_node_error(g, source_node,
buf_sprintf("type '%s' not allowed in packed struct; no guaranteed in-memory representation",
buf_ptr(&type_entry->name)));
@@ -1207,8 +1195,9 @@ bool type_allowed_in_extern(CodeGen *g, ZigType *type_entry) {
case ZigTypeIdErrorSet:
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
- case ZigTypeIdPromise:
case ZigTypeIdVoid:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
return false;
case ZigTypeIdOpaque:
case ZigTypeIdUnreachable:
@@ -1378,8 +1367,9 @@ static ZigType *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_sc
case ZigTypeIdEnum:
case ZigTypeIdUnion:
case ZigTypeIdFn:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
switch (type_requires_comptime(g, type_entry)) {
case ReqCompTimeNo:
break;
@@ -1474,8 +1464,9 @@ static ZigType *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_sc
case ZigTypeIdEnum:
case ZigTypeIdUnion:
case ZigTypeIdFn:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
switch (type_requires_comptime(g, fn_type_id.return_type)) {
case ReqCompTimeInvalid:
return g->builtin_types.entry_invalid;
@@ -1487,16 +1478,6 @@ static ZigType *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_sc
break;
}
- if (fn_type_id.cc == CallingConventionAsync) {
- if (fn_proto->async_allocator_type == nullptr) {
- return get_generic_fn_type(g, &fn_type_id);
- }
- fn_type_id.async_allocator_type = analyze_type_expr(g, child_scope, fn_proto->async_allocator_type);
- if (type_is_invalid(fn_type_id.async_allocator_type)) {
- return g->builtin_types.entry_invalid;
- }
- }
-
return get_fn_type(g, &fn_type_id);
}
@@ -1516,9 +1497,14 @@ bool type_is_invalid(ZigType *type_entry) {
zig_unreachable();
}
+struct SrcField {
+ const char *name;
+ ZigType *ty;
+ unsigned align;
+};
-ZigType *get_struct_type(CodeGen *g, const char *type_name, const char *field_names[],
- ZigType *field_types[], size_t field_count)
+static ZigType *get_struct_type(CodeGen *g, const char *type_name, SrcField fields[], size_t field_count,
+ unsigned min_abi_align)
{
ZigType *struct_type = new_type_table_entry(ZigTypeIdStruct);
@@ -1530,22 +1516,20 @@ ZigType *get_struct_type(CodeGen *g, const char *type_name, const char *field_na
struct_type->data.structure.fields = allocate<TypeStructField>(field_count);
struct_type->data.structure.fields_by_name.init(field_count);
- size_t abi_align = 0;
+ size_t abi_align = min_abi_align;
for (size_t i = 0; i < field_count; i += 1) {
TypeStructField *field = &struct_type->data.structure.fields[i];
- field->name = buf_create_from_str(field_names[i]);
- field->type_entry = field_types[i];
+ field->name = buf_create_from_str(fields[i].name);
+ field->type_entry = fields[i].ty;
field->src_index = i;
+ field->align = fields[i].align;
if (type_has_bits(field->type_entry)) {
assert(type_is_resolved(field->type_entry, ResolveStatusSizeKnown));
- if (field->type_entry->abi_align > abi_align) {
- abi_align = field->type_entry->abi_align;
+ unsigned field_abi_align = max(field->align, field->type_entry->abi_align);
+ if (field_abi_align > abi_align) {
+ abi_align = field_abi_align;
}
- field->gen_index = struct_type->data.structure.gen_field_count;
- struct_type->data.structure.gen_field_count += 1;
- } else {
- field->gen_index = SIZE_MAX;
}
auto prev_entry = struct_type->data.structure.fields_by_name.put_unique(field->name, field);
@@ -1555,17 +1539,24 @@ ZigType *get_struct_type(CodeGen *g, const char *type_name, const char *field_na
size_t next_offset = 0;
for (size_t i = 0; i < field_count; i += 1) {
TypeStructField *field = &struct_type->data.structure.fields[i];
- if (field->gen_index == SIZE_MAX)
+ if (!type_has_bits(field->type_entry))
continue;
+
field->offset = next_offset;
+
+ // find the next non-zero-byte field for offset calculations
size_t next_src_field_index = i + 1;
for (; next_src_field_index < field_count; next_src_field_index += 1) {
- if (struct_type->data.structure.fields[next_src_field_index].gen_index != SIZE_MAX) {
+ if (type_has_bits(struct_type->data.structure.fields[next_src_field_index].type_entry))
break;
- }
}
- size_t next_abi_align = (next_src_field_index == field_count) ?
- abi_align : struct_type->data.structure.fields[next_src_field_index].type_entry->abi_align;
+ size_t next_abi_align;
+ if (next_src_field_index == field_count) {
+ next_abi_align = abi_align;
+ } else {
+ next_abi_align = max(fields[next_src_field_index].align,
+ struct_type->data.structure.fields[next_src_field_index].type_entry->abi_align);
+ }
next_offset = next_field_offset(next_offset, abi_align, field->type_entry->abi_size, next_abi_align);
}
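
Note: get_struct_type now takes per-field alignment overrides (SrcField.align, with 0 meaning "use the type's natural abi_align") and keeps the rule that a field's offset is advanced using the alignment of the next field that actually occupies bytes. A small layout sketch under the same assumption about next_field_offset as above:

    // sketch.cpp -- hypothetical three-field layout; zero-bit fields get no slot
    #include <cstdio>
    #include <cstddef>

    struct Field { size_t size; size_t align; };

    static size_t align_forward(size_t addr, size_t align) {
        return (addr + align - 1) / align * align;
    }

    int main(void) {
        Field fields[3] = {{1, 1}, {0, 1}, {4, 4}};
        size_t struct_align = 4; // max field alignment, at least min_abi_align
        size_t offset = 0;
        for (int i = 0; i < 3; i += 1) {
            if (fields[i].size == 0) continue; // zero-bit field: skipped entirely
            printf("field %d at offset %zu\n", i, offset);
            int next = i + 1;
            while (next < 3 && fields[next].size == 0) next += 1;
            size_t next_align = (next == 3) ? struct_align : fields[next].align;
            offset = align_forward(offset + fields[i].size, next_align);
        }
        printf("abi_size=%zu\n", offset); // field 0 at 0, field 2 at 4, size 8
        return 0;
    }
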
@@ -2653,7 +2644,6 @@ ZigFn *create_fn_raw(CodeGen *g, FnInline inline_value) {
fn_entry->prealloc_backward_branch_quota = default_backward_branch_quota;
- fn_entry->codegen = g;
fn_entry->analyzed_executable.backward_branch_count = &fn_entry->prealloc_bbc;
fn_entry->analyzed_executable.backward_branch_quota = &fn_entry->prealloc_backward_branch_quota;
fn_entry->analyzed_executable.fn_entry = fn_entry;
@@ -2781,6 +2771,7 @@ static void resolve_decl_fn(CodeGen *g, TldFn *tld_fn) {
}
}
} else {
+ fn_table_entry->inferred_async_node = inferred_async_none;
g->external_prototypes.put_unique(tld_fn->base.name, &tld_fn->base);
}
@@ -2802,6 +2793,13 @@ static void resolve_decl_fn(CodeGen *g, TldFn *tld_fn) {
g->fn_defs.append(fn_table_entry);
}
+ // If the calling convention is async, we already know the function is async and record
+ // its proto node now. Otherwise inferred_async_node stays nullptr, meaning async-ness has
+ // not been inferred yet and possible compile errors for improperly calling async functions
+ // have not been emitted yet.
+ if (fn_table_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync) {
+ fn_table_entry->inferred_async_node = fn_table_entry->proto_node;
+ }
+
if (scope_is_root_decls(tld_fn->base.parent_scope) &&
(import == g->root_import || import->data.structure.root_struct->package == g->panic_package))
{
@@ -3035,12 +3033,11 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) {
case NodeTypeIfErrorExpr:
case NodeTypeIfOptional:
case NodeTypeErrorSetDecl:
- case NodeTypeCancel:
case NodeTypeResume:
case NodeTypeAwaitExpr:
case NodeTypeSuspend:
- case NodeTypePromiseType:
case NodeTypeEnumLiteral:
+ case NodeTypeAnyFrameType:
zig_unreachable();
}
}
@@ -3091,8 +3088,9 @@ ZigType *validate_var_type(CodeGen *g, AstNode *source_node, ZigType *type_entry
case ZigTypeIdUnion:
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
return type_entry;
}
zig_unreachable();
@@ -3592,8 +3590,9 @@ bool is_container(ZigType *type_entry) {
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
return false;
}
zig_unreachable();
@@ -3649,8 +3648,9 @@ Error resolve_container_type(CodeGen *g, ZigType *type_entry) {
case ZigTypeIdInvalid:
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
zig_unreachable();
}
zig_unreachable();
@@ -3659,13 +3659,13 @@ Error resolve_container_type(CodeGen *g, ZigType *type_entry) {
ZigType *get_src_ptr_type(ZigType *type) {
if (type->id == ZigTypeIdPointer) return type;
if (type->id == ZigTypeIdFn) return type;
- if (type->id == ZigTypeIdPromise) return type;
+ if (type->id == ZigTypeIdAnyFrame) return type;
if (type->id == ZigTypeIdOptional) {
if (type->data.maybe.child_type->id == ZigTypeIdPointer) {
return type->data.maybe.child_type->data.pointer.allow_zero ? nullptr : type->data.maybe.child_type;
}
if (type->data.maybe.child_type->id == ZigTypeIdFn) return type->data.maybe.child_type;
- if (type->data.maybe.child_type->id == ZigTypeIdPromise) return type->data.maybe.child_type;
+ if (type->data.maybe.child_type->id == ZigTypeIdAnyFrame) return type->data.maybe.child_type;
}
return nullptr;
}
@@ -3681,6 +3681,13 @@ bool type_is_nonnull_ptr(ZigType *type) {
return get_codegen_ptr_type(type) == type && !ptr_allows_addr_zero(type);
}
+static uint32_t get_async_frame_align_bytes(CodeGen *g) {
+ uint32_t a = g->pointer_size_bytes * 2;
+ // async frame pointers have at least alignment 8 so that we can have 3 extra bits when doing atomicrmw
+ if (a < 8) a = 8;
+ return a;
+}
+
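
Note: get_async_frame_align_bytes carries over the old coroutine rule: frame alignment is twice the pointer size but never below 8, so a frame address always has at least 3 free low bits for the atomicrmw bookkeeping mentioned in the comment. Spelled out for the two common pointer sizes:

    // sketch.cpp -- the same computation for 32-bit and 64-bit targets
    #include <cstdio>
    #include <cstdint>

    static uint32_t async_frame_align(uint32_t pointer_size_bytes) {
        uint32_t a = pointer_size_bytes * 2;
        if (a < 8) a = 8; // keep at least 3 low bits of a frame pointer free
        return a;
    }

    int main(void) {
        printf("%u %u\n", async_frame_align(4), async_frame_align(8)); // 8 16
        return 0;
    }
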
uint32_t get_ptr_align(CodeGen *g, ZigType *type) {
ZigType *ptr_type = get_src_ptr_type(type);
if (ptr_type->id == ZigTypeIdPointer) {
@@ -3692,8 +3699,8 @@ uint32_t get_ptr_align(CodeGen *g, ZigType *type) {
// when getting the alignment of `?extern fn() void`.
// See http://lists.llvm.org/pipermail/llvm-dev/2018-September/126142.html
return (ptr_type->data.fn.fn_type_id.alignment == 0) ? 1 : ptr_type->data.fn.fn_type_id.alignment;
- } else if (ptr_type->id == ZigTypeIdPromise) {
- return get_coro_frame_align_bytes(g);
+ } else if (ptr_type->id == ZigTypeIdAnyFrame) {
+ return get_async_frame_align_bytes(g);
} else {
zig_unreachable();
}
@@ -3705,7 +3712,7 @@ bool get_ptr_const(ZigType *type) {
return ptr_type->data.pointer.is_const;
} else if (ptr_type->id == ZigTypeIdFn) {
return true;
- } else if (ptr_type->id == ZigTypeIdPromise) {
+ } else if (ptr_type->id == ZigTypeIdAnyFrame) {
return true;
} else {
zig_unreachable();
@@ -3780,18 +3787,128 @@ bool resolve_inferred_error_set(CodeGen *g, ZigType *err_set_type, AstNode *sour
return true;
}
-void analyze_fn_ir(CodeGen *g, ZigFn *fn_table_entry, AstNode *return_type_node) {
- ZigType *fn_type = fn_table_entry->type_entry;
+static void resolve_async_fn_frame(CodeGen *g, ZigFn *fn) {
+ ZigType *frame_type = get_fn_frame_type(g, fn);
+ Error err;
+ if ((err = type_resolve(g, frame_type, ResolveStatusSizeKnown))) {
+ fn->anal_state = FnAnalStateInvalid;
+ return;
+ }
+}
+
+bool fn_is_async(ZigFn *fn) {
+ assert(fn->inferred_async_node != nullptr);
+ assert(fn->inferred_async_node != inferred_async_checking);
+ return fn->inferred_async_node != inferred_async_none;
+}
+
+static void add_async_error_notes(CodeGen *g, ErrorMsg *msg, ZigFn *fn) {
+ assert(fn->inferred_async_node != nullptr);
+ assert(fn->inferred_async_node != inferred_async_checking);
+ assert(fn->inferred_async_node != inferred_async_none);
+ if (fn->inferred_async_fn != nullptr) {
+ ErrorMsg *new_msg = add_error_note(g, msg, fn->inferred_async_node,
+ buf_sprintf("async function call here"));
+ return add_async_error_notes(g, new_msg, fn->inferred_async_fn);
+ } else if (fn->inferred_async_node->type == NodeTypeFnProto) {
+ add_error_note(g, msg, fn->inferred_async_node,
+ buf_sprintf("async calling convention here"));
+ } else if (fn->inferred_async_node->type == NodeTypeSuspend) {
+ add_error_note(g, msg, fn->inferred_async_node,
+ buf_sprintf("suspends here"));
+ } else if (fn->inferred_async_node->type == NodeTypeAwaitExpr) {
+ add_error_note(g, msg, fn->inferred_async_node,
+ buf_sprintf("await is a suspend point"));
+ } else if (fn->inferred_async_node->type == NodeTypeFnCallExpr &&
+ fn->inferred_async_node->data.fn_call_expr.is_builtin)
+ {
+ add_error_note(g, msg, fn->inferred_async_node,
+ buf_sprintf("@frame() causes function to be async"));
+ } else {
+ add_error_note(g, msg, fn->inferred_async_node,
+ buf_sprintf("suspends here"));
+ }
+}
+
+// Determines whether a function is inferred to be async, recursing into callees as needed.
+static void analyze_fn_async(CodeGen *g, ZigFn *fn, bool resolve_frame) {
+ if (fn->inferred_async_node == inferred_async_checking) {
+ // TODO call graph cycle detected, disallow the recursion
+ fn->inferred_async_node = inferred_async_none;
+ return;
+ }
+ if (fn->inferred_async_node == inferred_async_none) {
+ return;
+ }
+ if (fn->inferred_async_node != nullptr) {
+ if (resolve_frame) {
+ resolve_async_fn_frame(g, fn);
+ }
+ return;
+ }
+ fn->inferred_async_node = inferred_async_checking;
+
+ bool must_not_be_async = false;
+ if (fn->type_entry->data.fn.fn_type_id.cc != CallingConventionUnspecified) {
+ must_not_be_async = true;
+ fn->inferred_async_node = inferred_async_none;
+ }
+
+ for (size_t i = 0; i < fn->call_list.length; i += 1) {
+ IrInstructionCallGen *call = fn->call_list.at(i);
+ ZigFn *callee = call->fn_entry;
+ if (callee == nullptr) {
+ // TODO function pointer call here, could be anything
+ continue;
+ }
+
+ if (callee->type_entry->data.fn.fn_type_id.cc != CallingConventionUnspecified)
+ continue;
+ if (callee->anal_state == FnAnalStateReady) {
+ analyze_fn_body(g, callee);
+ if (callee->anal_state == FnAnalStateInvalid) {
+ fn->anal_state = FnAnalStateInvalid;
+ return;
+ }
+ }
+ assert(callee->anal_state == FnAnalStateComplete);
+ analyze_fn_async(g, callee, true);
+ if (callee->anal_state == FnAnalStateInvalid) {
+ fn->anal_state = FnAnalStateInvalid;
+ return;
+ }
+ if (fn_is_async(callee)) {
+ fn->inferred_async_node = call->base.source_node;
+ fn->inferred_async_fn = callee;
+ if (must_not_be_async) {
+ ErrorMsg *msg = add_node_error(g, fn->proto_node,
+ buf_sprintf("function with calling convention '%s' cannot be async",
+ calling_convention_name(fn->type_entry->data.fn.fn_type_id.cc)));
+ add_async_error_notes(g, msg, fn);
+ fn->anal_state = FnAnalStateInvalid;
+ return;
+ }
+ if (resolve_frame) {
+ resolve_async_fn_frame(g, fn);
+ }
+ return;
+ }
+ }
+ fn->inferred_async_node = inferred_async_none;
+}
+
+static void analyze_fn_ir(CodeGen *g, ZigFn *fn, AstNode *return_type_node) {
+ ZigType *fn_type = fn->type_entry;
assert(!fn_type->data.fn.is_generic);
FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
- ZigType *block_return_type = ir_analyze(g, &fn_table_entry->ir_executable,
- &fn_table_entry->analyzed_executable, fn_type_id->return_type, return_type_node);
- fn_table_entry->src_implicit_return_type = block_return_type;
+ ZigType *block_return_type = ir_analyze(g, &fn->ir_executable,
+ &fn->analyzed_executable, fn_type_id->return_type, return_type_node);
+ fn->src_implicit_return_type = block_return_type;
- if (type_is_invalid(block_return_type) || fn_table_entry->analyzed_executable.invalid) {
+ if (type_is_invalid(block_return_type) || fn->analyzed_executable.invalid) {
assert(g->errors.length > 0);
- fn_table_entry->anal_state = FnAnalStateInvalid;
+ fn->anal_state = FnAnalStateInvalid;
return;
}
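
Note: the analyze_fn_async function added above infers async-ness bottom-up over the call graph: a function is async if IR analysis already flagged a suspend point in it, or if any statically known callee turns out to be async; the inferred_async_checking sentinel catches call-graph cycles, which for now are conservatively treated as not async (see the TODO). A toy model of that propagation, independent of the compiler's data structures and with invented names:

    // sketch.cpp -- memoized DFS over a tiny call graph
    #include <cstdio>
    #include <vector>

    struct Fn {
        bool directly_suspends = false; // e.g. contains `suspend` or `await`
        std::vector<Fn *> callees;
        enum State { Unknown, Checking, NotAsync, Async } state = Unknown;
    };

    static bool infer_async(Fn *fn) {
        if (fn->state == Fn::Checking) return false; // cycle: treated as not async
        if (fn->state != Fn::Unknown) return fn->state == Fn::Async;
        fn->state = Fn::Checking;
        bool is_async = fn->directly_suspends;
        for (Fn *callee : fn->callees)
            is_async = infer_async(callee) || is_async;
        fn->state = is_async ? Fn::Async : Fn::NotAsync;
        return is_async;
    }

    int main(void) {
        Fn leaf, mid, root;
        leaf.directly_suspends = true; // suspends, so it is async
        mid.callees.push_back(&leaf);  // async because it calls leaf
        root.callees.push_back(&mid);  // async transitively
        printf("%d %d %d\n", infer_async(&root), infer_async(&mid), infer_async(&leaf));
        return 0;
    }
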
@@ -3799,20 +3916,20 @@ void analyze_fn_ir(CodeGen *g, ZigFn *fn_table_entry, AstNode *return_type_node)
ZigType *return_err_set_type = fn_type_id->return_type->data.error_union.err_set_type;
if (return_err_set_type->data.error_set.infer_fn != nullptr) {
ZigType *inferred_err_set_type;
- if (fn_table_entry->src_implicit_return_type->id == ZigTypeIdErrorSet) {
- inferred_err_set_type = fn_table_entry->src_implicit_return_type;
- } else if (fn_table_entry->src_implicit_return_type->id == ZigTypeIdErrorUnion) {
- inferred_err_set_type = fn_table_entry->src_implicit_return_type->data.error_union.err_set_type;
+ if (fn->src_implicit_return_type->id == ZigTypeIdErrorSet) {
+ inferred_err_set_type = fn->src_implicit_return_type;
+ } else if (fn->src_implicit_return_type->id == ZigTypeIdErrorUnion) {
+ inferred_err_set_type = fn->src_implicit_return_type->data.error_union.err_set_type;
} else {
add_node_error(g, return_type_node,
buf_sprintf("function with inferred error set must return at least one possible error"));
- fn_table_entry->anal_state = FnAnalStateInvalid;
+ fn->anal_state = FnAnalStateInvalid;
return;
}
if (inferred_err_set_type->data.error_set.infer_fn != nullptr) {
if (!resolve_inferred_error_set(g, inferred_err_set_type, return_type_node)) {
- fn_table_entry->anal_state = FnAnalStateInvalid;
+ fn->anal_state = FnAnalStateInvalid;
return;
}
}
@@ -3832,13 +3949,25 @@ void analyze_fn_ir(CodeGen *g, ZigFn *fn_table_entry, AstNode *return_type_node)
}
}
+ CallingConvention cc = fn->type_entry->data.fn.fn_type_id.cc;
+ if (cc != CallingConventionUnspecified && cc != CallingConventionAsync &&
+ fn->inferred_async_node != nullptr &&
+ fn->inferred_async_node != inferred_async_checking &&
+ fn->inferred_async_node != inferred_async_none)
+ {
+ ErrorMsg *msg = add_node_error(g, fn->proto_node,
+ buf_sprintf("function with calling convention '%s' cannot be async",
+ calling_convention_name(cc)));
+ add_async_error_notes(g, msg, fn);
+ fn->anal_state = FnAnalStateInvalid;
+ }
+
if (g->verbose_ir) {
- fprintf(stderr, "fn %s() { // (analyzed)\n", buf_ptr(&fn_table_entry->symbol_name));
- ir_print(g, stderr, &fn_table_entry->analyzed_executable, 4);
+ fprintf(stderr, "fn %s() { // (analyzed)\n", buf_ptr(&fn->symbol_name));
+ ir_print(g, stderr, &fn->analyzed_executable, 4);
fprintf(stderr, "}\n");
}
-
- fn_table_entry->anal_state = FnAnalStateComplete;
+ fn->anal_state = FnAnalStateComplete;
}
static void analyze_fn_body(CodeGen *g, ZigFn *fn_table_entry) {
@@ -4008,6 +4137,16 @@ void semantic_analyze(CodeGen *g) {
analyze_fn_body(g, fn_entry);
}
}
+
+ if (g->errors.length != 0) {
+ return;
+ }
+
+ // second pass over functions for detecting async
+ for (g->fn_defs_index = 0; g->fn_defs_index < g->fn_defs.length; g->fn_defs_index += 1) {
+ ZigFn *fn_entry = g->fn_defs.at(g->fn_defs_index);
+ analyze_fn_async(g, fn_entry, true);
+ }
}
ZigType *get_int_type(CodeGen *g, bool is_signed, uint32_t size_in_bits) {
@@ -4103,11 +4242,12 @@ bool handle_is_ptr(ZigType *type_entry) {
case ZigTypeIdErrorSet:
case ZigTypeIdFn:
case ZigTypeIdEnum:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdAnyFrame:
return false;
case ZigTypeIdArray:
case ZigTypeIdStruct:
+ case ZigTypeIdFnFrame:
return type_has_bits(type_entry);
case ZigTypeIdErrorUnion:
return type_has_bits(type_entry->data.error_union.payload_type);
@@ -4143,7 +4283,6 @@ uint32_t fn_type_id_hash(FnTypeId *id) {
result += ((uint32_t)(id->cc)) * (uint32_t)3349388391;
result += id->is_var_args ? (uint32_t)1931444534 : 0;
result += hash_ptr(id->return_type);
- result += hash_ptr(id->async_allocator_type);
result += id->alignment * 0xd3b3f3e2;
for (size_t i = 0; i < id->param_count; i += 1) {
FnTypeParamInfo *info = &id->param_info[i];
@@ -4158,8 +4297,7 @@ bool fn_type_id_eql(FnTypeId *a, FnTypeId *b) {
a->return_type != b->return_type ||
a->is_var_args != b->is_var_args ||
a->param_count != b->param_count ||
- a->alignment != b->alignment ||
- a->async_allocator_type != b->async_allocator_type)
+ a->alignment != b->alignment)
{
return false;
}
@@ -4321,9 +4459,6 @@ static uint32_t hash_const_val(ConstExprValue *const_val) {
return 3677364617 ^ hash_ptr(const_val->data.x_ptr.data.fn.fn_entry);
case ZigTypeIdPointer:
return hash_const_val_ptr(const_val);
- case ZigTypeIdPromise:
- // TODO better hashing algorithm
- return 223048345;
case ZigTypeIdUndefined:
return 162837799;
case ZigTypeIdNull:
@@ -4357,6 +4492,12 @@ static uint32_t hash_const_val(ConstExprValue *const_val) {
case ZigTypeIdVector:
// TODO better hashing algorithm
return 3647867726;
+ case ZigTypeIdFnFrame:
+ // TODO better hashing algorithm
+ return 675741936;
+ case ZigTypeIdAnyFrame:
+ // TODO better hashing algorithm
+ return 3747294894;
case ZigTypeIdBoundFn:
case ZigTypeIdInvalid:
case ZigTypeIdUnreachable:
@@ -4389,7 +4530,7 @@ bool generic_fn_type_id_eql(GenericFnTypeId *a, GenericFnTypeId *b) {
if (a_val->special != ConstValSpecialRuntime && b_val->special != ConstValSpecialRuntime) {
assert(a_val->special == ConstValSpecialStatic);
assert(b_val->special == ConstValSpecialStatic);
- if (!const_values_equal(a->fn_entry->codegen, a_val, b_val)) {
+ if (!const_values_equal(a->codegen, a_val, b_val)) {
return false;
}
} else {
@@ -4419,9 +4560,10 @@ static bool can_mutate_comptime_var_state(ConstExprValue *value) {
case ZigTypeIdBoundFn:
case ZigTypeIdFn:
case ZigTypeIdOpaque:
- case ZigTypeIdPromise:
case ZigTypeIdErrorSet:
case ZigTypeIdEnum:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
return false;
case ZigTypeIdPointer:
@@ -4489,11 +4631,12 @@ static bool return_type_is_cacheable(ZigType *return_type) {
case ZigTypeIdBoundFn:
case ZigTypeIdFn:
case ZigTypeIdOpaque:
- case ZigTypeIdPromise:
case ZigTypeIdErrorSet:
case ZigTypeIdEnum:
case ZigTypeIdPointer:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
return true;
case ZigTypeIdArray:
@@ -4624,8 +4767,9 @@ OnePossibleValue type_has_one_possible_value(CodeGen *g, ZigType *type_entry) {
case ZigTypeIdFn:
case ZigTypeIdBool:
case ZigTypeIdFloat:
- case ZigTypeIdPromise:
case ZigTypeIdErrorUnion:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
return OnePossibleValueNo;
case ZigTypeIdUndefined:
case ZigTypeIdNull:
@@ -4713,7 +4857,8 @@ ReqCompTime type_requires_comptime(CodeGen *g, ZigType *type_entry) {
case ZigTypeIdFloat:
case ZigTypeIdVoid:
case ZigTypeIdUnreachable:
- case ZigTypeIdPromise:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
return ReqCompTimeNo;
}
zig_unreachable();
@@ -5032,6 +5177,221 @@ Error ensure_complete_type(CodeGen *g, ZigType *type_entry) {
return type_resolve(g, type_entry, ResolveStatusSizeKnown);
}
+static ZigType *get_async_fn_type(CodeGen *g, ZigType *orig_fn_type) {
+ if (orig_fn_type->data.fn.fn_type_id.cc == CallingConventionAsync)
+ return orig_fn_type;
+
+ ZigType *fn_type = allocate_nonzero<ZigType>(1);
+ *fn_type = *orig_fn_type;
+ fn_type->data.fn.fn_type_id.cc = CallingConventionAsync;
+ fn_type->llvm_type = nullptr;
+ fn_type->llvm_di_type = nullptr;
+
+ return fn_type;
+}
+
+static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) {
+ Error err;
+
+ if (frame_type->data.frame.locals_struct != nullptr)
+ return ErrorNone;
+
+ ZigFn *fn = frame_type->data.frame.fn;
+ switch (fn->anal_state) {
+ case FnAnalStateInvalid:
+ return ErrorSemanticAnalyzeFail;
+ case FnAnalStateComplete:
+ break;
+ case FnAnalStateReady:
+ analyze_fn_body(g, fn);
+ if (fn->anal_state == FnAnalStateInvalid)
+ return ErrorSemanticAnalyzeFail;
+ break;
+ case FnAnalStateProbing: {
+ ErrorMsg *msg = add_node_error(g, fn->proto_node,
+ buf_sprintf("cannot resolve '%s': function not fully analyzed yet",
+ buf_ptr(&frame_type->name)));
+ ir_add_analysis_trace(fn->ir_executable.analysis, msg,
+ buf_sprintf("depends on its own frame here"));
+ return ErrorSemanticAnalyzeFail;
+ }
+ }
+ analyze_fn_async(g, fn, false);
+ if (fn->anal_state == FnAnalStateInvalid)
+ return ErrorSemanticAnalyzeFail;
+
+ if (!fn_is_async(fn)) {
+ ZigType *fn_type = fn->type_entry;
+ FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
+ ZigType *ptr_return_type = get_pointer_to_type(g, fn_type_id->return_type, false);
+
+ // label (grep this): [fn_frame_struct_layout]
+ ZigList<SrcField> fields = {};
+
+ fields.append({"@fn_ptr", g->builtin_types.entry_usize, 0});
+ fields.append({"@resume_index", g->builtin_types.entry_usize, 0});
+ fields.append({"@awaiter", g->builtin_types.entry_usize, 0});
+
+ fields.append({"@result_ptr_callee", ptr_return_type, 0});
+ fields.append({"@result_ptr_awaiter", ptr_return_type, 0});
+ fields.append({"@result", fn_type_id->return_type, 0});
+
+ if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) {
+ ZigType *ptr_to_stack_trace_type = get_pointer_to_type(g, get_stack_trace_type(g), false);
+ fields.append({"@ptr_stack_trace_callee", ptr_to_stack_trace_type, 0});
+ fields.append({"@ptr_stack_trace_awaiter", ptr_to_stack_trace_type, 0});
+
+ fields.append({"@stack_trace", get_stack_trace_type(g), 0});
+ fields.append({"@instruction_addresses",
+ get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count), 0});
+ }
+
+ frame_type->data.frame.locals_struct = get_struct_type(g, buf_ptr(&frame_type->name),
+ fields.items, fields.length, target_fn_align(g->zig_target));
+ frame_type->abi_size = frame_type->data.frame.locals_struct->abi_size;
+ frame_type->abi_align = frame_type->data.frame.locals_struct->abi_align;
+ frame_type->size_in_bits = frame_type->data.frame.locals_struct->size_in_bits;
+
+ return ErrorNone;
+ }
+
+ ZigType *fn_type = get_async_fn_type(g, fn->type_entry);
+
+ if (fn->analyzed_executable.need_err_code_spill) {
+ IrInstructionAllocaGen *alloca_gen = allocate<IrInstructionAllocaGen>(1);
+ alloca_gen->base.id = IrInstructionIdAllocaGen;
+ alloca_gen->base.source_node = fn->proto_node;
+ alloca_gen->base.scope = fn->child_scope;
+ alloca_gen->base.value.type = get_pointer_to_type(g, g->builtin_types.entry_global_error_set, false);
+ alloca_gen->base.ref_count = 1;
+ alloca_gen->name_hint = "";
+ fn->alloca_gen_list.append(alloca_gen);
+ fn->err_code_spill = &alloca_gen->base;
+ }
+
+ for (size_t i = 0; i < fn->call_list.length; i += 1) {
+ IrInstructionCallGen *call = fn->call_list.at(i);
+ ZigFn *callee = call->fn_entry;
+ if (callee == nullptr) {
+ add_node_error(g, call->base.source_node,
+ buf_sprintf("function is not comptime-known; @asyncCall required"));
+ return ErrorSemanticAnalyzeFail;
+ }
+ if (callee->body_node == nullptr) {
+ continue;
+ }
+ if (callee->anal_state == FnAnalStateProbing) {
+ ErrorMsg *msg = add_node_error(g, fn->proto_node,
+ buf_sprintf("unable to determine async function frame of '%s'", buf_ptr(&fn->symbol_name)));
+ ErrorMsg *note = add_error_note(g, msg, call->base.source_node,
+ buf_sprintf("analysis of function '%s' depends on the frame", buf_ptr(&callee->symbol_name)));
+ ir_add_analysis_trace(callee->ir_executable.analysis, note,
+ buf_sprintf("depends on the frame here"));
+ return ErrorSemanticAnalyzeFail;
+ }
+
+ analyze_fn_body(g, callee);
+ if (callee->anal_state == FnAnalStateInvalid) {
+ frame_type->data.frame.locals_struct = g->builtin_types.entry_invalid;
+ return ErrorSemanticAnalyzeFail;
+ }
+ analyze_fn_async(g, callee, true);
+ if (!fn_is_async(callee))
+ continue;
+
+ ZigType *callee_frame_type = get_fn_frame_type(g, callee);
+
+ IrInstructionAllocaGen *alloca_gen = allocate<IrInstructionAllocaGen>(1);
+ alloca_gen->base.id = IrInstructionIdAllocaGen;
+ alloca_gen->base.source_node = call->base.source_node;
+ alloca_gen->base.scope = call->base.scope;
+ alloca_gen->base.value.type = get_pointer_to_type(g, callee_frame_type, false);
+ alloca_gen->base.ref_count = 1;
+ alloca_gen->name_hint = "";
+ fn->alloca_gen_list.append(alloca_gen);
+ call->frame_result_loc = &alloca_gen->base;
+ }
+ FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
+ ZigType *ptr_return_type = get_pointer_to_type(g, fn_type_id->return_type, false);
+
+ // label (grep this): [fn_frame_struct_layout]
+ ZigList<SrcField> fields = {};
+
+ fields.append({"@fn_ptr", fn_type, 0});
+ fields.append({"@resume_index", g->builtin_types.entry_usize, 0});
+ fields.append({"@awaiter", g->builtin_types.entry_usize, 0});
+
+ fields.append({"@result_ptr_callee", ptr_return_type, 0});
+ fields.append({"@result_ptr_awaiter", ptr_return_type, 0});
+ fields.append({"@result", fn_type_id->return_type, 0});
+
+ if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) {
+ ZigType *ptr_stack_trace_type = get_pointer_to_type(g, get_stack_trace_type(g), false);
+ fields.append({"@ptr_stack_trace_callee", ptr_stack_trace_type, 0});
+ fields.append({"@ptr_stack_trace_awaiter", ptr_stack_trace_type, 0});
+ }
+
+ for (size_t arg_i = 0; arg_i < fn_type_id->param_count; arg_i += 1) {
+ FnTypeParamInfo *param_info = &fn_type_id->param_info[arg_i];
+ AstNode *param_decl_node = get_param_decl_node(fn, arg_i);
+ Buf *param_name;
+ bool is_var_args = param_decl_node && param_decl_node->data.param_decl.is_var_args;
+ if (param_decl_node && !is_var_args) {
+ param_name = param_decl_node->data.param_decl.name;
+ } else {
+ param_name = buf_sprintf("@arg%" ZIG_PRI_usize, arg_i);
+ }
+ ZigType *param_type = param_info->type;
+
+ fields.append({buf_ptr(param_name), param_type, 0});
+ }
+
+ if (codegen_fn_has_err_ret_tracing_stack(g, fn, true)) {
+ fields.append({"@stack_trace", get_stack_trace_type(g), 0});
+ fields.append({"@instruction_addresses",
+ get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count), 0});
+ }
+
+ for (size_t alloca_i = 0; alloca_i < fn->alloca_gen_list.length; alloca_i += 1) {
+ IrInstructionAllocaGen *instruction = fn->alloca_gen_list.at(alloca_i);
+ instruction->field_index = SIZE_MAX;
+ ZigType *ptr_type = instruction->base.value.type;
+ assert(ptr_type->id == ZigTypeIdPointer);
+ ZigType *child_type = ptr_type->data.pointer.child_type;
+ if (!type_has_bits(child_type))
+ continue;
+ if (instruction->base.ref_count == 0)
+ continue;
+ if (instruction->base.value.special != ConstValSpecialRuntime) {
+ if (const_ptr_pointee(nullptr, g, &instruction->base.value, nullptr)->special !=
+ ConstValSpecialRuntime)
+ {
+ continue;
+ }
+ }
+ if ((err = type_resolve(g, child_type, ResolveStatusSizeKnown))) {
+ return err;
+ }
+ const char *name;
+ if (*instruction->name_hint == 0) {
+ name = buf_ptr(buf_sprintf("@local%" ZIG_PRI_usize, alloca_i));
+ } else {
+ name = buf_ptr(buf_sprintf("%s.%" ZIG_PRI_usize, instruction->name_hint, alloca_i));
+ }
+ instruction->field_index = fields.length;
+
+ fields.append({name, child_type, instruction->align});
+ }
+
+
+ frame_type->data.frame.locals_struct = get_struct_type(g, buf_ptr(&frame_type->name),
+ fields.items, fields.length, target_fn_align(g->zig_target));
+ frame_type->abi_size = frame_type->data.frame.locals_struct->abi_size;
+ frame_type->abi_align = frame_type->data.frame.locals_struct->abi_align;
+ frame_type->size_in_bits = frame_type->data.frame.locals_struct->size_in_bits;
+ return ErrorNone;
+}
+
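
Note: both branches of resolve_async_frame build the frame as an ordinary struct following the [fn_frame_struct_layout] label: a fixed header (fn pointer, resume index, awaiter, the two result pointers, in-frame result), optional error-return-trace slots, and, in the async case, one field per parameter and per spilled alloca. Only as an illustration of that field order (the real layout is computed by get_struct_type and is target-dependent), for a hypothetical `fn add(a: i32, b: i32) i32` with error return tracing disabled:

    // sketch.cpp -- hand-written mirror of the frame header field order above
    #include <cstdio>
    #include <cstddef>
    #include <cstdint>

    struct FrameOfAdd {
        // header ([fn_frame_struct_layout])
        void (*fn_ptr)();            // the async-lowered function
        size_t resume_index;         // which suspend point to resume at
        size_t awaiter;              // whom to wake when this frame completes
        int32_t *result_ptr_callee;  // where the callee writes the result
        int32_t *result_ptr_awaiter; // where the awaiter wants the result copied
        int32_t result;              // in-frame result storage
        // parameters
        int32_t a;
        int32_t b;
        // ...spilled locals and callee frames would follow, one field per alloca
    };

    int main(void) {
        printf("%zu\n", sizeof(FrameOfAdd));
        return 0;
    }
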
Error type_resolve(CodeGen *g, ZigType *ty, ResolveStatus status) {
if (type_is_invalid(ty))
return ErrorSemanticAnalyzeFail;
@@ -5056,6 +5416,8 @@ Error type_resolve(CodeGen *g, ZigType *ty, ResolveStatus status) {
return resolve_enum_zero_bits(g, ty);
} else if (ty->id == ZigTypeIdUnion) {
return resolve_union_alignment(g, ty);
+ } else if (ty->id == ZigTypeIdFnFrame) {
+ return resolve_async_frame(g, ty);
}
return ErrorNone;
case ResolveStatusSizeKnown:
@@ -5065,6 +5427,8 @@ Error type_resolve(CodeGen *g, ZigType *ty, ResolveStatus status) {
return resolve_enum_zero_bits(g, ty);
} else if (ty->id == ZigTypeIdUnion) {
return resolve_union_type(g, ty);
+ } else if (ty->id == ZigTypeIdFnFrame) {
+ return resolve_async_frame(g, ty);
}
return ErrorNone;
case ResolveStatusLLVMFwdDecl:
@@ -5259,6 +5623,10 @@ bool const_values_equal(CodeGen *g, ConstExprValue *a, ConstExprValue *b) {
return false;
}
return true;
+ case ZigTypeIdFnFrame:
+ zig_panic("TODO");
+ case ZigTypeIdAnyFrame:
+ zig_panic("TODO");
case ZigTypeIdUndefined:
zig_panic("TODO");
case ZigTypeIdNull:
@@ -5279,7 +5647,6 @@ bool const_values_equal(CodeGen *g, ConstExprValue *a, ConstExprValue *b) {
case ZigTypeIdBoundFn:
case ZigTypeIdInvalid:
case ZigTypeIdUnreachable:
- case ZigTypeIdPromise:
zig_unreachable();
}
zig_unreachable();
@@ -5612,8 +5979,14 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) {
buf_appendf(buf, "(args value)");
return;
}
- case ZigTypeIdPromise:
- zig_unreachable();
+ case ZigTypeIdFnFrame:
+ buf_appendf(buf, "(TODO: async function frame value)");
+ return;
+
+ case ZigTypeIdAnyFrame:
+ buf_appendf(buf, "(TODO: anyframe value)");
+ return;
+
}
zig_unreachable();
}
@@ -5627,6 +6000,15 @@ ZigType *make_int_type(CodeGen *g, bool is_signed, uint32_t size_in_bits) {
entry->llvm_type = LLVMIntType(size_in_bits);
entry->abi_size = LLVMABISizeOfType(g->target_data_ref, entry->llvm_type);
entry->abi_align = LLVMABIAlignmentOfType(g->target_data_ref, entry->llvm_type);
+
+ if (size_in_bits >= 128) {
+ // Override the incorrect alignment reported by LLVM. Clang does this as well.
+ // On x86_64 there are some instructions like CMPXCHG16B which require this.
+ // On all targets, integers 128 bits and above have ABI alignment of 16.
+ // See: https://github.com/ziglang/zig/issues/2987
+ assert(entry->abi_align == 8); // if this trips we can remove the workaround
+ entry->abi_align = 16;
+ }
}
const char u_or_i = is_signed ? 'i' : 'u';
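
Note: the alignment override above matches what C compilers already report for their 128-bit integer type on x86_64; a quick cross-check (the __int128 extension is available in clang and gcc):

    // sketch.cpp -- compile with clang++ or g++ on x86_64
    #include <cstdio>

    int main(void) {
    #if defined(__SIZEOF_INT128__)
        printf("alignof(__int128) = %zu\n", alignof(__int128)); // 16 on x86_64
    #else
        printf("__int128 not available on this compiler/target\n");
    #endif
        return 0;
    }
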
@@ -5660,7 +6042,8 @@ uint32_t type_id_hash(TypeId x) {
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
- case ZigTypeIdPromise:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
zig_unreachable();
case ZigTypeIdErrorUnion:
return hash_ptr(x.data.error_union.err_set_type) ^ hash_ptr(x.data.error_union.payload_type);
@@ -5702,7 +6085,6 @@ bool type_id_eql(TypeId a, TypeId b) {
case ZigTypeIdUndefined:
case ZigTypeIdNull:
case ZigTypeIdOptional:
- case ZigTypeIdPromise:
case ZigTypeIdErrorSet:
case ZigTypeIdEnum:
case ZigTypeIdUnion:
@@ -5710,6 +6092,8 @@ bool type_id_eql(TypeId a, TypeId b) {
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
zig_unreachable();
case ZigTypeIdErrorUnion:
return a.data.error_union.err_set_type == b.data.error_union.err_set_type &&
@@ -5875,7 +6259,8 @@ static const ZigTypeId all_type_ids[] = {
ZigTypeIdBoundFn,
ZigTypeIdArgTuple,
ZigTypeIdOpaque,
- ZigTypeIdPromise,
+ ZigTypeIdFnFrame,
+ ZigTypeIdAnyFrame,
ZigTypeIdVector,
ZigTypeIdEnumLiteral,
};
@@ -5939,12 +6324,14 @@ size_t type_id_index(ZigType *entry) {
return 20;
case ZigTypeIdOpaque:
return 21;
- case ZigTypeIdPromise:
+ case ZigTypeIdFnFrame:
return 22;
- case ZigTypeIdVector:
+ case ZigTypeIdAnyFrame:
return 23;
- case ZigTypeIdEnumLiteral:
+ case ZigTypeIdVector:
return 24;
+ case ZigTypeIdEnumLiteral:
+ return 25;
}
zig_unreachable();
}
@@ -5999,10 +6386,12 @@ const char *type_id_name(ZigTypeId id) {
return "ArgTuple";
case ZigTypeIdOpaque:
return "Opaque";
- case ZigTypeIdPromise:
- return "Promise";
case ZigTypeIdVector:
return "Vector";
+ case ZigTypeIdFnFrame:
+ return "Frame";
+ case ZigTypeIdAnyFrame:
+ return "AnyFrame";
}
zig_unreachable();
}
@@ -6067,19 +6456,12 @@ bool type_is_global_error_set(ZigType *err_set_type) {
return err_set_type->data.error_set.err_count == UINT32_MAX;
}
-uint32_t get_coro_frame_align_bytes(CodeGen *g) {
- uint32_t a = g->pointer_size_bytes * 2;
- // promises have at least alignment 8 so that we can have 3 extra bits when doing atomicrmw
- if (a < 8) a = 8;
- return a;
-}
-
bool type_can_fail(ZigType *type_entry) {
return type_entry->id == ZigTypeIdErrorUnion || type_entry->id == ZigTypeIdErrorSet;
}
bool fn_type_can_fail(FnTypeId *fn_type_id) {
- return type_can_fail(fn_type_id->return_type) || fn_type_id->cc == CallingConventionAsync;
+ return type_can_fail(fn_type_id->return_type);
}
// ErrorNone - result pointer has the type
@@ -6449,7 +6831,9 @@ static void resolve_llvm_types_slice(CodeGen *g, ZigType *type, ResolveStatus wa
type->data.structure.resolve_status = ResolveStatusLLVMFull;
}
-static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveStatus wanted_resolve_status) {
+static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveStatus wanted_resolve_status,
+ ZigType *async_frame_type)
+{
assert(struct_type->id == ZigTypeIdStruct);
assert(struct_type->data.structure.resolve_status != ResolveStatusInvalid);
assert(struct_type->data.structure.resolve_status >= ResolveStatusSizeKnown);
@@ -6486,10 +6870,9 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS
}
size_t field_count = struct_type->data.structure.src_field_count;
- size_t gen_field_count = struct_type->data.structure.gen_field_count;
- LLVMTypeRef *element_types = allocate<LLVMTypeRef>(gen_field_count);
+ // Every field could potentially have a generated padding field after it.
+ LLVMTypeRef *element_types = allocate<LLVMTypeRef>(field_count * 2);
- size_t gen_field_index = 0;
bool packed = (struct_type->data.structure.layout == ContainerLayoutPacked);
size_t packed_bits_offset = 0;
size_t first_packed_bits_offset_misalign = SIZE_MAX;
@@ -6497,20 +6880,36 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS
// trigger all the recursive get_llvm_type calls
for (size_t i = 0; i < field_count; i += 1) {
- TypeStructField *type_struct_field = &struct_type->data.structure.fields[i];
- ZigType *field_type = type_struct_field->type_entry;
+ TypeStructField *field = &struct_type->data.structure.fields[i];
+ ZigType *field_type = field->type_entry;
if (!type_has_bits(field_type))
continue;
(void)get_llvm_type(g, field_type);
if (struct_type->data.structure.resolve_status >= wanted_resolve_status) return;
}
- for (size_t i = 0; i < field_count; i += 1) {
- TypeStructField *type_struct_field = &struct_type->data.structure.fields[i];
- ZigType *field_type = type_struct_field->type_entry;
+ size_t gen_field_index = 0;
+ // Calculate what LLVM thinks the ABI align of the struct will be. We do this to avoid
+ // inserting padding bytes where LLVM would do it automatically.
+ size_t llvm_struct_abi_align = 0;
+ for (size_t i = 0; i < field_count; i += 1) {
+ ZigType *field_type = struct_type->data.structure.fields[i].type_entry;
if (!type_has_bits(field_type))
continue;
+ LLVMTypeRef field_llvm_type = get_llvm_type(g, field_type);
+ size_t llvm_field_abi_align = LLVMABIAlignmentOfType(g->target_data_ref, field_llvm_type);
+ llvm_struct_abi_align = max(llvm_struct_abi_align, llvm_field_abi_align);
+ }
+
+ for (size_t i = 0; i < field_count; i += 1) {
+ TypeStructField *field = &struct_type->data.structure.fields[i];
+ ZigType *field_type = field->type_entry;
+
+ if (!type_has_bits(field_type)) {
+ field->gen_index = SIZE_MAX;
+ continue;
+ }
if (packed) {
size_t field_size_in_bits = type_size_bits(g, field_type);
@@ -6537,12 +6936,61 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS
}
packed_bits_offset = next_packed_bits_offset;
} else {
- element_types[gen_field_index] = get_llvm_type(g, field_type);
-
+ LLVMTypeRef llvm_type;
+ if (i == 0 && async_frame_type != nullptr) {
+ assert(async_frame_type->id == ZigTypeIdFnFrame);
+ assert(field_type->id == ZigTypeIdFn);
+ resolve_llvm_types_fn(g, async_frame_type->data.frame.fn);
+ llvm_type = LLVMPointerType(async_frame_type->data.frame.fn->raw_type_ref, 0);
+ } else {
+ llvm_type = get_llvm_type(g, field_type);
+ }
+ element_types[gen_field_index] = llvm_type;
+ field->gen_index = gen_field_index;
gen_field_index += 1;
+
+ // find the next non-zero-byte field for offset calculations
+ size_t next_src_field_index = i + 1;
+ for (; next_src_field_index < field_count; next_src_field_index += 1) {
+ if (type_has_bits(struct_type->data.structure.fields[next_src_field_index].type_entry))
+ break;
+ }
+ size_t next_abi_align;
+ if (next_src_field_index == field_count) {
+ next_abi_align = struct_type->abi_align;
+ } else {
+ if (struct_type->data.structure.fields[next_src_field_index].align == 0) {
+ next_abi_align = struct_type->data.structure.fields[next_src_field_index].type_entry->abi_align;
+ } else {
+ next_abi_align = struct_type->data.structure.fields[next_src_field_index].align;
+ }
+ }
+ size_t llvm_next_abi_align = (next_src_field_index == field_count) ?
+ llvm_struct_abi_align :
+ LLVMABIAlignmentOfType(g->target_data_ref,
+ get_llvm_type(g, struct_type->data.structure.fields[next_src_field_index].type_entry));
+
+ size_t next_offset = next_field_offset(field->offset, struct_type->abi_align,
+ field_type->abi_size, next_abi_align);
+ size_t llvm_next_offset = next_field_offset(field->offset, llvm_struct_abi_align,
+ LLVMABISizeOfType(g->target_data_ref, llvm_type), llvm_next_abi_align);
+
+ assert(next_offset >= llvm_next_offset);
+ if (next_offset > llvm_next_offset) {
+ size_t pad_bytes = next_offset - (field->offset + field_type->abi_size);
+ if (pad_bytes != 0) {
+ LLVMTypeRef pad_llvm_type = LLVMArrayType(LLVMInt8Type(), pad_bytes);
+ element_types[gen_field_index] = pad_llvm_type;
+ gen_field_index += 1;
+ }
+ }
}
debug_field_count += 1;
}
+ if (!packed) {
+ struct_type->data.structure.gen_field_count = gen_field_index;
+ }
+
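
Note: the non-packed branch above now mirrors the Zig-side layout computed in get_struct_type: for each field it compares the offset Zig wants for the next field with the offset LLVM would choose on its own, and when Zig's is larger it appends an explicit [N x i8] filler so the LLVM struct cannot silently diverge (for example when a field carries an alignment override bigger than its natural ABI alignment). The decision, reduced to plain arithmetic with hypothetical numbers:

    // sketch.cpp -- when to emit an explicit pad field
    #include <cassert>
    #include <cstdio>
    #include <cstddef>

    int main(void) {
        size_t field_offset = 0, field_abi_size = 4; // current field: 4 bytes at offset 0
        size_t zig_next_offset = 16;   // next field has an align(16) override
        size_t llvm_next_offset = 8;   // LLVM only sees the natural alignment of 8
        assert(zig_next_offset >= llvm_next_offset);
        if (zig_next_offset > llvm_next_offset) {
            size_t pad_bytes = zig_next_offset - (field_offset + field_abi_size); // 12
            if (pad_bytes != 0)
                printf("emit a [%zu x i8] pad field\n", pad_bytes);
        }
        return 0;
    }
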
if (first_packed_bits_offset_misalign != SIZE_MAX) {
size_t full_bit_count = packed_bits_offset - first_packed_bits_offset_misalign;
size_t full_abi_size = get_abi_size_bytes(full_bit_count, g->pointer_size_bytes);
@@ -6551,19 +6999,20 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS
}
if (type_has_bits(struct_type)) {
- LLVMStructSetBody(struct_type->llvm_type, element_types, (unsigned)gen_field_count, packed);
+ LLVMStructSetBody(struct_type->llvm_type, element_types,
+ (unsigned)struct_type->data.structure.gen_field_count, packed);
}
ZigLLVMDIType **di_element_types = allocate<ZigLLVMDIType*>(debug_field_count);
size_t debug_field_index = 0;
for (size_t i = 0; i < field_count; i += 1) {
- TypeStructField *type_struct_field = &struct_type->data.structure.fields[i];
- size_t gen_field_index = type_struct_field->gen_index;
+ TypeStructField *field = &struct_type->data.structure.fields[i];
+ size_t gen_field_index = field->gen_index;
if (gen_field_index == SIZE_MAX) {
continue;
}
- ZigType *field_type = type_struct_field->type_entry;
+ ZigType *field_type = field->type_entry;
// if the field is a function, actually the debug info should be a pointer.
ZigLLVMDIType *field_di_type;
@@ -6581,13 +7030,13 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS
uint64_t debug_align_in_bits;
uint64_t debug_offset_in_bits;
if (packed) {
- debug_size_in_bits = type_struct_field->type_entry->size_in_bits;
- debug_align_in_bits = 8 * type_struct_field->type_entry->abi_align;
- debug_offset_in_bits = 8 * type_struct_field->offset + type_struct_field->bit_offset_in_host;
+ debug_size_in_bits = field->type_entry->size_in_bits;
+ debug_align_in_bits = 8 * field->type_entry->abi_align;
+ debug_offset_in_bits = 8 * field->offset + field->bit_offset_in_host;
} else {
debug_size_in_bits = 8 * get_store_size_bytes(field_type->size_in_bits);
debug_align_in_bits = 8 * field_type->abi_align;
- debug_offset_in_bits = 8 * type_struct_field->offset;
+ debug_offset_in_bits = 8 * field->offset;
}
unsigned line;
if (decl_node != nullptr) {
@@ -6597,7 +7046,7 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS
line = 0;
}
di_element_types[debug_field_index] = ZigLLVMCreateDebugMemberType(g->dbuilder,
- ZigLLVMTypeToScope(struct_type->llvm_di_type), buf_ptr(type_struct_field->name),
+ ZigLLVMTypeToScope(struct_type->llvm_di_type), buf_ptr(field->name),
di_file, line,
debug_size_in_bits,
debug_align_in_bits,
@@ -6838,7 +7287,7 @@ static void resolve_llvm_types_union(CodeGen *g, ZigType *union_type, ResolveSta
union_type->data.unionation.resolve_status = ResolveStatusLLVMFull;
}
-static void resolve_llvm_types_pointer(CodeGen *g, ZigType *type) {
+static void resolve_llvm_types_pointer(CodeGen *g, ZigType *type, ResolveStatus wanted_resolve_status) {
if (type->llvm_di_type != nullptr) return;
if (!type_has_bits(type)) {
@@ -6867,7 +7316,7 @@ static void resolve_llvm_types_pointer(CodeGen *g, ZigType *type) {
uint64_t debug_align_in_bits = 8*type->abi_align;
type->llvm_di_type = ZigLLVMCreateDebugPointerType(g->dbuilder, elem_type->llvm_di_type,
debug_size_in_bits, debug_align_in_bits, buf_ptr(&type->name));
- assertNoError(type_resolve(g, elem_type, ResolveStatusLLVMFull));
+ assertNoError(type_resolve(g, elem_type, wanted_resolve_status));
} else {
ZigType *host_int_type = get_int_type(g, false, type->data.pointer.host_int_bytes * 8);
LLVMTypeRef host_int_llvm_type = get_llvm_type(g, host_int_type);
@@ -6993,10 +7442,17 @@ static void resolve_llvm_types_error_union(CodeGen *g, ZigType *type) {
} else {
LLVMTypeRef err_set_llvm_type = get_llvm_type(g, err_set_type);
LLVMTypeRef payload_llvm_type = get_llvm_type(g, payload_type);
- LLVMTypeRef elem_types[2];
+ LLVMTypeRef elem_types[3];
elem_types[err_union_err_index] = err_set_llvm_type;
elem_types[err_union_payload_index] = payload_llvm_type;
+
type->llvm_type = LLVMStructType(elem_types, 2, false);
+ if (LLVMABISizeOfType(g->target_data_ref, type->llvm_type) != type->abi_size) {
+ // we need to do our own padding
+ type->data.error_union.pad_llvm_type = LLVMArrayType(LLVMInt8Type(), type->data.error_union.pad_bytes);
+ elem_types[2] = type->data.error_union.pad_llvm_type;
+ type->llvm_type = LLVMStructType(elem_types, 3, false);
+ }
ZigLLVMDIScope *compile_unit_scope = ZigLLVMCompileUnitToScope(g->compile_unit);
ZigLLVMDIFile *di_file = nullptr;
@@ -7068,7 +7524,7 @@ static void resolve_llvm_types_array(CodeGen *g, ZigType *type) {
debug_align_in_bits, get_llvm_di_type(g, elem_type), (int)type->data.array.len);
}
-static void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type) {
+static void resolve_llvm_types_fn_type(CodeGen *g, ZigType *fn_type) {
if (fn_type->llvm_di_type != nullptr) return;
FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
@@ -7085,67 +7541,73 @@ static void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type) {
// +1 for maybe first argument the error return trace
// +2 for maybe arguments async allocator and error code pointer
ZigList<ZigLLVMDIType *> param_di_types = {};
- param_di_types.append(get_llvm_di_type(g, fn_type_id->return_type));
ZigType *gen_return_type;
if (is_async) {
- gen_return_type = get_pointer_to_type(g, g->builtin_types.entry_u8, false);
+ gen_return_type = g->builtin_types.entry_void;
+ param_di_types.append(get_llvm_di_type(g, gen_return_type));
} else if (!type_has_bits(fn_type_id->return_type)) {
gen_return_type = g->builtin_types.entry_void;
+ param_di_types.append(get_llvm_di_type(g, gen_return_type));
} else if (first_arg_return) {
+ gen_return_type = g->builtin_types.entry_void;
+ param_di_types.append(get_llvm_di_type(g, gen_return_type));
ZigType *gen_type = get_pointer_to_type(g, fn_type_id->return_type, false);
gen_param_types.append(get_llvm_type(g, gen_type));
param_di_types.append(get_llvm_di_type(g, gen_type));
- gen_return_type = g->builtin_types.entry_void;
} else {
gen_return_type = fn_type_id->return_type;
+ param_di_types.append(get_llvm_di_type(g, gen_return_type));
}
fn_type->data.fn.gen_return_type = gen_return_type;
- if (prefix_arg_error_return_trace) {
- ZigType *gen_type = get_ptr_to_stack_trace_type(g);
+ if (prefix_arg_error_return_trace && !is_async) {
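+ // async functions presumably carry the error return trace inside their frame,
+ // so they no longer receive it as a separate parameter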
+ ZigType *gen_type = get_pointer_to_type(g, get_stack_trace_type(g), false);
gen_param_types.append(get_llvm_type(g, gen_type));
param_di_types.append(get_llvm_di_type(g, gen_type));
}
if (is_async) {
- {
- // async allocator param
- ZigType *gen_type = fn_type_id->async_allocator_type;
- gen_param_types.append(get_llvm_type(g, gen_type));
- param_di_types.append(get_llvm_di_type(g, gen_type));
- }
+ fn_type->data.fn.gen_param_info = allocate<FnGenParamInfo>(2);
- {
- // error code pointer
- ZigType *gen_type = get_pointer_to_type(g, g->builtin_types.entry_global_error_set, false);
- gen_param_types.append(get_llvm_type(g, gen_type));
- param_di_types.append(get_llvm_di_type(g, gen_type));
- }
- }
+ ZigType *frame_type = get_any_frame_type(g, fn_type_id->return_type);
+ gen_param_types.append(get_llvm_type(g, frame_type));
+ param_di_types.append(get_llvm_di_type(g, frame_type));
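+ // new async calling convention: parameter 0 is the anyframe->T frame pointer and
+ // parameter 1 (appended below) is a usize, presumably the result-pointer slot used
+ // when await completes (see resolve_llvm_types_fn)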
- fn_type->data.fn.gen_param_info = allocate<FnGenParamInfo>(fn_type_id->param_count);
- for (size_t i = 0; i < fn_type_id->param_count; i += 1) {
- FnTypeParamInfo *src_param_info = &fn_type->data.fn.fn_type_id.param_info[i];
- ZigType *type_entry = src_param_info->type;
- FnGenParamInfo *gen_param_info = &fn_type->data.fn.gen_param_info[i];
+ fn_type->data.fn.gen_param_info[0].src_index = 0;
+ fn_type->data.fn.gen_param_info[0].gen_index = 0;
+ fn_type->data.fn.gen_param_info[0].type = frame_type;
- gen_param_info->src_index = i;
- gen_param_info->gen_index = SIZE_MAX;
+ gen_param_types.append(get_llvm_type(g, g->builtin_types.entry_usize));
+ param_di_types.append(get_llvm_di_type(g, g->builtin_types.entry_usize));
- if (is_c_abi || !type_has_bits(type_entry))
- continue;
+ fn_type->data.fn.gen_param_info[1].src_index = 1;
+ fn_type->data.fn.gen_param_info[1].gen_index = 1;
+ fn_type->data.fn.gen_param_info[1].type = g->builtin_types.entry_usize;
+ } else {
+ fn_type->data.fn.gen_param_info = allocate<FnGenParamInfo>(fn_type_id->param_count);
+ for (size_t i = 0; i < fn_type_id->param_count; i += 1) {
+ FnTypeParamInfo *src_param_info = &fn_type->data.fn.fn_type_id.param_info[i];
+ ZigType *type_entry = src_param_info->type;
+ FnGenParamInfo *gen_param_info = &fn_type->data.fn.gen_param_info[i];
- ZigType *gen_type;
- if (handle_is_ptr(type_entry)) {
- gen_type = get_pointer_to_type(g, type_entry, true);
- gen_param_info->is_byval = true;
- } else {
- gen_type = type_entry;
- }
- gen_param_info->gen_index = gen_param_types.length;
- gen_param_info->type = gen_type;
- gen_param_types.append(get_llvm_type(g, gen_type));
+ gen_param_info->src_index = i;
+ gen_param_info->gen_index = SIZE_MAX;
- param_di_types.append(get_llvm_di_type(g, gen_type));
+ if (is_c_abi || !type_has_bits(type_entry))
+ continue;
+
+ ZigType *gen_type;
+ if (handle_is_ptr(type_entry)) {
+ gen_type = get_pointer_to_type(g, type_entry, true);
+ gen_param_info->is_byval = true;
+ } else {
+ gen_type = type_entry;
+ }
+ gen_param_info->gen_index = gen_param_types.length;
+ gen_param_info->type = gen_type;
+ gen_param_types.append(get_llvm_type(g, gen_type));
+
+ param_di_types.append(get_llvm_di_type(g, gen_type));
+ }
}
if (is_c_abi) {
@@ -7161,6 +7623,7 @@ static void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type) {
for (size_t i = 0; i < gen_param_types.length; i += 1) {
assert(gen_param_types.items[i] != nullptr);
}
+
fn_type->data.fn.raw_type_ref = LLVMFunctionType(get_llvm_type(g, gen_return_type),
gen_param_types.items, (unsigned int)gen_param_types.length, fn_type_id->is_var_args);
fn_type->llvm_type = LLVMPointerType(fn_type->data.fn.raw_type_ref, 0);
@@ -7170,6 +7633,40 @@ static void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type) {
LLVMABIAlignmentOfType(g->target_data_ref, fn_type->llvm_type), "");
}
+void resolve_llvm_types_fn(CodeGen *g, ZigFn *fn) {
+ Error err;
+ if (fn->raw_di_type != nullptr) return;
+
+ ZigType *fn_type = fn->type_entry;
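+ // a non-async function is fully described by its function type, so reuse those
+ // LLVM/debug types; an async function instead gets the (frame ptr, usize) -> void
+ // signature built below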
+ if (!fn_is_async(fn)) {
+ resolve_llvm_types_fn_type(g, fn_type);
+ fn->raw_type_ref = fn_type->data.fn.raw_type_ref;
+ fn->raw_di_type = fn_type->data.fn.raw_di_type;
+ return;
+ }
+
+ ZigType *gen_return_type = g->builtin_types.entry_void;
+ ZigList<ZigLLVMDIType *> param_di_types = {};
+ ZigList<LLVMTypeRef> gen_param_types = {};
+ // first "parameter" is return value
+ param_di_types.append(get_llvm_di_type(g, gen_return_type));
+
+ ZigType *frame_type = get_fn_frame_type(g, fn);
+ ZigType *ptr_type = get_pointer_to_type(g, frame_type, false);
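+ // only a forward declaration of the frame is required here, presumably to avoid a
+ // dependency cycle: fully resolving the frame would require this function's type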
+ if ((err = type_resolve(g, ptr_type, ResolveStatusLLVMFwdDecl)))
+ zig_unreachable();
+ gen_param_types.append(ptr_type->llvm_type);
+ param_di_types.append(ptr_type->llvm_di_type);
+
+ // this parameter is used to pass the result pointer when await completes
+ gen_param_types.append(get_llvm_type(g, g->builtin_types.entry_usize));
+ param_di_types.append(get_llvm_di_type(g, g->builtin_types.entry_usize));
+
+ fn->raw_type_ref = LLVMFunctionType(get_llvm_type(g, gen_return_type),
+ gen_param_types.items, gen_param_types.length, false);
+ fn->raw_di_type = ZigLLVMCreateSubroutineType(g->dbuilder, param_di_types.items, (int)param_di_types.length, 0);
+}
+
static void resolve_llvm_types_anyerror(CodeGen *g) {
ZigType *entry = g->builtin_types.entry_global_error_set;
entry->llvm_type = get_llvm_type(g, g->err_tag_type);
@@ -7194,6 +7691,147 @@ static void resolve_llvm_types_anyerror(CodeGen *g) {
get_llvm_di_type(g, g->err_tag_type), "");
}
+static void resolve_llvm_types_async_frame(CodeGen *g, ZigType *frame_type, ResolveStatus wanted_resolve_status) {
+ ZigType *passed_frame_type = fn_is_async(frame_type->data.frame.fn) ? frame_type : nullptr;
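+ // the frame type is passed through only when the function is actually async;
+ // presumably the locals struct then gets the async frame header fields prepended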
+ resolve_llvm_types_struct(g, frame_type->data.frame.locals_struct, wanted_resolve_status, passed_frame_type);
+ frame_type->llvm_type = frame_type->data.frame.locals_struct->llvm_type;
+ frame_type->llvm_di_type = frame_type->data.frame.locals_struct->llvm_di_type;
+}
+
+static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, ResolveStatus wanted_resolve_status) {
+ if (any_frame_type->llvm_di_type != nullptr) return;
+
+ Buf *name = buf_sprintf("(%s header)", buf_ptr(&any_frame_type->name));
+ LLVMTypeRef frame_header_type = LLVMStructCreateNamed(LLVMGetGlobalContext(), buf_ptr(name));
+ any_frame_type->llvm_type = LLVMPointerType(frame_header_type, 0);
+
+ unsigned dwarf_kind = ZigLLVMTag_DW_structure_type();
+ ZigLLVMDIFile *di_file = nullptr;
+ ZigLLVMDIScope *di_scope = ZigLLVMCompileUnitToScope(g->compile_unit);
+ unsigned line = 0;
+ ZigLLVMDIType *frame_header_di_type = ZigLLVMCreateReplaceableCompositeType(g->dbuilder,
+ dwarf_kind, buf_ptr(name), di_scope, di_file, line);
+ any_frame_type->llvm_di_type = ZigLLVMCreateDebugPointerType(g->dbuilder, frame_header_di_type,
+ 8*g->pointer_size_bytes, 8*g->builtin_types.entry_usize->abi_align, buf_ptr(&any_frame_type->name));
+
+ LLVMTypeRef llvm_void = LLVMVoidType();
+ LLVMTypeRef arg_types[] = {any_frame_type->llvm_type, g->builtin_types.entry_usize->llvm_type};
+ LLVMTypeRef fn_type = LLVMFunctionType(llvm_void, arg_types, 2, false);
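+ // this matches the generated signature of async functions: (frame header ptr, usize) -> void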
+ LLVMTypeRef usize_type_ref = get_llvm_type(g, g->builtin_types.entry_usize);
+ ZigLLVMDIType *usize_di_type = get_llvm_di_type(g, g->builtin_types.entry_usize);
+ ZigLLVMDIScope *compile_unit_scope = ZigLLVMCompileUnitToScope(g->compile_unit);
+
+ ZigType *result_type = any_frame_type->data.any_frame.result_type;
+ ZigType *ptr_result_type = (result_type == nullptr) ? nullptr : get_pointer_to_type(g, result_type, false);
+ LLVMTypeRef ptr_fn_llvm_type = LLVMPointerType(fn_type, 0);
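+ // a null result type means this is the plain `anyframe` type; cache its resume-fn
+ // pointer type on the CodeGen, presumably for reuse elsewhere in codegen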
+ if (result_type == nullptr) {
+ g->anyframe_fn_type = ptr_fn_llvm_type;
+ }
+
+ ZigList<LLVMTypeRef> field_types = {};
+ ZigList<ZigLLVMDIType *> di_element_types = {};
+
+ // label (grep this): [fn_frame_struct_layout]
+ field_types.append(ptr_fn_llvm_type); // fn_ptr
+ field_types.append(usize_type_ref); // resume_index
+ field_types.append(usize_type_ref); // awaiter
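+ // presumably every @Frame(func) layout starts with these same header fields; the
+ // [fn_frame_struct_layout] label marks the code that must agree on the ordering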
+
+ bool have_result_type = result_type != nullptr && type_has_bits(result_type);
+ if (have_result_type) {
+ field_types.append(get_llvm_type(g, ptr_result_type)); // result_ptr_callee
+ field_types.append(get_llvm_type(g, ptr_result_type)); // result_ptr_awaiter
+ field_types.append(get_llvm_type(g, result_type)); // result
+ if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) {
+ ZigType *ptr_stack_trace = get_pointer_to_type(g, get_stack_trace_type(g), false);
+ field_types.append(get_llvm_type(g, ptr_stack_trace)); // ptr_stack_trace_callee
+ field_types.append(get_llvm_type(g, ptr_stack_trace)); // ptr_stack_trace_awaiter
+ }
+ }
+ LLVMStructSetBody(frame_header_type, field_types.items, field_types.length, false);
+
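+ // each debug member below takes its size/alignment/offset from the LLVM layout just
+ // set, with di_element_types.length doubling as the running field index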
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "fn_ptr",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, usize_di_type));
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "resume_index",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, usize_di_type));
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "awaiter",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, usize_di_type));
+
+ if (have_result_type) {
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "result_ptr_callee",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_result_type)));
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "result_ptr_awaiter",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_result_type)));
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "result",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, result_type)));
+
+ if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) {
+ ZigType *ptr_stack_trace = get_pointer_to_type(g, get_stack_trace_type(g), false);
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "ptr_stack_trace_callee",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_stack_trace)));
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "ptr_stack_trace_awaiter",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_stack_trace)));
+ }
+ }
+
+ ZigLLVMDIType *replacement_di_type = ZigLLVMCreateDebugStructType(g->dbuilder,
+ compile_unit_scope, buf_ptr(name),
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, frame_header_type),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, frame_header_type),
+ ZigLLVM_DIFlags_Zero,
+ nullptr, di_element_types.items, di_element_types.length, 0, nullptr, "");
+
+ ZigLLVMReplaceTemporary(g->dbuilder, frame_header_di_type, replacement_di_type);
+}
+
static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_resolve_status) {
assert(type->id == ZigTypeIdOpaque || type_is_resolved(type, ResolveStatusSizeKnown));
assert(wanted_resolve_status > ResolveStatusSizeKnown);
@@ -7219,20 +7857,13 @@ static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_r
if (type->data.structure.is_slice)
return resolve_llvm_types_slice(g, type, wanted_resolve_status);
else
- return resolve_llvm_types_struct(g, type, wanted_resolve_status);
+ return resolve_llvm_types_struct(g, type, wanted_resolve_status, nullptr);
case ZigTypeIdEnum:
return resolve_llvm_types_enum(g, type);
case ZigTypeIdUnion:
return resolve_llvm_types_union(g, type, wanted_resolve_status);
case ZigTypeIdPointer:
- return resolve_llvm_types_pointer(g, type);
- case ZigTypeIdPromise: {
- if (type->llvm_di_type != nullptr) return;
- ZigType *u8_ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, false);
- type->llvm_type = get_llvm_type(g, u8_ptr_type);
- type->llvm_di_type = get_llvm_di_type(g, u8_ptr_type);
- return;
- }
+ return resolve_llvm_types_pointer(g, type, wanted_resolve_status);
case ZigTypeIdInt:
return resolve_llvm_types_integer(g, type);
case ZigTypeIdOptional:
@@ -7242,7 +7873,7 @@ static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_r
case ZigTypeIdArray:
return resolve_llvm_types_array(g, type);
case ZigTypeIdFn:
- return resolve_llvm_types_fn(g, type);
+ return resolve_llvm_types_fn_type(g, type);
case ZigTypeIdErrorSet: {
if (type->llvm_di_type != nullptr) return;
@@ -7261,14 +7892,18 @@ static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_r
type->abi_align, get_llvm_di_type(g, type->data.vector.elem_type), type->data.vector.len);
return;
}
+ case ZigTypeIdFnFrame:
+ return resolve_llvm_types_async_frame(g, type, wanted_resolve_status);
+ case ZigTypeIdAnyFrame:
+ return resolve_llvm_types_any_frame(g, type, wanted_resolve_status);
}
zig_unreachable();
}
LLVMTypeRef get_llvm_type(CodeGen *g, ZigType *type) {
assertNoError(type_resolve(g, type, ResolveStatusLLVMFull));
- assert(type->abi_size == 0 || type->abi_size == LLVMABISizeOfType(g->target_data_ref, type->llvm_type));
- assert(type->abi_align == 0 || type->abi_align == LLVMABIAlignmentOfType(g->target_data_ref, type->llvm_type));
+ assert(type->abi_size == 0 || type->abi_size >= LLVMABISizeOfType(g->target_data_ref, type->llvm_type));
+ assert(type->abi_align == 0 || type->abi_align >= LLVMABIAlignmentOfType(g->target_data_ref, type->llvm_type));
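+ // >= rather than ==: presumably some types (e.g. async frames) report an ABI size or
+ // alignment larger than LLVM's natural layout for their llvm_type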
return type->llvm_type;
}