author      Andrew Kelley <andrew@ziglang.org>    2019-08-15 14:01:01 -0700
committer   GitHub <noreply@github.com>           2019-08-15 14:01:01 -0700
commit      8b97a1aee2b161b9604d3b0c88166d0f0aef7e64 (patch)
tree        7a8d65ad3ef59679cf4318a2fea88b6979207de4 /src
parent      729807203a4ef162f39656be062dd11a428af8e3 (diff)
parent      d3672493cc6ad5085f202df1859b13b4ae4dec96 (diff)
download    zig-8b97a1aee2b161b9604d3b0c88166d0f0aef7e64.tar.gz
            zig-8b97a1aee2b161b9604d3b0c88166d0f0aef7e64.zip
Merge pull request #3033 from ziglang/rewrite-coroutines
rework async function semantics
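
This merge replaces the allocator-backed coroutines (the `promise`/`promise->T` types, `@handle`, and the implicit async allocator) with caller-owned function frames: `@Frame(func)` and `anyframe`/`anyframe->T` are the frame types, and `@asyncCall`, `@frame`, and `@frameSize` are among the builtins added below. As a rough sketch of the user-facing model this enables (an illustrative test, not part of the diff):

    // Sketch of the post-rewrite semantics; `addOne` and `global_x` are
    // illustrative names, not taken from this commit.
    const std = @import("std");
    const assert = std.debug.assert;

    var global_x: i32 = 1;

    fn addOne() void {
        global_x += 1;
        suspend; // control returns to whoever made the `async` call
        global_x += 1;
    }

    test "suspend and resume an async function frame" {
        var frame = async addOne(); // `frame` has type @Frame(addOne); no allocator involved
        assert(global_x == 2);
        resume frame;               // run addOne past its suspend point to completion
        assert(global_x == 3);
    }
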
Diffstat (limited to 'src')
-rw-r--r--  src/all_types.hpp     377
-rw-r--r--  src/analyze.cpp      1111
-rw-r--r--  src/analyze.hpp        18
-rw-r--r--  src/ast_render.cpp     45
-rw-r--r--  src/codegen.cpp      2148
-rw-r--r--  src/codegen.hpp         2
-rw-r--r--  src/ir.cpp           2540
-rw-r--r--  src/ir.hpp              2
-rw-r--r--  src/ir_print.cpp      333
-rw-r--r--  src/parser.cpp         46
-rw-r--r--  src/target.cpp          4
-rw-r--r--  src/target.hpp          2
-rw-r--r--  src/tokenizer.cpp       6
-rw-r--r--  src/tokenizer.hpp       3
-rw-r--r--  src/zig_llvm.cpp       11
-rw-r--r--  src/zig_llvm.h          2
16 files changed, 3275 insertions, 3375 deletions
diff --git a/src/all_types.hpp b/src/all_types.hpp
index e9d5aa3834..f1c699ba10 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -35,6 +35,7 @@ struct ConstExprValue;
struct IrInstruction;
struct IrInstructionCast;
struct IrInstructionAllocaGen;
+struct IrInstructionCallGen;
struct IrBasicBlock;
struct ScopeDecls;
struct ZigWindowsSDK;
@@ -70,20 +71,10 @@ struct IrExecutable {
Scope *begin_scope;
ZigList<Tld *> tld_list;
- IrInstruction *coro_handle;
- IrInstruction *atomic_state_field_ptr; // this one is shared and in the promise
- IrInstruction *coro_result_ptr_field_ptr;
- IrInstruction *coro_result_field_ptr;
- IrInstruction *await_handle_var_ptr; // this one is where we put the one we extracted from the promise
- IrBasicBlock *coro_early_final;
- IrBasicBlock *coro_normal_final;
- IrBasicBlock *coro_suspend_block;
- IrBasicBlock *coro_final_cleanup_block;
- ZigVar *coro_allocator_var;
-
bool invalid;
bool is_inline;
bool is_generic_instantiation;
+ bool need_err_code_spill;
};
enum OutType {
@@ -485,11 +476,10 @@ enum NodeType {
NodeTypeIfErrorExpr,
NodeTypeIfOptional,
NodeTypeErrorSetDecl,
- NodeTypeCancel,
NodeTypeResume,
NodeTypeAwaitExpr,
NodeTypeSuspend,
- NodeTypePromiseType,
+ NodeTypeAnyFrameType,
NodeTypeEnumLiteral,
};
@@ -522,7 +512,6 @@ struct AstNodeFnProto {
AstNode *section_expr;
bool auto_err_set;
- AstNode *async_allocator_type;
};
struct AstNodeFnDef {
@@ -657,7 +646,6 @@ struct AstNodeFnCallExpr {
bool is_builtin;
bool is_async;
bool seen; // used by @compileLog
- AstNode *async_allocator;
};
struct AstNodeArrayAccessExpr {
@@ -922,10 +910,6 @@ struct AstNodeBreakExpr {
AstNode *expr; // may be null
};
-struct AstNodeCancelExpr {
- AstNode *expr;
-};
-
struct AstNodeResumeExpr {
AstNode *expr;
};
@@ -949,7 +933,7 @@ struct AstNodeSuspend {
AstNode *block;
};
-struct AstNodePromiseType {
+struct AstNodeAnyFrameType {
AstNode *payload_type; // can be NULL
};
@@ -1014,13 +998,16 @@ struct AstNode {
AstNodeInferredArrayType inferred_array_type;
AstNodeErrorType error_type;
AstNodeErrorSetDecl err_set_decl;
- AstNodeCancelExpr cancel_expr;
AstNodeResumeExpr resume_expr;
AstNodeAwaitExpr await_expr;
AstNodeSuspend suspend;
- AstNodePromiseType promise_type;
+ AstNodeAnyFrameType anyframe_type;
AstNodeEnumLiteral enum_literal;
} data;
+
+ // This is a function for use in the debugger to print
+ // the source location.
+ void src();
};
// this struct is allocated with allocate_nonzero
@@ -1047,7 +1034,6 @@ struct FnTypeId {
bool is_var_args;
CallingConvention cc;
uint32_t alignment;
- ZigType *async_allocator_type;
};
uint32_t fn_type_id_hash(FnTypeId*);
@@ -1095,6 +1081,7 @@ struct TypeStructField {
ConstExprValue *init_val; // null and then memoized
uint32_t bit_offset_in_host; // offset from the memory at gen_index
uint32_t host_int_bytes; // size of host integer
+ uint32_t align;
};
enum ResolveStatus {
@@ -1156,6 +1143,8 @@ struct ZigTypeOptional {
struct ZigTypeErrorUnion {
ZigType *err_set_type;
ZigType *payload_type;
+ size_t pad_bytes;
+ LLVMTypeRef pad_llvm_type;
};
struct ZigTypeErrorSet {
@@ -1241,11 +1230,6 @@ struct ZigTypeBoundFn {
ZigType *fn_type;
};
-struct ZigTypePromise {
- // null if `promise` instead of `promise->T`
- ZigType *result_type;
-};
-
struct ZigTypeVector {
// The type must be a pointer, integer, or float
ZigType *elem_type;
@@ -1276,7 +1260,8 @@ enum ZigTypeId {
ZigTypeIdBoundFn,
ZigTypeIdArgTuple,
ZigTypeIdOpaque,
- ZigTypeIdPromise,
+ ZigTypeIdFnFrame,
+ ZigTypeIdAnyFrame,
ZigTypeIdVector,
ZigTypeIdEnumLiteral,
};
@@ -1291,6 +1276,15 @@ struct ZigTypeOpaque {
Buf *bare_name;
};
+struct ZigTypeFnFrame {
+ ZigFn *fn;
+ ZigType *locals_struct;
+};
+
+struct ZigTypeAnyFrame {
+ ZigType *result_type; // null if `anyframe` instead of `anyframe->T`
+};
+
struct ZigType {
ZigTypeId id;
Buf name;
@@ -1314,16 +1308,16 @@ struct ZigType {
ZigTypeUnion unionation;
ZigTypeFn fn;
ZigTypeBoundFn bound_fn;
- ZigTypePromise promise;
ZigTypeVector vector;
ZigTypeOpaque opaque;
+ ZigTypeFnFrame frame;
+ ZigTypeAnyFrame any_frame;
} data;
// use these fields to make sure we don't duplicate type table entries for the same type
ZigType *pointer_parent[2]; // [0 - mut, 1 - const]
ZigType *optional_parent;
- ZigType *promise_parent;
- ZigType *promise_frame_parent;
+ ZigType *any_frame_parent;
// If we generate a constant name value for this type, we memoize it here.
// The type of this is array
ConstExprValue *cached_const_name_val;
@@ -1359,7 +1353,6 @@ struct GlobalExport {
};
struct ZigFn {
- CodeGen *codegen;
LLVMValueRef llvm_value;
const char *llvm_name;
AstNode *proto_node;
@@ -1368,7 +1361,17 @@ struct ZigFn {
Scope *child_scope; // parent is scope for last parameter
ScopeBlock *def_scope; // parent is child_scope
Buf symbol_name;
- ZigType *type_entry; // function type
+ // This is the function type assuming the function does not suspend.
+ // Note that for an async function, this can be shared with non-async functions. So the value here
+ // should only be read for things in common between non-async and async function types.
+ ZigType *type_entry;
+ // For normal functions one could use the type_entry->raw_type_ref and type_entry->raw_di_type.
+ // However for functions that suspend, those values could possibly be their non-suspending equivalents.
+ // So these values should be preferred.
+ LLVMTypeRef raw_type_ref;
+ ZigLLVMDIType *raw_di_type;
+
+ ZigType *frame_type;
// in the case of normal functions this is the implicit return type
// in the case of async functions this is the implicit return type according to the
// zig source code, not according to zig ir
@@ -1379,6 +1382,7 @@ struct ZigFn {
size_t prealloc_backward_branch_quota;
AstNode **param_source_nodes;
Buf **param_names;
+ IrInstruction *err_code_spill;
AstNode *fn_no_inline_set_node;
AstNode *fn_static_eval_set_node;
@@ -1390,8 +1394,11 @@ struct ZigFn {
AstNode *set_alignstack_node;
AstNode *set_cold_node;
+ const AstNode *inferred_async_node;
+ ZigFn *inferred_async_fn;
ZigList<GlobalExport> export_list;
+ ZigList<IrInstructionCallGen *> call_list;
LLVMValueRef valgrind_client_request_array;
@@ -1442,8 +1449,6 @@ enum BuiltinFnId {
BuiltinFnIdErrName,
BuiltinFnIdBreakpoint,
BuiltinFnIdReturnAddress,
- BuiltinFnIdFrameAddress,
- BuiltinFnIdHandle,
BuiltinFnIdEmbedFile,
BuiltinFnIdCmpxchgWeak,
BuiltinFnIdCmpxchgStrong,
@@ -1499,6 +1504,7 @@ enum BuiltinFnId {
BuiltinFnIdInlineCall,
BuiltinFnIdNoInlineCall,
BuiltinFnIdNewStackCall,
+ BuiltinFnIdAsyncCall,
BuiltinFnIdTypeId,
BuiltinFnIdShlExact,
BuiltinFnIdShrExact,
@@ -1514,6 +1520,10 @@ enum BuiltinFnId {
BuiltinFnIdAtomicLoad,
BuiltinFnIdHasDecl,
BuiltinFnIdUnionInit,
+ BuiltinFnIdFrameAddress,
+ BuiltinFnIdFrameType,
+ BuiltinFnIdFrameHandle,
+ BuiltinFnIdFrameSize,
};
struct BuiltinFnEntry {
@@ -1541,6 +1551,12 @@ enum PanicMsgId {
PanicMsgIdBadEnumValue,
PanicMsgIdFloatToInt,
PanicMsgIdPtrCastNull,
+ PanicMsgIdBadResume,
+ PanicMsgIdBadAwait,
+ PanicMsgIdBadReturn,
+ PanicMsgIdResumedAnAwaitingFn,
+ PanicMsgIdFrameTooSmall,
+ PanicMsgIdResumedFnPendingAwait,
PanicMsgIdCount,
};
@@ -1701,7 +1717,13 @@ struct CodeGen {
LLVMTargetMachineRef target_machine;
ZigLLVMDIFile *dummy_di_file;
LLVMValueRef cur_ret_ptr;
+ LLVMValueRef cur_frame_ptr;
LLVMValueRef cur_fn_val;
+ LLVMValueRef cur_async_switch_instr;
+ LLVMValueRef cur_async_resume_index_ptr;
+ LLVMValueRef cur_async_awaiter_ptr;
+ LLVMBasicBlockRef cur_preamble_llvm_block;
+ size_t cur_resume_block_count;
LLVMValueRef cur_err_ret_trace_val_arg;
LLVMValueRef cur_err_ret_trace_val_stack;
LLVMValueRef memcpy_fn_val;
@@ -1709,28 +1731,16 @@ struct CodeGen {
LLVMValueRef trap_fn_val;
LLVMValueRef return_address_fn_val;
LLVMValueRef frame_address_fn_val;
- LLVMValueRef coro_destroy_fn_val;
- LLVMValueRef coro_id_fn_val;
- LLVMValueRef coro_alloc_fn_val;
- LLVMValueRef coro_size_fn_val;
- LLVMValueRef coro_begin_fn_val;
- LLVMValueRef coro_suspend_fn_val;
- LLVMValueRef coro_end_fn_val;
- LLVMValueRef coro_free_fn_val;
- LLVMValueRef coro_resume_fn_val;
- LLVMValueRef coro_save_fn_val;
- LLVMValueRef coro_promise_fn_val;
- LLVMValueRef coro_alloc_helper_fn_val;
- LLVMValueRef coro_frame_fn_val;
- LLVMValueRef merge_err_ret_traces_fn_val;
LLVMValueRef add_error_return_trace_addr_fn_val;
LLVMValueRef stacksave_fn_val;
LLVMValueRef stackrestore_fn_val;
LLVMValueRef write_register_fn_val;
+ LLVMValueRef merge_err_ret_traces_fn_val;
LLVMValueRef sp_md_node;
LLVMValueRef err_name_table;
LLVMValueRef safety_crash_err_fn;
LLVMValueRef return_err_fn;
+ LLVMTypeRef anyframe_fn_type;
// reminder: hash tables must be initialized before use
HashMap<Buf *, ZigType *, buf_hash, buf_eql_buf> import_table;
@@ -1797,12 +1807,12 @@ struct CodeGen {
ZigType *entry_var;
ZigType *entry_global_error_set;
ZigType *entry_arg_tuple;
- ZigType *entry_promise;
ZigType *entry_enum_literal;
+ ZigType *entry_any_frame;
} builtin_types;
+
ZigType *align_amt_type;
ZigType *stack_trace_type;
- ZigType *ptr_to_stack_trace_type;
ZigType *err_tag_type;
ZigType *test_fn_type;
@@ -1938,6 +1948,7 @@ struct ZigVar {
ZigType *var_type;
LLVMValueRef value_ref;
IrInstruction *is_comptime;
+ IrInstruction *ptr_instruction;
// which node is the declaration of the variable
AstNode *decl_node;
ZigLLVMDILocalVariable *di_loc_var;
@@ -1985,7 +1996,6 @@ enum ScopeId {
ScopeIdSuspend,
ScopeIdFnDef,
ScopeIdCompTime,
- ScopeIdCoroPrelude,
ScopeIdRuntime,
};
@@ -2109,7 +2119,6 @@ struct ScopeRuntime {
struct ScopeSuspend {
Scope base;
- IrBasicBlock *resume_block;
bool reported_err;
};
@@ -2128,12 +2137,6 @@ struct ScopeFnDef {
ZigFn *fn_entry;
};
-// This scope is created to indicate that the code in the scope
-// is auto-generated coroutine prelude stuff.
-struct ScopeCoroPrelude {
- Scope base;
-};
-
// synchronized with code in define_builtin_compile_vars
enum AtomicOrder {
AtomicOrderUnordered,
@@ -2231,7 +2234,7 @@ enum IrInstructionId {
IrInstructionIdSetRuntimeSafety,
IrInstructionIdSetFloatMode,
IrInstructionIdArrayType,
- IrInstructionIdPromiseType,
+ IrInstructionIdAnyFrameType,
IrInstructionIdSliceType,
IrInstructionIdGlobalAsm,
IrInstructionIdAsm,
@@ -2278,7 +2281,10 @@ enum IrInstructionId {
IrInstructionIdBreakpoint,
IrInstructionIdReturnAddress,
IrInstructionIdFrameAddress,
- IrInstructionIdHandle,
+ IrInstructionIdFrameHandle,
+ IrInstructionIdFrameType,
+ IrInstructionIdFrameSizeSrc,
+ IrInstructionIdFrameSizeGen,
IrInstructionIdAlignOf,
IrInstructionIdOverflowOp,
IrInstructionIdTestErrSrc,
@@ -2321,35 +2327,16 @@ enum IrInstructionId {
IrInstructionIdImplicitCast,
IrInstructionIdResolveResult,
IrInstructionIdResetResult,
- IrInstructionIdResultPtr,
IrInstructionIdOpaqueType,
IrInstructionIdSetAlignStack,
IrInstructionIdArgType,
IrInstructionIdExport,
IrInstructionIdErrorReturnTrace,
IrInstructionIdErrorUnion,
- IrInstructionIdCancel,
- IrInstructionIdGetImplicitAllocator,
- IrInstructionIdCoroId,
- IrInstructionIdCoroAlloc,
- IrInstructionIdCoroSize,
- IrInstructionIdCoroBegin,
- IrInstructionIdCoroAllocFail,
- IrInstructionIdCoroSuspend,
- IrInstructionIdCoroEnd,
- IrInstructionIdCoroFree,
- IrInstructionIdCoroResume,
- IrInstructionIdCoroSave,
- IrInstructionIdCoroPromise,
- IrInstructionIdCoroAllocHelper,
IrInstructionIdAtomicRmw,
IrInstructionIdAtomicLoad,
- IrInstructionIdPromiseResultType,
- IrInstructionIdAwaitBookkeeping,
IrInstructionIdSaveErrRetAddr,
IrInstructionIdAddImplicitReturnType,
- IrInstructionIdMergeErrRetTraces,
- IrInstructionIdMarkErrRetTracePtr,
IrInstructionIdErrSetCast,
IrInstructionIdToBytes,
IrInstructionIdFromBytes,
@@ -2365,6 +2352,13 @@ enum IrInstructionId {
IrInstructionIdEndExpr,
IrInstructionIdPtrOfArrayToSlice,
IrInstructionIdUnionInitNamedField,
+ IrInstructionIdSuspendBegin,
+ IrInstructionIdSuspendFinish,
+ IrInstructionIdAwaitSrc,
+ IrInstructionIdAwaitGen,
+ IrInstructionIdResume,
+ IrInstructionIdSpillBegin,
+ IrInstructionIdSpillEnd,
};
struct IrInstruction {
@@ -2607,7 +2601,6 @@ struct IrInstructionCallSrc {
IrInstruction **args;
ResultLoc *result_loc;
- IrInstruction *async_allocator;
IrInstruction *new_stack;
FnInline fn_inline;
bool is_async;
@@ -2622,8 +2615,8 @@ struct IrInstructionCallGen {
size_t arg_count;
IrInstruction **args;
IrInstruction *result_loc;
+ IrInstruction *frame_result_loc;
- IrInstruction *async_allocator;
IrInstruction *new_stack;
FnInline fn_inline;
bool is_async;
@@ -2639,7 +2632,7 @@ struct IrInstructionConst {
struct IrInstructionReturn {
IrInstruction base;
- IrInstruction *value;
+ IrInstruction *operand;
};
enum CastOp {
@@ -2744,7 +2737,7 @@ struct IrInstructionPtrType {
bool is_allow_zero;
};
-struct IrInstructionPromiseType {
+struct IrInstructionAnyFrameType {
IrInstruction base;
IrInstruction *payload_type;
@@ -3084,8 +3077,26 @@ struct IrInstructionFrameAddress {
IrInstruction base;
};
-struct IrInstructionHandle {
+struct IrInstructionFrameHandle {
+ IrInstruction base;
+};
+
+struct IrInstructionFrameType {
+ IrInstruction base;
+
+ IrInstruction *fn;
+};
+
+struct IrInstructionFrameSizeSrc {
+ IrInstruction base;
+
+ IrInstruction *fn;
+};
+
+struct IrInstructionFrameSizeGen {
IrInstruction base;
+
+ IrInstruction *fn;
};
enum IrOverflowOp {
@@ -3127,6 +3138,7 @@ struct IrInstructionTestErrSrc {
IrInstruction base;
bool resolve_err_set;
+ bool base_ptr_is_payload;
IrInstruction *base_ptr;
};
@@ -3179,7 +3191,6 @@ struct IrInstructionFnProto {
IrInstruction **param_types;
IrInstruction *align_value;
IrInstruction *return_type;
- IrInstruction *async_allocator_type_value;
bool is_var_args;
};
@@ -3409,95 +3420,6 @@ struct IrInstructionErrorUnion {
IrInstruction *payload;
};
-struct IrInstructionCancel {
- IrInstruction base;
-
- IrInstruction *target;
-};
-
-enum ImplicitAllocatorId {
- ImplicitAllocatorIdArg,
- ImplicitAllocatorIdLocalVar,
-};
-
-struct IrInstructionGetImplicitAllocator {
- IrInstruction base;
-
- ImplicitAllocatorId id;
-};
-
-struct IrInstructionCoroId {
- IrInstruction base;
-
- IrInstruction *promise_ptr;
-};
-
-struct IrInstructionCoroAlloc {
- IrInstruction base;
-
- IrInstruction *coro_id;
-};
-
-struct IrInstructionCoroSize {
- IrInstruction base;
-};
-
-struct IrInstructionCoroBegin {
- IrInstruction base;
-
- IrInstruction *coro_id;
- IrInstruction *coro_mem_ptr;
-};
-
-struct IrInstructionCoroAllocFail {
- IrInstruction base;
-
- IrInstruction *err_val;
-};
-
-struct IrInstructionCoroSuspend {
- IrInstruction base;
-
- IrInstruction *save_point;
- IrInstruction *is_final;
-};
-
-struct IrInstructionCoroEnd {
- IrInstruction base;
-};
-
-struct IrInstructionCoroFree {
- IrInstruction base;
-
- IrInstruction *coro_id;
- IrInstruction *coro_handle;
-};
-
-struct IrInstructionCoroResume {
- IrInstruction base;
-
- IrInstruction *awaiter_handle;
-};
-
-struct IrInstructionCoroSave {
- IrInstruction base;
-
- IrInstruction *coro_handle;
-};
-
-struct IrInstructionCoroPromise {
- IrInstruction base;
-
- IrInstruction *coro_handle;
-};
-
-struct IrInstructionCoroAllocHelper {
- IrInstruction base;
-
- IrInstruction *realloc_fn;
- IrInstruction *coro_size;
-};
-
struct IrInstructionAtomicRmw {
IrInstruction base;
@@ -3519,18 +3441,6 @@ struct IrInstructionAtomicLoad {
AtomicOrder resolved_ordering;
};
-struct IrInstructionPromiseResultType {
- IrInstruction base;
-
- IrInstruction *promise_type;
-};
-
-struct IrInstructionAwaitBookkeeping {
- IrInstruction base;
-
- IrInstruction *promise_result_type;
-};
-
struct IrInstructionSaveErrRetAddr {
IrInstruction base;
};
@@ -3541,20 +3451,6 @@ struct IrInstructionAddImplicitReturnType {
IrInstruction *value;
};
-struct IrInstructionMergeErrRetTraces {
- IrInstruction base;
-
- IrInstruction *coro_promise_ptr;
- IrInstruction *src_err_ret_trace_ptr;
- IrInstruction *dest_err_ret_trace_ptr;
-};
-
-struct IrInstructionMarkErrRetTracePtr {
- IrInstruction base;
-
- IrInstruction *err_ret_trace_ptr;
-};
-
// For float ops which take a single argument
struct IrInstructionFloatOp {
IrInstruction base;
@@ -3645,6 +3541,7 @@ struct IrInstructionAllocaGen {
uint32_t align;
const char *name_hint;
+ size_t field_index;
};
struct IrInstructionEndExpr {
@@ -3692,6 +3589,56 @@ struct IrInstructionPtrOfArrayToSlice {
IrInstruction *result_loc;
};
+struct IrInstructionSuspendBegin {
+ IrInstruction base;
+
+ LLVMBasicBlockRef resume_bb;
+};
+
+struct IrInstructionSuspendFinish {
+ IrInstruction base;
+
+ IrInstructionSuspendBegin *begin;
+};
+
+struct IrInstructionAwaitSrc {
+ IrInstruction base;
+
+ IrInstruction *frame;
+ ResultLoc *result_loc;
+};
+
+struct IrInstructionAwaitGen {
+ IrInstruction base;
+
+ IrInstruction *frame;
+ IrInstruction *result_loc;
+};
+
+struct IrInstructionResume {
+ IrInstruction base;
+
+ IrInstruction *frame;
+};
+
+enum SpillId {
+ SpillIdInvalid,
+ SpillIdRetErrCode,
+};
+
+struct IrInstructionSpillBegin {
+ IrInstruction base;
+
+ SpillId spill_id;
+ IrInstruction *operand;
+};
+
+struct IrInstructionSpillEnd {
+ IrInstruction base;
+
+ IrInstructionSpillBegin *begin;
+};
+
enum ResultLocId {
ResultLocIdInvalid,
ResultLocIdNone,
@@ -3775,20 +3722,16 @@ static const size_t maybe_null_index = 1;
static const size_t err_union_payload_index = 0;
static const size_t err_union_err_index = 1;
-// TODO call graph analysis to find out what this number needs to be for every function
-// MUST BE A POWER OF TWO.
-static const size_t stack_trace_ptr_count = 32;
-
-// these belong to the async function
-#define RETURN_ADDRESSES_FIELD_NAME "return_addresses"
-#define ERR_RET_TRACE_FIELD_NAME "err_ret_trace"
-#define RESULT_FIELD_NAME "result"
-#define ASYNC_REALLOC_FIELD_NAME "reallocFn"
-#define ASYNC_SHRINK_FIELD_NAME "shrinkFn"
-#define ATOMIC_STATE_FIELD_NAME "atomic_state"
-// these point to data belonging to the awaiter
-#define ERR_RET_TRACE_PTR_FIELD_NAME "err_ret_trace_ptr"
-#define RESULT_PTR_FIELD_NAME "result_ptr"
+// label (grep this): [fn_frame_struct_layout]
+static const size_t frame_fn_ptr_index = 0;
+static const size_t frame_resume_index = 1;
+static const size_t frame_awaiter_index = 2;
+static const size_t frame_ret_start = 3;
+
+// TODO https://github.com/ziglang/zig/issues/3056
+// We require this to be a power of 2 so that we can use shifting rather than
+// remainder division.
+static const size_t stack_trace_ptr_count = 32; // Must be a power of 2.
#define NAMESPACE_SEP_CHAR '.'
#define NAMESPACE_SEP_STR "."
@@ -3811,11 +3754,13 @@ enum FnWalkId {
struct FnWalkAttrs {
ZigFn *fn;
+ LLVMValueRef llvm_fn;
unsigned gen_i;
};
struct FnWalkCall {
ZigList<LLVMValueRef> *gen_param_values;
+ ZigList<ZigType *> *gen_param_types;
IrInstructionCallGen *inst;
bool is_var_args;
};
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 672e75a5ee..21289f24a8 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -7,6 +7,7 @@
#include "analyze.hpp"
#include "ast_render.hpp"
+#include "codegen.hpp"
#include "config.h"
#include "error.hpp"
#include "ir.hpp"
@@ -31,6 +32,11 @@ static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_r
static void preview_use_decl(CodeGen *g, TldUsingNamespace *using_namespace, ScopeDecls *dest_decls_scope);
static void resolve_use_decl(CodeGen *g, TldUsingNamespace *tld_using_namespace, ScopeDecls *dest_decls_scope);
+// nullptr means not analyzed yet; this one means currently being analyzed
+static const AstNode *inferred_async_checking = reinterpret_cast<AstNode *>(0x1);
+// this one means analyzed and it's not async
+static const AstNode *inferred_async_none = reinterpret_cast<AstNode *>(0x2);
+
static bool is_top_level_struct(ZigType *import) {
return import->id == ZigTypeIdStruct && import->data.structure.root_struct != nullptr;
}
@@ -56,14 +62,14 @@ ErrorMsg *add_token_error(CodeGen *g, ZigType *owner, Token *token, Buf *msg) {
return err;
}
-ErrorMsg *add_node_error(CodeGen *g, AstNode *node, Buf *msg) {
+ErrorMsg *add_node_error(CodeGen *g, const AstNode *node, Buf *msg) {
Token fake_token;
fake_token.start_line = node->line;
fake_token.start_column = node->column;
return add_token_error(g, node->owner, &fake_token, msg);
}
-ErrorMsg *add_error_note(CodeGen *g, ErrorMsg *parent_msg, AstNode *node, Buf *msg) {
+ErrorMsg *add_error_note(CodeGen *g, ErrorMsg *parent_msg, const AstNode *node, Buf *msg) {
Token fake_token;
fake_token.start_line = node->line;
fake_token.start_column = node->column;
@@ -188,12 +194,6 @@ Scope *create_comptime_scope(CodeGen *g, AstNode *node, Scope *parent) {
return &scope->base;
}
-Scope *create_coro_prelude_scope(CodeGen *g, AstNode *node, Scope *parent) {
- ScopeCoroPrelude *scope = allocate<ScopeCoroPrelude>(1);
- init_scope(g, &scope->base, ScopeIdCoroPrelude, node, parent);
- return &scope->base;
-}
-
ZigType *get_scope_import(Scope *scope) {
while (scope) {
if (scope->id == ScopeIdDecls) {
@@ -234,6 +234,8 @@ AstNode *type_decl_node(ZigType *type_entry) {
return type_entry->data.enumeration.decl_node;
case ZigTypeIdUnion:
return type_entry->data.unionation.decl_node;
+ case ZigTypeIdFnFrame:
+ return type_entry->data.frame.fn->proto_node;
case ZigTypeIdOpaque:
case ZigTypeIdMetaType:
case ZigTypeIdVoid:
@@ -254,8 +256,8 @@ AstNode *type_decl_node(ZigType *type_entry) {
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdAnyFrame:
return nullptr;
}
zig_unreachable();
@@ -269,6 +271,20 @@ bool type_is_resolved(ZigType *type_entry, ResolveStatus status) {
return type_entry->data.structure.resolve_status >= status;
case ZigTypeIdUnion:
return type_entry->data.unionation.resolve_status >= status;
+ case ZigTypeIdFnFrame:
+ switch (status) {
+ case ResolveStatusInvalid:
+ zig_unreachable();
+ case ResolveStatusUnstarted:
+ case ResolveStatusZeroBitsKnown:
+ return true;
+ case ResolveStatusAlignmentKnown:
+ case ResolveStatusSizeKnown:
+ return type_entry->data.frame.locals_struct != nullptr;
+ case ResolveStatusLLVMFwdDecl:
+ case ResolveStatusLLVMFull:
+ return type_entry->llvm_type != nullptr;
+ }
case ZigTypeIdEnum:
switch (status) {
case ResolveStatusUnstarted:
@@ -307,8 +323,8 @@ bool type_is_resolved(ZigType *type_entry, ResolveStatus status) {
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdAnyFrame:
return true;
}
zig_unreachable();
@@ -341,27 +357,27 @@ ZigType *get_smallest_unsigned_int_type(CodeGen *g, uint64_t x) {
return get_int_type(g, false, bits_needed_for_unsigned(x));
}
-ZigType *get_promise_type(CodeGen *g, ZigType *result_type) {
- if (result_type != nullptr && result_type->promise_parent != nullptr) {
- return result_type->promise_parent;
- } else if (result_type == nullptr && g->builtin_types.entry_promise != nullptr) {
- return g->builtin_types.entry_promise;
+ZigType *get_any_frame_type(CodeGen *g, ZigType *result_type) {
+ if (result_type != nullptr && result_type->any_frame_parent != nullptr) {
+ return result_type->any_frame_parent;
+ } else if (result_type == nullptr && g->builtin_types.entry_any_frame != nullptr) {
+ return g->builtin_types.entry_any_frame;
}
- ZigType *entry = new_type_table_entry(ZigTypeIdPromise);
+ ZigType *entry = new_type_table_entry(ZigTypeIdAnyFrame);
entry->abi_size = g->builtin_types.entry_usize->abi_size;
entry->size_in_bits = g->builtin_types.entry_usize->size_in_bits;
entry->abi_align = g->builtin_types.entry_usize->abi_align;
- entry->data.promise.result_type = result_type;
- buf_init_from_str(&entry->name, "promise");
+ entry->data.any_frame.result_type = result_type;
+ buf_init_from_str(&entry->name, "anyframe");
if (result_type != nullptr) {
buf_appendf(&entry->name, "->%s", buf_ptr(&result_type->name));
}
if (result_type != nullptr) {
- result_type->promise_parent = entry;
+ result_type->any_frame_parent = entry;
} else if (result_type == nullptr) {
- g->builtin_types.entry_promise = entry;
+ g->builtin_types.entry_any_frame = entry;
}
return entry;
}
@@ -378,6 +394,25 @@ static const char *ptr_len_to_star_str(PtrLen ptr_len) {
zig_unreachable();
}
+ZigType *get_fn_frame_type(CodeGen *g, ZigFn *fn) {
+ if (fn->frame_type != nullptr) {
+ return fn->frame_type;
+ }
+
+ ZigType *entry = new_type_table_entry(ZigTypeIdFnFrame);
+ buf_resize(&entry->name, 0);
+ buf_appendf(&entry->name, "@Frame(%s)", buf_ptr(&fn->symbol_name));
+
+ entry->data.frame.fn = fn;
+
+ // Async function frames are always non-zero bits because they always have a resume index.
+ entry->abi_size = SIZE_MAX;
+ entry->size_in_bits = SIZE_MAX;
+
+ fn->frame_type = entry;
+ return entry;
+}
+
ZigType *get_pointer_to_type_extra(CodeGen *g, ZigType *child_type, bool is_const,
bool is_volatile, PtrLen ptr_len, uint32_t byte_alignment,
uint32_t bit_offset_in_host, uint32_t host_int_bytes, bool allow_zero)
@@ -490,42 +525,6 @@ ZigType *get_pointer_to_type(CodeGen *g, ZigType *child_type, bool is_const) {
return get_pointer_to_type_extra(g, child_type, is_const, false, PtrLenSingle, 0, 0, 0, false);
}
-ZigType *get_promise_frame_type(CodeGen *g, ZigType *return_type) {
- if (return_type->promise_frame_parent != nullptr) {
- return return_type->promise_frame_parent;
- }
-
- ZigType *atomic_state_type = g->builtin_types.entry_usize;
- ZigType *result_ptr_type = get_pointer_to_type(g, return_type, false);
-
- ZigList<const char *> field_names = {};
- field_names.append(ATOMIC_STATE_FIELD_NAME);
- field_names.append(RESULT_FIELD_NAME);
- field_names.append(RESULT_PTR_FIELD_NAME);
- if (g->have_err_ret_tracing) {
- field_names.append(ERR_RET_TRACE_PTR_FIELD_NAME);
- field_names.append(ERR_RET_TRACE_FIELD_NAME);
- field_names.append(RETURN_ADDRESSES_FIELD_NAME);
- }
-
- ZigList<ZigType *> field_types = {};
- field_types.append(atomic_state_type);
- field_types.append(return_type);
- field_types.append(result_ptr_type);
- if (g->have_err_ret_tracing) {
- field_types.append(get_ptr_to_stack_trace_type(g));
- field_types.append(g->stack_trace_type);
- field_types.append(get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count));
- }
-
- assert(field_names.length == field_types.length);
- Buf *name = buf_sprintf("AsyncFramePromise(%s)", buf_ptr(&return_type->name));
- ZigType *entry = get_struct_type(g, buf_ptr(name), field_names.items, field_types.items, field_names.length);
-
- return_type->promise_frame_parent = entry;
- return entry;
-}
-
ZigType *get_optional_type(CodeGen *g, ZigType *child_type) {
if (child_type->optional_parent != nullptr) {
return child_type->optional_parent;
@@ -631,6 +630,7 @@ ZigType *get_error_union_type(CodeGen *g, ZigType *err_set_type, ZigType *payloa
size_t field2_offset = next_field_offset(0, entry->abi_align, field_sizes[0], field_aligns[1]);
entry->abi_size = next_field_offset(field2_offset, entry->abi_align, field_sizes[1], entry->abi_align);
entry->size_in_bits = entry->abi_size * 8;
+ entry->data.error_union.pad_bytes = entry->abi_size - (field2_offset + field_sizes[1]);
}
g->type_table.put(type_id, entry);
@@ -828,17 +828,15 @@ bool calling_convention_allows_zig_types(CallingConvention cc) {
zig_unreachable();
}
-ZigType *get_ptr_to_stack_trace_type(CodeGen *g) {
+ZigType *get_stack_trace_type(CodeGen *g) {
if (g->stack_trace_type == nullptr) {
ConstExprValue *stack_trace_type_val = get_builtin_value(g, "StackTrace");
assert(stack_trace_type_val->type->id == ZigTypeIdMetaType);
g->stack_trace_type = stack_trace_type_val->data.x_type;
assertNoError(type_resolve(g, g->stack_trace_type, ResolveStatusZeroBitsKnown));
-
- g->ptr_to_stack_trace_type = get_pointer_to_type(g, g->stack_trace_type, false);
}
- return g->ptr_to_stack_trace_type;
+ return g->stack_trace_type;
}
bool want_first_arg_sret(CodeGen *g, FnTypeId *fn_type_id) {
@@ -879,13 +877,8 @@ ZigType *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) {
// populate the name of the type
buf_resize(&fn_type->name, 0);
- if (fn_type->data.fn.fn_type_id.cc == CallingConventionAsync) {
- assert(fn_type_id->async_allocator_type != nullptr);
- buf_appendf(&fn_type->name, "async<%s> ", buf_ptr(&fn_type_id->async_allocator_type->name));
- } else {
- const char *cc_str = calling_convention_fn_type_str(fn_type->data.fn.fn_type_id.cc);
- buf_appendf(&fn_type->name, "%s", cc_str);
- }
+ const char *cc_str = calling_convention_fn_type_str(fn_type->data.fn.fn_type_id.cc);
+ buf_appendf(&fn_type->name, "%s", cc_str);
buf_appendf(&fn_type->name, "fn(");
for (size_t i = 0; i < fn_type_id->param_count; i += 1) {
FnTypeParamInfo *param_info = &fn_type_id->param_info[i];
@@ -998,14 +991,8 @@ ZigType *analyze_type_expr(CodeGen *g, Scope *scope, AstNode *node) {
ZigType *get_generic_fn_type(CodeGen *g, FnTypeId *fn_type_id) {
ZigType *fn_type = new_type_table_entry(ZigTypeIdFn);
buf_resize(&fn_type->name, 0);
- if (fn_type->data.fn.fn_type_id.cc == CallingConventionAsync) {
- const char *async_allocator_type_str = (fn_type->data.fn.fn_type_id.async_allocator_type == nullptr) ?
- "var" : buf_ptr(&fn_type_id->async_allocator_type->name);
- buf_appendf(&fn_type->name, "async(%s) ", async_allocator_type_str);
- } else {
- const char *cc_str = calling_convention_fn_type_str(fn_type->data.fn.fn_type_id.cc);
- buf_appendf(&fn_type->name, "%s", cc_str);
- }
+ const char *cc_str = calling_convention_fn_type_str(fn_type->data.fn.fn_type_id.cc);
+ buf_appendf(&fn_type->name, "%s", cc_str);
buf_appendf(&fn_type->name, "fn(");
size_t i = 0;
for (; i < fn_type_id->next_param_index; i += 1) {
@@ -1119,7 +1106,8 @@ static Error emit_error_unless_type_allowed_in_packed_struct(CodeGen *g, ZigType
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
- case ZigTypeIdPromise:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
add_node_error(g, source_node,
buf_sprintf("type '%s' not allowed in packed struct; no guaranteed in-memory representation",
buf_ptr(&type_entry->name)));
@@ -1207,8 +1195,9 @@ bool type_allowed_in_extern(CodeGen *g, ZigType *type_entry) {
case ZigTypeIdErrorSet:
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
- case ZigTypeIdPromise:
case ZigTypeIdVoid:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
return false;
case ZigTypeIdOpaque:
case ZigTypeIdUnreachable:
@@ -1378,8 +1367,9 @@ static ZigType *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_sc
case ZigTypeIdEnum:
case ZigTypeIdUnion:
case ZigTypeIdFn:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
switch (type_requires_comptime(g, type_entry)) {
case ReqCompTimeNo:
break;
@@ -1474,8 +1464,9 @@ static ZigType *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_sc
case ZigTypeIdEnum:
case ZigTypeIdUnion:
case ZigTypeIdFn:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
switch (type_requires_comptime(g, fn_type_id.return_type)) {
case ReqCompTimeInvalid:
return g->builtin_types.entry_invalid;
@@ -1487,16 +1478,6 @@ static ZigType *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_sc
break;
}
- if (fn_type_id.cc == CallingConventionAsync) {
- if (fn_proto->async_allocator_type == nullptr) {
- return get_generic_fn_type(g, &fn_type_id);
- }
- fn_type_id.async_allocator_type = analyze_type_expr(g, child_scope, fn_proto->async_allocator_type);
- if (type_is_invalid(fn_type_id.async_allocator_type)) {
- return g->builtin_types.entry_invalid;
- }
- }
-
return get_fn_type(g, &fn_type_id);
}
@@ -1516,9 +1497,14 @@ bool type_is_invalid(ZigType *type_entry) {
zig_unreachable();
}
+struct SrcField {
+ const char *name;
+ ZigType *ty;
+ unsigned align;
+};
-ZigType *get_struct_type(CodeGen *g, const char *type_name, const char *field_names[],
- ZigType *field_types[], size_t field_count)
+static ZigType *get_struct_type(CodeGen *g, const char *type_name, SrcField fields[], size_t field_count,
+ unsigned min_abi_align)
{
ZigType *struct_type = new_type_table_entry(ZigTypeIdStruct);
@@ -1530,22 +1516,20 @@ ZigType *get_struct_type(CodeGen *g, const char *type_name, const char *field_na
struct_type->data.structure.fields = allocate<TypeStructField>(field_count);
struct_type->data.structure.fields_by_name.init(field_count);
- size_t abi_align = 0;
+ size_t abi_align = min_abi_align;
for (size_t i = 0; i < field_count; i += 1) {
TypeStructField *field = &struct_type->data.structure.fields[i];
- field->name = buf_create_from_str(field_names[i]);
- field->type_entry = field_types[i];
+ field->name = buf_create_from_str(fields[i].name);
+ field->type_entry = fields[i].ty;
field->src_index = i;
+ field->align = fields[i].align;
if (type_has_bits(field->type_entry)) {
assert(type_is_resolved(field->type_entry, ResolveStatusSizeKnown));
- if (field->type_entry->abi_align > abi_align) {
- abi_align = field->type_entry->abi_align;
+ unsigned field_abi_align = max(field->align, field->type_entry->abi_align);
+ if (field_abi_align > abi_align) {
+ abi_align = field_abi_align;
}
- field->gen_index = struct_type->data.structure.gen_field_count;
- struct_type->data.structure.gen_field_count += 1;
- } else {
- field->gen_index = SIZE_MAX;
}
auto prev_entry = struct_type->data.structure.fields_by_name.put_unique(field->name, field);
@@ -1555,17 +1539,24 @@ ZigType *get_struct_type(CodeGen *g, const char *type_name, const char *field_na
size_t next_offset = 0;
for (size_t i = 0; i < field_count; i += 1) {
TypeStructField *field = &struct_type->data.structure.fields[i];
- if (field->gen_index == SIZE_MAX)
+ if (!type_has_bits(field->type_entry))
continue;
+
field->offset = next_offset;
+
+ // find the next non-zero-byte field for offset calculations
size_t next_src_field_index = i + 1;
for (; next_src_field_index < field_count; next_src_field_index += 1) {
- if (struct_type->data.structure.fields[next_src_field_index].gen_index != SIZE_MAX) {
+ if (type_has_bits(struct_type->data.structure.fields[next_src_field_index].type_entry))
break;
- }
}
- size_t next_abi_align = (next_src_field_index == field_count) ?
- abi_align : struct_type->data.structure.fields[next_src_field_index].type_entry->abi_align;
+ size_t next_abi_align;
+ if (next_src_field_index == field_count) {
+ next_abi_align = abi_align;
+ } else {
+ next_abi_align = max(fields[next_src_field_index].align,
+ struct_type->data.structure.fields[next_src_field_index].type_entry->abi_align);
+ }
next_offset = next_field_offset(next_offset, abi_align, field->type_entry->abi_size, next_abi_align);
}
@@ -2653,7 +2644,6 @@ ZigFn *create_fn_raw(CodeGen *g, FnInline inline_value) {
fn_entry->prealloc_backward_branch_quota = default_backward_branch_quota;
- fn_entry->codegen = g;
fn_entry->analyzed_executable.backward_branch_count = &fn_entry->prealloc_bbc;
fn_entry->analyzed_executable.backward_branch_quota = &fn_entry->prealloc_backward_branch_quota;
fn_entry->analyzed_executable.fn_entry = fn_entry;
@@ -2781,6 +2771,7 @@ static void resolve_decl_fn(CodeGen *g, TldFn *tld_fn) {
}
}
} else {
+ fn_table_entry->inferred_async_node = inferred_async_none;
g->external_prototypes.put_unique(tld_fn->base.name, &tld_fn->base);
}
@@ -2802,6 +2793,13 @@ static void resolve_decl_fn(CodeGen *g, TldFn *tld_fn) {
g->fn_defs.append(fn_table_entry);
}
+ // if the calling convention implies that it cannot be async, we save that for later
+ // and leave the value to be nullptr to indicate that we have not emitted possible
+ // compile errors for improperly calling async functions.
+ if (fn_table_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync) {
+ fn_table_entry->inferred_async_node = fn_table_entry->proto_node;
+ }
+
if (scope_is_root_decls(tld_fn->base.parent_scope) &&
(import == g->root_import || import->data.structure.root_struct->package == g->panic_package))
{
@@ -3035,12 +3033,11 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) {
case NodeTypeIfErrorExpr:
case NodeTypeIfOptional:
case NodeTypeErrorSetDecl:
- case NodeTypeCancel:
case NodeTypeResume:
case NodeTypeAwaitExpr:
case NodeTypeSuspend:
- case NodeTypePromiseType:
case NodeTypeEnumLiteral:
+ case NodeTypeAnyFrameType:
zig_unreachable();
}
}
@@ -3091,8 +3088,9 @@ ZigType *validate_var_type(CodeGen *g, AstNode *source_node, ZigType *type_entry
case ZigTypeIdUnion:
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
return type_entry;
}
zig_unreachable();
@@ -3592,8 +3590,9 @@ bool is_container(ZigType *type_entry) {
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
return false;
}
zig_unreachable();
@@ -3649,8 +3648,9 @@ Error resolve_container_type(CodeGen *g, ZigType *type_entry) {
case ZigTypeIdInvalid:
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
zig_unreachable();
}
zig_unreachable();
@@ -3659,13 +3659,13 @@ Error resolve_container_type(CodeGen *g, ZigType *type_entry) {
ZigType *get_src_ptr_type(ZigType *type) {
if (type->id == ZigTypeIdPointer) return type;
if (type->id == ZigTypeIdFn) return type;
- if (type->id == ZigTypeIdPromise) return type;
+ if (type->id == ZigTypeIdAnyFrame) return type;
if (type->id == ZigTypeIdOptional) {
if (type->data.maybe.child_type->id == ZigTypeIdPointer) {
return type->data.maybe.child_type->data.pointer.allow_zero ? nullptr : type->data.maybe.child_type;
}
if (type->data.maybe.child_type->id == ZigTypeIdFn) return type->data.maybe.child_type;
- if (type->data.maybe.child_type->id == ZigTypeIdPromise) return type->data.maybe.child_type;
+ if (type->data.maybe.child_type->id == ZigTypeIdAnyFrame) return type->data.maybe.child_type;
}
return nullptr;
}
@@ -3681,6 +3681,13 @@ bool type_is_nonnull_ptr(ZigType *type) {
return get_codegen_ptr_type(type) == type && !ptr_allows_addr_zero(type);
}
+static uint32_t get_async_frame_align_bytes(CodeGen *g) {
+ uint32_t a = g->pointer_size_bytes * 2;
+ // promises have at least alignment 8 so that we can have 3 extra bits when doing atomicrmw
+ if (a < 8) a = 8;
+ return a;
+}
+
uint32_t get_ptr_align(CodeGen *g, ZigType *type) {
ZigType *ptr_type = get_src_ptr_type(type);
if (ptr_type->id == ZigTypeIdPointer) {
@@ -3692,8 +3699,8 @@ uint32_t get_ptr_align(CodeGen *g, ZigType *type) {
// when getting the alignment of `?extern fn() void`.
// See http://lists.llvm.org/pipermail/llvm-dev/2018-September/126142.html
return (ptr_type->data.fn.fn_type_id.alignment == 0) ? 1 : ptr_type->data.fn.fn_type_id.alignment;
- } else if (ptr_type->id == ZigTypeIdPromise) {
- return get_coro_frame_align_bytes(g);
+ } else if (ptr_type->id == ZigTypeIdAnyFrame) {
+ return get_async_frame_align_bytes(g);
} else {
zig_unreachable();
}
@@ -3705,7 +3712,7 @@ bool get_ptr_const(ZigType *type) {
return ptr_type->data.pointer.is_const;
} else if (ptr_type->id == ZigTypeIdFn) {
return true;
- } else if (ptr_type->id == ZigTypeIdPromise) {
+ } else if (ptr_type->id == ZigTypeIdAnyFrame) {
return true;
} else {
zig_unreachable();
@@ -3780,18 +3787,128 @@ bool resolve_inferred_error_set(CodeGen *g, ZigType *err_set_type, AstNode *sour
return true;
}
-void analyze_fn_ir(CodeGen *g, ZigFn *fn_table_entry, AstNode *return_type_node) {
- ZigType *fn_type = fn_table_entry->type_entry;
+static void resolve_async_fn_frame(CodeGen *g, ZigFn *fn) {
+ ZigType *frame_type = get_fn_frame_type(g, fn);
+ Error err;
+ if ((err = type_resolve(g, frame_type, ResolveStatusSizeKnown))) {
+ fn->anal_state = FnAnalStateInvalid;
+ return;
+ }
+}
+
+bool fn_is_async(ZigFn *fn) {
+ assert(fn->inferred_async_node != nullptr);
+ assert(fn->inferred_async_node != inferred_async_checking);
+ return fn->inferred_async_node != inferred_async_none;
+}
+
+static void add_async_error_notes(CodeGen *g, ErrorMsg *msg, ZigFn *fn) {
+ assert(fn->inferred_async_node != nullptr);
+ assert(fn->inferred_async_node != inferred_async_checking);
+ assert(fn->inferred_async_node != inferred_async_none);
+ if (fn->inferred_async_fn != nullptr) {
+ ErrorMsg *new_msg = add_error_note(g, msg, fn->inferred_async_node,
+ buf_sprintf("async function call here"));
+ return add_async_error_notes(g, new_msg, fn->inferred_async_fn);
+ } else if (fn->inferred_async_node->type == NodeTypeFnProto) {
+ add_error_note(g, msg, fn->inferred_async_node,
+ buf_sprintf("async calling convention here"));
+ } else if (fn->inferred_async_node->type == NodeTypeSuspend) {
+ add_error_note(g, msg, fn->inferred_async_node,
+ buf_sprintf("suspends here"));
+ } else if (fn->inferred_async_node->type == NodeTypeAwaitExpr) {
+ add_error_note(g, msg, fn->inferred_async_node,
+ buf_sprintf("await is a suspend point"));
+ } else if (fn->inferred_async_node->type == NodeTypeFnCallExpr &&
+ fn->inferred_async_node->data.fn_call_expr.is_builtin)
+ {
+ add_error_note(g, msg, fn->inferred_async_node,
+ buf_sprintf("@frame() causes function to be async"));
+ } else {
+ add_error_note(g, msg, fn->inferred_async_node,
+ buf_sprintf("suspends here"));
+ }
+}
+
+// This function resolves functions being inferred async.
+static void analyze_fn_async(CodeGen *g, ZigFn *fn, bool resolve_frame) {
+ if (fn->inferred_async_node == inferred_async_checking) {
+ // TODO call graph cycle detected, disallow the recursion
+ fn->inferred_async_node = inferred_async_none;
+ return;
+ }
+ if (fn->inferred_async_node == inferred_async_none) {
+ return;
+ }
+ if (fn->inferred_async_node != nullptr) {
+ if (resolve_frame) {
+ resolve_async_fn_frame(g, fn);
+ }
+ return;
+ }
+ fn->inferred_async_node = inferred_async_checking;
+
+ bool must_not_be_async = false;
+ if (fn->type_entry->data.fn.fn_type_id.cc != CallingConventionUnspecified) {
+ must_not_be_async = true;
+ fn->inferred_async_node = inferred_async_none;
+ }
+
+ for (size_t i = 0; i < fn->call_list.length; i += 1) {
+ IrInstructionCallGen *call = fn->call_list.at(i);
+ ZigFn *callee = call->fn_entry;
+ if (callee == nullptr) {
+ // TODO function pointer call here, could be anything
+ continue;
+ }
+
+ if (callee->type_entry->data.fn.fn_type_id.cc != CallingConventionUnspecified)
+ continue;
+ if (callee->anal_state == FnAnalStateReady) {
+ analyze_fn_body(g, callee);
+ if (callee->anal_state == FnAnalStateInvalid) {
+ fn->anal_state = FnAnalStateInvalid;
+ return;
+ }
+ }
+ assert(callee->anal_state == FnAnalStateComplete);
+ analyze_fn_async(g, callee, true);
+ if (callee->anal_state == FnAnalStateInvalid) {
+ fn->anal_state = FnAnalStateInvalid;
+ return;
+ }
+ if (fn_is_async(callee)) {
+ fn->inferred_async_node = call->base.source_node;
+ fn->inferred_async_fn = callee;
+ if (must_not_be_async) {
+ ErrorMsg *msg = add_node_error(g, fn->proto_node,
+ buf_sprintf("function with calling convention '%s' cannot be async",
+ calling_convention_name(fn->type_entry->data.fn.fn_type_id.cc)));
+ add_async_error_notes(g, msg, fn);
+ fn->anal_state = FnAnalStateInvalid;
+ return;
+ }
+ if (resolve_frame) {
+ resolve_async_fn_frame(g, fn);
+ }
+ return;
+ }
+ }
+ fn->inferred_async_node = inferred_async_none;
+}
+
+static void analyze_fn_ir(CodeGen *g, ZigFn *fn, AstNode *return_type_node) {
+ ZigType *fn_type = fn->type_entry;
assert(!fn_type->data.fn.is_generic);
FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
- ZigType *block_return_type = ir_analyze(g, &fn_table_entry->ir_executable,
- &fn_table_entry->analyzed_executable, fn_type_id->return_type, return_type_node);
- fn_table_entry->src_implicit_return_type = block_return_type;
+ ZigType *block_return_type = ir_analyze(g, &fn->ir_executable,
+ &fn->analyzed_executable, fn_type_id->return_type, return_type_node);
+ fn->src_implicit_return_type = block_return_type;
- if (type_is_invalid(block_return_type) || fn_table_entry->analyzed_executable.invalid) {
+ if (type_is_invalid(block_return_type) || fn->analyzed_executable.invalid) {
assert(g->errors.length > 0);
- fn_table_entry->anal_state = FnAnalStateInvalid;
+ fn->anal_state = FnAnalStateInvalid;
return;
}
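
In user terms, the `analyze_fn_async` pass added above propagates asyncness bottom-up through each function's `call_list`: suspending (or taking `@frame()`) marks a function async, a plain call to an async function marks the caller async, and a non-default calling convention on such a function triggers the "cannot be async" error with notes from `add_async_error_notes`. A minimal sketch of that inference under the post-rewrite rules (illustrative names, not part of the diff):

    // `leaf`, `caller`, and `pending` are illustrative names.
    var pending: anyframe = undefined;

    fn leaf() void {
        suspend {
            pending = @frame(); // suspending makes `leaf` inferred async
        }
    }

    fn caller() void {
        leaf(); // a plain call to an async function makes `caller` async too
    }

    test "inferred async propagation" {
        var frame = async caller(); // runs until `leaf` suspends
        resume pending;             // finishes `leaf`, which lets `caller` finish
    }

An `export` function making the same plain call would instead hit the "function with calling convention ... cannot be async" error emitted above.
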
@@ -3799,20 +3916,20 @@ void analyze_fn_ir(CodeGen *g, ZigFn *fn_table_entry, AstNode *return_type_node)
ZigType *return_err_set_type = fn_type_id->return_type->data.error_union.err_set_type;
if (return_err_set_type->data.error_set.infer_fn != nullptr) {
ZigType *inferred_err_set_type;
- if (fn_table_entry->src_implicit_return_type->id == ZigTypeIdErrorSet) {
- inferred_err_set_type = fn_table_entry->src_implicit_return_type;
- } else if (fn_table_entry->src_implicit_return_type->id == ZigTypeIdErrorUnion) {
- inferred_err_set_type = fn_table_entry->src_implicit_return_type->data.error_union.err_set_type;
+ if (fn->src_implicit_return_type->id == ZigTypeIdErrorSet) {
+ inferred_err_set_type = fn->src_implicit_return_type;
+ } else if (fn->src_implicit_return_type->id == ZigTypeIdErrorUnion) {
+ inferred_err_set_type = fn->src_implicit_return_type->data.error_union.err_set_type;
} else {
add_node_error(g, return_type_node,
buf_sprintf("function with inferred error set must return at least one possible error"));
- fn_table_entry->anal_state = FnAnalStateInvalid;
+ fn->anal_state = FnAnalStateInvalid;
return;
}
if (inferred_err_set_type->data.error_set.infer_fn != nullptr) {
if (!resolve_inferred_error_set(g, inferred_err_set_type, return_type_node)) {
- fn_table_entry->anal_state = FnAnalStateInvalid;
+ fn->anal_state = FnAnalStateInvalid;
return;
}
}
@@ -3832,13 +3949,25 @@ void analyze_fn_ir(CodeGen *g, ZigFn *fn_table_entry, AstNode *return_type_node)
}
}
+ CallingConvention cc = fn->type_entry->data.fn.fn_type_id.cc;
+ if (cc != CallingConventionUnspecified && cc != CallingConventionAsync &&
+ fn->inferred_async_node != nullptr &&
+ fn->inferred_async_node != inferred_async_checking &&
+ fn->inferred_async_node != inferred_async_none)
+ {
+ ErrorMsg *msg = add_node_error(g, fn->proto_node,
+ buf_sprintf("function with calling convention '%s' cannot be async",
+ calling_convention_name(cc)));
+ add_async_error_notes(g, msg, fn);
+ fn->anal_state = FnAnalStateInvalid;
+ }
+
if (g->verbose_ir) {
- fprintf(stderr, "fn %s() { // (analyzed)\n", buf_ptr(&fn_table_entry->symbol_name));
- ir_print(g, stderr, &fn_table_entry->analyzed_executable, 4);
+ fprintf(stderr, "fn %s() { // (analyzed)\n", buf_ptr(&fn->symbol_name));
+ ir_print(g, stderr, &fn->analyzed_executable, 4);
fprintf(stderr, "}\n");
}
-
- fn_table_entry->anal_state = FnAnalStateComplete;
+ fn->anal_state = FnAnalStateComplete;
}
static void analyze_fn_body(CodeGen *g, ZigFn *fn_table_entry) {
@@ -4008,6 +4137,16 @@ void semantic_analyze(CodeGen *g) {
analyze_fn_body(g, fn_entry);
}
}
+
+ if (g->errors.length != 0) {
+ return;
+ }
+
+ // second pass over functions for detecting async
+ for (g->fn_defs_index = 0; g->fn_defs_index < g->fn_defs.length; g->fn_defs_index += 1) {
+ ZigFn *fn_entry = g->fn_defs.at(g->fn_defs_index);
+ analyze_fn_async(g, fn_entry, true);
+ }
}
ZigType *get_int_type(CodeGen *g, bool is_signed, uint32_t size_in_bits) {
@@ -4103,11 +4242,12 @@ bool handle_is_ptr(ZigType *type_entry) {
case ZigTypeIdErrorSet:
case ZigTypeIdFn:
case ZigTypeIdEnum:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdAnyFrame:
return false;
case ZigTypeIdArray:
case ZigTypeIdStruct:
+ case ZigTypeIdFnFrame:
return type_has_bits(type_entry);
case ZigTypeIdErrorUnion:
return type_has_bits(type_entry->data.error_union.payload_type);
@@ -4143,7 +4283,6 @@ uint32_t fn_type_id_hash(FnTypeId *id) {
result += ((uint32_t)(id->cc)) * (uint32_t)3349388391;
result += id->is_var_args ? (uint32_t)1931444534 : 0;
result += hash_ptr(id->return_type);
- result += hash_ptr(id->async_allocator_type);
result += id->alignment * 0xd3b3f3e2;
for (size_t i = 0; i < id->param_count; i += 1) {
FnTypeParamInfo *info = &id->param_info[i];
@@ -4158,8 +4297,7 @@ bool fn_type_id_eql(FnTypeId *a, FnTypeId *b) {
a->return_type != b->return_type ||
a->is_var_args != b->is_var_args ||
a->param_count != b->param_count ||
- a->alignment != b->alignment ||
- a->async_allocator_type != b->async_allocator_type)
+ a->alignment != b->alignment)
{
return false;
}
@@ -4321,9 +4459,6 @@ static uint32_t hash_const_val(ConstExprValue *const_val) {
return 3677364617 ^ hash_ptr(const_val->data.x_ptr.data.fn.fn_entry);
case ZigTypeIdPointer:
return hash_const_val_ptr(const_val);
- case ZigTypeIdPromise:
- // TODO better hashing algorithm
- return 223048345;
case ZigTypeIdUndefined:
return 162837799;
case ZigTypeIdNull:
@@ -4357,6 +4492,12 @@ static uint32_t hash_const_val(ConstExprValue *const_val) {
case ZigTypeIdVector:
// TODO better hashing algorithm
return 3647867726;
+ case ZigTypeIdFnFrame:
+ // TODO better hashing algorithm
+ return 675741936;
+ case ZigTypeIdAnyFrame:
+ // TODO better hashing algorithm
+ return 3747294894;
case ZigTypeIdBoundFn:
case ZigTypeIdInvalid:
case ZigTypeIdUnreachable:
@@ -4389,7 +4530,7 @@ bool generic_fn_type_id_eql(GenericFnTypeId *a, GenericFnTypeId *b) {
if (a_val->special != ConstValSpecialRuntime && b_val->special != ConstValSpecialRuntime) {
assert(a_val->special == ConstValSpecialStatic);
assert(b_val->special == ConstValSpecialStatic);
- if (!const_values_equal(a->fn_entry->codegen, a_val, b_val)) {
+ if (!const_values_equal(a->codegen, a_val, b_val)) {
return false;
}
} else {
@@ -4419,9 +4560,10 @@ static bool can_mutate_comptime_var_state(ConstExprValue *value) {
case ZigTypeIdBoundFn:
case ZigTypeIdFn:
case ZigTypeIdOpaque:
- case ZigTypeIdPromise:
case ZigTypeIdErrorSet:
case ZigTypeIdEnum:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
return false;
case ZigTypeIdPointer:
@@ -4489,11 +4631,12 @@ static bool return_type_is_cacheable(ZigType *return_type) {
case ZigTypeIdBoundFn:
case ZigTypeIdFn:
case ZigTypeIdOpaque:
- case ZigTypeIdPromise:
case ZigTypeIdErrorSet:
case ZigTypeIdEnum:
case ZigTypeIdPointer:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
return true;
case ZigTypeIdArray:
@@ -4624,8 +4767,9 @@ OnePossibleValue type_has_one_possible_value(CodeGen *g, ZigType *type_entry) {
case ZigTypeIdFn:
case ZigTypeIdBool:
case ZigTypeIdFloat:
- case ZigTypeIdPromise:
case ZigTypeIdErrorUnion:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
return OnePossibleValueNo;
case ZigTypeIdUndefined:
case ZigTypeIdNull:
@@ -4713,7 +4857,8 @@ ReqCompTime type_requires_comptime(CodeGen *g, ZigType *type_entry) {
case ZigTypeIdFloat:
case ZigTypeIdVoid:
case ZigTypeIdUnreachable:
- case ZigTypeIdPromise:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
return ReqCompTimeNo;
}
zig_unreachable();
@@ -5032,6 +5177,221 @@ Error ensure_complete_type(CodeGen *g, ZigType *type_entry) {
return type_resolve(g, type_entry, ResolveStatusSizeKnown);
}
+static ZigType *get_async_fn_type(CodeGen *g, ZigType *orig_fn_type) {
+ if (orig_fn_type->data.fn.fn_type_id.cc == CallingConventionAsync)
+ return orig_fn_type;
+
+ ZigType *fn_type = allocate_nonzero<ZigType>(1);
+ *fn_type = *orig_fn_type;
+ fn_type->data.fn.fn_type_id.cc = CallingConventionAsync;
+ fn_type->llvm_type = nullptr;
+ fn_type->llvm_di_type = nullptr;
+
+ return fn_type;
+}
+
+static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) {
+ Error err;
+
+ if (frame_type->data.frame.locals_struct != nullptr)
+ return ErrorNone;
+
+ ZigFn *fn = frame_type->data.frame.fn;
+ switch (fn->anal_state) {
+ case FnAnalStateInvalid:
+ return ErrorSemanticAnalyzeFail;
+ case FnAnalStateComplete:
+ break;
+ case FnAnalStateReady:
+ analyze_fn_body(g, fn);
+ if (fn->anal_state == FnAnalStateInvalid)
+ return ErrorSemanticAnalyzeFail;
+ break;
+ case FnAnalStateProbing: {
+ ErrorMsg *msg = add_node_error(g, fn->proto_node,
+ buf_sprintf("cannot resolve '%s': function not fully analyzed yet",
+ buf_ptr(&frame_type->name)));
+ ir_add_analysis_trace(fn->ir_executable.analysis, msg,
+ buf_sprintf("depends on its own frame here"));
+ return ErrorSemanticAnalyzeFail;
+ }
+ }
+ analyze_fn_async(g, fn, false);
+ if (fn->anal_state == FnAnalStateInvalid)
+ return ErrorSemanticAnalyzeFail;
+
+ if (!fn_is_async(fn)) {
+ ZigType *fn_type = fn->type_entry;
+ FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
+ ZigType *ptr_return_type = get_pointer_to_type(g, fn_type_id->return_type, false);
+
+ // label (grep this): [fn_frame_struct_layout]
+ ZigList<SrcField> fields = {};
+
+ fields.append({"@fn_ptr", g->builtin_types.entry_usize, 0});
+ fields.append({"@resume_index", g->builtin_types.entry_usize, 0});
+ fields.append({"@awaiter", g->builtin_types.entry_usize, 0});
+
+ fields.append({"@result_ptr_callee", ptr_return_type, 0});
+ fields.append({"@result_ptr_awaiter", ptr_return_type, 0});
+ fields.append({"@result", fn_type_id->return_type, 0});
+
+ if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) {
+ ZigType *ptr_to_stack_trace_type = get_pointer_to_type(g, get_stack_trace_type(g), false);
+ fields.append({"@ptr_stack_trace_callee", ptr_to_stack_trace_type, 0});
+ fields.append({"@ptr_stack_trace_awaiter", ptr_to_stack_trace_type, 0});
+
+ fields.append({"@stack_trace", get_stack_trace_type(g), 0});
+ fields.append({"@instruction_addresses",
+ get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count), 0});
+ }
+
+ frame_type->data.frame.locals_struct = get_struct_type(g, buf_ptr(&frame_type->name),
+ fields.items, fields.length, target_fn_align(g->zig_target));
+ frame_type->abi_size = frame_type->data.frame.locals_struct->abi_size;
+ frame_type->abi_align = frame_type->data.frame.locals_struct->abi_align;
+ frame_type->size_in_bits = frame_type->data.frame.locals_struct->size_in_bits;
+
+ return ErrorNone;
+ }
+
+ ZigType *fn_type = get_async_fn_type(g, fn->type_entry);
+
+ if (fn->analyzed_executable.need_err_code_spill) {
+ IrInstructionAllocaGen *alloca_gen = allocate<IrInstructionAllocaGen>(1);
+ alloca_gen->base.id = IrInstructionIdAllocaGen;
+ alloca_gen->base.source_node = fn->proto_node;
+ alloca_gen->base.scope = fn->child_scope;
+ alloca_gen->base.value.type = get_pointer_to_type(g, g->builtin_types.entry_global_error_set, false);
+ alloca_gen->base.ref_count = 1;
+ alloca_gen->name_hint = "";
+ fn->alloca_gen_list.append(alloca_gen);
+ fn->err_code_spill = &alloca_gen->base;
+ }
+
+ for (size_t i = 0; i < fn->call_list.length; i += 1) {
+ IrInstructionCallGen *call = fn->call_list.at(i);
+ ZigFn *callee = call->fn_entry;
+ if (callee == nullptr) {
+ add_node_error(g, call->base.source_node,
+ buf_sprintf("function is not comptime-known; @asyncCall required"));
+ return ErrorSemanticAnalyzeFail;
+ }
+ if (callee->body_node == nullptr) {
+ continue;
+ }
+ if (callee->anal_state == FnAnalStateProbing) {
+ ErrorMsg *msg = add_node_error(g, fn->proto_node,
+ buf_sprintf("unable to determine async function frame of '%s'", buf_ptr(&fn->symbol_name)));
+ ErrorMsg *note = add_error_note(g, msg, call->base.source_node,
+ buf_sprintf("analysis of function '%s' depends on the frame", buf_ptr(&callee->symbol_name)));
+ ir_add_analysis_trace(callee->ir_executable.analysis, note,
+ buf_sprintf("depends on the frame here"));
+ return ErrorSemanticAnalyzeFail;
+ }
+
+ analyze_fn_body(g, callee);
+ if (callee->anal_state == FnAnalStateInvalid) {
+ frame_type->data.frame.locals_struct = g->builtin_types.entry_invalid;
+ return ErrorSemanticAnalyzeFail;
+ }
+ analyze_fn_async(g, callee, true);
+ if (!fn_is_async(callee))
+ continue;
+
+ ZigType *callee_frame_type = get_fn_frame_type(g, callee);
+
+ IrInstructionAllocaGen *alloca_gen = allocate<IrInstructionAllocaGen>(1);
+ alloca_gen->base.id = IrInstructionIdAllocaGen;
+ alloca_gen->base.source_node = call->base.source_node;
+ alloca_gen->base.scope = call->base.scope;
+ alloca_gen->base.value.type = get_pointer_to_type(g, callee_frame_type, false);
+ alloca_gen->base.ref_count = 1;
+ alloca_gen->name_hint = "";
+ fn->alloca_gen_list.append(alloca_gen);
+ call->frame_result_loc = &alloca_gen->base;
+ }
+ FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
+ ZigType *ptr_return_type = get_pointer_to_type(g, fn_type_id->return_type, false);
+
+ // label (grep this): [fn_frame_struct_layout]
+ ZigList<SrcField> fields = {};
+
+ fields.append({"@fn_ptr", fn_type, 0});
+ fields.append({"@resume_index", g->builtin_types.entry_usize, 0});
+ fields.append({"@awaiter", g->builtin_types.entry_usize, 0});
+
+ fields.append({"@result_ptr_callee", ptr_return_type, 0});
+ fields.append({"@result_ptr_awaiter", ptr_return_type, 0});
+ fields.append({"@result", fn_type_id->return_type, 0});
+
+ if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) {
+ ZigType *ptr_stack_trace_type = get_pointer_to_type(g, get_stack_trace_type(g), false);
+ fields.append({"@ptr_stack_trace_callee", ptr_stack_trace_type, 0});
+ fields.append({"@ptr_stack_trace_awaiter", ptr_stack_trace_type, 0});
+ }
+
+ for (size_t arg_i = 0; arg_i < fn_type_id->param_count; arg_i += 1) {
+ FnTypeParamInfo *param_info = &fn_type_id->param_info[arg_i];
+ AstNode *param_decl_node = get_param_decl_node(fn, arg_i);
+ Buf *param_name;
+ bool is_var_args = param_decl_node && param_decl_node->data.param_decl.is_var_args;
+ if (param_decl_node && !is_var_args) {
+ param_name = param_decl_node->data.param_decl.name;
+ } else {
+ param_name = buf_sprintf("@arg%" ZIG_PRI_usize, arg_i);
+ }
+ ZigType *param_type = param_info->type;
+
+ fields.append({buf_ptr(param_name), param_type, 0});
+ }
+
+ if (codegen_fn_has_err_ret_tracing_stack(g, fn, true)) {
+ fields.append({"@stack_trace", get_stack_trace_type(g), 0});
+ fields.append({"@instruction_addresses",
+ get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count), 0});
+ }
+
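+ // Spill each runtime alloca into the frame; zero-bit, unreferenced, and comptime-known locals are skipped.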
+ for (size_t alloca_i = 0; alloca_i < fn->alloca_gen_list.length; alloca_i += 1) {
+ IrInstructionAllocaGen *instruction = fn->alloca_gen_list.at(alloca_i);
+ instruction->field_index = SIZE_MAX;
+ ZigType *ptr_type = instruction->base.value.type;
+ assert(ptr_type->id == ZigTypeIdPointer);
+ ZigType *child_type = ptr_type->data.pointer.child_type;
+ if (!type_has_bits(child_type))
+ continue;
+ if (instruction->base.ref_count == 0)
+ continue;
+ if (instruction->base.value.special != ConstValSpecialRuntime) {
+ if (const_ptr_pointee(nullptr, g, &instruction->base.value, nullptr)->special !=
+ ConstValSpecialRuntime)
+ {
+ continue;
+ }
+ }
+ if ((err = type_resolve(g, child_type, ResolveStatusSizeKnown))) {
+ return err;
+ }
+ const char *name;
+ if (*instruction->name_hint == 0) {
+ name = buf_ptr(buf_sprintf("@local%" ZIG_PRI_usize, alloca_i));
+ } else {
+ name = buf_ptr(buf_sprintf("%s.%" ZIG_PRI_usize, instruction->name_hint, alloca_i));
+ }
+ instruction->field_index = fields.length;
+
+ fields.append({name, child_type, instruction->align});
+ }
+
+ frame_type->data.frame.locals_struct = get_struct_type(g, buf_ptr(&frame_type->name),
+ fields.items, fields.length, target_fn_align(g->zig_target));
+ frame_type->abi_size = frame_type->data.frame.locals_struct->abi_size;
+ frame_type->abi_align = frame_type->data.frame.locals_struct->abi_align;
+ frame_type->size_in_bits = frame_type->data.frame.locals_struct->size_in_bits;
+ return ErrorNone;
+}
+
Error type_resolve(CodeGen *g, ZigType *ty, ResolveStatus status) {
if (type_is_invalid(ty))
return ErrorSemanticAnalyzeFail;
@@ -5056,6 +5416,8 @@ Error type_resolve(CodeGen *g, ZigType *ty, ResolveStatus status) {
return resolve_enum_zero_bits(g, ty);
} else if (ty->id == ZigTypeIdUnion) {
return resolve_union_alignment(g, ty);
+ } else if (ty->id == ZigTypeIdFnFrame) {
+ return resolve_async_frame(g, ty);
}
return ErrorNone;
case ResolveStatusSizeKnown:
@@ -5065,6 +5427,8 @@ Error type_resolve(CodeGen *g, ZigType *ty, ResolveStatus status) {
return resolve_enum_zero_bits(g, ty);
} else if (ty->id == ZigTypeIdUnion) {
return resolve_union_type(g, ty);
+ } else if (ty->id == ZigTypeIdFnFrame) {
+ return resolve_async_frame(g, ty);
}
return ErrorNone;
case ResolveStatusLLVMFwdDecl:
@@ -5259,6 +5623,10 @@ bool const_values_equal(CodeGen *g, ConstExprValue *a, ConstExprValue *b) {
return false;
}
return true;
+ case ZigTypeIdFnFrame:
+ zig_panic("TODO");
+ case ZigTypeIdAnyFrame:
+ zig_panic("TODO");
case ZigTypeIdUndefined:
zig_panic("TODO");
case ZigTypeIdNull:
@@ -5279,7 +5647,6 @@ bool const_values_equal(CodeGen *g, ConstExprValue *a, ConstExprValue *b) {
case ZigTypeIdBoundFn:
case ZigTypeIdInvalid:
case ZigTypeIdUnreachable:
- case ZigTypeIdPromise:
zig_unreachable();
}
zig_unreachable();
@@ -5612,8 +5979,14 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) {
buf_appendf(buf, "(args value)");
return;
}
- case ZigTypeIdPromise:
- zig_unreachable();
+ case ZigTypeIdFnFrame:
+ buf_appendf(buf, "(TODO: async function frame value)");
+ return;
+
+ case ZigTypeIdAnyFrame:
+ buf_appendf(buf, "(TODO: anyframe value)");
+ return;
+
}
zig_unreachable();
}
@@ -5627,6 +6000,15 @@ ZigType *make_int_type(CodeGen *g, bool is_signed, uint32_t size_in_bits) {
entry->llvm_type = LLVMIntType(size_in_bits);
entry->abi_size = LLVMABISizeOfType(g->target_data_ref, entry->llvm_type);
entry->abi_align = LLVMABIAlignmentOfType(g->target_data_ref, entry->llvm_type);
+
+ if (size_in_bits >= 128) {
+ // Override the incorrect alignment reported by LLVM. Clang does this as well.
+ // On x86_64 there are some instructions like CMPXCHG16B which require this.
+ // On all targets, integers 128 bits and above have ABI alignment of 16.
+ // See: https://github.com/ziglang/zig/issues/2987
+ assert(entry->abi_align == 8); // if this trips we can remove the workaround
+ entry->abi_align = 16;
+ }
}
const char u_or_i = is_signed ? 'i' : 'u';
@@ -5660,7 +6042,8 @@ uint32_t type_id_hash(TypeId x) {
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
- case ZigTypeIdPromise:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
zig_unreachable();
case ZigTypeIdErrorUnion:
return hash_ptr(x.data.error_union.err_set_type) ^ hash_ptr(x.data.error_union.payload_type);
@@ -5702,7 +6085,6 @@ bool type_id_eql(TypeId a, TypeId b) {
case ZigTypeIdUndefined:
case ZigTypeIdNull:
case ZigTypeIdOptional:
- case ZigTypeIdPromise:
case ZigTypeIdErrorSet:
case ZigTypeIdEnum:
case ZigTypeIdUnion:
@@ -5710,6 +6092,8 @@ bool type_id_eql(TypeId a, TypeId b) {
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
zig_unreachable();
case ZigTypeIdErrorUnion:
return a.data.error_union.err_set_type == b.data.error_union.err_set_type &&
@@ -5875,7 +6259,8 @@ static const ZigTypeId all_type_ids[] = {
ZigTypeIdBoundFn,
ZigTypeIdArgTuple,
ZigTypeIdOpaque,
- ZigTypeIdPromise,
+ ZigTypeIdFnFrame,
+ ZigTypeIdAnyFrame,
ZigTypeIdVector,
ZigTypeIdEnumLiteral,
};
@@ -5939,12 +6324,14 @@ size_t type_id_index(ZigType *entry) {
return 20;
case ZigTypeIdOpaque:
return 21;
- case ZigTypeIdPromise:
+ case ZigTypeIdFnFrame:
return 22;
- case ZigTypeIdVector:
+ case ZigTypeIdAnyFrame:
return 23;
- case ZigTypeIdEnumLiteral:
+ case ZigTypeIdVector:
return 24;
+ case ZigTypeIdEnumLiteral:
+ return 25;
}
zig_unreachable();
}
@@ -5999,10 +6386,12 @@ const char *type_id_name(ZigTypeId id) {
return "ArgTuple";
case ZigTypeIdOpaque:
return "Opaque";
- case ZigTypeIdPromise:
- return "Promise";
case ZigTypeIdVector:
return "Vector";
+ case ZigTypeIdFnFrame:
+ return "Frame";
+ case ZigTypeIdAnyFrame:
+ return "AnyFrame";
}
zig_unreachable();
}
@@ -6067,19 +6456,12 @@ bool type_is_global_error_set(ZigType *err_set_type) {
return err_set_type->data.error_set.err_count == UINT32_MAX;
}
-uint32_t get_coro_frame_align_bytes(CodeGen *g) {
- uint32_t a = g->pointer_size_bytes * 2;
- // promises have at least alignment 8 so that we can have 3 extra bits when doing atomicrmw
- if (a < 8) a = 8;
- return a;
-}
-
bool type_can_fail(ZigType *type_entry) {
return type_entry->id == ZigTypeIdErrorUnion || type_entry->id == ZigTypeIdErrorSet;
}
bool fn_type_can_fail(FnTypeId *fn_type_id) {
- return type_can_fail(fn_type_id->return_type) || fn_type_id->cc == CallingConventionAsync;
+ return type_can_fail(fn_type_id->return_type);
}
// ErrorNone - result pointer has the type
@@ -6449,7 +6831,9 @@ static void resolve_llvm_types_slice(CodeGen *g, ZigType *type, ResolveStatus wa
type->data.structure.resolve_status = ResolveStatusLLVMFull;
}
-static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveStatus wanted_resolve_status) {
+static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveStatus wanted_resolve_status,
+ ZigType *async_frame_type)
+{
assert(struct_type->id == ZigTypeIdStruct);
assert(struct_type->data.structure.resolve_status != ResolveStatusInvalid);
assert(struct_type->data.structure.resolve_status >= ResolveStatusSizeKnown);
@@ -6486,10 +6870,9 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS
}
size_t field_count = struct_type->data.structure.src_field_count;
- size_t gen_field_count = struct_type->data.structure.gen_field_count;
- LLVMTypeRef *element_types = allocate<LLVMTypeRef>(gen_field_count);
+ // Every field could potentially have a generated padding field after it.
+ LLVMTypeRef *element_types = allocate<LLVMTypeRef>(field_count * 2);
- size_t gen_field_index = 0;
bool packed = (struct_type->data.structure.layout == ContainerLayoutPacked);
size_t packed_bits_offset = 0;
size_t first_packed_bits_offset_misalign = SIZE_MAX;
@@ -6497,20 +6880,36 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS
// trigger all the recursive get_llvm_type calls
for (size_t i = 0; i < field_count; i += 1) {
- TypeStructField *type_struct_field = &struct_type->data.structure.fields[i];
- ZigType *field_type = type_struct_field->type_entry;
+ TypeStructField *field = &struct_type->data.structure.fields[i];
+ ZigType *field_type = field->type_entry;
if (!type_has_bits(field_type))
continue;
(void)get_llvm_type(g, field_type);
if (struct_type->data.structure.resolve_status >= wanted_resolve_status) return;
}
- for (size_t i = 0; i < field_count; i += 1) {
- TypeStructField *type_struct_field = &struct_type->data.structure.fields[i];
- ZigType *field_type = type_struct_field->type_entry;
+ size_t gen_field_index = 0;
+ // Calculate what LLVM thinks the ABI align of the struct will be. We do this to avoid
+ // inserting padding bytes where LLVM would do it automatically.
+ size_t llvm_struct_abi_align = 0;
+ for (size_t i = 0; i < field_count; i += 1) {
+ ZigType *field_type = struct_type->data.structure.fields[i].type_entry;
if (!type_has_bits(field_type))
continue;
+ LLVMTypeRef field_llvm_type = get_llvm_type(g, field_type);
+ size_t llvm_field_abi_align = LLVMABIAlignmentOfType(g->target_data_ref, field_llvm_type);
+ llvm_struct_abi_align = max(llvm_struct_abi_align, llvm_field_abi_align);
+ }
+
+ for (size_t i = 0; i < field_count; i += 1) {
+ TypeStructField *field = &struct_type->data.structure.fields[i];
+ ZigType *field_type = field->type_entry;
+
+ if (!type_has_bits(field_type)) {
+ field->gen_index = SIZE_MAX;
+ continue;
+ }
if (packed) {
size_t field_size_in_bits = type_size_bits(g, field_type);
@@ -6537,12 +6936,61 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS
}
packed_bits_offset = next_packed_bits_offset;
} else {
- element_types[gen_field_index] = get_llvm_type(g, field_type);
-
+ LLVMTypeRef llvm_type;
+ if (i == 0 && async_frame_type != nullptr) {
+ assert(async_frame_type->id == ZigTypeIdFnFrame);
+ assert(field_type->id == ZigTypeIdFn);
+ resolve_llvm_types_fn(g, async_frame_type->data.frame.fn);
+ llvm_type = LLVMPointerType(async_frame_type->data.frame.fn->raw_type_ref, 0);
+ } else {
+ llvm_type = get_llvm_type(g, field_type);
+ }
+ element_types[gen_field_index] = llvm_type;
+ field->gen_index = gen_field_index;
gen_field_index += 1;
+
+ // find the next non-zero-byte field for offset calculations
+ size_t next_src_field_index = i + 1;
+ for (; next_src_field_index < field_count; next_src_field_index += 1) {
+ if (type_has_bits(struct_type->data.structure.fields[next_src_field_index].type_entry))
+ break;
+ }
+ size_t next_abi_align;
+ if (next_src_field_index == field_count) {
+ next_abi_align = struct_type->abi_align;
+ } else {
+ if (struct_type->data.structure.fields[next_src_field_index].align == 0) {
+ next_abi_align = struct_type->data.structure.fields[next_src_field_index].type_entry->abi_align;
+ } else {
+ next_abi_align = struct_type->data.structure.fields[next_src_field_index].align;
+ }
+ }
+ size_t llvm_next_abi_align = (next_src_field_index == field_count) ?
+ llvm_struct_abi_align :
+ LLVMABIAlignmentOfType(g->target_data_ref,
+ get_llvm_type(g, struct_type->data.structure.fields[next_src_field_index].type_entry));
+
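+ // Compare where the next field lands under Zig's layout versus LLVM's; when Zig leaves a larger gap, an explicit [N x i8] padding field is emitted below.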
+ size_t next_offset = next_field_offset(field->offset, struct_type->abi_align,
+ field_type->abi_size, next_abi_align);
+ size_t llvm_next_offset = next_field_offset(field->offset, llvm_struct_abi_align,
+ LLVMABISizeOfType(g->target_data_ref, llvm_type), llvm_next_abi_align);
+
+ assert(next_offset >= llvm_next_offset);
+ if (next_offset > llvm_next_offset) {
+ size_t pad_bytes = next_offset - (field->offset + field_type->abi_size);
+ if (pad_bytes != 0) {
+ LLVMTypeRef pad_llvm_type = LLVMArrayType(LLVMInt8Type(), pad_bytes);
+ element_types[gen_field_index] = pad_llvm_type;
+ gen_field_index += 1;
+ }
+ }
}
debug_field_count += 1;
}
+ if (!packed) {
+ struct_type->data.structure.gen_field_count = gen_field_index;
+ }
+
if (first_packed_bits_offset_misalign != SIZE_MAX) {
size_t full_bit_count = packed_bits_offset - first_packed_bits_offset_misalign;
size_t full_abi_size = get_abi_size_bytes(full_bit_count, g->pointer_size_bytes);
@@ -6551,19 +6999,20 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS
}
if (type_has_bits(struct_type)) {
- LLVMStructSetBody(struct_type->llvm_type, element_types, (unsigned)gen_field_count, packed);
+ LLVMStructSetBody(struct_type->llvm_type, element_types,
+ (unsigned)struct_type->data.structure.gen_field_count, packed);
}
ZigLLVMDIType **di_element_types = allocate<ZigLLVMDIType*>(debug_field_count);
size_t debug_field_index = 0;
for (size_t i = 0; i < field_count; i += 1) {
- TypeStructField *type_struct_field = &struct_type->data.structure.fields[i];
- size_t gen_field_index = type_struct_field->gen_index;
+ TypeStructField *field = &struct_type->data.structure.fields[i];
+ size_t gen_field_index = field->gen_index;
if (gen_field_index == SIZE_MAX) {
continue;
}
- ZigType *field_type = type_struct_field->type_entry;
+ ZigType *field_type = field->type_entry;
// if the field is a function, actually the debug info should be a pointer.
ZigLLVMDIType *field_di_type;
@@ -6581,13 +7030,13 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS
uint64_t debug_align_in_bits;
uint64_t debug_offset_in_bits;
if (packed) {
- debug_size_in_bits = type_struct_field->type_entry->size_in_bits;
- debug_align_in_bits = 8 * type_struct_field->type_entry->abi_align;
- debug_offset_in_bits = 8 * type_struct_field->offset + type_struct_field->bit_offset_in_host;
+ debug_size_in_bits = field->type_entry->size_in_bits;
+ debug_align_in_bits = 8 * field->type_entry->abi_align;
+ debug_offset_in_bits = 8 * field->offset + field->bit_offset_in_host;
} else {
debug_size_in_bits = 8 * get_store_size_bytes(field_type->size_in_bits);
debug_align_in_bits = 8 * field_type->abi_align;
- debug_offset_in_bits = 8 * type_struct_field->offset;
+ debug_offset_in_bits = 8 * field->offset;
}
unsigned line;
if (decl_node != nullptr) {
@@ -6597,7 +7046,7 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS
line = 0;
}
di_element_types[debug_field_index] = ZigLLVMCreateDebugMemberType(g->dbuilder,
- ZigLLVMTypeToScope(struct_type->llvm_di_type), buf_ptr(type_struct_field->name),
+ ZigLLVMTypeToScope(struct_type->llvm_di_type), buf_ptr(field->name),
di_file, line,
debug_size_in_bits,
debug_align_in_bits,
@@ -6838,7 +7287,7 @@ static void resolve_llvm_types_union(CodeGen *g, ZigType *union_type, ResolveSta
union_type->data.unionation.resolve_status = ResolveStatusLLVMFull;
}
-static void resolve_llvm_types_pointer(CodeGen *g, ZigType *type) {
+static void resolve_llvm_types_pointer(CodeGen *g, ZigType *type, ResolveStatus wanted_resolve_status) {
if (type->llvm_di_type != nullptr) return;
if (!type_has_bits(type)) {
@@ -6867,7 +7316,7 @@ static void resolve_llvm_types_pointer(CodeGen *g, ZigType *type) {
uint64_t debug_align_in_bits = 8*type->abi_align;
type->llvm_di_type = ZigLLVMCreateDebugPointerType(g->dbuilder, elem_type->llvm_di_type,
debug_size_in_bits, debug_align_in_bits, buf_ptr(&type->name));
- assertNoError(type_resolve(g, elem_type, ResolveStatusLLVMFull));
+ assertNoError(type_resolve(g, elem_type, wanted_resolve_status));
} else {
ZigType *host_int_type = get_int_type(g, false, type->data.pointer.host_int_bytes * 8);
LLVMTypeRef host_int_llvm_type = get_llvm_type(g, host_int_type);
@@ -6993,10 +7442,17 @@ static void resolve_llvm_types_error_union(CodeGen *g, ZigType *type) {
} else {
LLVMTypeRef err_set_llvm_type = get_llvm_type(g, err_set_type);
LLVMTypeRef payload_llvm_type = get_llvm_type(g, payload_type);
- LLVMTypeRef elem_types[2];
+ LLVMTypeRef elem_types[3];
elem_types[err_union_err_index] = err_set_llvm_type;
elem_types[err_union_payload_index] = payload_llvm_type;
+
type->llvm_type = LLVMStructType(elem_types, 2, false);
+ if (LLVMABISizeOfType(g->target_data_ref, type->llvm_type) != type->abi_size) {
+ // we need to do our own padding
+ type->data.error_union.pad_llvm_type = LLVMArrayType(LLVMInt8Type(), type->data.error_union.pad_bytes);
+ elem_types[2] = type->data.error_union.pad_llvm_type;
+ type->llvm_type = LLVMStructType(elem_types, 3, false);
+ }
ZigLLVMDIScope *compile_unit_scope = ZigLLVMCompileUnitToScope(g->compile_unit);
ZigLLVMDIFile *di_file = nullptr;
@@ -7068,7 +7524,7 @@ static void resolve_llvm_types_array(CodeGen *g, ZigType *type) {
debug_align_in_bits, get_llvm_di_type(g, elem_type), (int)type->data.array.len);
}
-static void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type) {
+static void resolve_llvm_types_fn_type(CodeGen *g, ZigType *fn_type) {
if (fn_type->llvm_di_type != nullptr) return;
FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
@@ -7085,67 +7541,73 @@ static void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type) {
// +1 for maybe first argument the error return trace
// +2 for maybe arguments async allocator and error code pointer
ZigList<ZigLLVMDIType *> param_di_types = {};
- param_di_types.append(get_llvm_di_type(g, fn_type_id->return_type));
ZigType *gen_return_type;
if (is_async) {
- gen_return_type = get_pointer_to_type(g, g->builtin_types.entry_u8, false);
+ gen_return_type = g->builtin_types.entry_void;
+ param_di_types.append(get_llvm_di_type(g, gen_return_type));
} else if (!type_has_bits(fn_type_id->return_type)) {
gen_return_type = g->builtin_types.entry_void;
+ param_di_types.append(get_llvm_di_type(g, gen_return_type));
} else if (first_arg_return) {
+ gen_return_type = g->builtin_types.entry_void;
+ param_di_types.append(get_llvm_di_type(g, gen_return_type));
ZigType *gen_type = get_pointer_to_type(g, fn_type_id->return_type, false);
gen_param_types.append(get_llvm_type(g, gen_type));
param_di_types.append(get_llvm_di_type(g, gen_type));
- gen_return_type = g->builtin_types.entry_void;
} else {
gen_return_type = fn_type_id->return_type;
+ param_di_types.append(get_llvm_di_type(g, gen_return_type));
}
fn_type->data.fn.gen_return_type = gen_return_type;
- if (prefix_arg_error_return_trace) {
- ZigType *gen_type = get_ptr_to_stack_trace_type(g);
+ if (prefix_arg_error_return_trace && !is_async) {
+ ZigType *gen_type = get_pointer_to_type(g, get_stack_trace_type(g), false);
gen_param_types.append(get_llvm_type(g, gen_type));
param_di_types.append(get_llvm_di_type(g, gen_type));
}
if (is_async) {
- {
- // async allocator param
- ZigType *gen_type = fn_type_id->async_allocator_type;
- gen_param_types.append(get_llvm_type(g, gen_type));
- param_di_types.append(get_llvm_di_type(g, gen_type));
- }
+ fn_type->data.fn.gen_param_info = allocate<FnGenParamInfo>(2);
- {
- // error code pointer
- ZigType *gen_type = get_pointer_to_type(g, g->builtin_types.entry_global_error_set, false);
- gen_param_types.append(get_llvm_type(g, gen_type));
- param_di_types.append(get_llvm_di_type(g, gen_type));
- }
- }
+ ZigType *frame_type = get_any_frame_type(g, fn_type_id->return_type);
+ gen_param_types.append(get_llvm_type(g, frame_type));
+ param_di_types.append(get_llvm_di_type(g, frame_type));
- fn_type->data.fn.gen_param_info = allocate<FnGenParamInfo>(fn_type_id->param_count);
- for (size_t i = 0; i < fn_type_id->param_count; i += 1) {
- FnTypeParamInfo *src_param_info = &fn_type->data.fn.fn_type_id.param_info[i];
- ZigType *type_entry = src_param_info->type;
- FnGenParamInfo *gen_param_info = &fn_type->data.fn.gen_param_info[i];
+ fn_type->data.fn.gen_param_info[0].src_index = 0;
+ fn_type->data.fn.gen_param_info[0].gen_index = 0;
+ fn_type->data.fn.gen_param_info[0].type = frame_type;
- gen_param_info->src_index = i;
- gen_param_info->gen_index = SIZE_MAX;
+ gen_param_types.append(get_llvm_type(g, g->builtin_types.entry_usize));
+ param_di_types.append(get_llvm_di_type(g, g->builtin_types.entry_usize));
- if (is_c_abi || !type_has_bits(type_entry))
- continue;
+ fn_type->data.fn.gen_param_info[1].src_index = 1;
+ fn_type->data.fn.gen_param_info[1].gen_index = 1;
+ fn_type->data.fn.gen_param_info[1].type = g->builtin_types.entry_usize;
+ } else {
+ fn_type->data.fn.gen_param_info = allocate<FnGenParamInfo>(fn_type_id->param_count);
+ for (size_t i = 0; i < fn_type_id->param_count; i += 1) {
+ FnTypeParamInfo *src_param_info = &fn_type->data.fn.fn_type_id.param_info[i];
+ ZigType *type_entry = src_param_info->type;
+ FnGenParamInfo *gen_param_info = &fn_type->data.fn.gen_param_info[i];
- ZigType *gen_type;
- if (handle_is_ptr(type_entry)) {
- gen_type = get_pointer_to_type(g, type_entry, true);
- gen_param_info->is_byval = true;
- } else {
- gen_type = type_entry;
- }
- gen_param_info->gen_index = gen_param_types.length;
- gen_param_info->type = gen_type;
- gen_param_types.append(get_llvm_type(g, gen_type));
+ gen_param_info->src_index = i;
+ gen_param_info->gen_index = SIZE_MAX;
- param_di_types.append(get_llvm_di_type(g, gen_type));
+ if (is_c_abi || !type_has_bits(type_entry))
+ continue;
+
+ ZigType *gen_type;
+ if (handle_is_ptr(type_entry)) {
+ gen_type = get_pointer_to_type(g, type_entry, true);
+ gen_param_info->is_byval = true;
+ } else {
+ gen_type = type_entry;
+ }
+ gen_param_info->gen_index = gen_param_types.length;
+ gen_param_info->type = gen_type;
+ gen_param_types.append(get_llvm_type(g, gen_type));
+
+ param_di_types.append(get_llvm_di_type(g, gen_type));
+ }
}
if (is_c_abi) {
@@ -7161,6 +7623,7 @@ static void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type) {
for (size_t i = 0; i < gen_param_types.length; i += 1) {
assert(gen_param_types.items[i] != nullptr);
}
+
fn_type->data.fn.raw_type_ref = LLVMFunctionType(get_llvm_type(g, gen_return_type),
gen_param_types.items, (unsigned int)gen_param_types.length, fn_type_id->is_var_args);
fn_type->llvm_type = LLVMPointerType(fn_type->data.fn.raw_type_ref, 0);
@@ -7170,6 +7633,40 @@ static void resolve_llvm_types_fn(CodeGen *g, ZigType *fn_type) {
LLVMABIAlignmentOfType(g->target_data_ref, fn_type->llvm_type), "");
}
+void resolve_llvm_types_fn(CodeGen *g, ZigFn *fn) {
+ Error err;
+ if (fn->raw_di_type != nullptr) return;
+
+ ZigType *fn_type = fn->type_entry;
+ if (!fn_is_async(fn)) {
+ resolve_llvm_types_fn_type(g, fn_type);
+ fn->raw_type_ref = fn_type->data.fn.raw_type_ref;
+ fn->raw_di_type = fn_type->data.fn.raw_di_type;
+ return;
+ }
+
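+ // An async function body is lowered to void(*Frame, usize): the frame pointer plus a usize resume argument.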
+ ZigType *gen_return_type = g->builtin_types.entry_void;
+ ZigList<ZigLLVMDIType *> param_di_types = {};
+ ZigList<LLVMTypeRef> gen_param_types = {};
+ // first "parameter" is return value
+ param_di_types.append(get_llvm_di_type(g, gen_return_type));
+
+ ZigType *frame_type = get_fn_frame_type(g, fn);
+ ZigType *ptr_type = get_pointer_to_type(g, frame_type, false);
+ if ((err = type_resolve(g, ptr_type, ResolveStatusLLVMFwdDecl)))
+ zig_unreachable();
+ gen_param_types.append(ptr_type->llvm_type);
+ param_di_types.append(ptr_type->llvm_di_type);
+
+ // this parameter is used to pass the result pointer when await completes
+ gen_param_types.append(get_llvm_type(g, g->builtin_types.entry_usize));
+ param_di_types.append(get_llvm_di_type(g, g->builtin_types.entry_usize));
+
+ fn->raw_type_ref = LLVMFunctionType(get_llvm_type(g, gen_return_type),
+ gen_param_types.items, gen_param_types.length, false);
+ fn->raw_di_type = ZigLLVMCreateSubroutineType(g->dbuilder, param_di_types.items, (int)param_di_types.length, 0);
+}
+
static void resolve_llvm_types_anyerror(CodeGen *g) {
ZigType *entry = g->builtin_types.entry_global_error_set;
entry->llvm_type = get_llvm_type(g, g->err_tag_type);
@@ -7194,6 +7691,147 @@ static void resolve_llvm_types_anyerror(CodeGen *g) {
get_llvm_di_type(g, g->err_tag_type), "");
}
+static void resolve_llvm_types_async_frame(CodeGen *g, ZigType *frame_type, ResolveStatus wanted_resolve_status) {
+ ZigType *passed_frame_type = fn_is_async(frame_type->data.frame.fn) ? frame_type : nullptr;
+ resolve_llvm_types_struct(g, frame_type->data.frame.locals_struct, wanted_resolve_status, passed_frame_type);
+ frame_type->llvm_type = frame_type->data.frame.locals_struct->llvm_type;
+ frame_type->llvm_di_type = frame_type->data.frame.locals_struct->llvm_di_type;
+}
+
+static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, ResolveStatus wanted_resolve_status) {
+ if (any_frame_type->llvm_di_type != nullptr) return;
+
+ Buf *name = buf_sprintf("(%s header)", buf_ptr(&any_frame_type->name));
+ LLVMTypeRef frame_header_type = LLVMStructCreateNamed(LLVMGetGlobalContext(), buf_ptr(name));
+ any_frame_type->llvm_type = LLVMPointerType(frame_header_type, 0);
+
+ unsigned dwarf_kind = ZigLLVMTag_DW_structure_type();
+ ZigLLVMDIFile *di_file = nullptr;
+ ZigLLVMDIScope *di_scope = ZigLLVMCompileUnitToScope(g->compile_unit);
+ unsigned line = 0;
+ ZigLLVMDIType *frame_header_di_type = ZigLLVMCreateReplaceableCompositeType(g->dbuilder,
+ dwarf_kind, buf_ptr(name), di_scope, di_file, line);
+ any_frame_type->llvm_di_type = ZigLLVMCreateDebugPointerType(g->dbuilder, frame_header_di_type,
+ 8*g->pointer_size_bytes, 8*g->builtin_types.entry_usize->abi_align, buf_ptr(&any_frame_type->name));
+
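+ // The first header field is a pointer to the resume function, which takes the frame pointer and a usize argument.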
+ LLVMTypeRef llvm_void = LLVMVoidType();
+ LLVMTypeRef arg_types[] = {any_frame_type->llvm_type, g->builtin_types.entry_usize->llvm_type};
+ LLVMTypeRef fn_type = LLVMFunctionType(llvm_void, arg_types, 2, false);
+ LLVMTypeRef usize_type_ref = get_llvm_type(g, g->builtin_types.entry_usize);
+ ZigLLVMDIType *usize_di_type = get_llvm_di_type(g, g->builtin_types.entry_usize);
+ ZigLLVMDIScope *compile_unit_scope = ZigLLVMCompileUnitToScope(g->compile_unit);
+
+ ZigType *result_type = any_frame_type->data.any_frame.result_type;
+ ZigType *ptr_result_type = (result_type == nullptr) ? nullptr : get_pointer_to_type(g, result_type, false);
+ LLVMTypeRef ptr_fn_llvm_type = LLVMPointerType(fn_type, 0);
+ if (result_type == nullptr) {
+ g->anyframe_fn_type = ptr_fn_llvm_type;
+ }
+
+ ZigList<LLVMTypeRef> field_types = {};
+ ZigList<ZigLLVMDIType *> di_element_types = {};
+
+ // label (grep this): [fn_frame_struct_layout]
+ field_types.append(ptr_fn_llvm_type); // fn_ptr
+ field_types.append(usize_type_ref); // resume_index
+ field_types.append(usize_type_ref); // awaiter
+
+ bool have_result_type = result_type != nullptr && type_has_bits(result_type);
+ if (have_result_type) {
+ field_types.append(get_llvm_type(g, ptr_result_type)); // result_ptr_callee
+ field_types.append(get_llvm_type(g, ptr_result_type)); // result_ptr_awaiter
+ field_types.append(get_llvm_type(g, result_type)); // result
+ if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) {
+ ZigType *ptr_stack_trace = get_pointer_to_type(g, get_stack_trace_type(g), false);
+ field_types.append(get_llvm_type(g, ptr_stack_trace)); // ptr_stack_trace_callee
+ field_types.append(get_llvm_type(g, ptr_stack_trace)); // ptr_stack_trace_awaiter
+ }
+ }
+ LLVMStructSetBody(frame_header_type, field_types.items, field_types.length, false);
+
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "fn_ptr",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, usize_di_type));
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "resume_index",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, usize_di_type));
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "awaiter",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, usize_di_type));
+
+ if (have_result_type) {
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "result_ptr_callee",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_result_type)));
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "result_ptr_awaiter",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_result_type)));
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "result",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, result_type)));
+
+ if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) {
+ ZigType *ptr_stack_trace = get_pointer_to_type(g, get_stack_trace_type(g), false);
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "ptr_stack_trace_callee",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_stack_trace)));
+ di_element_types.append(
+ ZigLLVMCreateDebugMemberType(g->dbuilder,
+ ZigLLVMTypeToScope(any_frame_type->llvm_di_type), "ptr_stack_trace_awaiter",
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, field_types.at(di_element_types.length)),
+ 8*LLVMOffsetOfElement(g->target_data_ref, frame_header_type, di_element_types.length),
+ ZigLLVM_DIFlags_Zero, get_llvm_di_type(g, ptr_stack_trace)));
+ }
+ }
+
+ ZigLLVMDIType *replacement_di_type = ZigLLVMCreateDebugStructType(g->dbuilder,
+ compile_unit_scope, buf_ptr(name),
+ di_file, line,
+ 8*LLVMABISizeOfType(g->target_data_ref, frame_header_type),
+ 8*LLVMABIAlignmentOfType(g->target_data_ref, frame_header_type),
+ ZigLLVM_DIFlags_Zero,
+ nullptr, di_element_types.items, di_element_types.length, 0, nullptr, "");
+
+ ZigLLVMReplaceTemporary(g->dbuilder, frame_header_di_type, replacement_di_type);
+}
+
static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_resolve_status) {
assert(type->id == ZigTypeIdOpaque || type_is_resolved(type, ResolveStatusSizeKnown));
assert(wanted_resolve_status > ResolveStatusSizeKnown);
@@ -7219,20 +7857,13 @@ static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_r
if (type->data.structure.is_slice)
return resolve_llvm_types_slice(g, type, wanted_resolve_status);
else
- return resolve_llvm_types_struct(g, type, wanted_resolve_status);
+ return resolve_llvm_types_struct(g, type, wanted_resolve_status, nullptr);
case ZigTypeIdEnum:
return resolve_llvm_types_enum(g, type);
case ZigTypeIdUnion:
return resolve_llvm_types_union(g, type, wanted_resolve_status);
case ZigTypeIdPointer:
- return resolve_llvm_types_pointer(g, type);
- case ZigTypeIdPromise: {
- if (type->llvm_di_type != nullptr) return;
- ZigType *u8_ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, false);
- type->llvm_type = get_llvm_type(g, u8_ptr_type);
- type->llvm_di_type = get_llvm_di_type(g, u8_ptr_type);
- return;
- }
+ return resolve_llvm_types_pointer(g, type, wanted_resolve_status);
case ZigTypeIdInt:
return resolve_llvm_types_integer(g, type);
case ZigTypeIdOptional:
@@ -7242,7 +7873,7 @@ static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_r
case ZigTypeIdArray:
return resolve_llvm_types_array(g, type);
case ZigTypeIdFn:
- return resolve_llvm_types_fn(g, type);
+ return resolve_llvm_types_fn_type(g, type);
case ZigTypeIdErrorSet: {
if (type->llvm_di_type != nullptr) return;
@@ -7261,14 +7892,18 @@ static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_r
type->abi_align, get_llvm_di_type(g, type->data.vector.elem_type), type->data.vector.len);
return;
}
+ case ZigTypeIdFnFrame:
+ return resolve_llvm_types_async_frame(g, type, wanted_resolve_status);
+ case ZigTypeIdAnyFrame:
+ return resolve_llvm_types_any_frame(g, type, wanted_resolve_status);
}
zig_unreachable();
}
LLVMTypeRef get_llvm_type(CodeGen *g, ZigType *type) {
assertNoError(type_resolve(g, type, ResolveStatusLLVMFull));
- assert(type->abi_size == 0 || type->abi_size == LLVMABISizeOfType(g->target_data_ref, type->llvm_type));
- assert(type->abi_align == 0 || type->abi_align == LLVMABIAlignmentOfType(g->target_data_ref, type->llvm_type));
+ assert(type->abi_size == 0 || type->abi_size >= LLVMABISizeOfType(g->target_data_ref, type->llvm_type));
+ assert(type->abi_align == 0 || type->abi_align >= LLVMABIAlignmentOfType(g->target_data_ref, type->llvm_type));
return type->llvm_type;
}
diff --git a/src/analyze.hpp b/src/analyze.hpp
index b9e9f2df7d..5752c74751 100644
--- a/src/analyze.hpp
+++ b/src/analyze.hpp
@@ -11,11 +11,12 @@
#include "all_types.hpp"
void semantic_analyze(CodeGen *g);
-ErrorMsg *add_node_error(CodeGen *g, AstNode *node, Buf *msg);
+ErrorMsg *add_node_error(CodeGen *g, const AstNode *node, Buf *msg);
ErrorMsg *add_token_error(CodeGen *g, ZigType *owner, Token *token, Buf *msg);
-ErrorMsg *add_error_note(CodeGen *g, ErrorMsg *parent_msg, AstNode *node, Buf *msg);
+ErrorMsg *add_error_note(CodeGen *g, ErrorMsg *parent_msg, const AstNode *node, Buf *msg);
void emit_error_notes_for_ref_stack(CodeGen *g, ErrorMsg *msg);
ZigType *new_type_table_entry(ZigTypeId id);
+ZigType *get_fn_frame_type(CodeGen *g, ZigFn *fn);
ZigType *get_pointer_to_type(CodeGen *g, ZigType *child_type, bool is_const);
ZigType *get_pointer_to_type_extra(CodeGen *g, ZigType *child_type, bool is_const,
bool is_volatile, PtrLen ptr_len,
@@ -37,11 +38,8 @@ ZigType *get_smallest_unsigned_int_type(CodeGen *g, uint64_t x);
ZigType *get_error_union_type(CodeGen *g, ZigType *err_set_type, ZigType *payload_type);
ZigType *get_bound_fn_type(CodeGen *g, ZigFn *fn_entry);
ZigType *get_opaque_type(CodeGen *g, Scope *scope, AstNode *source_node, const char *full_name, Buf *bare_name);
-ZigType *get_struct_type(CodeGen *g, const char *type_name, const char *field_names[],
- ZigType *field_types[], size_t field_count);
-ZigType *get_promise_type(CodeGen *g, ZigType *result_type);
-ZigType *get_promise_frame_type(CodeGen *g, ZigType *return_type);
ZigType *get_test_fn_type(CodeGen *g);
+ZigType *get_any_frame_type(CodeGen *g, ZigType *result_type);
bool handle_is_ptr(ZigType *type_entry);
bool type_has_bits(ZigType *type_entry);
@@ -106,7 +104,6 @@ void eval_min_max_value(CodeGen *g, ZigType *type_entry, ConstExprValue *const_v
void eval_min_max_value_int(CodeGen *g, ZigType *int_type, BigInt *bigint, bool is_max);
void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val);
-void analyze_fn_ir(CodeGen *g, ZigFn *fn_table_entry, AstNode *return_type_node);
ScopeBlock *create_block_scope(CodeGen *g, AstNode *node, Scope *parent);
ScopeDefer *create_defer_scope(CodeGen *g, AstNode *node, Scope *parent);
@@ -117,7 +114,6 @@ ScopeLoop *create_loop_scope(CodeGen *g, AstNode *node, Scope *parent);
ScopeSuspend *create_suspend_scope(CodeGen *g, AstNode *node, Scope *parent);
ScopeFnDef *create_fndef_scope(CodeGen *g, AstNode *node, Scope *parent, ZigFn *fn_entry);
Scope *create_comptime_scope(CodeGen *g, AstNode *node, Scope *parent);
-Scope *create_coro_prelude_scope(CodeGen *g, AstNode *node, Scope *parent);
Scope *create_runtime_scope(CodeGen *g, AstNode *node, Scope *parent, IrInstruction *is_comptime);
void init_const_str_lit(CodeGen *g, ConstExprValue *const_val, Buf *str);
@@ -199,12 +195,11 @@ void add_var_export(CodeGen *g, ZigVar *fn_table_entry, Buf *symbol_name, Global
ConstExprValue *get_builtin_value(CodeGen *codegen, const char *name);
-ZigType *get_ptr_to_stack_trace_type(CodeGen *g);
+ZigType *get_stack_trace_type(CodeGen *g);
bool resolve_inferred_error_set(CodeGen *g, ZigType *err_set_type, AstNode *source_node);
ZigType *get_auto_err_set_type(CodeGen *g, ZigFn *fn_entry);
-uint32_t get_coro_frame_align_bytes(CodeGen *g);
bool fn_type_can_fail(FnTypeId *fn_type_id);
bool type_can_fail(ZigType *type_entry);
bool fn_eval_cacheable(Scope *scope, ZigType *return_type);
@@ -251,4 +246,7 @@ void src_assert(bool ok, AstNode *source_node);
bool is_container(ZigType *type_entry);
ConstExprValue *analyze_const_value(CodeGen *g, Scope *scope, AstNode *node, ZigType *type_entry, Buf *type_name);
+void resolve_llvm_types_fn(CodeGen *g, ZigFn *fn);
+bool fn_is_async(ZigFn *fn);
+
#endif
diff --git a/src/ast_render.cpp b/src/ast_render.cpp
index af134d29b5..334dc37b59 100644
--- a/src/ast_render.cpp
+++ b/src/ast_render.cpp
@@ -249,18 +249,16 @@ static const char *node_type_str(NodeType node_type) {
return "IfOptional";
case NodeTypeErrorSetDecl:
return "ErrorSetDecl";
- case NodeTypeCancel:
- return "Cancel";
case NodeTypeResume:
return "Resume";
case NodeTypeAwaitExpr:
return "AwaitExpr";
case NodeTypeSuspend:
return "Suspend";
- case NodeTypePromiseType:
- return "PromiseType";
case NodeTypePointerType:
return "PointerType";
+ case NodeTypeAnyFrameType:
+ return "AnyFrameType";
case NodeTypeEnumLiteral:
return "EnumLiteral";
}
@@ -699,13 +697,7 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
fprintf(ar->f, "@");
}
if (node->data.fn_call_expr.is_async) {
- fprintf(ar->f, "async");
- if (node->data.fn_call_expr.async_allocator != nullptr) {
- fprintf(ar->f, "<");
- render_node_extra(ar, node->data.fn_call_expr.async_allocator, true);
- fprintf(ar->f, ">");
- }
- fprintf(ar->f, " ");
+ fprintf(ar->f, "async ");
}
AstNode *fn_ref_node = node->data.fn_call_expr.fn_ref_expr;
bool grouped = (fn_ref_node->type != NodeTypePrefixOpExpr && fn_ref_node->type != NodeTypePointerType);
@@ -862,15 +854,14 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
render_node_ungrouped(ar, node->data.inferred_array_type.child_type);
break;
}
- case NodeTypePromiseType:
- {
- fprintf(ar->f, "promise");
- if (node->data.promise_type.payload_type != nullptr) {
- fprintf(ar->f, "->");
- render_node_grouped(ar, node->data.promise_type.payload_type);
- }
- break;
+ case NodeTypeAnyFrameType: {
+ fprintf(ar->f, "anyframe");
+ if (node->data.anyframe_type.payload_type != nullptr) {
+ fprintf(ar->f, "->");
+ render_node_grouped(ar, node->data.anyframe_type.payload_type);
}
+ break;
+ }
case NodeTypeErrorType:
fprintf(ar->f, "anyerror");
break;
@@ -1143,12 +1134,6 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
fprintf(ar->f, "}");
break;
}
- case NodeTypeCancel:
- {
- fprintf(ar->f, "cancel ");
- render_node_grouped(ar, node->data.cancel_expr.expr);
- break;
- }
case NodeTypeResume:
{
fprintf(ar->f, "resume ");
@@ -1163,9 +1148,11 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
}
case NodeTypeSuspend:
{
- fprintf(ar->f, "suspend");
if (node->data.suspend.block != nullptr) {
+ fprintf(ar->f, "suspend ");
render_node_grouped(ar, node->data.suspend.block);
+ } else {
+ fprintf(ar->f, "suspend\n");
}
break;
}
@@ -1191,3 +1178,9 @@ void ast_render(FILE *f, AstNode *node, int indent_size) {
render_node_grouped(&ar, node);
}
+
+void AstNode::src() {
+ fprintf(stderr, "%s:%" ZIG_PRI_usize ":%" ZIG_PRI_usize "\n",
+ buf_ptr(this->owner->data.structure.root_struct->path),
+ this->line + 1, this->column + 1);
+}
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 32e6d2fbee..45e2e4122f 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -24,6 +24,12 @@
#include <stdio.h>
#include <errno.h>
+enum ResumeId {
+ ResumeIdManual,
+ ResumeIdReturn,
+ ResumeIdCall,
+};
+
static void init_darwin_native(CodeGen *g) {
char *osx_target = getenv("MACOSX_DEPLOYMENT_TARGET");
char *ios_target = getenv("IPHONEOS_DEPLOYMENT_TARGET");
@@ -297,12 +303,42 @@ static LLVMLinkage to_llvm_linkage(GlobalLinkageId id) {
zig_unreachable();
}
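+// These helpers compute generated-field indices into the async function frame; they must stay in sync with the frame layout (grep the label below).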
+// label (grep this): [fn_frame_struct_layout]
+static uint32_t frame_index_trace_arg(CodeGen *g, ZigType *return_type) {
+ // [0] *ReturnType (callee's)
+ // [1] *ReturnType (awaiter's)
+ // [2] ReturnType
+ uint32_t return_field_count = type_has_bits(return_type) ? 3 : 0;
+ return frame_ret_start + return_field_count;
+}
+
+// label (grep this): [fn_frame_struct_layout]
+static uint32_t frame_index_arg(CodeGen *g, ZigType *return_type) {
+ bool have_stack_trace = codegen_fn_has_err_ret_tracing_arg(g, return_type);
+ // [0] *StackTrace (callee's)
+ // [1] *StackTrace (awaiter's)
+ uint32_t trace_field_count = have_stack_trace ? 2 : 0;
+ return frame_index_trace_arg(g, return_type) + trace_field_count;
+}
+
+// label (grep this): [fn_frame_struct_layout]
+static uint32_t frame_index_trace_stack(CodeGen *g, FnTypeId *fn_type_id) {
+ uint32_t result = frame_index_arg(g, fn_type_id->return_type);
+ for (size_t i = 0; i < fn_type_id->param_count; i += 1) {
+ if (type_has_bits(fn_type_id->param_info[i].type)) {
+ result += 1;
+ }
+ }
+ return result;
+}
+
static uint32_t get_err_ret_trace_arg_index(CodeGen *g, ZigFn *fn_table_entry) {
if (!g->have_err_ret_tracing) {
return UINT32_MAX;
}
- if (fn_table_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync) {
- return 0;
+ if (fn_is_async(fn_table_entry)) {
+ return UINT32_MAX;
}
ZigType *fn_type = fn_table_entry->type_entry;
if (!fn_type_can_fail(&fn_type->data.fn.fn_type_id)) {
@@ -343,27 +379,28 @@ static bool cc_want_sret_attr(CallingConvention cc) {
zig_unreachable();
}
-static LLVMValueRef fn_llvm_value(CodeGen *g, ZigFn *fn_table_entry) {
- if (fn_table_entry->llvm_value)
- return fn_table_entry->llvm_value;
+static bool codegen_have_frame_pointer(CodeGen *g) {
+ return g->build_mode == BuildModeDebug;
+}
- Buf *unmangled_name = &fn_table_entry->symbol_name;
+static LLVMValueRef make_fn_llvm_value(CodeGen *g, ZigFn *fn) {
+ Buf *unmangled_name = &fn->symbol_name;
Buf *symbol_name;
GlobalLinkageId linkage;
- if (fn_table_entry->body_node == nullptr) {
+ if (fn->body_node == nullptr) {
symbol_name = unmangled_name;
linkage = GlobalLinkageIdStrong;
- } else if (fn_table_entry->export_list.length == 0) {
+ } else if (fn->export_list.length == 0) {
symbol_name = get_mangled_name(g, unmangled_name, false);
linkage = GlobalLinkageIdInternal;
} else {
- GlobalExport *fn_export = &fn_table_entry->export_list.items[0];
+ GlobalExport *fn_export = &fn->export_list.items[0];
symbol_name = &fn_export->name;
linkage = fn_export->linkage;
}
bool external_linkage = linkage != GlobalLinkageIdInternal;
- CallingConvention cc = fn_table_entry->type_entry->data.fn.fn_type_id.cc;
+ CallingConvention cc = fn->type_entry->data.fn.fn_type_id.cc;
if (cc == CallingConventionStdcall && external_linkage &&
g->zig_target->arch == ZigLLVM_x86)
{
@@ -371,130 +408,125 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, ZigFn *fn_table_entry) {
symbol_name = buf_sprintf("\x01_%s", buf_ptr(symbol_name));
}
+ bool is_async = fn_is_async(fn);
- ZigType *fn_type = fn_table_entry->type_entry;
+
+ ZigType *fn_type = fn->type_entry;
// Make the raw_type_ref populated
- (void)get_llvm_type(g, fn_type);
- LLVMTypeRef fn_llvm_type = fn_type->data.fn.raw_type_ref;
- if (fn_table_entry->body_node == nullptr) {
+ resolve_llvm_types_fn(g, fn);
+ LLVMTypeRef fn_llvm_type = fn->raw_type_ref;
+ LLVMValueRef llvm_fn = nullptr;
+ if (fn->body_node == nullptr) {
LLVMValueRef existing_llvm_fn = LLVMGetNamedFunction(g->module, buf_ptr(symbol_name));
if (existing_llvm_fn) {
- fn_table_entry->llvm_value = LLVMConstBitCast(existing_llvm_fn, LLVMPointerType(fn_llvm_type, 0));
- return fn_table_entry->llvm_value;
+ return LLVMConstBitCast(existing_llvm_fn, LLVMPointerType(fn_llvm_type, 0));
} else {
auto entry = g->exported_symbol_names.maybe_get(symbol_name);
if (entry == nullptr) {
- fn_table_entry->llvm_value = LLVMAddFunction(g->module, buf_ptr(symbol_name), fn_llvm_type);
+ llvm_fn = LLVMAddFunction(g->module, buf_ptr(symbol_name), fn_llvm_type);
if (target_is_wasm(g->zig_target)) {
- assert(fn_table_entry->proto_node->type == NodeTypeFnProto);
- AstNodeFnProto *fn_proto = &fn_table_entry->proto_node->data.fn_proto;
+ assert(fn->proto_node->type == NodeTypeFnProto);
+ AstNodeFnProto *fn_proto = &fn->proto_node->data.fn_proto;
if (fn_proto-> is_extern && fn_proto->lib_name != nullptr ) {
- addLLVMFnAttrStr(fn_table_entry->llvm_value, "wasm-import-module", buf_ptr(fn_proto->lib_name));
+ addLLVMFnAttrStr(llvm_fn, "wasm-import-module", buf_ptr(fn_proto->lib_name));
}
}
} else {
assert(entry->value->id == TldIdFn);
TldFn *tld_fn = reinterpret_cast<TldFn *>(entry->value);
// Make the raw_type_ref populated
- (void)get_llvm_type(g, tld_fn->fn_entry->type_entry);
+ resolve_llvm_types_fn(g, tld_fn->fn_entry);
tld_fn->fn_entry->llvm_value = LLVMAddFunction(g->module, buf_ptr(symbol_name),
- tld_fn->fn_entry->type_entry->data.fn.raw_type_ref);
- fn_table_entry->llvm_value = LLVMConstBitCast(tld_fn->fn_entry->llvm_value,
- LLVMPointerType(fn_llvm_type, 0));
- return fn_table_entry->llvm_value;
+ tld_fn->fn_entry->raw_type_ref);
+ llvm_fn = LLVMConstBitCast(tld_fn->fn_entry->llvm_value, LLVMPointerType(fn_llvm_type, 0));
+ return llvm_fn;
}
}
} else {
- if (fn_table_entry->llvm_value == nullptr) {
- fn_table_entry->llvm_value = LLVMAddFunction(g->module, buf_ptr(symbol_name), fn_llvm_type);
+ if (llvm_fn == nullptr) {
+ llvm_fn = LLVMAddFunction(g->module, buf_ptr(symbol_name), fn_llvm_type);
}
- for (size_t i = 1; i < fn_table_entry->export_list.length; i += 1) {
- GlobalExport *fn_export = &fn_table_entry->export_list.items[i];
- LLVMAddAlias(g->module, LLVMTypeOf(fn_table_entry->llvm_value),
- fn_table_entry->llvm_value, buf_ptr(&fn_export->name));
+ for (size_t i = 1; i < fn->export_list.length; i += 1) {
+ GlobalExport *fn_export = &fn->export_list.items[i];
+ LLVMAddAlias(g->module, LLVMTypeOf(llvm_fn), llvm_fn, buf_ptr(&fn_export->name));
}
}
- fn_table_entry->llvm_name = strdup(LLVMGetValueName(fn_table_entry->llvm_value));
- switch (fn_table_entry->fn_inline) {
+ switch (fn->fn_inline) {
case FnInlineAlways:
- addLLVMFnAttr(fn_table_entry->llvm_value, "alwaysinline");
- g->inline_fns.append(fn_table_entry);
+ addLLVMFnAttr(llvm_fn, "alwaysinline");
+ g->inline_fns.append(fn);
break;
case FnInlineNever:
- addLLVMFnAttr(fn_table_entry->llvm_value, "noinline");
+ addLLVMFnAttr(llvm_fn, "noinline");
break;
case FnInlineAuto:
- if (fn_table_entry->alignstack_value != 0) {
- addLLVMFnAttr(fn_table_entry->llvm_value, "noinline");
+ if (fn->alignstack_value != 0) {
+ addLLVMFnAttr(llvm_fn, "noinline");
}
break;
}
if (cc == CallingConventionNaked) {
- addLLVMFnAttr(fn_table_entry->llvm_value, "naked");
+ addLLVMFnAttr(llvm_fn, "naked");
} else {
- LLVMSetFunctionCallConv(fn_table_entry->llvm_value, get_llvm_cc(g, fn_type->data.fn.fn_type_id.cc));
- }
- if (cc == CallingConventionAsync) {
- addLLVMFnAttr(fn_table_entry->llvm_value, "optnone");
- addLLVMFnAttr(fn_table_entry->llvm_value, "noinline");
+ LLVMSetFunctionCallConv(llvm_fn, get_llvm_cc(g, fn_type->data.fn.fn_type_id.cc));
}
- bool want_cold = fn_table_entry->is_cold || cc == CallingConventionCold;
+ bool want_cold = fn->is_cold || cc == CallingConventionCold;
if (want_cold) {
- ZigLLVMAddFunctionAttrCold(fn_table_entry->llvm_value);
+ ZigLLVMAddFunctionAttrCold(llvm_fn);
}
- LLVMSetLinkage(fn_table_entry->llvm_value, to_llvm_linkage(linkage));
+ LLVMSetLinkage(llvm_fn, to_llvm_linkage(linkage));
if (linkage == GlobalLinkageIdInternal) {
- LLVMSetUnnamedAddr(fn_table_entry->llvm_value, true);
+ LLVMSetUnnamedAddr(llvm_fn, true);
}
ZigType *return_type = fn_type->data.fn.fn_type_id.return_type;
if (return_type->id == ZigTypeIdUnreachable) {
- addLLVMFnAttr(fn_table_entry->llvm_value, "noreturn");
+ addLLVMFnAttr(llvm_fn, "noreturn");
}
- if (fn_table_entry->body_node != nullptr) {
- maybe_export_dll(g, fn_table_entry->llvm_value, linkage);
+ if (fn->body_node != nullptr) {
+ maybe_export_dll(g, llvm_fn, linkage);
bool want_fn_safety = g->build_mode != BuildModeFastRelease &&
g->build_mode != BuildModeSmallRelease &&
- !fn_table_entry->def_scope->safety_off;
+ !fn->def_scope->safety_off;
if (want_fn_safety) {
if (g->libc_link_lib != nullptr) {
- addLLVMFnAttr(fn_table_entry->llvm_value, "sspstrong");
- addLLVMFnAttrStr(fn_table_entry->llvm_value, "stack-protector-buffer-size", "4");
+ addLLVMFnAttr(llvm_fn, "sspstrong");
+ addLLVMFnAttrStr(llvm_fn, "stack-protector-buffer-size", "4");
}
}
- if (g->have_stack_probing && !fn_table_entry->def_scope->safety_off) {
- addLLVMFnAttrStr(fn_table_entry->llvm_value, "probe-stack", "__zig_probe_stack");
+ if (g->have_stack_probing && !fn->def_scope->safety_off) {
+ addLLVMFnAttrStr(llvm_fn, "probe-stack", "__zig_probe_stack");
}
} else {
- maybe_import_dll(g, fn_table_entry->llvm_value, linkage);
+ maybe_import_dll(g, llvm_fn, linkage);
}
- if (fn_table_entry->alignstack_value != 0) {
- addLLVMFnAttrInt(fn_table_entry->llvm_value, "alignstack", fn_table_entry->alignstack_value);
+ if (fn->alignstack_value != 0) {
+ addLLVMFnAttrInt(llvm_fn, "alignstack", fn->alignstack_value);
}
- addLLVMFnAttr(fn_table_entry->llvm_value, "nounwind");
- add_uwtable_attr(g, fn_table_entry->llvm_value);
- addLLVMFnAttr(fn_table_entry->llvm_value, "nobuiltin");
- if (g->build_mode == BuildModeDebug && fn_table_entry->fn_inline != FnInlineAlways) {
- ZigLLVMAddFunctionAttr(fn_table_entry->llvm_value, "no-frame-pointer-elim", "true");
- ZigLLVMAddFunctionAttr(fn_table_entry->llvm_value, "no-frame-pointer-elim-non-leaf", nullptr);
+ addLLVMFnAttr(llvm_fn, "nounwind");
+ add_uwtable_attr(g, llvm_fn);
+ addLLVMFnAttr(llvm_fn, "nobuiltin");
+ if (codegen_have_frame_pointer(g) && fn->fn_inline != FnInlineAlways) {
+ ZigLLVMAddFunctionAttr(llvm_fn, "no-frame-pointer-elim", "true");
+ ZigLLVMAddFunctionAttr(llvm_fn, "no-frame-pointer-elim-non-leaf", nullptr);
}
- if (fn_table_entry->section_name) {
- LLVMSetSection(fn_table_entry->llvm_value, buf_ptr(fn_table_entry->section_name));
+ if (fn->section_name) {
+ LLVMSetSection(llvm_fn, buf_ptr(fn->section_name));
}
- if (fn_table_entry->align_bytes > 0) {
- LLVMSetAlignment(fn_table_entry->llvm_value, (unsigned)fn_table_entry->align_bytes);
+ if (fn->align_bytes > 0) {
+ LLVMSetAlignment(llvm_fn, (unsigned)fn->align_bytes);
} else {
// We'd like to set the best alignment for the function here, but on Darwin LLVM gives
// "Cannot getTypeInfo() on a type that is unsized!" assertion failure when calling
@@ -502,36 +534,50 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, ZigFn *fn_table_entry) {
// use the ABI alignment, which is fine.
}
- unsigned init_gen_i = 0;
- if (!type_has_bits(return_type)) {
- // nothing to do
- } else if (type_is_nonnull_ptr(return_type)) {
- addLLVMAttr(fn_table_entry->llvm_value, 0, "nonnull");
- } else if (want_first_arg_sret(g, &fn_type->data.fn.fn_type_id)) {
- // Sret pointers must not be address 0
- addLLVMArgAttr(fn_table_entry->llvm_value, 0, "nonnull");
- addLLVMArgAttr(fn_table_entry->llvm_value, 0, "sret");
- if (cc_want_sret_attr(cc)) {
- addLLVMArgAttr(fn_table_entry->llvm_value, 0, "noalias");
+ if (is_async) {
+ addLLVMArgAttr(llvm_fn, 0, "nonnull");
+ } else {
+ unsigned init_gen_i = 0;
+ if (!type_has_bits(return_type)) {
+ // nothing to do
+ } else if (type_is_nonnull_ptr(return_type)) {
+ addLLVMAttr(llvm_fn, 0, "nonnull");
+ } else if (want_first_arg_sret(g, &fn_type->data.fn.fn_type_id)) {
+ // Sret pointers must not be address 0
+ addLLVMArgAttr(llvm_fn, 0, "nonnull");
+ addLLVMArgAttr(llvm_fn, 0, "sret");
+ if (cc_want_sret_attr(cc)) {
+ addLLVMArgAttr(llvm_fn, 0, "noalias");
+ }
+ init_gen_i = 1;
}
- init_gen_i = 1;
- }
- // set parameter attributes
- FnWalk fn_walk = {};
- fn_walk.id = FnWalkIdAttrs;
- fn_walk.data.attrs.fn = fn_table_entry;
- fn_walk.data.attrs.gen_i = init_gen_i;
- walk_function_params(g, fn_type, &fn_walk);
+ // set parameter attributes
+ FnWalk fn_walk = {};
+ fn_walk.id = FnWalkIdAttrs;
+ fn_walk.data.attrs.fn = fn;
+ fn_walk.data.attrs.llvm_fn = llvm_fn;
+ fn_walk.data.attrs.gen_i = init_gen_i;
+ walk_function_params(g, fn_type, &fn_walk);
- uint32_t err_ret_trace_arg_index = get_err_ret_trace_arg_index(g, fn_table_entry);
- if (err_ret_trace_arg_index != UINT32_MAX) {
- // Error return trace memory is in the stack, which is impossible to be at address 0
- // on any architecture.
- addLLVMArgAttr(fn_table_entry->llvm_value, (unsigned)err_ret_trace_arg_index, "nonnull");
+ uint32_t err_ret_trace_arg_index = get_err_ret_trace_arg_index(g, fn);
+ if (err_ret_trace_arg_index != UINT32_MAX) {
+ // Error return trace memory is in the stack, which is impossible to be at address 0
+ // on any architecture.
+ addLLVMArgAttr(llvm_fn, (unsigned)err_ret_trace_arg_index, "nonnull");
+ }
}
- return fn_table_entry->llvm_value;
+ return llvm_fn;
+}
+
+static LLVMValueRef fn_llvm_value(CodeGen *g, ZigFn *fn) {
+ if (fn->llvm_value)
+ return fn->llvm_value;
+
+ fn->llvm_value = make_fn_llvm_value(g, fn);
+ fn->llvm_name = strdup(LLVMGetValueName(fn->llvm_value));
+ return fn->llvm_value;
}
static ZigLLVMDIScope *get_di_scope(CodeGen *g, Scope *scope) {
@@ -559,10 +605,11 @@ static ZigLLVMDIScope *get_di_scope(CodeGen *g, Scope *scope) {
unsigned flags = ZigLLVM_DIFlags_StaticMember;
ZigLLVMDIScope *fn_di_scope = get_di_scope(g, scope->parent);
assert(fn_di_scope != nullptr);
+ assert(fn_table_entry->raw_di_type != nullptr);
ZigLLVMDISubprogram *subprogram = ZigLLVMCreateFunction(g->dbuilder,
fn_di_scope, buf_ptr(&fn_table_entry->symbol_name), "",
import->data.structure.root_struct->di_file, line_number,
- fn_table_entry->type_entry->data.fn.raw_di_type, is_internal_linkage,
+ fn_table_entry->raw_di_type, is_internal_linkage,
is_definition, scope_line, flags, is_optimized, nullptr);
scope->di_scope = ZigLLVMSubprogramToScope(subprogram);
@@ -597,7 +644,6 @@ static ZigLLVMDIScope *get_di_scope(CodeGen *g, Scope *scope) {
case ScopeIdLoop:
case ScopeIdSuspend:
case ScopeIdCompTime:
- case ScopeIdCoroPrelude:
case ScopeIdRuntime:
return get_di_scope(g, scope->parent);
}
@@ -798,9 +844,8 @@ static bool ir_want_fast_math(CodeGen *g, IrInstruction *instruction) {
return false;
}
-static bool ir_want_runtime_safety(CodeGen *g, IrInstruction *instruction) {
+static bool ir_want_runtime_safety_scope(CodeGen *g, Scope *scope) {
// TODO memoize
- Scope *scope = instruction->scope;
while (scope) {
if (scope->id == ScopeIdBlock) {
ScopeBlock *block_scope = (ScopeBlock *)scope;
@@ -818,6 +863,10 @@ static bool ir_want_runtime_safety(CodeGen *g, IrInstruction *instruction) {
g->build_mode != BuildModeSmallRelease);
}
+static bool ir_want_runtime_safety(CodeGen *g, IrInstruction *instruction) {
+ return ir_want_runtime_safety_scope(g, instruction->scope);
+}
+
static Buf *panic_msg_buf(PanicMsgId msg_id) {
switch (msg_id) {
case PanicMsgIdCount:
@@ -858,6 +907,18 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) {
return buf_create_from_str("integer part of floating point value out of bounds");
case PanicMsgIdPtrCastNull:
return buf_create_from_str("cast causes pointer to be null");
+ case PanicMsgIdBadResume:
+ return buf_create_from_str("resumed an async function which already returned");
+ case PanicMsgIdBadAwait:
+ return buf_create_from_str("async function awaited twice");
+ case PanicMsgIdBadReturn:
+ return buf_create_from_str("async function returned twice");
+ case PanicMsgIdResumedAnAwaitingFn:
+ return buf_create_from_str("awaiting function resumed");
+ case PanicMsgIdFrameTooSmall:
+ return buf_create_from_str("frame too small");
+ case PanicMsgIdResumedFnPendingAwait:
+ return buf_create_from_str("resumed an async function which can only be awaited");
}
zig_unreachable();
}
@@ -882,13 +943,16 @@ static LLVMValueRef get_panic_msg_ptr_val(CodeGen *g, PanicMsgId msg_id) {
return LLVMConstBitCast(val->global_refs->llvm_global, LLVMPointerType(get_llvm_type(g, str_type), 0));
}
+static ZigType *ptr_to_stack_trace_type(CodeGen *g) {
+ return get_pointer_to_type(g, get_stack_trace_type(g), false);
+}
+
static void gen_panic(CodeGen *g, LLVMValueRef msg_arg, LLVMValueRef stack_trace_arg) {
assert(g->panic_fn != nullptr);
LLVMValueRef fn_val = fn_llvm_value(g, g->panic_fn);
LLVMCallConv llvm_cc = get_llvm_cc(g, g->panic_fn->type_entry->data.fn.fn_type_id.cc);
if (stack_trace_arg == nullptr) {
- ZigType *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(g);
- stack_trace_arg = LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type));
+ stack_trace_arg = LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type(g)));
}
LLVMValueRef args[] = {
msg_arg,
@@ -904,14 +968,18 @@ static void gen_safety_crash(CodeGen *g, PanicMsgId msg_id) {
gen_panic(g, get_panic_msg_ptr_val(g, msg_id), nullptr);
}
-static void gen_assertion(CodeGen *g, PanicMsgId msg_id, IrInstruction *source_instruction) {
- if (ir_want_runtime_safety(g, source_instruction)) {
+static void gen_assertion_scope(CodeGen *g, PanicMsgId msg_id, Scope *source_scope) {
+ if (ir_want_runtime_safety_scope(g, source_scope)) {
gen_safety_crash(g, msg_id);
} else {
LLVMBuildUnreachable(g->builder);
}
}
+static void gen_assertion(CodeGen *g, PanicMsgId msg_id, IrInstruction *source_instruction) {
+ return gen_assertion_scope(g, msg_id, source_instruction->scope);
+}
+
static LLVMValueRef get_stacksave_fn_val(CodeGen *g) {
if (g->stacksave_fn_val)
return g->stacksave_fn_val;
@@ -959,177 +1027,6 @@ static LLVMValueRef get_write_register_fn_val(CodeGen *g) {
return g->write_register_fn_val;
}
-static LLVMValueRef get_coro_destroy_fn_val(CodeGen *g) {
- if (g->coro_destroy_fn_val)
- return g->coro_destroy_fn_val;
-
- LLVMTypeRef param_types[] = {
- LLVMPointerType(LLVMInt8Type(), 0),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(LLVMVoidType(), param_types, 1, false);
- Buf *name = buf_sprintf("llvm.coro.destroy");
- g->coro_destroy_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_destroy_fn_val));
-
- return g->coro_destroy_fn_val;
-}
-
-static LLVMValueRef get_coro_id_fn_val(CodeGen *g) {
- if (g->coro_id_fn_val)
- return g->coro_id_fn_val;
-
- LLVMTypeRef param_types[] = {
- LLVMInt32Type(),
- LLVMPointerType(LLVMInt8Type(), 0),
- LLVMPointerType(LLVMInt8Type(), 0),
- LLVMPointerType(LLVMInt8Type(), 0),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()), param_types, 4, false);
- Buf *name = buf_sprintf("llvm.coro.id");
- g->coro_id_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_id_fn_val));
-
- return g->coro_id_fn_val;
-}
-
-static LLVMValueRef get_coro_alloc_fn_val(CodeGen *g) {
- if (g->coro_alloc_fn_val)
- return g->coro_alloc_fn_val;
-
- LLVMTypeRef param_types[] = {
- ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(LLVMInt1Type(), param_types, 1, false);
- Buf *name = buf_sprintf("llvm.coro.alloc");
- g->coro_alloc_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_alloc_fn_val));
-
- return g->coro_alloc_fn_val;
-}
-
-static LLVMValueRef get_coro_size_fn_val(CodeGen *g) {
- if (g->coro_size_fn_val)
- return g->coro_size_fn_val;
-
- LLVMTypeRef fn_type = LLVMFunctionType(g->builtin_types.entry_usize->llvm_type, nullptr, 0, false);
- Buf *name = buf_sprintf("llvm.coro.size.i%d", g->pointer_size_bytes * 8);
- g->coro_size_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_size_fn_val));
-
- return g->coro_size_fn_val;
-}
-
-static LLVMValueRef get_coro_begin_fn_val(CodeGen *g) {
- if (g->coro_begin_fn_val)
- return g->coro_begin_fn_val;
-
- LLVMTypeRef param_types[] = {
- ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()),
- LLVMPointerType(LLVMInt8Type(), 0),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(LLVMPointerType(LLVMInt8Type(), 0), param_types, 2, false);
- Buf *name = buf_sprintf("llvm.coro.begin");
- g->coro_begin_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_begin_fn_val));
-
- return g->coro_begin_fn_val;
-}
-
-static LLVMValueRef get_coro_suspend_fn_val(CodeGen *g) {
- if (g->coro_suspend_fn_val)
- return g->coro_suspend_fn_val;
-
- LLVMTypeRef param_types[] = {
- ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()),
- LLVMInt1Type(),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(LLVMInt8Type(), param_types, 2, false);
- Buf *name = buf_sprintf("llvm.coro.suspend");
- g->coro_suspend_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_suspend_fn_val));
-
- return g->coro_suspend_fn_val;
-}
-
-static LLVMValueRef get_coro_end_fn_val(CodeGen *g) {
- if (g->coro_end_fn_val)
- return g->coro_end_fn_val;
-
- LLVMTypeRef param_types[] = {
- LLVMPointerType(LLVMInt8Type(), 0),
- LLVMInt1Type(),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(LLVMInt1Type(), param_types, 2, false);
- Buf *name = buf_sprintf("llvm.coro.end");
- g->coro_end_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_end_fn_val));
-
- return g->coro_end_fn_val;
-}
-
-static LLVMValueRef get_coro_free_fn_val(CodeGen *g) {
- if (g->coro_free_fn_val)
- return g->coro_free_fn_val;
-
- LLVMTypeRef param_types[] = {
- ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()),
- LLVMPointerType(LLVMInt8Type(), 0),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(LLVMPointerType(LLVMInt8Type(), 0), param_types, 2, false);
- Buf *name = buf_sprintf("llvm.coro.free");
- g->coro_free_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_free_fn_val));
-
- return g->coro_free_fn_val;
-}
-
-static LLVMValueRef get_coro_resume_fn_val(CodeGen *g) {
- if (g->coro_resume_fn_val)
- return g->coro_resume_fn_val;
-
- LLVMTypeRef param_types[] = {
- LLVMPointerType(LLVMInt8Type(), 0),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(LLVMVoidType(), param_types, 1, false);
- Buf *name = buf_sprintf("llvm.coro.resume");
- g->coro_resume_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_resume_fn_val));
-
- return g->coro_resume_fn_val;
-}
-
-static LLVMValueRef get_coro_save_fn_val(CodeGen *g) {
- if (g->coro_save_fn_val)
- return g->coro_save_fn_val;
-
- LLVMTypeRef param_types[] = {
- LLVMPointerType(LLVMInt8Type(), 0),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()), param_types, 1, false);
- Buf *name = buf_sprintf("llvm.coro.save");
- g->coro_save_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_save_fn_val));
-
- return g->coro_save_fn_val;
-}
-
-static LLVMValueRef get_coro_promise_fn_val(CodeGen *g) {
- if (g->coro_promise_fn_val)
- return g->coro_promise_fn_val;
-
- LLVMTypeRef param_types[] = {
- LLVMPointerType(LLVMInt8Type(), 0),
- LLVMInt32Type(),
- LLVMInt1Type(),
- };
- LLVMTypeRef fn_type = LLVMFunctionType(LLVMPointerType(LLVMInt8Type(), 0), param_types, 3, false);
- Buf *name = buf_sprintf("llvm.coro.promise");
- g->coro_promise_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_promise_fn_val));
-
- return g->coro_promise_fn_val;
-}
-
static LLVMValueRef get_return_address_fn_val(CodeGen *g) {
if (g->return_address_fn_val)
return g->return_address_fn_val;
@@ -1149,7 +1046,7 @@ static LLVMValueRef get_add_error_return_trace_addr_fn(CodeGen *g) {
return g->add_error_return_trace_addr_fn_val;
LLVMTypeRef arg_types[] = {
- get_llvm_type(g, get_ptr_to_stack_trace_type(g)),
+ get_llvm_type(g, ptr_to_stack_trace_type(g)),
g->builtin_types.entry_usize->llvm_type,
};
LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMVoidType(), arg_types, 2, false);
@@ -1164,7 +1061,7 @@ static LLVMValueRef get_add_error_return_trace_addr_fn(CodeGen *g) {
// Error return trace memory is in the stack, which is impossible to be at address 0
// on any architecture.
addLLVMArgAttr(fn_val, (unsigned)0, "nonnull");
- if (g->build_mode == BuildModeDebug) {
+ if (codegen_have_frame_pointer(g)) {
ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim", "true");
ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim-non-leaf", nullptr);
}
@@ -1222,140 +1119,6 @@ static LLVMValueRef get_add_error_return_trace_addr_fn(CodeGen *g) {
return fn_val;
}
-static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) {
- if (g->merge_err_ret_traces_fn_val)
- return g->merge_err_ret_traces_fn_val;
-
- assert(g->stack_trace_type != nullptr);
-
- LLVMTypeRef param_types[] = {
- get_llvm_type(g, get_ptr_to_stack_trace_type(g)),
- get_llvm_type(g, get_ptr_to_stack_trace_type(g)),
- };
- LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMVoidType(), param_types, 2, false);
-
- Buf *fn_name = get_mangled_name(g, buf_create_from_str("__zig_merge_error_return_traces"), false);
- LLVMValueRef fn_val = LLVMAddFunction(g->module, buf_ptr(fn_name), fn_type_ref);
- LLVMSetLinkage(fn_val, LLVMInternalLinkage);
- LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified));
- addLLVMFnAttr(fn_val, "nounwind");
- add_uwtable_attr(g, fn_val);
- // Error return trace memory is in the stack, which is impossible to be at address 0
- // on any architecture.
- addLLVMArgAttr(fn_val, (unsigned)0, "nonnull");
- addLLVMArgAttr(fn_val, (unsigned)0, "noalias");
- addLLVMArgAttr(fn_val, (unsigned)0, "writeonly");
- // Error return trace memory is in the stack, which is impossible to be at address 0
- // on any architecture.
- addLLVMArgAttr(fn_val, (unsigned)1, "nonnull");
- addLLVMArgAttr(fn_val, (unsigned)1, "noalias");
- addLLVMArgAttr(fn_val, (unsigned)1, "readonly");
- if (g->build_mode == BuildModeDebug) {
- ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim", "true");
- ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim-non-leaf", nullptr);
- }
-
- // this is above the ZigLLVMClearCurrentDebugLocation
- LLVMValueRef add_error_return_trace_addr_fn_val = get_add_error_return_trace_addr_fn(g);
-
- LLVMBasicBlockRef entry_block = LLVMAppendBasicBlock(fn_val, "Entry");
- LLVMBasicBlockRef prev_block = LLVMGetInsertBlock(g->builder);
- LLVMValueRef prev_debug_location = LLVMGetCurrentDebugLocation(g->builder);
- LLVMPositionBuilderAtEnd(g->builder, entry_block);
- ZigLLVMClearCurrentDebugLocation(g->builder);
-
- // var frame_index: usize = undefined;
- // var frames_left: usize = undefined;
- // if (src_stack_trace.index < src_stack_trace.instruction_addresses.len) {
- // frame_index = 0;
- // frames_left = src_stack_trace.index;
- // if (frames_left == 0) return;
- // } else {
- // frame_index = (src_stack_trace.index + 1) % src_stack_trace.instruction_addresses.len;
- // frames_left = src_stack_trace.instruction_addresses.len;
- // }
- // while (true) {
- // __zig_add_err_ret_trace_addr(dest_stack_trace, src_stack_trace.instruction_addresses[frame_index]);
- // frames_left -= 1;
- // if (frames_left == 0) return;
- // frame_index = (frame_index + 1) % src_stack_trace.instruction_addresses.len;
- // }
- LLVMBasicBlockRef return_block = LLVMAppendBasicBlock(fn_val, "Return");
-
- LLVMValueRef frame_index_ptr = LLVMBuildAlloca(g->builder, g->builtin_types.entry_usize->llvm_type, "frame_index");
- LLVMValueRef frames_left_ptr = LLVMBuildAlloca(g->builder, g->builtin_types.entry_usize->llvm_type, "frames_left");
-
- LLVMValueRef dest_stack_trace_ptr = LLVMGetParam(fn_val, 0);
- LLVMValueRef src_stack_trace_ptr = LLVMGetParam(fn_val, 1);
-
- size_t src_index_field_index = g->stack_trace_type->data.structure.fields[0].gen_index;
- size_t src_addresses_field_index = g->stack_trace_type->data.structure.fields[1].gen_index;
- LLVMValueRef src_index_field_ptr = LLVMBuildStructGEP(g->builder, src_stack_trace_ptr,
- (unsigned)src_index_field_index, "");
- LLVMValueRef src_addresses_field_ptr = LLVMBuildStructGEP(g->builder, src_stack_trace_ptr,
- (unsigned)src_addresses_field_index, "");
- ZigType *slice_type = g->stack_trace_type->data.structure.fields[1].type_entry;
- size_t ptr_field_index = slice_type->data.structure.fields[slice_ptr_index].gen_index;
- LLVMValueRef src_ptr_field_ptr = LLVMBuildStructGEP(g->builder, src_addresses_field_ptr, (unsigned)ptr_field_index, "");
- size_t len_field_index = slice_type->data.structure.fields[slice_len_index].gen_index;
- LLVMValueRef src_len_field_ptr = LLVMBuildStructGEP(g->builder, src_addresses_field_ptr, (unsigned)len_field_index, "");
- LLVMValueRef src_index_val = LLVMBuildLoad(g->builder, src_index_field_ptr, "");
- LLVMValueRef src_ptr_val = LLVMBuildLoad(g->builder, src_ptr_field_ptr, "");
- LLVMValueRef src_len_val = LLVMBuildLoad(g->builder, src_len_field_ptr, "");
- LLVMValueRef no_wrap_bit = LLVMBuildICmp(g->builder, LLVMIntULT, src_index_val, src_len_val, "");
- LLVMBasicBlockRef no_wrap_block = LLVMAppendBasicBlock(fn_val, "NoWrap");
- LLVMBasicBlockRef yes_wrap_block = LLVMAppendBasicBlock(fn_val, "YesWrap");
- LLVMBasicBlockRef loop_block = LLVMAppendBasicBlock(fn_val, "Loop");
- LLVMBuildCondBr(g->builder, no_wrap_bit, no_wrap_block, yes_wrap_block);
-
- LLVMPositionBuilderAtEnd(g->builder, no_wrap_block);
- LLVMValueRef usize_zero = LLVMConstNull(g->builtin_types.entry_usize->llvm_type);
- LLVMBuildStore(g->builder, usize_zero, frame_index_ptr);
- LLVMBuildStore(g->builder, src_index_val, frames_left_ptr);
- LLVMValueRef frames_left_eq_zero_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, src_index_val, usize_zero, "");
- LLVMBuildCondBr(g->builder, frames_left_eq_zero_bit, return_block, loop_block);
-
- LLVMPositionBuilderAtEnd(g->builder, yes_wrap_block);
- LLVMValueRef usize_one = LLVMConstInt(g->builtin_types.entry_usize->llvm_type, 1, false);
- LLVMValueRef plus_one = LLVMBuildNUWAdd(g->builder, src_index_val, usize_one, "");
- LLVMValueRef mod_len = LLVMBuildURem(g->builder, plus_one, src_len_val, "");
- LLVMBuildStore(g->builder, mod_len, frame_index_ptr);
- LLVMBuildStore(g->builder, src_len_val, frames_left_ptr);
- LLVMBuildBr(g->builder, loop_block);
-
- LLVMPositionBuilderAtEnd(g->builder, loop_block);
- LLVMValueRef ptr_index = LLVMBuildLoad(g->builder, frame_index_ptr, "");
- LLVMValueRef addr_ptr = LLVMBuildInBoundsGEP(g->builder, src_ptr_val, &ptr_index, 1, "");
- LLVMValueRef this_addr_val = LLVMBuildLoad(g->builder, addr_ptr, "");
- LLVMValueRef args[] = {dest_stack_trace_ptr, this_addr_val};
- ZigLLVMBuildCall(g->builder, add_error_return_trace_addr_fn_val, args, 2, get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAlways, "");
- LLVMValueRef prev_frames_left = LLVMBuildLoad(g->builder, frames_left_ptr, "");
- LLVMValueRef new_frames_left = LLVMBuildNUWSub(g->builder, prev_frames_left, usize_one, "");
- LLVMValueRef done_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, new_frames_left, usize_zero, "");
- LLVMBasicBlockRef continue_block = LLVMAppendBasicBlock(fn_val, "Continue");
- LLVMBuildCondBr(g->builder, done_bit, return_block, continue_block);
-
- LLVMPositionBuilderAtEnd(g->builder, return_block);
- LLVMBuildRetVoid(g->builder);
-
- LLVMPositionBuilderAtEnd(g->builder, continue_block);
- LLVMBuildStore(g->builder, new_frames_left, frames_left_ptr);
- LLVMValueRef prev_index = LLVMBuildLoad(g->builder, frame_index_ptr, "");
- LLVMValueRef index_plus_one = LLVMBuildNUWAdd(g->builder, prev_index, usize_one, "");
- LLVMValueRef index_mod_len = LLVMBuildURem(g->builder, index_plus_one, src_len_val, "");
- LLVMBuildStore(g->builder, index_mod_len, frame_index_ptr);
- LLVMBuildBr(g->builder, loop_block);
-
- LLVMPositionBuilderAtEnd(g->builder, prev_block);
- if (!g->strip_debug_symbols) {
- LLVMSetCurrentDebugLocation(g->builder, prev_debug_location);
- }
-
- g->merge_err_ret_traces_fn_val = fn_val;
- return fn_val;
-
-}
-
static LLVMValueRef get_return_err_fn(CodeGen *g) {
if (g->return_err_fn != nullptr)
return g->return_err_fn;
@@ -1364,7 +1127,7 @@ static LLVMValueRef get_return_err_fn(CodeGen *g) {
LLVMTypeRef arg_types[] = {
// error return trace pointer
- get_llvm_type(g, get_ptr_to_stack_trace_type(g)),
+ get_llvm_type(g, ptr_to_stack_trace_type(g)),
};
LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMVoidType(), arg_types, 1, false);
@@ -1376,10 +1139,7 @@ static LLVMValueRef get_return_err_fn(CodeGen *g) {
LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified));
addLLVMFnAttr(fn_val, "nounwind");
add_uwtable_attr(g, fn_val);
- // Error return trace memory is in the stack, which is impossible to be at address 0
- // on any architecture.
- addLLVMArgAttr(fn_val, (unsigned)0, "nonnull");
- if (g->build_mode == BuildModeDebug) {
+ if (codegen_have_frame_pointer(g)) {
ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim", "true");
ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim-non-leaf", nullptr);
}
@@ -1400,6 +1160,17 @@ static LLVMValueRef get_return_err_fn(CodeGen *g) {
LLVMValueRef return_address_ptr = LLVMBuildCall(g->builder, get_return_address_fn_val(g), &zero, 1, "");
LLVMValueRef return_address = LLVMBuildPtrToInt(g->builder, return_address_ptr, usize_type_ref, "");
+ LLVMBasicBlockRef return_block = LLVMAppendBasicBlock(fn_val, "Return");
+ LLVMBasicBlockRef dest_non_null_block = LLVMAppendBasicBlock(fn_val, "DestNonNull");
+
+ LLVMValueRef null_dest_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, err_ret_trace_ptr,
+ LLVMConstNull(LLVMTypeOf(err_ret_trace_ptr)), "");
+ LLVMBuildCondBr(g->builder, null_dest_bit, return_block, dest_non_null_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, return_block);
+ LLVMBuildRetVoid(g->builder);
+
+ LLVMPositionBuilderAtEnd(g->builder, dest_non_null_block);
LLVMValueRef args[] = { err_ret_trace_ptr, return_address };
ZigLLVMBuildCall(g->builder, add_error_return_trace_addr_fn_val, args, 2, get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAlways, "");
LLVMBuildRetVoid(g->builder);
@@ -1434,7 +1205,7 @@ static LLVMValueRef get_safety_crash_err_fn(CodeGen *g) {
LLVMTypeRef fn_type_ref;
if (g->have_err_ret_tracing) {
LLVMTypeRef arg_types[] = {
- get_llvm_type(g, g->ptr_to_stack_trace_type),
+ get_llvm_type(g, get_pointer_to_type(g, get_stack_trace_type(g), false)),
get_llvm_type(g, g->err_tag_type),
};
fn_type_ref = LLVMFunctionType(LLVMVoidType(), arg_types, 2, false);
@@ -1451,7 +1222,7 @@ static LLVMValueRef get_safety_crash_err_fn(CodeGen *g) {
LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified));
addLLVMFnAttr(fn_val, "nounwind");
add_uwtable_attr(g, fn_val);
- if (g->build_mode == BuildModeDebug) {
+ if (codegen_have_frame_pointer(g)) {
ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim", "true");
ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim-non-leaf", nullptr);
}
@@ -1543,25 +1314,10 @@ static LLVMValueRef get_safety_crash_err_fn(CodeGen *g) {
return fn_val;
}
-static bool is_coro_prelude_scope(Scope *scope) {
- while (scope != nullptr) {
- if (scope->id == ScopeIdCoroPrelude) {
- return true;
- } else if (scope->id == ScopeIdFnDef) {
- break;
- }
- scope = scope->parent;
- }
- return false;
-}
-
static LLVMValueRef get_cur_err_ret_trace_val(CodeGen *g, Scope *scope) {
if (!g->have_err_ret_tracing) {
return nullptr;
}
- if (g->cur_fn->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync) {
- return is_coro_prelude_scope(scope) ? g->cur_err_ret_trace_val_arg : g->cur_err_ret_trace_val_stack;
- }
if (g->cur_err_ret_trace_val_stack != nullptr) {
return g->cur_err_ret_trace_val_stack;
}
@@ -1574,8 +1330,7 @@ static void gen_safety_crash_for_err(CodeGen *g, LLVMValueRef err_val, Scope *sc
if (g->have_err_ret_tracing) {
LLVMValueRef err_ret_trace_val = get_cur_err_ret_trace_val(g, scope);
if (err_ret_trace_val == nullptr) {
- ZigType *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(g);
- err_ret_trace_val = LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type));
+ err_ret_trace_val = LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type(g)));
}
LLVMValueRef args[] = {
err_ret_trace_val,
@@ -1820,14 +1575,14 @@ static LLVMRealPredicate cmp_op_to_real_predicate(IrBinOp cmp_op) {
}
}
-static LLVMValueRef gen_assign_raw(CodeGen *g, LLVMValueRef ptr, ZigType *ptr_type,
+static void gen_assign_raw(CodeGen *g, LLVMValueRef ptr, ZigType *ptr_type,
LLVMValueRef value)
{
assert(ptr_type->id == ZigTypeIdPointer);
ZigType *child_type = ptr_type->data.pointer.child_type;
if (!type_has_bits(child_type))
- return nullptr;
+ return;
if (handle_is_ptr(child_type)) {
assert(LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMPointerTypeKind);
@@ -1847,13 +1602,13 @@ static LLVMValueRef gen_assign_raw(CodeGen *g, LLVMValueRef ptr, ZigType *ptr_ty
ZigLLVMBuildMemCpy(g->builder, dest_ptr, align_bytes, src_ptr, align_bytes,
LLVMConstInt(usize->llvm_type, size_bytes, false),
ptr_type->data.pointer.is_volatile);
- return nullptr;
+ return;
}
uint32_t host_int_bytes = ptr_type->data.pointer.host_int_bytes;
if (host_int_bytes == 0) {
gen_store(g, value, ptr, ptr_type);
- return nullptr;
+ return;
}
bool big_endian = g->is_big_endian;
@@ -1883,7 +1638,7 @@ static LLVMValueRef gen_assign_raw(CodeGen *g, LLVMValueRef ptr, ZigType *ptr_ty
LLVMValueRef ored_value = LLVMBuildOr(g->builder, shifted_value, anded_containing_int, "");
gen_store(g, ored_value, ptr, ptr_type);
- return nullptr;
+ return;
}
static void gen_var_debug_decl(CodeGen *g, ZigVar *var) {
@@ -1967,7 +1722,7 @@ static bool iter_function_params_c_abi(CodeGen *g, ZigType *fn_type, FnWalk *fn_
param_info = &fn_type->data.fn.fn_type_id.param_info[src_i];
ty = param_info->type;
source_node = fn_walk->data.attrs.fn->proto_node;
- llvm_fn = fn_walk->data.attrs.fn->llvm_value;
+ llvm_fn = fn_walk->data.attrs.llvm_fn;
break;
case FnWalkIdCall: {
if (src_i >= fn_walk->data.call.inst->arg_count)
@@ -2149,10 +1904,12 @@ static bool iter_function_params_c_abi(CodeGen *g, ZigType *fn_type, FnWalk *fn_
}
case FnWalkIdInits: {
clear_debug_source_node(g);
- LLVMValueRef arg = LLVMGetParam(llvm_fn, fn_walk->data.inits.gen_i);
- LLVMTypeRef ptr_to_int_type_ref = LLVMPointerType(LLVMIntType((unsigned)ty_size * 8), 0);
- LLVMValueRef bitcasted = LLVMBuildBitCast(g->builder, var->value_ref, ptr_to_int_type_ref, "");
- gen_store_untyped(g, arg, bitcasted, var->align_bytes, false);
+ if (!fn_is_async(fn_walk->data.inits.fn)) {
+ LLVMValueRef arg = LLVMGetParam(llvm_fn, fn_walk->data.inits.gen_i);
+ LLVMTypeRef ptr_to_int_type_ref = LLVMPointerType(LLVMIntType((unsigned)ty_size * 8), 0);
+ LLVMValueRef bitcasted = LLVMBuildBitCast(g->builder, var->value_ref, ptr_to_int_type_ref, "");
+ gen_store_untyped(g, arg, bitcasted, var->align_bytes, false);
+ }
if (var->decl_node) {
gen_var_debug_decl(g, var);
}
@@ -2201,6 +1958,7 @@ void walk_function_params(CodeGen *g, ZigType *fn_type, FnWalk *fn_walk) {
LLVMValueRef param_value = ir_llvm_value(g, param_instruction);
assert(param_value);
fn_walk->data.call.gen_param_values->append(param_value);
+ fn_walk->data.call.gen_param_types->append(param_type);
}
}
return;
@@ -2216,7 +1974,7 @@ void walk_function_params(CodeGen *g, ZigType *fn_type, FnWalk *fn_walk) {
switch (fn_walk->id) {
case FnWalkIdAttrs: {
- LLVMValueRef llvm_fn = fn_walk->data.attrs.fn->llvm_value;
+ LLVMValueRef llvm_fn = fn_walk->data.attrs.llvm_fn;
bool is_byval = gen_info->is_byval;
FnTypeParamInfo *param_info = &fn_type->data.fn.fn_type_id.param_info[param_i];
@@ -2245,7 +2003,7 @@ void walk_function_params(CodeGen *g, ZigType *fn_type, FnWalk *fn_walk) {
assert(variable);
assert(variable->value_ref);
- if (!handle_is_ptr(variable->var_type)) {
+ if (!handle_is_ptr(variable->var_type) && !fn_is_async(fn_walk->data.inits.fn)) {
clear_debug_source_node(g);
ZigType *fn_type = fn_table_entry->type_entry;
unsigned gen_arg_index = fn_type->data.fn.gen_param_info[variable->src_arg_index].gen_index;
@@ -2271,48 +2029,357 @@ void walk_function_params(CodeGen *g, ZigType *fn_type, FnWalk *fn_walk) {
}
}
+static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) {
+ if (g->merge_err_ret_traces_fn_val)
+ return g->merge_err_ret_traces_fn_val;
+
+ assert(g->stack_trace_type != nullptr);
+
+ LLVMTypeRef param_types[] = {
+ get_llvm_type(g, ptr_to_stack_trace_type(g)),
+ get_llvm_type(g, ptr_to_stack_trace_type(g)),
+ };
+ LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMVoidType(), param_types, 2, false);
+
+ Buf *fn_name = get_mangled_name(g, buf_create_from_str("__zig_merge_error_return_traces"), false);
+ LLVMValueRef fn_val = LLVMAddFunction(g->module, buf_ptr(fn_name), fn_type_ref);
+ LLVMSetLinkage(fn_val, LLVMInternalLinkage);
+ LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified));
+ addLLVMFnAttr(fn_val, "nounwind");
+ add_uwtable_attr(g, fn_val);
+ addLLVMArgAttr(fn_val, (unsigned)0, "noalias");
+ addLLVMArgAttr(fn_val, (unsigned)0, "writeonly");
+
+ addLLVMArgAttr(fn_val, (unsigned)1, "noalias");
+ addLLVMArgAttr(fn_val, (unsigned)1, "readonly");
+ if (g->build_mode == BuildModeDebug) {
+ ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim", "true");
+ ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim-non-leaf", nullptr);
+ }
+
+ // this is above the ZigLLVMClearCurrentDebugLocation
+ LLVMValueRef add_error_return_trace_addr_fn_val = get_add_error_return_trace_addr_fn(g);
+
+ LLVMBasicBlockRef entry_block = LLVMAppendBasicBlock(fn_val, "Entry");
+ LLVMBasicBlockRef prev_block = LLVMGetInsertBlock(g->builder);
+ LLVMValueRef prev_debug_location = LLVMGetCurrentDebugLocation(g->builder);
+ LLVMPositionBuilderAtEnd(g->builder, entry_block);
+ ZigLLVMClearCurrentDebugLocation(g->builder);
+
+ // if (dest_stack_trace == null or src_stack_trace == null) return;
+ // var frame_index: usize = undefined;
+ // var frames_left: usize = undefined;
+ // if (src_stack_trace.index < src_stack_trace.instruction_addresses.len) {
+ // frame_index = 0;
+ // frames_left = src_stack_trace.index;
+ // if (frames_left == 0) return;
+ // } else {
+ // frame_index = (src_stack_trace.index + 1) % src_stack_trace.instruction_addresses.len;
+ // frames_left = src_stack_trace.instruction_addresses.len;
+ // }
+ // while (true) {
+ // __zig_add_err_ret_trace_addr(dest_stack_trace, src_stack_trace.instruction_addresses[frame_index]);
+ // frames_left -= 1;
+ // if (frames_left == 0) return;
+ // frame_index = (frame_index + 1) % src_stack_trace.instruction_addresses.len;
+ // }
+ LLVMBasicBlockRef return_block = LLVMAppendBasicBlock(fn_val, "Return");
+ LLVMBasicBlockRef non_null_block = LLVMAppendBasicBlock(fn_val, "NonNull");
+
+ LLVMValueRef frame_index_ptr = LLVMBuildAlloca(g->builder, g->builtin_types.entry_usize->llvm_type, "frame_index");
+ LLVMValueRef frames_left_ptr = LLVMBuildAlloca(g->builder, g->builtin_types.entry_usize->llvm_type, "frames_left");
+
+ LLVMValueRef dest_stack_trace_ptr = LLVMGetParam(fn_val, 0);
+ LLVMValueRef src_stack_trace_ptr = LLVMGetParam(fn_val, 1);
+
+ LLVMValueRef null_dest_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, dest_stack_trace_ptr,
+ LLVMConstNull(LLVMTypeOf(dest_stack_trace_ptr)), "");
+ LLVMValueRef null_src_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, src_stack_trace_ptr,
+ LLVMConstNull(LLVMTypeOf(src_stack_trace_ptr)), "");
+ LLVMValueRef null_bit = LLVMBuildOr(g->builder, null_dest_bit, null_src_bit, "");
+ LLVMBuildCondBr(g->builder, null_bit, return_block, non_null_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, non_null_block);
+ size_t src_index_field_index = g->stack_trace_type->data.structure.fields[0].gen_index;
+ size_t src_addresses_field_index = g->stack_trace_type->data.structure.fields[1].gen_index;
+ LLVMValueRef src_index_field_ptr = LLVMBuildStructGEP(g->builder, src_stack_trace_ptr,
+ (unsigned)src_index_field_index, "");
+ LLVMValueRef src_addresses_field_ptr = LLVMBuildStructGEP(g->builder, src_stack_trace_ptr,
+ (unsigned)src_addresses_field_index, "");
+ ZigType *slice_type = g->stack_trace_type->data.structure.fields[1].type_entry;
+ size_t ptr_field_index = slice_type->data.structure.fields[slice_ptr_index].gen_index;
+ LLVMValueRef src_ptr_field_ptr = LLVMBuildStructGEP(g->builder, src_addresses_field_ptr, (unsigned)ptr_field_index, "");
+ size_t len_field_index = slice_type->data.structure.fields[slice_len_index].gen_index;
+ LLVMValueRef src_len_field_ptr = LLVMBuildStructGEP(g->builder, src_addresses_field_ptr, (unsigned)len_field_index, "");
+ LLVMValueRef src_index_val = LLVMBuildLoad(g->builder, src_index_field_ptr, "");
+ LLVMValueRef src_ptr_val = LLVMBuildLoad(g->builder, src_ptr_field_ptr, "");
+ LLVMValueRef src_len_val = LLVMBuildLoad(g->builder, src_len_field_ptr, "");
+ LLVMValueRef no_wrap_bit = LLVMBuildICmp(g->builder, LLVMIntULT, src_index_val, src_len_val, "");
+ LLVMBasicBlockRef no_wrap_block = LLVMAppendBasicBlock(fn_val, "NoWrap");
+ LLVMBasicBlockRef yes_wrap_block = LLVMAppendBasicBlock(fn_val, "YesWrap");
+ LLVMBasicBlockRef loop_block = LLVMAppendBasicBlock(fn_val, "Loop");
+ LLVMBuildCondBr(g->builder, no_wrap_bit, no_wrap_block, yes_wrap_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, no_wrap_block);
+ LLVMValueRef usize_zero = LLVMConstNull(g->builtin_types.entry_usize->llvm_type);
+ LLVMBuildStore(g->builder, usize_zero, frame_index_ptr);
+ LLVMBuildStore(g->builder, src_index_val, frames_left_ptr);
+ LLVMValueRef frames_left_eq_zero_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, src_index_val, usize_zero, "");
+ LLVMBuildCondBr(g->builder, frames_left_eq_zero_bit, return_block, loop_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, yes_wrap_block);
+ LLVMValueRef usize_one = LLVMConstInt(g->builtin_types.entry_usize->llvm_type, 1, false);
+ LLVMValueRef plus_one = LLVMBuildNUWAdd(g->builder, src_index_val, usize_one, "");
+ LLVMValueRef mod_len = LLVMBuildURem(g->builder, plus_one, src_len_val, "");
+ LLVMBuildStore(g->builder, mod_len, frame_index_ptr);
+ LLVMBuildStore(g->builder, src_len_val, frames_left_ptr);
+ LLVMBuildBr(g->builder, loop_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, loop_block);
+ LLVMValueRef ptr_index = LLVMBuildLoad(g->builder, frame_index_ptr, "");
+ LLVMValueRef addr_ptr = LLVMBuildInBoundsGEP(g->builder, src_ptr_val, &ptr_index, 1, "");
+ LLVMValueRef this_addr_val = LLVMBuildLoad(g->builder, addr_ptr, "");
+ LLVMValueRef args[] = {dest_stack_trace_ptr, this_addr_val};
+ ZigLLVMBuildCall(g->builder, add_error_return_trace_addr_fn_val, args, 2, get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAlways, "");
+ LLVMValueRef prev_frames_left = LLVMBuildLoad(g->builder, frames_left_ptr, "");
+ LLVMValueRef new_frames_left = LLVMBuildNUWSub(g->builder, prev_frames_left, usize_one, "");
+ LLVMValueRef done_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, new_frames_left, usize_zero, "");
+ LLVMBasicBlockRef continue_block = LLVMAppendBasicBlock(fn_val, "Continue");
+ LLVMBuildCondBr(g->builder, done_bit, return_block, continue_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, return_block);
+ LLVMBuildRetVoid(g->builder);
+
+ LLVMPositionBuilderAtEnd(g->builder, continue_block);
+ LLVMBuildStore(g->builder, new_frames_left, frames_left_ptr);
+ LLVMValueRef prev_index = LLVMBuildLoad(g->builder, frame_index_ptr, "");
+ LLVMValueRef index_plus_one = LLVMBuildNUWAdd(g->builder, prev_index, usize_one, "");
+ LLVMValueRef index_mod_len = LLVMBuildURem(g->builder, index_plus_one, src_len_val, "");
+ LLVMBuildStore(g->builder, index_mod_len, frame_index_ptr);
+ LLVMBuildBr(g->builder, loop_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, prev_block);
+ if (!g->strip_debug_symbols) {
+ LLVMSetCurrentDebugLocation(g->builder, prev_debug_location);
+ }
+
+ g->merge_err_ret_traces_fn_val = fn_val;
+ return fn_val;
+
+}
static LLVMValueRef ir_render_save_err_ret_addr(CodeGen *g, IrExecutable *executable,
IrInstructionSaveErrRetAddr *save_err_ret_addr_instruction)
{
assert(g->have_err_ret_tracing);
LLVMValueRef return_err_fn = get_return_err_fn(g);
- LLVMValueRef args[] = {
- get_cur_err_ret_trace_val(g, save_err_ret_addr_instruction->base.scope),
- };
- LLVMValueRef call_instruction = ZigLLVMBuildCall(g->builder, return_err_fn, args, 1,
+ LLVMValueRef my_err_trace_val = get_cur_err_ret_trace_val(g, save_err_ret_addr_instruction->base.scope);
+ ZigLLVMBuildCall(g->builder, return_err_fn, &my_err_trace_val, 1,
get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
- return call_instruction;
+
+ ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type;
+ if (fn_is_async(g->cur_fn) && codegen_fn_has_err_ret_tracing_arg(g, ret_type)) {
+ LLVMValueRef trace_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
+ frame_index_trace_arg(g, ret_type), "");
+ LLVMBuildStore(g->builder, my_err_trace_val, trace_ptr_ptr);
+ }
+
+ return nullptr;
}
-static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrInstructionReturn *return_instruction) {
+static void gen_assert_resume_id(CodeGen *g, IrInstruction *source_instr, ResumeId resume_id, PanicMsgId msg_id,
+ LLVMBasicBlockRef end_bb)
+{
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
+ LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadResume");
+ if (end_bb == nullptr) end_bb = LLVMAppendBasicBlock(g->cur_fn_val, "OkResume");
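+    // Resume ids are encoded as maxInt(usize) - resume_id; compute the value we expect the
+    // resume argument (parameter 1) to hold.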
+ LLVMValueRef expected_value = LLVMConstSub(LLVMConstAllOnes(usize_type_ref),
+ LLVMConstInt(usize_type_ref, resume_id, false));
+ LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, LLVMGetParam(g->cur_fn_val, 1), expected_value, "");
+ LLVMBuildCondBr(g->builder, ok_bit, end_bb, bad_resume_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, bad_resume_block);
+ gen_assertion(g, msg_id, source_instr);
+
+ LLVMPositionBuilderAtEnd(g->builder, end_bb);
+}
+
+static LLVMValueRef gen_resume(CodeGen *g, LLVMValueRef fn_val, LLVMValueRef target_frame_ptr, ResumeId resume_id) {
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
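+    // When no function value is given, load the resume function pointer out of the target frame.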
+ if (fn_val == nullptr) {
+ LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_fn_ptr_index, "");
+ fn_val = LLVMBuildLoad(g->builder, fn_ptr_ptr, "");
+ }
+ LLVMValueRef arg_val = LLVMBuildSub(g->builder, LLVMConstAllOnes(usize_type_ref),
+ LLVMConstInt(usize_type_ref, resume_id, false), "");
+ LLVMValueRef args[] = {target_frame_ptr, arg_val};
+ return ZigLLVMBuildCall(g->builder, fn_val, args, 2, LLVMFastCallConv, ZigLLVM_FnInlineAuto, "");
+}
+
+static LLVMBasicBlockRef gen_suspend_begin(CodeGen *g, const char *name_hint) {
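+    // Append a resume block, give it a fresh resume index, add that index as a case in the async
+    // dispatch switch, and store it so the next resume of this function continues at the new block.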
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
+ LLVMBasicBlockRef resume_bb = LLVMAppendBasicBlock(g->cur_fn_val, name_hint);
+ size_t new_block_index = g->cur_resume_block_count;
+ g->cur_resume_block_count += 1;
+ LLVMValueRef new_block_index_val = LLVMConstInt(usize_type_ref, new_block_index, false);
+ LLVMAddCase(g->cur_async_switch_instr, new_block_index_val, resume_bb);
+ LLVMBuildStore(g->builder, new_block_index_val, g->cur_async_resume_index_ptr);
+ return resume_bb;
+}
+
+static void set_tail_call_if_appropriate(CodeGen *g, LLVMValueRef call_inst) {
+ LLVMSetTailCall(call_inst, true);
+}
+
+static LLVMValueRef gen_maybe_atomic_op(CodeGen *g, LLVMAtomicRMWBinOp op, LLVMValueRef ptr, LLVMValueRef val,
+ LLVMAtomicOrdering order)
+{
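+    // In single-threaded builds the atomic RMW can be lowered to a plain load/modify/store.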
+ if (g->is_single_threaded) {
+ LLVMValueRef loaded = LLVMBuildLoad(g->builder, ptr, "");
+ LLVMValueRef modified;
+ switch (op) {
+ case LLVMAtomicRMWBinOpXchg:
+ modified = val;
+ break;
+ case LLVMAtomicRMWBinOpXor:
+ modified = LLVMBuildXor(g->builder, loaded, val, "");
+ break;
+ default:
+ zig_unreachable();
+ }
+ LLVMBuildStore(g->builder, modified, ptr);
+ return loaded;
+ } else {
+ return LLVMBuildAtomicRMW(g->builder, op, ptr, val, order, false);
+ }
+}
+
+static void gen_async_return(CodeGen *g, IrInstructionReturn *instruction) {
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
+
+ ZigType *operand_type = (instruction->operand != nullptr) ? instruction->operand->value.type : nullptr;
+ bool operand_has_bits = (operand_type != nullptr) && type_has_bits(operand_type);
+ ZigType *ret_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type;
+ bool ret_type_has_bits = type_has_bits(ret_type);
+
+ if (operand_has_bits && instruction->operand != nullptr) {
+ bool need_store = instruction->operand->value.special != ConstValSpecialRuntime || !handle_is_ptr(ret_type);
+ if (need_store) {
+            // The operand was not already written through the result pointer; do that now.
+ ZigType *ret_ptr_type = get_pointer_to_type(g, ret_type, true);
+ gen_assign_raw(g, g->cur_ret_ptr, ret_ptr_type, ir_llvm_value(g, instruction->operand));
+ }
+ }
+
+    // Whether we tail-resume the awaiter or return early, this frame is done and will not be resumed again.
+ if (ir_want_runtime_safety(g, &instruction->base)) {
+ LLVMValueRef new_resume_index = LLVMConstAllOnes(usize_type_ref);
+ LLVMBuildStore(g->builder, new_resume_index, g->cur_async_resume_index_ptr);
+ }
+
+ LLVMValueRef zero = LLVMConstNull(usize_type_ref);
+ LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref);
+
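+    // Atomically mark the frame as returned by xoring the awaiter field with all ones; the previous
+    // value tells us whether an awaiter is registered (a frame address), nobody awaited yet (zero),
+    // or this is an invalid second return (all ones).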
+ LLVMValueRef prev_val = gen_maybe_atomic_op(g, LLVMAtomicRMWBinOpXor, g->cur_async_awaiter_ptr,
+ all_ones, LLVMAtomicOrderingAcquire);
+
+ LLVMBasicBlockRef bad_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadReturn");
+ LLVMBasicBlockRef early_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "EarlyReturn");
+ LLVMBasicBlockRef resume_them_block = LLVMAppendBasicBlock(g->cur_fn_val, "ResumeThem");
+
+ LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, prev_val, resume_them_block, 2);
+
+ LLVMAddCase(switch_instr, zero, early_return_block);
+ LLVMAddCase(switch_instr, all_ones, bad_return_block);
+
+ // Something has gone horribly wrong, and this is an invalid second return.
+ LLVMPositionBuilderAtEnd(g->builder, bad_return_block);
+ gen_assertion(g, PanicMsgIdBadReturn, &instruction->base);
+
+ // There is no awaiter yet, but we're completely done.
+ LLVMPositionBuilderAtEnd(g->builder, early_return_block);
+ LLVMBuildRetVoid(g->builder);
+
+    // We need to resume the caller by tail-calling it, but first we write through
+    // the result pointer and, if present, the error return trace pointer.
+ LLVMPositionBuilderAtEnd(g->builder, resume_them_block);
+
+ if (ret_type_has_bits) {
+        // If the awaiter result pointer is non-null, we need to copy the result there.
+ LLVMBasicBlockRef copy_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResult");
+ LLVMBasicBlockRef copy_end_block = LLVMAppendBasicBlock(g->cur_fn_val, "CopyResultEnd");
+ LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, frame_ret_start + 1, "");
+ LLVMValueRef awaiter_ret_ptr = LLVMBuildLoad(g->builder, awaiter_ret_ptr_ptr, "");
+ LLVMValueRef zero_ptr = LLVMConstNull(LLVMTypeOf(awaiter_ret_ptr));
+ LLVMValueRef need_copy_bit = LLVMBuildICmp(g->builder, LLVMIntNE, awaiter_ret_ptr, zero_ptr, "");
+ LLVMBuildCondBr(g->builder, need_copy_bit, copy_block, copy_end_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, copy_block);
+ LLVMTypeRef ptr_u8 = LLVMPointerType(LLVMInt8Type(), 0);
+ LLVMValueRef dest_ptr_casted = LLVMBuildBitCast(g->builder, awaiter_ret_ptr, ptr_u8, "");
+ LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, g->cur_ret_ptr, ptr_u8, "");
+ bool is_volatile = false;
+ uint32_t abi_align = get_abi_alignment(g, ret_type);
+ LLVMValueRef byte_count_val = LLVMConstInt(usize_type_ref, type_size(g, ret_type), false);
+ ZigLLVMBuildMemCpy(g->builder,
+ dest_ptr_casted, abi_align,
+ src_ptr_casted, abi_align, byte_count_val, is_volatile);
+ LLVMBuildBr(g->builder, copy_end_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, copy_end_block);
+ if (codegen_fn_has_err_ret_tracing_arg(g, ret_type)) {
+ LLVMValueRef awaiter_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
+ frame_index_trace_arg(g, ret_type) + 1, "");
+ LLVMValueRef dest_trace_ptr = LLVMBuildLoad(g->builder, awaiter_trace_ptr_ptr, "");
+ LLVMValueRef my_err_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope);
+ LLVMValueRef args[] = { dest_trace_ptr, my_err_trace_val };
+ ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2,
+ get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
+ }
+ }
+
+    // Resume the caller by tail-calling it.
+ ZigType *any_frame_type = get_any_frame_type(g, ret_type);
+ LLVMValueRef their_frame_ptr = LLVMBuildIntToPtr(g->builder, prev_val, get_llvm_type(g, any_frame_type), "");
+ LLVMValueRef call_inst = gen_resume(g, nullptr, their_frame_ptr, ResumeIdReturn);
+ set_tail_call_if_appropriate(g, call_inst);
+ LLVMBuildRetVoid(g->builder);
+}
+
+static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrInstructionReturn *instruction) {
+ if (fn_is_async(g->cur_fn)) {
+ gen_async_return(g, instruction);
+ return nullptr;
+ }
+
if (want_first_arg_sret(g, &g->cur_fn->type_entry->data.fn.fn_type_id)) {
- if (return_instruction->value == nullptr) {
+ if (instruction->operand == nullptr) {
LLVMBuildRetVoid(g->builder);
return nullptr;
}
assert(g->cur_ret_ptr);
- src_assert(return_instruction->value->value.special != ConstValSpecialRuntime,
- return_instruction->base.source_node);
- LLVMValueRef value = ir_llvm_value(g, return_instruction->value);
- ZigType *return_type = return_instruction->value->value.type;
+ src_assert(instruction->operand->value.special != ConstValSpecialRuntime,
+ instruction->base.source_node);
+ LLVMValueRef value = ir_llvm_value(g, instruction->operand);
+ ZigType *return_type = instruction->operand->value.type;
gen_assign_raw(g, g->cur_ret_ptr, get_pointer_to_type(g, return_type, false), value);
LLVMBuildRetVoid(g->builder);
} else if (g->cur_fn->type_entry->data.fn.fn_type_id.cc != CallingConventionAsync &&
handle_is_ptr(g->cur_fn->type_entry->data.fn.fn_type_id.return_type))
{
- if (return_instruction->value == nullptr) {
+ if (instruction->operand == nullptr) {
LLVMValueRef by_val_value = gen_load_untyped(g, g->cur_ret_ptr, 0, false, "");
LLVMBuildRet(g->builder, by_val_value);
} else {
- LLVMValueRef value = ir_llvm_value(g, return_instruction->value);
+ LLVMValueRef value = ir_llvm_value(g, instruction->operand);
LLVMValueRef by_val_value = gen_load_untyped(g, value, 0, false, "");
LLVMBuildRet(g->builder, by_val_value);
}
- } else if (return_instruction->value == nullptr) {
+ } else if (instruction->operand == nullptr) {
LLVMBuildRetVoid(g->builder);
} else {
- LLVMValueRef value = ir_llvm_value(g, return_instruction->value);
+ LLVMValueRef value = ir_llvm_value(g, instruction->operand);
LLVMBuildRet(g->builder, value);
}
return nullptr;
@@ -3242,14 +3309,17 @@ static LLVMValueRef ir_render_bool_not(CodeGen *g, IrExecutable *executable, IrI
return LLVMBuildICmp(g->builder, LLVMIntEQ, value, zero, "");
}
-static LLVMValueRef ir_render_decl_var(CodeGen *g, IrExecutable *executable, IrInstructionDeclVarGen *instruction) {
- ZigVar *var = instruction->var;
-
+static void render_decl_var(CodeGen *g, ZigVar *var) {
if (!type_has_bits(var->var_type))
- return nullptr;
+ return;
- var->value_ref = ir_llvm_value(g, instruction->var_ptr);
+ var->value_ref = ir_llvm_value(g, var->ptr_instruction);
gen_var_debug_decl(g, var);
+}
+
+static LLVMValueRef ir_render_decl_var(CodeGen *g, IrExecutable *executable, IrInstructionDeclVarGen *instruction) {
+ instruction->var->ptr_instruction = instruction->var_ptr;
+ render_decl_var(g, instruction->var);
return nullptr;
}
@@ -3467,8 +3537,9 @@ static LLVMValueRef ir_render_var_ptr(CodeGen *g, IrExecutable *executable, IrIn
static LLVMValueRef ir_render_return_ptr(CodeGen *g, IrExecutable *executable,
IrInstructionReturnPtr *instruction)
{
- src_assert(g->cur_ret_ptr != nullptr || !type_has_bits(instruction->base.value.type),
- instruction->base.source_node);
+ if (!type_has_bits(instruction->base.value.type))
+ return nullptr;
+ src_assert(g->cur_ret_ptr != nullptr, instruction->base.source_node);
return g->cur_ret_ptr;
}
@@ -3566,26 +3637,6 @@ static LLVMValueRef ir_render_elem_ptr(CodeGen *g, IrExecutable *executable, IrI
}
}
-static bool get_prefix_arg_err_ret_stack(CodeGen *g, FnTypeId *fn_type_id) {
- return g->have_err_ret_tracing &&
- (fn_type_id->return_type->id == ZigTypeIdErrorUnion ||
- fn_type_id->return_type->id == ZigTypeIdErrorSet ||
- fn_type_id->cc == CallingConventionAsync);
-}
-
-static size_t get_async_allocator_arg_index(CodeGen *g, FnTypeId *fn_type_id) {
- // 0 1 2 3
- // err_ret_stack allocator_ptr err_code other_args...
- return get_prefix_arg_err_ret_stack(g, fn_type_id) ? 1 : 0;
-}
-
-static size_t get_async_err_code_arg_index(CodeGen *g, FnTypeId *fn_type_id) {
- // 0 1 2 3
- // err_ret_stack allocator_ptr err_code other_args...
- return 1 + get_async_allocator_arg_index(g, fn_type_id);
-}
-
-
static LLVMValueRef get_new_stack_addr(CodeGen *g, LLVMValueRef new_stack) {
LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, new_stack, (unsigned)slice_ptr_index, "");
LLVMValueRef len_field_ptr = LLVMBuildStructGEP(g->builder, new_stack, (unsigned)slice_len_index, "");
@@ -3623,16 +3674,124 @@ static void set_call_instr_sret(CodeGen *g, LLVMValueRef call_instr) {
LLVMAddCallSiteAttribute(call_instr, 1, sret_attr);
}
+static void render_async_spills(CodeGen *g) {
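+    // Parameters of an async function live in its frame rather than on the native stack; point each
+    // parameter's value_ref at its spill slot in the frame and re-emit its debug declaration, then do
+    // the same for alloca instructions that were assigned frame fields.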
+ ZigType *fn_type = g->cur_fn->type_entry;
+ ZigType *import = get_scope_import(&g->cur_fn->fndef_scope->base);
+ uint32_t async_var_index = frame_index_arg(g, fn_type->data.fn.fn_type_id.return_type);
+ for (size_t var_i = 0; var_i < g->cur_fn->variable_list.length; var_i += 1) {
+ ZigVar *var = g->cur_fn->variable_list.at(var_i);
+
+ if (!type_has_bits(var->var_type)) {
+ continue;
+ }
+ if (ir_get_var_is_comptime(var))
+ continue;
+ switch (type_requires_comptime(g, var->var_type)) {
+ case ReqCompTimeInvalid:
+ zig_unreachable();
+ case ReqCompTimeYes:
+ continue;
+ case ReqCompTimeNo:
+ break;
+ }
+ if (var->src_arg_index == SIZE_MAX) {
+ continue;
+ }
+
+ var->value_ref = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, async_var_index,
+ buf_ptr(&var->name));
+ async_var_index += 1;
+ if (var->decl_node) {
+ var->di_loc_var = ZigLLVMCreateAutoVariable(g->dbuilder, get_di_scope(g, var->parent_scope),
+ buf_ptr(&var->name), import->data.structure.root_struct->di_file,
+ (unsigned)(var->decl_node->line + 1),
+ get_llvm_di_type(g, var->var_type), !g->strip_debug_symbols, 0);
+ gen_var_debug_decl(g, var);
+ }
+ }
+
+ ZigType *frame_type = g->cur_fn->frame_type->data.frame.locals_struct;
+
+ for (size_t alloca_i = 0; alloca_i < g->cur_fn->alloca_gen_list.length; alloca_i += 1) {
+ IrInstructionAllocaGen *instruction = g->cur_fn->alloca_gen_list.at(alloca_i);
+ if (instruction->field_index == SIZE_MAX)
+ continue;
+
+ size_t gen_index = frame_type->data.structure.fields[instruction->field_index].gen_index;
+ instruction->base.llvm_value = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, gen_index,
+ instruction->name_hint);
+ }
+}
+
+static void render_async_var_decls(CodeGen *g, Scope *scope) {
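+    // Walk from the given scope up to the enclosing function definition, re-rendering the
+    // declaration (value_ref and debug info) of every variable declared along the way.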
+ for (;;) {
+ switch (scope->id) {
+ case ScopeIdCImport:
+ zig_unreachable();
+ case ScopeIdFnDef:
+ return;
+ case ScopeIdVarDecl: {
+ ZigVar *var = reinterpret_cast<ScopeVarDecl *>(scope)->var;
+ if (var->ptr_instruction != nullptr) {
+ render_decl_var(g, var);
+ }
+ // fallthrough
+ }
+ case ScopeIdDecls:
+ case ScopeIdBlock:
+ case ScopeIdDefer:
+ case ScopeIdDeferExpr:
+ case ScopeIdLoop:
+ case ScopeIdSuspend:
+ case ScopeIdCompTime:
+ case ScopeIdRuntime:
+ scope = scope->parent;
+ continue;
+ }
+ }
+}
+
+static LLVMValueRef gen_frame_size(CodeGen *g, LLVMValueRef fn_val) {
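+    // The frame size is stored as a usize immediately before the function's code; read it by
+    // indexing one usize behind the function address.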
+ LLVMTypeRef usize_llvm_type = g->builtin_types.entry_usize->llvm_type;
+ LLVMTypeRef ptr_usize_llvm_type = LLVMPointerType(usize_llvm_type, 0);
+ LLVMValueRef casted_fn_val = LLVMBuildBitCast(g->builder, fn_val, ptr_usize_llvm_type, "");
+ LLVMValueRef negative_one = LLVMConstInt(LLVMInt32Type(), -1, true);
+ LLVMValueRef prefix_ptr = LLVMBuildInBoundsGEP(g->builder, casted_fn_val, &negative_one, 1, "");
+ return LLVMBuildLoad(g->builder, prefix_ptr, "");
+}
+
+static void gen_init_stack_trace(CodeGen *g, LLVMValueRef trace_field_ptr, LLVMValueRef addrs_field_ptr) {
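+    // Initialize a StackTrace stored in the frame: zero its index and point its addresses slice
+    // at the frame's address buffer, with length stack_trace_ptr_count.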
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
+ LLVMValueRef zero = LLVMConstNull(usize_type_ref);
+
+ LLVMValueRef index_ptr = LLVMBuildStructGEP(g->builder, trace_field_ptr, 0, "");
+ LLVMBuildStore(g->builder, zero, index_ptr);
+
+ LLVMValueRef addrs_slice_ptr = LLVMBuildStructGEP(g->builder, trace_field_ptr, 1, "");
+ LLVMValueRef addrs_ptr_ptr = LLVMBuildStructGEP(g->builder, addrs_slice_ptr, slice_ptr_index, "");
+ LLVMValueRef indices[] = { LLVMConstNull(usize_type_ref), LLVMConstNull(usize_type_ref) };
+ LLVMValueRef trace_field_addrs_as_ptr = LLVMBuildInBoundsGEP(g->builder, addrs_field_ptr, indices, 2, "");
+ LLVMBuildStore(g->builder, trace_field_addrs_as_ptr, addrs_ptr_ptr);
+
+ LLVMValueRef addrs_len_ptr = LLVMBuildStructGEP(g->builder, addrs_slice_ptr, slice_len_index, "");
+ LLVMBuildStore(g->builder, LLVMConstInt(usize_type_ref, stack_trace_ptr_count, false), addrs_len_ptr);
+}
+
static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstructionCallGen *instruction) {
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
+
LLVMValueRef fn_val;
ZigType *fn_type;
+ bool callee_is_async;
if (instruction->fn_entry) {
fn_val = fn_llvm_value(g, instruction->fn_entry);
fn_type = instruction->fn_entry->type_entry;
+ callee_is_async = fn_is_async(instruction->fn_entry);
} else {
assert(instruction->fn_ref);
fn_val = ir_llvm_value(g, instruction->fn_ref);
fn_type = instruction->fn_ref->value.type;
+ callee_is_async = fn_type->data.fn.fn_type_id.cc == CallingConventionAsync;
}
FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
@@ -3643,27 +3802,154 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
CallingConvention cc = fn_type->data.fn.fn_type_id.cc;
bool first_arg_ret = ret_has_bits && want_first_arg_sret(g, fn_type_id);
- bool prefix_arg_err_ret_stack = get_prefix_arg_err_ret_stack(g, fn_type_id);
+ bool prefix_arg_err_ret_stack = codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type);
bool is_var_args = fn_type_id->is_var_args;
ZigList<LLVMValueRef> gen_param_values = {};
+ ZigList<ZigType *> gen_param_types = {};
LLVMValueRef result_loc = instruction->result_loc ? ir_llvm_value(g, instruction->result_loc) : nullptr;
- if (first_arg_ret) {
- gen_param_values.append(result_loc);
- }
- if (prefix_arg_err_ret_stack) {
- gen_param_values.append(get_cur_err_ret_trace_val(g, instruction->base.scope));
- }
- if (instruction->is_async) {
- gen_param_values.append(ir_llvm_value(g, instruction->async_allocator));
+ LLVMValueRef zero = LLVMConstNull(usize_type_ref);
+ LLVMValueRef frame_result_loc;
+ LLVMValueRef awaiter_init_val;
+ LLVMValueRef ret_ptr;
+ if (callee_is_async) {
+ if (instruction->is_async) {
+ if (instruction->new_stack == nullptr) {
+ awaiter_init_val = zero;
+ frame_result_loc = result_loc;
+
+ if (ret_has_bits) {
+                    // For an async call, use the result location that lives inside the frame.
+ ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 2, "");
+ }
+ } else if (cc == CallingConventionAsync) {
+ awaiter_init_val = zero;
+ LLVMValueRef frame_slice_ptr = ir_llvm_value(g, instruction->new_stack);
+ if (ir_want_runtime_safety(g, &instruction->base)) {
+ LLVMValueRef given_len_ptr = LLVMBuildStructGEP(g->builder, frame_slice_ptr, slice_len_index, "");
+ LLVMValueRef given_frame_len = LLVMBuildLoad(g->builder, given_len_ptr, "");
+ LLVMValueRef actual_frame_len = gen_frame_size(g, fn_val);
+
+ LLVMBasicBlockRef fail_block = LLVMAppendBasicBlock(g->cur_fn_val, "FrameSizeCheckFail");
+ LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "FrameSizeCheckOk");
+
+ LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntUGE, given_frame_len, actual_frame_len, "");
+ LLVMBuildCondBr(g->builder, ok_bit, ok_block, fail_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, fail_block);
+ gen_safety_crash(g, PanicMsgIdFrameTooSmall);
+
+ LLVMPositionBuilderAtEnd(g->builder, ok_block);
+ }
+ LLVMValueRef frame_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_slice_ptr, slice_ptr_index, "");
+ LLVMValueRef frame_ptr = LLVMBuildLoad(g->builder, frame_ptr_ptr, "");
+ frame_result_loc = LLVMBuildBitCast(g->builder, frame_ptr,
+ get_llvm_type(g, instruction->base.value.type), "");
+
+ if (ret_has_bits) {
+ // Use the result location provided to the @asyncCall builtin
+ ret_ptr = result_loc;
+ }
+ }
+
+            // Even if prefix_arg_err_ret_stack is true, let the async function do its own
+            // error return trace initialization.
+ } else {
+ // async function called as a normal function
+
+ frame_result_loc = ir_llvm_value(g, instruction->frame_result_loc);
+ awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, ""); // caller's own frame pointer
+ if (ret_has_bits) {
+ if (result_loc == nullptr) {
+                    // The return type is a scalar, but we still need a pointer to it; use the async fn frame.
+ ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 2, "");
+ } else {
+ // Use the call instruction's result location.
+ ret_ptr = result_loc;
+ }
+
+ // Store a zero in the awaiter's result ptr to indicate we do not need a copy made.
+ LLVMValueRef awaiter_ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 1, "");
+ LLVMValueRef zero_ptr = LLVMConstNull(LLVMGetElementType(LLVMTypeOf(awaiter_ret_ptr)));
+ LLVMBuildStore(g->builder, zero_ptr, awaiter_ret_ptr);
+ }
+
+ if (prefix_arg_err_ret_stack) {
+ LLVMValueRef err_ret_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc,
+ frame_index_trace_arg(g, src_return_type) + 1, "");
+ LLVMValueRef my_err_ret_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope);
+ LLVMBuildStore(g->builder, my_err_ret_trace_val, err_ret_trace_ptr_ptr);
+ }
+ }
+
+ assert(frame_result_loc != nullptr);
+
+ LLVMValueRef fn_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_fn_ptr_index, "");
+ LLVMValueRef bitcasted_fn_val = LLVMBuildBitCast(g->builder, fn_val,
+ LLVMGetElementType(LLVMTypeOf(fn_ptr_ptr)), "");
+ LLVMBuildStore(g->builder, bitcasted_fn_val, fn_ptr_ptr);
+
+ LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_resume_index, "");
+ LLVMBuildStore(g->builder, zero, resume_index_ptr);
- LLVMValueRef err_val_ptr = LLVMBuildStructGEP(g->builder, result_loc, err_union_err_index, "");
- gen_param_values.append(err_val_ptr);
+ LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_awaiter_index, "");
+ LLVMBuildStore(g->builder, awaiter_init_val, awaiter_ptr);
+
+ if (ret_has_bits) {
+ LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start, "");
+ LLVMBuildStore(g->builder, ret_ptr, ret_ptr_ptr);
+ }
+ } else if (instruction->is_async) {
+ // Async call of blocking function
+ if (instruction->new_stack != nullptr) {
+ zig_panic("TODO @asyncCall of non-async function");
+ }
+ frame_result_loc = result_loc;
+ awaiter_init_val = LLVMConstAllOnes(usize_type_ref);
+
+ LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_awaiter_index, "");
+ LLVMBuildStore(g->builder, awaiter_init_val, awaiter_ptr);
+
+ if (ret_has_bits) {
+ LLVMValueRef ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 2, "");
+ LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start, "");
+ LLVMBuildStore(g->builder, ret_ptr, ret_ptr_ptr);
+
+ if (first_arg_ret) {
+ gen_param_values.append(ret_ptr);
+ }
+ if (prefix_arg_err_ret_stack) {
+ // Point the callee's stack trace pointer at a StackTrace stored inside the frame,
+ // then initialize that StackTrace in place.
+ // Await is responsible for merging error return traces.
+ uint32_t trace_field_index_start = frame_index_trace_arg(g, src_return_type);
+ LLVMValueRef callee_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc,
+ trace_field_index_start, "");
+ LLVMValueRef trace_field_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc,
+ trace_field_index_start + 2, "");
+ LLVMValueRef addrs_field_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc,
+ trace_field_index_start + 3, "");
+
+ LLVMBuildStore(g->builder, trace_field_ptr, callee_trace_ptr_ptr);
+
+ gen_init_stack_trace(g, trace_field_ptr, addrs_field_ptr);
+
+ gen_param_values.append(get_cur_err_ret_trace_val(g, instruction->base.scope));
+ }
+ }
+ } else {
+ if (first_arg_ret) {
+ gen_param_values.append(result_loc);
+ }
+ if (prefix_arg_err_ret_stack) {
+ gen_param_values.append(get_cur_err_ret_trace_val(g, instruction->base.scope));
+ }
}
FnWalk fn_walk = {};
fn_walk.id = FnWalkIdCall;
fn_walk.data.call.inst = instruction;
fn_walk.data.call.is_var_args = is_var_args;
fn_walk.data.call.gen_param_values = &gen_param_values;
+ fn_walk.data.call.gen_param_types = &gen_param_types;
walk_function_params(g, fn_type, &fn_walk);
ZigLLVM_FnInline fn_inline;
@@ -3682,9 +3968,68 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
LLVMCallConv llvm_cc = get_llvm_cc(g, cc);
LLVMValueRef result;
+ if (callee_is_async) {
+ uint32_t arg_start_i = frame_index_arg(g, fn_type->data.fn.fn_type_id.return_type);
+
+ LLVMValueRef casted_frame;
+ if (instruction->new_stack != nullptr) {
+ // We need the frame type to be a pointer to a struct that includes the args
+ size_t field_count = arg_start_i + gen_param_values.length;
+ LLVMTypeRef *field_types = allocate_nonzero<LLVMTypeRef>(field_count);
+ LLVMGetStructElementTypes(LLVMGetElementType(LLVMTypeOf(frame_result_loc)), field_types);
+ assert(LLVMCountStructElementTypes(LLVMGetElementType(LLVMTypeOf(frame_result_loc))) == arg_start_i);
+ for (size_t arg_i = 0; arg_i < gen_param_values.length; arg_i += 1) {
+ field_types[arg_start_i + arg_i] = LLVMTypeOf(gen_param_values.at(arg_i));
+ }
+ LLVMTypeRef frame_with_args_type = LLVMStructType(field_types, field_count, false);
+ LLVMTypeRef ptr_frame_with_args_type = LLVMPointerType(frame_with_args_type, 0);
+
+ casted_frame = LLVMBuildBitCast(g->builder, frame_result_loc, ptr_frame_with_args_type, "");
+ } else {
+ casted_frame = frame_result_loc;
+ }
+
+ for (size_t arg_i = 0; arg_i < gen_param_values.length; arg_i += 1) {
+ LLVMValueRef arg_ptr = LLVMBuildStructGEP(g->builder, casted_frame, arg_start_i + arg_i, "");
+ gen_assign_raw(g, arg_ptr, get_pointer_to_type(g, gen_param_types.at(arg_i), true),
+ gen_param_values.at(arg_i));
+ }
+
+ if (instruction->is_async) {
+ gen_resume(g, fn_val, frame_result_loc, ResumeIdCall);
+ if (instruction->new_stack != nullptr) {
+ return frame_result_loc;
+ }
+ return nullptr;
+ } else {
+ ZigType *ptr_result_type = get_pointer_to_type(g, src_return_type, true);
+
+ LLVMBasicBlockRef call_bb = gen_suspend_begin(g, "CallResume");
+
+ LLVMValueRef call_inst = gen_resume(g, fn_val, frame_result_loc, ResumeIdCall);
+ set_tail_call_if_appropriate(g, call_inst);
+ LLVMBuildRetVoid(g->builder);
+
+ LLVMPositionBuilderAtEnd(g->builder, call_bb);
+ gen_assert_resume_id(g, &instruction->base, ResumeIdReturn, PanicMsgIdResumedAnAwaitingFn, nullptr);
+ render_async_var_decls(g, instruction->base.scope);
+
+ if (!type_has_bits(src_return_type))
+ return nullptr;
+
+ if (result_loc != nullptr)
+ return get_handle_value(g, result_loc, src_return_type, ptr_result_type);
+
+ LLVMValueRef result_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 2, "");
+ return LLVMBuildLoad(g->builder, result_ptr, "");
+ }
+ }
+
if (instruction->new_stack == nullptr) {
result = ZigLLVMBuildCall(g->builder, fn_val,
gen_param_values.items, (unsigned)gen_param_values.length, llvm_cc, fn_inline, "");
+ } else if (instruction->is_async) {
+ zig_panic("TODO @asyncCall of non-async function");
} else {
LLVMValueRef stacksave_fn_val = get_stacksave_fn_val(g);
LLVMValueRef stackrestore_fn_val = get_stackrestore_fn_val(g);
@@ -3697,13 +4042,6 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
LLVMBuildCall(g->builder, stackrestore_fn_val, &old_stack_ref, 1, "");
}
-
- if (instruction->is_async) {
- LLVMValueRef payload_ptr = LLVMBuildStructGEP(g->builder, result_loc, err_union_payload_index, "");
- LLVMBuildStore(g->builder, result, payload_ptr);
- return result_loc;
- }
-
if (src_return_type->id == ZigTypeIdUnreachable) {
return LLVMBuildUnreachable(g->builder);
} else if (!ret_has_bits) {
@@ -4200,7 +4538,7 @@ static LLVMValueRef get_enum_tag_name_function(CodeGen *g, ZigType *enum_type) {
LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified));
addLLVMFnAttr(fn_val, "nounwind");
add_uwtable_attr(g, fn_val);
- if (g->build_mode == BuildModeDebug) {
+ if (codegen_have_frame_pointer(g)) {
ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim", "true");
ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim-non-leaf", nullptr);
}
@@ -4347,10 +4685,6 @@ static LLVMValueRef ir_render_align_cast(CodeGen *g, IrExecutable *executable, I
{
align_bytes = target_type->data.maybe.child_type->data.fn.fn_type_id.alignment;
ptr_val = target_val;
- } else if (target_type->id == ZigTypeIdOptional &&
- target_type->data.maybe.child_type->id == ZigTypeIdPromise)
- {
- zig_panic("TODO audit this function");
} else if (target_type->id == ZigTypeIdStruct && target_type->data.structure.is_slice) {
ZigType *slice_ptr_type = target_type->data.structure.fields[slice_ptr_index].type_entry;
align_bytes = get_ptr_align(g, slice_ptr_type);
@@ -4388,26 +4722,11 @@ static LLVMValueRef ir_render_error_return_trace(CodeGen *g, IrExecutable *execu
{
LLVMValueRef cur_err_ret_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope);
if (cur_err_ret_trace_val == nullptr) {
- ZigType *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(g);
- return LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type));
+ return LLVMConstNull(get_llvm_type(g, ptr_to_stack_trace_type(g)));
}
return cur_err_ret_trace_val;
}
-static LLVMValueRef ir_render_cancel(CodeGen *g, IrExecutable *executable, IrInstructionCancel *instruction) {
- LLVMValueRef target_handle = ir_llvm_value(g, instruction->target);
- LLVMBuildCall(g->builder, get_coro_destroy_fn_val(g), &target_handle, 1, "");
- return nullptr;
-}
-
-static LLVMValueRef ir_render_get_implicit_allocator(CodeGen *g, IrExecutable *executable,
- IrInstructionGetImplicitAllocator *instruction)
-{
- assert(instruction->id == ImplicitAllocatorIdArg);
- size_t allocator_arg_index = get_async_allocator_arg_index(g, &g->cur_fn->type_entry->data.fn.fn_type_id);
- return LLVMGetParam(g->cur_fn_val, allocator_arg_index);
-}
-
static LLVMAtomicOrdering to_LLVMAtomicOrdering(AtomicOrder atomic_order) {
switch (atomic_order) {
case AtomicOrderUnordered: return LLVMAtomicOrderingUnordered;
@@ -4722,24 +5041,8 @@ static LLVMValueRef ir_render_frame_address(CodeGen *g, IrExecutable *executable
return LLVMBuildPtrToInt(g->builder, ptr_val, g->builtin_types.entry_usize->llvm_type, "");
}
-static LLVMValueRef get_handle_fn_val(CodeGen *g) {
- if (g->coro_frame_fn_val)
- return g->coro_frame_fn_val;
-
- LLVMTypeRef fn_type = LLVMFunctionType( LLVMPointerType(LLVMInt8Type(), 0)
- , nullptr, 0, false);
- Buf *name = buf_sprintf("llvm.coro.frame");
- g->coro_frame_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
- assert(LLVMGetIntrinsicID(g->coro_frame_fn_val));
-
- return g->coro_frame_fn_val;
-}
-
-static LLVMValueRef ir_render_handle(CodeGen *g, IrExecutable *executable,
- IrInstructionHandle *instruction)
-{
- LLVMValueRef zero = LLVMConstNull(get_llvm_type(g, g->builtin_types.entry_promise));
- return LLVMBuildCall(g->builder, get_handle_fn_val(g), &zero, 0, "");
+static LLVMValueRef ir_render_handle(CodeGen *g, IrExecutable *executable, IrInstructionFrameHandle *instruction) {
+ return g->cur_frame_ptr;
}
static LLVMValueRef render_shl_with_overflow(CodeGen *g, IrInstructionOverflowOp *instruction) {
@@ -5005,248 +5308,6 @@ static LLVMValueRef ir_render_panic(CodeGen *g, IrExecutable *executable, IrInst
return nullptr;
}
-static LLVMValueRef ir_render_coro_id(CodeGen *g, IrExecutable *executable, IrInstructionCoroId *instruction) {
- LLVMValueRef promise_ptr = ir_llvm_value(g, instruction->promise_ptr);
- LLVMValueRef align_val = LLVMConstInt(LLVMInt32Type(), get_coro_frame_align_bytes(g), false);
- LLVMValueRef null = LLVMConstIntToPtr(LLVMConstNull(g->builtin_types.entry_usize->llvm_type),
- LLVMPointerType(LLVMInt8Type(), 0));
- LLVMValueRef params[] = {
- align_val,
- promise_ptr,
- null,
- null,
- };
- return LLVMBuildCall(g->builder, get_coro_id_fn_val(g), params, 4, "");
-}
-
-static LLVMValueRef ir_render_coro_alloc(CodeGen *g, IrExecutable *executable, IrInstructionCoroAlloc *instruction) {
- LLVMValueRef token = ir_llvm_value(g, instruction->coro_id);
- return LLVMBuildCall(g->builder, get_coro_alloc_fn_val(g), &token, 1, "");
-}
-
-static LLVMValueRef ir_render_coro_size(CodeGen *g, IrExecutable *executable, IrInstructionCoroSize *instruction) {
- return LLVMBuildCall(g->builder, get_coro_size_fn_val(g), nullptr, 0, "");
-}
-
-static LLVMValueRef ir_render_coro_begin(CodeGen *g, IrExecutable *executable, IrInstructionCoroBegin *instruction) {
- LLVMValueRef coro_id = ir_llvm_value(g, instruction->coro_id);
- LLVMValueRef coro_mem_ptr = ir_llvm_value(g, instruction->coro_mem_ptr);
- LLVMValueRef params[] = {
- coro_id,
- coro_mem_ptr,
- };
- return LLVMBuildCall(g->builder, get_coro_begin_fn_val(g), params, 2, "");
-}
-
-static LLVMValueRef ir_render_coro_alloc_fail(CodeGen *g, IrExecutable *executable,
- IrInstructionCoroAllocFail *instruction)
-{
- size_t err_code_ptr_arg_index = get_async_err_code_arg_index(g, &g->cur_fn->type_entry->data.fn.fn_type_id);
- LLVMValueRef err_code_ptr_val = LLVMGetParam(g->cur_fn_val, err_code_ptr_arg_index);
- LLVMValueRef err_code = ir_llvm_value(g, instruction->err_val);
- LLVMBuildStore(g->builder, err_code, err_code_ptr_val);
-
- LLVMValueRef return_value;
- if (ir_want_runtime_safety(g, &instruction->base)) {
- return_value = LLVMConstNull(LLVMPointerType(LLVMInt8Type(), 0));
- } else {
- return_value = LLVMGetUndef(LLVMPointerType(LLVMInt8Type(), 0));
- }
- LLVMBuildRet(g->builder, return_value);
- return nullptr;
-}
-
-static LLVMValueRef ir_render_coro_suspend(CodeGen *g, IrExecutable *executable, IrInstructionCoroSuspend *instruction) {
- LLVMValueRef save_point;
- if (instruction->save_point == nullptr) {
- save_point = LLVMConstNull(ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()));
- } else {
- save_point = ir_llvm_value(g, instruction->save_point);
- }
- LLVMValueRef is_final = ir_llvm_value(g, instruction->is_final);
- LLVMValueRef params[] = {
- save_point,
- is_final,
- };
- return LLVMBuildCall(g->builder, get_coro_suspend_fn_val(g), params, 2, "");
-}
-
-static LLVMValueRef ir_render_coro_end(CodeGen *g, IrExecutable *executable, IrInstructionCoroEnd *instruction) {
- LLVMValueRef params[] = {
- LLVMConstNull(LLVMPointerType(LLVMInt8Type(), 0)),
- LLVMConstNull(LLVMInt1Type()),
- };
- return LLVMBuildCall(g->builder, get_coro_end_fn_val(g), params, 2, "");
-}
-
-static LLVMValueRef ir_render_coro_free(CodeGen *g, IrExecutable *executable, IrInstructionCoroFree *instruction) {
- LLVMValueRef coro_id = ir_llvm_value(g, instruction->coro_id);
- LLVMValueRef coro_handle = ir_llvm_value(g, instruction->coro_handle);
- LLVMValueRef params[] = {
- coro_id,
- coro_handle,
- };
- return LLVMBuildCall(g->builder, get_coro_free_fn_val(g), params, 2, "");
-}
-
-static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable, IrInstructionCoroResume *instruction) {
- LLVMValueRef awaiter_handle = ir_llvm_value(g, instruction->awaiter_handle);
- return LLVMBuildCall(g->builder, get_coro_resume_fn_val(g), &awaiter_handle, 1, "");
-}
-
-static LLVMValueRef ir_render_coro_save(CodeGen *g, IrExecutable *executable, IrInstructionCoroSave *instruction) {
- LLVMValueRef coro_handle = ir_llvm_value(g, instruction->coro_handle);
- return LLVMBuildCall(g->builder, get_coro_save_fn_val(g), &coro_handle, 1, "");
-}
-
-static LLVMValueRef ir_render_coro_promise(CodeGen *g, IrExecutable *executable, IrInstructionCoroPromise *instruction) {
- LLVMValueRef coro_handle = ir_llvm_value(g, instruction->coro_handle);
- LLVMValueRef params[] = {
- coro_handle,
- LLVMConstInt(LLVMInt32Type(), get_coro_frame_align_bytes(g), false),
- LLVMConstNull(LLVMInt1Type()),
- };
- LLVMValueRef uncasted_result = LLVMBuildCall(g->builder, get_coro_promise_fn_val(g), params, 3, "");
- return LLVMBuildBitCast(g->builder, uncasted_result, get_llvm_type(g, instruction->base.value.type), "");
-}
-
-static LLVMValueRef get_coro_alloc_helper_fn_val(CodeGen *g, LLVMTypeRef alloc_fn_type_ref, ZigType *fn_type) {
- if (g->coro_alloc_helper_fn_val != nullptr)
- return g->coro_alloc_helper_fn_val;
-
- assert(fn_type->id == ZigTypeIdFn);
-
- ZigType *ptr_to_err_code_type = get_pointer_to_type(g, g->builtin_types.entry_global_error_set, false);
-
- LLVMTypeRef alloc_raw_fn_type_ref = LLVMGetElementType(alloc_fn_type_ref);
- LLVMTypeRef *alloc_fn_arg_types = allocate<LLVMTypeRef>(LLVMCountParamTypes(alloc_raw_fn_type_ref));
- LLVMGetParamTypes(alloc_raw_fn_type_ref, alloc_fn_arg_types);
-
- ZigList<LLVMTypeRef> arg_types = {};
- arg_types.append(alloc_fn_type_ref);
- if (g->have_err_ret_tracing) {
- arg_types.append(alloc_fn_arg_types[1]);
- }
- arg_types.append(alloc_fn_arg_types[g->have_err_ret_tracing ? 2 : 1]);
- arg_types.append(get_llvm_type(g, ptr_to_err_code_type));
- arg_types.append(g->builtin_types.entry_usize->llvm_type);
-
- LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMPointerType(LLVMInt8Type(), 0),
- arg_types.items, arg_types.length, false);
-
- Buf *fn_name = get_mangled_name(g, buf_create_from_str("__zig_coro_alloc_helper"), false);
- LLVMValueRef fn_val = LLVMAddFunction(g->module, buf_ptr(fn_name), fn_type_ref);
- LLVMSetLinkage(fn_val, LLVMInternalLinkage);
- LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified));
- addLLVMFnAttr(fn_val, "nounwind");
- addLLVMArgAttr(fn_val, (unsigned)0, "nonnull");
- addLLVMArgAttr(fn_val, (unsigned)1, "nonnull");
-
- LLVMBasicBlockRef prev_block = LLVMGetInsertBlock(g->builder);
- LLVMValueRef prev_debug_location = LLVMGetCurrentDebugLocation(g->builder);
- ZigFn *prev_cur_fn = g->cur_fn;
- LLVMValueRef prev_cur_fn_val = g->cur_fn_val;
-
- LLVMBasicBlockRef entry_block = LLVMAppendBasicBlock(fn_val, "Entry");
- LLVMPositionBuilderAtEnd(g->builder, entry_block);
- ZigLLVMClearCurrentDebugLocation(g->builder);
- g->cur_fn = nullptr;
- g->cur_fn_val = fn_val;
-
- LLVMValueRef sret_ptr = LLVMBuildAlloca(g->builder, LLVMGetElementType(alloc_fn_arg_types[0]), "");
-
- size_t next_arg = 0;
- LLVMValueRef realloc_fn_val = LLVMGetParam(fn_val, next_arg);
- next_arg += 1;
-
- LLVMValueRef stack_trace_val;
- if (g->have_err_ret_tracing) {
- stack_trace_val = LLVMGetParam(fn_val, next_arg);
- next_arg += 1;
- }
-
- LLVMValueRef allocator_val = LLVMGetParam(fn_val, next_arg);
- next_arg += 1;
- LLVMValueRef err_code_ptr = LLVMGetParam(fn_val, next_arg);
- next_arg += 1;
- LLVMValueRef coro_size = LLVMGetParam(fn_val, next_arg);
- next_arg += 1;
- LLVMValueRef alignment_val = LLVMConstInt(g->builtin_types.entry_u29->llvm_type,
- get_coro_frame_align_bytes(g), false);
-
- ConstExprValue *zero_array = create_const_str_lit(g, buf_create_from_str(""));
- ConstExprValue *undef_slice_zero = create_const_slice(g, zero_array, 0, 0, false);
- render_const_val(g, undef_slice_zero, "");
- render_const_val_global(g, undef_slice_zero, "");
-
- ZigList<LLVMValueRef> args = {};
- args.append(sret_ptr);
- if (g->have_err_ret_tracing) {
- args.append(stack_trace_val);
- }
- args.append(allocator_val);
- args.append(undef_slice_zero->global_refs->llvm_global);
- args.append(LLVMGetUndef(g->builtin_types.entry_u29->llvm_type));
- args.append(coro_size);
- args.append(alignment_val);
- LLVMValueRef call_instruction = ZigLLVMBuildCall(g->builder, realloc_fn_val, args.items, args.length,
- get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
- set_call_instr_sret(g, call_instruction);
- LLVMValueRef err_val_ptr = LLVMBuildStructGEP(g->builder, sret_ptr, err_union_err_index, "");
- LLVMValueRef err_val = LLVMBuildLoad(g->builder, err_val_ptr, "");
- LLVMBuildStore(g->builder, err_val, err_code_ptr);
- LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, err_val, LLVMConstNull(LLVMTypeOf(err_val)), "");
- LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(fn_val, "AllocOk");
- LLVMBasicBlockRef fail_block = LLVMAppendBasicBlock(fn_val, "AllocFail");
- LLVMBuildCondBr(g->builder, ok_bit, ok_block, fail_block);
-
- LLVMPositionBuilderAtEnd(g->builder, ok_block);
- LLVMValueRef payload_ptr = LLVMBuildStructGEP(g->builder, sret_ptr, err_union_payload_index, "");
- ZigType *u8_ptr_type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, false, false,
- PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0, false);
- ZigType *slice_type = get_slice_type(g, u8_ptr_type);
- size_t ptr_field_index = slice_type->data.structure.fields[slice_ptr_index].gen_index;
- LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, payload_ptr, ptr_field_index, "");
- LLVMValueRef ptr_val = LLVMBuildLoad(g->builder, ptr_field_ptr, "");
- LLVMBuildRet(g->builder, ptr_val);
-
- LLVMPositionBuilderAtEnd(g->builder, fail_block);
- LLVMBuildRet(g->builder, LLVMConstNull(LLVMPointerType(LLVMInt8Type(), 0)));
-
- g->cur_fn = prev_cur_fn;
- g->cur_fn_val = prev_cur_fn_val;
- LLVMPositionBuilderAtEnd(g->builder, prev_block);
- if (!g->strip_debug_symbols) {
- LLVMSetCurrentDebugLocation(g->builder, prev_debug_location);
- }
-
- g->coro_alloc_helper_fn_val = fn_val;
- return fn_val;
-}
-
-static LLVMValueRef ir_render_coro_alloc_helper(CodeGen *g, IrExecutable *executable,
- IrInstructionCoroAllocHelper *instruction)
-{
- LLVMValueRef realloc_fn = ir_llvm_value(g, instruction->realloc_fn);
- LLVMValueRef coro_size = ir_llvm_value(g, instruction->coro_size);
- LLVMValueRef fn_val = get_coro_alloc_helper_fn_val(g, LLVMTypeOf(realloc_fn), instruction->realloc_fn->value.type);
- size_t err_code_ptr_arg_index = get_async_err_code_arg_index(g, &g->cur_fn->type_entry->data.fn.fn_type_id);
- size_t allocator_arg_index = get_async_allocator_arg_index(g, &g->cur_fn->type_entry->data.fn.fn_type_id);
-
- ZigList<LLVMValueRef> params = {};
- params.append(realloc_fn);
- uint32_t err_ret_trace_arg_index = get_err_ret_trace_arg_index(g, g->cur_fn);
- if (err_ret_trace_arg_index != UINT32_MAX) {
- params.append(LLVMGetParam(g->cur_fn_val, err_ret_trace_arg_index));
- }
- params.append(LLVMGetParam(g->cur_fn_val, allocator_arg_index));
- params.append(LLVMGetParam(g->cur_fn_val, err_code_ptr_arg_index));
- params.append(coro_size);
-
- return ZigLLVMBuildCall(g->builder, fn_val, params.items, params.length,
- get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
-}
-
static LLVMValueRef ir_render_atomic_rmw(CodeGen *g, IrExecutable *executable,
IrInstructionAtomicRmw *instruction)
{
@@ -5263,14 +5324,15 @@ static LLVMValueRef ir_render_atomic_rmw(CodeGen *g, IrExecutable *executable,
LLVMValueRef operand = ir_llvm_value(g, instruction->operand);
if (get_codegen_ptr_type(operand_type) == nullptr) {
- return LLVMBuildAtomicRMW(g->builder, op, ptr, operand, ordering, false);
+ return LLVMBuildAtomicRMW(g->builder, op, ptr, operand, ordering, g->is_single_threaded);
}
// it's a pointer but we need to treat it as an int
LLVMValueRef casted_ptr = LLVMBuildBitCast(g->builder, ptr,
LLVMPointerType(g->builtin_types.entry_usize->llvm_type, 0), "");
LLVMValueRef casted_operand = LLVMBuildPtrToInt(g->builder, operand, g->builtin_types.entry_usize->llvm_type, "");
- LLVMValueRef uncasted_result = LLVMBuildAtomicRMW(g->builder, op, casted_ptr, casted_operand, ordering, false);
+ LLVMValueRef uncasted_result = LLVMBuildAtomicRMW(g->builder, op, casted_ptr, casted_operand, ordering,
+ g->is_single_threaded);
return LLVMBuildIntToPtr(g->builder, uncasted_result, get_llvm_type(g, operand_type), "");
}
@@ -5284,27 +5346,6 @@ static LLVMValueRef ir_render_atomic_load(CodeGen *g, IrExecutable *executable,
return load_inst;
}
-static LLVMValueRef ir_render_merge_err_ret_traces(CodeGen *g, IrExecutable *executable,
- IrInstructionMergeErrRetTraces *instruction)
-{
- assert(g->have_err_ret_tracing);
-
- LLVMValueRef src_trace_ptr = ir_llvm_value(g, instruction->src_err_ret_trace_ptr);
- LLVMValueRef dest_trace_ptr = ir_llvm_value(g, instruction->dest_err_ret_trace_ptr);
-
- LLVMValueRef args[] = { dest_trace_ptr, src_trace_ptr };
- ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2, get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
- return nullptr;
-}
-
-static LLVMValueRef ir_render_mark_err_ret_trace_ptr(CodeGen *g, IrExecutable *executable,
- IrInstructionMarkErrRetTracePtr *instruction)
-{
- assert(g->have_err_ret_tracing);
- g->cur_err_ret_trace_val_stack = ir_llvm_value(g, instruction->err_ret_trace_ptr);
- return nullptr;
-}
-
static LLVMValueRef ir_render_float_op(CodeGen *g, IrExecutable *executable, IrInstructionFloatOp *instruction) {
LLVMValueRef op = ir_llvm_value(g, instruction->op1);
assert(instruction->base.value.type->id == ZigTypeIdFloat);
@@ -5424,6 +5465,174 @@ static LLVMValueRef ir_render_assert_non_null(CodeGen *g, IrExecutable *executab
return nullptr;
}
+static LLVMValueRef ir_render_suspend_begin(CodeGen *g, IrExecutable *executable,
+ IrInstructionSuspendBegin *instruction)
+{
+ instruction->resume_bb = gen_suspend_begin(g, "SuspendResume");
+ return nullptr;
+}
+
+static LLVMValueRef ir_render_suspend_finish(CodeGen *g, IrExecutable *executable,
+ IrInstructionSuspendFinish *instruction)
+{
+ LLVMBuildRetVoid(g->builder);
+
+ LLVMPositionBuilderAtEnd(g->builder, instruction->begin->resume_bb);
+ render_async_var_decls(g, instruction->base.scope);
+ return nullptr;
+}
+
+static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInstructionAwaitGen *instruction) {
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
+ LLVMValueRef zero = LLVMConstNull(usize_type_ref);
+ LLVMValueRef target_frame_ptr = ir_llvm_value(g, instruction->frame);
+ ZigType *result_type = instruction->base.value.type;
+ ZigType *ptr_result_type = get_pointer_to_type(g, result_type, true);
+
+ // Prepare to be suspended
+ LLVMBasicBlockRef resume_bb = gen_suspend_begin(g, "AwaitResume");
+ LLVMBasicBlockRef end_bb = LLVMAppendBasicBlock(g->cur_fn_val, "AwaitEnd");
+
+ // From this point on, resuming the function continues at resume_bb,
+ // so the code below behaves as if it were running inside a suspend block.
+
+ // supply the awaiter return pointer
+ LLVMValueRef result_loc = (instruction->result_loc == nullptr) ?
+ nullptr : ir_llvm_value(g, instruction->result_loc);
+ if (type_has_bits(result_type)) {
+ LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_ret_start + 1, "");
+ if (result_loc == nullptr) {
+ // no copy needed
+ LLVMBuildStore(g->builder, LLVMConstNull(LLVMGetElementType(LLVMTypeOf(awaiter_ret_ptr_ptr))),
+ awaiter_ret_ptr_ptr);
+ } else {
+ LLVMBuildStore(g->builder, result_loc, awaiter_ret_ptr_ptr);
+ }
+ }
+
+ // supply the error return trace pointer
+ if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) {
+ LLVMValueRef my_err_ret_trace_val = get_cur_err_ret_trace_val(g, instruction->base.scope);
+ assert(my_err_ret_trace_val != nullptr);
+ LLVMValueRef err_ret_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr,
+ frame_index_trace_arg(g, result_type) + 1, "");
+ LLVMBuildStore(g->builder, my_err_ret_trace_val, err_ret_trace_ptr_ptr);
+ }
+
+ // caller's own frame pointer
+ LLVMValueRef awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, "");
+ LLVMValueRef awaiter_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_awaiter_index, "");
+ LLVMValueRef prev_val = gen_maybe_atomic_op(g, LLVMAtomicRMWBinOpXchg, awaiter_ptr, awaiter_init_val,
+ LLVMAtomicOrderingRelease);
+
+ LLVMBasicBlockRef bad_await_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadAwait");
+ LLVMBasicBlockRef complete_suspend_block = LLVMAppendBasicBlock(g->cur_fn_val, "CompleteSuspend");
+ LLVMBasicBlockRef early_return_block = LLVMAppendBasicBlock(g->cur_fn_val, "EarlyReturn");
+
+ LLVMValueRef all_ones = LLVMConstAllOnes(usize_type_ref);
+ LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, prev_val, bad_await_block, 2);
+
+ LLVMAddCase(switch_instr, zero, complete_suspend_block);
+ LLVMAddCase(switch_instr, all_ones, early_return_block);
+
+ // We discovered that another awaiter was already here.
+ LLVMPositionBuilderAtEnd(g->builder, bad_await_block);
+ gen_assertion(g, PanicMsgIdBadAwait, &instruction->base);
+
+ // Rely on the target to resume us from suspension.
+ LLVMPositionBuilderAtEnd(g->builder, complete_suspend_block);
+ LLVMBuildRetVoid(g->builder);
+
+ // Early return: The async function has already completed. We must copy the result and
+ // the error return trace if applicable.
+ LLVMPositionBuilderAtEnd(g->builder, early_return_block);
+ if (type_has_bits(result_type) && result_loc != nullptr) {
+ LLVMValueRef their_result_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_ret_start, "");
+ LLVMValueRef their_result_ptr = LLVMBuildLoad(g->builder, their_result_ptr_ptr, "");
+ LLVMTypeRef ptr_u8 = LLVMPointerType(LLVMInt8Type(), 0);
+ LLVMValueRef dest_ptr_casted = LLVMBuildBitCast(g->builder, result_loc, ptr_u8, "");
+ LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, their_result_ptr, ptr_u8, "");
+ bool is_volatile = false;
+ uint32_t abi_align = get_abi_alignment(g, result_type);
+ LLVMValueRef byte_count_val = LLVMConstInt(usize_type_ref, type_size(g, result_type), false);
+ ZigLLVMBuildMemCpy(g->builder,
+ dest_ptr_casted, abi_align,
+ src_ptr_casted, abi_align, byte_count_val, is_volatile);
+ }
+ if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) {
+ LLVMValueRef their_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr,
+ frame_index_trace_arg(g, result_type), "");
+ LLVMValueRef src_trace_ptr = LLVMBuildLoad(g->builder, their_trace_ptr_ptr, "");
+ LLVMValueRef dest_trace_ptr = get_cur_err_ret_trace_val(g, instruction->base.scope);
+ LLVMValueRef args[] = { dest_trace_ptr, src_trace_ptr };
+ ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2,
+ get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
+ }
+ LLVMBuildBr(g->builder, end_bb);
+
+ LLVMPositionBuilderAtEnd(g->builder, resume_bb);
+ gen_assert_resume_id(g, &instruction->base, ResumeIdReturn, PanicMsgIdResumedAnAwaitingFn, nullptr);
+ LLVMBuildBr(g->builder, end_bb);
+
+ LLVMPositionBuilderAtEnd(g->builder, end_bb);
+ if (type_has_bits(result_type) && result_loc != nullptr) {
+ return get_handle_value(g, result_loc, result_type, ptr_result_type);
+ }
+ return nullptr;
+}
+
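The atomicrmw exchange on the frame's awaiter field above is what decides between suspending and returning early. A minimal sketch of that handshake, assuming only the three awaiter states implied by the switch (zero, all-ones, anything else) and written with std::atomic instead of generated IR; every name here is illustrative, not a compiler symbol:

    #include <atomic>
    #include <cstdint>
    #include <cstdlib>

    enum class AwaitOutcome { Suspend, EarlyReturn };

    // Mirrors the switch on prev_val: 0 -> CompleteSuspend, all-ones -> EarlyReturn,
    // any other value -> BadAwait (two frames awaiting the same target).
    AwaitOutcome register_awaiter(std::atomic<std::uintptr_t> &awaiter_slot,
                                  std::uintptr_t my_frame_addr) {
        std::uintptr_t prev = awaiter_slot.exchange(my_frame_addr, std::memory_order_release);
        if (prev == 0)
            return AwaitOutcome::Suspend;      // target still running; it resumes us when done
        if (prev == UINTPTR_MAX)
            return AwaitOutcome::EarlyReturn;  // target already finished; copy the result now
        std::abort();                          // corresponds to PanicMsgIdBadAwait
    }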
+static LLVMValueRef ir_render_resume(CodeGen *g, IrExecutable *executable, IrInstructionResume *instruction) {
+ LLVMValueRef frame = ir_llvm_value(g, instruction->frame);
+ ZigType *frame_type = instruction->frame->value.type;
+ assert(frame_type->id == ZigTypeIdAnyFrame);
+
+ gen_resume(g, nullptr, frame, ResumeIdManual);
+ return nullptr;
+}
+
+static LLVMValueRef ir_render_frame_size(CodeGen *g, IrExecutable *executable,
+ IrInstructionFrameSizeGen *instruction)
+{
+ LLVMValueRef fn_val = ir_llvm_value(g, instruction->fn);
+ return gen_frame_size(g, fn_val);
+}
+
+static LLVMValueRef ir_render_spill_begin(CodeGen *g, IrExecutable *executable,
+ IrInstructionSpillBegin *instruction)
+{
+ if (!fn_is_async(g->cur_fn))
+ return nullptr;
+
+ switch (instruction->spill_id) {
+ case SpillIdInvalid:
+ zig_unreachable();
+ case SpillIdRetErrCode: {
+ LLVMValueRef operand = ir_llvm_value(g, instruction->operand);
+ LLVMValueRef ptr = ir_llvm_value(g, g->cur_fn->err_code_spill);
+ LLVMBuildStore(g->builder, operand, ptr);
+ return nullptr;
+ }
+
+ }
+ zig_unreachable();
+}
+
+static LLVMValueRef ir_render_spill_end(CodeGen *g, IrExecutable *executable, IrInstructionSpillEnd *instruction) {
+ if (!fn_is_async(g->cur_fn))
+ return ir_llvm_value(g, instruction->begin->operand);
+
+ switch (instruction->begin->spill_id) {
+ case SpillIdInvalid:
+ zig_unreachable();
+ case SpillIdRetErrCode: {
+ LLVMValueRef ptr = ir_llvm_value(g, g->cur_fn->err_code_spill);
+ return LLVMBuildLoad(g->builder, ptr, "");
+ }
+
+ }
+ zig_unreachable();
+}
+
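The two spill handlers above implement a simple contract: in an async function, a value that must survive a suspension is stored into a frame slot at SpillBegin and reloaded at SpillEnd, while in a blocking function the original value is simply passed through. A rough illustration of that contract, assuming a hypothetical frame with a single err_code_spill slot:

    #include <cstdint>

    struct Frame { std::uint16_t err_code_spill; };  // hypothetical stand-in for the real frame slot

    // maybe_suspend models a suspension point after which temporaries cannot be
    // relied upon in an async function.
    std::uint16_t spill_across_suspend(Frame *frame, bool fn_is_async, std::uint16_t err_code,
                                       void (*maybe_suspend)()) {
        if (fn_is_async)
            frame->err_code_spill = err_code;       // SpillBegin: store into the frame
        maybe_suspend();
        return fn_is_async ? frame->err_code_spill  // SpillEnd (async): reload from the frame
                           : err_code;              // SpillEnd (blocking): forward the value
    }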
static void set_debug_location(CodeGen *g, IrInstruction *instruction) {
AstNode *source_node = instruction->source_node;
Scope *scope = instruction->scope;
@@ -5445,7 +5654,7 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdSetRuntimeSafety:
case IrInstructionIdSetFloatMode:
case IrInstructionIdArrayType:
- case IrInstructionIdPromiseType:
+ case IrInstructionIdAnyFrameType:
case IrInstructionIdSliceType:
case IrInstructionIdSizeOf:
case IrInstructionIdSwitchTarget:
@@ -5485,8 +5694,6 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdTagType:
case IrInstructionIdExport:
case IrInstructionIdErrorUnion:
- case IrInstructionIdPromiseResultType:
- case IrInstructionIdAwaitBookkeeping:
case IrInstructionIdAddImplicitReturnType:
case IrInstructionIdIntCast:
case IrInstructionIdFloatCast:
@@ -5508,17 +5715,19 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdCallSrc:
case IrInstructionIdAllocaSrc:
case IrInstructionIdEndExpr:
- case IrInstructionIdAllocaGen:
case IrInstructionIdImplicitCast:
case IrInstructionIdResolveResult:
case IrInstructionIdResetResult:
- case IrInstructionIdResultPtr:
case IrInstructionIdContainerInitList:
case IrInstructionIdSliceSrc:
case IrInstructionIdRef:
case IrInstructionIdBitCastSrc:
case IrInstructionIdTestErrSrc:
case IrInstructionIdUnionInitNamedField:
+ case IrInstructionIdFrameType:
+ case IrInstructionIdFrameSizeSrc:
+ case IrInstructionIdAllocaGen:
+ case IrInstructionIdAwaitSrc:
zig_unreachable();
case IrInstructionIdDeclVarGen:
@@ -5597,8 +5806,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_return_address(g, executable, (IrInstructionReturnAddress *)instruction);
case IrInstructionIdFrameAddress:
return ir_render_frame_address(g, executable, (IrInstructionFrameAddress *)instruction);
- case IrInstructionIdHandle:
- return ir_render_handle(g, executable, (IrInstructionHandle *)instruction);
+ case IrInstructionIdFrameHandle:
+ return ir_render_handle(g, executable, (IrInstructionFrameHandle *)instruction);
case IrInstructionIdOverflowOp:
return ir_render_overflow_op(g, executable, (IrInstructionOverflowOp *)instruction);
case IrInstructionIdTestErrGen:
@@ -5641,44 +5850,12 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_align_cast(g, executable, (IrInstructionAlignCast *)instruction);
case IrInstructionIdErrorReturnTrace:
return ir_render_error_return_trace(g, executable, (IrInstructionErrorReturnTrace *)instruction);
- case IrInstructionIdCancel:
- return ir_render_cancel(g, executable, (IrInstructionCancel *)instruction);
- case IrInstructionIdGetImplicitAllocator:
- return ir_render_get_implicit_allocator(g, executable, (IrInstructionGetImplicitAllocator *)instruction);
- case IrInstructionIdCoroId:
- return ir_render_coro_id(g, executable, (IrInstructionCoroId *)instruction);
- case IrInstructionIdCoroAlloc:
- return ir_render_coro_alloc(g, executable, (IrInstructionCoroAlloc *)instruction);
- case IrInstructionIdCoroSize:
- return ir_render_coro_size(g, executable, (IrInstructionCoroSize *)instruction);
- case IrInstructionIdCoroBegin:
- return ir_render_coro_begin(g, executable, (IrInstructionCoroBegin *)instruction);
- case IrInstructionIdCoroAllocFail:
- return ir_render_coro_alloc_fail(g, executable, (IrInstructionCoroAllocFail *)instruction);
- case IrInstructionIdCoroSuspend:
- return ir_render_coro_suspend(g, executable, (IrInstructionCoroSuspend *)instruction);
- case IrInstructionIdCoroEnd:
- return ir_render_coro_end(g, executable, (IrInstructionCoroEnd *)instruction);
- case IrInstructionIdCoroFree:
- return ir_render_coro_free(g, executable, (IrInstructionCoroFree *)instruction);
- case IrInstructionIdCoroResume:
- return ir_render_coro_resume(g, executable, (IrInstructionCoroResume *)instruction);
- case IrInstructionIdCoroSave:
- return ir_render_coro_save(g, executable, (IrInstructionCoroSave *)instruction);
- case IrInstructionIdCoroPromise:
- return ir_render_coro_promise(g, executable, (IrInstructionCoroPromise *)instruction);
- case IrInstructionIdCoroAllocHelper:
- return ir_render_coro_alloc_helper(g, executable, (IrInstructionCoroAllocHelper *)instruction);
case IrInstructionIdAtomicRmw:
return ir_render_atomic_rmw(g, executable, (IrInstructionAtomicRmw *)instruction);
case IrInstructionIdAtomicLoad:
return ir_render_atomic_load(g, executable, (IrInstructionAtomicLoad *)instruction);
case IrInstructionIdSaveErrRetAddr:
return ir_render_save_err_ret_addr(g, executable, (IrInstructionSaveErrRetAddr *)instruction);
- case IrInstructionIdMergeErrRetTraces:
- return ir_render_merge_err_ret_traces(g, executable, (IrInstructionMergeErrRetTraces *)instruction);
- case IrInstructionIdMarkErrRetTracePtr:
- return ir_render_mark_err_ret_trace_ptr(g, executable, (IrInstructionMarkErrRetTracePtr *)instruction);
case IrInstructionIdFloatOp:
return ir_render_float_op(g, executable, (IrInstructionFloatOp *)instruction);
case IrInstructionIdMulAdd:
@@ -5695,6 +5872,20 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_resize_slice(g, executable, (IrInstructionResizeSlice *)instruction);
case IrInstructionIdPtrOfArrayToSlice:
return ir_render_ptr_of_array_to_slice(g, executable, (IrInstructionPtrOfArrayToSlice *)instruction);
+ case IrInstructionIdSuspendBegin:
+ return ir_render_suspend_begin(g, executable, (IrInstructionSuspendBegin *)instruction);
+ case IrInstructionIdSuspendFinish:
+ return ir_render_suspend_finish(g, executable, (IrInstructionSuspendFinish *)instruction);
+ case IrInstructionIdResume:
+ return ir_render_resume(g, executable, (IrInstructionResume *)instruction);
+ case IrInstructionIdFrameSizeGen:
+ return ir_render_frame_size(g, executable, (IrInstructionFrameSizeGen *)instruction);
+ case IrInstructionIdAwaitGen:
+ return ir_render_await(g, executable, (IrInstructionAwaitGen *)instruction);
+ case IrInstructionIdSpillBegin:
+ return ir_render_spill_begin(g, executable, (IrInstructionSpillBegin *)instruction);
+ case IrInstructionIdSpillEnd:
+ return ir_render_spill_end(g, executable, (IrInstructionSpillEnd *)instruction);
}
zig_unreachable();
}
@@ -5704,6 +5895,7 @@ static void ir_render(CodeGen *g, ZigFn *fn_entry) {
IrExecutable *executable = &fn_entry->analyzed_executable;
assert(executable->basic_block_list.length > 0);
+
for (size_t block_i = 0; block_i < executable->basic_block_list.length; block_i += 1) {
IrBasicBlock *current_block = executable->basic_block_list.at(block_i);
assert(current_block->llvm_block);
@@ -5894,7 +6086,6 @@ static LLVMValueRef pack_const_int(CodeGen *g, LLVMTypeRef big_int_type_ref, Con
case ZigTypeIdPointer:
case ZigTypeIdFn:
case ZigTypeIdOptional:
- case ZigTypeIdPromise:
{
LLVMValueRef ptr_val = gen_const_val(g, const_val, "");
LLVMValueRef ptr_size_int_val = LLVMConstPtrToInt(ptr_val, g->builtin_types.entry_usize->llvm_type);
@@ -5957,7 +6148,10 @@ static LLVMValueRef pack_const_int(CodeGen *g, LLVMTypeRef big_int_type_ref, Con
}
return val;
}
-
+ case ZigTypeIdFnFrame:
+ zig_panic("TODO bit pack an async function frame");
+ case ZigTypeIdAnyFrame:
+ zig_panic("TODO bit pack an anyframe");
}
zig_unreachable();
}
@@ -6110,6 +6304,9 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c
break;
}
+ if ((err = type_resolve(g, type_entry, ResolveStatusLLVMFull)))
+ zig_unreachable();
+
switch (type_entry->id) {
case ZigTypeIdInt:
return bigint_to_llvm_const(get_llvm_type(g, type_entry), &const_val->data.x_bigint);
@@ -6181,6 +6378,7 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c
LLVMValueRef *fields = allocate<LLVMValueRef>(type_entry->data.structure.gen_field_count);
size_t src_field_count = type_entry->data.structure.src_field_count;
bool make_unnamed_struct = false;
+ assert(type_entry->data.structure.resolve_status == ResolveStatusLLVMFull);
if (type_entry->data.structure.layout == ContainerLayoutPacked) {
size_t src_field_index = 0;
while (src_field_index < src_field_count) {
@@ -6250,6 +6448,22 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c
LLVMValueRef val = gen_const_val(g, field_val, "");
fields[type_struct_field->gen_index] = val;
make_unnamed_struct = make_unnamed_struct || is_llvm_value_unnamed_type(g, field_val->type, val);
+
+ size_t end_pad_gen_index = (i + 1 < src_field_count) ?
+ type_entry->data.structure.fields[i + 1].gen_index :
+ type_entry->data.structure.gen_field_count;
+ size_t next_offset = (i + 1 < src_field_count) ?
+ type_entry->data.structure.fields[i + 1].offset : type_entry->abi_size;
+ if (end_pad_gen_index != SIZE_MAX) {
+ for (size_t gen_i = type_struct_field->gen_index + 1; gen_i < end_pad_gen_index;
+ gen_i += 1)
+ {
+ size_t pad_bytes = next_offset -
+ (type_struct_field->offset + type_struct_field->type_entry->abi_size);
+ LLVMTypeRef llvm_array_type = LLVMArrayType(LLVMInt8Type(), pad_bytes);
+ fields[gen_i] = LLVMGetUndef(llvm_array_type);
+ }
+ }
}
}
if (make_unnamed_struct) {
@@ -6437,13 +6651,18 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c
err_payload_value = gen_const_val(g, payload_val, "");
make_unnamed_struct = is_llvm_value_unnamed_type(g, payload_val->type, err_payload_value);
}
- LLVMValueRef fields[2];
+ LLVMValueRef fields[3];
fields[err_union_err_index] = err_tag_value;
fields[err_union_payload_index] = err_payload_value;
+ size_t field_count = 2;
+ if (type_entry->data.error_union.pad_llvm_type != nullptr) {
+ fields[2] = LLVMGetUndef(type_entry->data.error_union.pad_llvm_type);
+ field_count = 3;
+ }
if (make_unnamed_struct) {
- return LLVMConstStruct(fields, 2, false);
+ return LLVMConstStruct(fields, field_count, false);
} else {
- return LLVMConstNamedStruct(get_llvm_type(g, type_entry), fields, 2);
+ return LLVMConstNamedStruct(get_llvm_type(g, type_entry), fields, field_count);
}
}
}
@@ -6460,9 +6679,11 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
- case ZigTypeIdPromise:
zig_unreachable();
-
+ case ZigTypeIdFnFrame:
+ zig_panic("TODO");
+ case ZigTypeIdAnyFrame:
+ zig_panic("TODO");
}
zig_unreachable();
}
@@ -6546,12 +6767,20 @@ static void generate_error_name_table(CodeGen *g) {
static void build_all_basic_blocks(CodeGen *g, ZigFn *fn) {
IrExecutable *executable = &fn->analyzed_executable;
assert(executable->basic_block_list.length > 0);
+ LLVMValueRef fn_val = fn_llvm_value(g, fn);
+ LLVMBasicBlockRef first_bb = nullptr;
+ if (fn_is_async(fn)) {
+ first_bb = LLVMAppendBasicBlock(fn_val, "AsyncSwitch");
+ g->cur_preamble_llvm_block = first_bb;
+ }
for (size_t block_i = 0; block_i < executable->basic_block_list.length; block_i += 1) {
IrBasicBlock *bb = executable->basic_block_list.at(block_i);
- bb->llvm_block = LLVMAppendBasicBlock(fn_llvm_value(g, fn), bb->name_hint);
+ bb->llvm_block = LLVMAppendBasicBlock(fn_val, bb->name_hint);
}
- IrBasicBlock *entry_bb = executable->basic_block_list.at(0);
- LLVMPositionBuilderAtEnd(g->builder, entry_bb->llvm_block);
+ if (first_bb == nullptr) {
+ first_bb = executable->basic_block_list.at(0)->llvm_block;
+ }
+ LLVMPositionBuilderAtEnd(g->builder, first_bb);
}
static void gen_global_var(CodeGen *g, ZigVar *var, LLVMValueRef init_val,
@@ -6728,13 +6957,19 @@ static void do_code_gen(CodeGen *g) {
build_all_basic_blocks(g, fn_table_entry);
clear_debug_source_node(g);
- if (want_sret) {
- g->cur_ret_ptr = LLVMGetParam(fn, 0);
- } else if (handle_is_ptr(fn_type_id->return_type)) {
- g->cur_ret_ptr = build_alloca(g, fn_type_id->return_type, "result", 0);
- // TODO add debug info variable for this
+ bool is_async = fn_is_async(fn_table_entry);
+
+ if (is_async) {
+ g->cur_frame_ptr = LLVMGetParam(fn, 0);
} else {
- g->cur_ret_ptr = nullptr;
+ if (want_sret) {
+ g->cur_ret_ptr = LLVMGetParam(fn, 0);
+ } else if (handle_is_ptr(fn_type_id->return_type)) {
+ g->cur_ret_ptr = build_alloca(g, fn_type_id->return_type, "result", 0);
+ // TODO add debug info variable for this
+ } else {
+ g->cur_ret_ptr = nullptr;
+ }
}
uint32_t err_ret_trace_arg_index = get_err_ret_trace_arg_index(g, fn_table_entry);
@@ -6746,39 +6981,41 @@ static void do_code_gen(CodeGen *g) {
}
// error return tracing setup
- bool is_async = cc == CallingConventionAsync;
- bool have_err_ret_trace_stack = g->have_err_ret_tracing && fn_table_entry->calls_or_awaits_errorable_fn && !is_async && !have_err_ret_trace_arg;
+ bool have_err_ret_trace_stack = g->have_err_ret_tracing && fn_table_entry->calls_or_awaits_errorable_fn &&
+ !is_async && !have_err_ret_trace_arg;
LLVMValueRef err_ret_array_val = nullptr;
if (have_err_ret_trace_stack) {
ZigType *array_type = get_array_type(g, g->builtin_types.entry_usize, stack_trace_ptr_count);
err_ret_array_val = build_alloca(g, array_type, "error_return_trace_addresses", get_abi_alignment(g, array_type));
- // populate g->stack_trace_type
- (void)get_ptr_to_stack_trace_type(g);
- g->cur_err_ret_trace_val_stack = build_alloca(g, g->stack_trace_type, "error_return_trace", get_abi_alignment(g, g->stack_trace_type));
+ (void)get_llvm_type(g, get_stack_trace_type(g));
+ g->cur_err_ret_trace_val_stack = build_alloca(g, get_stack_trace_type(g), "error_return_trace",
+ get_abi_alignment(g, g->stack_trace_type));
} else {
g->cur_err_ret_trace_val_stack = nullptr;
}
- // allocate temporary stack data
- for (size_t alloca_i = 0; alloca_i < fn_table_entry->alloca_gen_list.length; alloca_i += 1) {
- IrInstructionAllocaGen *instruction = fn_table_entry->alloca_gen_list.at(alloca_i);
- ZigType *ptr_type = instruction->base.value.type;
- assert(ptr_type->id == ZigTypeIdPointer);
- ZigType *child_type = ptr_type->data.pointer.child_type;
- if (!type_has_bits(child_type))
- continue;
- if (instruction->base.ref_count == 0)
- continue;
- if (instruction->base.value.special != ConstValSpecialRuntime) {
- if (const_ptr_pointee(nullptr, g, &instruction->base.value, nullptr)->special !=
- ConstValSpecialRuntime)
- {
+ if (!is_async) {
+ // allocate temporary stack data
+ for (size_t alloca_i = 0; alloca_i < fn_table_entry->alloca_gen_list.length; alloca_i += 1) {
+ IrInstructionAllocaGen *instruction = fn_table_entry->alloca_gen_list.at(alloca_i);
+ ZigType *ptr_type = instruction->base.value.type;
+ assert(ptr_type->id == ZigTypeIdPointer);
+ ZigType *child_type = ptr_type->data.pointer.child_type;
+ if (!type_has_bits(child_type))
continue;
+ if (instruction->base.ref_count == 0)
+ continue;
+ if (instruction->base.value.special != ConstValSpecialRuntime) {
+ if (const_ptr_pointee(nullptr, g, &instruction->base.value, nullptr)->special !=
+ ConstValSpecialRuntime)
+ {
+ continue;
+ }
}
+ instruction->base.llvm_value = build_alloca(g, child_type, instruction->name_hint,
+ get_ptr_align(g, ptr_type));
}
- instruction->base.llvm_value = build_alloca(g, child_type, instruction->name_hint,
- get_ptr_align(g, ptr_type));
}
ZigType *import = get_scope_import(&fn_table_entry->fndef_scope->base);
@@ -6816,7 +7053,7 @@ static void do_code_gen(CodeGen *g) {
} else if (is_c_abi) {
fn_walk_var.data.vars.var = var;
iter_function_params_c_abi(g, fn_table_entry->type_entry, &fn_walk_var, var->src_arg_index);
- } else {
+ } else if (!is_async) {
ZigType *gen_type;
FnGenParamInfo *gen_info = &fn_table_entry->type_entry->data.fn.gen_param_info[var->src_arg_index];
assert(gen_info->gen_index != SIZE_MAX);
@@ -6867,14 +7104,76 @@ static void do_code_gen(CodeGen *g) {
gen_store(g, LLVMConstInt(usize->llvm_type, stack_trace_ptr_count, false), len_field_ptr, get_pointer_to_type(g, usize, false));
}
- // create debug variable declarations for parameters
- // rely on the first variables in the variable_list being parameters.
- FnWalk fn_walk_init = {};
- fn_walk_init.id = FnWalkIdInits;
- fn_walk_init.data.inits.fn = fn_table_entry;
- fn_walk_init.data.inits.llvm_fn = fn;
- fn_walk_init.data.inits.gen_i = gen_i_init;
- walk_function_params(g, fn_table_entry->type_entry, &fn_walk_init);
+ if (is_async) {
+ (void)get_llvm_type(g, fn_table_entry->frame_type);
+ g->cur_resume_block_count = 0;
+
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
+ LLVMValueRef size_val = LLVMConstInt(usize_type_ref, fn_table_entry->frame_type->abi_size, false);
+ ZigLLVMFunctionSetPrefixData(fn_table_entry->llvm_value, size_val);
+
+ if (!g->strip_debug_symbols) {
+ AstNode *source_node = fn_table_entry->proto_node;
+ ZigLLVMSetCurrentDebugLocation(g->builder, (int)source_node->line + 1,
+ (int)source_node->column + 1, get_di_scope(g, fn_table_entry->child_scope));
+ }
+ IrExecutable *executable = &fn_table_entry->analyzed_executable;
+ LLVMBasicBlockRef bad_resume_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadResume");
+ LLVMPositionBuilderAtEnd(g->builder, bad_resume_block);
+ gen_assertion_scope(g, PanicMsgIdBadResume, fn_table_entry->child_scope);
+
+ LLVMPositionBuilderAtEnd(g->builder, g->cur_preamble_llvm_block);
+ render_async_spills(g);
+ g->cur_async_awaiter_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, frame_awaiter_index, "");
+ LLVMValueRef resume_index_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, frame_resume_index, "");
+ g->cur_async_resume_index_ptr = resume_index_ptr;
+
+ if (type_has_bits(fn_type_id->return_type)) {
+ LLVMValueRef cur_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, frame_ret_start, "");
+ g->cur_ret_ptr = LLVMBuildLoad(g->builder, cur_ret_ptr_ptr, "");
+ }
+ uint32_t trace_field_index_stack = UINT32_MAX;
+ if (codegen_fn_has_err_ret_tracing_stack(g, fn_table_entry, true)) {
+ trace_field_index_stack = frame_index_trace_stack(g, fn_type_id);
+ g->cur_err_ret_trace_val_stack = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
+ trace_field_index_stack, "");
+ }
+
+ LLVMValueRef resume_index = LLVMBuildLoad(g->builder, resume_index_ptr, "");
+ LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, resume_index, bad_resume_block, 4);
+ g->cur_async_switch_instr = switch_instr;
+
+ LLVMValueRef zero = LLVMConstNull(usize_type_ref);
+ IrBasicBlock *entry_block = executable->basic_block_list.at(0);
+ LLVMAddCase(switch_instr, zero, entry_block->llvm_block);
+ g->cur_resume_block_count += 1;
+ LLVMPositionBuilderAtEnd(g->builder, entry_block->llvm_block);
+ if (trace_field_index_stack != UINT32_MAX) {
+ if (codegen_fn_has_err_ret_tracing_arg(g, fn_type_id->return_type)) {
+ LLVMValueRef trace_ptr_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
+ frame_index_trace_arg(g, fn_type_id->return_type), "");
+ LLVMValueRef zero_ptr = LLVMConstNull(LLVMGetElementType(LLVMTypeOf(trace_ptr_ptr)));
+ LLVMBuildStore(g->builder, zero_ptr, trace_ptr_ptr);
+ }
+
+ LLVMValueRef trace_field_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
+ trace_field_index_stack, "");
+ LLVMValueRef addrs_field_ptr = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
+ trace_field_index_stack + 1, "");
+
+ gen_init_stack_trace(g, trace_field_ptr, addrs_field_ptr);
+ }
+ render_async_var_decls(g, entry_block->instruction_list.at(0)->scope);
+ } else {
+ // create debug variable declarations for parameters
+ // rely on the first variables in the variable_list being parameters.
+ FnWalk fn_walk_init = {};
+ fn_walk_init.id = FnWalkIdInits;
+ fn_walk_init.data.inits.fn = fn_table_entry;
+ fn_walk_init.data.inits.llvm_fn = fn;
+ fn_walk_init.data.inits.gen_i = gen_i_init;
+ walk_function_params(g, fn_table_entry->type_entry, &fn_walk_init);
+ }
ir_render(g, fn_table_entry);
@@ -6893,8 +7192,6 @@ static void do_code_gen(CodeGen *g) {
LLVMDumpModule(g->module);
}
- // in release mode, we're sooooo confident that we've generated correct ir,
- // that we skip the verify module step in order to get better performance.
#ifndef NDEBUG
char *error = nullptr;
LLVMVerifyModule(g->module, LLVMAbortProcessAction, &error);
@@ -7163,16 +7460,8 @@ static void define_builtin_types(CodeGen *g) {
g->primitive_type_table.put(&entry->name, entry);
}
- {
- ZigType *entry = get_promise_type(g, nullptr);
- g->primitive_type_table.put(&entry->name, entry);
- entry->size_in_bits = g->builtin_types.entry_usize->size_in_bits;
- entry->abi_align = g->builtin_types.entry_usize->abi_align;
- entry->abi_size = g->builtin_types.entry_usize->abi_size;
- }
}
-
static BuiltinFnEntry *create_builtin_fn(CodeGen *g, BuiltinFnId id, const char *name, size_t count) {
BuiltinFnEntry *builtin_fn = allocate<BuiltinFnEntry>(1);
buf_init_from_str(&builtin_fn->name, name);
@@ -7185,8 +7474,6 @@ static BuiltinFnEntry *create_builtin_fn(CodeGen *g, BuiltinFnId id, const char
static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdBreakpoint, "breakpoint", 0);
create_builtin_fn(g, BuiltinFnIdReturnAddress, "returnAddress", 0);
- create_builtin_fn(g, BuiltinFnIdFrameAddress, "frameAddress", 0);
- create_builtin_fn(g, BuiltinFnIdHandle, "handle", 0);
create_builtin_fn(g, BuiltinFnIdMemcpy, "memcpy", 3);
create_builtin_fn(g, BuiltinFnIdMemset, "memset", 3);
create_builtin_fn(g, BuiltinFnIdSizeof, "sizeOf", 1);
@@ -7262,13 +7549,13 @@ static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdFloor, "floor", 2);
create_builtin_fn(g, BuiltinFnIdCeil, "ceil", 2);
create_builtin_fn(g, BuiltinFnIdTrunc, "trunc", 2);
- //Needs library support on Windows
- //create_builtin_fn(g, BuiltinFnIdNearbyInt, "nearbyInt", 2);
+ create_builtin_fn(g, BuiltinFnIdNearbyInt, "nearbyInt", 2);
create_builtin_fn(g, BuiltinFnIdRound, "round", 2);
create_builtin_fn(g, BuiltinFnIdMulAdd, "mulAdd", 4);
create_builtin_fn(g, BuiltinFnIdInlineCall, "inlineCall", SIZE_MAX);
create_builtin_fn(g, BuiltinFnIdNoInlineCall, "noInlineCall", SIZE_MAX);
create_builtin_fn(g, BuiltinFnIdNewStackCall, "newStackCall", SIZE_MAX);
+ create_builtin_fn(g, BuiltinFnIdAsyncCall, "asyncCall", SIZE_MAX);
create_builtin_fn(g, BuiltinFnIdTypeId, "typeId", 1);
create_builtin_fn(g, BuiltinFnIdShlExact, "shlExact", 2);
create_builtin_fn(g, BuiltinFnIdShrExact, "shrExact", 2);
@@ -7287,6 +7574,10 @@ static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdThis, "This", 0);
create_builtin_fn(g, BuiltinFnIdHasDecl, "hasDecl", 2);
create_builtin_fn(g, BuiltinFnIdUnionInit, "unionInit", 3);
+ create_builtin_fn(g, BuiltinFnIdFrameHandle, "frame", 0);
+ create_builtin_fn(g, BuiltinFnIdFrameType, "Frame", 1);
+ create_builtin_fn(g, BuiltinFnIdFrameAddress, "frameAddress", 0);
+ create_builtin_fn(g, BuiltinFnIdFrameSize, "frameSize", 1);
}
static const char *bool_to_str(bool b) {
@@ -7598,7 +7889,8 @@ Buf *codegen_generate_builtin_source(CodeGen *g) {
" BoundFn: Fn,\n"
" ArgTuple: void,\n"
" Opaque: void,\n"
- " Promise: Promise,\n"
+ " Frame: void,\n"
+ " AnyFrame: AnyFrame,\n"
" Vector: Vector,\n"
" EnumLiteral: void,\n"
"\n\n"
@@ -7711,11 +8003,10 @@ Buf *codegen_generate_builtin_source(CodeGen *g) {
" is_generic: bool,\n"
" is_var_args: bool,\n"
" return_type: ?type,\n"
- " async_allocator_type: ?type,\n"
" args: []FnArg,\n"
" };\n"
"\n"
- " pub const Promise = struct {\n"
+ " pub const AnyFrame = struct {\n"
" child: ?type,\n"
" };\n"
"\n"
@@ -8308,6 +8599,12 @@ void add_cc_args(CodeGen *g, ZigList<const char *> &args, const char *out_dep_pa
args.append("-g");
}
+ if (codegen_have_frame_pointer(g)) {
+ args.append("-fno-omit-frame-pointer");
+ } else {
+ args.append("-fomit-frame-pointer");
+ }
+
switch (g->build_mode) {
case BuildModeDebug:
// windows c runtime requires -D_DEBUG if using debug libraries
@@ -8320,7 +8617,6 @@ void add_cc_args(CodeGen *g, ZigList<const char *> &args, const char *out_dep_pa
} else {
args.append("-fno-stack-protector");
}
- args.append("-fno-omit-frame-pointer");
break;
case BuildModeSafeRelease:
// See the comment in the BuildModeFastRelease case for why we pass -O2 rather
@@ -8334,7 +8630,6 @@ void add_cc_args(CodeGen *g, ZigList<const char *> &args, const char *out_dep_pa
} else {
args.append("-fno-stack-protector");
}
- args.append("-fomit-frame-pointer");
break;
case BuildModeFastRelease:
args.append("-DNDEBUG");
@@ -8345,13 +8640,11 @@ void add_cc_args(CodeGen *g, ZigList<const char *> &args, const char *out_dep_pa
// running in -O2 and thus the -O3 path has been tested less.
args.append("-O2");
args.append("-fno-stack-protector");
- args.append("-fomit-frame-pointer");
break;
case BuildModeSmallRelease:
args.append("-DNDEBUG");
args.append("-Os");
args.append("-fno-stack-protector");
- args.append("-fomit-frame-pointer");
break;
}
@@ -8878,7 +9171,8 @@ static void prepend_c_type_to_decl_list(CodeGen *g, GenH *gen_h, ZigType *type_e
case ZigTypeIdArgTuple:
case ZigTypeIdErrorUnion:
case ZigTypeIdErrorSet:
- case ZigTypeIdPromise:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
zig_unreachable();
case ZigTypeIdVoid:
case ZigTypeIdUnreachable:
@@ -9062,7 +9356,8 @@ static void get_c_type(CodeGen *g, GenH *gen_h, ZigType *type_entry, Buf *out_bu
case ZigTypeIdUndefined:
case ZigTypeIdNull:
case ZigTypeIdArgTuple:
- case ZigTypeIdPromise:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
zig_unreachable();
}
}
@@ -9229,9 +9524,11 @@ static void gen_h_file(CodeGen *g) {
case ZigTypeIdArgTuple:
case ZigTypeIdOptional:
case ZigTypeIdFn:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
zig_unreachable();
+
case ZigTypeIdEnum:
if (type_entry->data.enumeration.layout == ContainerLayoutExtern) {
fprintf(out_h, "enum %s {\n", buf_ptr(type_h_name(type_entry)));
@@ -9770,3 +10067,18 @@ CodeGen *codegen_create(Buf *main_pkg_path, Buf *root_src_path, const ZigTarget
return g;
}
+bool codegen_fn_has_err_ret_tracing_arg(CodeGen *g, ZigType *return_type) {
+ return g->have_err_ret_tracing &&
+ (return_type->id == ZigTypeIdErrorUnion ||
+ return_type->id == ZigTypeIdErrorSet);
+}
+
+bool codegen_fn_has_err_ret_tracing_stack(CodeGen *g, ZigFn *fn, bool is_async) {
+ if (is_async) {
+ return g->have_err_ret_tracing && (fn->calls_or_awaits_errorable_fn ||
+ codegen_fn_has_err_ret_tracing_arg(g, fn->type_entry->data.fn.fn_type_id.return_type));
+ } else {
+ return g->have_err_ret_tracing && fn->calls_or_awaits_errorable_fn &&
+ !codegen_fn_has_err_ret_tracing_arg(g, fn->type_entry->data.fn.fn_type_id.return_type);
+ }
+}
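Throughout the codegen changes above, async frames are addressed through a handful of fixed header indices (frame_fn_ptr_index, frame_resume_index, frame_awaiter_index, frame_ret_start, plus the trace indices). As a reading aid, and assuming only the ordering that those names and the GEPs above suggest rather than the constants' actual values, the header can be pictured roughly like this:

    #include <cstdint>

    // Hypothetical sketch; the authoritative layout is the frame_* index constants
    // defined elsewhere in the compiler, not this struct.
    struct AsyncFrameHeader {
        void (*fn)(void *frame);      // frame_fn_ptr_index: function to run on resume
        std::uintptr_t resume_index;  // frame_resume_index: selects which suspend point to resume at
        std::uintptr_t awaiter;       // frame_awaiter_index: 0 = no awaiter yet, all-ones = result
                                      // already available, otherwise the awaiting frame's address
        // frame_ret_start onward: result pointer, awaiter result pointer, in-frame result
        // storage, then optional error-return-trace fields, spilled locals and arguments.
    };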
diff --git a/src/codegen.hpp b/src/codegen.hpp
index cdff61a26f..794a0fd5a6 100644
--- a/src/codegen.hpp
+++ b/src/codegen.hpp
@@ -61,5 +61,7 @@ Buf *codegen_generate_builtin_source(CodeGen *g);
TargetSubsystem detect_subsystem(CodeGen *g);
void codegen_release_caches(CodeGen *codegen);
+bool codegen_fn_has_err_ret_tracing_arg(CodeGen *g, ZigType *return_type);
+bool codegen_fn_has_err_ret_tracing_stack(CodeGen *g, ZigFn *fn, bool is_async);
#endif
diff --git a/src/ir.cpp b/src/ir.cpp
index fde2b972f8..ddaf82893a 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -26,6 +26,7 @@ struct IrBuilder {
CodeGen *codegen;
IrExecutable *exec;
IrBasicBlock *current_basic_block;
+ AstNode *main_block_node;
};
struct IrAnalyze {
@@ -99,7 +100,6 @@ struct ConstCastOnly {
ConstCastErrUnionErrSetMismatch *error_union_error_set;
ConstCastTypeMismatch *type_mismatch;
ConstCastOnly *return_type;
- ConstCastOnly *async_allocator_type;
ConstCastOnly *null_wrap_ptr_child;
ConstCastArg fn_arg;
ConstCastArgNoAlias arg_no_alias;
@@ -305,6 +305,7 @@ static bool types_have_same_zig_comptime_repr(ZigType *a, ZigType *b) {
case ZigTypeIdBoundFn:
case ZigTypeIdErrorSet:
case ZigTypeIdOpaque:
+ case ZigTypeIdAnyFrame:
return true;
case ZigTypeIdFloat:
return a->data.floating.bit_count == b->data.floating.bit_count;
@@ -319,8 +320,8 @@ static bool types_have_same_zig_comptime_repr(ZigType *a, ZigType *b) {
case ZigTypeIdUnion:
case ZigTypeIdFn:
case ZigTypeIdArgTuple:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
return false;
}
zig_unreachable();
@@ -565,8 +566,8 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionArrayType *) {
return IrInstructionIdArrayType;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionPromiseType *) {
- return IrInstructionIdPromiseType;
+static constexpr IrInstructionId ir_instruction_id(IrInstructionAnyFrameType *) {
+ return IrInstructionIdAnyFrameType;
}
static constexpr IrInstructionId ir_instruction_id(IrInstructionSliceType *) {
@@ -761,8 +762,20 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionFrameAddress *)
return IrInstructionIdFrameAddress;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionHandle *) {
- return IrInstructionIdHandle;
+static constexpr IrInstructionId ir_instruction_id(IrInstructionFrameHandle *) {
+ return IrInstructionIdFrameHandle;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionFrameType *) {
+ return IrInstructionIdFrameType;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionFrameSizeSrc *) {
+ return IrInstructionIdFrameSizeSrc;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionFrameSizeGen *) {
+ return IrInstructionIdFrameSizeGen;
}
static constexpr IrInstructionId ir_instruction_id(IrInstructionAlignOf *) {
@@ -933,10 +946,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionResetResult *) {
return IrInstructionIdResetResult;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionResultPtr *) {
- return IrInstructionIdResultPtr;
-}
-
static constexpr IrInstructionId ir_instruction_id(IrInstructionPtrOfArrayToSlice *) {
return IrInstructionIdPtrOfArrayToSlice;
}
@@ -961,62 +970,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionErrorUnion *) {
return IrInstructionIdErrorUnion;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCancel *) {
- return IrInstructionIdCancel;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionGetImplicitAllocator *) {
- return IrInstructionIdGetImplicitAllocator;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroId *) {
- return IrInstructionIdCoroId;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroAlloc *) {
- return IrInstructionIdCoroAlloc;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroSize *) {
- return IrInstructionIdCoroSize;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroBegin *) {
- return IrInstructionIdCoroBegin;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroAllocFail *) {
- return IrInstructionIdCoroAllocFail;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroSuspend *) {
- return IrInstructionIdCoroSuspend;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroEnd *) {
- return IrInstructionIdCoroEnd;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroFree *) {
- return IrInstructionIdCoroFree;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroResume *) {
- return IrInstructionIdCoroResume;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroSave *) {
- return IrInstructionIdCoroSave;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroPromise *) {
- return IrInstructionIdCoroPromise;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroAllocHelper *) {
- return IrInstructionIdCoroAllocHelper;
-}
-
static constexpr IrInstructionId ir_instruction_id(IrInstructionAtomicRmw *) {
return IrInstructionIdAtomicRmw;
}
@@ -1025,14 +978,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionAtomicLoad *) {
return IrInstructionIdAtomicLoad;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionPromiseResultType *) {
- return IrInstructionIdPromiseResultType;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionAwaitBookkeeping *) {
- return IrInstructionIdAwaitBookkeeping;
-}
-
static constexpr IrInstructionId ir_instruction_id(IrInstructionSaveErrRetAddr *) {
return IrInstructionIdSaveErrRetAddr;
}
@@ -1041,14 +986,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionAddImplicitRetur
return IrInstructionIdAddImplicitReturnType;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionMergeErrRetTraces *) {
- return IrInstructionIdMergeErrRetTraces;
-}
-
-static constexpr IrInstructionId ir_instruction_id(IrInstructionMarkErrRetTracePtr *) {
- return IrInstructionIdMarkErrRetTracePtr;
-}
-
static constexpr IrInstructionId ir_instruction_id(IrInstructionFloatOp *) {
return IrInstructionIdFloatOp;
}
@@ -1097,6 +1034,34 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionUnionInitNamedFi
return IrInstructionIdUnionInitNamedField;
}
+static constexpr IrInstructionId ir_instruction_id(IrInstructionSuspendBegin *) {
+ return IrInstructionIdSuspendBegin;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionSuspendFinish *) {
+ return IrInstructionIdSuspendFinish;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionAwaitSrc *) {
+ return IrInstructionIdAwaitSrc;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionAwaitGen *) {
+ return IrInstructionIdAwaitGen;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionResume *) {
+ return IrInstructionIdResume;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionSpillBegin *) {
+ return IrInstructionIdSpillBegin;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionSpillEnd *) {
+ return IrInstructionIdSpillEnd;
+}
+
template<typename T>
static T *ir_create_instruction(IrBuilder *irb, Scope *scope, AstNode *source_node) {
T *special_instruction = allocate<T>(1);
@@ -1149,14 +1114,14 @@ static IrInstruction *ir_build_cond_br(IrBuilder *irb, Scope *scope, AstNode *so
}
static IrInstruction *ir_build_return(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *return_value)
+ IrInstruction *operand)
{
IrInstructionReturn *return_instruction = ir_build_instruction<IrInstructionReturn>(irb, scope, source_node);
return_instruction->base.value.type = irb->codegen->builtin_types.entry_unreachable;
return_instruction->base.value.special = ConstValSpecialStatic;
- return_instruction->value = return_value;
+ return_instruction->operand = operand;
- if (return_value != nullptr) ir_ref_instruction(return_value, irb->current_basic_block);
+ if (operand != nullptr) ir_ref_instruction(operand, irb->current_basic_block);
return &return_instruction->base;
}
@@ -1214,14 +1179,6 @@ static IrInstruction *ir_build_const_usize(IrBuilder *irb, Scope *scope, AstNode
return &const_instruction->base;
}
-static IrInstruction *ir_build_const_u8(IrBuilder *irb, Scope *scope, AstNode *source_node, uint8_t value) {
- IrInstructionConst *const_instruction = ir_build_instruction<IrInstructionConst>(irb, scope, source_node);
- const_instruction->base.value.type = irb->codegen->builtin_types.entry_u8;
- const_instruction->base.value.special = ConstValSpecialStatic;
- bigint_init_unsigned(&const_instruction->base.value.data.x_bigint, value);
- return &const_instruction->base;
-}
-
static IrInstruction *ir_create_const_type(IrBuilder *irb, Scope *scope, AstNode *source_node,
ZigType *type_entry)
{
@@ -1429,7 +1386,7 @@ static IrInstruction *ir_build_union_field_ptr(IrBuilder *irb, Scope *scope, Ast
static IrInstruction *ir_build_call_src(IrBuilder *irb, Scope *scope, AstNode *source_node,
ZigFn *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args,
- bool is_comptime, FnInline fn_inline, bool is_async, IrInstruction *async_allocator,
+ bool is_comptime, FnInline fn_inline, bool is_async,
IrInstruction *new_stack, ResultLoc *result_loc)
{
IrInstructionCallSrc *call_instruction = ir_build_instruction<IrInstructionCallSrc>(irb, scope, source_node);
@@ -1440,22 +1397,24 @@ static IrInstruction *ir_build_call_src(IrBuilder *irb, Scope *scope, AstNode *s
call_instruction->args = args;
call_instruction->arg_count = arg_count;
call_instruction->is_async = is_async;
- call_instruction->async_allocator = async_allocator;
call_instruction->new_stack = new_stack;
call_instruction->result_loc = result_loc;
if (fn_ref != nullptr) ir_ref_instruction(fn_ref, irb->current_basic_block);
for (size_t i = 0; i < arg_count; i += 1)
ir_ref_instruction(args[i], irb->current_basic_block);
- if (async_allocator != nullptr) ir_ref_instruction(async_allocator, irb->current_basic_block);
+ if (is_async && new_stack != nullptr) {
+ // in this case the arg at the end is the return pointer
+ ir_ref_instruction(args[arg_count], irb->current_basic_block);
+ }
if (new_stack != nullptr) ir_ref_instruction(new_stack, irb->current_basic_block);
return &call_instruction->base;
}
-static IrInstruction *ir_build_call_gen(IrAnalyze *ira, IrInstruction *source_instruction,
+static IrInstructionCallGen *ir_build_call_gen(IrAnalyze *ira, IrInstruction *source_instruction,
ZigFn *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args,
- FnInline fn_inline, bool is_async, IrInstruction *async_allocator, IrInstruction *new_stack,
+ FnInline fn_inline, bool is_async, IrInstruction *new_stack,
IrInstruction *result_loc, ZigType *return_type)
{
IrInstructionCallGen *call_instruction = ir_build_instruction<IrInstructionCallGen>(&ira->new_irb,
@@ -1467,18 +1426,16 @@ static IrInstruction *ir_build_call_gen(IrAnalyze *ira, IrInstruction *source_in
call_instruction->args = args;
call_instruction->arg_count = arg_count;
call_instruction->is_async = is_async;
- call_instruction->async_allocator = async_allocator;
call_instruction->new_stack = new_stack;
call_instruction->result_loc = result_loc;
if (fn_ref != nullptr) ir_ref_instruction(fn_ref, ira->new_irb.current_basic_block);
for (size_t i = 0; i < arg_count; i += 1)
ir_ref_instruction(args[i], ira->new_irb.current_basic_block);
- if (async_allocator != nullptr) ir_ref_instruction(async_allocator, ira->new_irb.current_basic_block);
if (new_stack != nullptr) ir_ref_instruction(new_stack, ira->new_irb.current_basic_block);
if (result_loc != nullptr) ir_ref_instruction(result_loc, ira->new_irb.current_basic_block);
- return &call_instruction->base;
+ return call_instruction;
}
static IrInstruction *ir_build_phi(IrBuilder *irb, Scope *scope, AstNode *source_node,
@@ -1754,17 +1711,16 @@ static IrInstruction *ir_build_array_type(IrBuilder *irb, Scope *scope, AstNode
return &instruction->base;
}
-static IrInstruction *ir_build_promise_type(IrBuilder *irb, Scope *scope, AstNode *source_node,
+static IrInstruction *ir_build_anyframe_type(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *payload_type)
{
- IrInstructionPromiseType *instruction = ir_build_instruction<IrInstructionPromiseType>(irb, scope, source_node);
+ IrInstructionAnyFrameType *instruction = ir_build_instruction<IrInstructionAnyFrameType>(irb, scope, source_node);
instruction->payload_type = payload_type;
if (payload_type != nullptr) ir_ref_instruction(payload_type, irb->current_basic_block);
return &instruction->base;
}
-
static IrInstruction *ir_build_slice_type(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *child_type, bool is_const, bool is_volatile, IrInstruction *align_value, bool is_allow_zero)
{
@@ -2443,7 +2399,35 @@ static IrInstruction *ir_build_frame_address(IrBuilder *irb, Scope *scope, AstNo
}
static IrInstruction *ir_build_handle(IrBuilder *irb, Scope *scope, AstNode *source_node) {
- IrInstructionHandle *instruction = ir_build_instruction<IrInstructionHandle>(irb, scope, source_node);
+ IrInstructionFrameHandle *instruction = ir_build_instruction<IrInstructionFrameHandle>(irb, scope, source_node);
+ return &instruction->base;
+}
+
+static IrInstruction *ir_build_frame_type(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *fn) {
+ IrInstructionFrameType *instruction = ir_build_instruction<IrInstructionFrameType>(irb, scope, source_node);
+ instruction->fn = fn;
+
+ ir_ref_instruction(fn, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
+static IrInstruction *ir_build_frame_size_src(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *fn) {
+ IrInstructionFrameSizeSrc *instruction = ir_build_instruction<IrInstructionFrameSizeSrc>(irb, scope, source_node);
+ instruction->fn = fn;
+
+ ir_ref_instruction(fn, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
+static IrInstruction *ir_build_frame_size_gen(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *fn)
+{
+ IrInstructionFrameSizeGen *instruction = ir_build_instruction<IrInstructionFrameSizeGen>(irb, scope, source_node);
+ instruction->fn = fn;
+
+ ir_ref_instruction(fn, irb->current_basic_block);
+
return &instruction->base;
}
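The frame-size builders above follow the Src/Gen naming split that appears throughout this diff (CallSrc/CallGen, AwaitSrc/AwaitGen, FrameSizeSrc/FrameSizeGen): the Src form is emitted while generating IR from the AST, and the Gen form is what it is lowered to once analysis has resolved what it refers to (compare ir_build_call_gen above, which takes an IrAnalyze). A rough, hypothetical illustration of that two-stage shape; the names and the size are invented, not the compiler's real data.

    #include <cstdio>

    // stand-ins for the two instruction flavors; not the real structs
    struct FrameSizeSrc { const char *fn_name; };              // built from the AST
    struct FrameSizeGen { const char *fn_name; size_t size; }; // built after analysis

    static FrameSizeGen analyze_frame_size(const FrameSizeSrc &src) {
        // a real pass would resolve the function and compute its frame size;
        // 64 is a made-up placeholder
        return FrameSizeGen{src.fn_name, 64};
    }

    int main(void) {
        FrameSizeSrc src{"some_async_fn"};
        FrameSizeGen gen = analyze_frame_size(src);
        printf("@frameSize(%s) -> %zu bytes (made-up number)\n", gen.fn_name, gen.size);
        return 0;
    }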
@@ -2546,11 +2530,12 @@ static IrInstruction *ir_build_align_of(IrBuilder *irb, Scope *scope, AstNode *s
}
static IrInstruction *ir_build_test_err_src(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *base_ptr, bool resolve_err_set)
+ IrInstruction *base_ptr, bool resolve_err_set, bool base_ptr_is_payload)
{
IrInstructionTestErrSrc *instruction = ir_build_instruction<IrInstructionTestErrSrc>(irb, scope, source_node);
instruction->base_ptr = base_ptr;
instruction->resolve_err_set = resolve_err_set;
+ instruction->base_ptr_is_payload = base_ptr_is_payload;
ir_ref_instruction(base_ptr, irb->current_basic_block);
@@ -2596,13 +2581,12 @@ static IrInstruction *ir_build_unwrap_err_payload(IrBuilder *irb, Scope *scope,
static IrInstruction *ir_build_fn_proto(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction **param_types, IrInstruction *align_value, IrInstruction *return_type,
- IrInstruction *async_allocator_type_value, bool is_var_args)
+ bool is_var_args)
{
IrInstructionFnProto *instruction = ir_build_instruction<IrInstructionFnProto>(irb, scope, source_node);
instruction->param_types = param_types;
instruction->align_value = align_value;
instruction->return_type = return_type;
- instruction->async_allocator_type_value = async_allocator_type_value;
instruction->is_var_args = is_var_args;
assert(source_node->type == NodeTypeFnProto);
@@ -2612,7 +2596,6 @@ static IrInstruction *ir_build_fn_proto(IrBuilder *irb, Scope *scope, AstNode *s
if (param_types[i] != nullptr) ir_ref_instruction(param_types[i], irb->current_basic_block);
}
if (align_value != nullptr) ir_ref_instruction(align_value, irb->current_basic_block);
- if (async_allocator_type_value != nullptr) ir_ref_instruction(async_allocator_type_value, irb->current_basic_block);
ir_ref_instruction(return_type, irb->current_basic_block);
return &instruction->base;
@@ -2994,18 +2977,6 @@ static IrInstruction *ir_build_reset_result(IrBuilder *irb, Scope *scope, AstNod
return &instruction->base;
}
-static IrInstruction *ir_build_result_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node,
- ResultLoc *result_loc, IrInstruction *result)
-{
- IrInstructionResultPtr *instruction = ir_build_instruction<IrInstructionResultPtr>(irb, scope, source_node);
- instruction->result_loc = result_loc;
- instruction->result = result;
-
- ir_ref_instruction(result, irb->current_basic_block);
-
- return &instruction->base;
-}
-
static IrInstruction *ir_build_opaque_type(IrBuilder *irb, Scope *scope, AstNode *source_node) {
IrInstructionOpaqueType *instruction = ir_build_instruction<IrInstructionOpaqueType>(irb, scope, source_node);
@@ -3056,149 +3027,6 @@ static IrInstruction *ir_build_error_union(IrBuilder *irb, Scope *scope, AstNode
return &instruction->base;
}
-static IrInstruction *ir_build_cancel(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *target)
-{
- IrInstructionCancel *instruction = ir_build_instruction<IrInstructionCancel>(irb, scope, source_node);
- instruction->target = target;
-
- ir_ref_instruction(target, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_get_implicit_allocator(IrBuilder *irb, Scope *scope, AstNode *source_node,
- ImplicitAllocatorId id)
-{
- IrInstructionGetImplicitAllocator *instruction = ir_build_instruction<IrInstructionGetImplicitAllocator>(irb, scope, source_node);
- instruction->id = id;
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_id(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *promise_ptr) {
- IrInstructionCoroId *instruction = ir_build_instruction<IrInstructionCoroId>(irb, scope, source_node);
- instruction->promise_ptr = promise_ptr;
-
- ir_ref_instruction(promise_ptr, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_alloc(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *coro_id) {
- IrInstructionCoroAlloc *instruction = ir_build_instruction<IrInstructionCoroAlloc>(irb, scope, source_node);
- instruction->coro_id = coro_id;
-
- ir_ref_instruction(coro_id, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_size(IrBuilder *irb, Scope *scope, AstNode *source_node) {
- IrInstructionCoroSize *instruction = ir_build_instruction<IrInstructionCoroSize>(irb, scope, source_node);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_begin(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *coro_id, IrInstruction *coro_mem_ptr) {
- IrInstructionCoroBegin *instruction = ir_build_instruction<IrInstructionCoroBegin>(irb, scope, source_node);
- instruction->coro_id = coro_id;
- instruction->coro_mem_ptr = coro_mem_ptr;
-
- ir_ref_instruction(coro_id, irb->current_basic_block);
- ir_ref_instruction(coro_mem_ptr, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_alloc_fail(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *err_val) {
- IrInstructionCoroAllocFail *instruction = ir_build_instruction<IrInstructionCoroAllocFail>(irb, scope, source_node);
- instruction->base.value.type = irb->codegen->builtin_types.entry_unreachable;
- instruction->base.value.special = ConstValSpecialStatic;
- instruction->err_val = err_val;
-
- ir_ref_instruction(err_val, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_suspend(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *save_point, IrInstruction *is_final)
-{
- IrInstructionCoroSuspend *instruction = ir_build_instruction<IrInstructionCoroSuspend>(irb, scope, source_node);
- instruction->save_point = save_point;
- instruction->is_final = is_final;
-
- if (save_point != nullptr) ir_ref_instruction(save_point, irb->current_basic_block);
- ir_ref_instruction(is_final, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_end(IrBuilder *irb, Scope *scope, AstNode *source_node) {
- IrInstructionCoroEnd *instruction = ir_build_instruction<IrInstructionCoroEnd>(irb, scope, source_node);
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_free(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *coro_id, IrInstruction *coro_handle)
-{
- IrInstructionCoroFree *instruction = ir_build_instruction<IrInstructionCoroFree>(irb, scope, source_node);
- instruction->coro_id = coro_id;
- instruction->coro_handle = coro_handle;
-
- ir_ref_instruction(coro_id, irb->current_basic_block);
- ir_ref_instruction(coro_handle, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_resume(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *awaiter_handle)
-{
- IrInstructionCoroResume *instruction = ir_build_instruction<IrInstructionCoroResume>(irb, scope, source_node);
- instruction->awaiter_handle = awaiter_handle;
-
- ir_ref_instruction(awaiter_handle, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_save(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *coro_handle)
-{
- IrInstructionCoroSave *instruction = ir_build_instruction<IrInstructionCoroSave>(irb, scope, source_node);
- instruction->coro_handle = coro_handle;
-
- ir_ref_instruction(coro_handle, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_promise(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *coro_handle)
-{
- IrInstructionCoroPromise *instruction = ir_build_instruction<IrInstructionCoroPromise>(irb, scope, source_node);
- instruction->coro_handle = coro_handle;
-
- ir_ref_instruction(coro_handle, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_coro_alloc_helper(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *realloc_fn, IrInstruction *coro_size)
-{
- IrInstructionCoroAllocHelper *instruction = ir_build_instruction<IrInstructionCoroAllocHelper>(irb, scope, source_node);
- instruction->realloc_fn = realloc_fn;
- instruction->coro_size = coro_size;
-
- ir_ref_instruction(realloc_fn, irb->current_basic_block);
- ir_ref_instruction(coro_size, irb->current_basic_block);
-
- return &instruction->base;
-}
-
static IrInstruction *ir_build_atomic_rmw(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *operand_type, IrInstruction *ptr, IrInstruction *op, IrInstruction *operand,
IrInstruction *ordering, AtomicRmwOp resolved_op, AtomicOrder resolved_ordering)
@@ -3238,28 +3066,6 @@ static IrInstruction *ir_build_atomic_load(IrBuilder *irb, Scope *scope, AstNode
return &instruction->base;
}
-static IrInstruction *ir_build_promise_result_type(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *promise_type)
-{
- IrInstructionPromiseResultType *instruction = ir_build_instruction<IrInstructionPromiseResultType>(irb, scope, source_node);
- instruction->promise_type = promise_type;
-
- ir_ref_instruction(promise_type, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_await_bookkeeping(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *promise_result_type)
-{
- IrInstructionAwaitBookkeeping *instruction = ir_build_instruction<IrInstructionAwaitBookkeeping>(irb, scope, source_node);
- instruction->promise_result_type = promise_result_type;
-
- ir_ref_instruction(promise_result_type, irb->current_basic_block);
-
- return &instruction->base;
-}
-
static IrInstruction *ir_build_save_err_ret_addr(IrBuilder *irb, Scope *scope, AstNode *source_node) {
IrInstructionSaveErrRetAddr *instruction = ir_build_instruction<IrInstructionSaveErrRetAddr>(irb, scope, source_node);
return &instruction->base;
@@ -3276,30 +3082,6 @@ static IrInstruction *ir_build_add_implicit_return_type(IrBuilder *irb, Scope *s
return &instruction->base;
}
-static IrInstruction *ir_build_merge_err_ret_traces(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *coro_promise_ptr, IrInstruction *src_err_ret_trace_ptr, IrInstruction *dest_err_ret_trace_ptr)
-{
- IrInstructionMergeErrRetTraces *instruction = ir_build_instruction<IrInstructionMergeErrRetTraces>(irb, scope, source_node);
- instruction->coro_promise_ptr = coro_promise_ptr;
- instruction->src_err_ret_trace_ptr = src_err_ret_trace_ptr;
- instruction->dest_err_ret_trace_ptr = dest_err_ret_trace_ptr;
-
- ir_ref_instruction(coro_promise_ptr, irb->current_basic_block);
- ir_ref_instruction(src_err_ret_trace_ptr, irb->current_basic_block);
- ir_ref_instruction(dest_err_ret_trace_ptr, irb->current_basic_block);
-
- return &instruction->base;
-}
-
-static IrInstruction *ir_build_mark_err_ret_trace_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *err_ret_trace_ptr) {
- IrInstructionMarkErrRetTracePtr *instruction = ir_build_instruction<IrInstructionMarkErrRetTracePtr>(irb, scope, source_node);
- instruction->err_ret_trace_ptr = err_ret_trace_ptr;
-
- ir_ref_instruction(err_ret_trace_ptr, irb->current_basic_block);
-
- return &instruction->base;
-}
-
static IrInstruction *ir_build_has_decl(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *container, IrInstruction *name)
{
@@ -3435,7 +3217,7 @@ static IrInstruction *ir_build_alloca_src(IrBuilder *irb, Scope *scope, AstNode
return &instruction->base;
}
-static IrInstructionAllocaGen *ir_create_alloca_gen(IrAnalyze *ira, IrInstruction *source_instruction,
+static IrInstructionAllocaGen *ir_build_alloca_gen(IrAnalyze *ira, IrInstruction *source_instruction,
uint32_t align, const char *name_hint)
{
IrInstructionAllocaGen *instruction = ir_create_instruction<IrInstructionAllocaGen>(&ira->new_irb,
@@ -3459,6 +3241,87 @@ static IrInstruction *ir_build_end_expr(IrBuilder *irb, Scope *scope, AstNode *s
return &instruction->base;
}
+static IrInstructionSuspendBegin *ir_build_suspend_begin(IrBuilder *irb, Scope *scope, AstNode *source_node) {
+ IrInstructionSuspendBegin *instruction = ir_build_instruction<IrInstructionSuspendBegin>(irb, scope, source_node);
+ instruction->base.value.type = irb->codegen->builtin_types.entry_void;
+
+ return instruction;
+}
+
+static IrInstruction *ir_build_suspend_finish(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ IrInstructionSuspendBegin *begin)
+{
+ IrInstructionSuspendFinish *instruction = ir_build_instruction<IrInstructionSuspendFinish>(irb, scope, source_node);
+ instruction->base.value.type = irb->codegen->builtin_types.entry_void;
+ instruction->begin = begin;
+
+ ir_ref_instruction(&begin->base, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
+static IrInstruction *ir_build_await_src(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ IrInstruction *frame, ResultLoc *result_loc)
+{
+ IrInstructionAwaitSrc *instruction = ir_build_instruction<IrInstructionAwaitSrc>(irb, scope, source_node);
+ instruction->frame = frame;
+ instruction->result_loc = result_loc;
+
+ ir_ref_instruction(frame, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
+static IrInstruction *ir_build_await_gen(IrAnalyze *ira, IrInstruction *source_instruction,
+ IrInstruction *frame, ZigType *result_type, IrInstruction *result_loc)
+{
+ IrInstructionAwaitGen *instruction = ir_build_instruction<IrInstructionAwaitGen>(&ira->new_irb,
+ source_instruction->scope, source_instruction->source_node);
+ instruction->base.value.type = result_type;
+ instruction->frame = frame;
+ instruction->result_loc = result_loc;
+
+ ir_ref_instruction(frame, ira->new_irb.current_basic_block);
+ if (result_loc != nullptr) ir_ref_instruction(result_loc, ira->new_irb.current_basic_block);
+
+ return &instruction->base;
+}
+
+static IrInstruction *ir_build_resume(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *frame) {
+ IrInstructionResume *instruction = ir_build_instruction<IrInstructionResume>(irb, scope, source_node);
+ instruction->base.value.type = irb->codegen->builtin_types.entry_void;
+ instruction->frame = frame;
+
+ ir_ref_instruction(frame, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
+static IrInstructionSpillBegin *ir_build_spill_begin(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ IrInstruction *operand, SpillId spill_id)
+{
+ IrInstructionSpillBegin *instruction = ir_build_instruction<IrInstructionSpillBegin>(irb, scope, source_node);
+ instruction->base.value.special = ConstValSpecialStatic;
+ instruction->base.value.type = irb->codegen->builtin_types.entry_void;
+ instruction->operand = operand;
+ instruction->spill_id = spill_id;
+
+ ir_ref_instruction(operand, irb->current_basic_block);
+
+ return instruction;
+}
+
+static IrInstruction *ir_build_spill_end(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ IrInstructionSpillBegin *begin)
+{
+ IrInstructionSpillEnd *instruction = ir_build_instruction<IrInstructionSpillEnd>(irb, scope, source_node);
+ instruction->begin = begin;
+
+ ir_ref_instruction(&begin->base, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_scope, size_t *results) {
results[ReturnKindUnconditional] = 0;
results[ReturnKindError] = 0;
@@ -3489,7 +3352,6 @@ static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_sco
continue;
case ScopeIdDeferExpr:
case ScopeIdCImport:
- case ScopeIdCoroPrelude:
zig_unreachable();
}
}
@@ -3545,7 +3407,6 @@ static bool ir_gen_defers_for_block(IrBuilder *irb, Scope *inner_scope, Scope *o
continue;
case ScopeIdDeferExpr:
case ScopeIdCImport:
- case ScopeIdCoroPrelude:
zig_unreachable();
}
}
@@ -3588,66 +3449,6 @@ static ScopeDeferExpr *get_scope_defer_expr(Scope *scope) {
return nullptr;
}
-static bool exec_is_async(IrExecutable *exec) {
- ZigFn *fn_entry = exec_fn_entry(exec);
- return fn_entry != nullptr && fn_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync;
-}
-
-static IrInstruction *ir_gen_async_return(IrBuilder *irb, Scope *scope, AstNode *node, IrInstruction *return_value,
- bool is_generated_code)
-{
- ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, return_value));
-
- bool is_async = exec_is_async(irb->exec);
- if (!is_async) {
- IrInstruction *return_inst = ir_build_return(irb, scope, node, return_value);
- return_inst->is_gen = is_generated_code;
- return return_inst;
- }
-
- IrBasicBlock *suspended_block = ir_create_basic_block(irb, scope, "Suspended");
- IrBasicBlock *not_suspended_block = ir_create_basic_block(irb, scope, "NotSuspended");
- IrBasicBlock *store_awaiter_block = ir_create_basic_block(irb, scope, "StoreAwaiter");
- IrBasicBlock *check_canceled_block = ir_create_basic_block(irb, scope, "CheckCanceled");
-
- IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, scope, node, 0x7); // 0b111
- IrInstruction *ptr_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
- IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001
- IrInstruction *is_suspended_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010
- IrInstruction *promise_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_promise);
- IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false);
- IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
-
- ir_build_store_ptr(irb, scope, node, irb->exec->coro_result_field_ptr, return_value);
- IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize);
- IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
- usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, ptr_mask, nullptr,
- AtomicRmwOp_or, AtomicOrderSeqCst);
-
- IrInstruction *is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false);
- IrInstruction *is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false);
- ir_build_cond_br(irb, scope, node, is_suspended_bool, suspended_block, not_suspended_block, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, suspended_block);
- ir_build_unreachable(irb, scope, node);
-
- ir_set_cursor_at_end_and_append_block(irb, not_suspended_block);
- IrInstruction *await_handle_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
- // if we ever add null checking safety to the ptrtoint instruction, it needs to be disabled here
- IrInstruction *have_await_handle = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false);
- ir_build_cond_br(irb, scope, node, have_await_handle, store_awaiter_block, check_canceled_block, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, store_awaiter_block);
- IrInstruction *await_handle = ir_build_int_to_ptr(irb, scope, node, promise_type_val, await_handle_addr);
- ir_build_store_ptr(irb, scope, node, irb->exec->await_handle_var_ptr, await_handle);
- ir_build_br(irb, scope, node, irb->exec->coro_normal_final, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, check_canceled_block);
- IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
- IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
- return ir_build_cond_br(irb, scope, node, is_canceled_bool, irb->exec->coro_final_cleanup_block, irb->exec->coro_early_final, is_comptime);
-}
-
static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval, ResultLoc *result_loc) {
assert(node->type == NodeTypeReturnExpr);
@@ -3689,57 +3490,58 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
return_value = ir_build_const_void(irb, scope, node);
}
+ ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, return_value));
+
size_t defer_counts[2];
ir_count_defers(irb, scope, outer_scope, defer_counts);
bool have_err_defers = defer_counts[ReturnKindError] > 0;
- if (have_err_defers || irb->codegen->have_err_ret_tracing) {
- IrBasicBlock *err_block = ir_create_basic_block(irb, scope, "ErrRetErr");
- IrBasicBlock *ok_block = ir_create_basic_block(irb, scope, "ErrRetOk");
- if (!have_err_defers) {
- ir_gen_defers_for_block(irb, scope, outer_scope, false);
- }
+ if (!have_err_defers && !irb->codegen->have_err_ret_tracing) {
+ // only generate unconditional defers
+ ir_gen_defers_for_block(irb, scope, outer_scope, false);
+ IrInstruction *result = ir_build_return(irb, scope, node, return_value);
+ result_loc_ret->base.source_instruction = result;
+ return result;
+ }
+ bool should_inline = ir_should_inline(irb->exec, scope);
- IrInstruction *ret_ptr = ir_build_result_ptr(irb, scope, node, &result_loc_ret->base,
- return_value);
- IrInstruction *is_err = ir_build_test_err_src(irb, scope, node, ret_ptr, false);
+ IrBasicBlock *err_block = ir_create_basic_block(irb, scope, "ErrRetErr");
+ IrBasicBlock *ok_block = ir_create_basic_block(irb, scope, "ErrRetOk");
- bool should_inline = ir_should_inline(irb->exec, scope);
- IrInstruction *is_comptime;
- if (should_inline) {
- is_comptime = ir_build_const_bool(irb, scope, node, true);
- } else {
- is_comptime = ir_build_test_comptime(irb, scope, node, is_err);
- }
+ if (!have_err_defers) {
+ ir_gen_defers_for_block(irb, scope, outer_scope, false);
+ }
- ir_mark_gen(ir_build_cond_br(irb, scope, node, is_err, err_block, ok_block, is_comptime));
- IrBasicBlock *ret_stmt_block = ir_create_basic_block(irb, scope, "RetStmt");
+ IrInstruction *is_err = ir_build_test_err_src(irb, scope, node, return_value, false, true);
- ir_set_cursor_at_end_and_append_block(irb, err_block);
- if (have_err_defers) {
- ir_gen_defers_for_block(irb, scope, outer_scope, true);
- }
- if (irb->codegen->have_err_ret_tracing && !should_inline) {
- ir_build_save_err_ret_addr(irb, scope, node);
- }
- ir_build_br(irb, scope, node, ret_stmt_block, is_comptime);
+ IrInstruction *is_comptime;
+ if (should_inline) {
+ is_comptime = ir_build_const_bool(irb, scope, node, should_inline);
+ } else {
+ is_comptime = ir_build_test_comptime(irb, scope, node, is_err);
+ }
- ir_set_cursor_at_end_and_append_block(irb, ok_block);
- if (have_err_defers) {
- ir_gen_defers_for_block(irb, scope, outer_scope, false);
- }
- ir_build_br(irb, scope, node, ret_stmt_block, is_comptime);
+ ir_mark_gen(ir_build_cond_br(irb, scope, node, is_err, err_block, ok_block, is_comptime));
+ IrBasicBlock *ret_stmt_block = ir_create_basic_block(irb, scope, "RetStmt");
- ir_set_cursor_at_end_and_append_block(irb, ret_stmt_block);
- IrInstruction *result = ir_gen_async_return(irb, scope, node, return_value, false);
- result_loc_ret->base.source_instruction = result;
- return result;
- } else {
- // generate unconditional defers
+ ir_set_cursor_at_end_and_append_block(irb, err_block);
+ if (have_err_defers) {
+ ir_gen_defers_for_block(irb, scope, outer_scope, true);
+ }
+ if (irb->codegen->have_err_ret_tracing && !should_inline) {
+ ir_build_save_err_ret_addr(irb, scope, node);
+ }
+ ir_build_br(irb, scope, node, ret_stmt_block, is_comptime);
+
+ ir_set_cursor_at_end_and_append_block(irb, ok_block);
+ if (have_err_defers) {
ir_gen_defers_for_block(irb, scope, outer_scope, false);
- IrInstruction *result = ir_gen_async_return(irb, scope, node, return_value, false);
- result_loc_ret->base.source_instruction = result;
- return result;
}
+ ir_build_br(irb, scope, node, ret_stmt_block, is_comptime);
+
+ ir_set_cursor_at_end_and_append_block(irb, ret_stmt_block);
+ IrInstruction *result = ir_build_return(irb, scope, node, return_value);
+ result_loc_ret->base.source_instruction = result;
+ return result;
}
case ReturnKindError:
{
@@ -3747,7 +3549,7 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
IrInstruction *err_union_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr, nullptr);
if (err_union_ptr == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
- IrInstruction *is_err_val = ir_build_test_err_src(irb, scope, node, err_union_ptr, true);
+ IrInstruction *is_err_val = ir_build_test_err_src(irb, scope, node, err_union_ptr, true, false);
IrBasicBlock *return_block = ir_create_basic_block(irb, scope, "ErrRetReturn");
IrBasicBlock *continue_block = ir_create_basic_block(irb, scope, "ErrRetContinue");
@@ -3761,19 +3563,21 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
ir_mark_gen(ir_build_cond_br(irb, scope, node, is_err_val, return_block, continue_block, is_comptime));
ir_set_cursor_at_end_and_append_block(irb, return_block);
+ IrInstruction *err_val_ptr = ir_build_unwrap_err_code(irb, scope, node, err_union_ptr);
+ IrInstruction *err_val = ir_build_load_ptr(irb, scope, node, err_val_ptr);
+ ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, err_val));
+ IrInstructionSpillBegin *spill_begin = ir_build_spill_begin(irb, scope, node, err_val,
+ SpillIdRetErrCode);
+ ResultLocReturn *result_loc_ret = allocate<ResultLocReturn>(1);
+ result_loc_ret->base.id = ResultLocIdReturn;
+ ir_build_reset_result(irb, scope, node, &result_loc_ret->base);
+ ir_build_end_expr(irb, scope, node, err_val, &result_loc_ret->base);
if (!ir_gen_defers_for_block(irb, scope, outer_scope, true)) {
- IrInstruction *err_val_ptr = ir_build_unwrap_err_code(irb, scope, node, err_union_ptr);
- IrInstruction *err_val = ir_build_load_ptr(irb, scope, node, err_val_ptr);
-
- ResultLocReturn *result_loc_ret = allocate<ResultLocReturn>(1);
- result_loc_ret->base.id = ResultLocIdReturn;
- ir_build_reset_result(irb, scope, node, &result_loc_ret->base);
- ir_build_end_expr(irb, scope, node, err_val, &result_loc_ret->base);
-
if (irb->codegen->have_err_ret_tracing && !should_inline) {
ir_build_save_err_ret_addr(irb, scope, node);
}
- IrInstruction *ret_inst = ir_gen_async_return(irb, scope, node, err_val, false);
+ err_val = ir_build_spill_end(irb, scope, node, spill_begin);
+ IrInstruction *ret_inst = ir_build_return(irb, scope, node, err_val);
result_loc_ret->base.source_instruction = ret_inst;
}
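In the reworked `return error` path above, the error code is loaded once, recorded for the inferred return type, and wrapped in the new SpillBegin/SpillEnd pair (SpillIdRetErrCode) so that the value finally returned is the one captured before the error defers ran, presumably so it survives whatever those defers and the async machinery do in between. A tiny sketch of the pairing contract follows; the names and values are invented.

    #include <cassert>
    #include <cstdio>

    // hypothetical stand-in for the begin/end pairing: end() consumes the exact
    // token produced by begin(), giving later passes a well-defined interval over
    // which the value must be kept alive (for example, in an async frame).
    struct SpillToken { int spill_id; long value; };

    static SpillToken spill_begin(long value, int spill_id) {
        return SpillToken{spill_id, value};
    }

    static long spill_end(const SpillToken &token) {
        return token.value;
    }

    int main(void) {
        const int kRetErrCode = 1; // stand-in for SpillIdRetErrCode
        SpillToken err_code = spill_begin(42, kRetErrCode);
        // ... error defers (which may themselves call or suspend) would run here ...
        long returned = spill_end(err_code);
        assert(returned == 42);
        printf("error code survives the defers: %ld\n", returned);
        return 0;
    }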
@@ -3971,18 +3775,31 @@ static IrInstruction *ir_gen_block(IrBuilder *irb, Scope *parent_scope, AstNode
incoming_values.append(else_expr_result);
}
- if (block_node->data.block.name != nullptr) {
+ bool is_return_from_fn = block_node == irb->main_block_node;
+ if (!is_return_from_fn) {
ir_gen_defers_for_block(irb, child_scope, outer_block_scope, false);
+ }
+
+ IrInstruction *result;
+ if (block_node->data.block.name != nullptr) {
ir_mark_gen(ir_build_br(irb, parent_scope, block_node, scope_block->end_block, scope_block->is_comptime));
ir_set_cursor_at_end_and_append_block(irb, scope_block->end_block);
IrInstruction *phi = ir_build_phi(irb, parent_scope, block_node, incoming_blocks.length,
incoming_blocks.items, incoming_values.items, scope_block->peer_parent);
- return ir_expr_wrap(irb, parent_scope, phi, result_loc);
+ result = ir_expr_wrap(irb, parent_scope, phi, result_loc);
} else {
- ir_gen_defers_for_block(irb, child_scope, outer_block_scope, false);
IrInstruction *void_inst = ir_mark_gen(ir_build_const_void(irb, child_scope, block_node));
- return ir_lval_wrap(irb, parent_scope, void_inst, lval, result_loc);
+ result = ir_lval_wrap(irb, parent_scope, void_inst, lval, result_loc);
}
+ if (!is_return_from_fn)
+ return result;
+
+    // no need for save_err_ret_addr because this cannot return an error

+ // only generate unconditional defers
+
+ ir_mark_gen(ir_build_add_implicit_return_type(irb, child_scope, block_node, result));
+ ir_gen_defers_for_block(irb, child_scope, outer_block_scope, false);
+ return ir_mark_gen(ir_build_return(irb, child_scope, result->source_node, result));
}
static IrInstruction *ir_gen_bin_op_id(IrBuilder *irb, Scope *scope, AstNode *node, IrBinOp op_id) {
@@ -4561,8 +4378,6 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
return irb->codegen->invalid_instruction;
}
- bool is_async = exec_is_async(irb->exec);
-
switch (builtin_fn->id) {
case BuiltinFnIdInvalid:
zig_unreachable();
@@ -5185,16 +5000,30 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
return ir_lval_wrap(irb, scope, ir_build_return_address(irb, scope, node), lval, result_loc);
case BuiltinFnIdFrameAddress:
return ir_lval_wrap(irb, scope, ir_build_frame_address(irb, scope, node), lval, result_loc);
- case BuiltinFnIdHandle:
+ case BuiltinFnIdFrameHandle:
if (!irb->exec->fn_entry) {
- add_node_error(irb->codegen, node, buf_sprintf("@handle() called outside of function definition"));
- return irb->codegen->invalid_instruction;
- }
- if (!is_async) {
- add_node_error(irb->codegen, node, buf_sprintf("@handle() in non-async function"));
+ add_node_error(irb->codegen, node, buf_sprintf("@frame() called outside of function definition"));
return irb->codegen->invalid_instruction;
}
return ir_lval_wrap(irb, scope, ir_build_handle(irb, scope, node), lval, result_loc);
+ case BuiltinFnIdFrameType: {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ IrInstruction *frame_type = ir_build_frame_type(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, frame_type, lval, result_loc);
+ }
+ case BuiltinFnIdFrameSize: {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ IrInstruction *frame_size = ir_build_frame_size_src(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, frame_size, lval, result_loc);
+ }
case BuiltinFnIdAlignOf:
{
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
@@ -5395,13 +5224,15 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
FnInline fn_inline = (builtin_fn->id == BuiltinFnIdInlineCall) ? FnInlineAlways : FnInlineNever;
IrInstruction *call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false,
- fn_inline, false, nullptr, nullptr, result_loc);
+ fn_inline, false, nullptr, result_loc);
return ir_lval_wrap(irb, scope, call, lval, result_loc);
}
case BuiltinFnIdNewStackCall:
{
- if (node->data.fn_call_expr.params.length == 0) {
- add_node_error(irb->codegen, node, buf_sprintf("expected at least 1 argument, found 0"));
+ if (node->data.fn_call_expr.params.length < 2) {
+ add_node_error(irb->codegen, node,
+ buf_sprintf("expected at least 2 arguments, found %" ZIG_PRI_usize,
+ node->data.fn_call_expr.params.length));
return irb->codegen->invalid_instruction;
}
@@ -5426,7 +5257,51 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
}
IrInstruction *call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false,
- FnInlineAuto, false, nullptr, new_stack, result_loc);
+ FnInlineAuto, false, new_stack, result_loc);
+ return ir_lval_wrap(irb, scope, call, lval, result_loc);
+ }
+ case BuiltinFnIdAsyncCall:
+ {
+ size_t arg_offset = 3;
+ if (node->data.fn_call_expr.params.length < arg_offset) {
+ add_node_error(irb->codegen, node,
+ buf_sprintf("expected at least %" ZIG_PRI_usize " arguments, found %" ZIG_PRI_usize,
+ arg_offset, node->data.fn_call_expr.params.length));
+ return irb->codegen->invalid_instruction;
+ }
+
+ AstNode *bytes_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *bytes = ir_gen_node(irb, bytes_node, scope);
+ if (bytes == irb->codegen->invalid_instruction)
+ return bytes;
+
+ AstNode *ret_ptr_node = node->data.fn_call_expr.params.at(1);
+ IrInstruction *ret_ptr = ir_gen_node(irb, ret_ptr_node, scope);
+ if (ret_ptr == irb->codegen->invalid_instruction)
+ return ret_ptr;
+
+ AstNode *fn_ref_node = node->data.fn_call_expr.params.at(2);
+ IrInstruction *fn_ref = ir_gen_node(irb, fn_ref_node, scope);
+ if (fn_ref == irb->codegen->invalid_instruction)
+ return fn_ref;
+
+ size_t arg_count = node->data.fn_call_expr.params.length - arg_offset;
+
+ // last "arg" is return pointer
+ IrInstruction **args = allocate<IrInstruction*>(arg_count + 1);
+
+ for (size_t i = 0; i < arg_count; i += 1) {
+ AstNode *arg_node = node->data.fn_call_expr.params.at(i + arg_offset);
+ IrInstruction *arg = ir_gen_node(irb, arg_node, scope);
+ if (arg == irb->codegen->invalid_instruction)
+ return arg;
+ args[i] = arg;
+ }
+
+ args[arg_count] = ret_ptr;
+
+ IrInstruction *call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false,
+ FnInlineAuto, true, bytes, result_loc);
return ir_lval_wrap(irb, scope, call, lval, result_loc);
}
case BuiltinFnIdTypeId:
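In the @asyncCall lowering above, the visible argument list is over-allocated by one slot and the return pointer is parked at args[arg_count], while the frame buffer travels through the new_stack parameter; ir_build_call_src (earlier in this diff) only refs that hidden slot when is_async && new_stack != nullptr. A trivial, self-contained sketch of the +1 slot convention, with invented names:

    #include <cstdio>
    #include <vector>

    struct Inst { const char *name; };

    int main(void) {
        Inst arg0{"x"}, arg1{"y"}, ret_ptr{"&result"};
        const size_t arg_count = 2;

        // one slot past the visible arguments holds the return pointer,
        // mirroring args[arg_count] = ret_ptr in the builtin handler
        std::vector<Inst*> args(arg_count + 1);
        args[0] = &arg0;
        args[1] = &arg1;
        args[arg_count] = &ret_ptr;

        for (size_t i = 0; i < arg_count; i += 1)
            printf("visible arg %zu: %s\n", i, args[i]->name);
        printf("hidden slot:   %s\n", args[arg_count]->name);
        return 0;
    }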
@@ -5731,17 +5606,8 @@ static IrInstruction *ir_gen_fn_call(IrBuilder *irb, Scope *scope, AstNode *node
}
bool is_async = node->data.fn_call_expr.is_async;
- IrInstruction *async_allocator = nullptr;
- if (is_async) {
- if (node->data.fn_call_expr.async_allocator) {
- async_allocator = ir_gen_node(irb, node->data.fn_call_expr.async_allocator, scope);
- if (async_allocator == irb->codegen->invalid_instruction)
- return async_allocator;
- }
- }
-
- IrInstruction *fn_call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false, FnInlineAuto,
- is_async, async_allocator, nullptr, result_loc);
+ IrInstruction *fn_call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false,
+ FnInlineAuto, is_async, nullptr, result_loc);
return ir_lval_wrap(irb, scope, fn_call, lval, result_loc);
}
@@ -6254,7 +6120,8 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n
LValPtr, nullptr);
if (err_val_ptr == irb->codegen->invalid_instruction)
return err_val_ptr;
- IrInstruction *is_err = ir_build_test_err_src(irb, scope, node->data.while_expr.condition, err_val_ptr, true);
+ IrInstruction *is_err = ir_build_test_err_src(irb, scope, node->data.while_expr.condition, err_val_ptr,
+ true, false);
IrBasicBlock *after_cond_block = irb->current_basic_block;
IrInstruction *void_else_result = else_node ? nullptr : ir_mark_gen(ir_build_const_void(irb, scope, node));
IrInstruction *cond_br_inst;
@@ -6762,10 +6629,10 @@ static IrInstruction *ir_gen_array_type(IrBuilder *irb, Scope *scope, AstNode *n
}
}
-static IrInstruction *ir_gen_promise_type(IrBuilder *irb, Scope *scope, AstNode *node) {
- assert(node->type == NodeTypePromiseType);
+static IrInstruction *ir_gen_anyframe_type(IrBuilder *irb, Scope *scope, AstNode *node) {
+ assert(node->type == NodeTypeAnyFrameType);
- AstNode *payload_type_node = node->data.promise_type.payload_type;
+ AstNode *payload_type_node = node->data.anyframe_type.payload_type;
IrInstruction *payload_type_value = nullptr;
if (payload_type_node != nullptr) {
@@ -6775,7 +6642,7 @@ static IrInstruction *ir_gen_promise_type(IrBuilder *irb, Scope *scope, AstNode
}
- return ir_build_promise_type(irb, scope, node, payload_type_value);
+ return ir_build_anyframe_type(irb, scope, node, payload_type_value);
}
static IrInstruction *ir_gen_undefined_literal(IrBuilder *irb, Scope *scope, AstNode *node) {
@@ -7070,7 +6937,7 @@ static IrInstruction *ir_gen_if_err_expr(IrBuilder *irb, Scope *scope, AstNode *
return err_val_ptr;
IrInstruction *err_val = ir_build_load_ptr(irb, scope, node, err_val_ptr);
- IrInstruction *is_err = ir_build_test_err_src(irb, scope, node, err_val_ptr, true);
+ IrInstruction *is_err = ir_build_test_err_src(irb, scope, node, err_val_ptr, true, false);
IrBasicBlock *ok_block = ir_create_basic_block(irb, scope, "TryOk");
IrBasicBlock *else_block = ir_create_basic_block(irb, scope, "TryElse");
@@ -7686,7 +7553,7 @@ static IrInstruction *ir_gen_catch(IrBuilder *irb, Scope *parent_scope, AstNode
if (err_union_ptr == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
- IrInstruction *is_err = ir_build_test_err_src(irb, parent_scope, node, err_union_ptr, true);
+ IrInstruction *is_err = ir_build_test_err_src(irb, parent_scope, node, err_union_ptr, true, false);
IrInstruction *is_comptime;
if (ir_should_inline(irb->exec, parent_scope)) {
@@ -7980,339 +7847,45 @@ static IrInstruction *ir_gen_fn_proto(IrBuilder *irb, Scope *parent_scope, AstNo
//return_type = nullptr;
}
- IrInstruction *async_allocator_type_value = nullptr;
- if (node->data.fn_proto.async_allocator_type != nullptr) {
- async_allocator_type_value = ir_gen_node(irb, node->data.fn_proto.async_allocator_type, parent_scope);
- if (async_allocator_type_value == irb->codegen->invalid_instruction)
- return irb->codegen->invalid_instruction;
- }
-
- return ir_build_fn_proto(irb, parent_scope, node, param_types, align_value, return_type,
- async_allocator_type_value, is_var_args);
-}
-
-static IrInstruction *ir_gen_cancel_target(IrBuilder *irb, Scope *scope, AstNode *node,
- IrInstruction *target_inst, bool cancel_non_suspended, bool cancel_awaited)
-{
- IrBasicBlock *done_block = ir_create_basic_block(irb, scope, "CancelDone");
- IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled");
- IrBasicBlock *pre_return_block = ir_create_basic_block(irb, scope, "PreReturn");
- IrBasicBlock *post_return_block = ir_create_basic_block(irb, scope, "PostReturn");
- IrBasicBlock *do_cancel_block = ir_create_basic_block(irb, scope, "DoCancel");
-
- IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
- IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize);
- IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false);
- IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001
- IrInstruction *promise_T_type_val = ir_build_const_type(irb, scope, node,
- get_promise_type(irb->codegen, irb->codegen->builtin_types.entry_void));
- IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, scope, node, 0x7); // 0b111
- IrInstruction *ptr_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
- IrInstruction *await_mask = ir_build_const_usize(irb, scope, node, 0x4); // 0b100
- IrInstruction *is_suspended_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010
-
- // TODO relies on Zig not re-ordering fields
- IrInstruction *casted_target_inst = ir_build_ptr_cast_src(irb, scope, node, promise_T_type_val, target_inst,
- false);
- IrInstruction *coro_promise_ptr = ir_build_coro_promise(irb, scope, node, casted_target_inst);
- Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME);
- IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr,
- atomic_state_field_name, false);
-
- // set the is_canceled bit
- IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
- usize_type_val, atomic_state_ptr, nullptr, is_canceled_mask, nullptr,
- AtomicRmwOp_or, AtomicOrderSeqCst);
-
- IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
- IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
- ir_build_cond_br(irb, scope, node, is_canceled_bool, done_block, not_canceled_block, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
- IrInstruction *awaiter_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
- IrInstruction *is_returned_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpEq, awaiter_addr, ptr_mask, false);
- ir_build_cond_br(irb, scope, node, is_returned_bool, post_return_block, pre_return_block, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, post_return_block);
- if (cancel_awaited) {
- ir_build_br(irb, scope, node, do_cancel_block, is_comptime);
- } else {
- IrInstruction *is_awaited_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, await_mask, false);
- IrInstruction *is_awaited_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_awaited_value, zero, false);
- ir_build_cond_br(irb, scope, node, is_awaited_bool, done_block, do_cancel_block, is_comptime);
- }
-
- ir_set_cursor_at_end_and_append_block(irb, pre_return_block);
- if (cancel_awaited) {
- if (cancel_non_suspended) {
- ir_build_br(irb, scope, node, do_cancel_block, is_comptime);
- } else {
- IrInstruction *is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false);
- IrInstruction *is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false);
- ir_build_cond_br(irb, scope, node, is_suspended_bool, do_cancel_block, done_block, is_comptime);
- }
- } else {
- ir_build_br(irb, scope, node, done_block, is_comptime);
- }
-
- ir_set_cursor_at_end_and_append_block(irb, do_cancel_block);
- ir_build_cancel(irb, scope, node, target_inst);
- ir_build_br(irb, scope, node, done_block, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, done_block);
- return ir_build_const_void(irb, scope, node);
-}
-
-static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node) {
- assert(node->type == NodeTypeCancel);
-
- IrInstruction *target_inst = ir_gen_node(irb, node->data.cancel_expr.expr, scope);
- if (target_inst == irb->codegen->invalid_instruction)
- return irb->codegen->invalid_instruction;
-
- return ir_gen_cancel_target(irb, scope, node, target_inst, false, true);
-}
-
-static IrInstruction *ir_gen_resume_target(IrBuilder *irb, Scope *scope, AstNode *node,
- IrInstruction *target_inst)
-{
- IrBasicBlock *done_block = ir_create_basic_block(irb, scope, "ResumeDone");
- IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled");
- IrBasicBlock *suspended_block = ir_create_basic_block(irb, scope, "IsSuspended");
- IrBasicBlock *not_suspended_block = ir_create_basic_block(irb, scope, "IsNotSuspended");
-
- IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
- IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001
- IrInstruction *is_suspended_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010
- IrInstruction *and_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, is_suspended_mask);
- IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false);
- IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize);
- IrInstruction *promise_T_type_val = ir_build_const_type(irb, scope, node,
- get_promise_type(irb->codegen, irb->codegen->builtin_types.entry_void));
-
- // TODO relies on Zig not re-ordering fields
- IrInstruction *casted_target_inst = ir_build_ptr_cast_src(irb, scope, node, promise_T_type_val, target_inst,
- false);
- IrInstruction *coro_promise_ptr = ir_build_coro_promise(irb, scope, node, casted_target_inst);
- Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME);
- IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr,
- atomic_state_field_name, false);
-
- // clear the is_suspended bit
- IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
- usize_type_val, atomic_state_ptr, nullptr, and_mask, nullptr,
- AtomicRmwOp_and, AtomicOrderSeqCst);
-
- IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
- IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
- ir_build_cond_br(irb, scope, node, is_canceled_bool, done_block, not_canceled_block, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
- IrInstruction *is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false);
- IrInstruction *is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false);
- ir_build_cond_br(irb, scope, node, is_suspended_bool, suspended_block, not_suspended_block, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, not_suspended_block);
- ir_build_unreachable(irb, scope, node);
-
- ir_set_cursor_at_end_and_append_block(irb, suspended_block);
- ir_build_coro_resume(irb, scope, node, target_inst);
- ir_build_br(irb, scope, node, done_block, is_comptime);
-
- ir_set_cursor_at_end_and_append_block(irb, done_block);
- return ir_build_const_void(irb, scope, node);
+ return ir_build_fn_proto(irb, parent_scope, node, param_types, align_value, return_type, is_var_args);
}
static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node) {
assert(node->type == NodeTypeResume);
- IrInstruction *target_inst = ir_gen_node(irb, node->data.resume_expr.expr, scope);
+ IrInstruction *target_inst = ir_gen_node_extra(irb, node->data.resume_expr.expr, scope, LValPtr, nullptr);
if (target_inst == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
- return ir_gen_resume_target(irb, scope, node, target_inst);
+ return ir_build_resume(irb, scope, node, target_inst);
}
-static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *node) {
+static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval,
+ ResultLoc *result_loc)
+{
assert(node->type == NodeTypeAwaitExpr);
- IrInstruction *target_inst = ir_gen_node(irb, node->data.await_expr.expr, scope);
- if (target_inst == irb->codegen->invalid_instruction)
- return irb->codegen->invalid_instruction;
-
ZigFn *fn_entry = exec_fn_entry(irb->exec);
if (!fn_entry) {
add_node_error(irb->codegen, node, buf_sprintf("await outside function definition"));
return irb->codegen->invalid_instruction;
}
- if (fn_entry->type_entry->data.fn.fn_type_id.cc != CallingConventionAsync) {
- add_node_error(irb->codegen, node, buf_sprintf("await in non-async function"));
- return irb->codegen->invalid_instruction;
- }
-
- ScopeDeferExpr *scope_defer_expr = get_scope_defer_expr(scope);
- if (scope_defer_expr) {
- if (!scope_defer_expr->reported_err) {
- add_node_error(irb->codegen, node, buf_sprintf("cannot await inside defer expression"));
- scope_defer_expr->reported_err = true;
+ ScopeSuspend *existing_suspend_scope = get_scope_suspend(scope);
+ if (existing_suspend_scope) {
+ if (!existing_suspend_scope->reported_err) {
+ ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot await inside suspend block"));
+ add_error_note(irb->codegen, msg, existing_suspend_scope->base.source_node, buf_sprintf("suspend block here"));
+ existing_suspend_scope->reported_err = true;
}
return irb->codegen->invalid_instruction;
}
- Scope *outer_scope = irb->exec->begin_scope;
+ IrInstruction *target_inst = ir_gen_node_extra(irb, node->data.await_expr.expr, scope, LValPtr, nullptr);
+ if (target_inst == irb->codegen->invalid_instruction)
+ return irb->codegen->invalid_instruction;
- IrInstruction *coro_promise_ptr = ir_build_coro_promise(irb, scope, node, target_inst);
- Buf *result_ptr_field_name = buf_create_from_str(RESULT_PTR_FIELD_NAME);
- IrInstruction *result_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_ptr_field_name, false);
-
- if (irb->codegen->have_err_ret_tracing) {
- IrInstruction *err_ret_trace_ptr = ir_build_error_return_trace(irb, scope, node, IrInstructionErrorReturnTrace::NonNull);
- Buf *err_ret_trace_ptr_field_name = buf_create_from_str(ERR_RET_TRACE_PTR_FIELD_NAME);
- IrInstruction *err_ret_trace_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_ptr_field_name, false);
- ir_build_store_ptr(irb, scope, node, err_ret_trace_ptr_field_ptr, err_ret_trace_ptr);
- }
-
- IrBasicBlock *already_awaited_block = ir_create_basic_block(irb, scope, "AlreadyAwaited");
- IrBasicBlock *not_awaited_block = ir_create_basic_block(irb, scope, "NotAwaited");
- IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled");
- IrBasicBlock *yes_suspend_block = ir_create_basic_block(irb, scope, "YesSuspend");
- IrBasicBlock *no_suspend_block = ir_create_basic_block(irb, scope, "NoSuspend");
- IrBasicBlock *merge_block = ir_create_basic_block(irb, scope, "MergeSuspend");
- IrBasicBlock *cleanup_block = ir_create_basic_block(irb, scope, "SuspendCleanup");
- IrBasicBlock *resume_block = ir_create_basic_block(irb, scope, "SuspendResume");
- IrBasicBlock *cancel_target_block = ir_create_basic_block(irb, scope, "CancelTarget");
- IrBasicBlock *do_cancel_block = ir_create_basic_block(irb, scope, "DoCancel");
- IrBasicBlock *do_defers_block = ir_create_basic_block(irb, scope, "DoDefers");
- IrBasicBlock *destroy_block = ir_create_basic_block(irb, scope, "DestroyBlock");
- IrBasicBlock *my_suspended_block = ir_create_basic_block(irb, scope, "AlreadySuspended");
- IrBasicBlock *my_not_suspended_block = ir_create_basic_block(irb, scope, "NotAlreadySuspended");
- IrBasicBlock *do_suspend_block = ir_create_basic_block(irb, scope, "DoSuspend");
-
- Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME);
- IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr,
- atomic_state_field_name, false);
-
- IrInstruction *promise_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_promise);
- IrInstruction *const_bool_false = ir_build_const_bool(irb, scope, node, false);
- IrInstruction *undef = ir_build_const_undefined(irb, scope, node);
- IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize);
- IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
- IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, scope, node, 0x7); // 0b111
- IrInstruction *ptr_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
- IrInstruction *await_mask = ir_build_const_usize(irb, scope, node, 0x4); // 0b100
- IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001
- IrInstruction *is_suspended_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010
-
- ZigVar *result_var = ir_create_var(irb, node, scope, nullptr,
- false, false, true, const_bool_false);
- IrInstruction *target_promise_type = ir_build_typeof(irb, scope, node, target_inst);
- IrInstruction *promise_result_type = ir_build_promise_result_type(irb, scope, node, target_promise_type);
- ir_build_await_bookkeeping(irb, scope, node, promise_result_type);
- IrInstruction *undef_promise_result = ir_build_implicit_cast(irb, scope, node, promise_result_type, undef, nullptr);
- build_decl_var_and_init(irb, scope, node, result_var, undef_promise_result, "result", const_bool_false);
- IrInstruction *my_result_var_ptr = ir_build_var_ptr(irb, scope, node, result_var);
- ir_build_store_ptr(irb, scope, node, result_ptr_field_ptr, my_result_var_ptr);
- IrInstruction *save_token = ir_build_coro_save(irb, scope, node, irb->exec->coro_handle);
-
- IrInstruction *coro_handle_addr = ir_build_ptr_to_int(irb, scope, node, irb->exec->coro_handle);
- IrInstruction *mask_bits = ir_build_bin_op(irb, scope, node, IrBinOpBinOr, coro_handle_addr, await_mask, false);
- IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
- usize_type_val, atomic_state_ptr, nullptr, mask_bits, nullptr,
- AtomicRmwOp_or, AtomicOrderSeqCst);
-
- IrInstruction *is_awaited_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, await_mask, false);
- IrInstruction *is_awaited_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_awaited_value, zero, false);
- ir_build_cond_br(irb, scope, node, is_awaited_bool, already_awaited_block, not_awaited_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, already_awaited_block);
- ir_build_unreachable(irb, scope, node);
-
- ir_set_cursor_at_end_and_append_block(irb, not_awaited_block);
- IrInstruction *await_handle_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
- IrInstruction *is_non_null = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false);
- IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
- IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
- ir_build_cond_br(irb, scope, node, is_canceled_bool, cancel_target_block, not_canceled_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
- ir_build_cond_br(irb, scope, node, is_non_null, no_suspend_block, yes_suspend_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, cancel_target_block);
- ir_build_cancel(irb, scope, node, target_inst);
- ir_mark_gen(ir_build_br(irb, scope, node, cleanup_block, const_bool_false));
-
- ir_set_cursor_at_end_and_append_block(irb, no_suspend_block);
- if (irb->codegen->have_err_ret_tracing) {
- Buf *err_ret_trace_field_name = buf_create_from_str(ERR_RET_TRACE_FIELD_NAME);
- IrInstruction *src_err_ret_trace_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_field_name, false);
- IrInstruction *dest_err_ret_trace_ptr = ir_build_error_return_trace(irb, scope, node, IrInstructionErrorReturnTrace::NonNull);
- ir_build_merge_err_ret_traces(irb, scope, node, coro_promise_ptr, src_err_ret_trace_ptr, dest_err_ret_trace_ptr);
- }
- Buf *result_field_name = buf_create_from_str(RESULT_FIELD_NAME);
- IrInstruction *promise_result_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_field_name, false);
- // If handle_is_ptr is true for the result type, this does not actually perform a load. But we need it to,
- // because we're about to destroy the memory. So we store it into our result variable.
- IrInstruction *no_suspend_result = ir_build_load_ptr(irb, scope, node, promise_result_ptr);
- ir_build_store_ptr(irb, scope, node, my_result_var_ptr, no_suspend_result);
- ir_build_cancel(irb, scope, node, target_inst);
- ir_build_br(irb, scope, node, merge_block, const_bool_false);
-
-
- ir_set_cursor_at_end_and_append_block(irb, yes_suspend_block);
- IrInstruction *my_prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
- usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, is_suspended_mask, nullptr,
- AtomicRmwOp_or, AtomicOrderSeqCst);
- IrInstruction *my_is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, my_prev_atomic_value, is_suspended_mask, false);
- IrInstruction *my_is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, my_is_suspended_value, zero, false);
- ir_build_cond_br(irb, scope, node, my_is_suspended_bool, my_suspended_block, my_not_suspended_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, my_suspended_block);
- ir_build_unreachable(irb, scope, node);
-
- ir_set_cursor_at_end_and_append_block(irb, my_not_suspended_block);
- IrInstruction *my_is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, my_prev_atomic_value, is_canceled_mask, false);
- IrInstruction *my_is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, my_is_canceled_value, zero, false);
- ir_build_cond_br(irb, scope, node, my_is_canceled_bool, cleanup_block, do_suspend_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, do_suspend_block);
- IrInstruction *suspend_code = ir_build_coro_suspend(irb, scope, node, save_token, const_bool_false);
-
- IrInstructionSwitchBrCase *cases = allocate<IrInstructionSwitchBrCase>(2);
- cases[0].value = ir_build_const_u8(irb, scope, node, 0);
- cases[0].block = resume_block;
- cases[1].value = ir_build_const_u8(irb, scope, node, 1);
- cases[1].block = destroy_block;
- ir_build_switch_br(irb, scope, node, suspend_code, irb->exec->coro_suspend_block,
- 2, cases, const_bool_false, nullptr);
-
- ir_set_cursor_at_end_and_append_block(irb, destroy_block);
- ir_gen_cancel_target(irb, scope, node, target_inst, false, true);
- ir_mark_gen(ir_build_br(irb, scope, node, cleanup_block, const_bool_false));
-
- ir_set_cursor_at_end_and_append_block(irb, cleanup_block);
- IrInstruction *my_mask_bits = ir_build_bin_op(irb, scope, node, IrBinOpBinOr, ptr_mask, is_canceled_mask, false);
- IrInstruction *b_my_prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
- usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, my_mask_bits, nullptr,
- AtomicRmwOp_or, AtomicOrderSeqCst);
- IrInstruction *my_await_handle_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, b_my_prev_atomic_value, ptr_mask, false);
- IrInstruction *dont_have_my_await_handle = ir_build_bin_op(irb, scope, node, IrBinOpCmpEq, my_await_handle_addr, zero, false);
- IrInstruction *dont_destroy_ourselves = ir_build_bin_op(irb, scope, node, IrBinOpBoolAnd, dont_have_my_await_handle, is_canceled_bool, false);
- ir_build_cond_br(irb, scope, node, dont_have_my_await_handle, do_defers_block, do_cancel_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, do_cancel_block);
- IrInstruction *my_await_handle = ir_build_int_to_ptr(irb, scope, node, promise_type_val, my_await_handle_addr);
- ir_gen_cancel_target(irb, scope, node, my_await_handle, true, false);
- ir_mark_gen(ir_build_br(irb, scope, node, do_defers_block, const_bool_false));
-
- ir_set_cursor_at_end_and_append_block(irb, do_defers_block);
- ir_gen_defers_for_block(irb, scope, outer_scope, true);
- ir_mark_gen(ir_build_cond_br(irb, scope, node, dont_destroy_ourselves, irb->exec->coro_early_final, irb->exec->coro_final_cleanup_block, const_bool_false));
-
- ir_set_cursor_at_end_and_append_block(irb, resume_block);
- ir_build_br(irb, scope, node, merge_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, merge_block);
- return ir_build_load_ptr(irb, scope, node, my_result_var_ptr);
+ IrInstruction *await_inst = ir_build_await_src(irb, scope, node, target_inst, result_loc);
+ return ir_lval_wrap(irb, scope, await_inst, lval, result_loc);
}
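// Illustrative Zig sketch (not part of this diff; function names are hypothetical):
// under the reworked semantics, `await` takes the frame of an `async` call as an
// l-value, and the check above rejects `await` inside a `suspend` block with
// "cannot await inside suspend block".
fn produce() i32 {
    return 42;
}

fn consume() i32 {
    var frame = async produce();
    return await frame;
}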
static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNode *node) {
@@ -8323,20 +7896,6 @@ static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNod
add_node_error(irb->codegen, node, buf_sprintf("suspend outside function definition"));
return irb->codegen->invalid_instruction;
}
- if (fn_entry->type_entry->data.fn.fn_type_id.cc != CallingConventionAsync) {
- add_node_error(irb->codegen, node, buf_sprintf("suspend in non-async function"));
- return irb->codegen->invalid_instruction;
- }
-
- ScopeDeferExpr *scope_defer_expr = get_scope_defer_expr(parent_scope);
- if (scope_defer_expr) {
- if (!scope_defer_expr->reported_err) {
- ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot suspend inside defer expression"));
- add_error_note(irb->codegen, msg, scope_defer_expr->base.source_node, buf_sprintf("defer here"));
- scope_defer_expr->reported_err = true;
- }
- return irb->codegen->invalid_instruction;
- }
ScopeSuspend *existing_suspend_scope = get_scope_suspend(parent_scope);
if (existing_suspend_scope) {
if (!existing_suspend_scope->reported_err) {
@@ -8347,91 +7906,15 @@ static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNod
return irb->codegen->invalid_instruction;
}
- Scope *outer_scope = irb->exec->begin_scope;
-
- IrBasicBlock *cleanup_block = ir_create_basic_block(irb, parent_scope, "SuspendCleanup");
- IrBasicBlock *resume_block = ir_create_basic_block(irb, parent_scope, "SuspendResume");
- IrBasicBlock *suspended_block = ir_create_basic_block(irb, parent_scope, "AlreadySuspended");
- IrBasicBlock *canceled_block = ir_create_basic_block(irb, parent_scope, "IsCanceled");
- IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, parent_scope, "NotCanceled");
- IrBasicBlock *not_suspended_block = ir_create_basic_block(irb, parent_scope, "NotAlreadySuspended");
- IrBasicBlock *cancel_awaiter_block = ir_create_basic_block(irb, parent_scope, "CancelAwaiter");
-
- IrInstruction *promise_type_val = ir_build_const_type(irb, parent_scope, node, irb->codegen->builtin_types.entry_promise);
- IrInstruction *const_bool_true = ir_build_const_bool(irb, parent_scope, node, true);
- IrInstruction *const_bool_false = ir_build_const_bool(irb, parent_scope, node, false);
- IrInstruction *usize_type_val = ir_build_const_type(irb, parent_scope, node, irb->codegen->builtin_types.entry_usize);
- IrInstruction *is_canceled_mask = ir_build_const_usize(irb, parent_scope, node, 0x1); // 0b001
- IrInstruction *is_suspended_mask = ir_build_const_usize(irb, parent_scope, node, 0x2); // 0b010
- IrInstruction *zero = ir_build_const_usize(irb, parent_scope, node, 0);
- IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, parent_scope, node, 0x7); // 0b111
- IrInstruction *ptr_mask = ir_build_un_op(irb, parent_scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
-
- IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, parent_scope, node,
- usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, is_suspended_mask, nullptr,
- AtomicRmwOp_or, AtomicOrderSeqCst);
-
- IrInstruction *is_canceled_value = ir_build_bin_op(irb, parent_scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
- IrInstruction *is_canceled_bool = ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
- ir_build_cond_br(irb, parent_scope, node, is_canceled_bool, canceled_block, not_canceled_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, canceled_block);
- IrInstruction *await_handle_addr = ir_build_bin_op(irb, parent_scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
- IrInstruction *have_await_handle = ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false);
- IrBasicBlock *post_canceled_block = irb->current_basic_block;
- ir_build_cond_br(irb, parent_scope, node, have_await_handle, cancel_awaiter_block, cleanup_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, cancel_awaiter_block);
- IrInstruction *await_handle = ir_build_int_to_ptr(irb, parent_scope, node, promise_type_val, await_handle_addr);
- ir_gen_cancel_target(irb, parent_scope, node, await_handle, true, false);
- IrBasicBlock *post_cancel_awaiter_block = irb->current_basic_block;
- ir_build_br(irb, parent_scope, node, cleanup_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
- IrInstruction *is_suspended_value = ir_build_bin_op(irb, parent_scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false);
- IrInstruction *is_suspended_bool = ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false);
- ir_build_cond_br(irb, parent_scope, node, is_suspended_bool, suspended_block, not_suspended_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, suspended_block);
- ir_build_unreachable(irb, parent_scope, node);
-
- ir_set_cursor_at_end_and_append_block(irb, not_suspended_block);
- IrInstruction *suspend_code;
- if (node->data.suspend.block == nullptr) {
- suspend_code = ir_build_coro_suspend(irb, parent_scope, node, nullptr, const_bool_false);
- } else {
- Scope *child_scope;
+ IrInstructionSuspendBegin *begin = ir_build_suspend_begin(irb, parent_scope, node);
+ if (node->data.suspend.block != nullptr) {
ScopeSuspend *suspend_scope = create_suspend_scope(irb->codegen, node, parent_scope);
- suspend_scope->resume_block = resume_block;
- child_scope = &suspend_scope->base;
- IrInstruction *save_token = ir_build_coro_save(irb, child_scope, node, irb->exec->coro_handle);
- ir_gen_node(irb, node->data.suspend.block, child_scope);
- suspend_code = ir_mark_gen(ir_build_coro_suspend(irb, parent_scope, node, save_token, const_bool_false));
- }
-
- IrInstructionSwitchBrCase *cases = allocate<IrInstructionSwitchBrCase>(2);
- cases[0].value = ir_mark_gen(ir_build_const_u8(irb, parent_scope, node, 0));
- cases[0].block = resume_block;
- cases[1].value = ir_mark_gen(ir_build_const_u8(irb, parent_scope, node, 1));
- cases[1].block = canceled_block;
- IrInstructionSwitchBr *switch_br = ir_build_switch_br(irb, parent_scope, node, suspend_code,
- irb->exec->coro_suspend_block, 2, cases, const_bool_false, nullptr);
- ir_mark_gen(&switch_br->base);
-
- ir_set_cursor_at_end_and_append_block(irb, cleanup_block);
- IrBasicBlock **incoming_blocks = allocate<IrBasicBlock *>(2);
- IrInstruction **incoming_values = allocate<IrInstruction *>(2);
- incoming_blocks[0] = post_canceled_block;
- incoming_values[0] = const_bool_true;
- incoming_blocks[1] = post_cancel_awaiter_block;
- incoming_values[1] = const_bool_false;
- IrInstruction *destroy_ourselves = ir_build_phi(irb, parent_scope, node, 2, incoming_blocks, incoming_values,
- nullptr);
- ir_gen_defers_for_block(irb, parent_scope, outer_scope, true);
- ir_mark_gen(ir_build_cond_br(irb, parent_scope, node, destroy_ourselves, irb->exec->coro_final_cleanup_block, irb->exec->coro_early_final, const_bool_false));
+ Scope *child_scope = &suspend_scope->base;
+ IrInstruction *susp_res = ir_gen_node(irb, node->data.suspend.block, child_scope);
+ ir_mark_gen(ir_build_check_statement_is_void(irb, child_scope, node->data.suspend.block, susp_res));
+ }
- ir_set_cursor_at_end_and_append_block(irb, resume_block);
- return ir_mark_gen(ir_build_const_void(irb, parent_scope, node));
+ return ir_build_suspend_finish(irb, parent_scope, node, begin);
}
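// Illustrative Zig sketch (not part of this diff): `suspend` now lowers to a
// SuspendBegin/SuspendFinish pair, and a suspend block's body must evaluate to
// void (checked above). Assumes the post-rewrite `@frame()` builtin and the
// `anyframe` type; names are hypothetical.
var suspended_frame: anyframe = undefined;

fn step() void {
    suspend {
        suspended_frame = @frame();
    }
}

fn driver() void {
    _ = async step();
    resume suspended_frame;
}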
static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scope,
@@ -8523,8 +8006,8 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
return ir_lval_wrap(irb, scope, ir_gen_array_type(irb, scope, node), lval, result_loc);
case NodeTypePointerType:
return ir_lval_wrap(irb, scope, ir_gen_pointer_type(irb, scope, node), lval, result_loc);
- case NodeTypePromiseType:
- return ir_lval_wrap(irb, scope, ir_gen_promise_type(irb, scope, node), lval, result_loc);
+ case NodeTypeAnyFrameType:
+ return ir_lval_wrap(irb, scope, ir_gen_anyframe_type(irb, scope, node), lval, result_loc);
case NodeTypeStringLiteral:
return ir_lval_wrap(irb, scope, ir_gen_string_literal(irb, scope, node), lval, result_loc);
case NodeTypeUndefinedLiteral:
@@ -8561,12 +8044,10 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
return ir_lval_wrap(irb, scope, ir_gen_fn_proto(irb, scope, node), lval, result_loc);
case NodeTypeErrorSetDecl:
return ir_lval_wrap(irb, scope, ir_gen_err_set_decl(irb, scope, node), lval, result_loc);
- case NodeTypeCancel:
- return ir_lval_wrap(irb, scope, ir_gen_cancel(irb, scope, node), lval, result_loc);
case NodeTypeResume:
return ir_lval_wrap(irb, scope, ir_gen_resume(irb, scope, node), lval, result_loc);
case NodeTypeAwaitExpr:
- return ir_lval_wrap(irb, scope, ir_gen_await_expr(irb, scope, node), lval, result_loc);
+ return ir_gen_await_expr(irb, scope, node, lval, result_loc);
case NodeTypeSuspend:
return ir_lval_wrap(irb, scope, ir_gen_suspend(irb, scope, node), lval, result_loc);
case NodeTypeEnumLiteral:
@@ -8626,235 +8107,22 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
irb->codegen = codegen;
irb->exec = ir_executable;
+ irb->main_block_node = node;
IrBasicBlock *entry_block = ir_create_basic_block(irb, scope, "Entry");
ir_set_cursor_at_end_and_append_block(irb, entry_block);
// Entry block gets a reference because we enter it to begin.
ir_ref_bb(irb->current_basic_block);
- ZigFn *fn_entry = exec_fn_entry(irb->exec);
-
- bool is_async = fn_entry != nullptr && fn_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync;
- IrInstruction *coro_id;
- IrInstruction *u8_ptr_type;
- IrInstruction *const_bool_false;
- IrInstruction *coro_promise_ptr;
- IrInstruction *err_ret_trace_ptr;
- ZigType *return_type;
- Buf *result_ptr_field_name;
- ZigVar *coro_size_var;
- if (is_async) {
- // create the coro promise
- Scope *coro_scope = create_coro_prelude_scope(irb->codegen, node, scope);
- const_bool_false = ir_build_const_bool(irb, coro_scope, node, false);
- ZigVar *promise_var = ir_create_var(irb, node, coro_scope, nullptr, false, false, true, const_bool_false);
-
- return_type = fn_entry->type_entry->data.fn.fn_type_id.return_type;
- IrInstruction *undef = ir_build_const_undefined(irb, coro_scope, node);
- // TODO mark this var decl as "no safety" e.g. disable initializing the undef value to 0xaa
- ZigType *coro_frame_type = get_promise_frame_type(irb->codegen, return_type);
- IrInstruction *coro_frame_type_value = ir_build_const_type(irb, coro_scope, node, coro_frame_type);
- IrInstruction *undef_coro_frame = ir_build_implicit_cast(irb, coro_scope, node, coro_frame_type_value, undef, nullptr);
- build_decl_var_and_init(irb, coro_scope, node, promise_var, undef_coro_frame, "promise", const_bool_false);
- coro_promise_ptr = ir_build_var_ptr(irb, coro_scope, node, promise_var);
-
- ZigVar *await_handle_var = ir_create_var(irb, node, coro_scope, nullptr, false, false, true, const_bool_false);
- IrInstruction *null_value = ir_build_const_null(irb, coro_scope, node);
- IrInstruction *await_handle_type_val = ir_build_const_type(irb, coro_scope, node,
- get_optional_type(irb->codegen, irb->codegen->builtin_types.entry_promise));
- IrInstruction *null_await_handle = ir_build_implicit_cast(irb, coro_scope, node, await_handle_type_val, null_value, nullptr);
- build_decl_var_and_init(irb, coro_scope, node, await_handle_var, null_await_handle, "await_handle", const_bool_false);
- irb->exec->await_handle_var_ptr = ir_build_var_ptr(irb, coro_scope, node, await_handle_var);
-
- u8_ptr_type = ir_build_const_type(irb, coro_scope, node,
- get_pointer_to_type(irb->codegen, irb->codegen->builtin_types.entry_u8, false));
- IrInstruction *promise_as_u8_ptr = ir_build_ptr_cast_src(irb, coro_scope, node, u8_ptr_type,
- coro_promise_ptr, false);
- coro_id = ir_build_coro_id(irb, coro_scope, node, promise_as_u8_ptr);
- coro_size_var = ir_create_var(irb, node, coro_scope, nullptr, false, false, true, const_bool_false);
- IrInstruction *coro_size = ir_build_coro_size(irb, coro_scope, node);
- build_decl_var_and_init(irb, coro_scope, node, coro_size_var, coro_size, "coro_size", const_bool_false);
- IrInstruction *implicit_allocator_ptr = ir_build_get_implicit_allocator(irb, coro_scope, node,
- ImplicitAllocatorIdArg);
- irb->exec->coro_allocator_var = ir_create_var(irb, node, coro_scope, nullptr, true, true, true, const_bool_false);
- build_decl_var_and_init(irb, coro_scope, node, irb->exec->coro_allocator_var, implicit_allocator_ptr,
- "allocator", const_bool_false);
- Buf *realloc_field_name = buf_create_from_str(ASYNC_REALLOC_FIELD_NAME);
- IrInstruction *realloc_fn_ptr = ir_build_field_ptr(irb, coro_scope, node, implicit_allocator_ptr, realloc_field_name, false);
- IrInstruction *realloc_fn = ir_build_load_ptr(irb, coro_scope, node, realloc_fn_ptr);
- IrInstruction *maybe_coro_mem_ptr = ir_build_coro_alloc_helper(irb, coro_scope, node, realloc_fn, coro_size);
- IrInstruction *alloc_result_is_ok = ir_build_test_nonnull(irb, coro_scope, node, maybe_coro_mem_ptr);
- IrBasicBlock *alloc_err_block = ir_create_basic_block(irb, coro_scope, "AllocError");
- IrBasicBlock *alloc_ok_block = ir_create_basic_block(irb, coro_scope, "AllocOk");
- ir_build_cond_br(irb, coro_scope, node, alloc_result_is_ok, alloc_ok_block, alloc_err_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, alloc_err_block);
- // we can return undefined here, because the caller passes a pointer to the error struct field
- // in the error union result, and we populate it in case of allocation failure.
- ir_build_return(irb, coro_scope, node, undef);
-
- ir_set_cursor_at_end_and_append_block(irb, alloc_ok_block);
- IrInstruction *coro_mem_ptr = ir_build_ptr_cast_src(irb, coro_scope, node, u8_ptr_type, maybe_coro_mem_ptr,
- false);
- irb->exec->coro_handle = ir_build_coro_begin(irb, coro_scope, node, coro_id, coro_mem_ptr);
-
- Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME);
- irb->exec->atomic_state_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr,
- atomic_state_field_name, false);
- IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
- ir_build_store_ptr(irb, scope, node, irb->exec->atomic_state_field_ptr, zero);
- Buf *result_field_name = buf_create_from_str(RESULT_FIELD_NAME);
- irb->exec->coro_result_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_field_name, false);
- result_ptr_field_name = buf_create_from_str(RESULT_PTR_FIELD_NAME);
- irb->exec->coro_result_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_ptr_field_name, false);
- ir_build_store_ptr(irb, scope, node, irb->exec->coro_result_ptr_field_ptr, irb->exec->coro_result_field_ptr);
- if (irb->codegen->have_err_ret_tracing) {
- // initialize the error return trace
- Buf *return_addresses_field_name = buf_create_from_str(RETURN_ADDRESSES_FIELD_NAME);
- IrInstruction *return_addresses_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, return_addresses_field_name, false);
-
- Buf *err_ret_trace_field_name = buf_create_from_str(ERR_RET_TRACE_FIELD_NAME);
- err_ret_trace_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_field_name, false);
- ir_build_mark_err_ret_trace_ptr(irb, scope, node, err_ret_trace_ptr);
-
- // coordinate with builtin.zig
- Buf *index_name = buf_create_from_str("index");
- IrInstruction *index_ptr = ir_build_field_ptr(irb, scope, node, err_ret_trace_ptr, index_name, false);
- ir_build_store_ptr(irb, scope, node, index_ptr, zero);
-
- Buf *instruction_addresses_name = buf_create_from_str("instruction_addresses");
- IrInstruction *addrs_slice_ptr = ir_build_field_ptr(irb, scope, node, err_ret_trace_ptr, instruction_addresses_name, false);
-
- IrInstruction *slice_value = ir_build_slice_src(irb, scope, node, return_addresses_ptr, zero, nullptr, false, no_result_loc());
- ir_build_store_ptr(irb, scope, node, addrs_slice_ptr, slice_value);
- }
-
-
- irb->exec->coro_early_final = ir_create_basic_block(irb, scope, "CoroEarlyFinal");
- irb->exec->coro_normal_final = ir_create_basic_block(irb, scope, "CoroNormalFinal");
- irb->exec->coro_suspend_block = ir_create_basic_block(irb, scope, "Suspend");
- irb->exec->coro_final_cleanup_block = ir_create_basic_block(irb, scope, "FinalCleanup");
- }
-
IrInstruction *result = ir_gen_node_extra(irb, node, scope, LValNone, nullptr);
assert(result);
if (irb->exec->invalid)
return false;
if (!instr_is_unreachable(result)) {
+ ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, result->source_node, result));
// no need for save_err_ret_addr because this cannot return error
- ir_gen_async_return(irb, scope, result->source_node, result, true);
- }
-
- if (is_async) {
- IrBasicBlock *invalid_resume_block = ir_create_basic_block(irb, scope, "InvalidResume");
- IrBasicBlock *check_free_block = ir_create_basic_block(irb, scope, "CheckFree");
-
- ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_early_final);
- IrInstruction *const_bool_true = ir_build_const_bool(irb, scope, node, true);
- IrInstruction *suspend_code = ir_build_coro_suspend(irb, scope, node, nullptr, const_bool_true);
- IrInstructionSwitchBrCase *cases = allocate<IrInstructionSwitchBrCase>(2);
- cases[0].value = ir_build_const_u8(irb, scope, node, 0);
- cases[0].block = invalid_resume_block;
- cases[1].value = ir_build_const_u8(irb, scope, node, 1);
- cases[1].block = irb->exec->coro_final_cleanup_block;
- ir_build_switch_br(irb, scope, node, suspend_code, irb->exec->coro_suspend_block, 2, cases, const_bool_false, nullptr);
-
- ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_suspend_block);
- ir_build_coro_end(irb, scope, node);
- ir_build_return(irb, scope, node, irb->exec->coro_handle);
-
- ir_set_cursor_at_end_and_append_block(irb, invalid_resume_block);
- ir_build_unreachable(irb, scope, node);
-
- ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_normal_final);
- if (type_has_bits(return_type)) {
- IrInstruction *u8_ptr_type_unknown_len = ir_build_const_type(irb, scope, node,
- get_pointer_to_type_extra(irb->codegen, irb->codegen->builtin_types.entry_u8,
- false, false, PtrLenUnknown, 0, 0, 0, false));
- IrInstruction *result_ptr = ir_build_load_ptr(irb, scope, node, irb->exec->coro_result_ptr_field_ptr);
- IrInstruction *result_ptr_as_u8_ptr = ir_build_ptr_cast_src(irb, scope, node, u8_ptr_type_unknown_len,
- result_ptr, false);
- IrInstruction *return_value_ptr_as_u8_ptr = ir_build_ptr_cast_src(irb, scope, node,
- u8_ptr_type_unknown_len, irb->exec->coro_result_field_ptr, false);
- IrInstruction *return_type_inst = ir_build_const_type(irb, scope, node,
- fn_entry->type_entry->data.fn.fn_type_id.return_type);
- IrInstruction *size_of_ret_val = ir_build_size_of(irb, scope, node, return_type_inst);
- ir_build_memcpy(irb, scope, node, result_ptr_as_u8_ptr, return_value_ptr_as_u8_ptr, size_of_ret_val);
- }
- if (irb->codegen->have_err_ret_tracing) {
- Buf *err_ret_trace_ptr_field_name = buf_create_from_str(ERR_RET_TRACE_PTR_FIELD_NAME);
- IrInstruction *err_ret_trace_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_ptr_field_name, false);
- IrInstruction *dest_err_ret_trace_ptr = ir_build_load_ptr(irb, scope, node, err_ret_trace_ptr_field_ptr);
- ir_build_merge_err_ret_traces(irb, scope, node, coro_promise_ptr, err_ret_trace_ptr, dest_err_ret_trace_ptr);
- }
- // Before we destroy the coroutine frame, we need to load the target promise into
- // a register or local variable which does not get spilled into the frame,
- // otherwise llvm tries to access memory inside the destroyed frame.
- IrInstruction *unwrapped_await_handle_ptr = ir_build_optional_unwrap_ptr(irb, scope, node,
- irb->exec->await_handle_var_ptr, false, false);
- IrInstruction *await_handle_in_block = ir_build_load_ptr(irb, scope, node, unwrapped_await_handle_ptr);
- ir_build_br(irb, scope, node, check_free_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_final_cleanup_block);
- ir_build_br(irb, scope, node, check_free_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, check_free_block);
- IrBasicBlock **incoming_blocks = allocate<IrBasicBlock *>(2);
- IrInstruction **incoming_values = allocate<IrInstruction *>(2);
- incoming_blocks[0] = irb->exec->coro_final_cleanup_block;
- incoming_values[0] = const_bool_false;
- incoming_blocks[1] = irb->exec->coro_normal_final;
- incoming_values[1] = const_bool_true;
- IrInstruction *resume_awaiter = ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values, nullptr);
-
- IrBasicBlock **merge_incoming_blocks = allocate<IrBasicBlock *>(2);
- IrInstruction **merge_incoming_values = allocate<IrInstruction *>(2);
- merge_incoming_blocks[0] = irb->exec->coro_final_cleanup_block;
- merge_incoming_values[0] = ir_build_const_undefined(irb, scope, node);
- merge_incoming_blocks[1] = irb->exec->coro_normal_final;
- merge_incoming_values[1] = await_handle_in_block;
- IrInstruction *awaiter_handle = ir_build_phi(irb, scope, node, 2, merge_incoming_blocks, merge_incoming_values, nullptr);
-
- Buf *shrink_field_name = buf_create_from_str(ASYNC_SHRINK_FIELD_NAME);
- IrInstruction *implicit_allocator_ptr = ir_build_get_implicit_allocator(irb, scope, node,
- ImplicitAllocatorIdLocalVar);
- IrInstruction *shrink_fn_ptr = ir_build_field_ptr(irb, scope, node, implicit_allocator_ptr, shrink_field_name, false);
- IrInstruction *shrink_fn = ir_build_load_ptr(irb, scope, node, shrink_fn_ptr);
- IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
- IrInstruction *coro_mem_ptr_maybe = ir_build_coro_free(irb, scope, node, coro_id, irb->exec->coro_handle);
- IrInstruction *u8_ptr_type_unknown_len = ir_build_const_type(irb, scope, node,
- get_pointer_to_type_extra(irb->codegen, irb->codegen->builtin_types.entry_u8,
- false, false, PtrLenUnknown, 0, 0, 0, false));
- IrInstruction *coro_mem_ptr = ir_build_ptr_cast_src(irb, scope, node, u8_ptr_type_unknown_len,
- coro_mem_ptr_maybe, false);
- IrInstruction *coro_mem_ptr_ref = ir_build_ref(irb, scope, node, coro_mem_ptr, true, false);
- IrInstruction *coro_size_ptr = ir_build_var_ptr(irb, scope, node, coro_size_var);
- IrInstruction *coro_size = ir_build_load_ptr(irb, scope, node, coro_size_ptr);
- IrInstruction *mem_slice = ir_build_slice_src(irb, scope, node, coro_mem_ptr_ref, zero, coro_size, false,
- no_result_loc());
- size_t arg_count = 5;
- IrInstruction **args = allocate<IrInstruction *>(arg_count);
- args[0] = implicit_allocator_ptr; // self
- args[1] = mem_slice; // old_mem
- args[2] = ir_build_const_usize(irb, scope, node, 8); // old_align
- // TODO: intentional memory leak here. If this is set to 0 then there is an issue where a coroutine
- // calls the function and it frees its own stack frame, but then the return value is a slice, which
- // is implemented as an sret struct. Writing to the return pointer causes an invalid memory write.
- // We could work around it by having a global helper function which has a void return type
- // and calling that instead. But instead this hack will suffice until I rework coroutines to be
- // non-allocating. Basically coroutines are not supported right now until they are reworked.
- args[3] = ir_build_const_usize(irb, scope, node, 1); // new_size
- args[4] = ir_build_const_usize(irb, scope, node, 1); // new_align
- ir_build_call_src(irb, scope, node, nullptr, shrink_fn, arg_count, args, false, FnInlineAuto, false, nullptr,
- nullptr, no_result_loc());
-
- IrBasicBlock *resume_block = ir_create_basic_block(irb, scope, "Resume");
- ir_build_cond_br(irb, scope, node, resume_awaiter, resume_block, irb->exec->coro_suspend_block, const_bool_false);
-
- ir_set_cursor_at_end_and_append_block(irb, resume_block);
- ir_gen_resume_target(irb, scope, node, awaiter_handle);
- ir_build_br(irb, scope, node, irb->exec->coro_suspend_block, const_bool_false);
+ ir_mark_gen(ir_build_return(irb, scope, result->source_node, result));
}
return true;
@@ -8871,18 +8139,24 @@ bool ir_gen_fn(CodeGen *codegen, ZigFn *fn_entry) {
return ir_gen(codegen, body_node, fn_entry->child_scope, ir_executable);
}
-static void add_call_stack_errors(CodeGen *codegen, IrExecutable *exec, ErrorMsg *err_msg, int limit) {
+static void ir_add_call_stack_errors(CodeGen *codegen, IrExecutable *exec, ErrorMsg *err_msg, int limit) {
if (!exec || !exec->source_node || limit < 0) return;
add_error_note(codegen, err_msg, exec->source_node, buf_sprintf("called from here"));
- add_call_stack_errors(codegen, exec->parent_exec, err_msg, limit - 1);
+ ir_add_call_stack_errors(codegen, exec->parent_exec, err_msg, limit - 1);
+}
+
+void ir_add_analysis_trace(IrAnalyze *ira, ErrorMsg *err_msg, Buf *text) {
+ IrInstruction *old_instruction = ira->old_irb.current_basic_block->instruction_list.at(ira->instruction_index);
+ add_error_note(ira->codegen, err_msg, old_instruction->source_node, text);
+ ir_add_call_stack_errors(ira->codegen, ira->new_irb.exec, err_msg, 10);
}
static ErrorMsg *exec_add_error_node(CodeGen *codegen, IrExecutable *exec, AstNode *source_node, Buf *msg) {
invalidate_exec(exec);
ErrorMsg *err_msg = add_node_error(codegen, source_node, msg);
if (exec->parent_exec) {
- add_call_stack_errors(codegen, exec, err_msg, 10);
+ ir_add_call_stack_errors(codegen, exec, err_msg, 10);
}
return err_msg;
}
@@ -8946,13 +8220,13 @@ static ConstExprValue *ir_exec_const_result(CodeGen *codegen, IrExecutable *exec
IrInstruction *instruction = bb->instruction_list.at(i);
if (instruction->id == IrInstructionIdReturn) {
IrInstructionReturn *ret_inst = (IrInstructionReturn *)instruction;
- IrInstruction *value = ret_inst->value;
- if (value->value.special == ConstValSpecialRuntime) {
- exec_add_error_node(codegen, exec, value->source_node,
+ IrInstruction *operand = ret_inst->operand;
+ if (operand->value.special == ConstValSpecialRuntime) {
+ exec_add_error_node(codegen, exec, operand->source_node,
buf_sprintf("unable to evaluate constant expression"));
return &codegen->invalid_instruction->value;
}
- return &value->value;
+ return &operand->value;
} else if (ir_has_side_effects(instruction)) {
if (instr_is_comptime(instruction)) {
switch (instruction->id) {
@@ -10203,12 +9477,6 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, ZigType *wanted
return result;
}
- if (wanted_type == ira->codegen->builtin_types.entry_promise &&
- actual_type->id == ZigTypeIdPromise)
- {
- return result;
- }
-
// fn
if (wanted_type->id == ZigTypeIdFn &&
actual_type->id == ZigTypeIdFn)
@@ -10243,20 +9511,6 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, ZigType *wanted
return result;
}
}
- if (!wanted_type->data.fn.is_generic && wanted_type->data.fn.fn_type_id.cc == CallingConventionAsync) {
- ConstCastOnly child = types_match_const_cast_only(ira,
- actual_type->data.fn.fn_type_id.async_allocator_type,
- wanted_type->data.fn.fn_type_id.async_allocator_type,
- source_node, false);
- if (child.id == ConstCastResultIdInvalid)
- return child;
- if (child.id != ConstCastResultIdOk) {
- result.id = ConstCastResultIdAsyncAllocatorType;
- result.data.async_allocator_type = allocate_nonzero<ConstCastOnly>(1);
- *result.data.async_allocator_type = child;
- return result;
- }
- }
if (wanted_type->data.fn.fn_type_id.param_count != actual_type->data.fn.fn_type_id.param_count) {
result.id = ConstCastResultIdFnArgCount;
return result;
@@ -10561,6 +9815,8 @@ static ZigType *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_node, ZigT
ZigType *prev_err_set_type = (err_set_type == nullptr) ? prev_type->data.error_union.err_set_type : err_set_type;
ZigType *cur_err_set_type = cur_type->data.error_union.err_set_type;
+ if (prev_err_set_type == cur_err_set_type)
+ continue;
if (!resolve_inferred_error_set(ira->codegen, prev_err_set_type, cur_inst->source_node)) {
return ira->codegen->builtin_types.entry_invalid;
@@ -11206,7 +10462,7 @@ static IrBasicBlock *ir_get_new_bb_runtime(IrAnalyze *ira, IrBasicBlock *old_bb,
}
static void ir_start_bb(IrAnalyze *ira, IrBasicBlock *old_bb, IrBasicBlock *const_predecessor_bb) {
- ir_assert(!old_bb->suspended, old_bb->instruction_list.at(0));
+ ir_assert(!old_bb->suspended, (old_bb->instruction_list.length != 0) ? old_bb->instruction_list.at(0) : nullptr);
ira->instruction_index = 0;
ira->old_irb.current_basic_block = old_bb;
ira->const_predecessor_bb = const_predecessor_bb;
@@ -11729,6 +10985,33 @@ static IrInstruction *ir_analyze_err_set_cast(IrAnalyze *ira, IrInstruction *sou
return result;
}
+static IrInstruction *ir_analyze_frame_ptr_to_anyframe(IrAnalyze *ira, IrInstruction *source_instr,
+ IrInstruction *value, ZigType *wanted_type)
+{
+ if (instr_is_comptime(value)) {
+ zig_panic("TODO comptime frame pointer");
+ }
+
+ IrInstruction *result = ir_build_cast(&ira->new_irb, source_instr->scope, source_instr->source_node,
+ wanted_type, value, CastOpBitCast);
+ result->value.type = wanted_type;
+ return result;
+}
+
+static IrInstruction *ir_analyze_anyframe_to_anyframe(IrAnalyze *ira, IrInstruction *source_instr,
+ IrInstruction *value, ZigType *wanted_type)
+{
+ if (instr_is_comptime(value)) {
+ zig_panic("TODO comptime anyframe->T to anyframe");
+ }
+
+ IrInstruction *result = ir_build_cast(&ira->new_irb, source_instr->scope, source_instr->source_node,
+ wanted_type, value, CastOpBitCast);
+ result->value.type = wanted_type;
+ return result;
+}
+
+
static IrInstruction *ir_analyze_err_wrap_code(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *value,
ZigType *wanted_type, ResultLoc *result_loc)
{
@@ -12576,12 +11859,10 @@ static IrInstruction *ir_analyze_int_to_c_ptr(IrAnalyze *ira, IrInstruction *sou
static bool is_pointery_and_elem_is_not_pointery(ZigType *ty) {
if (ty->id == ZigTypeIdPointer) return ty->data.pointer.child_type->id != ZigTypeIdPointer;
if (ty->id == ZigTypeIdFn) return true;
- if (ty->id == ZigTypeIdPromise) return true;
if (ty->id == ZigTypeIdOptional) {
ZigType *ptr_ty = ty->data.maybe.child_type;
if (ptr_ty->id == ZigTypeIdPointer) return ptr_ty->data.pointer.child_type->id != ZigTypeIdPointer;
if (ptr_ty->id == ZigTypeIdFn) return true;
- if (ptr_ty->id == ZigTypeIdPromise) return true;
}
return false;
}
@@ -12829,6 +12110,29 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
+ // *@Frame(func) to anyframe->T or anyframe
+ if (actual_type->id == ZigTypeIdPointer && actual_type->data.pointer.ptr_len == PtrLenSingle &&
+ actual_type->data.pointer.child_type->id == ZigTypeIdFnFrame && wanted_type->id == ZigTypeIdAnyFrame)
+ {
+ bool ok = true;
+ if (wanted_type->data.any_frame.result_type != nullptr) {
+ ZigFn *fn = actual_type->data.pointer.child_type->data.frame.fn;
+ ZigType *fn_return_type = fn->type_entry->data.fn.fn_type_id.return_type;
+ if (wanted_type->data.any_frame.result_type != fn_return_type) {
+ ok = false;
+ }
+ }
+ if (ok) {
+ return ir_analyze_frame_ptr_to_anyframe(ira, source_instr, value, wanted_type);
+ }
+ }
+
+ // anyframe->T to anyframe
+ if (actual_type->id == ZigTypeIdAnyFrame && actual_type->data.any_frame.result_type != nullptr &&
+ wanted_type->id == ZigTypeIdAnyFrame && wanted_type->data.any_frame.result_type == nullptr)
+ {
+ return ir_analyze_anyframe_to_anyframe(ira, source_instr, value, wanted_type);
+ }
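// Illustrative Zig sketch (not part of this diff) of the two coercions accepted
// above: *@Frame(func) casts to anyframe->T when T matches func's return type
// (or to plain anyframe), and anyframe->T casts to anyframe.
fn func() i32 {
    return 1;
}

fn coercions() void {
    var frame = async func();          // @Frame(func)
    var typed: anyframe->i32 = &frame; // *@Frame(func) -> anyframe->i32
    var untyped: anyframe = typed;     // anyframe->i32 -> anyframe
    _ = untyped;
}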
// cast from null literal to maybe type
if (wanted_type->id == ZigTypeIdOptional &&
@@ -13333,11 +12637,11 @@ static IrInstruction *ir_analyze_instruction_add_implicit_return_type(IrAnalyze
}
static IrInstruction *ir_analyze_instruction_return(IrAnalyze *ira, IrInstructionReturn *instruction) {
- IrInstruction *value = instruction->value->child;
- if (type_is_invalid(value->value.type))
+ IrInstruction *operand = instruction->operand->child;
+ if (type_is_invalid(operand->value.type))
return ir_unreach_error(ira);
- if (!instr_is_comptime(value) && handle_is_ptr(ira->explicit_return_type)) {
+ if (!instr_is_comptime(operand) && handle_is_ptr(ira->explicit_return_type)) {
// result location mechanism took care of it.
IrInstruction *result = ir_build_return(&ira->new_irb, instruction->base.scope,
instruction->base.source_node, nullptr);
@@ -13345,8 +12649,8 @@ static IrInstruction *ir_analyze_instruction_return(IrAnalyze *ira, IrInstructio
return ir_finish_anal(ira, result);
}
- IrInstruction *casted_value = ir_implicit_cast(ira, value, ira->explicit_return_type);
- if (type_is_invalid(casted_value->value.type)) {
+ IrInstruction *casted_operand = ir_implicit_cast(ira, operand, ira->explicit_return_type);
+ if (type_is_invalid(casted_operand->value.type)) {
AstNode *source_node = ira->explicit_return_type_source_node;
if (source_node != nullptr) {
ErrorMsg *msg = ira->codegen->errors.last();
@@ -13356,15 +12660,16 @@ static IrInstruction *ir_analyze_instruction_return(IrAnalyze *ira, IrInstructio
return ir_unreach_error(ira);
}
- if (casted_value->value.special == ConstValSpecialRuntime &&
- casted_value->value.type->id == ZigTypeIdPointer &&
- casted_value->value.data.rh_ptr == RuntimeHintPtrStack)
+ if (casted_operand->value.special == ConstValSpecialRuntime &&
+ casted_operand->value.type->id == ZigTypeIdPointer &&
+ casted_operand->value.data.rh_ptr == RuntimeHintPtrStack)
{
- ir_add_error(ira, casted_value, buf_sprintf("function returns address of local variable"));
+ ir_add_error(ira, casted_operand, buf_sprintf("function returns address of local variable"));
return ir_unreach_error(ira);
}
+
IrInstruction *result = ir_build_return(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, casted_value);
+ instruction->base.source_node, casted_operand);
result->value.type = ira->codegen->builtin_types.entry_unreachable;
return ir_finish_anal(ira, result);
}
@@ -13658,9 +12963,9 @@ static IrInstruction *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp *
case ZigTypeIdOpaque:
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
- case ZigTypeIdPromise:
case ZigTypeIdEnum:
case ZigTypeIdEnumLiteral:
+ case ZigTypeIdAnyFrame:
operator_allowed = is_equality_cmp;
break;
@@ -13675,6 +12980,7 @@ static IrInstruction *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp *
case ZigTypeIdNull:
case ZigTypeIdErrorUnion:
case ZigTypeIdUnion:
+ case ZigTypeIdFnFrame:
operator_allowed = false;
break;
case ZigTypeIdOptional:
@@ -15039,7 +14345,8 @@ static IrInstruction *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructio
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
- case ZigTypeIdPromise:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
ir_add_error(ira, target,
buf_sprintf("invalid export target '%s'", buf_ptr(&type_value->name)));
break;
@@ -15063,8 +14370,9 @@ static IrInstruction *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructio
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
- case ZigTypeIdPromise:
case ZigTypeIdEnumLiteral:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
ir_add_error(ira, target,
buf_sprintf("invalid export target type '%s'", buf_ptr(&target->value.type->name)));
break;
@@ -15091,8 +14399,8 @@ static bool exec_has_err_ret_trace(CodeGen *g, IrExecutable *exec) {
static IrInstruction *ir_analyze_instruction_error_return_trace(IrAnalyze *ira,
IrInstructionErrorReturnTrace *instruction)
{
+ ZigType *ptr_to_stack_trace_type = get_pointer_to_type(ira->codegen, get_stack_trace_type(ira->codegen), false);
if (instruction->optional == IrInstructionErrorReturnTrace::Null) {
- ZigType *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(ira->codegen);
ZigType *optional_type = get_optional_type(ira->codegen, ptr_to_stack_trace_type);
if (!exec_has_err_ret_trace(ira->codegen, ira->new_irb.exec)) {
IrInstruction *result = ir_const(ira, &instruction->base, optional_type);
@@ -15110,7 +14418,7 @@ static IrInstruction *ir_analyze_instruction_error_return_trace(IrAnalyze *ira,
assert(ira->codegen->have_err_ret_tracing);
IrInstruction *new_instruction = ir_build_error_return_trace(&ira->new_irb, instruction->base.scope,
instruction->base.source_node, instruction->optional);
- new_instruction->value.type = get_ptr_to_stack_trace_type(ira->codegen);
+ new_instruction->value.type = ptr_to_stack_trace_type;
return new_instruction;
}
}
@@ -15142,42 +14450,6 @@ static IrInstruction *ir_analyze_instruction_error_union(IrAnalyze *ira,
return ir_const_type(ira, &instruction->base, result_type);
}
-IrInstruction *ir_get_implicit_allocator(IrAnalyze *ira, IrInstruction *source_instr, ImplicitAllocatorId id) {
- ZigFn *parent_fn_entry = exec_fn_entry(ira->new_irb.exec);
- if (parent_fn_entry == nullptr) {
- ir_add_error(ira, source_instr, buf_sprintf("no implicit allocator available"));
- return ira->codegen->invalid_instruction;
- }
-
- FnTypeId *parent_fn_type = &parent_fn_entry->type_entry->data.fn.fn_type_id;
- if (parent_fn_type->cc != CallingConventionAsync) {
- ir_add_error(ira, source_instr, buf_sprintf("async function call from non-async caller requires allocator parameter"));
- return ira->codegen->invalid_instruction;
- }
-
- assert(parent_fn_type->async_allocator_type != nullptr);
-
- switch (id) {
- case ImplicitAllocatorIdArg:
- {
- IrInstruction *result = ir_build_get_implicit_allocator(&ira->new_irb, source_instr->scope,
- source_instr->source_node, ImplicitAllocatorIdArg);
- result->value.type = parent_fn_type->async_allocator_type;
- return result;
- }
- case ImplicitAllocatorIdLocalVar:
- {
- ZigVar *coro_allocator_var = ira->old_irb.exec->coro_allocator_var;
- assert(coro_allocator_var != nullptr);
- IrInstruction *var_ptr_inst = ir_get_var_ptr(ira, source_instr, coro_allocator_var);
- IrInstruction *result = ir_get_deref(ira, source_instr, var_ptr_inst, nullptr);
- assert(result->value.type != nullptr);
- return result;
- }
- }
- zig_unreachable();
-}
-
static IrInstruction *ir_analyze_alloca(IrAnalyze *ira, IrInstruction *source_inst, ZigType *var_type,
uint32_t align, const char *name_hint, bool force_comptime)
{
@@ -15186,7 +14458,7 @@ static IrInstruction *ir_analyze_alloca(IrAnalyze *ira, IrInstruction *source_in
ConstExprValue *pointee = create_const_vals(1);
pointee->special = ConstValSpecialUndef;
- IrInstructionAllocaGen *result = ir_create_alloca_gen(ira, source_inst, align, name_hint);
+ IrInstructionAllocaGen *result = ir_build_alloca_gen(ira, source_inst, align, name_hint);
result->base.value.special = ConstValSpecialStatic;
result->base.value.data.x_ptr.special = ConstPtrSpecialRef;
result->base.value.data.x_ptr.mut = force_comptime ? ConstPtrMutComptimeVar : ConstPtrMutInfer;
@@ -15283,7 +14555,7 @@ static IrInstruction *ir_resolve_result_raw(IrAnalyze *ira, IrInstruction *suspe
return nullptr;
}
// need to return a result location and don't have one. use a stack allocation
- IrInstructionAllocaGen *alloca_gen = ir_create_alloca_gen(ira, suspend_source_instr, 0, "");
+ IrInstructionAllocaGen *alloca_gen = ir_build_alloca_gen(ira, suspend_source_instr, 0, "");
if ((err = type_resolve(ira->codegen, value_type, ResolveStatusZeroBitsKnown)))
return ira->codegen->invalid_instruction;
alloca_gen->base.value.type = get_pointer_to_type_extra(ira->codegen, value_type, false, false,
@@ -15353,8 +14625,12 @@ static IrInstruction *ir_resolve_result_raw(IrAnalyze *ira, IrInstruction *suspe
if ((err = type_resolve(ira->codegen, ira->explicit_return_type, ResolveStatusZeroBitsKnown))) {
return ira->codegen->invalid_instruction;
}
- if (!type_has_bits(ira->explicit_return_type) || !handle_is_ptr(ira->explicit_return_type))
- return nullptr;
+ if (!type_has_bits(ira->explicit_return_type) || !handle_is_ptr(ira->explicit_return_type)) {
+ ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
+ if (fn_entry == nullptr || fn_entry->inferred_async_node == nullptr) {
+ return nullptr;
+ }
+ }
ZigType *ptr_return_type = get_pointer_to_type(ira->codegen, ira->explicit_return_type, false);
result_loc->written = true;
@@ -15616,48 +14892,43 @@ static IrInstruction *ir_analyze_instruction_reset_result(IrAnalyze *ira, IrInst
static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc *call_instruction, ZigFn *fn_entry,
ZigType *fn_type, IrInstruction *fn_ref, IrInstruction **casted_args, size_t arg_count,
- IrInstruction *async_allocator_inst)
+ IrInstruction *casted_new_stack)
{
- Buf *realloc_field_name = buf_create_from_str(ASYNC_REALLOC_FIELD_NAME);
- ir_assert(async_allocator_inst->value.type->id == ZigTypeIdPointer, &call_instruction->base);
- ZigType *container_type = async_allocator_inst->value.type->data.pointer.child_type;
- IrInstruction *field_ptr_inst = ir_analyze_container_field_ptr(ira, realloc_field_name, &call_instruction->base,
- async_allocator_inst, container_type, false);
- if (type_is_invalid(field_ptr_inst->value.type)) {
- return ira->codegen->invalid_instruction;
- }
- ZigType *ptr_to_realloc_fn_type = field_ptr_inst->value.type;
- ir_assert(ptr_to_realloc_fn_type->id == ZigTypeIdPointer, &call_instruction->base);
+ if (casted_new_stack != nullptr) {
+ // this is an @asyncCall
- ZigType *realloc_fn_type = ptr_to_realloc_fn_type->data.pointer.child_type;
- if (realloc_fn_type->id != ZigTypeIdFn) {
- ir_add_error(ira, &call_instruction->base,
- buf_sprintf("expected reallocation function, found '%s'", buf_ptr(&realloc_fn_type->name)));
- return ira->codegen->invalid_instruction;
- }
+ if (fn_type->data.fn.fn_type_id.cc != CallingConventionAsync) {
+ ir_add_error(ira, fn_ref,
+ buf_sprintf("expected async function, found '%s'", buf_ptr(&fn_type->name)));
+ return ira->codegen->invalid_instruction;
+ }
+
+ IrInstruction *ret_ptr = call_instruction->args[call_instruction->arg_count]->child;
+ if (type_is_invalid(ret_ptr->value.type))
+ return ira->codegen->invalid_instruction;
+
+ ZigType *anyframe_type = get_any_frame_type(ira->codegen, fn_type->data.fn.fn_type_id.return_type);
- ZigType *realloc_fn_return_type = realloc_fn_type->data.fn.fn_type_id.return_type;
- if (realloc_fn_return_type->id != ZigTypeIdErrorUnion) {
- ir_add_error(ira, fn_ref,
- buf_sprintf("expected allocation function to return error union, but it returns '%s'", buf_ptr(&realloc_fn_return_type->name)));
+ IrInstructionCallGen *call_gen = ir_build_call_gen(ira, &call_instruction->base, nullptr, fn_ref,
+ arg_count, casted_args, FnInlineAuto, true, casted_new_stack, ret_ptr, anyframe_type);
+ return &call_gen->base;
+ } else if (fn_entry == nullptr) {
+ ir_add_error(ira, fn_ref, buf_sprintf("function is not comptime-known; @asyncCall required"));
return ira->codegen->invalid_instruction;
}
- ZigType *alloc_fn_error_set_type = realloc_fn_return_type->data.error_union.err_set_type;
- ZigType *return_type = fn_type->data.fn.fn_type_id.return_type;
- ZigType *promise_type = get_promise_type(ira->codegen, return_type);
- ZigType *async_return_type = get_error_union_type(ira->codegen, alloc_fn_error_set_type, promise_type);
- IrInstruction *result_loc = ir_resolve_result(ira, &call_instruction->base, no_result_loc(),
- async_return_type, nullptr, true, true, false);
- if (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc)) {
+ ZigType *frame_type = get_fn_frame_type(ira->codegen, fn_entry);
+ IrInstruction *result_loc = ir_resolve_result(ira, &call_instruction->base, call_instruction->result_loc,
+ frame_type, nullptr, true, true, false);
+ if (result_loc != nullptr && (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc))) {
return result_loc;
}
-
- return ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref, arg_count,
- casted_args, FnInlineAuto, true, async_allocator_inst, nullptr, result_loc,
- async_return_type);
+ result_loc = ir_implicit_cast(ira, result_loc, get_pointer_to_type(ira->codegen, frame_type, false));
+ if (type_is_invalid(result_loc->value.type))
+ return ira->codegen->invalid_instruction;
+ return &ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref, arg_count,
+ casted_args, FnInlineAuto, true, nullptr, result_loc, frame_type)->base;
}
-
static bool ir_analyze_fn_call_inline_arg(IrAnalyze *ira, AstNode *fn_proto_node,
IrInstruction *arg, Scope **exec_scope, size_t *next_proto_i)
{
@@ -16004,20 +15275,6 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
}
return ira->codegen->invalid_instruction;
}
- if (fn_type_id->cc == CallingConventionAsync && !call_instruction->is_async) {
- ErrorMsg *msg = ir_add_error(ira, fn_ref, buf_sprintf("must use async keyword to call async function"));
- if (fn_proto_node) {
- add_error_note(ira->codegen, msg, fn_proto_node, buf_sprintf("declared here"));
- }
- return ira->codegen->invalid_instruction;
- }
- if (fn_type_id->cc != CallingConventionAsync && call_instruction->is_async) {
- ErrorMsg *msg = ir_add_error(ira, fn_ref, buf_sprintf("cannot use async keyword to call non-async function"));
- if (fn_proto_node) {
- add_error_note(ira->codegen, msg, fn_proto_node, buf_sprintf("declared here"));
- }
- return ira->codegen->invalid_instruction;
- }
if (fn_type_id->is_var_args) {
@@ -16354,33 +15611,6 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
break;
}
}
- IrInstruction *async_allocator_inst = nullptr;
- if (call_instruction->is_async) {
- AstNode *async_allocator_type_node = fn_proto_node->data.fn_proto.async_allocator_type;
- if (async_allocator_type_node != nullptr) {
- ZigType *async_allocator_type = ir_analyze_type_expr(ira, impl_fn->child_scope, async_allocator_type_node);
- if (type_is_invalid(async_allocator_type))
- return ira->codegen->invalid_instruction;
- inst_fn_type_id.async_allocator_type = async_allocator_type;
- }
- IrInstruction *uncasted_async_allocator_inst;
- if (call_instruction->async_allocator == nullptr) {
- uncasted_async_allocator_inst = ir_get_implicit_allocator(ira, &call_instruction->base,
- ImplicitAllocatorIdLocalVar);
- if (type_is_invalid(uncasted_async_allocator_inst->value.type))
- return ira->codegen->invalid_instruction;
- } else {
- uncasted_async_allocator_inst = call_instruction->async_allocator->child;
- if (type_is_invalid(uncasted_async_allocator_inst->value.type))
- return ira->codegen->invalid_instruction;
- }
- if (inst_fn_type_id.async_allocator_type == nullptr) {
- inst_fn_type_id.async_allocator_type = uncasted_async_allocator_inst->value.type;
- }
- async_allocator_inst = ir_implicit_cast(ira, uncasted_async_allocator_inst, inst_fn_type_id.async_allocator_type);
- if (type_is_invalid(async_allocator_inst->value.type))
- return ira->codegen->invalid_instruction;
- }
auto existing_entry = ira->codegen->generic_table.put_unique(generic_id, impl_fn);
if (existing_entry) {
@@ -16423,17 +15653,23 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
size_t impl_param_count = impl_fn_type_id->param_count;
if (call_instruction->is_async) {
IrInstruction *result = ir_analyze_async_call(ira, call_instruction, impl_fn, impl_fn->type_entry,
- fn_ref, casted_args, impl_param_count, async_allocator_inst);
+ nullptr, casted_args, impl_param_count, casted_new_stack);
return ir_finish_anal(ira, result);
}
- assert(async_allocator_inst == nullptr);
- IrInstruction *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base,
+ if (impl_fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) {
+ parent_fn_entry->inferred_async_node = fn_ref->source_node;
+ parent_fn_entry->inferred_async_fn = impl_fn;
+ }
+
+ IrInstructionCallGen *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base,
impl_fn, nullptr, impl_param_count, casted_args, fn_inline,
- call_instruction->is_async, nullptr, casted_new_stack, result_loc,
+ false, casted_new_stack, result_loc,
impl_fn_type_id->return_type);
- return ir_finish_anal(ira, new_call_instruction);
+ parent_fn_entry->call_list.append(new_call_instruction);
+
+ return ir_finish_anal(ira, &new_call_instruction->base);
}
ZigFn *parent_fn_entry = exec_fn_entry(ira->new_irb.exec);
@@ -16475,20 +15711,56 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
IrInstruction *old_arg = call_instruction->args[call_i]->child;
if (type_is_invalid(old_arg->value.type))
return ira->codegen->invalid_instruction;
- IrInstruction *casted_arg;
- if (next_arg_index < src_param_count) {
- ZigType *param_type = fn_type_id->param_info[next_arg_index].type;
- if (type_is_invalid(param_type))
- return ira->codegen->invalid_instruction;
- casted_arg = ir_implicit_cast(ira, old_arg, param_type);
- if (type_is_invalid(casted_arg->value.type))
- return ira->codegen->invalid_instruction;
+
+ if (old_arg->value.type->id == ZigTypeIdArgTuple) {
+ for (size_t arg_tuple_i = old_arg->value.data.x_arg_tuple.start_index;
+ arg_tuple_i < old_arg->value.data.x_arg_tuple.end_index; arg_tuple_i += 1)
+ {
+ ZigVar *arg_var = get_fn_var_by_index(parent_fn_entry, arg_tuple_i);
+ if (arg_var == nullptr) {
+ ir_add_error(ira, old_arg,
+ buf_sprintf("compiler bug: var args can't handle void. https://github.com/ziglang/zig/issues/557"));
+ return ira->codegen->invalid_instruction;
+ }
+ IrInstruction *arg_var_ptr_inst = ir_get_var_ptr(ira, old_arg, arg_var);
+ if (type_is_invalid(arg_var_ptr_inst->value.type))
+ return ira->codegen->invalid_instruction;
+
+ IrInstruction *arg_tuple_arg = ir_get_deref(ira, old_arg, arg_var_ptr_inst, nullptr);
+ if (type_is_invalid(arg_tuple_arg->value.type))
+ return ira->codegen->invalid_instruction;
+
+ IrInstruction *casted_arg;
+ if (next_arg_index < src_param_count) {
+ ZigType *param_type = fn_type_id->param_info[next_arg_index].type;
+ if (type_is_invalid(param_type))
+ return ira->codegen->invalid_instruction;
+ casted_arg = ir_implicit_cast(ira, arg_tuple_arg, param_type);
+ if (type_is_invalid(casted_arg->value.type))
+ return ira->codegen->invalid_instruction;
+ } else {
+ casted_arg = arg_tuple_arg;
+ }
+
+ casted_args[next_arg_index] = casted_arg;
+ next_arg_index += 1;
+ }
} else {
- casted_arg = old_arg;
- }
+ IrInstruction *casted_arg;
+ if (next_arg_index < src_param_count) {
+ ZigType *param_type = fn_type_id->param_info[next_arg_index].type;
+ if (type_is_invalid(param_type))
+ return ira->codegen->invalid_instruction;
+ casted_arg = ir_implicit_cast(ira, old_arg, param_type);
+ if (type_is_invalid(casted_arg->value.type))
+ return ira->codegen->invalid_instruction;
+ } else {
+ casted_arg = old_arg;
+ }
- casted_args[next_arg_index] = casted_arg;
- next_arg_index += 1;
+ casted_args[next_arg_index] = casted_arg;
+ next_arg_index += 1;
+ }
}
assert(next_arg_index == call_param_count);
@@ -16497,32 +15769,21 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
if (type_is_invalid(return_type))
return ira->codegen->invalid_instruction;
- if (call_instruction->is_async) {
- IrInstruction *uncasted_async_allocator_inst;
- if (call_instruction->async_allocator == nullptr) {
- uncasted_async_allocator_inst = ir_get_implicit_allocator(ira, &call_instruction->base,
- ImplicitAllocatorIdLocalVar);
- if (type_is_invalid(uncasted_async_allocator_inst->value.type))
- return ira->codegen->invalid_instruction;
- } else {
- uncasted_async_allocator_inst = call_instruction->async_allocator->child;
- if (type_is_invalid(uncasted_async_allocator_inst->value.type))
- return ira->codegen->invalid_instruction;
-
- }
- IrInstruction *async_allocator_inst = ir_implicit_cast(ira, uncasted_async_allocator_inst, fn_type_id->async_allocator_type);
- if (type_is_invalid(async_allocator_inst->value.type))
- return ira->codegen->invalid_instruction;
+ if (fn_entry != nullptr && fn_entry->fn_inline == FnInlineAlways && fn_inline == FnInlineNever) {
+ ir_add_error(ira, &call_instruction->base,
+ buf_sprintf("no-inline call of inline function"));
+ return ira->codegen->invalid_instruction;
+ }
+ if (call_instruction->is_async) {
IrInstruction *result = ir_analyze_async_call(ira, call_instruction, fn_entry, fn_type, fn_ref,
- casted_args, call_param_count, async_allocator_inst);
+ casted_args, call_param_count, casted_new_stack);
return ir_finish_anal(ira, result);
}
- if (fn_entry != nullptr && fn_entry->fn_inline == FnInlineAlways && fn_inline == FnInlineNever) {
- ir_add_error(ira, &call_instruction->base,
- buf_sprintf("no-inline call of inline function"));
- return ira->codegen->invalid_instruction;
+ if (fn_type_id->cc == CallingConventionAsync && parent_fn_entry->inferred_async_node == nullptr) {
+ parent_fn_entry->inferred_async_node = fn_ref->source_node;
+ parent_fn_entry->inferred_async_fn = fn_entry;
}
IrInstruction *result_loc;
@@ -16536,10 +15797,11 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
result_loc = nullptr;
}
- IrInstruction *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref,
- call_param_count, casted_args, fn_inline, false, nullptr, casted_new_stack,
+ IrInstructionCallGen *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref,
+ call_param_count, casted_args, fn_inline, false, casted_new_stack,
result_loc, return_type);
- return ir_finish_anal(ira, new_call_instruction);
+ parent_fn_entry->call_list.append(new_call_instruction);
+ return ir_finish_anal(ira, &new_call_instruction->base);
}
static IrInstruction *ir_analyze_instruction_call(IrAnalyze *ira, IrInstructionCallSrc *call_instruction) {
@@ -16684,7 +15946,7 @@ static Error ir_read_const_ptr(IrAnalyze *ira, CodeGen *codegen, AstNode *source
zig_unreachable();
}
-static IrInstruction *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op_instruction) {
+static IrInstruction *ir_analyze_optional_type(IrAnalyze *ira, IrInstructionUnOp *un_op_instruction) {
Error err;
IrInstruction *value = un_op_instruction->value->child;
ZigType *type_entry = ir_resolve_type(ira, value);
@@ -16718,8 +15980,10 @@ static IrInstruction *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op_
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
- case ZigTypeIdPromise:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
return ir_const_type(ira, &un_op_instruction->base, get_optional_type(ira->codegen, type_entry));
+
case ZigTypeIdUnreachable:
case ZigTypeIdOpaque:
ir_add_error_node(ira, un_op_instruction->base.source_node,
@@ -16883,7 +16147,7 @@ static IrInstruction *ir_analyze_instruction_un_op(IrAnalyze *ira, IrInstruction
return result;
}
case IrUnOpOptional:
- return ir_analyze_maybe(ira, instruction);
+ return ir_analyze_optional_type(ira, instruction);
}
zig_unreachable();
}
@@ -18443,6 +17707,20 @@ static IrInstruction *ir_analyze_instruction_set_float_mode(IrAnalyze *ira,
return ir_const_void(ira, &instruction->base);
}
+static IrInstruction *ir_analyze_instruction_any_frame_type(IrAnalyze *ira,
+ IrInstructionAnyFrameType *instruction)
+{
+ ZigType *payload_type = nullptr;
+ if (instruction->payload_type != nullptr) {
+ payload_type = ir_resolve_type(ira, instruction->payload_type->child);
+ if (type_is_invalid(payload_type))
+ return ira->codegen->invalid_instruction;
+ }
+
+ ZigType *any_frame_type = get_any_frame_type(ira->codegen, payload_type);
+ return ir_const_type(ira, &instruction->base, any_frame_type);
+}
+
static IrInstruction *ir_analyze_instruction_slice_type(IrAnalyze *ira,
IrInstructionSliceType *slice_type_instruction)
{
@@ -18490,8 +17768,9 @@ static IrInstruction *ir_analyze_instruction_slice_type(IrAnalyze *ira,
case ZigTypeIdUnion:
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
{
ResolveStatus needed_status = (align_bytes == 0) ?
ResolveStatusZeroBitsKnown : ResolveStatusAlignmentKnown;
@@ -18605,8 +17884,9 @@ static IrInstruction *ir_analyze_instruction_array_type(IrAnalyze *ira,
case ZigTypeIdUnion:
case ZigTypeIdFn:
case ZigTypeIdBoundFn:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
{
if ((err = ensure_complete_type(ira->codegen, child_type)))
return ira->codegen->invalid_instruction;
@@ -18617,22 +17897,6 @@ static IrInstruction *ir_analyze_instruction_array_type(IrAnalyze *ira,
zig_unreachable();
}
-static IrInstruction *ir_analyze_instruction_promise_type(IrAnalyze *ira, IrInstructionPromiseType *instruction) {
- ZigType *promise_type;
-
- if (instruction->payload_type == nullptr) {
- promise_type = ira->codegen->builtin_types.entry_promise;
- } else {
- ZigType *payload_type = ir_resolve_type(ira, instruction->payload_type->child);
- if (type_is_invalid(payload_type))
- return ira->codegen->invalid_instruction;
-
- promise_type = get_promise_type(ira->codegen, payload_type);
- }
-
- return ir_const_type(ira, &instruction->base, promise_type);
-}
-
static IrInstruction *ir_analyze_instruction_size_of(IrAnalyze *ira,
IrInstructionSizeOf *size_of_instruction)
{
@@ -18672,8 +17936,9 @@ static IrInstruction *ir_analyze_instruction_size_of(IrAnalyze *ira,
case ZigTypeIdEnum:
case ZigTypeIdUnion:
case ZigTypeIdFn:
- case ZigTypeIdPromise:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
{
uint64_t size_in_bytes = type_size(ira->codegen, type_entry);
return ir_const_unsigned(ira, &size_of_instruction->base, size_in_bytes);
@@ -19159,7 +18424,6 @@ static IrInstruction *ir_analyze_instruction_switch_target(IrAnalyze *ira,
case ZigTypeIdComptimeInt:
case ZigTypeIdEnumLiteral:
case ZigTypeIdPointer:
- case ZigTypeIdPromise:
case ZigTypeIdFn:
case ZigTypeIdErrorSet: {
if (pointee_val) {
@@ -19238,6 +18502,8 @@ static IrInstruction *ir_analyze_instruction_switch_target(IrAnalyze *ira,
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
ir_add_error(ira, &switch_target_instruction->base,
buf_sprintf("invalid switch target type '%s'", buf_ptr(&target_type->name)));
return ira->codegen->invalid_instruction;
@@ -20672,32 +19938,22 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInstruction *source_instr
break;
}
- case ZigTypeIdPromise:
- {
- result = create_const_vals(1);
- result->special = ConstValSpecialStatic;
- result->type = ir_type_info_get_type(ira, "Promise", nullptr);
-
- ConstExprValue *fields = create_const_vals(1);
- result->data.x_struct.fields = fields;
-
- // child: ?type
- ensure_field_index(result->type, "child", 0);
- fields[0].special = ConstValSpecialStatic;
- fields[0].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type);
+ case ZigTypeIdAnyFrame: {
+ result = create_const_vals(1);
+ result->special = ConstValSpecialStatic;
+ result->type = ir_type_info_get_type(ira, "AnyFrame", nullptr);
- if (type_entry->data.promise.result_type == nullptr)
- fields[0].data.x_optional = nullptr;
- else {
- ConstExprValue *child_type = create_const_vals(1);
- child_type->special = ConstValSpecialStatic;
- child_type->type = ira->codegen->builtin_types.entry_type;
- child_type->data.x_type = type_entry->data.promise.result_type;
- fields[0].data.x_optional = child_type;
- }
+ ConstExprValue *fields = create_const_vals(1);
+ result->data.x_struct.fields = fields;
- break;
- }
+ // child: ?type
+ ensure_field_index(result->type, "child", 0);
+ fields[0].special = ConstValSpecialStatic;
+ fields[0].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type);
+ fields[0].data.x_optional = (type_entry->data.any_frame.result_type == nullptr) ? nullptr :
+ create_const_type(ira->codegen, type_entry->data.any_frame.result_type);
+ break;
+ }
case ZigTypeIdEnum:
{
result = create_const_vals(1);
@@ -21007,7 +20263,7 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInstruction *source_instr
result->special = ConstValSpecialStatic;
result->type = ir_type_info_get_type(ira, "Fn", nullptr);
- ConstExprValue *fields = create_const_vals(6);
+ ConstExprValue *fields = create_const_vals(5);
result->data.x_struct.fields = fields;
// calling_convention: TypeInfo.CallingConvention
@@ -21040,19 +20296,6 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInstruction *source_instr
return_type->data.x_type = type_entry->data.fn.fn_type_id.return_type;
fields[3].data.x_optional = return_type;
}
- // async_allocator_type: type
- ensure_field_index(result->type, "async_allocator_type", 4);
- fields[4].special = ConstValSpecialStatic;
- fields[4].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type);
- if (type_entry->data.fn.fn_type_id.async_allocator_type == nullptr)
- fields[4].data.x_optional = nullptr;
- else {
- ConstExprValue *async_alloc_type = create_const_vals(1);
- async_alloc_type->special = ConstValSpecialStatic;
- async_alloc_type->type = ira->codegen->builtin_types.entry_type;
- async_alloc_type->data.x_type = type_entry->data.fn.fn_type_id.async_allocator_type;
- fields[4].data.x_optional = async_alloc_type;
- }
// args: []TypeInfo.FnArg
ZigType *type_info_fn_arg_type = ir_type_info_get_type(ira, "FnArg", nullptr);
if ((err = type_resolve(ira->codegen, type_info_fn_arg_type, ResolveStatusSizeKnown))) {
@@ -21067,10 +20310,9 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInstruction *source_instr
fn_arg_array->data.x_array.special = ConstArraySpecialNone;
fn_arg_array->data.x_array.data.s_none.elements = create_const_vals(fn_arg_count);
- init_const_slice(ira->codegen, &fields[5], fn_arg_array, 0, fn_arg_count, false);
+ init_const_slice(ira->codegen, &fields[4], fn_arg_array, 0, fn_arg_count, false);
- for (size_t fn_arg_index = 0; fn_arg_index < fn_arg_count; fn_arg_index++)
- {
+ for (size_t fn_arg_index = 0; fn_arg_index < fn_arg_count; fn_arg_index++) {
FnTypeParamInfo *fn_param_info = &type_entry->data.fn.fn_type_id.param_info[fn_arg_index];
ConstExprValue *fn_arg_val = &fn_arg_array->data.x_array.data.s_none.elements[fn_arg_index];
@@ -21117,6 +20359,8 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInstruction *source_instr
break;
}
+ case ZigTypeIdFnFrame:
+ zig_panic("TODO @typeInfo for async function frames");
}
assert(result != nullptr);
@@ -22830,11 +22074,45 @@ static IrInstruction *ir_analyze_instruction_frame_address(IrAnalyze *ira, IrIns
return result;
}
-static IrInstruction *ir_analyze_instruction_handle(IrAnalyze *ira, IrInstructionHandle *instruction) {
+static IrInstruction *ir_analyze_instruction_frame_handle(IrAnalyze *ira, IrInstructionFrameHandle *instruction) {
+ ZigFn *fn = exec_fn_entry(ira->new_irb.exec);
+ ir_assert(fn != nullptr, &instruction->base);
+
+ if (fn->inferred_async_node == nullptr) {
+ fn->inferred_async_node = instruction->base.source_node;
+ }
+
+ ZigType *frame_type = get_fn_frame_type(ira->codegen, fn);
+ ZigType *ptr_frame_type = get_pointer_to_type(ira->codegen, frame_type, false);
+
IrInstruction *result = ir_build_handle(&ira->new_irb, instruction->base.scope, instruction->base.source_node);
- ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
- assert(fn_entry != nullptr);
- result->value.type = get_promise_type(ira->codegen, fn_entry->type_entry->data.fn.fn_type_id.return_type);
+ result->value.type = ptr_frame_type;
+ return result;
+}
+
+static IrInstruction *ir_analyze_instruction_frame_type(IrAnalyze *ira, IrInstructionFrameType *instruction) {
+ ZigFn *fn = ir_resolve_fn(ira, instruction->fn->child);
+ if (fn == nullptr)
+ return ira->codegen->invalid_instruction;
+
+ ZigType *ty = get_fn_frame_type(ira->codegen, fn);
+ return ir_const_type(ira, &instruction->base, ty);
+}
+
+static IrInstruction *ir_analyze_instruction_frame_size(IrAnalyze *ira, IrInstructionFrameSizeSrc *instruction) {
+ IrInstruction *fn = instruction->fn->child;
+ if (type_is_invalid(fn->value.type))
+ return ira->codegen->invalid_instruction;
+
+ if (fn->value.type->id != ZigTypeIdFn) {
+ ir_add_error(ira, fn,
+ buf_sprintf("expected function, found '%s'", buf_ptr(&fn->value.type->name)));
+ return ira->codegen->invalid_instruction;
+ }
+
+ IrInstruction *result = ir_build_frame_size_gen(&ira->new_irb, instruction->base.scope,
+ instruction->base.source_node, fn);
+ result->value.type = ira->codegen->builtin_types.entry_usize;
return result;
}
@@ -22869,7 +22147,6 @@ static IrInstruction *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstruct
case ZigTypeIdInt:
case ZigTypeIdFloat:
case ZigTypeIdPointer:
- case ZigTypeIdPromise:
case ZigTypeIdArray:
case ZigTypeIdStruct:
case ZigTypeIdOptional:
@@ -22879,6 +22156,8 @@ static IrInstruction *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstruct
case ZigTypeIdUnion:
case ZigTypeIdFn:
case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
{
uint64_t align_in_bytes = get_abi_alignment(ira->codegen, type_entry);
return ir_const_unsigned(ira, &instruction->base, align_in_bytes);
@@ -22993,19 +22272,6 @@ static IrInstruction *ir_analyze_instruction_overflow_op(IrAnalyze *ira, IrInstr
return result;
}
-static IrInstruction *ir_analyze_instruction_result_ptr(IrAnalyze *ira, IrInstructionResultPtr *instruction) {
- IrInstruction *result = instruction->result->child;
- if (type_is_invalid(result->value.type))
- return result;
-
- if (instruction->result_loc->written && instruction->result_loc->resolved_loc != nullptr &&
- !instr_is_comptime(result))
- {
- return instruction->result_loc->resolved_loc;
- }
- return ir_get_ref(ira, &instruction->base, result, true, false);
-}
-
static void ir_eval_mul_add(IrAnalyze *ira, IrInstructionMulAdd *source_instr, ZigType *float_type,
ConstExprValue *op1, ConstExprValue *op2, ConstExprValue *op3, ConstExprValue *out_val) {
if (float_type->id == ZigTypeIdComptimeFloat) {
@@ -23130,11 +22396,16 @@ static IrInstruction *ir_analyze_instruction_test_err(IrAnalyze *ira, IrInstruct
if (type_is_invalid(base_ptr->value.type))
return ira->codegen->invalid_instruction;
- IrInstruction *value = ir_get_deref(ira, &instruction->base, base_ptr, nullptr);
+ IrInstruction *value;
+ if (instruction->base_ptr_is_payload) {
+ value = base_ptr;
+ } else {
+ value = ir_get_deref(ira, &instruction->base, base_ptr, nullptr);
+ }
+
ZigType *type_entry = value->value.type;
if (type_is_invalid(type_entry))
return ira->codegen->invalid_instruction;
-
if (type_entry->id == ZigTypeIdErrorUnion) {
if (instr_is_comptime(value)) {
ConstExprValue *err_union_val = ir_resolve_const(ira, value, UndefBad);
@@ -23428,18 +22699,6 @@ static IrInstruction *ir_analyze_instruction_fn_proto(IrAnalyze *ira, IrInstruct
return ira->codegen->invalid_instruction;
}
- if (fn_type_id.cc == CallingConventionAsync) {
- if (instruction->async_allocator_type_value == nullptr) {
- ir_add_error(ira, &instruction->base,
- buf_sprintf("async fn proto missing allocator type"));
- return ira->codegen->invalid_instruction;
- }
- IrInstruction *async_allocator_type_value = instruction->async_allocator_type_value->child;
- fn_type_id.async_allocator_type = ir_resolve_type(ira, async_allocator_type_value);
- if (type_is_invalid(fn_type_id.async_allocator_type))
- return ira->codegen->invalid_instruction;
- }
-
return ir_const_type(ira, &instruction->base, get_fn_type(ira->codegen, &fn_type_id));
}
@@ -23678,7 +22937,7 @@ static IrInstruction *ir_analyze_instruction_check_statement_is_void(IrAnalyze *
if (type_is_invalid(statement_type))
return ira->codegen->invalid_instruction;
- if (statement_type->id != ZigTypeIdVoid) {
+ if (statement_type->id != ZigTypeIdVoid && statement_type->id != ZigTypeIdUnreachable) {
ir_add_error(ira, &instruction->base, buf_sprintf("expression value is ignored"));
}
@@ -23933,7 +23192,6 @@ static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue
case ZigTypeIdEnumLiteral:
case ZigTypeIdUndefined:
case ZigTypeIdNull:
- case ZigTypeIdPromise:
case ZigTypeIdErrorUnion:
case ZigTypeIdErrorSet:
zig_unreachable();
@@ -24043,6 +23301,10 @@ static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue
zig_panic("TODO buf_write_value_bytes fn type");
case ZigTypeIdUnion:
zig_panic("TODO buf_write_value_bytes union type");
+ case ZigTypeIdFnFrame:
+ zig_panic("TODO buf_write_value_bytes async fn frame type");
+ case ZigTypeIdAnyFrame:
+ zig_panic("TODO buf_write_value_bytes anyframe type");
}
zig_unreachable();
}
@@ -24087,7 +23349,6 @@ static Error buf_read_value_bytes(IrAnalyze *ira, CodeGen *codegen, AstNode *sou
case ZigTypeIdEnumLiteral:
case ZigTypeIdUndefined:
case ZigTypeIdNull:
- case ZigTypeIdPromise:
zig_unreachable();
case ZigTypeIdVoid:
return ErrorNone;
@@ -24223,6 +23484,10 @@ static Error buf_read_value_bytes(IrAnalyze *ira, CodeGen *codegen, AstNode *sou
zig_panic("TODO buf_read_value_bytes fn type");
case ZigTypeIdUnion:
zig_panic("TODO buf_read_value_bytes union type");
+ case ZigTypeIdFnFrame:
+ zig_panic("TODO buf_read_value_bytes async fn frame type");
+ case ZigTypeIdAnyFrame:
+ zig_panic("TODO buf_read_value_bytes anyframe type");
}
zig_unreachable();
}
@@ -24573,184 +23838,6 @@ static IrInstruction *ir_analyze_instruction_tag_type(IrAnalyze *ira, IrInstruct
}
}
-static IrInstruction *ir_analyze_instruction_cancel(IrAnalyze *ira, IrInstructionCancel *instruction) {
- IrInstruction *target_inst = instruction->target->child;
- if (type_is_invalid(target_inst->value.type))
- return ira->codegen->invalid_instruction;
- IrInstruction *casted_target = ir_implicit_cast(ira, target_inst, ira->codegen->builtin_types.entry_promise);
- if (type_is_invalid(casted_target->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_cancel(&ira->new_irb, instruction->base.scope, instruction->base.source_node, casted_target);
- result->value.type = ira->codegen->builtin_types.entry_void;
- result->value.special = ConstValSpecialStatic;
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_id(IrAnalyze *ira, IrInstructionCoroId *instruction) {
- IrInstruction *promise_ptr = instruction->promise_ptr->child;
- if (type_is_invalid(promise_ptr->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_coro_id(&ira->new_irb, instruction->base.scope, instruction->base.source_node,
- promise_ptr);
- result->value.type = ira->codegen->builtin_types.entry_usize;
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_alloc(IrAnalyze *ira, IrInstructionCoroAlloc *instruction) {
- IrInstruction *coro_id = instruction->coro_id->child;
- if (type_is_invalid(coro_id->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_coro_alloc(&ira->new_irb, instruction->base.scope, instruction->base.source_node,
- coro_id);
- result->value.type = ira->codegen->builtin_types.entry_bool;
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_size(IrAnalyze *ira, IrInstructionCoroSize *instruction) {
- IrInstruction *result = ir_build_coro_size(&ira->new_irb, instruction->base.scope, instruction->base.source_node);
- result->value.type = ira->codegen->builtin_types.entry_usize;
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_begin(IrAnalyze *ira, IrInstructionCoroBegin *instruction) {
- IrInstruction *coro_id = instruction->coro_id->child;
- if (type_is_invalid(coro_id->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *coro_mem_ptr = instruction->coro_mem_ptr->child;
- if (type_is_invalid(coro_mem_ptr->value.type))
- return ira->codegen->invalid_instruction;
-
- ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
- ir_assert(fn_entry != nullptr, &instruction->base);
- IrInstruction *result = ir_build_coro_begin(&ira->new_irb, instruction->base.scope, instruction->base.source_node,
- coro_id, coro_mem_ptr);
- result->value.type = get_promise_type(ira->codegen, fn_entry->type_entry->data.fn.fn_type_id.return_type);
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_get_implicit_allocator(IrAnalyze *ira, IrInstructionGetImplicitAllocator *instruction) {
- return ir_get_implicit_allocator(ira, &instruction->base, instruction->id);
-}
-
-static IrInstruction *ir_analyze_instruction_coro_alloc_fail(IrAnalyze *ira, IrInstructionCoroAllocFail *instruction) {
- IrInstruction *err_val = instruction->err_val->child;
- if (type_is_invalid(err_val->value.type))
- return ir_unreach_error(ira);
-
- IrInstruction *result = ir_build_coro_alloc_fail(&ira->new_irb, instruction->base.scope, instruction->base.source_node, err_val);
- result->value.type = ira->codegen->builtin_types.entry_unreachable;
- return ir_finish_anal(ira, result);
-}
-
-static IrInstruction *ir_analyze_instruction_coro_suspend(IrAnalyze *ira, IrInstructionCoroSuspend *instruction) {
- IrInstruction *save_point = nullptr;
- if (instruction->save_point != nullptr) {
- save_point = instruction->save_point->child;
- if (type_is_invalid(save_point->value.type))
- return ira->codegen->invalid_instruction;
- }
-
- IrInstruction *is_final = instruction->is_final->child;
- if (type_is_invalid(is_final->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_coro_suspend(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, save_point, is_final);
- result->value.type = ira->codegen->builtin_types.entry_u8;
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_end(IrAnalyze *ira, IrInstructionCoroEnd *instruction) {
- IrInstruction *result = ir_build_coro_end(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node);
- result->value.type = ira->codegen->builtin_types.entry_void;
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_free(IrAnalyze *ira, IrInstructionCoroFree *instruction) {
- IrInstruction *coro_id = instruction->coro_id->child;
- if (type_is_invalid(coro_id->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *coro_handle = instruction->coro_handle->child;
- if (type_is_invalid(coro_handle->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_coro_free(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, coro_id, coro_handle);
- ZigType *ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, false);
- result->value.type = get_optional_type(ira->codegen, ptr_type);
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_resume(IrAnalyze *ira, IrInstructionCoroResume *instruction) {
- IrInstruction *awaiter_handle = instruction->awaiter_handle->child;
- if (type_is_invalid(awaiter_handle->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *casted_target = ir_implicit_cast(ira, awaiter_handle, ira->codegen->builtin_types.entry_promise);
- if (type_is_invalid(casted_target->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_coro_resume(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, casted_target);
- result->value.type = ira->codegen->builtin_types.entry_void;
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_save(IrAnalyze *ira, IrInstructionCoroSave *instruction) {
- IrInstruction *coro_handle = instruction->coro_handle->child;
- if (type_is_invalid(coro_handle->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_coro_save(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, coro_handle);
- result->value.type = ira->codegen->builtin_types.entry_usize;
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_promise(IrAnalyze *ira, IrInstructionCoroPromise *instruction) {
- IrInstruction *coro_handle = instruction->coro_handle->child;
- if (type_is_invalid(coro_handle->value.type))
- return ira->codegen->invalid_instruction;
-
- if (coro_handle->value.type->id != ZigTypeIdPromise ||
- coro_handle->value.type->data.promise.result_type == nullptr)
- {
- ir_add_error(ira, &instruction->base, buf_sprintf("expected promise->T, found '%s'",
- buf_ptr(&coro_handle->value.type->name)));
- return ira->codegen->invalid_instruction;
- }
-
- ZigType *coro_frame_type = get_promise_frame_type(ira->codegen,
- coro_handle->value.type->data.promise.result_type);
-
- IrInstruction *result = ir_build_coro_promise(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, coro_handle);
- result->value.type = get_pointer_to_type(ira->codegen, coro_frame_type, false);
- return result;
-}
-
-static IrInstruction *ir_analyze_instruction_coro_alloc_helper(IrAnalyze *ira, IrInstructionCoroAllocHelper *instruction) {
- IrInstruction *realloc_fn = instruction->realloc_fn->child;
- if (type_is_invalid(realloc_fn->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *coro_size = instruction->coro_size->child;
- if (type_is_invalid(coro_size->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_coro_alloc_helper(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, realloc_fn, coro_size);
- ZigType *u8_ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, false);
- result->value.type = get_optional_type(ira->codegen, u8_ptr_type);
- return result;
-}
-
static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstruction *op) {
ZigType *operand_type = ir_resolve_type(ira, op);
if (type_is_invalid(operand_type))
@@ -24882,65 +23969,6 @@ static IrInstruction *ir_analyze_instruction_atomic_load(IrAnalyze *ira, IrInstr
return result;
}
-static IrInstruction *ir_analyze_instruction_promise_result_type(IrAnalyze *ira, IrInstructionPromiseResultType *instruction) {
- ZigType *promise_type = ir_resolve_type(ira, instruction->promise_type->child);
- if (type_is_invalid(promise_type))
- return ira->codegen->invalid_instruction;
-
- if (promise_type->id != ZigTypeIdPromise || promise_type->data.promise.result_type == nullptr) {
- ir_add_error(ira, &instruction->base, buf_sprintf("expected promise->T, found '%s'",
- buf_ptr(&promise_type->name)));
- return ira->codegen->invalid_instruction;
- }
-
- return ir_const_type(ira, &instruction->base, promise_type->data.promise.result_type);
-}
-
-static IrInstruction *ir_analyze_instruction_await_bookkeeping(IrAnalyze *ira, IrInstructionAwaitBookkeeping *instruction) {
- ZigType *promise_result_type = ir_resolve_type(ira, instruction->promise_result_type->child);
- if (type_is_invalid(promise_result_type))
- return ira->codegen->invalid_instruction;
-
- ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
- ir_assert(fn_entry != nullptr, &instruction->base);
-
- if (type_can_fail(promise_result_type)) {
- fn_entry->calls_or_awaits_errorable_fn = true;
- }
-
- return ir_const_void(ira, &instruction->base);
-}
-
-static IrInstruction *ir_analyze_instruction_merge_err_ret_traces(IrAnalyze *ira,
- IrInstructionMergeErrRetTraces *instruction)
-{
- IrInstruction *coro_promise_ptr = instruction->coro_promise_ptr->child;
- if (type_is_invalid(coro_promise_ptr->value.type))
- return ira->codegen->invalid_instruction;
-
- ir_assert(coro_promise_ptr->value.type->id == ZigTypeIdPointer, &instruction->base);
- ZigType *promise_frame_type = coro_promise_ptr->value.type->data.pointer.child_type;
- ir_assert(promise_frame_type->id == ZigTypeIdStruct, &instruction->base);
- ZigType *promise_result_type = promise_frame_type->data.structure.fields[1].type_entry;
-
- if (!type_can_fail(promise_result_type)) {
- return ir_const_void(ira, &instruction->base);
- }
-
- IrInstruction *src_err_ret_trace_ptr = instruction->src_err_ret_trace_ptr->child;
- if (type_is_invalid(src_err_ret_trace_ptr->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *dest_err_ret_trace_ptr = instruction->dest_err_ret_trace_ptr->child;
- if (type_is_invalid(dest_err_ret_trace_ptr->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_merge_err_ret_traces(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, coro_promise_ptr, src_err_ret_trace_ptr, dest_err_ret_trace_ptr);
- result->value.type = ira->codegen->builtin_types.entry_void;
- return result;
-}
-
static IrInstruction *ir_analyze_instruction_save_err_ret_addr(IrAnalyze *ira, IrInstructionSaveErrRetAddr *instruction) {
IrInstruction *result = ir_build_save_err_ret_addr(&ira->new_irb, instruction->base.scope,
instruction->base.source_node);
@@ -24948,17 +23976,6 @@ static IrInstruction *ir_analyze_instruction_save_err_ret_addr(IrAnalyze *ira, I
return result;
}
-static IrInstruction *ir_analyze_instruction_mark_err_ret_trace_ptr(IrAnalyze *ira, IrInstructionMarkErrRetTracePtr *instruction) {
- IrInstruction *err_ret_trace_ptr = instruction->err_ret_trace_ptr->child;
- if (type_is_invalid(err_ret_trace_ptr->value.type))
- return ira->codegen->invalid_instruction;
-
- IrInstruction *result = ir_build_mark_err_ret_trace_ptr(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, err_ret_trace_ptr);
- result->value.type = ira->codegen->builtin_types.entry_void;
- return result;
-}
-
static void ir_eval_float_op(IrAnalyze *ira, IrInstructionFloatOp *source_instr, ZigType *float_type,
ConstExprValue *op, ConstExprValue *out_val) {
assert(ira && source_instr && float_type && out_val && op);
@@ -25485,6 +24502,162 @@ static IrInstruction *ir_analyze_instruction_union_init_named_field(IrAnalyze *i
union_type, field_name, field_result_loc, result_loc);
}
+static IrInstruction *ir_analyze_instruction_suspend_begin(IrAnalyze *ira, IrInstructionSuspendBegin *instruction) {
+ IrInstructionSuspendBegin *result = ir_build_suspend_begin(&ira->new_irb, instruction->base.scope,
+ instruction->base.source_node);
+ return &result->base;
+}
+
+static IrInstruction *ir_analyze_instruction_suspend_finish(IrAnalyze *ira,
+ IrInstructionSuspendFinish *instruction)
+{
+ IrInstruction *begin_base = instruction->begin->base.child;
+ if (type_is_invalid(begin_base->value.type))
+ return ira->codegen->invalid_instruction;
+ ir_assert(begin_base->id == IrInstructionIdSuspendBegin, &instruction->base);
+ IrInstructionSuspendBegin *begin = reinterpret_cast<IrInstructionSuspendBegin *>(begin_base);
+
+ ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
+ ir_assert(fn_entry != nullptr, &instruction->base);
+
+ if (fn_entry->inferred_async_node == nullptr) {
+ fn_entry->inferred_async_node = instruction->base.source_node;
+ }
+
+ return ir_build_suspend_finish(&ira->new_irb, instruction->base.scope, instruction->base.source_node, begin);
+}
+
+static IrInstruction *analyze_frame_ptr_to_anyframe_T(IrAnalyze *ira, IrInstruction *source_instr,
+ IrInstruction *frame_ptr)
+{
+ if (type_is_invalid(frame_ptr->value.type))
+ return ira->codegen->invalid_instruction;
+
+ ZigType *result_type;
+ IrInstruction *frame;
+ if (frame_ptr->value.type->id == ZigTypeIdPointer &&
+ frame_ptr->value.type->data.pointer.ptr_len == PtrLenSingle &&
+ frame_ptr->value.type->data.pointer.child_type->id == ZigTypeIdFnFrame)
+ {
+ result_type = frame_ptr->value.type->data.pointer.child_type->data.frame.fn->type_entry->data.fn.fn_type_id.return_type;
+ frame = frame_ptr;
+ } else {
+ frame = ir_get_deref(ira, source_instr, frame_ptr, nullptr);
+ if (frame->value.type->id == ZigTypeIdPointer &&
+ frame->value.type->data.pointer.ptr_len == PtrLenSingle &&
+ frame->value.type->data.pointer.child_type->id == ZigTypeIdFnFrame)
+ {
+ result_type = frame->value.type->data.pointer.child_type->data.frame.fn->type_entry->data.fn.fn_type_id.return_type;
+ } else if (frame->value.type->id != ZigTypeIdAnyFrame ||
+ frame->value.type->data.any_frame.result_type == nullptr)
+ {
+ ir_add_error(ira, source_instr,
+ buf_sprintf("expected anyframe->T, found '%s'", buf_ptr(&frame->value.type->name)));
+ return ira->codegen->invalid_instruction;
+ } else {
+ result_type = frame->value.type->data.any_frame.result_type;
+ }
+ }
+
+ ZigType *any_frame_type = get_any_frame_type(ira->codegen, result_type);
+ IrInstruction *casted_frame = ir_implicit_cast(ira, frame, any_frame_type);
+ if (type_is_invalid(casted_frame->value.type))
+ return ira->codegen->invalid_instruction;
+
+ return casted_frame;
+}
+
+static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstructionAwaitSrc *instruction) {
+ IrInstruction *frame = analyze_frame_ptr_to_anyframe_T(ira, &instruction->base, instruction->frame->child);
+ if (type_is_invalid(frame->value.type))
+ return ira->codegen->invalid_instruction;
+
+ ZigType *result_type = frame->value.type->data.any_frame.result_type;
+
+ ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
+ ir_assert(fn_entry != nullptr, &instruction->base);
+
+ if (fn_entry->inferred_async_node == nullptr) {
+ fn_entry->inferred_async_node = instruction->base.source_node;
+ }
+
+ if (type_can_fail(result_type)) {
+ fn_entry->calls_or_awaits_errorable_fn = true;
+ }
+
+ IrInstruction *result_loc;
+ if (type_has_bits(result_type)) {
+ result_loc = ir_resolve_result(ira, &instruction->base, instruction->result_loc,
+ result_type, nullptr, true, true, true);
+ if (result_loc != nullptr && (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc)))
+ return result_loc;
+ } else {
+ result_loc = nullptr;
+ }
+
+ IrInstruction *result = ir_build_await_gen(ira, &instruction->base, frame, result_type, result_loc);
+ return ir_finish_anal(ira, result);
+}
+
+static IrInstruction *ir_analyze_instruction_resume(IrAnalyze *ira, IrInstructionResume *instruction) {
+ IrInstruction *frame_ptr = instruction->frame->child;
+ if (type_is_invalid(frame_ptr->value.type))
+ return ira->codegen->invalid_instruction;
+
+ IrInstruction *frame;
+ if (frame_ptr->value.type->id == ZigTypeIdPointer &&
+ frame_ptr->value.type->data.pointer.ptr_len == PtrLenSingle &&
+ frame_ptr->value.type->data.pointer.child_type->id == ZigTypeIdFnFrame)
+ {
+ frame = frame_ptr;
+ } else {
+ frame = ir_get_deref(ira, &instruction->base, frame_ptr, nullptr);
+ }
+
+ ZigType *any_frame_type = get_any_frame_type(ira->codegen, nullptr);
+ IrInstruction *casted_frame = ir_implicit_cast(ira, frame, any_frame_type);
+ if (type_is_invalid(casted_frame->value.type))
+ return ira->codegen->invalid_instruction;
+
+ return ir_build_resume(&ira->new_irb, instruction->base.scope, instruction->base.source_node, casted_frame);
+}
+
+static IrInstruction *ir_analyze_instruction_spill_begin(IrAnalyze *ira, IrInstructionSpillBegin *instruction) {
+ if (ir_should_inline(ira->new_irb.exec, instruction->base.scope))
+ return ir_const_void(ira, &instruction->base);
+
+ IrInstruction *operand = instruction->operand->child;
+ if (type_is_invalid(operand->value.type))
+ return ira->codegen->invalid_instruction;
+
+ if (!type_has_bits(operand->value.type))
+ return ir_const_void(ira, &instruction->base);
+
+ ir_assert(instruction->spill_id == SpillIdRetErrCode, &instruction->base);
+ ira->new_irb.exec->need_err_code_spill = true;
+
+ IrInstructionSpillBegin *result = ir_build_spill_begin(&ira->new_irb, instruction->base.scope,
+ instruction->base.source_node, operand, instruction->spill_id);
+ return &result->base;
+}
+
+static IrInstruction *ir_analyze_instruction_spill_end(IrAnalyze *ira, IrInstructionSpillEnd *instruction) {
+ IrInstruction *operand = instruction->begin->operand->child;
+ if (type_is_invalid(operand->value.type))
+ return ira->codegen->invalid_instruction;
+
+ if (ir_should_inline(ira->new_irb.exec, instruction->base.scope) || !type_has_bits(operand->value.type))
+ return operand;
+
+ ir_assert(instruction->begin->base.child->id == IrInstructionIdSpillBegin, &instruction->base);
+ IrInstructionSpillBegin *begin = reinterpret_cast<IrInstructionSpillBegin *>(instruction->begin->base.child);
+
+ IrInstruction *result = ir_build_spill_end(&ira->new_irb, instruction->base.scope,
+ instruction->base.source_node, begin);
+ result->value.type = operand->value.type;
+ return result;
+}
+
static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction *instruction) {
switch (instruction->id) {
case IrInstructionIdInvalid:
@@ -25512,6 +24685,8 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
case IrInstructionIdSliceGen:
case IrInstructionIdRefGen:
case IrInstructionIdTestErrGen:
+ case IrInstructionIdFrameSizeGen:
+ case IrInstructionIdAwaitGen:
zig_unreachable();
case IrInstructionIdReturn:
@@ -25552,6 +24727,8 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_set_runtime_safety(ira, (IrInstructionSetRuntimeSafety *)instruction);
case IrInstructionIdSetFloatMode:
return ir_analyze_instruction_set_float_mode(ira, (IrInstructionSetFloatMode *)instruction);
+ case IrInstructionIdAnyFrameType:
+ return ir_analyze_instruction_any_frame_type(ira, (IrInstructionAnyFrameType *)instruction);
case IrInstructionIdSliceType:
return ir_analyze_instruction_slice_type(ira, (IrInstructionSliceType *)instruction);
case IrInstructionIdGlobalAsm:
@@ -25560,8 +24737,6 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_asm(ira, (IrInstructionAsm *)instruction);
case IrInstructionIdArrayType:
return ir_analyze_instruction_array_type(ira, (IrInstructionArrayType *)instruction);
- case IrInstructionIdPromiseType:
- return ir_analyze_instruction_promise_type(ira, (IrInstructionPromiseType *)instruction);
case IrInstructionIdSizeOf:
return ir_analyze_instruction_size_of(ira, (IrInstructionSizeOf *)instruction);
case IrInstructionIdTestNonNull:
@@ -25660,8 +24835,12 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_return_address(ira, (IrInstructionReturnAddress *)instruction);
case IrInstructionIdFrameAddress:
return ir_analyze_instruction_frame_address(ira, (IrInstructionFrameAddress *)instruction);
- case IrInstructionIdHandle:
- return ir_analyze_instruction_handle(ira, (IrInstructionHandle *)instruction);
+ case IrInstructionIdFrameHandle:
+ return ir_analyze_instruction_frame_handle(ira, (IrInstructionFrameHandle *)instruction);
+ case IrInstructionIdFrameType:
+ return ir_analyze_instruction_frame_type(ira, (IrInstructionFrameType *)instruction);
+ case IrInstructionIdFrameSizeSrc:
+ return ir_analyze_instruction_frame_size(ira, (IrInstructionFrameSizeSrc *)instruction);
case IrInstructionIdAlignOf:
return ir_analyze_instruction_align_of(ira, (IrInstructionAlignOf *)instruction);
case IrInstructionIdOverflowOp:
@@ -25716,8 +24895,6 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_resolve_result(ira, (IrInstructionResolveResult *)instruction);
case IrInstructionIdResetResult:
return ir_analyze_instruction_reset_result(ira, (IrInstructionResetResult *)instruction);
- case IrInstructionIdResultPtr:
- return ir_analyze_instruction_result_ptr(ira, (IrInstructionResultPtr *)instruction);
case IrInstructionIdOpaqueType:
return ir_analyze_instruction_opaque_type(ira, (IrInstructionOpaqueType *)instruction);
case IrInstructionIdSetAlignStack:
@@ -25732,50 +24909,14 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_error_return_trace(ira, (IrInstructionErrorReturnTrace *)instruction);
case IrInstructionIdErrorUnion:
return ir_analyze_instruction_error_union(ira, (IrInstructionErrorUnion *)instruction);
- case IrInstructionIdCancel:
- return ir_analyze_instruction_cancel(ira, (IrInstructionCancel *)instruction);
- case IrInstructionIdCoroId:
- return ir_analyze_instruction_coro_id(ira, (IrInstructionCoroId *)instruction);
- case IrInstructionIdCoroAlloc:
- return ir_analyze_instruction_coro_alloc(ira, (IrInstructionCoroAlloc *)instruction);
- case IrInstructionIdCoroSize:
- return ir_analyze_instruction_coro_size(ira, (IrInstructionCoroSize *)instruction);
- case IrInstructionIdCoroBegin:
- return ir_analyze_instruction_coro_begin(ira, (IrInstructionCoroBegin *)instruction);
- case IrInstructionIdGetImplicitAllocator:
- return ir_analyze_instruction_get_implicit_allocator(ira, (IrInstructionGetImplicitAllocator *)instruction);
- case IrInstructionIdCoroAllocFail:
- return ir_analyze_instruction_coro_alloc_fail(ira, (IrInstructionCoroAllocFail *)instruction);
- case IrInstructionIdCoroSuspend:
- return ir_analyze_instruction_coro_suspend(ira, (IrInstructionCoroSuspend *)instruction);
- case IrInstructionIdCoroEnd:
- return ir_analyze_instruction_coro_end(ira, (IrInstructionCoroEnd *)instruction);
- case IrInstructionIdCoroFree:
- return ir_analyze_instruction_coro_free(ira, (IrInstructionCoroFree *)instruction);
- case IrInstructionIdCoroResume:
- return ir_analyze_instruction_coro_resume(ira, (IrInstructionCoroResume *)instruction);
- case IrInstructionIdCoroSave:
- return ir_analyze_instruction_coro_save(ira, (IrInstructionCoroSave *)instruction);
- case IrInstructionIdCoroPromise:
- return ir_analyze_instruction_coro_promise(ira, (IrInstructionCoroPromise *)instruction);
- case IrInstructionIdCoroAllocHelper:
- return ir_analyze_instruction_coro_alloc_helper(ira, (IrInstructionCoroAllocHelper *)instruction);
case IrInstructionIdAtomicRmw:
return ir_analyze_instruction_atomic_rmw(ira, (IrInstructionAtomicRmw *)instruction);
case IrInstructionIdAtomicLoad:
return ir_analyze_instruction_atomic_load(ira, (IrInstructionAtomicLoad *)instruction);
- case IrInstructionIdPromiseResultType:
- return ir_analyze_instruction_promise_result_type(ira, (IrInstructionPromiseResultType *)instruction);
- case IrInstructionIdAwaitBookkeeping:
- return ir_analyze_instruction_await_bookkeeping(ira, (IrInstructionAwaitBookkeeping *)instruction);
case IrInstructionIdSaveErrRetAddr:
return ir_analyze_instruction_save_err_ret_addr(ira, (IrInstructionSaveErrRetAddr *)instruction);
case IrInstructionIdAddImplicitReturnType:
return ir_analyze_instruction_add_implicit_return_type(ira, (IrInstructionAddImplicitReturnType *)instruction);
- case IrInstructionIdMergeErrRetTraces:
- return ir_analyze_instruction_merge_err_ret_traces(ira, (IrInstructionMergeErrRetTraces *)instruction);
- case IrInstructionIdMarkErrRetTracePtr:
- return ir_analyze_instruction_mark_err_ret_trace_ptr(ira, (IrInstructionMarkErrRetTracePtr *)instruction);
case IrInstructionIdFloatOp:
return ir_analyze_instruction_float_op(ira, (IrInstructionFloatOp *)instruction);
case IrInstructionIdMulAdd:
@@ -25802,6 +24943,18 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
return ir_analyze_instruction_bit_cast_src(ira, (IrInstructionBitCastSrc *)instruction);
case IrInstructionIdUnionInitNamedField:
return ir_analyze_instruction_union_init_named_field(ira, (IrInstructionUnionInitNamedField *)instruction);
+ case IrInstructionIdSuspendBegin:
+ return ir_analyze_instruction_suspend_begin(ira, (IrInstructionSuspendBegin *)instruction);
+ case IrInstructionIdSuspendFinish:
+ return ir_analyze_instruction_suspend_finish(ira, (IrInstructionSuspendFinish *)instruction);
+ case IrInstructionIdResume:
+ return ir_analyze_instruction_resume(ira, (IrInstructionResume *)instruction);
+ case IrInstructionIdAwaitSrc:
+ return ir_analyze_instruction_await(ira, (IrInstructionAwaitSrc *)instruction);
+ case IrInstructionIdSpillBegin:
+ return ir_analyze_instruction_spill_begin(ira, (IrInstructionSpillBegin *)instruction);
+ case IrInstructionIdSpillEnd:
+ return ir_analyze_instruction_spill_end(ira, (IrInstructionSpillEnd *)instruction);
}
zig_unreachable();
}
@@ -25818,9 +24971,7 @@ ZigType *ir_analyze(CodeGen *codegen, IrExecutable *old_exec, IrExecutable *new_
old_exec->analysis = ira;
ira->codegen = codegen;
- ZigFn *fn_entry = exec_fn_entry(old_exec);
- bool is_async = fn_entry != nullptr && fn_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync;
- ira->explicit_return_type = is_async ? get_promise_type(codegen, expected_type) : expected_type;
+ ira->explicit_return_type = expected_type;
ira->explicit_return_type_source_node = expected_type_source_node;
ira->old_irb.codegen = codegen;
@@ -25918,19 +25069,8 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdPtrType:
case IrInstructionIdSetAlignStack:
case IrInstructionIdExport:
- case IrInstructionIdCancel:
- case IrInstructionIdCoroId:
- case IrInstructionIdCoroBegin:
- case IrInstructionIdCoroAllocFail:
- case IrInstructionIdCoroEnd:
- case IrInstructionIdCoroResume:
- case IrInstructionIdCoroSave:
- case IrInstructionIdCoroAllocHelper:
- case IrInstructionIdAwaitBookkeeping:
case IrInstructionIdSaveErrRetAddr:
case IrInstructionIdAddImplicitReturnType:
- case IrInstructionIdMergeErrRetTraces:
- case IrInstructionIdMarkErrRetTracePtr:
case IrInstructionIdAtomicRmw:
case IrInstructionIdCmpxchgGen:
case IrInstructionIdCmpxchgSrc:
@@ -25945,6 +25085,12 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdOptionalWrap:
case IrInstructionIdVectorToArray:
case IrInstructionIdResetResult:
+ case IrInstructionIdSuspendBegin:
+ case IrInstructionIdSuspendFinish:
+ case IrInstructionIdResume:
+ case IrInstructionIdAwaitSrc:
+ case IrInstructionIdAwaitGen:
+ case IrInstructionIdSpillBegin:
return true;
case IrInstructionIdPhi:
@@ -25963,8 +25109,8 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdTypeOf:
case IrInstructionIdStructFieldPtr:
case IrInstructionIdArrayType:
- case IrInstructionIdPromiseType:
case IrInstructionIdSliceType:
+ case IrInstructionIdAnyFrameType:
case IrInstructionIdSizeOf:
case IrInstructionIdTestNonNull:
case IrInstructionIdOptionalUnwrapPtr:
@@ -25990,7 +25136,10 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdAlignOf:
case IrInstructionIdReturnAddress:
case IrInstructionIdFrameAddress:
- case IrInstructionIdHandle:
+ case IrInstructionIdFrameHandle:
+ case IrInstructionIdFrameType:
+ case IrInstructionIdFrameSizeSrc:
+ case IrInstructionIdFrameSizeGen:
case IrInstructionIdTestErrSrc:
case IrInstructionIdTestErrGen:
case IrInstructionIdFnProto:
@@ -26023,13 +25172,6 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdTagType:
case IrInstructionIdErrorReturnTrace:
case IrInstructionIdErrorUnion:
- case IrInstructionIdGetImplicitAllocator:
- case IrInstructionIdCoroAlloc:
- case IrInstructionIdCoroSize:
- case IrInstructionIdCoroSuspend:
- case IrInstructionIdCoroFree:
- case IrInstructionIdCoroPromise:
- case IrInstructionIdPromiseResultType:
case IrInstructionIdFloatOp:
case IrInstructionIdMulAdd:
case IrInstructionIdAtomicLoad:
@@ -26046,7 +25188,7 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdHasDecl:
case IrInstructionIdAllocaSrc:
case IrInstructionIdAllocaGen:
- case IrInstructionIdResultPtr:
+ case IrInstructionIdSpillEnd:
return false;
case IrInstructionIdAsm:
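
The rewritten call analysis in ir.cpp above drops the old allocator-driven coroutine path: an `async` call to a comptime-known function now reserves the callee's `@Frame(...)` as a result location inside the caller, the `@asyncCall` branch (`casted_new_stack != nullptr`) takes explicit frame memory plus a return pointer and yields `anyframe->T`, and `await`, `resume`, and `@frame()` set `inferred_async_node` on the surrounding function. A minimal usage-level sketch of those semantics, with invented function names and not taken from this diff:

    fn leaf() i32 {
        suspend; // any suspend point makes `leaf` inferred-async
        return 1234;
    }

    fn caller() i32 {
        // The callee frame lives inside the caller's own frame; no allocator
        // or promise type is involved anymore.
        var frame = async leaf(); // runs `leaf` until its `suspend`
        resume frame;             // continues past the suspend; `leaf` returns
        return await frame;       // `await` also marks `caller` as inferred-async
    }
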
diff --git a/src/ir.hpp b/src/ir.hpp
index 597624e2e6..3761c5a97d 100644
--- a/src/ir.hpp
+++ b/src/ir.hpp
@@ -28,4 +28,6 @@ ConstExprValue *const_ptr_pointee(IrAnalyze *ira, CodeGen *codegen, ConstExprVal
AstNode *source_node);
const char *float_op_to_name(BuiltinFnId op, bool llvm_name);
+void ir_add_analysis_trace(IrAnalyze *ira, ErrorMsg *err_msg, Buf *text);
+
#endif
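
The ir_print.cpp changes below replace the old promise printing with the new frame builtins (`@frame()`, `@Frame(fn)`, `@frameSize(fn)`) and the `anyframe` / `anyframe->T` type. As a rough orientation for how those tokens look in source, here is a hypothetical sketch (names invented, semantics per the analysis functions added above, not code from the repository):

    fn ticker() void {
        suspend;
    }

    fn demo() void {
        var frame = async ticker();              // `frame` has type @Frame(ticker)
        const bytes: usize = @frameSize(ticker); // frame size in bytes

        // @frame() is a pointer to the current function's own frame; pointers
        // to frames coerce to the type-erased anyframe / anyframe->T.
        const self: anyframe = @frame();
        const handle: anyframe->void = &frame;
        resume handle;
    }
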
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index 588a9b2882..7580f19059 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -64,11 +64,9 @@ static void ir_print_other_block(IrPrint *irp, IrBasicBlock *bb) {
}
}
-static void ir_print_return(IrPrint *irp, IrInstructionReturn *return_instruction) {
+static void ir_print_return(IrPrint *irp, IrInstructionReturn *instruction) {
fprintf(irp->f, "return ");
- if (return_instruction->value != nullptr) {
- ir_print_other_instruction(irp, return_instruction->value);
- }
+ ir_print_other_instruction(irp, instruction->operand);
}
static void ir_print_const(IrPrint *irp, IrInstructionConst *const_instruction) {
@@ -257,13 +255,7 @@ static void ir_print_result_loc(IrPrint *irp, ResultLoc *result_loc) {
static void ir_print_call_src(IrPrint *irp, IrInstructionCallSrc *call_instruction) {
if (call_instruction->is_async) {
- fprintf(irp->f, "async");
- if (call_instruction->async_allocator != nullptr) {
- fprintf(irp->f, "<");
- ir_print_other_instruction(irp, call_instruction->async_allocator);
- fprintf(irp->f, ">");
- }
- fprintf(irp->f, " ");
+ fprintf(irp->f, "async ");
}
if (call_instruction->fn_entry) {
fprintf(irp->f, "%s", buf_ptr(&call_instruction->fn_entry->symbol_name));
@@ -284,13 +276,7 @@ static void ir_print_call_src(IrPrint *irp, IrInstructionCallSrc *call_instructi
static void ir_print_call_gen(IrPrint *irp, IrInstructionCallGen *call_instruction) {
if (call_instruction->is_async) {
- fprintf(irp->f, "async");
- if (call_instruction->async_allocator != nullptr) {
- fprintf(irp->f, "<");
- ir_print_other_instruction(irp, call_instruction->async_allocator);
- fprintf(irp->f, ">");
- }
- fprintf(irp->f, " ");
+ fprintf(irp->f, "async ");
}
if (call_instruction->fn_entry) {
fprintf(irp->f, "%s", buf_ptr(&call_instruction->fn_entry->symbol_name));
@@ -477,20 +463,21 @@ static void ir_print_array_type(IrPrint *irp, IrInstructionArrayType *instructio
ir_print_other_instruction(irp, instruction->child_type);
}
-static void ir_print_promise_type(IrPrint *irp, IrInstructionPromiseType *instruction) {
- fprintf(irp->f, "promise");
- if (instruction->payload_type != nullptr) {
- fprintf(irp->f, "->");
- ir_print_other_instruction(irp, instruction->payload_type);
- }
-}
-
static void ir_print_slice_type(IrPrint *irp, IrInstructionSliceType *instruction) {
const char *const_kw = instruction->is_const ? "const " : "";
fprintf(irp->f, "[]%s", const_kw);
ir_print_other_instruction(irp, instruction->child_type);
}
+static void ir_print_any_frame_type(IrPrint *irp, IrInstructionAnyFrameType *instruction) {
+ if (instruction->payload_type == nullptr) {
+ fprintf(irp->f, "anyframe");
+ } else {
+ fprintf(irp->f, "anyframe->");
+ ir_print_other_instruction(irp, instruction->payload_type);
+ }
+}
+
static void ir_print_global_asm(IrPrint *irp, IrInstructionGlobalAsm *instruction) {
fprintf(irp->f, "asm(\"%s\")", buf_ptr(instruction->asm_code));
}
@@ -926,8 +913,26 @@ static void ir_print_frame_address(IrPrint *irp, IrInstructionFrameAddress *inst
fprintf(irp->f, "@frameAddress()");
}
-static void ir_print_handle(IrPrint *irp, IrInstructionHandle *instruction) {
- fprintf(irp->f, "@handle()");
+static void ir_print_handle(IrPrint *irp, IrInstructionFrameHandle *instruction) {
+ fprintf(irp->f, "@frame()");
+}
+
+static void ir_print_frame_type(IrPrint *irp, IrInstructionFrameType *instruction) {
+ fprintf(irp->f, "@Frame(");
+ ir_print_other_instruction(irp, instruction->fn);
+ fprintf(irp->f, ")");
+}
+
+static void ir_print_frame_size_src(IrPrint *irp, IrInstructionFrameSizeSrc *instruction) {
+ fprintf(irp->f, "@frameSize(");
+ ir_print_other_instruction(irp, instruction->fn);
+ fprintf(irp->f, ")");
+}
+
+static void ir_print_frame_size_gen(IrPrint *irp, IrInstructionFrameSizeGen *instruction) {
+ fprintf(irp->f, "@frameSize(");
+ ir_print_other_instruction(irp, instruction->fn);
+ fprintf(irp->f, ")");
}
static void ir_print_return_address(IrPrint *irp, IrInstructionReturnAddress *instruction) {
@@ -1322,14 +1327,6 @@ static void ir_print_reset_result(IrPrint *irp, IrInstructionResetResult *instru
fprintf(irp->f, ")");
}
-static void ir_print_result_ptr(IrPrint *irp, IrInstructionResultPtr *instruction) {
- fprintf(irp->f, "ResultPtr(");
- ir_print_result_loc(irp, instruction->result_loc);
- fprintf(irp->f, ",");
- ir_print_other_instruction(irp, instruction->result);
- fprintf(irp->f, ")");
-}
-
static void ir_print_opaque_type(IrPrint *irp, IrInstructionOpaqueType *instruction) {
fprintf(irp->f, "@OpaqueType()");
}
@@ -1391,110 +1388,6 @@ static void ir_print_error_union(IrPrint *irp, IrInstructionErrorUnion *instruct
ir_print_other_instruction(irp, instruction->payload);
}
-static void ir_print_cancel(IrPrint *irp, IrInstructionCancel *instruction) {
- fprintf(irp->f, "cancel ");
- ir_print_other_instruction(irp, instruction->target);
-}
-
-static void ir_print_get_implicit_allocator(IrPrint *irp, IrInstructionGetImplicitAllocator *instruction) {
- fprintf(irp->f, "@getImplicitAllocator(");
- switch (instruction->id) {
- case ImplicitAllocatorIdArg:
- fprintf(irp->f, "Arg");
- break;
- case ImplicitAllocatorIdLocalVar:
- fprintf(irp->f, "LocalVar");
- break;
- }
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_id(IrPrint *irp, IrInstructionCoroId *instruction) {
- fprintf(irp->f, "@coroId(");
- ir_print_other_instruction(irp, instruction->promise_ptr);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_alloc(IrPrint *irp, IrInstructionCoroAlloc *instruction) {
- fprintf(irp->f, "@coroAlloc(");
- ir_print_other_instruction(irp, instruction->coro_id);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_size(IrPrint *irp, IrInstructionCoroSize *instruction) {
- fprintf(irp->f, "@coroSize()");
-}
-
-static void ir_print_coro_begin(IrPrint *irp, IrInstructionCoroBegin *instruction) {
- fprintf(irp->f, "@coroBegin(");
- ir_print_other_instruction(irp, instruction->coro_id);
- fprintf(irp->f, ",");
- ir_print_other_instruction(irp, instruction->coro_mem_ptr);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_alloc_fail(IrPrint *irp, IrInstructionCoroAllocFail *instruction) {
- fprintf(irp->f, "@coroAllocFail(");
- ir_print_other_instruction(irp, instruction->err_val);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_suspend(IrPrint *irp, IrInstructionCoroSuspend *instruction) {
- fprintf(irp->f, "@coroSuspend(");
- if (instruction->save_point != nullptr) {
- ir_print_other_instruction(irp, instruction->save_point);
- } else {
- fprintf(irp->f, "null");
- }
- fprintf(irp->f, ",");
- ir_print_other_instruction(irp, instruction->is_final);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_end(IrPrint *irp, IrInstructionCoroEnd *instruction) {
- fprintf(irp->f, "@coroEnd()");
-}
-
-static void ir_print_coro_free(IrPrint *irp, IrInstructionCoroFree *instruction) {
- fprintf(irp->f, "@coroFree(");
- ir_print_other_instruction(irp, instruction->coro_id);
- fprintf(irp->f, ",");
- ir_print_other_instruction(irp, instruction->coro_handle);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_resume(IrPrint *irp, IrInstructionCoroResume *instruction) {
- fprintf(irp->f, "@coroResume(");
- ir_print_other_instruction(irp, instruction->awaiter_handle);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_save(IrPrint *irp, IrInstructionCoroSave *instruction) {
- fprintf(irp->f, "@coroSave(");
- ir_print_other_instruction(irp, instruction->coro_handle);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_promise(IrPrint *irp, IrInstructionCoroPromise *instruction) {
- fprintf(irp->f, "@coroPromise(");
- ir_print_other_instruction(irp, instruction->coro_handle);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_promise_result_type(IrPrint *irp, IrInstructionPromiseResultType *instruction) {
- fprintf(irp->f, "@PromiseResultType(");
- ir_print_other_instruction(irp, instruction->promise_type);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_coro_alloc_helper(IrPrint *irp, IrInstructionCoroAllocHelper *instruction) {
- fprintf(irp->f, "@coroAllocHelper(");
- ir_print_other_instruction(irp, instruction->realloc_fn);
- fprintf(irp->f, ",");
- ir_print_other_instruction(irp, instruction->coro_size);
- fprintf(irp->f, ")");
-}
-
static void ir_print_atomic_rmw(IrPrint *irp, IrInstructionAtomicRmw *instruction) {
fprintf(irp->f, "@atomicRmw(");
if (instruction->operand_type != nullptr) {
@@ -1539,12 +1432,6 @@ static void ir_print_atomic_load(IrPrint *irp, IrInstructionAtomicLoad *instruct
fprintf(irp->f, ")");
}
-static void ir_print_await_bookkeeping(IrPrint *irp, IrInstructionAwaitBookkeeping *instruction) {
- fprintf(irp->f, "@awaitBookkeeping(");
- ir_print_other_instruction(irp, instruction->promise_result_type);
- fprintf(irp->f, ")");
-}
-
static void ir_print_save_err_ret_addr(IrPrint *irp, IrInstructionSaveErrRetAddr *instruction) {
fprintf(irp->f, "@saveErrRetAddr()");
}
@@ -1555,22 +1442,6 @@ static void ir_print_add_implicit_return_type(IrPrint *irp, IrInstructionAddImpl
fprintf(irp->f, ")");
}
-static void ir_print_merge_err_ret_traces(IrPrint *irp, IrInstructionMergeErrRetTraces *instruction) {
- fprintf(irp->f, "@mergeErrRetTraces(");
- ir_print_other_instruction(irp, instruction->coro_promise_ptr);
- fprintf(irp->f, ",");
- ir_print_other_instruction(irp, instruction->src_err_ret_trace_ptr);
- fprintf(irp->f, ",");
- ir_print_other_instruction(irp, instruction->dest_err_ret_trace_ptr);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_mark_err_ret_trace_ptr(IrPrint *irp, IrInstructionMarkErrRetTracePtr *instruction) {
- fprintf(irp->f, "@markErrRetTracePtr(");
- ir_print_other_instruction(irp, instruction->err_ret_trace_ptr);
- fprintf(irp->f, ")");
-}
-
static void ir_print_float_op(IrPrint *irp, IrInstructionFloatOp *instruction) {
fprintf(irp->f, "@%s(", float_op_to_name(instruction->op, false));
@@ -1638,6 +1509,47 @@ static void ir_print_union_init_named_field(IrPrint *irp, IrInstructionUnionInit
fprintf(irp->f, ")");
}
+static void ir_print_suspend_begin(IrPrint *irp, IrInstructionSuspendBegin *instruction) {
+ fprintf(irp->f, "@suspendBegin()");
+}
+
+static void ir_print_suspend_finish(IrPrint *irp, IrInstructionSuspendFinish *instruction) {
+ fprintf(irp->f, "@suspendFinish()");
+}
+
+static void ir_print_resume(IrPrint *irp, IrInstructionResume *instruction) {
+ fprintf(irp->f, "resume ");
+ ir_print_other_instruction(irp, instruction->frame);
+}
+
+static void ir_print_await_src(IrPrint *irp, IrInstructionAwaitSrc *instruction) {
+ fprintf(irp->f, "@await(");
+ ir_print_other_instruction(irp, instruction->frame);
+ fprintf(irp->f, ",");
+ ir_print_result_loc(irp, instruction->result_loc);
+ fprintf(irp->f, ")");
+}
+
+static void ir_print_await_gen(IrPrint *irp, IrInstructionAwaitGen *instruction) {
+ fprintf(irp->f, "@await(");
+ ir_print_other_instruction(irp, instruction->frame);
+ fprintf(irp->f, ",");
+ ir_print_other_instruction(irp, instruction->result_loc);
+ fprintf(irp->f, ")");
+}
+
+static void ir_print_spill_begin(IrPrint *irp, IrInstructionSpillBegin *instruction) {
+ fprintf(irp->f, "@spillBegin(");
+ ir_print_other_instruction(irp, instruction->operand);
+ fprintf(irp->f, ")");
+}
+
+static void ir_print_spill_end(IrPrint *irp, IrInstructionSpillEnd *instruction) {
+ fprintf(irp->f, "@spillEnd(");
+ ir_print_other_instruction(irp, &instruction->begin->base);
+ fprintf(irp->f, ")");
+}
+
static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
ir_print_prefix(irp, instruction);
switch (instruction->id) {
@@ -1727,12 +1639,12 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdArrayType:
ir_print_array_type(irp, (IrInstructionArrayType *)instruction);
break;
- case IrInstructionIdPromiseType:
- ir_print_promise_type(irp, (IrInstructionPromiseType *)instruction);
- break;
case IrInstructionIdSliceType:
ir_print_slice_type(irp, (IrInstructionSliceType *)instruction);
break;
+ case IrInstructionIdAnyFrameType:
+ ir_print_any_frame_type(irp, (IrInstructionAnyFrameType *)instruction);
+ break;
case IrInstructionIdGlobalAsm:
ir_print_global_asm(irp, (IrInstructionGlobalAsm *)instruction);
break;
@@ -1886,8 +1798,17 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdFrameAddress:
ir_print_frame_address(irp, (IrInstructionFrameAddress *)instruction);
break;
- case IrInstructionIdHandle:
- ir_print_handle(irp, (IrInstructionHandle *)instruction);
+ case IrInstructionIdFrameHandle:
+ ir_print_handle(irp, (IrInstructionFrameHandle *)instruction);
+ break;
+ case IrInstructionIdFrameType:
+ ir_print_frame_type(irp, (IrInstructionFrameType *)instruction);
+ break;
+ case IrInstructionIdFrameSizeSrc:
+ ir_print_frame_size_src(irp, (IrInstructionFrameSizeSrc *)instruction);
+ break;
+ case IrInstructionIdFrameSizeGen:
+ ir_print_frame_size_gen(irp, (IrInstructionFrameSizeGen *)instruction);
break;
case IrInstructionIdAlignOf:
ir_print_align_of(irp, (IrInstructionAlignOf *)instruction);
@@ -2006,9 +1927,6 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdResetResult:
ir_print_reset_result(irp, (IrInstructionResetResult *)instruction);
break;
- case IrInstructionIdResultPtr:
- ir_print_result_ptr(irp, (IrInstructionResultPtr *)instruction);
- break;
case IrInstructionIdOpaqueType:
ir_print_opaque_type(irp, (IrInstructionOpaqueType *)instruction);
break;
@@ -2030,69 +1948,15 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdErrorUnion:
ir_print_error_union(irp, (IrInstructionErrorUnion *)instruction);
break;
- case IrInstructionIdCancel:
- ir_print_cancel(irp, (IrInstructionCancel *)instruction);
- break;
- case IrInstructionIdGetImplicitAllocator:
- ir_print_get_implicit_allocator(irp, (IrInstructionGetImplicitAllocator *)instruction);
- break;
- case IrInstructionIdCoroId:
- ir_print_coro_id(irp, (IrInstructionCoroId *)instruction);
- break;
- case IrInstructionIdCoroAlloc:
- ir_print_coro_alloc(irp, (IrInstructionCoroAlloc *)instruction);
- break;
- case IrInstructionIdCoroSize:
- ir_print_coro_size(irp, (IrInstructionCoroSize *)instruction);
- break;
- case IrInstructionIdCoroBegin:
- ir_print_coro_begin(irp, (IrInstructionCoroBegin *)instruction);
- break;
- case IrInstructionIdCoroAllocFail:
- ir_print_coro_alloc_fail(irp, (IrInstructionCoroAllocFail *)instruction);
- break;
- case IrInstructionIdCoroSuspend:
- ir_print_coro_suspend(irp, (IrInstructionCoroSuspend *)instruction);
- break;
- case IrInstructionIdCoroEnd:
- ir_print_coro_end(irp, (IrInstructionCoroEnd *)instruction);
- break;
- case IrInstructionIdCoroFree:
- ir_print_coro_free(irp, (IrInstructionCoroFree *)instruction);
- break;
- case IrInstructionIdCoroResume:
- ir_print_coro_resume(irp, (IrInstructionCoroResume *)instruction);
- break;
- case IrInstructionIdCoroSave:
- ir_print_coro_save(irp, (IrInstructionCoroSave *)instruction);
- break;
- case IrInstructionIdCoroAllocHelper:
- ir_print_coro_alloc_helper(irp, (IrInstructionCoroAllocHelper *)instruction);
- break;
case IrInstructionIdAtomicRmw:
ir_print_atomic_rmw(irp, (IrInstructionAtomicRmw *)instruction);
break;
- case IrInstructionIdCoroPromise:
- ir_print_coro_promise(irp, (IrInstructionCoroPromise *)instruction);
- break;
- case IrInstructionIdPromiseResultType:
- ir_print_promise_result_type(irp, (IrInstructionPromiseResultType *)instruction);
- break;
- case IrInstructionIdAwaitBookkeeping:
- ir_print_await_bookkeeping(irp, (IrInstructionAwaitBookkeeping *)instruction);
- break;
case IrInstructionIdSaveErrRetAddr:
ir_print_save_err_ret_addr(irp, (IrInstructionSaveErrRetAddr *)instruction);
break;
case IrInstructionIdAddImplicitReturnType:
ir_print_add_implicit_return_type(irp, (IrInstructionAddImplicitReturnType *)instruction);
break;
- case IrInstructionIdMergeErrRetTraces:
- ir_print_merge_err_ret_traces(irp, (IrInstructionMergeErrRetTraces *)instruction);
- break;
- case IrInstructionIdMarkErrRetTracePtr:
- ir_print_mark_err_ret_trace_ptr(irp, (IrInstructionMarkErrRetTracePtr *)instruction);
- break;
case IrInstructionIdFloatOp:
ir_print_float_op(irp, (IrInstructionFloatOp *)instruction);
break;
@@ -2147,6 +2011,27 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdUnionInitNamedField:
ir_print_union_init_named_field(irp, (IrInstructionUnionInitNamedField *)instruction);
break;
+ case IrInstructionIdSuspendBegin:
+ ir_print_suspend_begin(irp, (IrInstructionSuspendBegin *)instruction);
+ break;
+ case IrInstructionIdSuspendFinish:
+ ir_print_suspend_finish(irp, (IrInstructionSuspendFinish *)instruction);
+ break;
+ case IrInstructionIdResume:
+ ir_print_resume(irp, (IrInstructionResume *)instruction);
+ break;
+ case IrInstructionIdAwaitSrc:
+ ir_print_await_src(irp, (IrInstructionAwaitSrc *)instruction);
+ break;
+ case IrInstructionIdAwaitGen:
+ ir_print_await_gen(irp, (IrInstructionAwaitGen *)instruction);
+ break;
+ case IrInstructionIdSpillBegin:
+ ir_print_spill_begin(irp, (IrInstructionSpillBegin *)instruction);
+ break;
+ case IrInstructionIdSpillEnd:
+ ir_print_spill_end(irp, (IrInstructionSpillEnd *)instruction);
+ break;
}
fprintf(irp->f, "\n");
}
diff --git a/src/parser.cpp b/src/parser.cpp
index fe1f89ac92..afe5735a06 100644
--- a/src/parser.cpp
+++ b/src/parser.cpp
@@ -282,8 +282,8 @@ static AstNode *ast_parse_prefix_op_expr(
case NodeTypeAwaitExpr:
right = &prefix->data.await_expr.expr;
break;
- case NodeTypePromiseType:
- right = &prefix->data.promise_type.payload_type;
+ case NodeTypeAnyFrameType:
+ right = &prefix->data.anyframe_type.payload_type;
break;
case NodeTypeArrayType:
right = &prefix->data.array_type.child_type;
@@ -1167,7 +1167,6 @@ static AstNode *ast_parse_prefix_expr(ParseContext *pc) {
// <- AsmExpr
// / IfExpr
// / KEYWORD_break BreakLabel? Expr?
-// / KEYWORD_cancel Expr
// / KEYWORD_comptime Expr
// / KEYWORD_continue BreakLabel?
// / KEYWORD_resume Expr
@@ -1195,14 +1194,6 @@ static AstNode *ast_parse_primary_expr(ParseContext *pc) {
return res;
}
- Token *cancel = eat_token_if(pc, TokenIdKeywordCancel);
- if (cancel != nullptr) {
- AstNode *expr = ast_expect(pc, ast_parse_expr);
- AstNode *res = ast_create_node(pc, NodeTypeCancel, cancel);
- res->data.cancel_expr.expr = expr;
- return res;
- }
-
Token *comptime = eat_token_if(pc, TokenIdKeywordCompTime);
if (comptime != nullptr) {
AstNode *expr = ast_expect(pc, ast_parse_expr);
@@ -1643,9 +1634,9 @@ static AstNode *ast_parse_primary_type_expr(ParseContext *pc) {
if (null != nullptr)
return ast_create_node(pc, NodeTypeNullLiteral, null);
- Token *promise = eat_token_if(pc, TokenIdKeywordPromise);
- if (promise != nullptr)
- return ast_create_node(pc, NodeTypePromiseType, promise);
+ Token *anyframe = eat_token_if(pc, TokenIdKeywordAnyFrame);
+ if (anyframe != nullptr)
+ return ast_create_node(pc, NodeTypeAnyFrameType, anyframe);
Token *true_token = eat_token_if(pc, TokenIdKeywordTrue);
if (true_token != nullptr) {
@@ -2042,11 +2033,6 @@ static Optional<AstNodeFnProto> ast_parse_fn_cc(ParseContext *pc) {
}
if (eat_token_if(pc, TokenIdKeywordAsync) != nullptr) {
res.cc = CallingConventionAsync;
- if (eat_token_if(pc, TokenIdCmpLessThan) == nullptr)
- return Optional<AstNodeFnProto>::some(res);
-
- res.async_allocator_type = ast_expect(pc, ast_parse_type_expr);
- expect_token(pc, TokenIdCmpGreaterThan);
return Optional<AstNodeFnProto>::some(res);
}
@@ -2522,7 +2508,7 @@ static AstNode *ast_parse_prefix_op(ParseContext *pc) {
// PrefixTypeOp
// <- QUESTIONMARK
-// / KEYWORD_promise MINUSRARROW
+// / KEYWORD_anyframe MINUSRARROW
// / ArrayTypeStart (ByteAlign / KEYWORD_const / KEYWORD_volatile)*
// / PtrTypeStart (KEYWORD_align LPAREN Expr (COLON INTEGER COLON INTEGER)? RPAREN / KEYWORD_const / KEYWORD_volatile)*
static AstNode *ast_parse_prefix_type_op(ParseContext *pc) {
@@ -2533,10 +2519,10 @@ static AstNode *ast_parse_prefix_type_op(ParseContext *pc) {
return res;
}
- Token *promise = eat_token_if(pc, TokenIdKeywordPromise);
- if (promise != nullptr) {
+ Token *anyframe = eat_token_if(pc, TokenIdKeywordAnyFrame);
+ if (anyframe != nullptr) {
if (eat_token_if(pc, TokenIdArrow) != nullptr) {
- AstNode *res = ast_create_node(pc, NodeTypePromiseType, promise);
+ AstNode *res = ast_create_node(pc, NodeTypeAnyFrameType, anyframe);
return res;
}
@@ -2680,11 +2666,6 @@ static AstNode *ast_parse_async_prefix(ParseContext *pc) {
AstNode *res = ast_create_node(pc, NodeTypeFnCallExpr, async);
res->data.fn_call_expr.is_async = true;
res->data.fn_call_expr.seen = false;
- if (eat_token_if(pc, TokenIdCmpLessThan) != nullptr) {
- AstNode *prefix_expr = ast_expect(pc, ast_parse_prefix_expr);
- expect_token(pc, TokenIdCmpGreaterThan);
- res->data.fn_call_expr.async_allocator = prefix_expr;
- }
return res;
}
@@ -2858,7 +2839,6 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont
visit_node_list(&node->data.fn_proto.params, visit, context);
visit_field(&node->data.fn_proto.align_expr, visit, context);
visit_field(&node->data.fn_proto.section_expr, visit, context);
- visit_field(&node->data.fn_proto.async_allocator_type, visit, context);
break;
case NodeTypeFnDef:
visit_field(&node->data.fn_def.fn_proto, visit, context);
@@ -2918,7 +2898,6 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont
case NodeTypeFnCallExpr:
visit_field(&node->data.fn_call_expr.fn_ref_expr, visit, context);
visit_node_list(&node->data.fn_call_expr.params, visit, context);
- visit_field(&node->data.fn_call_expr.async_allocator, visit, context);
break;
case NodeTypeArrayAccessExpr:
visit_field(&node->data.array_access_expr.array_ref_expr, visit, context);
@@ -3034,8 +3013,8 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont
case NodeTypeInferredArrayType:
visit_field(&node->data.array_type.child_type, visit, context);
break;
- case NodeTypePromiseType:
- visit_field(&node->data.promise_type.payload_type, visit, context);
+ case NodeTypeAnyFrameType:
+ visit_field(&node->data.anyframe_type.payload_type, visit, context);
break;
case NodeTypeErrorType:
// none
@@ -3047,9 +3026,6 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont
case NodeTypeErrorSetDecl:
visit_node_list(&node->data.err_set_decl.decls, visit, context);
break;
- case NodeTypeCancel:
- visit_field(&node->data.cancel_expr.expr, visit, context);
- break;
case NodeTypeResume:
visit_field(&node->data.resume_expr.expr, visit, context);
break;
diff --git a/src/target.cpp b/src/target.cpp
index 7bb248a35f..d1ae64acd4 100644
--- a/src/target.cpp
+++ b/src/target.cpp
@@ -1759,3 +1759,7 @@ bool target_supports_libunwind(const ZigTarget *target) {
return true;
}
+
+unsigned target_fn_align(const ZigTarget *target) {
+ return 16;
+}
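The new target_fn_align() above returns a fixed 16-byte alignment regardless of the target (the parameter is currently unused). How it is consumed is not visible in this section; the snippet below is a hypothetical call site, assuming codegen applies the value to LLVM function declarations through the C API.

    #include <llvm-c/Core.h>
    #include "target.hpp"

    // Hypothetical use (not from this diff): functions are LLVM global values,
    // so LLVMSetAlignment can apply the target's function alignment to them.
    static void apply_fn_align(const ZigTarget *target, LLVMValueRef llvm_fn) {
        LLVMSetAlignment(llvm_fn, target_fn_align(target));
    }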
diff --git a/src/target.hpp b/src/target.hpp
index 985a4c11b4..cc8da97777 100644
--- a/src/target.hpp
+++ b/src/target.hpp
@@ -197,4 +197,6 @@ uint32_t target_arch_largest_atomic_bits(ZigLLVM_ArchType arch);
size_t target_libc_count(void);
void target_libc_enum(size_t index, ZigTarget *out_target);
+unsigned target_fn_align(const ZigTarget *target);
+
#endif
diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp
index 783b6e0e20..84f3f2c0ec 100644
--- a/src/tokenizer.cpp
+++ b/src/tokenizer.cpp
@@ -109,11 +109,11 @@ static const struct ZigKeyword zig_keywords[] = {
{"align", TokenIdKeywordAlign},
{"allowzero", TokenIdKeywordAllowZero},
{"and", TokenIdKeywordAnd},
+ {"anyframe", TokenIdKeywordAnyFrame},
{"asm", TokenIdKeywordAsm},
{"async", TokenIdKeywordAsync},
{"await", TokenIdKeywordAwait},
{"break", TokenIdKeywordBreak},
- {"cancel", TokenIdKeywordCancel},
{"catch", TokenIdKeywordCatch},
{"comptime", TokenIdKeywordCompTime},
{"const", TokenIdKeywordConst},
@@ -136,7 +136,6 @@ static const struct ZigKeyword zig_keywords[] = {
{"or", TokenIdKeywordOr},
{"orelse", TokenIdKeywordOrElse},
{"packed", TokenIdKeywordPacked},
- {"promise", TokenIdKeywordPromise},
{"pub", TokenIdKeywordPub},
{"resume", TokenIdKeywordResume},
{"return", TokenIdKeywordReturn},
@@ -1531,9 +1530,9 @@ const char * token_name(TokenId id) {
case TokenIdKeywordAwait: return "await";
case TokenIdKeywordResume: return "resume";
case TokenIdKeywordSuspend: return "suspend";
- case TokenIdKeywordCancel: return "cancel";
case TokenIdKeywordAlign: return "align";
case TokenIdKeywordAnd: return "and";
+ case TokenIdKeywordAnyFrame: return "anyframe";
case TokenIdKeywordAsm: return "asm";
case TokenIdKeywordBreak: return "break";
case TokenIdKeywordCatch: return "catch";
@@ -1558,7 +1557,6 @@ const char * token_name(TokenId id) {
case TokenIdKeywordOr: return "or";
case TokenIdKeywordOrElse: return "orelse";
case TokenIdKeywordPacked: return "packed";
- case TokenIdKeywordPromise: return "promise";
case TokenIdKeywordPub: return "pub";
case TokenIdKeywordReturn: return "return";
case TokenIdKeywordLinkSection: return "linksection";
diff --git a/src/tokenizer.hpp b/src/tokenizer.hpp
index 83dbe99471..ce62f5dc87 100644
--- a/src/tokenizer.hpp
+++ b/src/tokenizer.hpp
@@ -53,11 +53,11 @@ enum TokenId {
TokenIdKeywordAlign,
TokenIdKeywordAllowZero,
TokenIdKeywordAnd,
+ TokenIdKeywordAnyFrame,
TokenIdKeywordAsm,
TokenIdKeywordAsync,
TokenIdKeywordAwait,
TokenIdKeywordBreak,
- TokenIdKeywordCancel,
TokenIdKeywordCatch,
TokenIdKeywordCompTime,
TokenIdKeywordConst,
@@ -81,7 +81,6 @@ enum TokenId {
TokenIdKeywordOr,
TokenIdKeywordOrElse,
TokenIdKeywordPacked,
- TokenIdKeywordPromise,
TokenIdKeywordPub,
TokenIdKeywordResume,
TokenIdKeywordReturn,
diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp
index c51c9e1a50..695f8b18ef 100644
--- a/src/zig_llvm.cpp
+++ b/src/zig_llvm.cpp
@@ -42,7 +42,6 @@
#include <llvm/Support/TargetRegistry.h>
#include <llvm/Target/TargetMachine.h>
#include <llvm/Target/CodeGenCWrappers.h>
-#include <llvm/Transforms/Coroutines.h>
#include <llvm/Transforms/IPO.h>
#include <llvm/Transforms/IPO/AlwaysInliner.h>
#include <llvm/Transforms/IPO/PassManagerBuilder.h>
@@ -203,8 +202,6 @@ bool ZigLLVMTargetMachineEmitToFile(LLVMTargetMachineRef targ_machine_ref, LLVMM
PMBuilder->Inliner = createFunctionInliningPass(PMBuilder->OptLevel, PMBuilder->SizeLevel, false);
}
- addCoroutinePassesToExtensionPoints(*PMBuilder);
-
// Set up the per-function pass manager.
legacy::FunctionPassManager FPM = legacy::FunctionPassManager(module);
auto tliwp = new(std::nothrow) TargetLibraryInfoWrapperPass(tlii);
@@ -898,6 +895,14 @@ LLVMValueRef ZigLLVMBuildAShrExact(LLVMBuilderRef builder, LLVMValueRef LHS, LLV
return wrap(unwrap(builder)->CreateAShr(unwrap(LHS), unwrap(RHS), name, true));
}
+void ZigLLVMSetTailCall(LLVMValueRef Call) {
+ unwrap<CallInst>(Call)->setTailCallKind(CallInst::TCK_MustTail);
+}
+
+void ZigLLVMFunctionSetPrefixData(LLVMValueRef function, LLVMValueRef data) {
+ unwrap<Function>(function)->setPrefixData(unwrap<Constant>(data));
+}
+
class MyOStream: public raw_ostream {
public:
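The two wrappers added above expose LLVM features that have no direct C-API equivalent: forcing a call to be a musttail call and attaching prefix data to a function (both declared in zig_llvm.h below). A hypothetical caller written against the plain LLVM-C API might look like the sketch that follows; the helper name, the placeholder i32 prefix constant, and the call site are assumptions, not code from this commit.

    #include <llvm-c/Core.h>
    #include "zig_llvm.h"

    // Hypothetical helper (not from this diff): emit a call, force it to be a
    // musttail call via the new wrapper, and stamp placeholder prefix data
    // onto a function.
    static LLVMValueRef emit_musttail_call(LLVMBuilderRef builder, LLVMValueRef callee,
            LLVMValueRef *args, unsigned arg_count, LLVMValueRef fn_with_prefix)
    {
        LLVMValueRef call = LLVMBuildCall(builder, callee, args, arg_count, "");
        ZigLLVMSetTailCall(call); // sets CallInst::TCK_MustTail (see wrapper above)

        // Prefix data must be an LLVM Constant; an i32 zero is used purely as a placeholder.
        ZigLLVMFunctionSetPrefixData(fn_with_prefix, LLVMConstInt(LLVMInt32Type(), 0, false));
        return call;
    }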
diff --git a/src/zig_llvm.h b/src/zig_llvm.h
index 8b7b0775f7..2be119ba0c 100644
--- a/src/zig_llvm.h
+++ b/src/zig_llvm.h
@@ -211,6 +211,8 @@ ZIG_EXTERN_C LLVMValueRef ZigLLVMInsertDeclare(struct ZigLLVMDIBuilder *dibuilde
ZIG_EXTERN_C struct ZigLLVMDILocation *ZigLLVMGetDebugLoc(unsigned line, unsigned col, struct ZigLLVMDIScope *scope);
ZIG_EXTERN_C void ZigLLVMSetFastMath(LLVMBuilderRef builder_wrapped, bool on_state);
+ZIG_EXTERN_C void ZigLLVMSetTailCall(LLVMValueRef Call);
+ZIG_EXTERN_C void ZigLLVMFunctionSetPrefixData(LLVMValueRef fn, LLVMValueRef data);
ZIG_EXTERN_C void ZigLLVMAddFunctionAttr(LLVMValueRef fn, const char *attr_name, const char *attr_value);
ZIG_EXTERN_C void ZigLLVMAddFunctionAttrCold(LLVMValueRef fn);