From 94bbb46ca602be0ea0df97c207a98734ac459a0f Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 29 Aug 2019 10:24:24 -0400
Subject: fix not fully resolving debug info for structs causing llvm error
---
src/analyze.cpp | 15 ++++++++++++++-
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 9ae7e99547..2c934bcf69 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -7271,7 +7271,13 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS
di_scope, di_file, line);
struct_type->data.structure.resolve_status = ResolveStatusLLVMFwdDecl;
- if (ResolveStatusLLVMFwdDecl >= wanted_resolve_status) return;
+ if (ResolveStatusLLVMFwdDecl >= wanted_resolve_status) {
+ struct_type->data.structure.llvm_full_type_queue_index = g->type_resolve_stack.length;
+ g->type_resolve_stack.append(struct_type);
+ return;
+ } else {
+ struct_type->data.structure.llvm_full_type_queue_index = SIZE_MAX;
+ }
}
size_t field_count = struct_type->data.structure.src_field_count;
@@ -7475,6 +7481,13 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS
ZigLLVMReplaceTemporary(g->dbuilder, struct_type->llvm_di_type, replacement_di_type);
struct_type->llvm_di_type = replacement_di_type;
struct_type->data.structure.resolve_status = ResolveStatusLLVMFull;
+ if (struct_type->data.structure.llvm_full_type_queue_index != SIZE_MAX) {
+ ZigType *last = g->type_resolve_stack.last();
+ assert(last->id == ZigTypeIdStruct);
+ last->data.structure.llvm_full_type_queue_index = struct_type->data.structure.llvm_full_type_queue_index;
+ g->type_resolve_stack.swap_remove(struct_type->data.structure.llvm_full_type_queue_index);
+ struct_type->data.structure.llvm_full_type_queue_index = SIZE_MAX;
+ }
}
static void resolve_llvm_types_enum(CodeGen *g, ZigType *enum_type, ResolveStatus wanted_resolve_status) {
--
cgit v1.2.3
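Aside, not part of the commit above: the fix keeps a back-pointer (llvm_full_type_queue_index) from each queued struct type into g->type_resolve_stack, so that once the full debug info has been built the entry can be dropped in O(1). The swap-remove has to patch the index stored in whichever element gets moved into the vacated slot. A standalone sketch of that bookkeeping, with illustrative names rather than the compiler's own types:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Item {
        std::size_t queue_index = SIZE_MAX; // SIZE_MAX means "not currently queued"
    };

    struct ResolveQueue {
        std::vector<Item *> items;

        void push(Item *item) {
            item->queue_index = items.size();
            items.push_back(item);
        }

        // O(1) removal: move the last element into the vacated slot and
        // update that element's stored index so it can still remove itself.
        void swap_remove(Item *item) {
            if (item->queue_index == SIZE_MAX) return;
            Item *last = items.back();
            last->queue_index = item->queue_index;
            items[item->queue_index] = last;
            items.pop_back();
            item->queue_index = SIZE_MAX;
        }
    };

    int main() {
        Item a, b, c;
        ResolveQueue q;
        q.push(&a); q.push(&b); q.push(&c);
        q.swap_remove(&a);                      // c is moved into slot 0
        assert(c.queue_index == 0 && q.items.size() == 2);
    }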
From d9f0446b1f993c1b3c1bf5cc410b6d5f8a2f94fe Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 29 Aug 2019 12:43:56 -0400
Subject: make `@sizeOf` lazy
---
src/all_types.hpp | 8 ++++
src/analyze.cpp | 59 ++++++++++++++++++++++++++---
src/analyze.hpp | 2 +
src/codegen.cpp | 5 +++
src/ir.cpp | 111 ++++++++++++++++++++++++++++++++----------------------
5 files changed, 134 insertions(+), 51 deletions(-)
diff --git a/src/all_types.hpp b/src/all_types.hpp
index e55f10d5e2..36d3c0a398 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -308,6 +308,7 @@ struct ConstGlobalRefs {
enum LazyValueId {
LazyValueIdInvalid,
LazyValueIdAlignOf,
+ LazyValueIdSizeOf,
LazyValueIdPtrType,
LazyValueIdOptType,
LazyValueIdSliceType,
@@ -326,6 +327,13 @@ struct LazyValueAlignOf {
IrInstruction *target_type;
};
+struct LazyValueSizeOf {
+ LazyValue base;
+
+ IrAnalyze *ira;
+ IrInstruction *target_type;
+};
+
struct LazyValueSliceType {
LazyValue base;
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 2c934bcf69..411a0d7a01 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -997,6 +997,7 @@ static Error type_val_resolve_zero_bits(CodeGen *g, ConstExprValue *type_val, Zi
switch (type_val->data.x_lazy->id) {
case LazyValueIdInvalid:
case LazyValueIdAlignOf:
+ case LazyValueIdSizeOf:
zig_unreachable();
case LazyValueIdPtrType: {
LazyValuePtrType *lazy_ptr_type = reinterpret_cast<LazyValuePtrType *>(type_val->data.x_lazy);
@@ -1036,6 +1037,7 @@ Error type_val_resolve_is_opaque_type(CodeGen *g, ConstExprValue *type_val, bool
switch (type_val->data.x_lazy->id) {
case LazyValueIdInvalid:
case LazyValueIdAlignOf:
+ case LazyValueIdSizeOf:
zig_unreachable();
case LazyValueIdSliceType:
case LazyValueIdPtrType:
@@ -1055,6 +1057,7 @@ static ReqCompTime type_val_resolve_requires_comptime(CodeGen *g, ConstExprValue
switch (type_val->data.x_lazy->id) {
case LazyValueIdInvalid:
case LazyValueIdAlignOf:
+ case LazyValueIdSizeOf:
zig_unreachable();
case LazyValueIdSliceType: {
LazyValueSliceType *lazy_slice_type = reinterpret_cast<LazyValueSliceType *>(type_val->data.x_lazy);
@@ -1105,7 +1108,7 @@ static ReqCompTime type_val_resolve_requires_comptime(CodeGen *g, ConstExprValue
zig_unreachable();
}
-static Error type_val_resolve_abi_size(CodeGen *g, AstNode *source_node, ConstExprValue *type_val,
+Error type_val_resolve_abi_size(CodeGen *g, AstNode *source_node, ConstExprValue *type_val,
size_t *abi_size, size_t *size_in_bits)
{
Error err;
@@ -1123,12 +1126,42 @@ start_over:
switch (type_val->data.x_lazy->id) {
case LazyValueIdInvalid:
case LazyValueIdAlignOf:
+ case LazyValueIdSizeOf:
zig_unreachable();
- case LazyValueIdSliceType:
- *abi_size = g->builtin_types.entry_usize->abi_size * 2;
- *size_in_bits = g->builtin_types.entry_usize->size_in_bits * 2;
+ case LazyValueIdSliceType: {
+ LazyValueSliceType *lazy_slice_type = reinterpret_cast<LazyValueSliceType *>(type_val->data.x_lazy);
+ bool is_zero_bits;
+ if ((err = type_val_resolve_zero_bits(g, &lazy_slice_type->elem_type->value, nullptr,
+ nullptr, &is_zero_bits)))
+ {
+ return err;
+ }
+ if (is_zero_bits) {
+ *abi_size = g->builtin_types.entry_usize->abi_size;
+ *size_in_bits = g->builtin_types.entry_usize->size_in_bits;
+ } else {
+ *abi_size = g->builtin_types.entry_usize->abi_size * 2;
+ *size_in_bits = g->builtin_types.entry_usize->size_in_bits * 2;
+ }
return ErrorNone;
- case LazyValueIdPtrType:
+ }
+ case LazyValueIdPtrType: {
+ LazyValuePtrType *lazy_ptr_type = reinterpret_cast<LazyValuePtrType *>(type_val->data.x_lazy);
+ bool is_zero_bits;
+ if ((err = type_val_resolve_zero_bits(g, &lazy_ptr_type->elem_type->value, nullptr,
+ nullptr, &is_zero_bits)))
+ {
+ return err;
+ }
+ if (is_zero_bits) {
+ *abi_size = 0;
+ *size_in_bits = 0;
+ } else {
+ *abi_size = g->builtin_types.entry_usize->abi_size;
+ *size_in_bits = g->builtin_types.entry_usize->size_in_bits;
+ }
+ return ErrorNone;
+ }
case LazyValueIdFnType:
*abi_size = g->builtin_types.entry_usize->abi_size;
*size_in_bits = g->builtin_types.entry_usize->size_in_bits;
@@ -1159,6 +1192,7 @@ Error type_val_resolve_abi_align(CodeGen *g, ConstExprValue *type_val, uint32_t
switch (type_val->data.x_lazy->id) {
case LazyValueIdInvalid:
case LazyValueIdAlignOf:
+ case LazyValueIdSizeOf:
zig_unreachable();
case LazyValueIdSliceType:
case LazyValueIdPtrType:
@@ -1193,6 +1227,7 @@ static OnePossibleValue type_val_resolve_has_one_possible_value(CodeGen *g, Cons
switch (type_val->data.x_lazy->id) {
case LazyValueIdInvalid:
case LazyValueIdAlignOf:
+ case LazyValueIdSizeOf:
zig_unreachable();
case LazyValueIdSliceType: // it has the len field
case LazyValueIdOptType: // it has the optional bit
@@ -4202,7 +4237,12 @@ static void analyze_fn_async(CodeGen *g, ZigFn *fn, bool resolve_frame) {
return;
}
}
- assert(callee->anal_state == FnAnalStateComplete);
+ if (callee->anal_state != FnAnalStateComplete) {
+ add_node_error(g, call->base.source_node,
+ buf_sprintf("call to function '%s' depends on itself", buf_ptr(&callee->symbol_name)));
+ fn->anal_state = FnAnalStateInvalid;
+ return;
+ }
analyze_fn_async(g, callee, true);
if (callee->anal_state == FnAnalStateInvalid) {
fn->anal_state = FnAnalStateInvalid;
@@ -4480,6 +4520,8 @@ void semantic_analyze(CodeGen *g) {
ZigFn *fn = g->fn_defs.at(g->fn_defs_index);
g->trace_err = nullptr;
analyze_fn_async(g, fn, true);
+ if (fn->anal_state == FnAnalStateInvalid)
+ continue;
if (fn_is_async(fn) && fn->non_async_node != nullptr) {
ErrorMsg *msg = add_node_error(g, fn->proto_node,
buf_sprintf("'%s' cannot be async", buf_ptr(&fn->symbol_name)));
@@ -5632,6 +5674,11 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) {
return ErrorSemanticAnalyzeFail;
}
analyze_fn_async(g, callee, true);
+ if (callee->inferred_async_node == inferred_async_checking) {
+ assert(g->errors.length != 0);
+ frame_type->data.frame.locals_struct = g->builtin_types.entry_invalid;
+ return ErrorSemanticAnalyzeFail;
+ }
if (!fn_is_async(callee))
continue;
diff --git a/src/analyze.hpp b/src/analyze.hpp
index ebfd11f514..7fa0143506 100644
--- a/src/analyze.hpp
+++ b/src/analyze.hpp
@@ -247,6 +247,8 @@ void resolve_llvm_types_fn(CodeGen *g, ZigFn *fn);
bool fn_is_async(ZigFn *fn);
Error type_val_resolve_abi_align(CodeGen *g, ConstExprValue *type_val, uint32_t *abi_align);
+Error type_val_resolve_abi_size(CodeGen *g, AstNode *source_node, ConstExprValue *type_val,
+ size_t *abi_size, size_t *size_in_bits);
ZigType *resolve_union_field_type(CodeGen *g, TypeUnionField *union_field);
ZigType *resolve_struct_field_type(CodeGen *g, TypeStructField *struct_field);
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 4065994d80..29ecb3a47d 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -6845,6 +6845,7 @@ static void set_global_tls(CodeGen *g, ZigVar *var, LLVMValueRef global_value) {
}
static void do_code_gen(CodeGen *g) {
+ Error err;
assert(!g->errors.length);
generate_error_name_table(g);
@@ -6858,6 +6859,8 @@ static void do_code_gen(CodeGen *g) {
// Generate debug info for it but that's it.
ConstExprValue *const_val = var->const_value;
assert(const_val->special != ConstValSpecialRuntime);
+ if ((err = ir_resolve_lazy(g, var->decl_node, const_val)))
+ zig_unreachable();
if (const_val->type != var->var_type) {
zig_panic("TODO debug info for var with ptr casted value");
}
@@ -6875,6 +6878,8 @@ static void do_code_gen(CodeGen *g) {
// Generate debug info for it but that's it.
ConstExprValue *const_val = var->const_value;
assert(const_val->special != ConstValSpecialRuntime);
+ if ((err = ir_resolve_lazy(g, var->decl_node, const_val)))
+ zig_unreachable();
if (const_val->type != var->var_type) {
zig_panic("TODO debug info for var with ptr casted value");
}
diff --git a/src/ir.cpp b/src/ir.cpp
index 52cf69de82..b6a73638e3 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -18066,54 +18066,20 @@ static IrInstruction *ir_analyze_instruction_array_type(IrAnalyze *ira,
zig_unreachable();
}
-static IrInstruction *ir_analyze_instruction_size_of(IrAnalyze *ira,
- IrInstructionSizeOf *size_of_instruction)
-{
- Error err;
- IrInstruction *type_value = size_of_instruction->type_value->child;
- ZigType *type_entry = ir_resolve_type(ira, type_value);
+static IrInstruction *ir_analyze_instruction_size_of(IrAnalyze *ira, IrInstructionSizeOf *instruction) {
+ IrInstruction *result = ir_const(ira, &instruction->base, ira->codegen->builtin_types.entry_num_lit_int);
+ result->value.special = ConstValSpecialLazy;
- if ((err = type_resolve(ira->codegen, type_entry, ResolveStatusSizeKnown)))
+ LazyValueSizeOf *lazy_size_of = allocate<LazyValueSizeOf>(1);
+ lazy_size_of->ira = ira;
+ result->value.data.x_lazy = &lazy_size_of->base;
+ lazy_size_of->base.id = LazyValueIdSizeOf;
+
+ lazy_size_of->target_type = instruction->type_value->child;
+ if (ir_resolve_type_lazy(ira, lazy_size_of->target_type) == nullptr)
return ira->codegen->invalid_instruction;
- switch (type_entry->id) {
- case ZigTypeIdInvalid: // handled above
- zig_unreachable();
- case ZigTypeIdUnreachable:
- case ZigTypeIdUndefined:
- case ZigTypeIdNull:
- case ZigTypeIdBoundFn:
- case ZigTypeIdArgTuple:
- case ZigTypeIdOpaque:
- ir_add_error_node(ira, type_value->source_node,
- buf_sprintf("no size available for type '%s'", buf_ptr(&type_entry->name)));
- return ira->codegen->invalid_instruction;
- case ZigTypeIdMetaType:
- case ZigTypeIdEnumLiteral:
- case ZigTypeIdComptimeFloat:
- case ZigTypeIdComptimeInt:
- case ZigTypeIdVoid:
- case ZigTypeIdBool:
- case ZigTypeIdInt:
- case ZigTypeIdFloat:
- case ZigTypeIdPointer:
- case ZigTypeIdArray:
- case ZigTypeIdStruct:
- case ZigTypeIdOptional:
- case ZigTypeIdErrorUnion:
- case ZigTypeIdErrorSet:
- case ZigTypeIdEnum:
- case ZigTypeIdUnion:
- case ZigTypeIdFn:
- case ZigTypeIdVector:
- case ZigTypeIdFnFrame:
- case ZigTypeIdAnyFrame:
- {
- uint64_t size_in_bytes = type_size(ira->codegen, type_entry);
- return ir_const_unsigned(ira, &size_of_instruction->base, size_in_bytes);
- }
- }
- zig_unreachable();
+ return result;
}
static IrInstruction *ir_analyze_test_non_null(IrAnalyze *ira, IrInstruction *source_inst, IrInstruction *value) {
@@ -25548,6 +25514,61 @@ static Error ir_resolve_lazy_raw(AstNode *source_node, ConstExprValue *val) {
bigint_init_unsigned(&val->data.x_bigint, align_in_bytes);
return ErrorNone;
}
+ case LazyValueIdSizeOf: {
+ LazyValueSizeOf *lazy_size_of = reinterpret_cast<LazyValueSizeOf *>(val->data.x_lazy);
+ IrAnalyze *ira = lazy_size_of->ira;
+
+ if (lazy_size_of->target_type->value.special == ConstValSpecialStatic) {
+ switch (lazy_size_of->target_type->value.data.x_type->id) {
+ case ZigTypeIdInvalid: // handled above
+ zig_unreachable();
+ case ZigTypeIdUnreachable:
+ case ZigTypeIdUndefined:
+ case ZigTypeIdNull:
+ case ZigTypeIdBoundFn:
+ case ZigTypeIdArgTuple:
+ case ZigTypeIdOpaque:
+ ir_add_error(ira, lazy_size_of->target_type,
+ buf_sprintf("no size available for type '%s'",
+ buf_ptr(&lazy_size_of->target_type->value.data.x_type->name)));
+ return ErrorSemanticAnalyzeFail;
+ case ZigTypeIdMetaType:
+ case ZigTypeIdEnumLiteral:
+ case ZigTypeIdComptimeFloat:
+ case ZigTypeIdComptimeInt:
+ case ZigTypeIdVoid:
+ case ZigTypeIdBool:
+ case ZigTypeIdInt:
+ case ZigTypeIdFloat:
+ case ZigTypeIdPointer:
+ case ZigTypeIdArray:
+ case ZigTypeIdStruct:
+ case ZigTypeIdOptional:
+ case ZigTypeIdErrorUnion:
+ case ZigTypeIdErrorSet:
+ case ZigTypeIdEnum:
+ case ZigTypeIdUnion:
+ case ZigTypeIdFn:
+ case ZigTypeIdVector:
+ case ZigTypeIdFnFrame:
+ case ZigTypeIdAnyFrame:
+ break;
+ }
+ }
+
+ uint64_t abi_size;
+ uint64_t size_in_bits;
+ if ((err = type_val_resolve_abi_size(ira->codegen, source_node, &lazy_size_of->target_type->value,
+ &abi_size, &size_in_bits)))
+ {
+ return err;
+ }
+
+ val->special = ConstValSpecialStatic;
+ assert(val->type->id == ZigTypeIdComptimeInt);
+ bigint_init_unsigned(&val->data.x_bigint, abi_size);
+ return ErrorNone;
+ }
case LazyValueIdSliceType: {
LazyValueSliceType *lazy_slice_type = reinterpret_cast<LazyValueSliceType *>(val->data.x_lazy);
IrAnalyze *ira = lazy_slice_type->ira;
--
cgit v1.2.3
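Aside, not part of the commit above: the heart of the change is that @sizeOf no longer computes a number at the point of analysis. The instruction produces a value tagged ConstValSpecialLazy that only records how to get the number, and ir_resolve_lazy fills in the integer later, when something actually needs it. A toy sketch of that resolve-on-demand shape, with illustrative names rather than the compiler's API:

    #include <cassert>
    #include <cstdint>

    enum class Special { Static, Lazy };
    enum class LazyId { SizeOf };

    struct Type { uint64_t abi_size; };

    struct LazyValue { LazyId id; const Type *target_type; };

    struct ConstValue {
        Special special;
        uint64_t x_int;      // meaningful when special == Static
        LazyValue *x_lazy;   // meaningful when special == Lazy
    };

    // Analysis of @sizeOf(T): no layout work happens here.
    ConstValue make_lazy_size_of(LazyValue *storage, const Type *t) {
        *storage = LazyValue{LazyId::SizeOf, t};
        return ConstValue{Special::Lazy, 0, storage};
    }

    // Called only when a concrete integer is finally required.
    void resolve_lazy(ConstValue *val) {
        if (val->special != Special::Lazy)
            return;
        switch (val->x_lazy->id) {
        case LazyId::SizeOf:
            val->x_int = val->x_lazy->target_type->abi_size;
            break;
        }
        val->special = Special::Static;
    }

    int main() {
        Type u32_type{4};
        LazyValue storage;
        ConstValue size = make_lazy_size_of(&storage, &u32_type);
        // ... the value can participate in further analysis untouched ...
        resolve_lazy(&size);
        assert(size.special == Special::Static && size.x_int == 4);
    }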
From 0512beca9d694a667e3ad12a656835b44457fbcd Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 29 Aug 2019 14:46:22 -0400
Subject: comparing against zero participates in lazy values
---
src/analyze.cpp | 2 +-
src/analyze.hpp | 2 +
src/error.cpp | 1 +
src/ir.cpp | 89 ++++++++++++++++++++++++++++++
src/userland.h | 1 +
test/stage1/behavior/sizeof_and_typeof.zig | 15 +++++
6 files changed, 109 insertions(+), 1 deletion(-)
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 411a0d7a01..1ed4e19727 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -973,7 +973,7 @@ ConstExprValue *analyze_const_value(CodeGen *g, Scope *scope, AstNode *node, Zig
nullptr, nullptr, node, type_name, nullptr, nullptr, undef);
}
-static Error type_val_resolve_zero_bits(CodeGen *g, ConstExprValue *type_val, ZigType *parent_type,
+Error type_val_resolve_zero_bits(CodeGen *g, ConstExprValue *type_val, ZigType *parent_type,
ConstExprValue *parent_type_val, bool *is_zero_bits)
{
Error err;
diff --git a/src/analyze.hpp b/src/analyze.hpp
index 7fa0143506..6e8897bf82 100644
--- a/src/analyze.hpp
+++ b/src/analyze.hpp
@@ -249,6 +249,8 @@ bool fn_is_async(ZigFn *fn);
Error type_val_resolve_abi_align(CodeGen *g, ConstExprValue *type_val, uint32_t *abi_align);
Error type_val_resolve_abi_size(CodeGen *g, AstNode *source_node, ConstExprValue *type_val,
size_t *abi_size, size_t *size_in_bits);
+Error type_val_resolve_zero_bits(CodeGen *g, ConstExprValue *type_val, ZigType *parent_type,
+ ConstExprValue *parent_type_val, bool *is_zero_bits);
ZigType *resolve_union_field_type(CodeGen *g, TypeUnionField *union_field);
ZigType *resolve_struct_field_type(CodeGen *g, TypeStructField *struct_field);
diff --git a/src/error.cpp b/src/error.cpp
index 20d429e8bf..753aeb292a 100644
--- a/src/error.cpp
+++ b/src/error.cpp
@@ -55,6 +55,7 @@ const char *err_str(Error err) {
case ErrorBrokenPipe: return "broken pipe";
case ErrorNoSpaceLeft: return "no space left";
case ErrorNoCCompilerInstalled: return "no C compiler installed";
+ case ErrorNotLazy: return "not lazy";
}
return "(invalid error)";
}
diff --git a/src/ir.cpp b/src/ir.cpp
index b6a73638e3..67b5157c97 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -12932,7 +12932,52 @@ static bool optional_value_is_null(ConstExprValue *val) {
}
}
+// Returns ErrorNotLazy when the value cannot be determined
+static Error lazy_cmp_zero(AstNode *source_node, ConstExprValue *val, Cmp *result) {
+ Error err;
+
+ switch (val->special) {
+ case ConstValSpecialRuntime:
+ case ConstValSpecialUndef:
+ return ErrorNotLazy;
+ case ConstValSpecialStatic:
+ switch (val->type->id) {
+ case ZigTypeIdComptimeInt:
+ case ZigTypeIdInt:
+ *result = bigint_cmp_zero(&val->data.x_bigint);
+ return ErrorNone;
+ default:
+ return ErrorNotLazy;
+ }
+ case ConstValSpecialLazy:
+ switch (val->data.x_lazy->id) {
+ case LazyValueIdInvalid:
+ zig_unreachable();
+ case LazyValueIdAlignOf:
+ *result = CmpGT;
+ return ErrorNone;
+ case LazyValueIdSizeOf: {
+ LazyValueSizeOf *lazy_size_of = reinterpret_cast<LazyValueSizeOf *>(val->data.x_lazy);
+ IrAnalyze *ira = lazy_size_of->ira;
+ bool is_zero_bits;
+ if ((err = type_val_resolve_zero_bits(ira->codegen, &lazy_size_of->target_type->value,
+ nullptr, nullptr, &is_zero_bits)))
+ {
+ return err;
+ }
+ *result = is_zero_bits ? CmpEQ : CmpGT;
+ return ErrorNone;
+ }
+ default:
+ return ErrorNotLazy;
+ }
+ }
+ zig_unreachable();
+}
+
static IrInstruction *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp *bin_op_instruction) {
+ Error err;
+
IrInstruction *op1 = bin_op_instruction->op1->child;
if (type_is_invalid(op1->value.type))
return ira->codegen->invalid_instruction;
@@ -13182,6 +13227,50 @@ static IrInstruction *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp *
}
if (one_possible_value || (instr_is_comptime(casted_op1) && instr_is_comptime(casted_op2))) {
+ {
+ // Before resolving the values, we special case comparisons against zero. These can often be done
+ // without resolving lazy values, preventing potential dependency loops.
+ Cmp op1_cmp_zero;
+ if ((err = lazy_cmp_zero(bin_op_instruction->base.source_node, &casted_op1->value, &op1_cmp_zero))) {
+ if (err == ErrorNotLazy) goto never_mind_just_calculate_it_normally;
+ return ira->codegen->invalid_instruction;
+ }
+ Cmp op2_cmp_zero;
+ if ((err = lazy_cmp_zero(bin_op_instruction->base.source_node, &casted_op2->value, &op2_cmp_zero))) {
+ if (err == ErrorNotLazy) goto never_mind_just_calculate_it_normally;
+ return ira->codegen->invalid_instruction;
+ }
+ bool can_cmp_zero = false;
+ Cmp cmp_result;
+ if (op1_cmp_zero == CmpEQ && op2_cmp_zero == CmpEQ) {
+ can_cmp_zero = true;
+ cmp_result = CmpEQ;
+ } else if (op1_cmp_zero == CmpGT && op2_cmp_zero == CmpEQ) {
+ can_cmp_zero = true;
+ cmp_result = CmpGT;
+ } else if (op1_cmp_zero == CmpEQ && op2_cmp_zero == CmpGT) {
+ can_cmp_zero = true;
+ cmp_result = CmpLT;
+ } else if (op1_cmp_zero == CmpLT && op2_cmp_zero == CmpEQ) {
+ can_cmp_zero = true;
+ cmp_result = CmpLT;
+ } else if (op1_cmp_zero == CmpEQ && op2_cmp_zero == CmpLT) {
+ can_cmp_zero = true;
+ cmp_result = CmpGT;
+ } else if (op1_cmp_zero == CmpLT && op2_cmp_zero == CmpGT) {
+ can_cmp_zero = true;
+ cmp_result = CmpLT;
+ } else if (op1_cmp_zero == CmpGT && op2_cmp_zero == CmpLT) {
+ can_cmp_zero = true;
+ cmp_result = CmpGT;
+ }
+ if (can_cmp_zero) {
+ bool answer = resolve_cmp_op_id(op_id, cmp_result);
+ return ir_const_bool(ira, &bin_op_instruction->base, answer);
+ }
+ }
+never_mind_just_calculate_it_normally:
+
ConstExprValue *op1_val = one_possible_value ? &casted_op1->value : ir_resolve_const(ira, casted_op1, UndefBad);
if (op1_val == nullptr)
return ira->codegen->invalid_instruction;
diff --git a/src/userland.h b/src/userland.h
index 43bdbd18b1..43356438fd 100644
--- a/src/userland.h
+++ b/src/userland.h
@@ -75,6 +75,7 @@ enum Error {
ErrorOperationAborted,
ErrorBrokenPipe,
ErrorNoSpaceLeft,
+ ErrorNotLazy,
};
// ABI warning
diff --git a/test/stage1/behavior/sizeof_and_typeof.zig b/test/stage1/behavior/sizeof_and_typeof.zig
index cfad311e06..da79c3a270 100644
--- a/test/stage1/behavior/sizeof_and_typeof.zig
+++ b/test/stage1/behavior/sizeof_and_typeof.zig
@@ -74,3 +74,18 @@ test "@sizeOf on compile-time types" {
expect(@sizeOf(@typeOf(.hi)) == 0);
expect(@sizeOf(@typeOf(type)) == 0);
}
+
+test "@sizeOf(T) == 0 doesn't force resolving struct size" {
+ const S = struct {
+ const Foo = struct {
+ y: if (@sizeOf(Foo) == 0) u64 else u32,
+ };
+ const Bar = struct {
+ x: i32,
+ y: if (0 == @sizeOf(Bar)) u64 else u32,
+ };
+ };
+
+ expect(@sizeOf(S.Foo) == 4);
+ expect(@sizeOf(S.Bar) == 8);
+}
--
cgit v1.2.3
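Aside, not part of the commit above: lazy_cmp_zero only classifies each operand as less than, equal to, or greater than zero (a lazy @alignOf is always greater than zero; a lazy @sizeOf is zero or greater depending on zero-bit-ness), and the chain of if/else branches then derives the ordering of the two operands from those classifications alone. The table is small enough to sketch standalone; both-positive and both-negative stay undecidable and fall through to normal resolution. Illustrative names only:

    #include <cassert>
    #include <optional>

    enum class Cmp { LT, EQ, GT };

    // Given only how each operand compares to zero, try to order op1 vs op2.
    std::optional<Cmp> cmp_from_signs(Cmp op1_vs_zero, Cmp op2_vs_zero) {
        if (op1_vs_zero == Cmp::EQ && op2_vs_zero == Cmp::EQ) return Cmp::EQ;
        if (op1_vs_zero == Cmp::GT && op2_vs_zero == Cmp::EQ) return Cmp::GT;
        if (op1_vs_zero == Cmp::EQ && op2_vs_zero == Cmp::GT) return Cmp::LT;
        if (op1_vs_zero == Cmp::LT && op2_vs_zero == Cmp::EQ) return Cmp::LT;
        if (op1_vs_zero == Cmp::EQ && op2_vs_zero == Cmp::LT) return Cmp::GT;
        if (op1_vs_zero == Cmp::LT && op2_vs_zero == Cmp::GT) return Cmp::LT;
        if (op1_vs_zero == Cmp::GT && op2_vs_zero == Cmp::LT) return Cmp::GT;
        return std::nullopt;  // both > 0 or both < 0: need the real values
    }

    int main() {
        // "@sizeOf(Foo) == 0" with a non-zero-bit Foo: GT vs EQ orders as GT,
        // so the comparison folds to false without resolving Foo's layout.
        assert(cmp_from_signs(Cmp::GT, Cmp::EQ) == Cmp::GT);
        assert(!cmp_from_signs(Cmp::GT, Cmp::GT).has_value());
    }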
From 03910925f06f6127e81de47ff22ce4d24ca565b2 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 29 Aug 2019 21:51:31 -0400
Subject: await does not force async if callee is blocking
closes #3067
---
src/all_types.hpp | 4 ++
src/analyze.cpp | 108 ++++++++++++++++++++++++++------------
src/codegen.cpp | 78 ++++++++++++++++++---------
src/error.cpp | 1 +
src/ir.cpp | 35 ++++++++----
src/userland.h | 1 +
test/stage1/behavior/async_fn.zig | 10 ++++
7 files changed, 169 insertions(+), 68 deletions(-)
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 36d3c0a398..42b3e04f49 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -36,6 +36,7 @@ struct IrInstruction;
struct IrInstructionCast;
struct IrInstructionAllocaGen;
struct IrInstructionCallGen;
+struct IrInstructionAwaitGen;
struct IrBasicBlock;
struct ScopeDecls;
struct ZigWindowsSDK;
@@ -1486,6 +1487,7 @@ struct ZigFn {
AstNode **param_source_nodes;
Buf **param_names;
IrInstruction *err_code_spill;
+ AstNode *assumed_non_async;
AstNode *fn_no_inline_set_node;
AstNode *fn_static_eval_set_node;
@@ -1503,6 +1505,7 @@ struct ZigFn {
ZigList<FnExport> export_list;
ZigList<IrInstructionCallGen *> call_list;
+ ZigList<IrInstructionAwaitGen *> await_list;
LLVMValueRef valgrind_client_request_array;
@@ -3717,6 +3720,7 @@ struct IrInstructionAwaitGen {
IrInstruction *frame;
IrInstruction *result_loc;
+ ZigFn *target_fn;
};
struct IrInstructionResume {
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 1ed4e19727..d1c79f9a1a 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -31,6 +31,7 @@ static void analyze_fn_body(CodeGen *g, ZigFn *fn_table_entry);
static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_resolve_status);
static void preview_use_decl(CodeGen *g, TldUsingNamespace *using_namespace, ScopeDecls *dest_decls_scope);
static void resolve_use_decl(CodeGen *g, TldUsingNamespace *tld_using_namespace, ScopeDecls *dest_decls_scope);
+static void analyze_fn_async(CodeGen *g, ZigFn *fn, bool resolve_frame);
// nullptr means not analyzed yet; this one means currently being analyzed
static const AstNode *inferred_async_checking = reinterpret_cast<AstNode *>(0x1);
@@ -4196,6 +4197,54 @@ static void add_async_error_notes(CodeGen *g, ErrorMsg *msg, ZigFn *fn) {
}
}
+// ErrorNone - not async
+// ErrorIsAsync - yes async
+// ErrorSemanticAnalyzeFail - compile error emitted, result is invalid
+static Error analyze_callee_async(CodeGen *g, ZigFn *fn, ZigFn *callee, AstNode *call_node,
+ bool must_not_be_async)
+{
+ if (callee->type_entry->data.fn.fn_type_id.cc != CallingConventionUnspecified)
+ return ErrorNone;
+ if (callee->anal_state == FnAnalStateReady) {
+ analyze_fn_body(g, callee);
+ if (callee->anal_state == FnAnalStateInvalid) {
+ return ErrorSemanticAnalyzeFail;
+ }
+ }
+ bool callee_is_async;
+ if (callee->anal_state == FnAnalStateComplete) {
+ analyze_fn_async(g, callee, true);
+ if (callee->anal_state == FnAnalStateInvalid) {
+ return ErrorSemanticAnalyzeFail;
+ }
+ callee_is_async = fn_is_async(callee);
+ } else {
+ // If it's already been determined, use that value. Otherwise
+ // assume non-async, emit an error later if it turned out to be async.
+ if (callee->inferred_async_node == nullptr ||
+ callee->inferred_async_node == inferred_async_checking)
+ {
+ callee->assumed_non_async = call_node;
+ callee_is_async = false;
+ } else {
+ callee_is_async = callee->inferred_async_node != inferred_async_none;
+ }
+ }
+ if (callee_is_async) {
+ fn->inferred_async_node = call_node;
+ fn->inferred_async_fn = callee;
+ if (must_not_be_async) {
+ ErrorMsg *msg = add_node_error(g, fn->proto_node,
+ buf_sprintf("function with calling convention '%s' cannot be async",
+ calling_convention_name(fn->type_entry->data.fn.fn_type_id.cc)));
+ add_async_error_notes(g, msg, fn);
+ return ErrorSemanticAnalyzeFail;
+ }
+ return ErrorIsAsync;
+ }
+ return ErrorNone;
+}
+
// This function resolves functions being inferred async.
static void analyze_fn_async(CodeGen *g, ZigFn *fn, bool resolve_frame) {
if (fn->inferred_async_node == inferred_async_checking) {
@@ -4222,47 +4271,40 @@ static void analyze_fn_async(CodeGen *g, ZigFn *fn, bool resolve_frame) {
for (size_t i = 0; i < fn->call_list.length; i += 1) {
IrInstructionCallGen *call = fn->call_list.at(i);
- ZigFn *callee = call->fn_entry;
- if (callee == nullptr) {
+ if (call->fn_entry == nullptr) {
// TODO function pointer call here, could be anything
continue;
}
-
- if (callee->type_entry->data.fn.fn_type_id.cc != CallingConventionUnspecified)
- continue;
- if (callee->anal_state == FnAnalStateReady) {
- analyze_fn_body(g, callee);
- if (callee->anal_state == FnAnalStateInvalid) {
+ switch (analyze_callee_async(g, fn, call->fn_entry, call->base.source_node, must_not_be_async)) {
+ case ErrorSemanticAnalyzeFail:
fn->anal_state = FnAnalStateInvalid;
return;
- }
- }
- if (callee->anal_state != FnAnalStateComplete) {
- add_node_error(g, call->base.source_node,
- buf_sprintf("call to function '%s' depends on itself", buf_ptr(&callee->symbol_name)));
- fn->anal_state = FnAnalStateInvalid;
- return;
- }
- analyze_fn_async(g, callee, true);
- if (callee->anal_state == FnAnalStateInvalid) {
- fn->anal_state = FnAnalStateInvalid;
- return;
+ case ErrorNone:
+ continue;
+ case ErrorIsAsync:
+ if (resolve_frame) {
+ resolve_async_fn_frame(g, fn);
+ }
+ return;
+ default:
+ zig_unreachable();
}
- if (fn_is_async(callee)) {
- fn->inferred_async_node = call->base.source_node;
- fn->inferred_async_fn = callee;
- if (must_not_be_async) {
- ErrorMsg *msg = add_node_error(g, fn->proto_node,
- buf_sprintf("function with calling convention '%s' cannot be async",
- calling_convention_name(fn->type_entry->data.fn.fn_type_id.cc)));
- add_async_error_notes(g, msg, fn);
+ }
+ for (size_t i = 0; i < fn->await_list.length; i += 1) {
+ IrInstructionAwaitGen *await = fn->await_list.at(i);
+ switch (analyze_callee_async(g, fn, await->target_fn, await->base.source_node, must_not_be_async)) {
+ case ErrorSemanticAnalyzeFail:
fn->anal_state = FnAnalStateInvalid;
return;
- }
- if (resolve_frame) {
- resolve_async_fn_frame(g, fn);
- }
- return;
+ case ErrorNone:
+ continue;
+ case ErrorIsAsync:
+ if (resolve_frame) {
+ resolve_async_fn_frame(g, fn);
+ }
+ return;
+ default:
+ zig_unreachable();
}
}
fn->inferred_async_node = inferred_async_none;
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 29ecb3a47d..491ddcd4ea 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -3924,7 +3924,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
LLVMBuildStore(g->builder, awaiter_init_val, awaiter_ptr);
if (ret_has_bits) {
- LLVMValueRef ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 2, "");
+ ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 2, "");
LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start, "");
LLVMBuildStore(g->builder, ret_ptr, ret_ptr_ptr);
@@ -4067,6 +4067,9 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
LLVMValueRef store_instr = LLVMBuildStore(g->builder, result, result_loc);
LLVMSetAlignment(store_instr, get_ptr_align(g, instruction->result_loc->value.type));
return result_loc;
+ } else if (!callee_is_async && instruction->is_async) {
+ LLVMBuildStore(g->builder, result, ret_ptr);
+ return result_loc;
} else {
return result;
}
@@ -5498,6 +5501,44 @@ static LLVMValueRef ir_render_suspend_finish(CodeGen *g, IrExecutable *executabl
return nullptr;
}
+static LLVMValueRef gen_await_early_return(CodeGen *g, IrInstruction *source_instr,
+ LLVMValueRef target_frame_ptr, ZigType *result_type, ZigType *ptr_result_type,
+ LLVMValueRef result_loc, bool non_async)
+{
+ LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
+ LLVMValueRef their_result_ptr = nullptr;
+ if (type_has_bits(result_type) && (non_async || result_loc != nullptr)) {
+ LLVMValueRef their_result_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_ret_start, "");
+ their_result_ptr = LLVMBuildLoad(g->builder, their_result_ptr_ptr, "");
+ if (result_loc != nullptr) {
+ LLVMTypeRef ptr_u8 = LLVMPointerType(LLVMInt8Type(), 0);
+ LLVMValueRef dest_ptr_casted = LLVMBuildBitCast(g->builder, result_loc, ptr_u8, "");
+ LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, their_result_ptr, ptr_u8, "");
+ bool is_volatile = false;
+ uint32_t abi_align = get_abi_alignment(g, result_type);
+ LLVMValueRef byte_count_val = LLVMConstInt(usize_type_ref, type_size(g, result_type), false);
+ ZigLLVMBuildMemCpy(g->builder,
+ dest_ptr_casted, abi_align,
+ src_ptr_casted, abi_align, byte_count_val, is_volatile);
+ }
+ }
+ if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) {
+ LLVMValueRef their_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr,
+ frame_index_trace_arg(g, result_type), "");
+ LLVMValueRef src_trace_ptr = LLVMBuildLoad(g->builder, their_trace_ptr_ptr, "");
+ LLVMValueRef dest_trace_ptr = get_cur_err_ret_trace_val(g, source_instr->scope);
+ LLVMValueRef args[] = { dest_trace_ptr, src_trace_ptr };
+ ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2,
+ get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
+ }
+ if (non_async && type_has_bits(result_type)) {
+ LLVMValueRef result_ptr = (result_loc == nullptr) ? their_result_ptr : result_loc;
+ return get_handle_value(g, result_ptr, result_type, ptr_result_type);
+ } else {
+ return nullptr;
+ }
+}
+
static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInstructionAwaitGen *instruction) {
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
LLVMValueRef zero = LLVMConstNull(usize_type_ref);
@@ -5505,6 +5546,14 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst
ZigType *result_type = instruction->base.value.type;
ZigType *ptr_result_type = get_pointer_to_type(g, result_type, true);
+ LLVMValueRef result_loc = (instruction->result_loc == nullptr) ?
+ nullptr : ir_llvm_value(g, instruction->result_loc);
+
+ if (instruction->target_fn != nullptr && !fn_is_async(instruction->target_fn)) {
+ return gen_await_early_return(g, &instruction->base, target_frame_ptr, result_type,
+ ptr_result_type, result_loc, true);
+ }
+
// Prepare to be suspended
LLVMBasicBlockRef resume_bb = gen_suspend_begin(g, "AwaitResume");
LLVMBasicBlockRef end_bb = LLVMAppendBasicBlock(g->cur_fn_val, "AwaitEnd");
@@ -5512,9 +5561,8 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst
// At this point resuming the function will continue from resume_bb.
// This code is as if it is running inside the suspend block.
+
// supply the awaiter return pointer
- LLVMValueRef result_loc = (instruction->result_loc == nullptr) ?
- nullptr : ir_llvm_value(g, instruction->result_loc);
if (type_has_bits(result_type)) {
LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_ret_start + 1, "");
if (result_loc == nullptr) {
@@ -5562,28 +5610,8 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst
// Early return: The async function has already completed. We must copy the result and
// the error return trace if applicable.
LLVMPositionBuilderAtEnd(g->builder, early_return_block);
- if (type_has_bits(result_type) && result_loc != nullptr) {
- LLVMValueRef their_result_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_ret_start, "");
- LLVMValueRef their_result_ptr = LLVMBuildLoad(g->builder, their_result_ptr_ptr, "");
- LLVMTypeRef ptr_u8 = LLVMPointerType(LLVMInt8Type(), 0);
- LLVMValueRef dest_ptr_casted = LLVMBuildBitCast(g->builder, result_loc, ptr_u8, "");
- LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, their_result_ptr, ptr_u8, "");
- bool is_volatile = false;
- uint32_t abi_align = get_abi_alignment(g, result_type);
- LLVMValueRef byte_count_val = LLVMConstInt(usize_type_ref, type_size(g, result_type), false);
- ZigLLVMBuildMemCpy(g->builder,
- dest_ptr_casted, abi_align,
- src_ptr_casted, abi_align, byte_count_val, is_volatile);
- }
- if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) {
- LLVMValueRef their_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr,
- frame_index_trace_arg(g, result_type), "");
- LLVMValueRef src_trace_ptr = LLVMBuildLoad(g->builder, their_trace_ptr_ptr, "");
- LLVMValueRef dest_trace_ptr = get_cur_err_ret_trace_val(g, instruction->base.scope);
- LLVMValueRef args[] = { dest_trace_ptr, src_trace_ptr };
- ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2,
- get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
- }
+ gen_await_early_return(g, &instruction->base, target_frame_ptr, result_type, ptr_result_type,
+ result_loc, false);
LLVMBuildBr(g->builder, end_bb);
LLVMPositionBuilderAtEnd(g->builder, resume_bb);
diff --git a/src/error.cpp b/src/error.cpp
index 753aeb292a..86df76ed4e 100644
--- a/src/error.cpp
+++ b/src/error.cpp
@@ -56,6 +56,7 @@ const char *err_str(Error err) {
case ErrorNoSpaceLeft: return "no space left";
case ErrorNoCCompilerInstalled: return "no C compiler installed";
case ErrorNotLazy: return "not lazy";
+ case ErrorIsAsync: return "is async";
}
return "(invalid error)";
}
diff --git a/src/ir.cpp b/src/ir.cpp
index ec414a5adc..d53042fedf 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -3268,7 +3268,7 @@ static IrInstruction *ir_build_await_src(IrBuilder *irb, Scope *scope, AstNode *
return &instruction->base;
}
-static IrInstruction *ir_build_await_gen(IrAnalyze *ira, IrInstruction *source_instruction,
+static IrInstructionAwaitGen *ir_build_await_gen(IrAnalyze *ira, IrInstruction *source_instruction,
IrInstruction *frame, ZigType *result_type, IrInstruction *result_loc)
{
IrInstructionAwaitGen *instruction = ir_build_instruction<IrInstructionAwaitGen>(&ira->new_irb,
@@ -3280,7 +3280,7 @@ static IrInstruction *ir_build_await_gen(IrAnalyze *ira, IrInstruction *source_i
ir_ref_instruction(frame, ira->new_irb.current_basic_block);
if (result_loc != nullptr) ir_ref_instruction(result_loc, ira->new_irb.current_basic_block);
- return &instruction->base;
+ return instruction;
}
static IrInstruction *ir_build_resume(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *frame) {
@@ -24763,18 +24763,22 @@ static IrInstruction *ir_analyze_instruction_suspend_finish(IrAnalyze *ira,
}
static IrInstruction *analyze_frame_ptr_to_anyframe_T(IrAnalyze *ira, IrInstruction *source_instr,
- IrInstruction *frame_ptr)
+ IrInstruction *frame_ptr, ZigFn **target_fn)
{
if (type_is_invalid(frame_ptr->value.type))
return ira->codegen->invalid_instruction;
+ *target_fn = nullptr;
+
ZigType *result_type;
IrInstruction *frame;
if (frame_ptr->value.type->id == ZigTypeIdPointer &&
frame_ptr->value.type->data.pointer.ptr_len == PtrLenSingle &&
frame_ptr->value.type->data.pointer.child_type->id == ZigTypeIdFnFrame)
{
- result_type = frame_ptr->value.type->data.pointer.child_type->data.frame.fn->type_entry->data.fn.fn_type_id.return_type;
+ ZigFn *func = frame_ptr->value.type->data.pointer.child_type->data.frame.fn;
+ result_type = func->type_entry->data.fn.fn_type_id.return_type;
+ *target_fn = func;
frame = frame_ptr;
} else {
frame = ir_get_deref(ira, source_instr, frame_ptr, nullptr);
@@ -24782,7 +24786,9 @@ static IrInstruction *analyze_frame_ptr_to_anyframe_T(IrAnalyze *ira, IrInstruct
frame->value.type->data.pointer.ptr_len == PtrLenSingle &&
frame->value.type->data.pointer.child_type->id == ZigTypeIdFnFrame)
{
- result_type = frame->value.type->data.pointer.child_type->data.frame.fn->type_entry->data.fn.fn_type_id.return_type;
+ ZigFn *func = frame->value.type->data.pointer.child_type->data.frame.fn;
+ result_type = func->type_entry->data.fn.fn_type_id.return_type;
+ *target_fn = func;
} else if (frame->value.type->id != ZigTypeIdAnyFrame ||
frame->value.type->data.any_frame.result_type == nullptr)
{
@@ -24803,7 +24809,11 @@ static IrInstruction *analyze_frame_ptr_to_anyframe_T(IrAnalyze *ira, IrInstruct
}
static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstructionAwaitSrc *instruction) {
- IrInstruction *frame = analyze_frame_ptr_to_anyframe_T(ira, &instruction->base, instruction->frame->child);
+ IrInstruction *operand = instruction->frame->child;
+ if (type_is_invalid(operand->value.type))
+ return ira->codegen->invalid_instruction;
+ ZigFn *target_fn;
+ IrInstruction *frame = analyze_frame_ptr_to_anyframe_T(ira, &instruction->base, operand, &target_fn);
if (type_is_invalid(frame->value.type))
return ira->codegen->invalid_instruction;
@@ -24812,8 +24822,11 @@ static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstruction
ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
ir_assert(fn_entry != nullptr, &instruction->base);
- if (fn_entry->inferred_async_node == nullptr) {
- fn_entry->inferred_async_node = instruction->base.source_node;
+ // If it's not @Frame(func) then it's definitely a suspend point
+ if (target_fn == nullptr) {
+ if (fn_entry->inferred_async_node == nullptr) {
+ fn_entry->inferred_async_node = instruction->base.source_node;
+ }
}
if (type_can_fail(result_type)) {
@@ -24830,8 +24843,10 @@ static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstruction
result_loc = nullptr;
}
- IrInstruction *result = ir_build_await_gen(ira, &instruction->base, frame, result_type, result_loc);
- return ir_finish_anal(ira, result);
+ IrInstructionAwaitGen *result = ir_build_await_gen(ira, &instruction->base, frame, result_type, result_loc);
+ result->target_fn = target_fn;
+ fn_entry->await_list.append(result);
+ return ir_finish_anal(ira, &result->base);
}
static IrInstruction *ir_analyze_instruction_resume(IrAnalyze *ira, IrInstructionResume *instruction) {
diff --git a/src/userland.h b/src/userland.h
index 43356438fd..c92caf327e 100644
--- a/src/userland.h
+++ b/src/userland.h
@@ -76,6 +76,7 @@ enum Error {
ErrorBrokenPipe,
ErrorNoSpaceLeft,
ErrorNotLazy,
+ ErrorIsAsync,
};
// ABI warning
diff --git a/test/stage1/behavior/async_fn.zig b/test/stage1/behavior/async_fn.zig
index ccfc4d1ea6..dfed1c4ab7 100644
--- a/test/stage1/behavior/async_fn.zig
+++ b/test/stage1/behavior/async_fn.zig
@@ -844,3 +844,13 @@ test "cast fn to async fn when it is inferred to be async" {
resume S.frame;
expect(S.ok);
}
+
+test "await does not force async if callee is blocking" {
+ const S = struct {
+ fn simple() i32 {
+ return 1234;
+ }
+ };
+ var x = async S.simple();
+ expect(await x == 1234);
+}
--
cgit v1.2.3
From e9a4bcbcc6ac89c5526a6baaf2b0df49d0577eb4 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 29 Aug 2019 22:44:07 -0400
Subject: fix regressions
---
src/analyze.cpp | 22 +++++++++++++++++++---
src/ir.cpp | 4 +++-
test/compile_errors.zig | 8 ++++----
3 files changed, 26 insertions(+), 8 deletions(-)
diff --git a/src/analyze.cpp b/src/analyze.cpp
index d1c79f9a1a..0bc42cc971 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -4174,8 +4174,14 @@ static void add_async_error_notes(CodeGen *g, ErrorMsg *msg, ZigFn *fn) {
assert(fn->inferred_async_node != inferred_async_checking);
assert(fn->inferred_async_node != inferred_async_none);
if (fn->inferred_async_fn != nullptr) {
- ErrorMsg *new_msg = add_error_note(g, msg, fn->inferred_async_node,
- buf_sprintf("async function call here"));
+ ErrorMsg *new_msg;
+ if (fn->inferred_async_node->type == NodeTypeAwaitExpr) {
+ new_msg = add_error_note(g, msg, fn->inferred_async_node,
+ buf_create_from_str("await here is a suspend point"));
+ } else {
+ new_msg = add_error_note(g, msg, fn->inferred_async_node,
+ buf_sprintf("async function call here"));
+ }
return add_async_error_notes(g, new_msg, fn->inferred_async_fn);
} else if (fn->inferred_async_node->type == NodeTypeFnProto) {
add_error_note(g, msg, fn->inferred_async_node,
@@ -4185,7 +4191,7 @@ static void add_async_error_notes(CodeGen *g, ErrorMsg *msg, ZigFn *fn) {
buf_sprintf("suspends here"));
} else if (fn->inferred_async_node->type == NodeTypeAwaitExpr) {
add_error_note(g, msg, fn->inferred_async_node,
- buf_sprintf("await is a suspend point"));
+ buf_sprintf("await here is a suspend point"));
} else if (fn->inferred_async_node->type == NodeTypeFnCallExpr &&
fn->inferred_async_node->data.fn_call_expr.is_builtin)
{
@@ -4240,6 +4246,16 @@ static Error analyze_callee_async(CodeGen *g, ZigFn *fn, ZigFn *callee, AstNode
add_async_error_notes(g, msg, fn);
return ErrorSemanticAnalyzeFail;
}
+ if (fn->assumed_non_async != nullptr) {
+ ErrorMsg *msg = add_node_error(g, fn->proto_node,
+ buf_sprintf("unable to infer whether '%s' should be async",
+ buf_ptr(&fn->symbol_name)));
+ add_error_note(g, msg, fn->assumed_non_async,
+ buf_sprintf("assumed to be non-async here"));
+ add_async_error_notes(g, msg, fn);
+ fn->anal_state = FnAnalStateInvalid;
+ return ErrorSemanticAnalyzeFail;
+ }
return ErrorIsAsync;
}
return ErrorNone;
diff --git a/src/ir.cpp b/src/ir.cpp
index d53042fedf..dfe9132e2d 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -10640,7 +10640,9 @@ static void ir_finish_bb(IrAnalyze *ira) {
static IrInstruction *ir_unreach_error(IrAnalyze *ira) {
ira->old_bb_index = SIZE_MAX;
- assert(ira->new_irb.exec->first_err_trace_msg != nullptr);
+ if (ira->new_irb.exec->first_err_trace_msg == nullptr) {
+ ira->new_irb.exec->first_err_trace_msg = ira->codegen->trace_err;
+ }
return ira->codegen->unreach_instruction;
}
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 812b236716..91916e6f38 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -273,7 +273,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\}
,
"tmp.zig:1:1: error: function with calling convention 'ccc' cannot be async",
- "tmp.zig:3:18: note: await is a suspend point",
+ "tmp.zig:3:18: note: await here is a suspend point",
);
cases.add(
@@ -507,11 +507,11 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
cases.add(
"@sizeOf bad type",
- \\export fn entry() void {
- \\ _ = @sizeOf(@typeOf(null));
+ \\export fn entry() usize {
+ \\ return @sizeOf(@typeOf(null));
\\}
,
- "tmp.zig:2:17: error: no size available for type '(null)'",
+ "tmp.zig:2:20: error: no size available for type '(null)'",
);
cases.add(
--
cgit v1.2.3
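Aside, not part of the commits above: together, the previous commit's assumed_non_async field and this commit's new diagnostic implement an assume-then-verify scheme for call graphs with cycles. When a callee's async-ness is not yet known, the call site is recorded and the callee is optimistically treated as non-async; if it later turns out to be async, the compiler can point at the call site where the wrong assumption was made instead of looping or hitting an assert. A toy sketch of that shape, with illustrative names rather than the compiler's API:

    #include <cassert>
    #include <cstdio>

    struct AstNode { int line; };

    enum class AsyncState { Unknown, NotAsync, Async };

    struct Fn {
        const char *name;
        AsyncState state = AsyncState::Unknown;
        const AstNode *assumed_non_async = nullptr; // call site of the guess
    };

    // Can `callee` be treated as a plain (non-async) call from `call_site`?
    // If its state is still unknown, assume yes and remember where we did so.
    bool assume_callee_not_async(Fn *callee, const AstNode *call_site) {
        if (callee->state == AsyncState::Async)
            return false;
        if (callee->state == AsyncState::Unknown)
            callee->assumed_non_async = call_site;
        return true;
    }

    // Later discovery that `fn` is in fact async; if a caller already relied
    // on the opposite, report the contradiction at the recorded call site.
    void mark_async(Fn *fn) {
        fn->state = AsyncState::Async;
        if (fn->assumed_non_async != nullptr) {
            std::printf("error: unable to infer whether '%s' should be async\n", fn->name);
            std::printf("note: assumed to be non-async here (line %d)\n",
                        fn->assumed_non_async->line);
        }
    }

    int main() {
        Fn foo{"foo"};
        AstNode call_site{12};
        assert(assume_callee_not_async(&foo, &call_site));
        mark_async(&foo); // prints the pair of diagnostics
    }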
From 6ab8b2aab4b146a7d1d882686199eace19989011 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 30 Aug 2019 20:06:02 -0400
Subject: support recursive async and non-async functions
which heap allocate their own frames
related: #1006
---
src/all_types.hpp | 2 +-
src/analyze.cpp | 7 +++-
src/ast_render.cpp | 2 +-
src/codegen.cpp | 7 +++-
src/ir.cpp | 83 ++++++++++++++++++++++++++++++++-------
src/parser.cpp | 2 +-
std/mem.zig | 10 ++++-
test/compile_errors.zig | 2 +
test/stage1/behavior/async_fn.zig | 65 ++++++++++++++++++++++++++++++
9 files changed, 157 insertions(+), 23 deletions(-)
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 42b3e04f49..87756d338f 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -627,7 +627,7 @@ struct AstNodeParamDecl {
AstNode *type;
Token *var_token;
bool is_noalias;
- bool is_inline;
+ bool is_comptime;
bool is_var_args;
};
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 0bc42cc971..43c8d499db 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -1556,7 +1556,7 @@ static ZigType *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_sc
AstNode *param_node = fn_proto->params.at(fn_type_id.next_param_index);
assert(param_node->type == NodeTypeParamDecl);
- bool param_is_comptime = param_node->data.param_decl.is_inline;
+ bool param_is_comptime = param_node->data.param_decl.is_comptime;
bool param_is_var_args = param_node->data.param_decl.is_var_args;
if (param_is_comptime) {
@@ -8234,6 +8234,10 @@ static void resolve_llvm_types_anyerror(CodeGen *g) {
}
static void resolve_llvm_types_async_frame(CodeGen *g, ZigType *frame_type, ResolveStatus wanted_resolve_status) {
+ Error err;
+ if ((err = type_resolve(g, frame_type, ResolveStatusSizeKnown)))
+ zig_unreachable();
+
ZigType *passed_frame_type = fn_is_async(frame_type->data.frame.fn) ? frame_type : nullptr;
resolve_llvm_types_struct(g, frame_type->data.frame.locals_struct, wanted_resolve_status, passed_frame_type);
frame_type->llvm_type = frame_type->data.frame.locals_struct->llvm_type;
@@ -8375,7 +8379,6 @@ static void resolve_llvm_types_any_frame(CodeGen *g, ZigType *any_frame_type, Re
}
static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_resolve_status) {
- assert(type->id == ZigTypeIdOpaque || type_is_resolved(type, ResolveStatusSizeKnown));
assert(wanted_resolve_status > ResolveStatusSizeKnown);
switch (type->id) {
case ZigTypeIdInvalid:
diff --git a/src/ast_render.cpp b/src/ast_render.cpp
index 334dc37b59..54a659f7b1 100644
--- a/src/ast_render.cpp
+++ b/src/ast_render.cpp
@@ -448,7 +448,7 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
assert(param_decl->type == NodeTypeParamDecl);
if (param_decl->data.param_decl.name != nullptr) {
const char *noalias_str = param_decl->data.param_decl.is_noalias ? "noalias " : "";
- const char *inline_str = param_decl->data.param_decl.is_inline ? "inline " : "";
+ const char *inline_str = param_decl->data.param_decl.is_comptime ? "comptime " : "";
fprintf(ar->f, "%s%s", noalias_str, inline_str);
print_symbol(ar, param_decl->data.param_decl.name);
fprintf(ar->f, ": ");
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 491ddcd4ea..d87b5d0aeb 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -6340,9 +6340,12 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c
ZigType *type_entry = const_val->type;
assert(type_has_bits(type_entry));
- switch (const_val->special) {
+check: switch (const_val->special) {
case ConstValSpecialLazy:
- zig_unreachable();
+ if ((err = ir_resolve_lazy(g, nullptr, const_val))) {
+ report_errors_and_exit(g);
+ }
+ goto check;
case ConstValSpecialRuntime:
zig_unreachable();
case ConstValSpecialUndef:
diff --git a/src/ir.cpp b/src/ir.cpp
index 6f740cc937..02a134c62e 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -9012,7 +9012,42 @@ static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, IrInstruction *instruc
return false;
}
- ConstExprValue *const_val = ir_resolve_const(ira, instruction, UndefBad);
+ ConstExprValue *const_val = ir_resolve_const(ira, instruction, LazyOkNoUndef);
+ if (const_val == nullptr)
+ return false;
+
+ if (const_val->special == ConstValSpecialLazy) {
+ switch (const_val->data.x_lazy->id) {
+ case LazyValueIdAlignOf: {
+ // This is guaranteed to fit into a u29
+ if (other_type->id == ZigTypeIdComptimeInt)
+ return true;
+ size_t align_bits = get_align_amt_type(ira->codegen)->data.integral.bit_count;
+ if (other_type->id == ZigTypeIdInt && !other_type->data.integral.is_signed &&
+ other_type->data.integral.bit_count >= align_bits)
+ {
+ return true;
+ }
+ break;
+ }
+ case LazyValueIdSizeOf: {
+ // This is guaranteed to fit into a usize
+ if (other_type->id == ZigTypeIdComptimeInt)
+ return true;
+ size_t usize_bits = ira->codegen->builtin_types.entry_usize->data.integral.bit_count;
+ if (other_type->id == ZigTypeIdInt && !other_type->data.integral.is_signed &&
+ other_type->data.integral.bit_count >= usize_bits)
+ {
+ return true;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ const_val = ir_resolve_const(ira, instruction, UndefBad);
if (const_val == nullptr)
return false;
@@ -10262,7 +10297,7 @@ static void copy_const_val(ConstExprValue *dest, ConstExprValue *src, bool same_
memcpy(dest, src, sizeof(ConstExprValue));
if (!same_global_refs) {
dest->global_refs = global_refs;
- if (src->special == ConstValSpecialUndef)
+ if (src->special != ConstValSpecialStatic)
return;
if (dest->type->id == ZigTypeIdStruct) {
dest->data.x_struct.fields = create_const_vals(dest->type->data.structure.src_field_count);
@@ -11213,7 +11248,7 @@ static IrInstruction *ir_get_ref(IrAnalyze *ira, IrInstruction *source_instructi
return ira->codegen->invalid_instruction;
if (instr_is_comptime(value)) {
- ConstExprValue *val = ir_resolve_const(ira, value, UndefOk);
+ ConstExprValue *val = ir_resolve_const(ira, value, LazyOk);
if (!val)
return ira->codegen->invalid_instruction;
return ir_get_const_ptr(ira, source_instruction, val, value->value.type,
@@ -12125,7 +12160,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
if (wanted_type->id == ZigTypeIdComptimeInt || wanted_type->id == ZigTypeIdInt) {
IrInstruction *result = ir_const(ira, source_instr, wanted_type);
if (actual_type->id == ZigTypeIdComptimeInt || actual_type->id == ZigTypeIdInt) {
- bigint_init_bigint(&result->value.data.x_bigint, &value->value.data.x_bigint);
+ copy_const_val(&result->value, &value->value, false);
+ result->value.type = wanted_type;
} else {
float_init_bigint(&result->value.data.x_bigint, &value->value);
}
@@ -15301,7 +15337,7 @@ static bool ir_analyze_fn_call_generic_arg(IrAnalyze *ira, AstNode *fn_proto_nod
}
}
- bool comptime_arg = param_decl_node->data.param_decl.is_inline ||
+ bool comptime_arg = param_decl_node->data.param_decl.is_comptime ||
casted_arg->value.type->id == ZigTypeIdComptimeInt || casted_arg->value.type->id == ZigTypeIdComptimeFloat;
ConstExprValue *arg_val;
@@ -17594,6 +17630,11 @@ static IrInstruction *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstruc
ConstExprValue *child_val = const_ptr_pointee(ira, ira->codegen, container_ptr_val, source_node);
if (child_val == nullptr)
return ira->codegen->invalid_instruction;
+ if ((err = ir_resolve_const_val(ira->codegen, ira->new_irb.exec,
+ field_ptr_instruction->base.source_node, child_val, UndefBad)))
+ {
+ return ira->codegen->invalid_instruction;
+ }
ZigType *child_type = child_val->data.x_type;
if (type_is_invalid(child_type)) {
@@ -21293,8 +21334,10 @@ static IrInstruction *ir_analyze_instruction_from_bytes(IrAnalyze *ira, IrInstru
src_ptr_align = get_abi_alignment(ira->codegen, target->value.type);
}
- if ((err = type_resolve(ira->codegen, dest_child_type, ResolveStatusSizeKnown)))
- return ira->codegen->invalid_instruction;
+ if (src_ptr_align != 0) {
+ if ((err = type_resolve(ira->codegen, dest_child_type, ResolveStatusAlignmentKnown)))
+ return ira->codegen->invalid_instruction;
+ }
ZigType *dest_ptr_type = get_pointer_to_type_extra(ira->codegen, dest_child_type,
src_ptr_const, src_ptr_volatile, PtrLenUnknown,
@@ -21337,6 +21380,8 @@ static IrInstruction *ir_analyze_instruction_from_bytes(IrAnalyze *ira, IrInstru
}
if (have_known_len) {
+ if ((err = type_resolve(ira->codegen, dest_child_type, ResolveStatusSizeKnown)))
+ return ira->codegen->invalid_instruction;
uint64_t child_type_size = type_size(ira->codegen, dest_child_type);
uint64_t remainder = known_len % child_type_size;
if (remainder != 0) {
@@ -23963,15 +24008,23 @@ static IrInstruction *ir_analyze_instruction_ptr_type(IrAnalyze *ira, IrInstruct
}
static IrInstruction *ir_analyze_instruction_align_cast(IrAnalyze *ira, IrInstructionAlignCast *instruction) {
- uint32_t align_bytes;
- IrInstruction *align_bytes_inst = instruction->align_bytes->child;
- if (!ir_resolve_align(ira, align_bytes_inst, nullptr, &align_bytes))
- return ira->codegen->invalid_instruction;
-
IrInstruction *target = instruction->target->child;
if (type_is_invalid(target->value.type))
return ira->codegen->invalid_instruction;
+ ZigType *elem_type = nullptr;
+ if (is_slice(target->value.type)) {
+ ZigType *slice_ptr_type = target->value.type->data.structure.fields[slice_ptr_index].type_entry;
+ elem_type = slice_ptr_type->data.pointer.child_type;
+ } else if (target->value.type->id == ZigTypeIdPointer) {
+ elem_type = target->value.type->data.pointer.child_type;
+ }
+
+ uint32_t align_bytes;
+ IrInstruction *align_bytes_inst = instruction->align_bytes->child;
+ if (!ir_resolve_align(ira, align_bytes_inst, elem_type, &align_bytes))
+ return ira->codegen->invalid_instruction;
+
IrInstruction *result = ir_align_cast(ira, target, align_bytes, true);
if (type_is_invalid(result->value.type))
return ira->codegen->invalid_instruction;
@@ -25644,7 +25697,7 @@ static Error ir_resolve_lazy_raw(AstNode *source_node, ConstExprValue *val) {
}
val->special = ConstValSpecialStatic;
- assert(val->type->id == ZigTypeIdComptimeInt);
+ assert(val->type->id == ZigTypeIdComptimeInt || val->type->id == ZigTypeIdInt);
bigint_init_unsigned(&val->data.x_bigint, align_in_bytes);
return ErrorNone;
}
@@ -25699,7 +25752,7 @@ static Error ir_resolve_lazy_raw(AstNode *source_node, ConstExprValue *val) {
}
val->special = ConstValSpecialStatic;
- assert(val->type->id == ZigTypeIdComptimeInt);
+ assert(val->type->id == ZigTypeIdComptimeInt || val->type->id == ZigTypeIdInt);
bigint_init_unsigned(&val->data.x_bigint, abi_size);
return ErrorNone;
}
@@ -25885,7 +25938,7 @@ static Error ir_resolve_lazy_raw(AstNode *source_node, ConstExprValue *val) {
Error ir_resolve_lazy(CodeGen *codegen, AstNode *source_node, ConstExprValue *val) {
Error err;
if ((err = ir_resolve_lazy_raw(source_node, val))) {
- if (codegen->trace_err != nullptr && !source_node->already_traced_this_node) {
+ if (codegen->trace_err != nullptr && source_node != nullptr && !source_node->already_traced_this_node) {
source_node->already_traced_this_node = true;
codegen->trace_err = add_error_note(codegen, codegen->trace_err, source_node,
buf_create_from_str("referenced here"));
diff --git a/src/parser.cpp b/src/parser.cpp
index 6cd6c2f045..21bbc4d246 100644
--- a/src/parser.cpp
+++ b/src/parser.cpp
@@ -2075,7 +2075,7 @@ static AstNode *ast_parse_param_decl(ParseContext *pc) {
res->column = first->start_column;
res->data.param_decl.name = token_buf(name);
res->data.param_decl.is_noalias = first->id == TokenIdKeywordNoAlias;
- res->data.param_decl.is_inline = first->id == TokenIdKeywordCompTime;
+ res->data.param_decl.is_comptime = first->id == TokenIdKeywordCompTime;
return res;
}
diff --git a/std/mem.zig b/std/mem.zig
index 014be487cc..61dc5c7a30 100644
--- a/std/mem.zig
+++ b/std/mem.zig
@@ -117,7 +117,15 @@ pub const Allocator = struct {
const byte_slice = try self.reallocFn(self, ([*]u8)(undefined)[0..0], undefined, byte_count, a);
assert(byte_slice.len == byte_count);
@memset(byte_slice.ptr, undefined, byte_slice.len);
- return @bytesToSlice(T, @alignCast(a, byte_slice));
+ if (alignment == null) {
+ // TODO This is a workaround for zig not being able to successfully do
+ // @bytesToSlice(T, @alignCast(a, byte_slice)) without resolving alignment of T,
+ // which causes a circular dependency in async functions which try to heap-allocate
+ // their own frame with @Frame(func).
+ return @intToPtr([*]T, @ptrToInt(byte_slice.ptr))[0..n];
+ } else {
+ return @bytesToSlice(T, @alignCast(a, byte_slice));
+ }
}
/// This function requests a new byte size for an existing allocation,
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 91916e6f38..a9e99f4799 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -1051,6 +1051,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\const Foo = struct {};
\\export fn a() void {
\\ const T = [*c]Foo;
+ \\ var t: T = undefined;
\\}
,
"tmp.zig:3:19: error: C pointers cannot point to non-C-ABI-compatible type 'Foo'",
@@ -2290,6 +2291,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
"error union operator with non error set LHS",
\\comptime {
\\ const z = i32!i32;
+ \\ var x: z = undefined;
\\}
,
"tmp.zig:2:15: error: expected error set type, found type 'i32'",
diff --git a/test/stage1/behavior/async_fn.zig b/test/stage1/behavior/async_fn.zig
index dfed1c4ab7..76a2780737 100644
--- a/test/stage1/behavior/async_fn.zig
+++ b/test/stage1/behavior/async_fn.zig
@@ -854,3 +854,68 @@ test "await does not force async if callee is blocking" {
var x = async S.simple();
expect(await x == 1234);
}
+
+test "recursive async function" {
+ expect(recursiveAsyncFunctionTest(false).doTheTest() == 55);
+ expect(recursiveAsyncFunctionTest(true).doTheTest() == 55);
+}
+
+fn recursiveAsyncFunctionTest(comptime suspending_implementation: bool) type {
+ return struct {
+ fn fib(allocator: *std.mem.Allocator, x: u32) error{OutOfMemory}!u32 {
+ if (x <= 1) return x;
+
+ if (suspending_implementation) {
+ suspend {
+ resume @frame();
+ }
+ }
+
+ const f1 = try allocator.create(@Frame(fib));
+ defer allocator.destroy(f1);
+
+ const f2 = try allocator.create(@Frame(fib));
+ defer allocator.destroy(f2);
+
+ f1.* = async fib(allocator, x - 1);
+ var f1_awaited = false;
+ errdefer if (!f1_awaited) {
+ _ = await f1;
+ };
+
+ f2.* = async fib(allocator, x - 2);
+ var f2_awaited = false;
+ errdefer if (!f2_awaited) {
+ _ = await f2;
+ };
+
+ var sum: u32 = 0;
+
+ f1_awaited = true;
+ const result_f1 = await f1; // TODO https://github.com/ziglang/zig/issues/3077
+ sum += try result_f1;
+
+ f2_awaited = true;
+ const result_f2 = await f2; // TODO https://github.com/ziglang/zig/issues/3077
+ sum += try result_f2;
+
+ return sum;
+ }
+
+ fn doTheTest() u32 {
+ if (suspending_implementation) {
+ var result: u32 = undefined;
+ _ = async amain(&result);
+ return result;
+ } else {
+ return fib(std.heap.direct_allocator, 10) catch unreachable;
+ }
+ }
+
+ fn amain(result: *u32) void {
+ var x = async fib(std.heap.direct_allocator, 10);
+ const res = await x; // TODO https://github.com/ziglang/zig/issues/3077
+ result.* = res catch unreachable;
+ }
+ };
+}
--
cgit v1.2.3
From a2230639232c069e4052a2e994dd5c0bd4e2517f Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 31 Aug 2019 10:38:18 -0400
Subject: `@typeOf` now guarantees no runtime side effects
related: #1627
---
doc/langref.html.in | 16 ++++++++++++++++
src/all_types.hpp | 8 ++++++++
src/analyze.cpp | 22 ++++++++++++++++++++++
src/analyze.hpp | 2 ++
src/codegen.cpp | 7 +++++++
src/ir.cpp | 10 +++++++++-
test/stage1/behavior/sizeof_and_typeof.zig | 26 ++++++++++++++++++++++++++
7 files changed, 90 insertions(+), 1 deletion(-)
(limited to 'src/analyze.cpp')
diff --git a/doc/langref.html.in b/doc/langref.html.in
index c1fe08ddb6..44b2256813 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -8114,7 +8114,23 @@ pub const TypeInfo = union(TypeId) {
This function returns a compile-time constant, which is the type of the
expression passed as an argument. The expression is evaluated.
+ {#syntax#}@typeOf{#endsyntax#} guarantees no runtime side effects within the expression:
+ {#code_begin|test#}
+const std = @import("std");
+const assert = std.debug.assert;
+
+test "no runtime side effects" {
+ var data: i32 = 0;
+ const T = @typeOf(foo(i32, &data));
+ comptime assert(T == i32);
+ assert(data == 0);
+}
+fn foo(comptime T: type, ptr: *T) T {
+ ptr.* += 1;
+ return ptr.*;
+}
+ {#code_end#}
{#header_close#}
{#header_open|@unionInit#}
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 87756d338f..aee6d3994f 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -2104,6 +2104,7 @@ enum ScopeId {
ScopeIdFnDef,
ScopeIdCompTime,
ScopeIdRuntime,
+ ScopeIdTypeOf,
};
struct Scope {
@@ -2244,6 +2245,13 @@ struct ScopeFnDef {
ZigFn *fn_entry;
};
+// This scope is created for a @typeOf.
+// All runtime side-effects are elided within it.
+// NodeTypeFnCallExpr
+struct ScopeTypeOf {
+ Scope base;
+};
+
// synchronized with code in define_builtin_compile_vars
enum AtomicOrder {
AtomicOrderUnordered,
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 43c8d499db..df5b27784a 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -197,6 +197,12 @@ Scope *create_comptime_scope(CodeGen *g, AstNode *node, Scope *parent) {
return &scope->base;
}
+Scope *create_typeof_scope(CodeGen *g, AstNode *node, Scope *parent) {
+ ScopeTypeOf *scope = allocate<ScopeTypeOf>(1);
+ init_scope(g, &scope->base, ScopeIdTypeOf, node, parent);
+ return &scope->base;
+}
+
ZigType *get_scope_import(Scope *scope) {
while (scope) {
if (scope->id == ScopeIdDecls) {
@@ -209,6 +215,22 @@ ZigType *get_scope_import(Scope *scope) {
zig_unreachable();
}
+ScopeTypeOf *get_scope_typeof(Scope *scope) {
+ while (scope) {
+ switch (scope->id) {
+ case ScopeIdTypeOf:
+ return reinterpret_cast<ScopeTypeOf *>(scope);
+ case ScopeIdFnDef:
+ case ScopeIdDecls:
+ return nullptr;
+ default:
+ scope = scope->parent;
+ continue;
+ }
+ }
+ zig_unreachable();
+}
+
static ZigType *new_container_type_entry(CodeGen *g, ZigTypeId id, AstNode *source_node, Scope *parent_scope,
Buf *bare_name)
{
diff --git a/src/analyze.hpp b/src/analyze.hpp
index 6e8897bf82..dc702167e7 100644
--- a/src/analyze.hpp
+++ b/src/analyze.hpp
@@ -85,6 +85,7 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node);
ZigFn *scope_fn_entry(Scope *scope);
ZigPackage *scope_package(Scope *scope);
ZigType *get_scope_import(Scope *scope);
+ScopeTypeOf *get_scope_typeof(Scope *scope);
void init_tld(Tld *tld, TldId id, Buf *name, VisibMod visib_mod, AstNode *source_node, Scope *parent_scope);
ZigVar *add_variable(CodeGen *g, AstNode *source_node, Scope *parent_scope, Buf *name,
bool is_const, ConstExprValue *init_value, Tld *src_tld, ZigType *var_type);
@@ -112,6 +113,7 @@ ScopeSuspend *create_suspend_scope(CodeGen *g, AstNode *node, Scope *parent);
ScopeFnDef *create_fndef_scope(CodeGen *g, AstNode *node, Scope *parent, ZigFn *fn_entry);
Scope *create_comptime_scope(CodeGen *g, AstNode *node, Scope *parent);
Scope *create_runtime_scope(CodeGen *g, AstNode *node, Scope *parent, IrInstruction *is_comptime);
+Scope *create_typeof_scope(CodeGen *g, AstNode *node, Scope *parent);
void init_const_str_lit(CodeGen *g, ConstExprValue *const_val, Buf *str);
ConstExprValue *create_const_str_lit(CodeGen *g, Buf *str);
diff --git a/src/codegen.cpp b/src/codegen.cpp
index d87b5d0aeb..33713a9b30 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -645,6 +645,7 @@ static ZigLLVMDIScope *get_di_scope(CodeGen *g, Scope *scope) {
case ScopeIdSuspend:
case ScopeIdCompTime:
case ScopeIdRuntime:
+ case ScopeIdTypeOf:
return get_di_scope(g, scope->parent);
}
zig_unreachable();
@@ -3757,6 +3758,7 @@ static void render_async_var_decls(CodeGen *g, Scope *scope) {
case ScopeIdSuspend:
case ScopeIdCompTime:
case ScopeIdRuntime:
+ case ScopeIdTypeOf:
scope = scope->parent;
continue;
}
@@ -5942,12 +5944,17 @@ static void ir_render(CodeGen *g, ZigFn *fn_entry) {
for (size_t block_i = 0; block_i < executable->basic_block_list.length; block_i += 1) {
IrBasicBlock *current_block = executable->basic_block_list.at(block_i);
+ if (get_scope_typeof(current_block->scope) != nullptr) {
+ LLVMBuildBr(g->builder, current_block->llvm_block);
+ }
assert(current_block->llvm_block);
LLVMPositionBuilderAtEnd(g->builder, current_block->llvm_block);
for (size_t instr_i = 0; instr_i < current_block->instruction_list.length; instr_i += 1) {
IrInstruction *instruction = current_block->instruction_list.at(instr_i);
if (instruction->ref_count == 0 && !ir_has_side_effects(instruction))
continue;
+ if (get_scope_typeof(instruction->scope) != nullptr)
+ continue;
if (!g->strip_debug_symbols) {
set_debug_location(g, instruction);
diff --git a/src/ir.cpp b/src/ir.cpp
index 02a134c62e..ad81b27a93 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -3344,6 +3344,7 @@ static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_sco
case ScopeIdSuspend:
case ScopeIdCompTime:
case ScopeIdRuntime:
+ case ScopeIdTypeOf:
scope = scope->parent;
continue;
case ScopeIdDeferExpr:
@@ -3399,6 +3400,7 @@ static bool ir_gen_defers_for_block(IrBuilder *irb, Scope *inner_scope, Scope *o
case ScopeIdSuspend:
case ScopeIdCompTime:
case ScopeIdRuntime:
+ case ScopeIdTypeOf:
scope = scope->parent;
continue;
case ScopeIdDeferExpr:
@@ -4379,8 +4381,10 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
zig_unreachable();
case BuiltinFnIdTypeof:
{
+ Scope *sub_scope = create_typeof_scope(irb->codegen, node, scope);
+
AstNode *arg_node = node->data.fn_call_expr.params.at(0);
- IrInstruction *arg = ir_gen_node(irb, arg_node, scope);
+ IrInstruction *arg = ir_gen_node(irb, arg_node, sub_scope);
if (arg == irb->codegen->invalid_instruction)
return arg;
@@ -8269,6 +8273,10 @@ static ConstExprValue *ir_exec_const_result(CodeGen *codegen, IrExecutable *exec
break;
}
}
+ if (get_scope_typeof(instruction->scope) != nullptr) {
+ // doesn't count, it's inside a @typeOf()
+ continue;
+ }
exec_add_error_node(codegen, exec, instruction->source_node,
buf_sprintf("unable to evaluate constant expression"));
return &codegen->invalid_instruction->value;
diff --git a/test/stage1/behavior/sizeof_and_typeof.zig b/test/stage1/behavior/sizeof_and_typeof.zig
index da79c3a270..6f57bfedd5 100644
--- a/test/stage1/behavior/sizeof_and_typeof.zig
+++ b/test/stage1/behavior/sizeof_and_typeof.zig
@@ -89,3 +89,29 @@ test "@sizeOf(T) == 0 doesn't force resolving struct size" {
expect(@sizeOf(S.Foo) == 4);
expect(@sizeOf(S.Bar) == 8);
}
+
+test "@typeOf() has no runtime side effects" {
+ const S = struct {
+ fn foo(comptime T: type, ptr: *T) T {
+ ptr.* += 1;
+ return ptr.*;
+ }
+ };
+ var data: i32 = 0;
+ const T = @typeOf(S.foo(i32, &data));
+ comptime expect(T == i32);
+ expect(data == 0);
+}
+
+test "branching logic inside @typeOf" {
+ const S = struct {
+ var data: i32 = 0;
+ fn foo() anyerror!i32 {
+ data += 1;
+ return undefined;
+ }
+ };
+ const T = @typeOf(S.foo() catch undefined);
+ comptime expect(T == i32);
+ expect(S.data == 0);
+}
--
cgit v1.2.3
From 5c3a9a1a3eef82ffad17bc295da05ecccd9006a5 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 31 Aug 2019 18:50:16 -0400
Subject: improvements to `@asyncCall`
* `await @asyncCall` generates better code. See #3065
* `@asyncCall` works with a real `@Frame(func)` in addition to
a byte slice. Closes #3072
* `@asyncCall` allows passing `{}` (a void value) as the result
pointer, which uses the result location inside the frame.
Closes #3068
* support `await @asyncCall` on a non-async function. This is in
  preparation for safe recursion (#1006). See the usage sketch below.
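A minimal usage sketch of the new forms (hypothetical function names; the behavior tests added in this commit exercise the same patterns):

    const std = @import("std");
    const expect = std.testing.expect;

    fn add(a: i32, b: i32) i32 {
        suspend; // forces an async frame
        return a + b;
    }

    test "usage sketch" {
        // A real *@Frame(func) may now be passed instead of a byte slice.
        var frame: @Frame(add) = undefined;
        var result: i32 = undefined;
        const f = @asyncCall(&frame, &result, add, 1, 2);
        resume f;
        expect(result == 3);

        // Passing {} as the result pointer uses the result location inside
        // the frame; the value is later obtained by awaiting the returned
        // anyframe->i32 from an async context.
        var bytes: [100]u8 align(16) = undefined;
        var ptr: async fn (i32, i32) i32 = undefined;
        ptr = add;
        const g = @asyncCall(&bytes, {}, ptr, 3, 4);
        comptime expect(@typeOf(g) == anyframe->i32);
        resume g;
    }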
---
src/all_types.hpp | 2 +
src/analyze.cpp | 4 +
src/codegen.cpp | 61 +++++++----
src/ir.cpp | 225 ++++++++++++++++++++++++--------------
test/compile_errors.zig | 16 +++
test/stage1/behavior/async_fn.zig | 105 +++++++++++++++++-
6 files changed, 308 insertions(+), 105 deletions(-)
(limited to 'src/analyze.cpp')
diff --git a/src/all_types.hpp b/src/all_types.hpp
index aee6d3994f..d9e1dc44ca 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -2719,6 +2719,7 @@ struct IrInstructionCallSrc {
IrInstruction *new_stack;
FnInline fn_inline;
bool is_async;
+ bool is_async_call_builtin;
bool is_comptime;
};
@@ -2735,6 +2736,7 @@ struct IrInstructionCallGen {
IrInstruction *new_stack;
FnInline fn_inline;
bool is_async;
+ bool is_async_call_builtin;
};
struct IrInstructionConst {
diff --git a/src/analyze.cpp b/src/analyze.cpp
index df5b27784a..dfdf06aa5a 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -5727,6 +5727,10 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) {
for (size_t i = 0; i < fn->call_list.length; i += 1) {
IrInstructionCallGen *call = fn->call_list.at(i);
+ if (call->new_stack != nullptr) {
+ // don't need to allocate a frame for this
+ continue;
+ }
ZigFn *callee = call->fn_entry;
if (callee == nullptr) {
add_node_error(g, call->base.source_node,
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 33713a9b30..890724d950 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -3826,17 +3826,18 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
LLVMValueRef awaiter_init_val;
LLVMValueRef ret_ptr;
if (callee_is_async) {
- if (instruction->is_async) {
- if (instruction->new_stack == nullptr) {
- awaiter_init_val = zero;
+ if (instruction->new_stack == nullptr) {
+ if (instruction->is_async) {
frame_result_loc = result_loc;
-
- if (ret_has_bits) {
- // Use the result location which is inside the frame if this is an async call.
- ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 2, "");
- }
- } else if (cc == CallingConventionAsync) {
- awaiter_init_val = zero;
+ } else {
+ frame_result_loc = ir_llvm_value(g, instruction->frame_result_loc);
+ }
+ } else {
+ if (instruction->new_stack->value.type->id == ZigTypeIdPointer &&
+ instruction->new_stack->value.type->data.pointer.child_type->id == ZigTypeIdFnFrame)
+ {
+ frame_result_loc = ir_llvm_value(g, instruction->new_stack);
+ } else {
LLVMValueRef frame_slice_ptr = ir_llvm_value(g, instruction->new_stack);
if (ir_want_runtime_safety(g, &instruction->base)) {
LLVMValueRef given_len_ptr = LLVMBuildStructGEP(g->builder, frame_slice_ptr, slice_len_index, "");
@@ -3856,15 +3857,37 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
}
LLVMValueRef frame_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_slice_ptr, slice_ptr_index, "");
LLVMValueRef frame_ptr = LLVMBuildLoad(g->builder, frame_ptr_ptr, "");
- frame_result_loc = LLVMBuildBitCast(g->builder, frame_ptr,
- get_llvm_type(g, instruction->base.value.type), "");
+ if (instruction->fn_entry == nullptr) {
+ ZigType *anyframe_type = get_any_frame_type(g, src_return_type);
+ frame_result_loc = LLVMBuildBitCast(g->builder, frame_ptr, get_llvm_type(g, anyframe_type), "");
+ } else {
+ ZigType *ptr_frame_type = get_pointer_to_type(g,
+ get_fn_frame_type(g, instruction->fn_entry), false);
+ frame_result_loc = LLVMBuildBitCast(g->builder, frame_ptr,
+ get_llvm_type(g, ptr_frame_type), "");
+ }
+ }
+ }
+ if (instruction->is_async) {
+ if (instruction->new_stack == nullptr) {
+ awaiter_init_val = zero;
if (ret_has_bits) {
- // Use the result location provided to the @asyncCall builtin
- ret_ptr = result_loc;
+ // Use the result location which is inside the frame if this is an async call.
+ ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 2, "");
}
} else {
- zig_unreachable();
+ awaiter_init_val = zero;
+
+ if (ret_has_bits) {
+ if (result_loc != nullptr) {
+ // Use the result location provided to the @asyncCall builtin
+ ret_ptr = result_loc;
+ } else {
+ // no result location provided to @asyncCall - use the one inside the frame.
+ ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 2, "");
+ }
+ }
}
// even if prefix_arg_err_ret_stack is true, let the async function do its own
@@ -3872,7 +3895,6 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
} else {
// async function called as a normal function
- frame_result_loc = ir_llvm_value(g, instruction->frame_result_loc);
awaiter_init_val = LLVMBuildPtrToInt(g->builder, g->cur_frame_ptr, usize_type_ref, ""); // caller's own frame pointer
if (ret_has_bits) {
if (result_loc == nullptr) {
@@ -3988,7 +4010,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
uint32_t arg_start_i = frame_index_arg(g, fn_type->data.fn.fn_type_id.return_type);
LLVMValueRef casted_frame;
- if (instruction->new_stack != nullptr) {
+ if (instruction->new_stack != nullptr && instruction->fn_entry == nullptr) {
// We need the frame type to be a pointer to a struct that includes the args
size_t field_count = arg_start_i + gen_param_values.length;
+ LLVMTypeRef *field_types = allocate_nonzero<LLVMTypeRef>(field_count);
@@ -4014,7 +4036,8 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
if (instruction->is_async) {
gen_resume(g, fn_val, frame_result_loc, ResumeIdCall);
if (instruction->new_stack != nullptr) {
- return frame_result_loc;
+ return LLVMBuildBitCast(g->builder, frame_result_loc,
+ get_llvm_type(g, instruction->base.value.type), "");
}
return nullptr;
} else {
@@ -4041,7 +4064,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
}
}
- if (instruction->new_stack == nullptr) {
+ if (instruction->new_stack == nullptr || instruction->is_async_call_builtin) {
result = ZigLLVMBuildCall(g->builder, fn_val,
gen_param_values.items, (unsigned)gen_param_values.length, llvm_cc, fn_inline, "");
} else if (instruction->is_async) {
diff --git a/src/ir.cpp b/src/ir.cpp
index ad81b27a93..b8a81ba5c9 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -1382,7 +1382,7 @@ static IrInstruction *ir_build_union_field_ptr(IrBuilder *irb, Scope *scope, Ast
static IrInstruction *ir_build_call_src(IrBuilder *irb, Scope *scope, AstNode *source_node,
ZigFn *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args,
- bool is_comptime, FnInline fn_inline, bool is_async,
+ bool is_comptime, FnInline fn_inline, bool is_async, bool is_async_call_builtin,
IrInstruction *new_stack, ResultLoc *result_loc)
{
 IrInstructionCallSrc *call_instruction = ir_build_instruction<IrInstructionCallSrc>(irb, scope, source_node);
@@ -1393,6 +1393,7 @@ static IrInstruction *ir_build_call_src(IrBuilder *irb, Scope *scope, AstNode *s
call_instruction->args = args;
call_instruction->arg_count = arg_count;
call_instruction->is_async = is_async;
+ call_instruction->is_async_call_builtin = is_async_call_builtin;
call_instruction->new_stack = new_stack;
call_instruction->result_loc = result_loc;
@@ -1410,7 +1411,7 @@ static IrInstruction *ir_build_call_src(IrBuilder *irb, Scope *scope, AstNode *s
static IrInstructionCallGen *ir_build_call_gen(IrAnalyze *ira, IrInstruction *source_instruction,
ZigFn *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args,
- FnInline fn_inline, bool is_async, IrInstruction *new_stack,
+ FnInline fn_inline, bool is_async, IrInstruction *new_stack, bool is_async_call_builtin,
IrInstruction *result_loc, ZigType *return_type)
{
 IrInstructionCallGen *call_instruction = ir_build_instruction<IrInstructionCallGen>(&ira->new_irb,
@@ -1422,6 +1423,7 @@ static IrInstructionCallGen *ir_build_call_gen(IrAnalyze *ira, IrInstruction *so
call_instruction->args = args;
call_instruction->arg_count = arg_count;
call_instruction->is_async = is_async;
+ call_instruction->is_async_call_builtin = is_async_call_builtin;
call_instruction->new_stack = new_stack;
call_instruction->result_loc = result_loc;
@@ -4351,6 +4353,54 @@ static IrInstruction *ir_gen_this(IrBuilder *irb, Scope *orig_scope, AstNode *no
zig_unreachable();
}
+static IrInstruction *ir_gen_async_call(IrBuilder *irb, Scope *scope, AstNode *await_node, AstNode *call_node,
+ LVal lval, ResultLoc *result_loc)
+{
+ size_t arg_offset = 3;
+ if (call_node->data.fn_call_expr.params.length < arg_offset) {
+ add_node_error(irb->codegen, call_node,
+ buf_sprintf("expected at least %" ZIG_PRI_usize " arguments, found %" ZIG_PRI_usize,
+ arg_offset, call_node->data.fn_call_expr.params.length));
+ return irb->codegen->invalid_instruction;
+ }
+
+ AstNode *bytes_node = call_node->data.fn_call_expr.params.at(0);
+ IrInstruction *bytes = ir_gen_node(irb, bytes_node, scope);
+ if (bytes == irb->codegen->invalid_instruction)
+ return bytes;
+
+ AstNode *ret_ptr_node = call_node->data.fn_call_expr.params.at(1);
+ IrInstruction *ret_ptr = ir_gen_node(irb, ret_ptr_node, scope);
+ if (ret_ptr == irb->codegen->invalid_instruction)
+ return ret_ptr;
+
+ AstNode *fn_ref_node = call_node->data.fn_call_expr.params.at(2);
+ IrInstruction *fn_ref = ir_gen_node(irb, fn_ref_node, scope);
+ if (fn_ref == irb->codegen->invalid_instruction)
+ return fn_ref;
+
+ size_t arg_count = call_node->data.fn_call_expr.params.length - arg_offset;
+
+ // last "arg" is return pointer
+ IrInstruction **args = allocate<IrInstruction *>(arg_count + 1);
+
+ for (size_t i = 0; i < arg_count; i += 1) {
+ AstNode *arg_node = call_node->data.fn_call_expr.params.at(i + arg_offset);
+ IrInstruction *arg = ir_gen_node(irb, arg_node, scope);
+ if (arg == irb->codegen->invalid_instruction)
+ return arg;
+ args[i] = arg;
+ }
+
+ args[arg_count] = ret_ptr;
+
+ bool is_async = await_node == nullptr;
+ bool is_async_call_builtin = true;
+ IrInstruction *call = ir_build_call_src(irb, scope, call_node, nullptr, fn_ref, arg_count, args, false,
+ FnInlineAuto, is_async, is_async_call_builtin, bytes, result_loc);
+ return ir_lval_wrap(irb, scope, call, lval, result_loc);
+}
+
static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval,
ResultLoc *result_loc)
{
@@ -4360,7 +4410,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
Buf *name = fn_ref_expr->data.symbol_expr.symbol;
auto entry = irb->codegen->builtin_fn_table.maybe_get(name);
- if (!entry) { // new built in not found
+ if (!entry) {
add_node_error(irb->codegen, node,
buf_sprintf("invalid builtin function: '%s'", buf_ptr(name)));
return irb->codegen->invalid_instruction;
@@ -5224,7 +5274,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
FnInline fn_inline = (builtin_fn->id == BuiltinFnIdInlineCall) ? FnInlineAlways : FnInlineNever;
IrInstruction *call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false,
- fn_inline, false, nullptr, result_loc);
+ fn_inline, false, false, nullptr, result_loc);
return ir_lval_wrap(irb, scope, call, lval, result_loc);
}
case BuiltinFnIdNewStackCall:
@@ -5257,53 +5307,11 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
}
IrInstruction *call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false,
- FnInlineAuto, false, new_stack, result_loc);
+ FnInlineAuto, false, false, new_stack, result_loc);
return ir_lval_wrap(irb, scope, call, lval, result_loc);
}
case BuiltinFnIdAsyncCall:
- {
- size_t arg_offset = 3;
- if (node->data.fn_call_expr.params.length < arg_offset) {
- add_node_error(irb->codegen, node,
- buf_sprintf("expected at least %" ZIG_PRI_usize " arguments, found %" ZIG_PRI_usize,
- arg_offset, node->data.fn_call_expr.params.length));
- return irb->codegen->invalid_instruction;
- }
-
- AstNode *bytes_node = node->data.fn_call_expr.params.at(0);
- IrInstruction *bytes = ir_gen_node(irb, bytes_node, scope);
- if (bytes == irb->codegen->invalid_instruction)
- return bytes;
-
- AstNode *ret_ptr_node = node->data.fn_call_expr.params.at(1);
- IrInstruction *ret_ptr = ir_gen_node(irb, ret_ptr_node, scope);
- if (ret_ptr == irb->codegen->invalid_instruction)
- return ret_ptr;
-
- AstNode *fn_ref_node = node->data.fn_call_expr.params.at(2);
- IrInstruction *fn_ref = ir_gen_node(irb, fn_ref_node, scope);
- if (fn_ref == irb->codegen->invalid_instruction)
- return fn_ref;
-
- size_t arg_count = node->data.fn_call_expr.params.length - arg_offset;
-
- // last "arg" is return pointer
- IrInstruction **args = allocate<IrInstruction *>(arg_count + 1);
-
- for (size_t i = 0; i < arg_count; i += 1) {
- AstNode *arg_node = node->data.fn_call_expr.params.at(i + arg_offset);
- IrInstruction *arg = ir_gen_node(irb, arg_node, scope);
- if (arg == irb->codegen->invalid_instruction)
- return arg;
- args[i] = arg;
- }
-
- args[arg_count] = ret_ptr;
-
- IrInstruction *call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false,
- FnInlineAuto, true, bytes, result_loc);
- return ir_lval_wrap(irb, scope, call, lval, result_loc);
- }
+ return ir_gen_async_call(irb, scope, nullptr, node, lval, result_loc);
case BuiltinFnIdTypeId:
{
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
@@ -5607,7 +5615,7 @@ static IrInstruction *ir_gen_fn_call(IrBuilder *irb, Scope *scope, AstNode *node
bool is_async = node->data.fn_call_expr.is_async;
IrInstruction *fn_call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false,
- FnInlineAuto, is_async, nullptr, result_loc);
+ FnInlineAuto, is_async, false, nullptr, result_loc);
return ir_lval_wrap(irb, scope, fn_call, lval, result_loc);
}
@@ -7900,6 +7908,19 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n
{
assert(node->type == NodeTypeAwaitExpr);
+ AstNode *expr_node = node->data.await_expr.expr;
+ if (expr_node->type == NodeTypeFnCallExpr && expr_node->data.fn_call_expr.is_builtin) {
+ AstNode *fn_ref_expr = expr_node->data.fn_call_expr.fn_ref_expr;
+ Buf *name = fn_ref_expr->data.symbol_expr.symbol;
+ auto entry = irb->codegen->builtin_fn_table.maybe_get(name);
+ if (entry != nullptr) {
+ BuiltinFnEntry *builtin_fn = entry->value;
+ if (builtin_fn->id == BuiltinFnIdAsyncCall) {
+ return ir_gen_async_call(irb, scope, node, expr_node, lval, result_loc);
+ }
+ }
+ }
+
ZigFn *fn_entry = exec_fn_entry(irb->exec);
if (!fn_entry) {
add_node_error(irb->codegen, node, buf_sprintf("await outside function definition"));
@@ -7915,7 +7936,7 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n
return irb->codegen->invalid_instruction;
}
- IrInstruction *target_inst = ir_gen_node_extra(irb, node->data.await_expr.expr, scope, LValPtr, nullptr);
+ IrInstruction *target_inst = ir_gen_node_extra(irb, expr_node, scope, LValPtr, nullptr);
if (target_inst == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
@@ -15244,44 +15265,61 @@ static IrInstruction *ir_analyze_instruction_reset_result(IrAnalyze *ira, IrInst
return ir_const_void(ira, &instruction->base);
}
+static IrInstruction *get_async_call_result_loc(IrAnalyze *ira, IrInstructionCallSrc *call_instruction,
+ ZigType *fn_ret_type)
+{
+ ir_assert(call_instruction->is_async_call_builtin, &call_instruction->base);
+ IrInstruction *ret_ptr_uncasted = call_instruction->args[call_instruction->arg_count]->child;
+ if (type_is_invalid(ret_ptr_uncasted->value.type))
+ return ira->codegen->invalid_instruction;
+ if (ret_ptr_uncasted->value.type->id == ZigTypeIdVoid) {
+ // Result location will be inside the async frame.
+ return nullptr;
+ }
+ return ir_implicit_cast(ira, ret_ptr_uncasted, get_pointer_to_type(ira->codegen, fn_ret_type, false));
+}
+
static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc *call_instruction, ZigFn *fn_entry,
ZigType *fn_type, IrInstruction *fn_ref, IrInstruction **casted_args, size_t arg_count,
IrInstruction *casted_new_stack)
{
- if (casted_new_stack != nullptr) {
- // this is an @asyncCall
-
+ if (fn_entry == nullptr) {
if (fn_type->data.fn.fn_type_id.cc != CallingConventionAsync) {
ir_add_error(ira, fn_ref,
buf_sprintf("expected async function, found '%s'", buf_ptr(&fn_type->name)));
return ira->codegen->invalid_instruction;
}
-
- IrInstruction *ret_ptr = call_instruction->args[call_instruction->arg_count]->child;
- if (type_is_invalid(ret_ptr->value.type))
+ if (casted_new_stack == nullptr) {
+ ir_add_error(ira, fn_ref, buf_sprintf("function is not comptime-known; @asyncCall required"));
+ return ira->codegen->invalid_instruction;
+ }
+ }
+ if (casted_new_stack != nullptr) {
+ ZigType *fn_ret_type = fn_type->data.fn.fn_type_id.return_type;
+ IrInstruction *ret_ptr = get_async_call_result_loc(ira, call_instruction, fn_ret_type);
+ if (ret_ptr != nullptr && type_is_invalid(ret_ptr->value.type))
return ira->codegen->invalid_instruction;
- ZigType *anyframe_type = get_any_frame_type(ira->codegen, fn_type->data.fn.fn_type_id.return_type);
+ ZigType *anyframe_type = get_any_frame_type(ira->codegen, fn_ret_type);
- IrInstructionCallGen *call_gen = ir_build_call_gen(ira, &call_instruction->base, nullptr, fn_ref,
- arg_count, casted_args, FnInlineAuto, true, casted_new_stack, ret_ptr, anyframe_type);
+ IrInstructionCallGen *call_gen = ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref,
+ arg_count, casted_args, FnInlineAuto, true, casted_new_stack,
+ call_instruction->is_async_call_builtin, ret_ptr, anyframe_type);
return &call_gen->base;
- } else if (fn_entry == nullptr) {
- ir_add_error(ira, fn_ref, buf_sprintf("function is not comptime-known; @asyncCall required"));
- return ira->codegen->invalid_instruction;
- }
-
- ZigType *frame_type = get_fn_frame_type(ira->codegen, fn_entry);
- IrInstruction *result_loc = ir_resolve_result(ira, &call_instruction->base, call_instruction->result_loc,
- frame_type, nullptr, true, true, false);
- if (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc)) {
- return result_loc;
+ } else {
+ ZigType *frame_type = get_fn_frame_type(ira->codegen, fn_entry);
+ IrInstruction *result_loc = ir_resolve_result(ira, &call_instruction->base, call_instruction->result_loc,
+ frame_type, nullptr, true, true, false);
+ if (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc)) {
+ return result_loc;
+ }
+ result_loc = ir_implicit_cast(ira, result_loc, get_pointer_to_type(ira->codegen, frame_type, false));
+ if (type_is_invalid(result_loc->value.type))
+ return ira->codegen->invalid_instruction;
+ return &ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref, arg_count,
+ casted_args, FnInlineAuto, true, casted_new_stack, call_instruction->is_async_call_builtin,
+ result_loc, frame_type)->base;
}
- result_loc = ir_implicit_cast(ira, result_loc, get_pointer_to_type(ira->codegen, frame_type, false));
- if (type_is_invalid(result_loc->value.type))
- return ira->codegen->invalid_instruction;
- return &ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref, arg_count,
- casted_args, FnInlineAuto, true, nullptr, result_loc, frame_type)->base;
}
static bool ir_analyze_fn_call_inline_arg(IrAnalyze *ira, AstNode *fn_proto_node,
IrInstruction *arg, Scope **exec_scope, size_t *next_proto_i)
@@ -15790,16 +15828,27 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
IrInstruction *casted_new_stack = nullptr;
if (call_instruction->new_stack != nullptr) {
- ZigType *u8_ptr = get_pointer_to_type_extra(ira->codegen, ira->codegen->builtin_types.entry_u8,
- false, false, PtrLenUnknown, target_fn_align(ira->codegen->zig_target), 0, 0, false);
- ZigType *u8_slice = get_slice_type(ira->codegen, u8_ptr);
IrInstruction *new_stack = call_instruction->new_stack->child;
if (type_is_invalid(new_stack->value.type))
return ira->codegen->invalid_instruction;
- casted_new_stack = ir_implicit_cast(ira, new_stack, u8_slice);
- if (type_is_invalid(casted_new_stack->value.type))
- return ira->codegen->invalid_instruction;
+ if (call_instruction->is_async_call_builtin &&
+ fn_entry != nullptr && new_stack->value.type->id == ZigTypeIdPointer &&
+ new_stack->value.type->data.pointer.child_type->id == ZigTypeIdFnFrame)
+ {
+ ZigType *needed_frame_type = get_pointer_to_type(ira->codegen,
+ get_fn_frame_type(ira->codegen, fn_entry), false);
+ casted_new_stack = ir_implicit_cast(ira, new_stack, needed_frame_type);
+ if (type_is_invalid(casted_new_stack->value.type))
+ return ira->codegen->invalid_instruction;
+ } else {
+ ZigType *u8_ptr = get_pointer_to_type_extra(ira->codegen, ira->codegen->builtin_types.entry_u8,
+ false, false, PtrLenUnknown, target_fn_align(ira->codegen->zig_target), 0, 0, false);
+ ZigType *u8_slice = get_slice_type(ira->codegen, u8_ptr);
+ casted_new_stack = ir_implicit_cast(ira, new_stack, u8_slice);
+ if (type_is_invalid(casted_new_stack->value.type))
+ return ira->codegen->invalid_instruction;
+ }
}
if (fn_type->data.fn.is_generic) {
@@ -16010,7 +16059,11 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
FnTypeId *impl_fn_type_id = &impl_fn->type_entry->data.fn.fn_type_id;
IrInstruction *result_loc;
- if (handle_is_ptr(impl_fn_type_id->return_type)) {
+ if (call_instruction->is_async_call_builtin) {
+ result_loc = get_async_call_result_loc(ira, call_instruction, impl_fn_type_id->return_type);
+ if (result_loc != nullptr && type_is_invalid(result_loc->value.type))
+ return ira->codegen->invalid_instruction;
+ } else if (handle_is_ptr(impl_fn_type_id->return_type)) {
result_loc = ir_resolve_result(ira, &call_instruction->base, call_instruction->result_loc,
impl_fn_type_id->return_type, nullptr, true, true, false);
if (result_loc != nullptr) {
@@ -16044,7 +16097,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
IrInstructionCallGen *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base,
impl_fn, nullptr, impl_param_count, casted_args, fn_inline,
- false, casted_new_stack, result_loc,
+ false, casted_new_stack, call_instruction->is_async_call_builtin, result_loc,
impl_fn_type_id->return_type);
parent_fn_entry->call_list.append(new_call_instruction);
@@ -16167,7 +16220,11 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
}
IrInstruction *result_loc;
- if (handle_is_ptr(return_type)) {
+ if (call_instruction->is_async_call_builtin) {
+ result_loc = get_async_call_result_loc(ira, call_instruction, return_type);
+ if (result_loc != nullptr && type_is_invalid(result_loc->value.type))
+ return ira->codegen->invalid_instruction;
+ } else if (handle_is_ptr(return_type)) {
result_loc = ir_resolve_result(ira, &call_instruction->base, call_instruction->result_loc,
return_type, nullptr, true, true, false);
if (result_loc != nullptr) {
@@ -16185,7 +16242,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
IrInstructionCallGen *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref,
call_param_count, casted_args, fn_inline, false, casted_new_stack,
- result_loc, return_type);
+ call_instruction->is_async_call_builtin, result_loc, return_type);
parent_fn_entry->call_list.append(new_call_instruction);
return ir_finish_anal(ira, &new_call_instruction->base);
}
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index a9e99f4799..12f17ec790 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -2,6 +2,22 @@ const tests = @import("tests.zig");
const builtin = @import("builtin");
pub fn addCases(cases: *tests.CompileErrorContext) void {
+ cases.add(
+ "wrong type for result ptr to @asyncCall",
+ \\export fn entry() void {
+ \\ _ = async amain();
+ \\}
+ \\fn amain() i32 {
+ \\ var frame: @Frame(foo) = undefined;
+ \\ return await @asyncCall(&frame, false, foo);
+ \\}
+ \\fn foo() i32 {
+ \\ return 1234;
+ \\}
+ ,
+ "tmp.zig:6:37: error: expected type '*i32', found 'bool'",
+ );
+
cases.add(
"struct depends on itself via optional field",
\\const LhsExpr = struct {
diff --git a/test/stage1/behavior/async_fn.zig b/test/stage1/behavior/async_fn.zig
index 76a2780737..28a9ade1b3 100644
--- a/test/stage1/behavior/async_fn.zig
+++ b/test/stage1/behavior/async_fn.zig
@@ -331,8 +331,9 @@ test "async fn with inferred error set" {
fn doTheTest() void {
var frame: [1]@Frame(middle) = undefined;
- var result: anyerror!void = undefined;
- _ = @asyncCall(@sliceToBytes(frame[0..]), &result, middle);
+ var fn_ptr = middle;
+ var result: @typeOf(fn_ptr).ReturnType.ErrorSet!void = undefined;
+ _ = @asyncCall(@sliceToBytes(frame[0..]), &result, fn_ptr);
resume global_frame;
std.testing.expectError(error.Fail, result);
}
@@ -819,6 +820,34 @@ test "struct parameter to async function is copied to the frame" {
}
test "cast fn to async fn when it is inferred to be async" {
+ const S = struct {
+ var frame: anyframe = undefined;
+ var ok = false;
+
+ fn doTheTest() void {
+ var ptr: async fn () i32 = undefined;
+ ptr = func;
+ var buf: [100]u8 align(16) = undefined;
+ var result: i32 = undefined;
+ const f = @asyncCall(&buf, &result, ptr);
+ _ = await f;
+ expect(result == 1234);
+ ok = true;
+ }
+
+ fn func() i32 {
+ suspend {
+ frame = @frame();
+ }
+ return 1234;
+ }
+ };
+ _ = async S.doTheTest();
+ resume S.frame;
+ expect(S.ok);
+}
+
+test "cast fn to async fn when it is inferred to be async, awaited directly" {
const S = struct {
var frame: anyframe = undefined;
var ok = false;
@@ -919,3 +948,75 @@ fn recursiveAsyncFunctionTest(comptime suspending_implementation: bool) type {
}
};
}
+
+test "@asyncCall with comptime-known function, but not awaited directly" {
+ const S = struct {
+ var global_frame: anyframe = undefined;
+
+ fn doTheTest() void {
+ var frame: [1]@Frame(middle) = undefined;
+ var result: @typeOf(middle).ReturnType.ErrorSet!void = undefined;
+ _ = @asyncCall(@sliceToBytes(frame[0..]), &result, middle);
+ resume global_frame;
+ std.testing.expectError(error.Fail, result);
+ }
+
+ async fn middle() !void {
+ var f = async middle2();
+ return await f;
+ }
+
+ fn middle2() !void {
+ return failing();
+ }
+
+ fn failing() !void {
+ global_frame = @frame();
+ suspend;
+ return error.Fail;
+ }
+ };
+ S.doTheTest();
+}
+
+test "@asyncCall with actual frame instead of byte buffer" {
+ const S = struct {
+ fn func() i32 {
+ suspend;
+ return 1234;
+ }
+ };
+ var frame: @Frame(S.func) = undefined;
+ var result: i32 = undefined;
+ const ptr = @asyncCall(&frame, &result, S.func);
+ resume ptr;
+ expect(result == 1234);
+}
+
+test "@asyncCall using the result location inside the frame" {
+ const S = struct {
+ async fn simple2(y: *i32) i32 {
+ defer y.* += 2;
+ y.* += 1;
+ suspend;
+ return 1234;
+ }
+ fn getAnswer(f: anyframe->i32, out: *i32) void {
+ var res = await f; // TODO https://github.com/ziglang/zig/issues/3077
+ out.* = res;
+ }
+ };
+ var data: i32 = 1;
+ const Foo = struct {
+ bar: async fn (*i32) i32,
+ };
+ var foo = Foo{ .bar = S.simple2 };
+ var bytes: [64]u8 align(16) = undefined;
+ const f = @asyncCall(&bytes, {}, foo.bar, &data);
+ comptime expect(@typeOf(f) == anyframe->i32);
+ expect(data == 2);
+ resume f;
+ expect(data == 4);
+ _ = async S.getAnswer(f, &data);
+ expect(data == 1234);
+}
--
cgit v1.2.3
From 1f99899408367a16c13806369f94645c2001e68b Mon Sep 17 00:00:00 2001
From: Michael Dusan
Date: Sat, 31 Aug 2019 12:30:26 -0400
Subject: stage1 enhance IR print
- pass2 now prints missing instructions in a trailing fashion
- instruction struct name added to print as column 2 (sample layout below)
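With the new prefix format string in ir_print.cpp (below), each printed line now looks roughly like this (illustrative layout only): '#' marks instructions printed in block order, ':' marks trailing instructions that are referenced by a pass-2 instruction but absent from its instruction list, and the second column is the new instruction struct name:

    #<id> | <InstructionStruct>     | <type>      | <refs>| <operands...>
    :<id> | <InstructionStruct>     | <type>      | <refs>| <operands...>   (trailing)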
---
src/analyze.cpp | 4 +-
src/ir.cpp | 4 +-
src/ir_print.cpp | 385 +++++++++++++++++++++++++++++++++++++++++++++++++++++--
src/ir_print.hpp | 4 +-
4 files changed, 383 insertions(+), 14 deletions(-)
(limited to 'src/analyze.cpp')
diff --git a/src/analyze.cpp b/src/analyze.cpp
index dfdf06aa5a..9b361baa56 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -4415,7 +4415,7 @@ static void analyze_fn_ir(CodeGen *g, ZigFn *fn, AstNode *return_type_node) {
if (g->verbose_ir) {
fprintf(stderr, "fn %s() { // (analyzed)\n", buf_ptr(&fn->symbol_name));
- ir_print(g, stderr, &fn->analyzed_executable, 4);
+ ir_print(g, stderr, &fn->analyzed_executable, 4, 2);
fprintf(stderr, "}\n");
}
fn->anal_state = FnAnalStateComplete;
@@ -4449,7 +4449,7 @@ static void analyze_fn_body(CodeGen *g, ZigFn *fn_table_entry) {
fprintf(stderr, "\n");
ast_render(stderr, fn_table_entry->body_node, 4);
fprintf(stderr, "\n{ // (IR)\n");
- ir_print(g, stderr, &fn_table_entry->ir_executable, 4);
+ ir_print(g, stderr, &fn_table_entry->ir_executable, 4, 1);
fprintf(stderr, "}\n");
}
diff --git a/src/ir.cpp b/src/ir.cpp
index b8a81ba5c9..393b5c52e2 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -10867,7 +10867,7 @@ ConstExprValue *ir_eval_const_value(CodeGen *codegen, Scope *scope, AstNode *nod
fprintf(stderr, "\nSource: ");
ast_render(stderr, node, 4);
fprintf(stderr, "\n{ // (IR)\n");
- ir_print(codegen, stderr, ir_executable, 2);
+ ir_print(codegen, stderr, ir_executable, 2, 1);
fprintf(stderr, "}\n");
}
 IrExecutable *analyzed_executable = allocate<IrExecutable>(1);
@@ -10888,7 +10888,7 @@ ConstExprValue *ir_eval_const_value(CodeGen *codegen, Scope *scope, AstNode *nod
if (codegen->verbose_ir) {
fprintf(stderr, "{ // (analyzed)\n");
- ir_print(codegen, stderr, analyzed_executable, 2);
+ ir_print(codegen, stderr, analyzed_executable, 2, 2);
fprintf(stderr, "}\n");
}
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index 7580f19059..6de585ec6f 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -10,27 +10,374 @@
#include "ir_print.hpp"
#include "os.hpp"
+static uint32_t hash_instruction_ptr(IrInstruction* instruction) {
+ return (uint32_t)(uintptr_t)instruction;
+}
+
+static bool instruction_ptr_equal(IrInstruction* a, IrInstruction* b) {
+ return a == b;
+}
+
+using InstructionSet = HashMap<IrInstruction*, uint8_t, hash_instruction_ptr, instruction_ptr_equal>;
+using InstructionList = ZigList<IrInstruction*>;
+
struct IrPrint {
+ size_t pass_num;
CodeGen *codegen;
FILE *f;
int indent;
int indent_size;
+
+ // When printing pass 2 instructions, referenced var instructions are not
+ // present in the instruction list. Thus we track which instructions
+ // are printed (per executable) and after each pass 2 instruction those
+ // var instructions are rendered in a trailing fashion.
+ InstructionSet printed;
+ InstructionList pending;
};
static void ir_print_other_instruction(IrPrint *irp, IrInstruction *instruction);
+static const char* ir_instruction_type_str(IrInstruction* instruction) {
+ switch (instruction->id) {
+ case IrInstructionIdInvalid:
+ return "Invalid";
+ case IrInstructionIdDeclVarSrc:
+ return "DeclVarSrc";
+ case IrInstructionIdDeclVarGen:
+ return "DeclVarGen";
+ case IrInstructionIdBr:
+ return "Br";
+ case IrInstructionIdCondBr:
+ return "CondBr";
+ case IrInstructionIdSwitchBr:
+ return "SwitchBr";
+ case IrInstructionIdSwitchVar:
+ return "SwitchVar";
+ case IrInstructionIdSwitchElseVar:
+ return "SwitchElseVar";
+ case IrInstructionIdSwitchTarget:
+ return "SwitchTarget";
+ case IrInstructionIdPhi:
+ return "Phi";
+ case IrInstructionIdUnOp:
+ return "UnOp";
+ case IrInstructionIdBinOp:
+ return "BinOp";
+ case IrInstructionIdLoadPtr:
+ return "LoadPtr";
+ case IrInstructionIdLoadPtrGen:
+ return "LoadPtrGen";
+ case IrInstructionIdStorePtr:
+ return "StorePtr";
+ case IrInstructionIdFieldPtr:
+ return "FieldPtr";
+ case IrInstructionIdStructFieldPtr:
+ return "StructFieldPtr";
+ case IrInstructionIdUnionFieldPtr:
+ return "UnionFieldPtr";
+ case IrInstructionIdElemPtr:
+ return "ElemPtr";
+ case IrInstructionIdVarPtr:
+ return "VarPtr";
+ case IrInstructionIdReturnPtr:
+ return "ReturnPtr";
+ case IrInstructionIdCallSrc:
+ return "CallSrc";
+ case IrInstructionIdCallGen:
+ return "CallGen";
+ case IrInstructionIdConst:
+ return "Const";
+ case IrInstructionIdReturn:
+ return "Return";
+ case IrInstructionIdCast:
+ return "Cast";
+ case IrInstructionIdResizeSlice:
+ return "ResizeSlice";
+ case IrInstructionIdContainerInitList:
+ return "ContainerInitList";
+ case IrInstructionIdContainerInitFields:
+ return "ContainerInitFields";
+ case IrInstructionIdUnreachable:
+ return "Unreachable";
+ case IrInstructionIdTypeOf:
+ return "TypeOf";
+ case IrInstructionIdSetCold:
+ return "SetCold";
+ case IrInstructionIdSetRuntimeSafety:
+ return "SetRuntimeSafety";
+ case IrInstructionIdSetFloatMode:
+ return "SetFloatMode";
+ case IrInstructionIdArrayType:
+ return "ArrayType";
+ case IrInstructionIdAnyFrameType:
+ return "AnyFrameType";
+ case IrInstructionIdSliceType:
+ return "SliceType";
+ case IrInstructionIdGlobalAsm:
+ return "GlobalAsm";
+ case IrInstructionIdAsm:
+ return "Asm";
+ case IrInstructionIdSizeOf:
+ return "SizeOf";
+ case IrInstructionIdTestNonNull:
+ return "TestNonNull";
+ case IrInstructionIdOptionalUnwrapPtr:
+ return "OptionalUnwrapPtr";
+ case IrInstructionIdOptionalWrap:
+ return "OptionalWrap";
+ case IrInstructionIdUnionTag:
+ return "UnionTag";
+ case IrInstructionIdClz:
+ return "Clz";
+ case IrInstructionIdCtz:
+ return "Ctz";
+ case IrInstructionIdPopCount:
+ return "PopCount";
+ case IrInstructionIdBswap:
+ return "Bswap";
+ case IrInstructionIdBitReverse:
+ return "BitReverse";
+ case IrInstructionIdImport:
+ return "Import";
+ case IrInstructionIdCImport:
+ return "CImport";
+ case IrInstructionIdCInclude:
+ return "CInclude";
+ case IrInstructionIdCDefine:
+ return "CDefine";
+ case IrInstructionIdCUndef:
+ return "CUndef";
+ case IrInstructionIdRef:
+ return "Ref";
+ case IrInstructionIdRefGen:
+ return "RefGen";
+ case IrInstructionIdCompileErr:
+ return "CompileErr";
+ case IrInstructionIdCompileLog:
+ return "CompileLog";
+ case IrInstructionIdErrName:
+ return "ErrName";
+ case IrInstructionIdEmbedFile:
+ return "EmbedFile";
+ case IrInstructionIdCmpxchgSrc:
+ return "CmpxchgSrc";
+ case IrInstructionIdCmpxchgGen:
+ return "CmpxchgGen";
+ case IrInstructionIdFence:
+ return "Fence";
+ case IrInstructionIdTruncate:
+ return "Truncate";
+ case IrInstructionIdIntCast:
+ return "IntCast";
+ case IrInstructionIdFloatCast:
+ return "FloatCast";
+ case IrInstructionIdIntToFloat:
+ return "IntToFloat";
+ case IrInstructionIdFloatToInt:
+ return "FloatToInt";
+ case IrInstructionIdBoolToInt:
+ return "BoolToInt";
+ case IrInstructionIdIntType:
+ return "IntType";
+ case IrInstructionIdVectorType:
+ return "VectorType";
+ case IrInstructionIdBoolNot:
+ return "BoolNot";
+ case IrInstructionIdMemset:
+ return "Memset";
+ case IrInstructionIdMemcpy:
+ return "Memcpy";
+ case IrInstructionIdSliceSrc:
+ return "SliceSrc";
+ case IrInstructionIdSliceGen:
+ return "SliceGen";
+ case IrInstructionIdMemberCount:
+ return "MemberCount";
+ case IrInstructionIdMemberType:
+ return "MemberType";
+ case IrInstructionIdMemberName:
+ return "MemberName";
+ case IrInstructionIdBreakpoint:
+ return "Breakpoint";
+ case IrInstructionIdReturnAddress:
+ return "ReturnAddress";
+ case IrInstructionIdFrameAddress:
+ return "FrameAddress";
+ case IrInstructionIdFrameHandle:
+ return "FrameHandle";
+ case IrInstructionIdFrameType:
+ return "FrameType";
+ case IrInstructionIdFrameSizeSrc:
+ return "FrameSizeSrc";
+ case IrInstructionIdFrameSizeGen:
+ return "FrameSizeGen";
+ case IrInstructionIdAlignOf:
+ return "AlignOf";
+ case IrInstructionIdOverflowOp:
+ return "OverflowOp";
+ case IrInstructionIdTestErrSrc:
+ return "TestErrSrc";
+ case IrInstructionIdTestErrGen:
+ return "TestErrGen";
+ case IrInstructionIdMulAdd:
+ return "MulAdd";
+ case IrInstructionIdFloatOp:
+ return "FloatOp";
+ case IrInstructionIdUnwrapErrCode:
+ return "UnwrapErrCode";
+ case IrInstructionIdUnwrapErrPayload:
+ return "UnwrapErrPayload";
+ case IrInstructionIdErrWrapCode:
+ return "ErrWrapCode";
+ case IrInstructionIdErrWrapPayload:
+ return "ErrWrapPayload";
+ case IrInstructionIdFnProto:
+ return "FnProto";
+ case IrInstructionIdTestComptime:
+ return "TestComptime";
+ case IrInstructionIdPtrCastSrc:
+ return "PtrCastSrc";
+ case IrInstructionIdPtrCastGen:
+ return "PtrCastGen";
+ case IrInstructionIdBitCastSrc:
+ return "BitCastSrc";
+ case IrInstructionIdBitCastGen:
+ return "BitCastGen";
+ case IrInstructionIdWidenOrShorten:
+ return "WidenOrShorten";
+ case IrInstructionIdIntToPtr:
+ return "IntToPtr";
+ case IrInstructionIdPtrToInt:
+ return "PtrToInt";
+ case IrInstructionIdIntToEnum:
+ return "IntToEnum";
+ case IrInstructionIdEnumToInt:
+ return "EnumToInt";
+ case IrInstructionIdIntToErr:
+ return "IntToErr";
+ case IrInstructionIdErrToInt:
+ return "ErrToInt";
+ case IrInstructionIdCheckSwitchProngs:
+ return "CheckSwitchProngs";
+ case IrInstructionIdCheckStatementIsVoid:
+ return "CheckStatementIsVoid";
+ case IrInstructionIdTypeName:
+ return "TypeName";
+ case IrInstructionIdDeclRef:
+ return "DeclRef";
+ case IrInstructionIdPanic:
+ return "Panic";
+ case IrInstructionIdTagName:
+ return "TagName";
+ case IrInstructionIdTagType:
+ return "TagType";
+ case IrInstructionIdFieldParentPtr:
+ return "FieldParentPtr";
+ case IrInstructionIdByteOffsetOf:
+ return "ByteOffsetOf";
+ case IrInstructionIdBitOffsetOf:
+ return "BitOffsetOf";
+ case IrInstructionIdTypeInfo:
+ return "TypeInfo";
+ case IrInstructionIdHasField:
+ return "HasField";
+ case IrInstructionIdTypeId:
+ return "TypeId";
+ case IrInstructionIdSetEvalBranchQuota:
+ return "SetEvalBranchQuota";
+ case IrInstructionIdPtrType:
+ return "PtrType";
+ case IrInstructionIdAlignCast:
+ return "AlignCast";
+ case IrInstructionIdImplicitCast:
+ return "ImplicitCast";
+ case IrInstructionIdResolveResult:
+ return "ResolveResult";
+ case IrInstructionIdResetResult:
+ return "ResetResult";
+ case IrInstructionIdOpaqueType:
+ return "OpaqueType";
+ case IrInstructionIdSetAlignStack:
+ return "SetAlignStack";
+ case IrInstructionIdArgType:
+ return "ArgType";
+ case IrInstructionIdExport:
+ return "Export";
+ case IrInstructionIdErrorReturnTrace:
+ return "ErrorReturnTrace";
+ case IrInstructionIdErrorUnion:
+ return "ErrorUnion";
+ case IrInstructionIdAtomicRmw:
+ return "AtomicRmw";
+ case IrInstructionIdAtomicLoad:
+ return "AtomicLoad";
+ case IrInstructionIdSaveErrRetAddr:
+ return "SaveErrRetAddr";
+ case IrInstructionIdAddImplicitReturnType:
+ return "AddImplicitReturnType";
+ case IrInstructionIdErrSetCast:
+ return "ErrSetCast";
+ case IrInstructionIdToBytes:
+ return "ToBytes";
+ case IrInstructionIdFromBytes:
+ return "FromBytes";
+ case IrInstructionIdCheckRuntimeScope:
+ return "CheckRuntimeScope";
+ case IrInstructionIdVectorToArray:
+ return "VectorToArray";
+ case IrInstructionIdArrayToVector:
+ return "ArrayToVector";
+ case IrInstructionIdAssertZero:
+ return "AssertZero";
+ case IrInstructionIdAssertNonNull:
+ return "AssertNonNull";
+ case IrInstructionIdHasDecl:
+ return "HasDecl";
+ case IrInstructionIdUndeclaredIdent:
+ return "UndeclaredIdent";
+ case IrInstructionIdAllocaSrc:
+ return "AllocaSrc";
+ case IrInstructionIdAllocaGen:
+ return "AllocaGen";
+ case IrInstructionIdEndExpr:
+ return "EndExpr";
+ case IrInstructionIdPtrOfArrayToSlice:
+ return "PtrOfArrayToSlice";
+ case IrInstructionIdUnionInitNamedField:
+ return "UnionInitNamedField";
+ case IrInstructionIdSuspendBegin:
+ return "SuspendBegin";
+ case IrInstructionIdSuspendFinish:
+ return "SuspendFinish";
+ case IrInstructionIdAwaitSrc:
+ return "AwaitSrc";
+ case IrInstructionIdAwaitGen:
+ return "AwaitGen";
+ case IrInstructionIdResume:
+ return "Resume";
+ case IrInstructionIdSpillBegin:
+ return "SpillBegin";
+ case IrInstructionIdSpillEnd:
+ return "SpillEnd";
+ }
+ zig_unreachable();
+}
+
static void ir_print_indent(IrPrint *irp) {
for (int i = 0; i < irp->indent; i += 1) {
fprintf(irp->f, " ");
}
}
-static void ir_print_prefix(IrPrint *irp, IrInstruction *instruction) {
+static void ir_print_prefix(IrPrint *irp, IrInstruction *instruction, bool trailing) {
ir_print_indent(irp);
+ const char mark = trailing ? ':' : '#';
const char *type_name = instruction->value.type ? buf_ptr(&instruction->value.type->name) : "(unknown)";
const char *ref_count = ir_has_side_effects(instruction) ?
"-" : buf_ptr(buf_sprintf("%" ZIG_PRI_usize "", instruction->ref_count));
- fprintf(irp->f, "#%-3zu| %-12s| %-2s| ", instruction->debug_id, type_name, ref_count);
+ fprintf(irp->f, "%c%-3zu| %-22s| %-12s| %-2s| ", mark, instruction->debug_id,
+ ir_instruction_type_str(instruction), type_name, ref_count);
}
static void ir_print_const_value(IrPrint *irp, ConstExprValue *const_val) {
@@ -42,6 +389,10 @@ static void ir_print_const_value(IrPrint *irp, ConstExprValue *const_val) {
static void ir_print_var_instruction(IrPrint *irp, IrInstruction *instruction) {
fprintf(irp->f, "#%" ZIG_PRI_usize "", instruction->debug_id);
+ if (irp->pass_num == 2 && irp->printed.maybe_get(instruction) == nullptr) {
+ irp->printed.put(instruction, 0);
+ irp->pending.append(instruction);
+ }
}
static void ir_print_other_instruction(IrPrint *irp, IrInstruction *instruction) {
@@ -49,6 +400,7 @@ static void ir_print_other_instruction(IrPrint *irp, IrInstruction *instruction)
fprintf(irp->f, "(null)");
return;
}
+
if (instruction->value.special != ConstValSpecialRuntime) {
ir_print_const_value(irp, &instruction->value);
} else {
@@ -1550,8 +1902,8 @@ static void ir_print_spill_end(IrPrint *irp, IrInstructionSpillEnd *instruction)
fprintf(irp->f, ")");
}
-static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
- ir_print_prefix(irp, instruction);
+static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction, bool trailing) {
+ ir_print_prefix(irp, instruction, trailing);
switch (instruction->id) {
case IrInstructionIdInvalid:
zig_unreachable();
@@ -2036,31 +2388,48 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
fprintf(irp->f, "\n");
}
-void ir_print(CodeGen *codegen, FILE *f, IrExecutable *executable, int indent_size) {
+void ir_print(CodeGen *codegen, FILE *f, IrExecutable *executable, int indent_size, size_t pass_num) {
IrPrint ir_print = {};
IrPrint *irp = &ir_print;
+ irp->pass_num = pass_num;
irp->codegen = codegen;
irp->f = f;
irp->indent = indent_size;
irp->indent_size = indent_size;
+ irp->printed = {};
+ irp->printed.init(64);
+ irp->pending = {};
for (size_t bb_i = 0; bb_i < executable->basic_block_list.length; bb_i += 1) {
IrBasicBlock *current_block = executable->basic_block_list.at(bb_i);
fprintf(irp->f, "%s_%" ZIG_PRI_usize ":\n", current_block->name_hint, current_block->debug_id);
for (size_t instr_i = 0; instr_i < current_block->instruction_list.length; instr_i += 1) {
IrInstruction *instruction = current_block->instruction_list.at(instr_i);
- ir_print_instruction(irp, instruction);
+ if (irp->pass_num == 2) {
+ irp->printed.put(instruction, 0);
+ irp->pending.clear();
+ }
+ ir_print_instruction(irp, instruction, false);
+ for (size_t j = 0; j < irp->pending.length; ++j)
+ ir_print_instruction(irp, irp->pending.at(j), true);
}
}
+
+ irp->pending.deinit();
+ irp->printed.deinit();
}
-void ir_print_instruction(CodeGen *codegen, FILE *f, IrInstruction *instruction, int indent_size) {
+void ir_print_instruction(CodeGen *codegen, FILE *f, IrInstruction *instruction, int indent_size, size_t pass_num) {
IrPrint ir_print = {};
IrPrint *irp = &ir_print;
+ irp->pass_num = pass_num;
irp->codegen = codegen;
irp->f = f;
irp->indent = indent_size;
irp->indent_size = indent_size;
+ irp->printed = {};
+ irp->printed.init(4);
+ irp->pending = {};
- ir_print_instruction(irp, instruction);
+ ir_print_instruction(irp, instruction, false);
}
diff --git a/src/ir_print.hpp b/src/ir_print.hpp
index 3c784757d5..3e554ceb95 100644
--- a/src/ir_print.hpp
+++ b/src/ir_print.hpp
@@ -12,7 +12,7 @@
#include <stdio.h>
-void ir_print(CodeGen *codegen, FILE *f, IrExecutable *executable, int indent_size);
-void ir_print_instruction(CodeGen *codegen, FILE *f, IrInstruction *instruction, int indent_size);
+void ir_print(CodeGen *codegen, FILE *f, IrExecutable *executable, int indent_size, size_t pass_num);
+void ir_print_instruction(CodeGen *codegen, FILE *f, IrInstruction *instruction, int indent_size, size_t pass_num);
#endif
--
cgit v1.2.3