| author | Andrea Orru <andrea@orru.io> | 2018-08-06 01:43:19 -0400 |
|---|---|---|
| committer | Andrea Orru <andrea@orru.io> | 2018-08-06 01:43:19 -0400 |
| commit | d2f5e57b68da0b16e5789ca19045ccbcb4ecfa8d (patch) | |
| tree | e9fa3caec533a0d1e2b434868b2fde1f9240e5c8 /src | |
| parent | 06614b3fa09954464c2e2f32756cacedc178a282 (diff) | |
| parent | 63a23e848a62d5f167f8d5478de9766cb24aa6eb (diff) | |
Merge branch 'master' into zen_stdlib
Diffstat (limited to 'src')
| -rw-r--r-- | src/all_types.hpp | 333 |
| -rw-r--r-- | src/analyze.cpp | 969 |
| -rw-r--r-- | src/analyze.hpp | 20 |
| -rw-r--r-- | src/ast_render.cpp | 66 |
| -rw-r--r-- | src/bigfloat.cpp | 12 |
| -rw-r--r-- | src/bigfloat.hpp | 3 |
| -rw-r--r-- | src/bigint.cpp | 59 |
| -rw-r--r-- | src/bigint.hpp | 2 |
| -rw-r--r-- | src/codegen.cpp | 1335 |
| -rw-r--r-- | src/codegen.hpp | 2 |
| -rw-r--r-- | src/ir.cpp | 6185 |
| -rw-r--r-- | src/ir_print.cpp | 217 |
| -rw-r--r-- | src/link.cpp | 35 |
| -rw-r--r-- | src/main.cpp | 37 |
| -rw-r--r-- | src/os.cpp | 275 |
| -rw-r--r-- | src/os.hpp | 10 |
| -rw-r--r-- | src/parser.cpp | 131 |
| -rw-r--r-- | src/target.cpp | 121 |
| -rw-r--r-- | src/target.hpp | 2 |
| -rw-r--r-- | src/tokenizer.cpp | 98 |
| -rw-r--r-- | src/tokenizer.hpp | 8 |
| -rw-r--r-- | src/translate_c.cpp | 330 |
| -rw-r--r-- | src/util.hpp | 48 |
| -rw-r--r-- | src/windows_sdk.cpp | 352 |
| -rw-r--r-- | src/windows_sdk.h | 47 |
| -rw-r--r-- | src/zig_llvm.cpp | 19 |
| -rw-r--r-- | src/zig_llvm.h | 10 |
27 files changed, 7611 insertions, 3115 deletions
diff --git a/src/all_types.hpp b/src/all_types.hpp index d27a5c7a1c..b1e8a3746d 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -60,7 +60,7 @@ struct IrExecutable { ZigList<Tld *> tld_list; IrInstruction *coro_handle; - IrInstruction *coro_awaiter_field_ptr; // this one is shared and in the promise + IrInstruction *atomic_state_field_ptr; // this one is shared and in the promise IrInstruction *coro_result_ptr_field_ptr; IrInstruction *coro_result_field_ptr; IrInstruction *await_handle_var_ptr; // this one is where we put the one we extracted from the promise @@ -83,6 +83,7 @@ enum ConstParentId { ConstParentIdStruct, ConstParentIdArray, ConstParentIdUnion, + ConstParentIdScalar, }; struct ConstParent { @@ -100,6 +101,9 @@ struct ConstParent { struct { ConstExprValue *union_val; } p_union; + struct { + ConstExprValue *scalar_val; + } p_scalar; } data; }; @@ -140,6 +144,9 @@ enum ConstPtrSpecial { // understand the value of pointee at compile time. However, we will still // emit a binary with a compile time known address. // In this case index is the numeric address value. + // We also use this for null pointer. We need the data layout for ConstCastOnly == true + // types to be the same, so all optionals of pointer types use x_ptr + // instead of x_optional ConstPtrSpecialHardCodedAddr, // This means that the pointer represents memory of assigning to _. // That is, storing discards the data, and loading is invalid. @@ -215,10 +222,10 @@ enum RuntimeHintErrorUnion { RuntimeHintErrorUnionNonError, }; -enum RuntimeHintMaybe { - RuntimeHintMaybeUnknown, - RuntimeHintMaybeNull, // TODO is this value even possible? if this is the case it might mean the const value is compile time known. - RuntimeHintMaybeNonNull, +enum RuntimeHintOptional { + RuntimeHintOptionalUnknown, + RuntimeHintOptionalNull, // TODO is this value even possible? if this is the case it might mean the const value is compile time known. 
+ RuntimeHintOptionalNonNull, }; enum RuntimeHintPtr { @@ -227,6 +234,16 @@ enum RuntimeHintPtr { RuntimeHintPtrNonStack, }; +enum RuntimeHintSliceId { + RuntimeHintSliceIdUnknown, + RuntimeHintSliceIdLen, +}; + +struct RuntimeHintSlice { + enum RuntimeHintSliceId id; + uint64_t len; +}; + struct ConstGlobalRefs { LLVMValueRef llvm_value; LLVMValueRef llvm_global; @@ -241,13 +258,14 @@ struct ConstExprValue { // populated if special == ConstValSpecialStatic BigInt x_bigint; BigFloat x_bigfloat; + float16_t x_f16; float x_f32; double x_f64; float128_t x_f128; bool x_bool; ConstBoundFnValue x_bound_fn; TypeTableEntry *x_type; - ConstExprValue *x_maybe; + ConstExprValue *x_optional; ConstErrValue x_err_union; ErrorTableEntry *x_err_set; BigInt x_enum_tag; @@ -261,8 +279,9 @@ struct ConstExprValue { // populated if special == ConstValSpecialRuntime RuntimeHintErrorUnion rh_error_union; - RuntimeHintMaybe rh_maybe; + RuntimeHintOptional rh_maybe; RuntimeHintPtr rh_ptr; + RuntimeHintSlice rh_slice; } data; }; @@ -374,11 +393,13 @@ enum NodeType { NodeTypeCharLiteral, NodeTypeSymbol, NodeTypePrefixOpExpr, - NodeTypeAddrOfExpr, + NodeTypePointerType, NodeTypeFnCallExpr, NodeTypeArrayAccessExpr, NodeTypeSliceExpr, NodeTypeFieldAccessExpr, + NodeTypePtrDeref, + NodeTypeUnwrapOptional, NodeTypeUse, NodeTypeBoolLiteral, NodeTypeNullLiteral, @@ -548,7 +569,7 @@ enum BinOpType { BinOpTypeMultWrap, BinOpTypeDiv, BinOpTypeMod, - BinOpTypeUnwrapMaybe, + BinOpTypeUnwrapOptional, BinOpTypeArrayCat, BinOpTypeArrayMult, BinOpTypeErrorUnion, @@ -567,6 +588,10 @@ struct AstNodeCatchExpr { AstNode *op2; }; +struct AstNodeUnwrapOptional { + AstNode *expr; +}; + enum CastOp { CastOpNoCast, // signifies the function call expression is not a cast CastOpNoop, // fn call expr is a cast, but does nothing @@ -577,6 +602,8 @@ enum CastOp { CastOpBytesToSlice, CastOpNumLitToConcrete, CastOpErrSet, + CastOpBitCast, + CastOpPtrOfArrayToSlice, }; struct AstNodeFnCallExpr { @@ -603,15 +630,18 @@ struct AstNodeFieldAccessExpr { Buf *field_name; }; +struct AstNodePtrDerefExpr { + AstNode *target; +}; + enum PrefixOp { PrefixOpInvalid, PrefixOpBoolNot, PrefixOpBinNot, PrefixOpNegation, PrefixOpNegationWrap, - PrefixOpDereference, - PrefixOpMaybe, - PrefixOpUnwrapMaybe, + PrefixOpOptional, + PrefixOpAddrOf, }; struct AstNodePrefixOpExpr { @@ -619,7 +649,8 @@ struct AstNodePrefixOpExpr { AstNode *primary_expr; }; -struct AstNodeAddrOfExpr { +struct AstNodePointerType { + Token *star_token; AstNode *align_expr; BigInt *bit_offset_start; BigInt *bit_offset_end; @@ -868,7 +899,6 @@ struct AstNodeAwaitExpr { struct AstNodeSuspend { AstNode *block; - AstNode *promise_symbol; }; struct AstNodePromiseType { @@ -893,8 +923,9 @@ struct AstNode { AstNodeTestDecl test_decl; AstNodeBinOpExpr bin_op_expr; AstNodeCatchExpr unwrap_err_expr; + AstNodeUnwrapOptional unwrap_optional; AstNodePrefixOpExpr prefix_op_expr; - AstNodeAddrOfExpr addr_of_expr; + AstNodePointerType pointer_type; AstNodeFnCallExpr fn_call_expr; AstNodeArrayAccessExpr array_access_expr; AstNodeSliceExpr slice_expr; @@ -910,6 +941,7 @@ struct AstNode { AstNodeCompTime comptime_expr; AstNodeAsmExpr asm_expr; AstNodeFieldAccessExpr field_access_expr; + AstNodePtrDerefExpr ptr_deref_expr; AstNodeContainerDecl container_decl; AstNodeStructField struct_field; AstNodeStringLiteral string_literal; @@ -966,8 +998,14 @@ struct FnTypeId { uint32_t fn_type_id_hash(FnTypeId*); bool fn_type_id_eql(FnTypeId *a, FnTypeId *b); +enum PtrLen { + PtrLenUnknown, + PtrLenSingle, +}; + struct 
TypeTableEntryPointer { TypeTableEntry *child_type; + PtrLen ptr_len; bool is_const; bool is_volatile; uint32_t alignment; @@ -1018,6 +1056,10 @@ struct TypeTableEntryStruct { // whether we've finished resolving it bool complete; + // whether any of the fields require comptime + // the value is not valid until zero_bits_known == true + bool requires_comptime; + bool zero_bits_loop_flag; bool zero_bits_known; uint32_t abi_alignment; // also figured out with zero_bits pass @@ -1025,7 +1067,7 @@ struct TypeTableEntryStruct { HashMap<Buf *, TypeStructField *, buf_hash, buf_eql_buf> fields_by_name; }; -struct TypeTableEntryMaybe { +struct TypeTableEntryOptional { TypeTableEntry *child_type; }; @@ -1059,8 +1101,7 @@ struct TypeTableEntryEnum { bool zero_bits_loop_flag; bool zero_bits_known; - bool generate_name_table; - LLVMValueRef name_table; + LLVMValueRef name_function; HashMap<Buf *, TypeEnumField *, buf_hash, buf_eql_buf> fields_by_name; }; @@ -1086,6 +1127,10 @@ struct TypeTableEntryUnion { // whether we've finished resolving it bool complete; + // whether any of the fields require comptime + // the value is not valid until zero_bits_known == true + bool requires_comptime; + bool zero_bits_loop_flag; bool zero_bits_known; uint32_t abi_alignment; // also figured out with zero_bits pass @@ -1140,11 +1185,11 @@ enum TypeTableEntryId { TypeTableEntryIdPointer, TypeTableEntryIdArray, TypeTableEntryIdStruct, - TypeTableEntryIdNumLitFloat, - TypeTableEntryIdNumLitInt, - TypeTableEntryIdUndefLit, - TypeTableEntryIdNullLit, - TypeTableEntryIdMaybe, + TypeTableEntryIdComptimeFloat, + TypeTableEntryIdComptimeInt, + TypeTableEntryIdUndefined, + TypeTableEntryIdNull, + TypeTableEntryIdOptional, TypeTableEntryIdErrorUnion, TypeTableEntryIdErrorSet, TypeTableEntryIdEnum, @@ -1175,7 +1220,7 @@ struct TypeTableEntry { TypeTableEntryFloat floating; TypeTableEntryArray array; TypeTableEntryStruct structure; - TypeTableEntryMaybe maybe; + TypeTableEntryOptional maybe; TypeTableEntryErrorUnion error_union; TypeTableEntryErrorSet error_set; TypeTableEntryEnum enumeration; @@ -1187,7 +1232,7 @@ struct TypeTableEntry { // use these fields to make sure we don't duplicate type table entries for the same type TypeTableEntry *pointer_parent[2]; // [0 - mut, 1 - const] - TypeTableEntry *maybe_parent; + TypeTableEntry *optional_parent; TypeTableEntry *promise_parent; TypeTableEntry *promise_frame_parent; // If we generate a constant name value for this type, we memoize it here. 
@@ -1291,6 +1336,8 @@ enum BuiltinFnId { BuiltinFnIdMemberCount, BuiltinFnIdMemberType, BuiltinFnIdMemberName, + BuiltinFnIdField, + BuiltinFnIdTypeInfo, BuiltinFnIdTypeof, BuiltinFnIdAddWithOverflow, BuiltinFnIdSubWithOverflow, @@ -1303,27 +1350,42 @@ enum BuiltinFnId { BuiltinFnIdCompileLog, BuiltinFnIdCtz, BuiltinFnIdClz, + BuiltinFnIdPopCount, BuiltinFnIdImport, BuiltinFnIdCImport, BuiltinFnIdErrName, BuiltinFnIdBreakpoint, BuiltinFnIdReturnAddress, BuiltinFnIdFrameAddress, + BuiltinFnIdHandle, BuiltinFnIdEmbedFile, - BuiltinFnIdCmpExchange, + BuiltinFnIdCmpxchgWeak, + BuiltinFnIdCmpxchgStrong, BuiltinFnIdFence, BuiltinFnIdDivExact, BuiltinFnIdDivTrunc, BuiltinFnIdDivFloor, BuiltinFnIdRem, BuiltinFnIdMod, + BuiltinFnIdSqrt, BuiltinFnIdTruncate, + BuiltinFnIdIntCast, + BuiltinFnIdFloatCast, + BuiltinFnIdErrSetCast, + BuiltinFnIdToBytes, + BuiltinFnIdFromBytes, + BuiltinFnIdIntToFloat, + BuiltinFnIdFloatToInt, + BuiltinFnIdBoolToInt, + BuiltinFnIdErrToInt, + BuiltinFnIdIntToErr, + BuiltinFnIdEnumToInt, + BuiltinFnIdIntToEnum, BuiltinFnIdIntType, BuiltinFnIdSetCold, BuiltinFnIdSetRuntimeSafety, BuiltinFnIdSetFloatMode, BuiltinFnIdTypeName, - BuiltinFnIdCanImplicitCast, BuiltinFnIdPanic, BuiltinFnIdPtrCast, BuiltinFnIdBitCast, @@ -1335,6 +1397,7 @@ enum BuiltinFnId { BuiltinFnIdOffsetOf, BuiltinFnIdInlineCall, BuiltinFnIdNoInlineCall, + BuiltinFnIdNewStackCall, BuiltinFnIdTypeId, BuiltinFnIdShlExact, BuiltinFnIdShrExact, @@ -1346,6 +1409,7 @@ enum BuiltinFnId { BuiltinFnIdExport, BuiltinFnIdErrorReturnTrace, BuiltinFnIdAtomicRmw, + BuiltinFnIdAtomicLoad, }; struct BuiltinFnEntry { @@ -1366,10 +1430,12 @@ enum PanicMsgId { PanicMsgIdRemainderDivisionByZero, PanicMsgIdExactDivisionRemainder, PanicMsgIdSliceWidenRemainder, - PanicMsgIdUnwrapMaybeFail, + PanicMsgIdUnwrapOptionalFail, PanicMsgIdInvalidErrorCode, PanicMsgIdIncorrectAlignment, PanicMsgIdBadUnionField, + PanicMsgIdBadEnumValue, + PanicMsgIdFloatToInt, PanicMsgIdCount, }; @@ -1383,6 +1449,7 @@ struct TypeId { union { struct { TypeTableEntry *child_type; + PtrLen ptr_len; bool is_const; bool is_volatile; uint32_t alignment; @@ -1410,9 +1477,11 @@ bool type_id_eql(TypeId a, TypeId b); enum ZigLLVMFnId { ZigLLVMFnIdCtz, ZigLLVMFnIdClz, + ZigLLVMFnIdPopCount, ZigLLVMFnIdOverflowArithmetic, ZigLLVMFnIdFloor, ZigLLVMFnIdCeil, + ZigLLVMFnIdSqrt, }; enum AddSubMul { @@ -1433,7 +1502,10 @@ struct ZigLLVMFnKey { } clz; struct { uint32_t bit_count; - } floor_ceil; + } pop_count; + struct { + uint32_t bit_count; + } floating; struct { AddSubMul add_sub_mul; uint32_t bit_count; @@ -1454,6 +1526,7 @@ enum BuildMode { BuildModeDebug, BuildModeFastRelease, BuildModeSafeRelease, + BuildModeSmallRelease, }; enum EmitFileType { @@ -1499,6 +1572,7 @@ struct CodeGen { HashMap<Buf *, AstNode *, buf_hash, buf_eql_buf> exported_symbol_names; HashMap<Buf *, Tld *, buf_hash, buf_eql_buf> external_prototypes; HashMap<Buf *, ConstExprValue *, buf_hash, buf_eql_buf> string_literals_table; + HashMap<const TypeTableEntry *, ConstExprValue *, type_ptr_hash, type_ptr_eql> type_info_cache; ZigList<ImportTableEntry *> import_queue; @@ -1512,7 +1586,6 @@ struct CodeGen { struct { TypeTableEntry *entry_bool; - TypeTableEntry *entry_int[2][12]; // [signed,unsigned][2,3,4,5,6,7,8,16,29,32,64,128] TypeTableEntry *entry_c_int[CIntTypeCount]; TypeTableEntry *entry_c_longdouble; TypeTableEntry *entry_c_void; @@ -1521,14 +1594,12 @@ struct CodeGen { TypeTableEntry *entry_u32; TypeTableEntry *entry_u29; TypeTableEntry *entry_u64; - TypeTableEntry *entry_u128; TypeTableEntry 
*entry_i8; - TypeTableEntry *entry_i16; TypeTableEntry *entry_i32; TypeTableEntry *entry_i64; - TypeTableEntry *entry_i128; TypeTableEntry *entry_isize; TypeTableEntry *entry_usize; + TypeTableEntry *entry_f16; TypeTableEntry *entry_f32; TypeTableEntry *entry_f64; TypeTableEntry *entry_f128; @@ -1645,10 +1716,16 @@ struct CodeGen { LLVMValueRef coro_save_fn_val; LLVMValueRef coro_promise_fn_val; LLVMValueRef coro_alloc_helper_fn_val; + LLVMValueRef coro_frame_fn_val; LLVMValueRef merge_err_ret_traces_fn_val; LLVMValueRef add_error_return_trace_addr_fn_val; + LLVMValueRef stacksave_fn_val; + LLVMValueRef stackrestore_fn_val; + LLVMValueRef write_register_fn_val; bool error_during_imports; + LLVMValueRef sp_md_node; + const char **clang_argv; size_t clang_argv_len; ZigList<const char *> lib_dirs; @@ -1680,8 +1757,6 @@ struct CodeGen { ZigList<Buf *> link_objects; ZigList<Buf *> assembly_files; - ZigList<TypeTableEntry *> name_table_enums; - Buf *test_filter; Buf *test_name_prefix; @@ -1700,6 +1775,8 @@ struct CodeGen { ZigList<ZigLLVMDIType **> error_di_types; ZigList<Buf *> forbidden_libs; + + bool no_rosegment_workaround; }; enum VarLinkage { @@ -1750,6 +1827,7 @@ enum ScopeId { ScopeIdVarDecl, ScopeIdCImport, ScopeIdLoop, + ScopeIdSuspend, ScopeIdFnDef, ScopeIdCompTime, ScopeIdCoroPrelude, @@ -1845,6 +1923,16 @@ struct ScopeLoop { ZigList<IrBasicBlock *> *incoming_blocks; }; +// This scope is created for a suspend block in order to have labeled +// suspend for breaking out of a suspend and for detecting if a suspend +// block is inside a suspend block. +struct ScopeSuspend { + Scope base; + + IrBasicBlock *resume_block; + bool reported_err; +}; + // This scope is created for a comptime expression. // NodeTypeCompTime, NodeTypeSwitchExpr struct ScopeCompTime { @@ -1912,12 +2000,6 @@ struct IrBasicBlock { IrInstruction *must_be_comptime_source_instr; }; -struct LVal { - bool is_ptr; - bool is_const; - bool is_volatile; -}; - enum IrInstructionId { IrInstructionIdInvalid, IrInstructionIdBr, @@ -1957,11 +2039,12 @@ enum IrInstructionId { IrInstructionIdAsm, IrInstructionIdSizeOf, IrInstructionIdTestNonNull, - IrInstructionIdUnwrapMaybe, - IrInstructionIdMaybeWrap, + IrInstructionIdUnwrapOptional, + IrInstructionIdOptionalWrap, IrInstructionIdUnionTag, IrInstructionIdClz, IrInstructionIdCtz, + IrInstructionIdPopCount, IrInstructionIdImport, IrInstructionIdCImport, IrInstructionIdCInclude, @@ -1978,6 +2061,11 @@ enum IrInstructionId { IrInstructionIdCmpxchg, IrInstructionIdFence, IrInstructionIdTruncate, + IrInstructionIdIntCast, + IrInstructionIdFloatCast, + IrInstructionIdIntToFloat, + IrInstructionIdFloatToInt, + IrInstructionIdBoolToInt, IrInstructionIdIntType, IrInstructionIdBoolNot, IrInstructionIdMemset, @@ -1989,6 +2077,7 @@ enum IrInstructionId { IrInstructionIdBreakpoint, IrInstructionIdReturnAddress, IrInstructionIdFrameAddress, + IrInstructionIdHandle, IrInstructionIdAlignOf, IrInstructionIdOverflowOp, IrInstructionIdTestErr, @@ -2004,21 +2093,22 @@ enum IrInstructionId { IrInstructionIdIntToPtr, IrInstructionIdPtrToInt, IrInstructionIdIntToEnum, + IrInstructionIdEnumToInt, IrInstructionIdIntToErr, IrInstructionIdErrToInt, IrInstructionIdCheckSwitchProngs, IrInstructionIdCheckStatementIsVoid, IrInstructionIdTypeName, - IrInstructionIdCanImplicitCast, IrInstructionIdDeclRef, IrInstructionIdPanic, IrInstructionIdTagName, IrInstructionIdTagType, IrInstructionIdFieldParentPtr, IrInstructionIdOffsetOf, + IrInstructionIdTypeInfo, IrInstructionIdTypeId, IrInstructionIdSetEvalBranchQuota, - 
IrInstructionIdPtrTypeOf, + IrInstructionIdPtrType, IrInstructionIdAlignCast, IrInstructionIdOpaqueType, IrInstructionIdSetAlignStack, @@ -2041,12 +2131,17 @@ enum IrInstructionId { IrInstructionIdCoroPromise, IrInstructionIdCoroAllocHelper, IrInstructionIdAtomicRmw, + IrInstructionIdAtomicLoad, IrInstructionIdPromiseResultType, IrInstructionIdAwaitBookkeeping, IrInstructionIdSaveErrRetAddr, IrInstructionIdAddImplicitReturnType, IrInstructionIdMergeErrRetTraces, IrInstructionIdMarkErrRetTracePtr, + IrInstructionIdSqrt, + IrInstructionIdErrSetCast, + IrInstructionIdToBytes, + IrInstructionIdFromBytes, }; struct IrInstruction { @@ -2094,6 +2189,7 @@ struct IrInstructionSwitchBr { size_t case_count; IrInstructionSwitchBrCase *cases; IrInstruction *is_comptime; + IrInstruction *switch_prongs_void; }; struct IrInstructionSwitchVar { @@ -2123,7 +2219,7 @@ enum IrUnOp { IrUnOpNegation, IrUnOpNegationWrap, IrUnOpDereference, - IrUnOpMaybe, + IrUnOpOptional, }; struct IrInstructionUnOp { @@ -2203,7 +2299,8 @@ struct IrInstructionFieldPtr { IrInstruction base; IrInstruction *container_ptr; - Buf *field_name; + Buf *field_name_buffer; + IrInstruction *field_name_expr; bool is_const; }; @@ -2228,6 +2325,7 @@ struct IrInstructionElemPtr { IrInstruction *array_ptr; IrInstruction *elem_index; + PtrLen ptr_len; bool is_const; bool safety_check_on; }; @@ -2236,8 +2334,6 @@ struct IrInstructionVarPtr { IrInstruction base; VariableTableEntry *var; - bool is_const; - bool is_volatile; }; struct IrInstructionCall { @@ -2253,6 +2349,7 @@ struct IrInstructionCall { bool is_async; IrInstruction *async_allocator; + IrInstruction *new_stack; }; struct IrInstructionConst { @@ -2373,6 +2470,18 @@ struct IrInstructionArrayType { IrInstruction *child_type; }; +struct IrInstructionPtrType { + IrInstruction base; + + IrInstruction *align_value; + IrInstruction *child_type; + uint32_t bit_offset_start; + uint32_t bit_offset_end; + PtrLen ptr_len; + bool is_const; + bool is_volatile; +}; + struct IrInstructionPromiseType { IrInstruction base; @@ -2413,7 +2522,7 @@ struct IrInstructionTestNonNull { IrInstruction *value; }; -struct IrInstructionUnwrapMaybe { +struct IrInstructionUnwrapOptional { IrInstruction base; IrInstruction *value; @@ -2432,6 +2541,12 @@ struct IrInstructionClz { IrInstruction *value; }; +struct IrInstructionPopCount { + IrInstruction base; + + IrInstruction *value; +}; + struct IrInstructionUnionTag { IrInstruction base; @@ -2522,6 +2637,7 @@ struct IrInstructionEmbedFile { struct IrInstructionCmpxchg { IrInstruction base; + IrInstruction *type_value; IrInstruction *ptr; IrInstruction *cmp_value; IrInstruction *new_value; @@ -2529,8 +2645,13 @@ struct IrInstructionCmpxchg { IrInstruction *failure_order_value; // if this instruction gets to runtime then we know these values: + TypeTableEntry *type; AtomicOrder success_order; AtomicOrder failure_order; + + bool is_weak; + + LLVMValueRef tmp_ptr; }; struct IrInstructionFence { @@ -2549,6 +2670,60 @@ struct IrInstructionTruncate { IrInstruction *target; }; +struct IrInstructionIntCast { + IrInstruction base; + + IrInstruction *dest_type; + IrInstruction *target; +}; + +struct IrInstructionFloatCast { + IrInstruction base; + + IrInstruction *dest_type; + IrInstruction *target; +}; + +struct IrInstructionErrSetCast { + IrInstruction base; + + IrInstruction *dest_type; + IrInstruction *target; +}; + +struct IrInstructionToBytes { + IrInstruction base; + + IrInstruction *target; +}; + +struct IrInstructionFromBytes { + IrInstruction base; + + IrInstruction 
*dest_child_type; + IrInstruction *target; +}; + +struct IrInstructionIntToFloat { + IrInstruction base; + + IrInstruction *dest_type; + IrInstruction *target; +}; + +struct IrInstructionFloatToInt { + IrInstruction base; + + IrInstruction *dest_type; + IrInstruction *target; +}; + +struct IrInstructionBoolToInt { + IrInstruction base; + + IrInstruction *target; +}; + struct IrInstructionIntType { IrInstruction base; @@ -2620,6 +2795,10 @@ struct IrInstructionFrameAddress { IrInstruction base; }; +struct IrInstructionHandle { + IrInstruction base; +}; + enum IrOverflowOp { IrOverflowOpAdd, IrOverflowOpSub, @@ -2665,7 +2844,7 @@ struct IrInstructionUnwrapErrPayload { bool safety_check_on; }; -struct IrInstructionMaybeWrap { +struct IrInstructionOptionalWrap { IrInstruction base; IrInstruction *value; @@ -2739,6 +2918,13 @@ struct IrInstructionIntToPtr { struct IrInstructionIntToEnum { IrInstruction base; + IrInstruction *dest_type; + IrInstruction *target; +}; + +struct IrInstructionEnumToInt { + IrInstruction base; + IrInstruction *target; }; @@ -2780,11 +2966,9 @@ struct IrInstructionTypeName { IrInstruction *type_value; }; -struct IrInstructionCanImplicitCast { - IrInstruction base; - - IrInstruction *type_value; - IrInstruction *target_value; +enum LVal { + LValNone, + LValPtr, }; struct IrInstructionDeclRef { @@ -2828,27 +3012,22 @@ struct IrInstructionOffsetOf { IrInstruction *field_name; }; -struct IrInstructionTypeId { +struct IrInstructionTypeInfo { IrInstruction base; IrInstruction *type_value; }; -struct IrInstructionSetEvalBranchQuota { +struct IrInstructionTypeId { IrInstruction base; - IrInstruction *new_quota; + IrInstruction *type_value; }; -struct IrInstructionPtrTypeOf { +struct IrInstructionSetEvalBranchQuota { IrInstruction base; - IrInstruction *align_value; - IrInstruction *child_type; - uint32_t bit_offset_start; - uint32_t bit_offset_end; - bool is_const; - bool is_volatile; + IrInstruction *new_quota; }; struct IrInstructionAlignCast { @@ -2886,10 +3065,10 @@ struct IrInstructionExport { struct IrInstructionErrorReturnTrace { IrInstruction base; - enum Nullable { + enum Optional { Null, NonNull, - } nullable; + } optional; }; struct IrInstructionErrorUnion { @@ -3000,6 +3179,15 @@ struct IrInstructionAtomicRmw { AtomicOrder resolved_ordering; }; +struct IrInstructionAtomicLoad { + IrInstruction base; + + IrInstruction *operand_type; + IrInstruction *ptr; + IrInstruction *ordering; + AtomicOrder resolved_ordering; +}; + struct IrInstructionPromiseResultType { IrInstruction base; @@ -3036,6 +3224,13 @@ struct IrInstructionMarkErrRetTracePtr { IrInstruction *err_ret_trace_ptr; }; +struct IrInstructionSqrt { + IrInstruction base; + + IrInstruction *type; + IrInstruction *op; +}; + static const size_t slice_ptr_index = 0; static const size_t slice_len_index = 1; @@ -3054,7 +3249,7 @@ static const size_t stack_trace_ptr_count = 30; #define RESULT_FIELD_NAME "result" #define ASYNC_ALLOC_FIELD_NAME "allocFn" #define ASYNC_FREE_FIELD_NAME "freeFn" -#define AWAITER_HANDLE_FIELD_NAME "awaiter_handle" +#define ATOMIC_STATE_FIELD_NAME "atomic_state" // these point to data belonging to the awaiter #define ERR_RET_TRACE_PTR_FIELD_NAME "err_ret_trace_ptr" #define RESULT_PTR_FIELD_NAME "result_ptr" diff --git a/src/analyze.cpp b/src/analyze.cpp index c73e6b39e3..03cfa5b67b 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -25,6 +25,7 @@ static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type); static void resolve_struct_zero_bits(CodeGen *g, TypeTableEntry 
*struct_type); static void resolve_enum_zero_bits(CodeGen *g, TypeTableEntry *enum_type); static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type); +static void analyze_fn_body(CodeGen *g, FnTableEntry *fn_table_entry); ErrorMsg *add_node_error(CodeGen *g, AstNode *node, Buf *msg) { if (node->owner->c_import_node != nullptr) { @@ -156,6 +157,13 @@ ScopeLoop *create_loop_scope(AstNode *node, Scope *parent) { return scope; } +ScopeSuspend *create_suspend_scope(AstNode *node, Scope *parent) { + assert(node->type == NodeTypeSuspend); + ScopeSuspend *scope = allocate<ScopeSuspend>(1); + init_scope(&scope->base, ScopeIdSuspend, node, parent); + return scope; +} + ScopeFnDef *create_fndef_scope(AstNode *node, Scope *parent, FnTableEntry *fn_entry) { ScopeFnDef *scope = allocate<ScopeFnDef>(1); init_scope(&scope->base, ScopeIdFnDef, node, parent); @@ -203,6 +211,43 @@ static uint8_t bits_needed_for_unsigned(uint64_t x) { return (upper >= x) ? base : (base + 1); } +AstNode *type_decl_node(TypeTableEntry *type_entry) { + switch (type_entry->id) { + case TypeTableEntryIdInvalid: + zig_unreachable(); + case TypeTableEntryIdStruct: + return type_entry->data.structure.decl_node; + case TypeTableEntryIdEnum: + return type_entry->data.enumeration.decl_node; + case TypeTableEntryIdUnion: + return type_entry->data.unionation.decl_node; + case TypeTableEntryIdOpaque: + case TypeTableEntryIdMetaType: + case TypeTableEntryIdVoid: + case TypeTableEntryIdBool: + case TypeTableEntryIdUnreachable: + case TypeTableEntryIdInt: + case TypeTableEntryIdFloat: + case TypeTableEntryIdPointer: + case TypeTableEntryIdArray: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: + case TypeTableEntryIdOptional: + case TypeTableEntryIdErrorUnion: + case TypeTableEntryIdErrorSet: + case TypeTableEntryIdFn: + case TypeTableEntryIdNamespace: + case TypeTableEntryIdBlock: + case TypeTableEntryIdBoundFn: + case TypeTableEntryIdArgTuple: + case TypeTableEntryIdPromise: + return nullptr; + } + zig_unreachable(); +} + bool type_is_complete(TypeTableEntry *type_entry) { switch (type_entry->id) { case TypeTableEntryIdInvalid: @@ -223,11 +268,11 @@ bool type_is_complete(TypeTableEntry *type_entry) { case TypeTableEntryIdFloat: case TypeTableEntryIdPointer: case TypeTableEntryIdArray: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdFn: @@ -259,11 +304,11 @@ bool type_has_zero_bits_known(TypeTableEntry *type_entry) { case TypeTableEntryIdFloat: case TypeTableEntryIdPointer: case TypeTableEntryIdArray: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdFn: @@ -372,14 +417,15 @@ TypeTableEntry *get_promise_type(CodeGen *g, TypeTableEntry *result_type) { } TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, 
TypeTableEntry *child_type, bool is_const, - bool is_volatile, uint32_t byte_alignment, uint32_t bit_offset, uint32_t unaligned_bit_count) + bool is_volatile, PtrLen ptr_len, uint32_t byte_alignment, uint32_t bit_offset, uint32_t unaligned_bit_count) { assert(!type_is_invalid(child_type)); + assert(ptr_len == PtrLenSingle || child_type->id != TypeTableEntryIdOpaque); TypeId type_id = {}; TypeTableEntry **parent_pointer = nullptr; uint32_t abi_alignment = get_abi_alignment(g, child_type); - if (unaligned_bit_count != 0 || is_volatile || byte_alignment != abi_alignment) { + if (unaligned_bit_count != 0 || is_volatile || byte_alignment != abi_alignment || ptr_len != PtrLenSingle) { type_id.id = TypeTableEntryIdPointer; type_id.data.pointer.child_type = child_type; type_id.data.pointer.is_const = is_const; @@ -387,6 +433,7 @@ TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type type_id.data.pointer.alignment = byte_alignment; type_id.data.pointer.bit_offset = bit_offset; type_id.data.pointer.unaligned_bit_count = unaligned_bit_count; + type_id.data.pointer.ptr_len = ptr_len; auto existing_entry = g->type_table.maybe_get(type_id); if (existing_entry) @@ -405,16 +452,17 @@ TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdPointer); entry->is_copyable = true; + const char *star_str = ptr_len == PtrLenSingle ? "*" : "[*]"; const char *const_str = is_const ? "const " : ""; const char *volatile_str = is_volatile ? "volatile " : ""; buf_resize(&entry->name, 0); if (unaligned_bit_count == 0 && byte_alignment == abi_alignment) { - buf_appendf(&entry->name, "&%s%s%s", const_str, volatile_str, buf_ptr(&child_type->name)); + buf_appendf(&entry->name, "%s%s%s%s", star_str, const_str, volatile_str, buf_ptr(&child_type->name)); } else if (unaligned_bit_count == 0) { - buf_appendf(&entry->name, "&align(%" PRIu32 ") %s%s%s", byte_alignment, + buf_appendf(&entry->name, "%salign(%" PRIu32 ") %s%s%s", star_str, byte_alignment, const_str, volatile_str, buf_ptr(&child_type->name)); } else { - buf_appendf(&entry->name, "&align(%" PRIu32 ":%" PRIu32 ":%" PRIu32 ") %s%s%s", byte_alignment, + buf_appendf(&entry->name, "%salign(%" PRIu32 ":%" PRIu32 ":%" PRIu32 ") %s%s%s", star_str, byte_alignment, bit_offset, bit_offset + unaligned_bit_count, const_str, volatile_str, buf_ptr(&child_type->name)); } @@ -424,7 +472,9 @@ TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type if (!entry->zero_bits) { assert(byte_alignment > 0); - if (is_const || is_volatile || unaligned_bit_count != 0 || byte_alignment != abi_alignment) { + if (is_const || is_volatile || unaligned_bit_count != 0 || byte_alignment != abi_alignment || + ptr_len != PtrLenSingle) + { TypeTableEntry *peer_type = get_pointer_to_type(g, child_type, false); entry->type_ref = peer_type->type_ref; entry->di_type = peer_type->di_type; @@ -442,6 +492,7 @@ TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type entry->di_type = g->builtin_types.entry_void->di_type; } + entry->data.pointer.ptr_len = ptr_len; entry->data.pointer.child_type = child_type; entry->data.pointer.is_const = is_const; entry->data.pointer.is_volatile = is_volatile; @@ -458,7 +509,8 @@ TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type } TypeTableEntry *get_pointer_to_type(CodeGen *g, TypeTableEntry *child_type, bool is_const) { - return get_pointer_to_type_extra(g, child_type, is_const, false, 
get_abi_alignment(g, child_type), 0, 0); + return get_pointer_to_type_extra(g, child_type, is_const, false, PtrLenSingle, + get_abi_alignment(g, child_type), 0, 0); } TypeTableEntry *get_promise_frame_type(CodeGen *g, TypeTableEntry *return_type) { @@ -466,11 +518,11 @@ TypeTableEntry *get_promise_frame_type(CodeGen *g, TypeTableEntry *return_type) return return_type->promise_frame_parent; } - TypeTableEntry *awaiter_handle_type = get_maybe_type(g, g->builtin_types.entry_promise); + TypeTableEntry *atomic_state_type = g->builtin_types.entry_usize; TypeTableEntry *result_ptr_type = get_pointer_to_type(g, return_type, false); ZigList<const char *> field_names = {}; - field_names.append(AWAITER_HANDLE_FIELD_NAME); + field_names.append(ATOMIC_STATE_FIELD_NAME); field_names.append(RESULT_FIELD_NAME); field_names.append(RESULT_PTR_FIELD_NAME); if (g->have_err_ret_tracing) { @@ -480,7 +532,7 @@ TypeTableEntry *get_promise_frame_type(CodeGen *g, TypeTableEntry *return_type) } ZigList<TypeTableEntry *> field_types = {}; - field_types.append(awaiter_handle_type); + field_types.append(atomic_state_type); field_types.append(return_type); field_types.append(result_ptr_type); if (g->have_err_ret_tracing) { @@ -497,16 +549,15 @@ TypeTableEntry *get_promise_frame_type(CodeGen *g, TypeTableEntry *return_type) return entry; } -TypeTableEntry *get_maybe_type(CodeGen *g, TypeTableEntry *child_type) { - if (child_type->maybe_parent) { - TypeTableEntry *entry = child_type->maybe_parent; +TypeTableEntry *get_optional_type(CodeGen *g, TypeTableEntry *child_type) { + if (child_type->optional_parent) { + TypeTableEntry *entry = child_type->optional_parent; return entry; } else { ensure_complete_type(g, child_type); - TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdMaybe); + TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdOptional); assert(child_type->type_ref || child_type->zero_bits); - assert(child_type->di_type); entry->is_copyable = type_is_copyable(g, child_type); buf_resize(&entry->name, 0); @@ -516,12 +567,14 @@ TypeTableEntry *get_maybe_type(CodeGen *g, TypeTableEntry *child_type) { entry->type_ref = LLVMInt1Type(); entry->di_type = g->builtin_types.entry_bool->di_type; } else if (type_is_codegen_pointer(child_type)) { + assert(child_type->di_type); // this is an optimization but also is necessary for calling C // functions where all pointers are maybe pointers // function types are technically pointers entry->type_ref = child_type->type_ref; entry->di_type = child_type->di_type; } else { + assert(child_type->di_type); // create a struct with a boolean whether this is the null value LLVMTypeRef elem_types[] = { child_type->type_ref, @@ -575,7 +628,7 @@ TypeTableEntry *get_maybe_type(CodeGen *g, TypeTableEntry *child_type) { entry->data.maybe.child_type = child_type; - child_type->maybe_parent = entry; + child_type->optional_parent = entry; return entry; } } @@ -748,6 +801,7 @@ static void slice_type_common_init(CodeGen *g, TypeTableEntry *pointer_type, Typ TypeTableEntry *get_slice_type(CodeGen *g, TypeTableEntry *ptr_type) { assert(ptr_type->id == TypeTableEntryIdPointer); + assert(ptr_type->data.pointer.ptr_len == PtrLenUnknown); TypeTableEntry **parent_pointer = &ptr_type->data.pointer.slice_parent; if (*parent_pointer) { @@ -759,14 +813,16 @@ TypeTableEntry *get_slice_type(CodeGen *g, TypeTableEntry *ptr_type) { // replace the & with [] to go from a ptr type name to a slice type name buf_resize(&entry->name, 0); - buf_appendf(&entry->name, "[]%s", buf_ptr(&ptr_type->name) + 1); 
+ size_t name_offset = (ptr_type->data.pointer.ptr_len == PtrLenSingle) ? 1 : 3; + buf_appendf(&entry->name, "[]%s", buf_ptr(&ptr_type->name) + name_offset); TypeTableEntry *child_type = ptr_type->data.pointer.child_type; - uint32_t abi_alignment; + uint32_t abi_alignment = get_abi_alignment(g, child_type); if (ptr_type->data.pointer.is_const || ptr_type->data.pointer.is_volatile || - ptr_type->data.pointer.alignment != (abi_alignment = get_abi_alignment(g, child_type))) + ptr_type->data.pointer.alignment != abi_alignment) { - TypeTableEntry *peer_ptr_type = get_pointer_to_type(g, child_type, false); + TypeTableEntry *peer_ptr_type = get_pointer_to_type_extra(g, child_type, false, false, + PtrLenUnknown, abi_alignment, 0, 0); TypeTableEntry *peer_slice_type = get_slice_type(g, peer_ptr_type); slice_type_common_init(g, ptr_type, entry); @@ -790,9 +846,11 @@ TypeTableEntry *get_slice_type(CodeGen *g, TypeTableEntry *ptr_type) { if (child_ptr_type->data.pointer.is_const || child_ptr_type->data.pointer.is_volatile || child_ptr_type->data.pointer.alignment != get_abi_alignment(g, grand_child_type)) { - TypeTableEntry *bland_child_ptr_type = get_pointer_to_type(g, grand_child_type, false); + TypeTableEntry *bland_child_ptr_type = get_pointer_to_type_extra(g, grand_child_type, false, false, + PtrLenUnknown, get_abi_alignment(g, grand_child_type), 0, 0); TypeTableEntry *bland_child_slice = get_slice_type(g, bland_child_ptr_type); - TypeTableEntry *peer_ptr_type = get_pointer_to_type(g, bland_child_slice, false); + TypeTableEntry *peer_ptr_type = get_pointer_to_type_extra(g, bland_child_slice, false, false, + PtrLenUnknown, get_abi_alignment(g, bland_child_slice), 0, 0); TypeTableEntry *peer_slice_type = get_slice_type(g, peer_ptr_type); entry->type_ref = peer_slice_type->type_ref; @@ -998,8 +1056,11 @@ TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) { } if (fn_type_id->return_type != nullptr) { ensure_complete_type(g, fn_type_id->return_type); + if (type_is_invalid(fn_type_id->return_type)) + return g->builtin_types.entry_invalid; + assert(fn_type_id->return_type->id != TypeTableEntryIdOpaque); } else { - zig_panic("TODO implement inferred return types https://github.com/zig-lang/zig/issues/447"); + zig_panic("TODO implement inferred return types https://github.com/ziglang/zig/issues/447"); } TypeTableEntry *fn_type = new_type_table_entry(TypeTableEntryIdFn); @@ -1111,7 +1172,10 @@ TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) { gen_param_info->src_index = i; gen_param_info->gen_index = SIZE_MAX; - type_ensure_zero_bits_known(g, type_entry); + ensure_complete_type(g, type_entry); + if (type_is_invalid(type_entry)) + return g->builtin_types.entry_invalid; + if (type_has_bits(type_entry)) { TypeTableEntry *gen_type; if (handle_is_ptr(type_entry)) { @@ -1250,7 +1314,7 @@ void init_fn_type_id(FnTypeId *fn_type_id, AstNode *proto_node, size_t param_cou } fn_type_id->param_count = fn_proto->params.length; - fn_type_id->param_info = allocate_nonzero<FnTypeParamInfo>(param_count_alloc); + fn_type_id->param_info = allocate<FnTypeParamInfo>(param_count_alloc); fn_type_id->next_param_index = 0; fn_type_id->is_var_args = fn_proto->is_var_args; } @@ -1275,7 +1339,8 @@ static bool analyze_const_align(CodeGen *g, Scope *scope, AstNode *node, uint32_ } static bool analyze_const_string(CodeGen *g, Scope *scope, AstNode *node, Buf **out_buffer) { - TypeTableEntry *ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, true); + TypeTableEntry *ptr_type = 
get_pointer_to_type_extra(g, g->builtin_types.entry_u8, true, false, + PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0); TypeTableEntry *str_type = get_slice_type(g, ptr_type); IrInstruction *instr = analyze_const_value(g, scope, node, str_type, nullptr); if (type_is_invalid(instr->value.type)) @@ -1312,10 +1377,10 @@ static bool type_allowed_in_packed_struct(TypeTableEntry *type_entry) { zig_unreachable(); case TypeTableEntryIdMetaType: case TypeTableEntryIdUnreachable: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdNamespace: @@ -1337,7 +1402,7 @@ static bool type_allowed_in_packed_struct(TypeTableEntry *type_entry) { return type_entry->data.structure.layout == ContainerLayoutPacked; case TypeTableEntryIdUnion: return type_entry->data.unionation.layout == ContainerLayoutPacked; - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: { TypeTableEntry *child_type = type_entry->data.maybe.child_type; return type_is_codegen_pointer(child_type); @@ -1353,10 +1418,10 @@ static bool type_allowed_in_extern(CodeGen *g, TypeTableEntry *type_entry) { case TypeTableEntryIdInvalid: zig_unreachable(); case TypeTableEntryIdMetaType: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdNamespace: @@ -1364,10 +1429,10 @@ static bool type_allowed_in_extern(CodeGen *g, TypeTableEntry *type_entry) { case TypeTableEntryIdBoundFn: case TypeTableEntryIdArgTuple: case TypeTableEntryIdPromise: + case TypeTableEntryIdVoid: return false; case TypeTableEntryIdOpaque: case TypeTableEntryIdUnreachable: - case TypeTableEntryIdVoid: case TypeTableEntryIdBool: return true; case TypeTableEntryIdInt: @@ -1388,13 +1453,18 @@ static bool type_allowed_in_extern(CodeGen *g, TypeTableEntry *type_entry) { case TypeTableEntryIdFn: return type_entry->data.fn.fn_type_id.cc == CallingConventionC; case TypeTableEntryIdPointer: - return type_allowed_in_extern(g, type_entry->data.pointer.child_type); + if (type_size(g, type_entry) == 0) + return false; + return true; case TypeTableEntryIdStruct: return type_entry->data.structure.layout == ContainerLayoutExtern || type_entry->data.structure.layout == ContainerLayoutPacked; - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: { TypeTableEntry *child_type = type_entry->data.maybe.child_type; - return child_type->id == TypeTableEntryIdPointer || child_type->id == TypeTableEntryIdFn; + if (child_type->id != TypeTableEntryIdPointer && child_type->id != TypeTableEntryIdFn) { + return false; + } + return type_allowed_in_extern(g, child_type); } case TypeTableEntryIdEnum: return type_entry->data.enumeration.layout == ContainerLayoutExtern || type_entry->data.enumeration.layout == ContainerLayoutPacked; @@ -1441,6 +1511,17 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c calling_convention_name(fn_type_id.cc))); return g->builtin_types.entry_invalid; } + if (param_node->data.param_decl.type != nullptr) { + TypeTableEntry 
*type_entry = analyze_type_expr(g, child_scope, param_node->data.param_decl.type); + if (type_is_invalid(type_entry)) { + return g->builtin_types.entry_invalid; + } + FnTypeParamInfo *param_info = &fn_type_id.param_info[fn_type_id.next_param_index]; + param_info->type = type_entry; + param_info->is_noalias = param_node->data.param_decl.is_noalias; + fn_type_id.next_param_index += 1; + } + return get_generic_fn_type(g, &fn_type_id); } else if (param_is_var_args) { if (fn_type_id.cc == CallingConventionC) { @@ -1490,23 +1571,19 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c case TypeTableEntryIdInvalid: return g->builtin_types.entry_invalid; case TypeTableEntryIdUnreachable: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: case TypeTableEntryIdArgTuple: case TypeTableEntryIdOpaque: add_node_error(g, param_node->data.param_decl.type, buf_sprintf("parameter of type '%s' not allowed", buf_ptr(&type_entry->name))); return g->builtin_types.entry_invalid; - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: case TypeTableEntryIdBoundFn: case TypeTableEntryIdMetaType: - add_node_error(g, param_node->data.param_decl.type, - buf_sprintf("parameter of type '%s' must be declared comptime", - buf_ptr(&type_entry->name))); - return g->builtin_types.entry_invalid; case TypeTableEntryIdVoid: case TypeTableEntryIdBool: case TypeTableEntryIdInt: @@ -1514,17 +1591,18 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c case TypeTableEntryIdPointer: case TypeTableEntryIdArray: case TypeTableEntryIdStruct: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdEnum: case TypeTableEntryIdUnion: case TypeTableEntryIdFn: case TypeTableEntryIdPromise: - ensure_complete_type(g, type_entry); - if (calling_convention_allows_zig_types(fn_type_id.cc) && !type_is_copyable(g, type_entry)) { + type_ensure_zero_bits_known(g, type_entry); + if (type_requires_comptime(type_entry)) { add_node_error(g, param_node->data.param_decl.type, - buf_sprintf("type '%s' is not copyable; cannot pass by value", buf_ptr(&type_entry->name))); + buf_sprintf("parameter of type '%s' must be declared comptime", + buf_ptr(&type_entry->name))); return g->builtin_types.entry_invalid; } break; @@ -1548,7 +1626,7 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c return g->builtin_types.entry_invalid; } add_node_error(g, proto_node, - buf_sprintf("TODO implement inferred return types https://github.com/zig-lang/zig/issues/447")); + buf_sprintf("TODO implement inferred return types https://github.com/ziglang/zig/issues/447")); return g->builtin_types.entry_invalid; //return get_generic_fn_type(g, &fn_type_id); } @@ -1566,7 +1644,10 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c fn_type_id.return_type = specified_return_type; } - if (!calling_convention_allows_zig_types(fn_type_id.cc) && !type_allowed_in_extern(g, fn_type_id.return_type)) { + if (!calling_convention_allows_zig_types(fn_type_id.cc) && + fn_type_id.return_type->id != TypeTableEntryIdVoid && + !type_allowed_in_extern(g, fn_type_id.return_type)) + { add_node_error(g, fn_proto->return_type, buf_sprintf("return type '%s' not allowed in 
function with calling convention '%s'", buf_ptr(&fn_type_id.return_type->name), @@ -1578,16 +1659,16 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c case TypeTableEntryIdInvalid: zig_unreachable(); - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: case TypeTableEntryIdArgTuple: case TypeTableEntryIdOpaque: add_node_error(g, fn_proto->return_type, buf_sprintf("return type '%s' not allowed", buf_ptr(&fn_type_id.return_type->name))); return g->builtin_types.entry_invalid; - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: case TypeTableEntryIdBoundFn: @@ -1608,7 +1689,7 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c case TypeTableEntryIdPointer: case TypeTableEntryIdArray: case TypeTableEntryIdStruct: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdEnum: @@ -1839,7 +1920,7 @@ static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type) { } assert(!struct_type->data.structure.zero_bits_loop_flag); - assert(struct_type->data.structure.fields); + assert(struct_type->data.structure.fields || struct_type->data.structure.src_field_count == 0); assert(decl_node->type == NodeTypeContainerDecl); size_t field_count = struct_type->data.structure.src_field_count; @@ -1868,6 +1949,17 @@ static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type) { break; } + if (struct_type->data.structure.layout == ContainerLayoutExtern) { + if (!type_allowed_in_extern(g, field_type)) { + AstNode *field_source_node = decl_node->data.container_decl.fields.at(i); + add_node_error(g, field_source_node, + buf_sprintf("extern structs cannot contain fields of type '%s'", + buf_ptr(&field_type->name))); + struct_type->data.structure.is_invalid = true; + break; + } + } + if (!type_has_bits(field_type)) continue; @@ -2284,8 +2376,9 @@ static void resolve_enum_zero_bits(CodeGen *g, TypeTableEntry *enum_type) { return; if (enum_type->data.enumeration.zero_bits_loop_flag) { - enum_type->data.enumeration.zero_bits_known = true; - enum_type->data.enumeration.zero_bits_loop_flag = false; + add_node_error(g, enum_type->data.enumeration.decl_node, + buf_sprintf("'%s' depends on itself", buf_ptr(&enum_type->name))); + enum_type->data.enumeration.is_invalid = true; return; } @@ -2317,8 +2410,14 @@ static void resolve_enum_zero_bits(CodeGen *g, TypeTableEntry *enum_type) { HashMap<BigInt, AstNode *, bigint_hash, bigint_eql> occupied_tag_values = {}; occupied_tag_values.init(field_count); - TypeTableEntry *tag_int_type = get_smallest_unsigned_int_type(g, field_count - 1); + TypeTableEntry *tag_int_type; + if (enum_type->data.enumeration.layout == ContainerLayoutExtern) { + tag_int_type = get_c_int_type(g, CIntTypeInt); + } else { + tag_int_type = get_smallest_unsigned_int_type(g, field_count - 1); + } + // TODO: Are extern enums allowed to have an init_arg_expr? 
if (decl_node->data.container_decl.init_arg_expr != nullptr) { TypeTableEntry *wanted_tag_int_type = analyze_type_expr(g, scope, decl_node->data.container_decl.init_arg_expr); if (type_is_invalid(wanted_tag_int_type)) { @@ -2503,6 +2602,10 @@ static void resolve_struct_zero_bits(CodeGen *g, TypeTableEntry *struct_type) { continue; } + if (type_requires_comptime(field_type)) { + struct_type->data.structure.requires_comptime = true; + } + if (!type_has_bits(field_type)) continue; @@ -2650,8 +2753,8 @@ static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type) { return; } tag_type = enum_type; + abi_alignment_so_far = get_abi_alignment(g, enum_type); // this populates src_field_count covered_enum_fields = allocate<bool>(enum_type->data.enumeration.src_field_count); - abi_alignment_so_far = get_abi_alignment(g, enum_type); } else { tag_type = nullptr; abi_alignment_so_far = 0; @@ -2694,6 +2797,11 @@ static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type) { } union_field->type_entry = field_type; + if (type_requires_comptime(field_type)) { + union_type->data.unionation.requires_comptime = true; + } + + if (field_node->data.struct_field.value != nullptr && !decl_node->data.container_decl.auto_enum) { ErrorMsg *msg = add_node_error(g, field_node->data.struct_field.value, buf_sprintf("non-enum union field assignment")); @@ -2939,14 +3047,15 @@ static void typecheck_panic_fn(CodeGen *g, FnTableEntry *panic_fn) { if (fn_type_id->param_count != 2) { return wrong_panic_prototype(g, proto_node, fn_type); } - TypeTableEntry *const_u8_ptr = get_pointer_to_type(g, g->builtin_types.entry_u8, true); + TypeTableEntry *const_u8_ptr = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, true, false, + PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0); TypeTableEntry *const_u8_slice = get_slice_type(g, const_u8_ptr); if (fn_type_id->param_info[0].type != const_u8_slice) { return wrong_panic_prototype(g, proto_node, fn_type); } - TypeTableEntry *nullable_ptr_to_stack_trace_type = get_maybe_type(g, get_ptr_to_stack_trace_type(g)); - if (fn_type_id->param_info[1].type != nullable_ptr_to_stack_trace_type) { + TypeTableEntry *optional_ptr_to_stack_trace_type = get_optional_type(g, get_ptr_to_stack_trace_type(g)); + if (fn_type_id->param_info[1].type != optional_ptr_to_stack_trace_type) { return wrong_panic_prototype(g, proto_node, fn_type); } @@ -3122,9 +3231,8 @@ static void add_top_level_decl(CodeGen *g, ScopeDecls *decls_scope, Tld *tld) { } { - auto entry = g->primitive_type_table.maybe_get(tld->name); - if (entry) { - TypeTableEntry *type = entry->value; + TypeTableEntry *type = get_primitive_type(g, tld->name); + if (type != nullptr) { add_node_error(g, tld->source_node, buf_sprintf("declaration shadows type '%s'", buf_ptr(&type->name))); } @@ -3255,7 +3363,7 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) { case NodeTypeThisLiteral: case NodeTypeSymbol: case NodeTypePrefixOpExpr: - case NodeTypeAddrOfExpr: + case NodeTypePointerType: case NodeTypeIfBoolExpr: case NodeTypeWhileExpr: case NodeTypeForExpr: @@ -3267,6 +3375,8 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) { case NodeTypeUnreachable: case NodeTypeAsmExpr: case NodeTypeFieldAccessExpr: + case NodeTypePtrDeref: + case NodeTypeUnwrapOptional: case NodeTypeStructField: case NodeTypeContainerInitExpr: case NodeTypeStructValueField: @@ -3308,16 +3418,16 @@ TypeTableEntry *validate_var_type(CodeGen *g, AstNode *source_node, TypeTableEnt case 
TypeTableEntryIdInvalid: return g->builtin_types.entry_invalid; case TypeTableEntryIdUnreachable: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: case TypeTableEntryIdBlock: case TypeTableEntryIdArgTuple: case TypeTableEntryIdOpaque: add_node_error(g, source_node, buf_sprintf("variable of type '%s' not allowed", buf_ptr(&type_entry->name))); return g->builtin_types.entry_invalid; + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdNamespace: case TypeTableEntryIdMetaType: case TypeTableEntryIdVoid: @@ -3327,7 +3437,7 @@ TypeTableEntry *validate_var_type(CodeGen *g, AstNode *source_node, TypeTableEnt case TypeTableEntryIdPointer: case TypeTableEntryIdArray: case TypeTableEntryIdStruct: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdEnum: @@ -3367,9 +3477,8 @@ VariableTableEntry *add_variable(CodeGen *g, AstNode *source_node, Scope *parent add_error_note(g, msg, existing_var->decl_node, buf_sprintf("previous declaration is here")); variable_entry->value->type = g->builtin_types.entry_invalid; } else { - auto primitive_table_entry = g->primitive_type_table.maybe_get(name); - if (primitive_table_entry) { - TypeTableEntry *type = primitive_table_entry->value; + TypeTableEntry *type = get_primitive_type(g, name); + if (type != nullptr) { add_node_error(g, source_node, buf_sprintf("variable shadows type '%s'", buf_ptr(&type->name))); variable_entry->value->type = g->builtin_types.entry_invalid; @@ -3451,12 +3560,12 @@ static void resolve_decl_var(CodeGen *g, TldVar *tld_var) { add_node_error(g, source_node, buf_sprintf("variable initialization is unreachable")); implicit_type = g->builtin_types.entry_invalid; } else if ((!is_const || linkage == VarLinkageExternal) && - (implicit_type->id == TypeTableEntryIdNumLitFloat || - implicit_type->id == TypeTableEntryIdNumLitInt)) + (implicit_type->id == TypeTableEntryIdComptimeFloat || + implicit_type->id == TypeTableEntryIdComptimeInt)) { add_node_error(g, source_node, buf_sprintf("unable to infer variable type")); implicit_type = g->builtin_types.entry_invalid; - } else if (implicit_type->id == TypeTableEntryIdNullLit) { + } else if (implicit_type->id == TypeTableEntryIdNull) { add_node_error(g, source_node, buf_sprintf("unable to infer variable type")); implicit_type = g->builtin_types.entry_invalid; } else if (implicit_type->id == TypeTableEntryIdMetaType && !is_const) { @@ -3616,6 +3725,7 @@ FnTableEntry *scope_get_fn_if_root(Scope *scope) { case ScopeIdVarDecl: case ScopeIdCImport: case ScopeIdLoop: + case ScopeIdSuspend: case ScopeIdCompTime: case ScopeIdCoroPrelude: scope = scope->parent; @@ -3674,6 +3784,7 @@ TypeUnionField *find_union_field_by_tag(TypeTableEntry *type_entry, const BigInt } TypeEnumField *find_enum_field_by_tag(TypeTableEntry *enum_type, const BigInt *tag) { + assert(enum_type->data.enumeration.zero_bits_known); for (uint32_t i = 0; i < enum_type->data.enumeration.src_field_count; i += 1) { TypeEnumField *field = &enum_type->data.enumeration.fields[i]; if (bigint_cmp(&field->value, tag) == CmpEQ) { @@ -3700,11 +3811,11 @@ static bool is_container(TypeTableEntry *type_entry) { case TypeTableEntryIdInt: case TypeTableEntryIdFloat: case TypeTableEntryIdArray: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case 
TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdFn: @@ -3719,14 +3830,24 @@ static bool is_container(TypeTableEntry *type_entry) { zig_unreachable(); } +bool is_ref(TypeTableEntry *type_entry) { + return type_entry->id == TypeTableEntryIdPointer && type_entry->data.pointer.ptr_len == PtrLenSingle; +} + +bool is_array_ref(TypeTableEntry *type_entry) { + TypeTableEntry *array = is_ref(type_entry) ? + type_entry->data.pointer.child_type : type_entry; + return array->id == TypeTableEntryIdArray; +} + bool is_container_ref(TypeTableEntry *type_entry) { - return (type_entry->id == TypeTableEntryIdPointer) ? + return is_ref(type_entry) ? is_container(type_entry->data.pointer.child_type) : is_container(type_entry); } TypeTableEntry *container_ref_type(TypeTableEntry *type_entry) { assert(is_container_ref(type_entry)); - return (type_entry->id == TypeTableEntryIdPointer) ? + return is_ref(type_entry) ? type_entry->data.pointer.child_type : type_entry; } @@ -3749,11 +3870,11 @@ void resolve_container_type(CodeGen *g, TypeTableEntry *type_entry) { case TypeTableEntryIdInt: case TypeTableEntryIdFloat: case TypeTableEntryIdArray: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdFn: @@ -3772,7 +3893,7 @@ TypeTableEntry *get_codegen_ptr_type(TypeTableEntry *type) { if (type->id == TypeTableEntryIdPointer) return type; if (type->id == TypeTableEntryIdFn) return type; if (type->id == TypeTableEntryIdPromise) return type; - if (type->id == TypeTableEntryIdMaybe) { + if (type->id == TypeTableEntryIdOptional) { if (type->data.maybe.child_type->id == TypeTableEntryIdPointer) return type->data.maybe.child_type; if (type->data.maybe.child_type->id == TypeTableEntryIdFn) return type->data.maybe.child_type; if (type->data.maybe.child_type->id == TypeTableEntryIdPromise) return type->data.maybe.child_type; @@ -3819,7 +3940,7 @@ AstNode *get_param_decl_node(FnTableEntry *fn_entry, size_t index) { return nullptr; } -static void define_local_param_variables(CodeGen *g, FnTableEntry *fn_table_entry, VariableTableEntry **arg_vars) { +static void define_local_param_variables(CodeGen *g, FnTableEntry *fn_table_entry) { TypeTableEntry *fn_type = fn_table_entry->type_entry; assert(!fn_type->data.fn.is_generic); FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id; @@ -3857,14 +3978,10 @@ static void define_local_param_variables(CodeGen *g, FnTableEntry *fn_table_entr if (fn_type->data.fn.gen_param_info) { var->gen_arg_index = fn_type->data.fn.gen_param_info[i].gen_index; } - - if (arg_vars) { - arg_vars[i] = var; - } } } -static bool analyze_resolve_inferred_error_set(CodeGen *g, TypeTableEntry *err_set_type, AstNode *source_node) { +bool resolve_inferred_error_set(CodeGen *g, TypeTableEntry *err_set_type, AstNode *source_node) { FnTableEntry *infer_fn = err_set_type->data.error_set.infer_fn; if (infer_fn != nullptr) { if (infer_fn->anal_state == FnAnalStateInvalid) { @@ 
-3916,7 +4033,7 @@ void analyze_fn_ir(CodeGen *g, FnTableEntry *fn_table_entry, AstNode *return_typ } if (inferred_err_set_type->data.error_set.infer_fn != nullptr) { - if (!analyze_resolve_inferred_error_set(g, inferred_err_set_type, return_type_node)) { + if (!resolve_inferred_error_set(g, inferred_err_set_type, return_type_node)) { fn_table_entry->anal_state = FnAnalStateInvalid; return; } @@ -3938,7 +4055,7 @@ void analyze_fn_ir(CodeGen *g, FnTableEntry *fn_table_entry, AstNode *return_typ } if (g->verbose_ir) { - fprintf(stderr, "{ // (analyzed)\n"); + fprintf(stderr, "fn %s() { // (analyzed)\n", buf_ptr(&fn_table_entry->symbol_name)); ir_print(g, stderr, &fn_table_entry->analyzed_executable, 4); fprintf(stderr, "}\n"); } @@ -3946,7 +4063,7 @@ void analyze_fn_ir(CodeGen *g, FnTableEntry *fn_table_entry, AstNode *return_typ fn_table_entry->anal_state = FnAnalStateComplete; } -void analyze_fn_body(CodeGen *g, FnTableEntry *fn_table_entry) { +static void analyze_fn_body(CodeGen *g, FnTableEntry *fn_table_entry) { assert(fn_table_entry->anal_state != FnAnalStateProbing); if (fn_table_entry->anal_state != FnAnalStateReady) return; @@ -3960,7 +4077,7 @@ void analyze_fn_body(CodeGen *g, FnTableEntry *fn_table_entry) { if (!fn_table_entry->child_scope) fn_table_entry->child_scope = &fn_table_entry->fndef_scope->base; - define_local_param_variables(g, fn_table_entry, nullptr); + define_local_param_variables(g, fn_table_entry); TypeTableEntry *fn_type = fn_table_entry->type_entry; assert(!fn_type->data.fn.is_generic); @@ -4188,43 +4305,7 @@ void semantic_analyze(CodeGen *g) { } } -TypeTableEntry **get_int_type_ptr(CodeGen *g, bool is_signed, uint32_t size_in_bits) { - size_t index; - if (size_in_bits == 2) { - index = 0; - } else if (size_in_bits == 3) { - index = 1; - } else if (size_in_bits == 4) { - index = 2; - } else if (size_in_bits == 5) { - index = 3; - } else if (size_in_bits == 6) { - index = 4; - } else if (size_in_bits == 7) { - index = 5; - } else if (size_in_bits == 8) { - index = 6; - } else if (size_in_bits == 16) { - index = 7; - } else if (size_in_bits == 29) { - index = 8; - } else if (size_in_bits == 32) { - index = 9; - } else if (size_in_bits == 64) { - index = 10; - } else if (size_in_bits == 128) { - index = 11; - } else { - return nullptr; - } - return &g->builtin_types.entry_int[is_signed ? 
0 : 1][index]; -} - TypeTableEntry *get_int_type(CodeGen *g, bool is_signed, uint32_t size_in_bits) { - TypeTableEntry **common_entry = get_int_type_ptr(g, is_signed, size_in_bits); - if (common_entry) - return *common_entry; - TypeId type_id = {}; type_id.id = TypeTableEntryIdInt; type_id.data.integer.is_signed = is_signed; @@ -4253,10 +4334,10 @@ bool handle_is_ptr(TypeTableEntry *type_entry) { switch (type_entry->id) { case TypeTableEntryIdInvalid: case TypeTableEntryIdMetaType: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: case TypeTableEntryIdBoundFn: @@ -4279,7 +4360,7 @@ bool handle_is_ptr(TypeTableEntry *type_entry) { return type_has_bits(type_entry); case TypeTableEntryIdErrorUnion: return type_has_bits(type_entry->data.error_union.payload_type); - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: return type_has_bits(type_entry->data.maybe.child_type) && !type_is_codegen_pointer(type_entry->data.maybe.child_type); case TypeTableEntryIdUnion: @@ -4296,8 +4377,9 @@ bool handle_is_ptr(TypeTableEntry *type_entry) { static ZigWindowsSDK *get_windows_sdk(CodeGen *g) { if (g->win_sdk == nullptr) { - if (os_find_windows_sdk(&g->win_sdk)) { - zig_panic("Unable to determine Windows SDK path."); + if (zig_find_windows_sdk(&g->win_sdk)) { + fprintf(stderr, "unable to determine windows sdk path\n"); + exit(1); } } assert(g->win_sdk != nullptr); @@ -4351,22 +4433,14 @@ Buf *get_linux_libc_include_path(void) { } char *prev_newline = buf_ptr(out_stderr); ZigList<const char *> search_paths = {}; - bool found_search_paths = false; for (;;) { char *newline = strchr(prev_newline, '\n'); if (newline == nullptr) { - zig_panic("unable to determine libc include path: bad output from C compiler command"); + break; } *newline = 0; - if (found_search_paths) { - if (strcmp(prev_newline, "End of search list.") == 0) { - break; - } + if (prev_newline[0] == ' ') { search_paths.append(prev_newline); - } else { - if (strcmp(prev_newline, "#include <...> search starts here:") == 0) { - found_search_paths = true; - } } prev_newline = newline + 1; } @@ -4399,7 +4473,8 @@ void find_libc_include_path(CodeGen *g) { ZigWindowsSDK *sdk = get_windows_sdk(g); g->libc_include_dir = buf_alloc(); if (os_get_win32_ucrt_include_path(sdk, g->libc_include_dir)) { - zig_panic("Unable to determine libc include path."); + fprintf(stderr, "Unable to determine libc include path. --libc-include-dir"); + exit(1); } } else if (g->zig_target.os == OsLinux) { g->libc_include_dir = get_linux_libc_include_path(); @@ -4421,24 +4496,32 @@ void find_libc_lib_path(CodeGen *g) { if (g->zig_target.os == OsWindows) { ZigWindowsSDK *sdk = get_windows_sdk(g); - Buf* vc_lib_dir = buf_alloc(); - if (os_get_win32_vcruntime_path(vc_lib_dir, g->zig_target.arch.arch)) { - zig_panic("Unable to determine vcruntime path."); + if (g->msvc_lib_dir == nullptr) { + if (sdk->msvc_lib_dir_ptr == nullptr) { + fprintf(stderr, "Unable to determine vcruntime path. 
--msvc-lib-dir"); + exit(1); + } + g->msvc_lib_dir = buf_create_from_mem(sdk->msvc_lib_dir_ptr, sdk->msvc_lib_dir_len); } - Buf* ucrt_lib_path = buf_alloc(); - if (os_get_win32_ucrt_lib_path(sdk, ucrt_lib_path, g->zig_target.arch.arch)) { - zig_panic("Unable to determine ucrt path."); + if (g->libc_lib_dir == nullptr) { + Buf* ucrt_lib_path = buf_alloc(); + if (os_get_win32_ucrt_lib_path(sdk, ucrt_lib_path, g->zig_target.arch.arch)) { + fprintf(stderr, "Unable to determine ucrt path. --libc-lib-dir"); + exit(1); + } + g->libc_lib_dir = ucrt_lib_path; } - Buf* kern_lib_path = buf_alloc(); - if (os_get_win32_kern32_path(sdk, kern_lib_path, g->zig_target.arch.arch)) { - zig_panic("Unable to determine kernel32 path."); + if (g->kernel32_lib_dir == nullptr) { + Buf* kern_lib_path = buf_alloc(); + if (os_get_win32_kern32_path(sdk, kern_lib_path, g->zig_target.arch.arch)) { + fprintf(stderr, "Unable to determine kernel32 path. --kernel32-lib-dir"); + exit(1); + } + g->kernel32_lib_dir = kern_lib_path; } - g->msvc_lib_dir = vc_lib_dir; - g->libc_lib_dir = ucrt_lib_path; - g->kernel32_lib_dir = kern_lib_path; } else if (g->zig_target.os == OsLinux) { g->libc_lib_dir = get_linux_libc_lib_path("crt1.o"); } else { @@ -4515,6 +4598,52 @@ bool fn_type_id_eql(FnTypeId *a, FnTypeId *b) { return true; } +static uint32_t hash_const_val_ptr(ConstExprValue *const_val) { + uint32_t hash_val = 0; + switch (const_val->data.x_ptr.mut) { + case ConstPtrMutRuntimeVar: + hash_val += (uint32_t)3500721036; + break; + case ConstPtrMutComptimeConst: + hash_val += (uint32_t)4214318515; + break; + case ConstPtrMutComptimeVar: + hash_val += (uint32_t)1103195694; + break; + } + switch (const_val->data.x_ptr.special) { + case ConstPtrSpecialInvalid: + zig_unreachable(); + case ConstPtrSpecialRef: + hash_val += (uint32_t)2478261866; + hash_val += hash_ptr(const_val->data.x_ptr.data.ref.pointee); + return hash_val; + case ConstPtrSpecialBaseArray: + hash_val += (uint32_t)1764906839; + hash_val += hash_ptr(const_val->data.x_ptr.data.base_array.array_val); + hash_val += hash_size(const_val->data.x_ptr.data.base_array.elem_index); + hash_val += const_val->data.x_ptr.data.base_array.is_cstr ? 
1297263887 : 200363492; + return hash_val; + case ConstPtrSpecialBaseStruct: + hash_val += (uint32_t)3518317043; + hash_val += hash_ptr(const_val->data.x_ptr.data.base_struct.struct_val); + hash_val += hash_size(const_val->data.x_ptr.data.base_struct.field_index); + return hash_val; + case ConstPtrSpecialHardCodedAddr: + hash_val += (uint32_t)4048518294; + hash_val += hash_size(const_val->data.x_ptr.data.hard_coded_addr.addr); + return hash_val; + case ConstPtrSpecialDiscard: + hash_val += 2010123162; + return hash_val; + case ConstPtrSpecialFunction: + hash_val += (uint32_t)2590901619; + hash_val += hash_ptr(const_val->data.x_ptr.data.fn.fn_entry); + return hash_val; + } + zig_unreachable(); +} + static uint32_t hash_const_val(ConstExprValue *const_val) { assert(const_val->special == ConstValSpecialStatic); switch (const_val->type->id) { @@ -4527,7 +4656,7 @@ static uint32_t hash_const_val(ConstExprValue *const_val) { case TypeTableEntryIdVoid: return (uint32_t)4149439618; case TypeTableEntryIdInt: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeInt: { uint32_t result = 1331471175; for (size_t i = 0; i < const_val->data.x_bigint.digit_count; i += 1) { @@ -4547,6 +4676,13 @@ static uint32_t hash_const_val(ConstExprValue *const_val) { } case TypeTableEntryIdFloat: switch (const_val->type->data.floating.bit_count) { + case 16: + { + uint16_t result; + static_assert(sizeof(result) == sizeof(const_val->data.x_f16), ""); + memcpy(&result, &const_val->data.x_f16, sizeof(result)); + return result * 65537u; + } case 32: { uint32_t result; @@ -4568,7 +4704,7 @@ static uint32_t hash_const_val(ConstExprValue *const_val) { default: zig_unreachable(); } - case TypeTableEntryIdNumLitFloat: + case TypeTableEntryIdComptimeFloat: { float128_t f128 = bigfloat_to_f128(&const_val->data.x_bigfloat); uint32_t ints[4]; @@ -4583,57 +4719,13 @@ static uint32_t hash_const_val(ConstExprValue *const_val) { assert(const_val->data.x_ptr.special == ConstPtrSpecialFunction); return 3677364617 ^ hash_ptr(const_val->data.x_ptr.data.fn.fn_entry); case TypeTableEntryIdPointer: - { - uint32_t hash_val = 0; - switch (const_val->data.x_ptr.mut) { - case ConstPtrMutRuntimeVar: - hash_val += (uint32_t)3500721036; - break; - case ConstPtrMutComptimeConst: - hash_val += (uint32_t)4214318515; - break; - case ConstPtrMutComptimeVar: - hash_val += (uint32_t)1103195694; - break; - } - switch (const_val->data.x_ptr.special) { - case ConstPtrSpecialInvalid: - zig_unreachable(); - case ConstPtrSpecialRef: - hash_val += (uint32_t)2478261866; - hash_val += hash_ptr(const_val->data.x_ptr.data.ref.pointee); - return hash_val; - case ConstPtrSpecialBaseArray: - hash_val += (uint32_t)1764906839; - hash_val += hash_ptr(const_val->data.x_ptr.data.base_array.array_val); - hash_val += hash_size(const_val->data.x_ptr.data.base_array.elem_index); - hash_val += const_val->data.x_ptr.data.base_array.is_cstr ? 
1297263887 : 200363492; - return hash_val; - case ConstPtrSpecialBaseStruct: - hash_val += (uint32_t)3518317043; - hash_val += hash_ptr(const_val->data.x_ptr.data.base_struct.struct_val); - hash_val += hash_size(const_val->data.x_ptr.data.base_struct.field_index); - return hash_val; - case ConstPtrSpecialHardCodedAddr: - hash_val += (uint32_t)4048518294; - hash_val += hash_size(const_val->data.x_ptr.data.hard_coded_addr.addr); - return hash_val; - case ConstPtrSpecialDiscard: - hash_val += 2010123162; - return hash_val; - case ConstPtrSpecialFunction: - hash_val += (uint32_t)2590901619; - hash_val += hash_ptr(const_val->data.x_ptr.data.fn.fn_entry); - return hash_val; - } - zig_unreachable(); - } + return hash_const_val_ptr(const_val); case TypeTableEntryIdPromise: // TODO better hashing algorithm return 223048345; - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: return 162837799; - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: return 844854567; case TypeTableEntryIdArray: // TODO better hashing algorithm @@ -4644,11 +4736,15 @@ static uint32_t hash_const_val(ConstExprValue *const_val) { case TypeTableEntryIdUnion: // TODO better hashing algorithm return 2709806591; - case TypeTableEntryIdMaybe: - if (const_val->data.x_maybe) { - return hash_const_val(const_val->data.x_maybe) * 1992916303; + case TypeTableEntryIdOptional: + if (get_codegen_ptr_type(const_val->type) != nullptr) { + return hash_const_val(const_val) * 1992916303; } else { - return 4016830364; + if (const_val->data.x_optional) { + return hash_const_val(const_val->data.x_optional) * 1992916303; + } else { + return 4016830364; + } } case TypeTableEntryIdErrorUnion: // TODO better hashing algorithm @@ -4713,10 +4809,10 @@ static bool can_mutate_comptime_var_state(ConstExprValue *value) { case TypeTableEntryIdUnreachable: case TypeTableEntryIdInt: case TypeTableEntryIdFloat: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: case TypeTableEntryIdNamespace: case TypeTableEntryIdBoundFn: case TypeTableEntryIdFn: @@ -4748,10 +4844,12 @@ static bool can_mutate_comptime_var_state(ConstExprValue *value) { } return false; - case TypeTableEntryIdMaybe: - if (value->data.x_maybe == nullptr) + case TypeTableEntryIdOptional: + if (get_codegen_ptr_type(value->type) != nullptr) + return value->data.x_ptr.mut == ConstPtrMutComptimeVar; + if (value->data.x_optional == nullptr) return false; - return can_mutate_comptime_var_state(value->data.x_maybe); + return can_mutate_comptime_var_state(value->data.x_optional); case TypeTableEntryIdErrorUnion: if (value->data.x_err_union.err != nullptr) @@ -4778,10 +4876,10 @@ static bool return_type_is_cacheable(TypeTableEntry *return_type) { case TypeTableEntryIdUnreachable: case TypeTableEntryIdInt: case TypeTableEntryIdFloat: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: case TypeTableEntryIdNamespace: case TypeTableEntryIdBoundFn: case TypeTableEntryIdFn: @@ -4798,7 +4896,7 @@ static bool return_type_is_cacheable(TypeTableEntry *return_type) { case TypeTableEntryIdUnion: return false; - case TypeTableEntryIdMaybe: + case 
TypeTableEntryIdOptional: return return_type_is_cacheable(return_type->data.maybe.child_type); case TypeTableEntryIdErrorUnion: @@ -4816,6 +4914,8 @@ bool fn_eval_cacheable(Scope *scope, TypeTableEntry *return_type) { while (scope) { if (scope->id == ScopeIdVarDecl) { ScopeVarDecl *var_scope = (ScopeVarDecl *)scope; + if (type_is_invalid(var_scope->var->value->type)) + return false; if (can_mutate_comptime_var_state(var_scope->var->value)) return false; } else if (scope->id == ScopeIdFnDef) { @@ -4889,10 +4989,10 @@ bool type_requires_comptime(TypeTableEntry *type_entry) { case TypeTableEntryIdInvalid: case TypeTableEntryIdOpaque: zig_unreachable(); - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: case TypeTableEntryIdMetaType: case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: @@ -4900,17 +5000,30 @@ bool type_requires_comptime(TypeTableEntry *type_entry) { case TypeTableEntryIdArgTuple: return true; case TypeTableEntryIdArray: + return type_requires_comptime(type_entry->data.array.child_type); case TypeTableEntryIdStruct: + assert(type_has_zero_bits_known(type_entry)); + return type_entry->data.structure.requires_comptime; case TypeTableEntryIdUnion: - case TypeTableEntryIdMaybe: + assert(type_has_zero_bits_known(type_entry)); + return type_entry->data.unionation.requires_comptime; + case TypeTableEntryIdOptional: + return type_requires_comptime(type_entry->data.maybe.child_type); case TypeTableEntryIdErrorUnion: + return type_requires_comptime(type_entry->data.error_union.payload_type); + case TypeTableEntryIdPointer: + if (type_entry->data.pointer.child_type->id == TypeTableEntryIdOpaque) { + return false; + } else { + return type_requires_comptime(type_entry->data.pointer.child_type); + } + case TypeTableEntryIdFn: + return type_entry->data.fn.is_generic; case TypeTableEntryIdEnum: case TypeTableEntryIdErrorSet: - case TypeTableEntryIdFn: case TypeTableEntryIdBool: case TypeTableEntryIdInt: case TypeTableEntryIdFloat: - case TypeTableEntryIdPointer: case TypeTableEntryIdVoid: case TypeTableEntryIdUnreachable: case TypeTableEntryIdPromise: @@ -4966,7 +5079,9 @@ void init_const_c_str_lit(CodeGen *g, ConstExprValue *const_val, Buf *str) { // then make the pointer point to it const_val->special = ConstValSpecialStatic; - const_val->type = get_pointer_to_type(g, g->builtin_types.entry_u8, true); + // TODO make this `[*]null u8` instead of `[*]u8` + const_val->type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, true, false, + PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0); const_val->data.x_ptr.special = ConstPtrSpecialBaseArray; const_val->data.x_ptr.data.base_array.array_val = array_val; const_val->data.x_ptr.data.base_array.elem_index = 0; @@ -5027,10 +5142,13 @@ ConstExprValue *create_const_signed(TypeTableEntry *type, int64_t x) { void init_const_float(ConstExprValue *const_val, TypeTableEntry *type, double value) { const_val->special = ConstValSpecialStatic; const_val->type = type; - if (type->id == TypeTableEntryIdNumLitFloat) { + if (type->id == TypeTableEntryIdComptimeFloat) { bigfloat_init_64(&const_val->data.x_bigfloat, value); } else if (type->id == TypeTableEntryIdFloat) { switch (type->data.floating.bit_count) { + case 16: + const_val->data.x_f16 = zig_double_to_f16(value); + break; case 32: const_val->data.x_f32 = 
value; break; @@ -5107,13 +5225,16 @@ void init_const_slice(CodeGen *g, ConstExprValue *const_val, ConstExprValue *arr { assert(array_val->type->id == TypeTableEntryIdArray); - TypeTableEntry *ptr_type = get_pointer_to_type(g, array_val->type->data.array.child_type, is_const); + TypeTableEntry *ptr_type = get_pointer_to_type_extra(g, array_val->type->data.array.child_type, + is_const, false, PtrLenUnknown, get_abi_alignment(g, array_val->type->data.array.child_type), + 0, 0); const_val->special = ConstValSpecialStatic; const_val->type = get_slice_type(g, ptr_type); const_val->data.x_struct.fields = create_const_vals(2); - init_const_ptr_array(g, &const_val->data.x_struct.fields[slice_ptr_index], array_val, start, is_const); + init_const_ptr_array(g, &const_val->data.x_struct.fields[slice_ptr_index], array_val, start, is_const, + PtrLenUnknown); init_const_usize(g, &const_val->data.x_struct.fields[slice_len_index], len); } @@ -5124,21 +5245,24 @@ ConstExprValue *create_const_slice(CodeGen *g, ConstExprValue *array_val, size_t } void init_const_ptr_array(CodeGen *g, ConstExprValue *const_val, ConstExprValue *array_val, - size_t elem_index, bool is_const) + size_t elem_index, bool is_const, PtrLen ptr_len) { assert(array_val->type->id == TypeTableEntryIdArray); TypeTableEntry *child_type = array_val->type->data.array.child_type; const_val->special = ConstValSpecialStatic; - const_val->type = get_pointer_to_type(g, child_type, is_const); + const_val->type = get_pointer_to_type_extra(g, child_type, is_const, false, + ptr_len, get_abi_alignment(g, child_type), 0, 0); const_val->data.x_ptr.special = ConstPtrSpecialBaseArray; const_val->data.x_ptr.data.base_array.array_val = array_val; const_val->data.x_ptr.data.base_array.elem_index = elem_index; } -ConstExprValue *create_const_ptr_array(CodeGen *g, ConstExprValue *array_val, size_t elem_index, bool is_const) { +ConstExprValue *create_const_ptr_array(CodeGen *g, ConstExprValue *array_val, size_t elem_index, bool is_const, + PtrLen ptr_len) +{ ConstExprValue *const_val = create_const_vals(1); - init_const_ptr_array(g, const_val, array_val, elem_index, is_const); + init_const_ptr_array(g, const_val, array_val, elem_index, is_const, ptr_len); return const_val; } @@ -5257,6 +5381,52 @@ bool ir_get_var_is_comptime(VariableTableEntry *var) { return var->is_comptime->value.data.x_bool; } +bool const_values_equal_ptr(ConstExprValue *a, ConstExprValue *b) { + if (a->data.x_ptr.special != b->data.x_ptr.special) + return false; + if (a->data.x_ptr.mut != b->data.x_ptr.mut) + return false; + switch (a->data.x_ptr.special) { + case ConstPtrSpecialInvalid: + zig_unreachable(); + case ConstPtrSpecialRef: + if (a->data.x_ptr.data.ref.pointee != b->data.x_ptr.data.ref.pointee) + return false; + return true; + case ConstPtrSpecialBaseArray: + if (a->data.x_ptr.data.base_array.array_val != b->data.x_ptr.data.base_array.array_val && + a->data.x_ptr.data.base_array.array_val->global_refs != + b->data.x_ptr.data.base_array.array_val->global_refs) + { + return false; + } + if (a->data.x_ptr.data.base_array.elem_index != b->data.x_ptr.data.base_array.elem_index) + return false; + if (a->data.x_ptr.data.base_array.is_cstr != b->data.x_ptr.data.base_array.is_cstr) + return false; + return true; + case ConstPtrSpecialBaseStruct: + if (a->data.x_ptr.data.base_struct.struct_val != b->data.x_ptr.data.base_struct.struct_val && + a->data.x_ptr.data.base_struct.struct_val->global_refs != + b->data.x_ptr.data.base_struct.struct_val->global_refs) + { + return false; + } + if 
(a->data.x_ptr.data.base_struct.field_index != b->data.x_ptr.data.base_struct.field_index) + return false; + return true; + case ConstPtrSpecialHardCodedAddr: + if (a->data.x_ptr.data.hard_coded_addr.addr != b->data.x_ptr.data.hard_coded_addr.addr) + return false; + return true; + case ConstPtrSpecialDiscard: + return true; + case ConstPtrSpecialFunction: + return a->data.x_ptr.data.fn.fn_entry == b->data.x_ptr.data.fn.fn_entry; + } + zig_unreachable(); +} + bool const_values_equal(ConstExprValue *a, ConstExprValue *b) { assert(a->type->id == b->type->id); assert(a->special == ConstValSpecialStatic); @@ -5292,6 +5462,8 @@ bool const_values_equal(ConstExprValue *a, ConstExprValue *b) { case TypeTableEntryIdFloat: assert(a->type->data.floating.bit_count == b->type->data.floating.bit_count); switch (a->type->data.floating.bit_count) { + case 16: + return f16_eq(a->data.x_f16, b->data.x_f16); case 32: return a->data.x_f32 == b->data.x_f32; case 64: @@ -5301,58 +5473,30 @@ bool const_values_equal(ConstExprValue *a, ConstExprValue *b) { default: zig_unreachable(); } - case TypeTableEntryIdNumLitFloat: + case TypeTableEntryIdComptimeFloat: return bigfloat_cmp(&a->data.x_bigfloat, &b->data.x_bigfloat) == CmpEQ; case TypeTableEntryIdInt: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeInt: return bigint_cmp(&a->data.x_bigint, &b->data.x_bigint) == CmpEQ; case TypeTableEntryIdPointer: case TypeTableEntryIdFn: - if (a->data.x_ptr.special != b->data.x_ptr.special) - return false; - if (a->data.x_ptr.mut != b->data.x_ptr.mut) - return false; - switch (a->data.x_ptr.special) { - case ConstPtrSpecialInvalid: - zig_unreachable(); - case ConstPtrSpecialRef: - if (a->data.x_ptr.data.ref.pointee != b->data.x_ptr.data.ref.pointee) - return false; - return true; - case ConstPtrSpecialBaseArray: - if (a->data.x_ptr.data.base_array.array_val != b->data.x_ptr.data.base_array.array_val && - a->data.x_ptr.data.base_array.array_val->global_refs != - b->data.x_ptr.data.base_array.array_val->global_refs) - { - return false; - } - if (a->data.x_ptr.data.base_array.elem_index != b->data.x_ptr.data.base_array.elem_index) - return false; - if (a->data.x_ptr.data.base_array.is_cstr != b->data.x_ptr.data.base_array.is_cstr) - return false; - return true; - case ConstPtrSpecialBaseStruct: - if (a->data.x_ptr.data.base_struct.struct_val != b->data.x_ptr.data.base_struct.struct_val && - a->data.x_ptr.data.base_struct.struct_val->global_refs != - b->data.x_ptr.data.base_struct.struct_val->global_refs) - { - return false; - } - if (a->data.x_ptr.data.base_struct.field_index != b->data.x_ptr.data.base_struct.field_index) - return false; - return true; - case ConstPtrSpecialHardCodedAddr: - if (a->data.x_ptr.data.hard_coded_addr.addr != b->data.x_ptr.data.hard_coded_addr.addr) - return false; - return true; - case ConstPtrSpecialDiscard: - return true; - case ConstPtrSpecialFunction: - return a->data.x_ptr.data.fn.fn_entry == b->data.x_ptr.data.fn.fn_entry; + return const_values_equal_ptr(a, b); + case TypeTableEntryIdArray: { + assert(a->type->data.array.len == b->type->data.array.len); + assert(a->data.x_array.special != ConstArraySpecialUndef); + assert(b->data.x_array.special != ConstArraySpecialUndef); + + size_t len = a->type->data.array.len; + ConstExprValue *a_elems = a->data.x_array.s_none.elements; + ConstExprValue *b_elems = b->data.x_array.s_none.elements; + + for (size_t i = 0; i < len; ++i) { + if (!const_values_equal(&a_elems[i], &b_elems[i])) + return false; } - zig_unreachable(); - case 
TypeTableEntryIdArray: - zig_panic("TODO"); + + return true; + } case TypeTableEntryIdStruct: for (size_t i = 0; i < a->type->data.structure.src_field_count; i += 1) { ConstExprValue *field_a = &a->data.x_struct.fields[i]; @@ -5361,15 +5505,17 @@ bool const_values_equal(ConstExprValue *a, ConstExprValue *b) { return false; } return true; - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: zig_panic("TODO"); - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: zig_panic("TODO"); - case TypeTableEntryIdMaybe: - if (a->data.x_maybe == nullptr || b->data.x_maybe == nullptr) { - return (a->data.x_maybe == nullptr && b->data.x_maybe == nullptr); + case TypeTableEntryIdOptional: + if (get_codegen_ptr_type(a->type) != nullptr) + return const_values_equal_ptr(a, b); + if (a->data.x_optional == nullptr || b->data.x_optional == nullptr) { + return (a->data.x_optional == nullptr && b->data.x_optional == nullptr); } else { - return const_values_equal(a->data.x_maybe, b->data.x_maybe); + return const_values_equal(a->data.x_optional, b->data.x_optional); } case TypeTableEntryIdErrorUnion: zig_panic("TODO"); @@ -5442,6 +5588,41 @@ void eval_min_max_value(CodeGen *g, TypeTableEntry *type_entry, ConstExprValue * } } +void render_const_val_ptr(CodeGen *g, Buf *buf, ConstExprValue *const_val, TypeTableEntry *type_entry) { + switch (const_val->data.x_ptr.special) { + case ConstPtrSpecialInvalid: + zig_unreachable(); + case ConstPtrSpecialRef: + case ConstPtrSpecialBaseStruct: + buf_appendf(buf, "*"); + render_const_value(g, buf, const_ptr_pointee(g, const_val)); + return; + case ConstPtrSpecialBaseArray: + if (const_val->data.x_ptr.data.base_array.is_cstr) { + buf_appendf(buf, "*(c str lit)"); + return; + } else { + buf_appendf(buf, "*"); + render_const_value(g, buf, const_ptr_pointee(g, const_val)); + return; + } + case ConstPtrSpecialHardCodedAddr: + buf_appendf(buf, "(%s)(%" ZIG_PRI_x64 ")", buf_ptr(&type_entry->name), + const_val->data.x_ptr.data.hard_coded_addr.addr); + return; + case ConstPtrSpecialDiscard: + buf_append_str(buf, "*_"); + return; + case ConstPtrSpecialFunction: + { + FnTableEntry *fn_entry = const_val->data.x_ptr.data.fn.fn_entry; + buf_appendf(buf, "@ptrCast(%s, %s)", buf_ptr(&const_val->type->name), buf_ptr(&fn_entry->symbol_name)); + return; + } + } + zig_unreachable(); +} + void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) { switch (const_val->special) { case ConstValSpecialRuntime: @@ -5465,11 +5646,14 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) { case TypeTableEntryIdVoid: buf_appendf(buf, "{}"); return; - case TypeTableEntryIdNumLitFloat: + case TypeTableEntryIdComptimeFloat: bigfloat_append_buf(buf, &const_val->data.x_bigfloat); return; case TypeTableEntryIdFloat: switch (type_entry->data.floating.bit_count) { + case 16: + buf_appendf(buf, "%f", zig_f16_to_double(const_val->data.x_f16)); + return; case 32: buf_appendf(buf, "%f", const_val->data.x_f32); return; @@ -5493,7 +5677,7 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) { default: zig_unreachable(); } - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdInt: bigint_append_buf(buf, &const_val->data.x_bigint, 10); return; @@ -5518,38 +5702,7 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) { return; } case TypeTableEntryIdPointer: - switch (const_val->data.x_ptr.special) { - case ConstPtrSpecialInvalid: - zig_unreachable(); - case ConstPtrSpecialRef: - case 
ConstPtrSpecialBaseStruct: - buf_appendf(buf, "&"); - render_const_value(g, buf, const_ptr_pointee(g, const_val)); - return; - case ConstPtrSpecialBaseArray: - if (const_val->data.x_ptr.data.base_array.is_cstr) { - buf_appendf(buf, "&(c str lit)"); - return; - } else { - buf_appendf(buf, "&"); - render_const_value(g, buf, const_ptr_pointee(g, const_val)); - return; - } - case ConstPtrSpecialHardCodedAddr: - buf_appendf(buf, "(&%s)(%" ZIG_PRI_x64 ")", buf_ptr(&type_entry->data.pointer.child_type->name), - const_val->data.x_ptr.data.hard_coded_addr.addr); - return; - case ConstPtrSpecialDiscard: - buf_append_str(buf, "&_"); - return; - case ConstPtrSpecialFunction: - { - FnTableEntry *fn_entry = const_val->data.x_ptr.data.fn.fn_entry; - buf_appendf(buf, "@ptrCast(%s, %s)", buf_ptr(&const_val->type->name), buf_ptr(&fn_entry->symbol_name)); - return; - } - } - zig_unreachable(); + return render_const_val_ptr(g, buf, const_val, type_entry); case TypeTableEntryIdBlock: { AstNode *node = const_val->data.x_block->source_node; @@ -5597,20 +5750,22 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) { buf_appendf(buf, "}"); return; } - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: { buf_appendf(buf, "null"); return; } - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: { buf_appendf(buf, "undefined"); return; } - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: { - if (const_val->data.x_maybe) { - render_const_value(g, buf, const_val->data.x_maybe); + if (get_codegen_ptr_type(const_val->type) != nullptr) + return render_const_val_ptr(g, buf, const_val, type_entry->data.maybe.child_type); + if (const_val->data.x_optional) { + render_const_value(g, buf, const_val->data.x_optional); } else { buf_appendf(buf, "null"); } @@ -5712,11 +5867,11 @@ uint32_t type_id_hash(TypeId x) { case TypeTableEntryIdUnreachable: case TypeTableEntryIdFloat: case TypeTableEntryIdStruct: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorSet: case TypeTableEntryIdEnum: case TypeTableEntryIdUnion: @@ -5731,6 +5886,7 @@ uint32_t type_id_hash(TypeId x) { return hash_ptr(x.data.error_union.err_set_type) ^ hash_ptr(x.data.error_union.payload_type); case TypeTableEntryIdPointer: return hash_ptr(x.data.pointer.child_type) + + ((x.data.pointer.ptr_len == PtrLenSingle) ? (uint32_t)1120226602 : (uint32_t)3200913342) + (x.data.pointer.is_const ? (uint32_t)2749109194 : (uint32_t)4047371087) + (x.data.pointer.is_volatile ? 
(uint32_t)536730450 : (uint32_t)1685612214) + (((uint32_t)x.data.pointer.alignment) ^ (uint32_t)0x777fbe0e) + @@ -5757,11 +5913,11 @@ bool type_id_eql(TypeId a, TypeId b) { case TypeTableEntryIdUnreachable: case TypeTableEntryIdFloat: case TypeTableEntryIdStruct: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: + case TypeTableEntryIdOptional: case TypeTableEntryIdPromise: case TypeTableEntryIdErrorSet: case TypeTableEntryIdEnum: @@ -5779,6 +5935,7 @@ bool type_id_eql(TypeId a, TypeId b) { case TypeTableEntryIdPointer: return a.data.pointer.child_type == b.data.pointer.child_type && + a.data.pointer.ptr_len == b.data.pointer.ptr_len && a.data.pointer.is_const == b.data.pointer.is_const && a.data.pointer.is_volatile == b.data.pointer.is_volatile && a.data.pointer.alignment == b.data.pointer.alignment && @@ -5800,10 +5957,14 @@ uint32_t zig_llvm_fn_key_hash(ZigLLVMFnKey x) { return (uint32_t)(x.data.ctz.bit_count) * (uint32_t)810453934; case ZigLLVMFnIdClz: return (uint32_t)(x.data.clz.bit_count) * (uint32_t)2428952817; + case ZigLLVMFnIdPopCount: + return (uint32_t)(x.data.clz.bit_count) * (uint32_t)101195049; case ZigLLVMFnIdFloor: - return (uint32_t)(x.data.floor_ceil.bit_count) * (uint32_t)1899859168; + return (uint32_t)(x.data.floating.bit_count) * (uint32_t)1899859168; case ZigLLVMFnIdCeil: - return (uint32_t)(x.data.floor_ceil.bit_count) * (uint32_t)1953839089; + return (uint32_t)(x.data.floating.bit_count) * (uint32_t)1953839089; + case ZigLLVMFnIdSqrt: + return (uint32_t)(x.data.floating.bit_count) * (uint32_t)2225366385; case ZigLLVMFnIdOverflowArithmetic: return ((uint32_t)(x.data.overflow_arithmetic.bit_count) * 87135777) + ((uint32_t)(x.data.overflow_arithmetic.add_sub_mul) * 31640542) + @@ -5820,9 +5981,12 @@ bool zig_llvm_fn_key_eql(ZigLLVMFnKey a, ZigLLVMFnKey b) { return a.data.ctz.bit_count == b.data.ctz.bit_count; case ZigLLVMFnIdClz: return a.data.clz.bit_count == b.data.clz.bit_count; + case ZigLLVMFnIdPopCount: + return a.data.pop_count.bit_count == b.data.pop_count.bit_count; case ZigLLVMFnIdFloor: case ZigLLVMFnIdCeil: - return a.data.floor_ceil.bit_count == b.data.floor_ceil.bit_count; + case ZigLLVMFnIdSqrt: + return a.data.floating.bit_count == b.data.floating.bit_count; case ZigLLVMFnIdOverflowArithmetic: return (a.data.overflow_arithmetic.bit_count == b.data.overflow_arithmetic.bit_count) && (a.data.overflow_arithmetic.add_sub_mul == b.data.overflow_arithmetic.add_sub_mul) && @@ -5875,11 +6039,11 @@ static const TypeTableEntryId all_type_ids[] = { TypeTableEntryIdPointer, TypeTableEntryIdArray, TypeTableEntryIdStruct, - TypeTableEntryIdNumLitFloat, - TypeTableEntryIdNumLitInt, - TypeTableEntryIdUndefLit, - TypeTableEntryIdNullLit, - TypeTableEntryIdMaybe, + TypeTableEntryIdComptimeFloat, + TypeTableEntryIdComptimeInt, + TypeTableEntryIdUndefined, + TypeTableEntryIdNull, + TypeTableEntryIdOptional, TypeTableEntryIdErrorUnion, TypeTableEntryIdErrorSet, TypeTableEntryIdEnum, @@ -5902,8 +6066,8 @@ size_t type_id_len() { return array_length(all_type_ids); } -size_t type_id_index(TypeTableEntryId id) { - switch (id) { +size_t type_id_index(TypeTableEntry *entry) { + switch (entry->id) { case TypeTableEntryIdInvalid: zig_unreachable(); case TypeTableEntryIdMetaType: @@ -5923,16 +6087,18 @@ size_t 
type_id_index(TypeTableEntryId id) { case TypeTableEntryIdArray: return 7; case TypeTableEntryIdStruct: + if (entry->data.structure.is_slice) + return 6; return 8; - case TypeTableEntryIdNumLitFloat: + case TypeTableEntryIdComptimeFloat: return 9; - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeInt: return 10; - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: return 11; - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: return 12; - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: return 13; case TypeTableEntryIdErrorUnion: return 14; @@ -5982,16 +6148,16 @@ const char *type_id_name(TypeTableEntryId id) { return "Array"; case TypeTableEntryIdStruct: return "Struct"; - case TypeTableEntryIdNumLitFloat: - return "FloatLiteral"; - case TypeTableEntryIdNumLitInt: - return "IntLiteral"; - case TypeTableEntryIdUndefLit: - return "UndefinedLiteral"; - case TypeTableEntryIdNullLit: - return "NullLiteral"; - case TypeTableEntryIdMaybe: - return "Nullable"; + case TypeTableEntryIdComptimeFloat: + return "ComptimeFloat"; + case TypeTableEntryIdComptimeInt: + return "ComptimeInt"; + case TypeTableEntryIdUndefined: + return "Undefined"; + case TypeTableEntryIdNull: + return "Null"; + case TypeTableEntryIdOptional: + return "Optional"; case TypeTableEntryIdErrorUnion: return "ErrorUnion"; case TypeTableEntryIdErrorSet: @@ -6061,7 +6227,12 @@ uint32_t get_abi_alignment(CodeGen *g, TypeTableEntry *type_entry) { } else if (type_entry->id == TypeTableEntryIdOpaque) { return 1; } else { - return LLVMABIAlignmentOfType(g->target_data_ref, type_entry->type_ref); + uint32_t llvm_alignment = LLVMABIAlignmentOfType(g->target_data_ref, type_entry->type_ref); + // promises have at least alignment 8 so that we can have 3 extra bits when doing atomicrmw + if (type_entry->id == TypeTableEntryIdPromise && llvm_alignment < 8) { + return 8; + } + return llvm_alignment; } } @@ -6109,3 +6280,27 @@ bool fn_type_can_fail(FnTypeId *fn_type_id) { return type_can_fail(fn_type_id->return_type) || fn_type_id->cc == CallingConventionAsync; } +TypeTableEntry *get_primitive_type(CodeGen *g, Buf *name) { + if (buf_len(name) >= 2) { + uint8_t first_c = buf_ptr(name)[0]; + if (first_c == 'i' || first_c == 'u') { + for (size_t i = 1; i < buf_len(name); i += 1) { + uint8_t c = buf_ptr(name)[i]; + if (c < '0' || c > '9') { + goto not_integer; + } + } + bool is_signed = (first_c == 'i'); + uint32_t bit_count = atoi(buf_ptr(name) + 1); + return get_int_type(g, is_signed, bit_count); + } + } + +not_integer: + + auto primitive_table_entry = g->primitive_type_table.maybe_get(name); + if (primitive_table_entry != nullptr) { + return primitive_table_entry->value; + } + return nullptr; +} diff --git a/src/analyze.hpp b/src/analyze.hpp index aa4557666b..e4dfae4ecb 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -16,15 +16,14 @@ ErrorMsg *add_error_note(CodeGen *g, ErrorMsg *parent_msg, AstNode *node, Buf *m TypeTableEntry *new_type_table_entry(TypeTableEntryId id); TypeTableEntry *get_pointer_to_type(CodeGen *g, TypeTableEntry *child_type, bool is_const); TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type, bool is_const, - bool is_volatile, uint32_t byte_alignment, uint32_t bit_offset, uint32_t unaligned_bit_count); + bool is_volatile, PtrLen ptr_len, uint32_t byte_alignment, uint32_t bit_offset, uint32_t unaligned_bit_count); uint64_t type_size(CodeGen *g, TypeTableEntry *type_entry); uint64_t type_size_bits(CodeGen *g, TypeTableEntry *type_entry); 
-TypeTableEntry **get_int_type_ptr(CodeGen *g, bool is_signed, uint32_t size_in_bits); TypeTableEntry *get_int_type(CodeGen *g, bool is_signed, uint32_t size_in_bits); TypeTableEntry **get_c_int_type_ptr(CodeGen *g, CIntType c_int_type); TypeTableEntry *get_c_int_type(CodeGen *g, CIntType c_int_type); TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id); -TypeTableEntry *get_maybe_type(CodeGen *g, TypeTableEntry *child_type); +TypeTableEntry *get_optional_type(CodeGen *g, TypeTableEntry *child_type); TypeTableEntry *get_array_type(CodeGen *g, TypeTableEntry *child_type, uint64_t array_size); TypeTableEntry *get_slice_type(CodeGen *g, TypeTableEntry *ptr_type); TypeTableEntry *get_partial_container_type(CodeGen *g, Scope *scope, ContainerKind kind, @@ -70,6 +69,8 @@ TypeUnionField *find_union_type_field(TypeTableEntry *type_entry, Buf *name); TypeEnumField *find_enum_field_by_tag(TypeTableEntry *enum_type, const BigInt *tag); TypeUnionField *find_union_field_by_tag(TypeTableEntry *type_entry, const BigInt *tag); +bool is_ref(TypeTableEntry *type_entry); +bool is_array_ref(TypeTableEntry *type_entry); bool is_container_ref(TypeTableEntry *type_entry); void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node); void scan_import(CodeGen *g, ImportTableEntry *import); @@ -104,6 +105,7 @@ ScopeDeferExpr *create_defer_expr_scope(AstNode *node, Scope *parent); Scope *create_var_scope(AstNode *node, Scope *parent, VariableTableEntry *var); ScopeCImport *create_cimport_scope(AstNode *node, Scope *parent); ScopeLoop *create_loop_scope(AstNode *node, Scope *parent); +ScopeSuspend *create_suspend_scope(AstNode *node, Scope *parent); ScopeFnDef *create_fndef_scope(AstNode *node, Scope *parent, FnTableEntry *fn_entry); ScopeDecls *create_decls_scope(AstNode *node, Scope *parent, TypeTableEntry *container_type, ImportTableEntry *import); Scope *create_comptime_scope(AstNode *node, Scope *parent); @@ -151,8 +153,9 @@ ConstExprValue *create_const_ptr_hard_coded_addr(CodeGen *g, TypeTableEntry *poi size_t addr, bool is_const); void init_const_ptr_array(CodeGen *g, ConstExprValue *const_val, ConstExprValue *array_val, - size_t elem_index, bool is_const); -ConstExprValue *create_const_ptr_array(CodeGen *g, ConstExprValue *array_val, size_t elem_index, bool is_const); + size_t elem_index, bool is_const, PtrLen ptr_len); +ConstExprValue *create_const_ptr_array(CodeGen *g, ConstExprValue *array_val, size_t elem_index, + bool is_const, PtrLen ptr_len); void init_const_slice(CodeGen *g, ConstExprValue *const_val, ConstExprValue *array_val, size_t start, size_t len, bool is_const); @@ -173,7 +176,7 @@ void update_compile_var(CodeGen *g, Buf *name, ConstExprValue *value); const char *type_id_name(TypeTableEntryId id); TypeTableEntryId type_id_at_index(size_t index); size_t type_id_len(); -size_t type_id_index(TypeTableEntryId id); +size_t type_id_index(TypeTableEntry *entry); TypeTableEntry *get_generic_fn_type(CodeGen *g, FnTypeId *fn_type_id); bool type_is_copyable(CodeGen *g, TypeTableEntry *type_entry); LinkLib *create_link_lib(Buf *name); @@ -190,7 +193,7 @@ void add_fn_export(CodeGen *g, FnTableEntry *fn_table_entry, Buf *symbol_name, G ConstExprValue *get_builtin_value(CodeGen *codegen, const char *name); TypeTableEntry *get_ptr_to_stack_trace_type(CodeGen *g); -void analyze_fn_body(CodeGen *g, FnTableEntry *fn_table_entry); +bool resolve_inferred_error_set(CodeGen *g, TypeTableEntry *err_set_type, AstNode *source_node); TypeTableEntry *get_auto_err_set_type(CodeGen *g, FnTableEntry 
*fn_entry); @@ -198,5 +201,8 @@ uint32_t get_coro_frame_align_bytes(CodeGen *g); bool fn_type_can_fail(FnTypeId *fn_type_id); bool type_can_fail(TypeTableEntry *type_entry); bool fn_eval_cacheable(Scope *scope, TypeTableEntry *return_type); +AstNode *type_decl_node(TypeTableEntry *type_entry); + +TypeTableEntry *get_primitive_type(CodeGen *g, Buf *name); #endif diff --git a/src/ast_render.cpp b/src/ast_render.cpp index 2c3e1fc873..984b4230b1 100644 --- a/src/ast_render.cpp +++ b/src/ast_render.cpp @@ -50,7 +50,7 @@ static const char *bin_op_str(BinOpType bin_op) { case BinOpTypeAssignBitXor: return "^="; case BinOpTypeAssignBitOr: return "|="; case BinOpTypeAssignMergeErrorSets: return "||="; - case BinOpTypeUnwrapMaybe: return "??"; + case BinOpTypeUnwrapOptional: return "orelse"; case BinOpTypeArrayCat: return "++"; case BinOpTypeArrayMult: return "**"; case BinOpTypeErrorUnion: return "!"; @@ -66,9 +66,8 @@ static const char *prefix_op_str(PrefixOp prefix_op) { case PrefixOpNegationWrap: return "-%"; case PrefixOpBoolNot: return "!"; case PrefixOpBinNot: return "~"; - case PrefixOpDereference: return "*"; - case PrefixOpMaybe: return "?"; - case PrefixOpUnwrapMaybe: return "??"; + case PrefixOpOptional: return "?"; + case PrefixOpAddrOf: return "&"; } zig_unreachable(); } @@ -186,8 +185,6 @@ static const char *node_type_str(NodeType node_type) { return "Symbol"; case NodeTypePrefixOpExpr: return "PrefixOpExpr"; - case NodeTypeAddrOfExpr: - return "AddrOfExpr"; case NodeTypeUse: return "Use"; case NodeTypeBoolLiteral: @@ -222,6 +219,10 @@ static const char *node_type_str(NodeType node_type) { return "AsmExpr"; case NodeTypeFieldAccessExpr: return "FieldAccessExpr"; + case NodeTypePtrDeref: + return "PtrDerefExpr"; + case NodeTypeUnwrapOptional: + return "UnwrapOptional"; case NodeTypeContainerDecl: return "ContainerDecl"; case NodeTypeStructField: @@ -250,6 +251,8 @@ static const char *node_type_str(NodeType node_type) { return "Suspend"; case NodeTypePromiseType: return "PromiseType"; + case NodeTypePointerType: + return "PointerType"; } zig_unreachable(); } @@ -615,41 +618,47 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) { fprintf(ar->f, "%s", prefix_op_str(op)); AstNode *child_node = node->data.prefix_op_expr.primary_expr; - bool new_grouped = child_node->type == NodeTypePrefixOpExpr || child_node->type == NodeTypeAddrOfExpr; + bool new_grouped = child_node->type == NodeTypePrefixOpExpr || child_node->type == NodeTypePointerType; render_node_extra(ar, child_node, new_grouped); if (!grouped) fprintf(ar->f, ")"); break; } - case NodeTypeAddrOfExpr: + case NodeTypePointerType: { if (!grouped) fprintf(ar->f, "("); - fprintf(ar->f, "&"); - if (node->data.addr_of_expr.align_expr != nullptr) { + const char *star = "[*]"; + if (node->data.pointer_type.star_token != nullptr && + (node->data.pointer_type.star_token->id == TokenIdStar || node->data.pointer_type.star_token->id == TokenIdStarStar)) + { + star = "*"; + } + fprintf(ar->f, "%s", star); + if (node->data.pointer_type.align_expr != nullptr) { fprintf(ar->f, "align("); - render_node_grouped(ar, node->data.addr_of_expr.align_expr); - if (node->data.addr_of_expr.bit_offset_start != nullptr) { - assert(node->data.addr_of_expr.bit_offset_end != nullptr); + render_node_grouped(ar, node->data.pointer_type.align_expr); + if (node->data.pointer_type.bit_offset_start != nullptr) { + assert(node->data.pointer_type.bit_offset_end != nullptr); Buf offset_start_buf = BUF_INIT; buf_resize(&offset_start_buf, 0); - 
bigint_append_buf(&offset_start_buf, node->data.addr_of_expr.bit_offset_start, 10); + bigint_append_buf(&offset_start_buf, node->data.pointer_type.bit_offset_start, 10); Buf offset_end_buf = BUF_INIT; buf_resize(&offset_end_buf, 0); - bigint_append_buf(&offset_end_buf, node->data.addr_of_expr.bit_offset_end, 10); + bigint_append_buf(&offset_end_buf, node->data.pointer_type.bit_offset_end, 10); fprintf(ar->f, ":%s:%s ", buf_ptr(&offset_start_buf), buf_ptr(&offset_end_buf)); } fprintf(ar->f, ") "); } - if (node->data.addr_of_expr.is_const) { + if (node->data.pointer_type.is_const) { fprintf(ar->f, "const "); } - if (node->data.addr_of_expr.is_volatile) { + if (node->data.pointer_type.is_volatile) { fprintf(ar->f, "volatile "); } - render_node_ungrouped(ar, node->data.addr_of_expr.op_expr); + render_node_ungrouped(ar, node->data.pointer_type.op_expr); if (!grouped) fprintf(ar->f, ")"); break; } @@ -668,7 +677,7 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) { fprintf(ar->f, " "); } AstNode *fn_ref_node = node->data.fn_call_expr.fn_ref_expr; - bool grouped = (fn_ref_node->type != NodeTypePrefixOpExpr && fn_ref_node->type != NodeTypeAddrOfExpr); + bool grouped = (fn_ref_node->type != NodeTypePrefixOpExpr && fn_ref_node->type != NodeTypePointerType); render_node_extra(ar, fn_ref_node, grouped); fprintf(ar->f, "("); for (size_t i = 0; i < node->data.fn_call_expr.params.length; i += 1) { @@ -696,6 +705,20 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) { print_symbol(ar, rhs); break; } + case NodeTypePtrDeref: + { + AstNode *lhs = node->data.ptr_deref_expr.target; + render_node_ungrouped(ar, lhs); + fprintf(ar->f, ".*"); + break; + } + case NodeTypeUnwrapOptional: + { + AstNode *lhs = node->data.unwrap_optional.expr; + render_node_ungrouped(ar, lhs); + fprintf(ar->f, ".?"); + break; + } case NodeTypeUndefinedLiteral: fprintf(ar->f, "undefined"); break; @@ -728,7 +751,7 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) { render_node_grouped(ar, field_node->data.struct_field.type); } if (field_node->data.struct_field.value != nullptr) { - fprintf(ar->f, "= "); + fprintf(ar->f, " = "); render_node_grouped(ar, field_node->data.struct_field.value); } fprintf(ar->f, ",\n"); @@ -1089,9 +1112,6 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) { { fprintf(ar->f, "suspend"); if (node->data.suspend.block != nullptr) { - fprintf(ar->f, " |"); - render_node_grouped(ar, node->data.suspend.promise_symbol); - fprintf(ar->f, "| "); render_node_grouped(ar, node->data.suspend.block); } break; diff --git a/src/bigfloat.cpp b/src/bigfloat.cpp index 2cab9658e8..cc442fa3b7 100644 --- a/src/bigfloat.cpp +++ b/src/bigfloat.cpp @@ -18,6 +18,10 @@ void bigfloat_init_128(BigFloat *dest, float128_t x) { dest->value = x; } +void bigfloat_init_16(BigFloat *dest, float16_t x) { + f16_to_f128M(x, &dest->value); +} + void bigfloat_init_32(BigFloat *dest, float x) { float32_t f32_val; memcpy(&f32_val, &x, sizeof(float)); @@ -146,6 +150,10 @@ Cmp bigfloat_cmp(const BigFloat *op1, const BigFloat *op2) { } } +float16_t bigfloat_to_f16(const BigFloat *bigfloat) { + return f128M_to_f16(&bigfloat->value); +} + float bigfloat_to_f32(const BigFloat *bigfloat) { float32_t f32_value = f128M_to_f32(&bigfloat->value); float result; @@ -181,3 +189,7 @@ bool bigfloat_has_fraction(const BigFloat *bigfloat) { f128M_roundToInt(&bigfloat->value, softfloat_round_minMag, false, &floored); return !f128M_eq(&floored, &bigfloat->value); } + 
+void bigfloat_sqrt(BigFloat *dest, const BigFloat *op) { + f128M_sqrt(&op->value, &dest->value); +} diff --git a/src/bigfloat.hpp b/src/bigfloat.hpp index 894b252c3a..c6ae567945 100644 --- a/src/bigfloat.hpp +++ b/src/bigfloat.hpp @@ -22,6 +22,7 @@ struct BigFloat { struct Buf; +void bigfloat_init_16(BigFloat *dest, float16_t x); void bigfloat_init_32(BigFloat *dest, float x); void bigfloat_init_64(BigFloat *dest, double x); void bigfloat_init_128(BigFloat *dest, float128_t x); @@ -29,6 +30,7 @@ void bigfloat_init_bigfloat(BigFloat *dest, const BigFloat *x); void bigfloat_init_bigint(BigFloat *dest, const BigInt *op); int bigfloat_init_buf_base10(BigFloat *dest, const uint8_t *buf_ptr, size_t buf_len); +float16_t bigfloat_to_f16(const BigFloat *bigfloat); float bigfloat_to_f32(const BigFloat *bigfloat); double bigfloat_to_f64(const BigFloat *bigfloat); float128_t bigfloat_to_f128(const BigFloat *bigfloat); @@ -42,6 +44,7 @@ void bigfloat_div_trunc(BigFloat *dest, const BigFloat *op1, const BigFloat *op2 void bigfloat_div_floor(BigFloat *dest, const BigFloat *op1, const BigFloat *op2); void bigfloat_rem(BigFloat *dest, const BigFloat *op1, const BigFloat *op2); void bigfloat_mod(BigFloat *dest, const BigFloat *op1, const BigFloat *op2); +void bigfloat_sqrt(BigFloat *dest, const BigFloat *op); void bigfloat_append_buf(Buf *buf, const BigFloat *op); Cmp bigfloat_cmp(const BigFloat *op1, const BigFloat *op2); diff --git a/src/bigint.cpp b/src/bigint.cpp index 85e5dad4ad..bf18b9a1bf 100644 --- a/src/bigint.cpp +++ b/src/bigint.cpp @@ -86,6 +86,11 @@ static void to_twos_complement(BigInt *dest, const BigInt *op, size_t bit_count) size_t digits_to_copy = bit_count / 64; size_t leftover_bits = bit_count % 64; dest->digit_count = digits_to_copy + ((leftover_bits == 0) ? 0 : 1); + if (dest->digit_count == 1 && leftover_bits == 0) { + dest->data.digit = op_digits[0]; + if (dest->data.digit == 0) dest->digit_count = 0; + return; + } dest->data.digits = allocate_nonzero<uint64_t>(dest->digit_count); for (size_t i = 0; i < digits_to_copy; i += 1) { uint64_t digit = (i < op->digit_count) ? 
op_digits[i] : 0; @@ -1254,12 +1259,11 @@ void bigint_and(BigInt *dest, const BigInt *op1, const BigInt *op2) { bigint_normalize(dest); return; } - // TODO this code path is untested - uint64_t first_digit = dest->data.digit; + dest->digit_count = max(op1->digit_count, op2->digit_count); dest->data.digits = allocate_nonzero<uint64_t>(dest->digit_count); - dest->data.digits[0] = first_digit; - size_t i = 1; + + size_t i = 0; for (; i < op1->digit_count && i < op2->digit_count; i += 1) { dest->data.digits[i] = op1_digits[i] & op2_digits[i]; } @@ -1407,7 +1411,6 @@ void bigint_shr(BigInt *dest, const BigInt *op1, const BigInt *op2) { return; } - // TODO this code path is untested size_t digit_shift_count = shift_amt / 64; size_t leftover_shift_count = shift_amt % 64; @@ -1422,7 +1425,7 @@ void bigint_shr(BigInt *dest, const BigInt *op1, const BigInt *op2) { uint64_t digit = op1_digits[op_digit_index]; size_t dest_digit_index = op_digit_index - digit_shift_count; dest->data.digits[dest_digit_index] = carry | (digit >> leftover_shift_count); - carry = (0xffffffffffffffffULL << leftover_shift_count) & digit; + carry = digit << (64 - leftover_shift_count); if (dest_digit_index == 0) { break; } op_digit_index -= 1; @@ -1590,6 +1593,37 @@ void bigint_append_buf(Buf *buf, const BigInt *op, uint64_t base) { } } +size_t bigint_popcount_unsigned(const BigInt *bi) { + assert(!bi->is_negative); + if (bi->digit_count == 0) + return 0; + + size_t count = 0; + size_t bit_count = bi->digit_count * 64; + for (size_t i = 0; i < bit_count; i += 1) { + if (bit_at_index(bi, i)) + count += 1; + } + return count; +} + +size_t bigint_popcount_signed(const BigInt *bi, size_t bit_count) { + if (bit_count == 0) + return 0; + if (bi->digit_count == 0) + return 0; + + BigInt twos_comp = {0}; + to_twos_complement(&twos_comp, bi, bit_count); + + size_t count = 0; + for (size_t i = 0; i < bit_count; i += 1) { + if (bit_at_index(&twos_comp, i)) + count += 1; + } + return count; +} + size_t bigint_ctz(const BigInt *bi, size_t bit_count) { if (bit_count == 0) return 0; @@ -1680,10 +1714,15 @@ void bigint_incr(BigInt *x) { bigint_init_unsigned(x, 1); return; } - - if (x->digit_count == 1 && x->data.digit != UINT64_MAX) { - x->data.digit += 1; - return; + + if (x->digit_count == 1) { + if (x->is_negative && x->data.digit != 0) { + x->data.digit -= 1; + return; + } else if (!x->is_negative && x->data.digit != UINT64_MAX) { + x->data.digit += 1; + return; + } } BigInt copy; diff --git a/src/bigint.hpp b/src/bigint.hpp index 9f044c8722..48b222a227 100644 --- a/src/bigint.hpp +++ b/src/bigint.hpp @@ -81,6 +81,8 @@ void bigint_append_buf(Buf *buf, const BigInt *op, uint64_t base); size_t bigint_ctz(const BigInt *bi, size_t bit_count); size_t bigint_clz(const BigInt *bi, size_t bit_count); +size_t bigint_popcount_signed(const BigInt *bi, size_t bit_count); +size_t bigint_popcount_unsigned(const BigInt *bi); size_t bigint_bits_needed(const BigInt *op); diff --git a/src/codegen.cpp b/src/codegen.cpp index a58832f983..539356ef2f 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -17,6 +17,7 @@ #include "os.hpp" #include "translate_c.hpp" #include "target.hpp" +#include "util.hpp" #include "zig_llvm.h" #include <stdio.h> @@ -59,6 +60,33 @@ PackageTableEntry *new_anonymous_package(void) { return new_package("", ""); } +static const char *symbols_that_llvm_depends_on[] = { + "memcpy", + "memset", + "sqrt", + "powi", + "sin", + "cos", + "pow", + "exp", + "exp2", + "log", + "log10", + "log2", + "fma", + "fabs", + "minnum", + "maxnum", + 
"copysign", + "floor", + "ceil", + "trunc", + "rint", + "nearbyint", + "round", + // TODO probably all of compiler-rt needs to go here +}; + CodeGen *codegen_create(Buf *root_src_path, const ZigTarget *target, OutType out_type, BuildMode build_mode, Buf *zig_lib_dir) { @@ -88,10 +116,15 @@ CodeGen *codegen_create(Buf *root_src_path, const ZigTarget *target, OutType out g->exported_symbol_names.init(8); g->external_prototypes.init(8); g->string_literals_table.init(16); + g->type_info_cache.init(32); g->is_test_build = false; g->want_h_file = (out_type == OutTypeObj || out_type == OutTypeLib); buf_resize(&g->global_asm, 0); + for (size_t i = 0; i < array_length(symbols_that_llvm_depends_on); i += 1) { + g->external_prototypes.put(buf_create_from_str(symbols_that_llvm_depends_on[i]), nullptr); + } + if (root_src_path) { Buf *src_basename = buf_alloc(); Buf *src_dir = buf_alloc(); @@ -325,13 +358,6 @@ static void addLLVMArgAttr(LLVMValueRef arg_val, unsigned param_index, const cha return addLLVMAttr(arg_val, param_index + 1, attr_name); } -static void addLLVMCallsiteAttr(LLVMValueRef call_instr, unsigned param_index, const char *attr_name) { - unsigned kind_id = LLVMGetEnumAttributeKindForName(attr_name, strlen(attr_name)); - assert(kind_id != 0); - LLVMAttributeRef llvm_attr = LLVMCreateEnumAttribute(LLVMGetGlobalContext(), kind_id, 0); - LLVMAddCallSiteAttribute(call_instr, param_index + 1, llvm_attr); -} - static bool is_symbol_available(CodeGen *g, Buf *name) { return g->exported_symbol_names.maybe_get(name) == nullptr && g->external_prototypes.maybe_get(name) == nullptr; } @@ -512,7 +538,9 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, FnTableEntry *fn_table_entry) { } if (fn_table_entry->body_node != nullptr) { - bool want_fn_safety = g->build_mode != BuildModeFastRelease && !fn_table_entry->def_scope->safety_off; + bool want_fn_safety = g->build_mode != BuildModeFastRelease && + g->build_mode != BuildModeSmallRelease && + !fn_table_entry->def_scope->safety_off; if (want_fn_safety) { if (g->libc_link_lib != nullptr) { addLLVMFnAttr(fn_table_entry->llvm_value, "sspstrong"); @@ -578,11 +606,6 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, FnTableEntry *fn_table_entry) { if (param_type->id == TypeTableEntryIdPointer) { addLLVMArgAttr(fn_table_entry->llvm_value, (unsigned)gen_index, "nonnull"); } - // Note: byval is disabled on windows due to an LLVM bug: - // https://github.com/zig-lang/zig/issues/536 - if (is_byval && g->zig_target.os != OsWindows) { - addLLVMArgAttr(fn_table_entry->llvm_value, (unsigned)gen_index, "byval"); - } } uint32_t err_ret_trace_arg_index = get_err_ret_trace_arg_index(g, fn_table_entry); @@ -652,6 +675,7 @@ static ZigLLVMDIScope *get_di_scope(CodeGen *g, Scope *scope) { } case ScopeIdDeferExpr: case ScopeIdLoop: + case ScopeIdSuspend: case ScopeIdCompTime: case ScopeIdCoroPrelude: return get_di_scope(g, scope->parent); @@ -717,12 +741,12 @@ static LLVMValueRef get_int_overflow_fn(CodeGen *g, TypeTableEntry *type_entry, return fn_val; } -static LLVMValueRef get_floor_ceil_fn(CodeGen *g, TypeTableEntry *type_entry, ZigLLVMFnId fn_id) { +static LLVMValueRef get_float_fn(CodeGen *g, TypeTableEntry *type_entry, ZigLLVMFnId fn_id) { assert(type_entry->id == TypeTableEntryIdFloat); ZigLLVMFnKey key = {}; key.id = fn_id; - key.data.floor_ceil.bit_count = (uint32_t)type_entry->data.floating.bit_count; + key.data.floating.bit_count = (uint32_t)type_entry->data.floating.bit_count; auto existing_entry = g->llvm_fn_table.maybe_get(key); if (existing_entry) @@ -733,6 +757,8 
@@ static LLVMValueRef get_floor_ceil_fn(CodeGen *g, TypeTableEntry *type_entry, Zi name = "floor"; } else if (fn_id == ZigLLVMFnIdCeil) { name = "ceil"; + } else if (fn_id == ZigLLVMFnIdSqrt) { + name = "sqrt"; } else { zig_unreachable(); } @@ -815,7 +841,7 @@ static bool ir_want_fast_math(CodeGen *g, IrInstruction *instruction) { } static bool ir_want_runtime_safety(CodeGen *g, IrInstruction *instruction) { - if (g->build_mode == BuildModeFastRelease) + if (g->build_mode == BuildModeFastRelease || g->build_mode == BuildModeSmallRelease) return false; // TODO memoize @@ -859,7 +885,7 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) { return buf_create_from_str("exact division produced remainder"); case PanicMsgIdSliceWidenRemainder: return buf_create_from_str("slice widening size mismatch"); - case PanicMsgIdUnwrapMaybeFail: + case PanicMsgIdUnwrapOptionalFail: return buf_create_from_str("attempt to unwrap null"); case PanicMsgIdUnreachable: return buf_create_from_str("reached unreachable code"); @@ -869,6 +895,10 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) { return buf_create_from_str("incorrect alignment"); case PanicMsgIdBadUnionField: return buf_create_from_str("access of inactive union field"); + case PanicMsgIdBadEnumValue: + return buf_create_from_str("invalid enum value"); + case PanicMsgIdFloatToInt: + return buf_create_from_str("integer part of floating point value out of bounds"); } zig_unreachable(); } @@ -887,7 +917,8 @@ static LLVMValueRef get_panic_msg_ptr_val(CodeGen *g, PanicMsgId msg_id) { assert(val->global_refs->llvm_global); } - TypeTableEntry *u8_ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, true); + TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, true, false, + PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0); TypeTableEntry *str_type = get_slice_type(g, u8_ptr_type); return LLVMConstBitCast(val->global_refs->llvm_global, LLVMPointerType(str_type->type_ref, 0)); } @@ -932,6 +963,53 @@ static LLVMValueRef get_memcpy_fn_val(CodeGen *g) { return g->memcpy_fn_val; } +static LLVMValueRef get_stacksave_fn_val(CodeGen *g) { + if (g->stacksave_fn_val) + return g->stacksave_fn_val; + + // declare i8* @llvm.stacksave() + + LLVMTypeRef fn_type = LLVMFunctionType(LLVMPointerType(LLVMInt8Type(), 0), nullptr, 0, false); + g->stacksave_fn_val = LLVMAddFunction(g->module, "llvm.stacksave", fn_type); + assert(LLVMGetIntrinsicID(g->stacksave_fn_val)); + + return g->stacksave_fn_val; +} + +static LLVMValueRef get_stackrestore_fn_val(CodeGen *g) { + if (g->stackrestore_fn_val) + return g->stackrestore_fn_val; + + // declare void @llvm.stackrestore(i8* %ptr) + + LLVMTypeRef param_type = LLVMPointerType(LLVMInt8Type(), 0); + LLVMTypeRef fn_type = LLVMFunctionType(LLVMVoidType(), &param_type, 1, false); + g->stackrestore_fn_val = LLVMAddFunction(g->module, "llvm.stackrestore", fn_type); + assert(LLVMGetIntrinsicID(g->stackrestore_fn_val)); + + return g->stackrestore_fn_val; +} + +static LLVMValueRef get_write_register_fn_val(CodeGen *g) { + if (g->write_register_fn_val) + return g->write_register_fn_val; + + // declare void @llvm.write_register.i64(metadata, i64 @value) + // !0 = !{!"sp\00"} + + LLVMTypeRef param_types[] = { + LLVMMetadataTypeInContext(LLVMGetGlobalContext()), + LLVMIntType(g->pointer_size_bytes * 8), + }; + + LLVMTypeRef fn_type = LLVMFunctionType(LLVMVoidType(), param_types, 2, false); + Buf *name = buf_sprintf("llvm.write_register.i%d", g->pointer_size_bytes * 8); + g->write_register_fn_val = 
LLVMAddFunction(g->module, buf_ptr(name), fn_type); + assert(LLVMGetIntrinsicID(g->write_register_fn_val)); + + return g->write_register_fn_val; +} + static LLVMValueRef get_coro_destroy_fn_val(CodeGen *g) { if (g->coro_destroy_fn_val) return g->coro_destroy_fn_val; @@ -1408,7 +1486,8 @@ static LLVMValueRef get_safety_crash_err_fn(CodeGen *g) { LLVMValueRef full_buf_ptr = LLVMConstInBoundsGEP(global_array, full_buf_ptr_indices, 2); - TypeTableEntry *u8_ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, true); + TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, true, false, + PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0); TypeTableEntry *str_type = get_slice_type(g, u8_ptr_type); LLVMValueRef global_slice_fields[] = { full_buf_ptr, @@ -1626,7 +1705,7 @@ static LLVMValueRef gen_widen_or_shorten(CodeGen *g, bool want_runtime_safety, T return trunc_val; } LLVMValueRef orig_val; - if (actual_type->data.integral.is_signed) { + if (wanted_type->data.integral.is_signed) { orig_val = LLVMBuildSExt(g->builder, trunc_val, actual_type->type_ref, ""); } else { orig_val = LLVMBuildZExt(g->builder, trunc_val, actual_type->type_ref, ""); @@ -1900,7 +1979,7 @@ static LLVMValueRef gen_floor(CodeGen *g, LLVMValueRef val, TypeTableEntry *type if (type_entry->id == TypeTableEntryIdInt) return val; - LLVMValueRef floor_fn = get_floor_ceil_fn(g, type_entry, ZigLLVMFnIdFloor); + LLVMValueRef floor_fn = get_float_fn(g, type_entry, ZigLLVMFnIdFloor); return LLVMBuildCall(g->builder, floor_fn, &val, 1, ""); } @@ -1908,7 +1987,7 @@ static LLVMValueRef gen_ceil(CodeGen *g, LLVMValueRef val, TypeTableEntry *type_ if (type_entry->id == TypeTableEntryIdInt) return val; - LLVMValueRef ceil_fn = get_floor_ceil_fn(g, type_entry, ZigLLVMFnIdCeil); + LLVMValueRef ceil_fn = get_float_fn(g, type_entry, ZigLLVMFnIdCeil); return LLVMBuildCall(g->builder, ceil_fn, &val, 1, ""); } @@ -2159,9 +2238,13 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, IrExecutable *executable, IrInstruction *op2 = bin_op_instruction->op2; assert(op1->value.type == op2->value.type || op_id == IrBinOpBitShiftLeftLossy || - op_id == IrBinOpBitShiftLeftExact || op_id == IrBinOpBitShiftRightLossy || - op_id == IrBinOpBitShiftRightExact || - (op1->value.type->id == TypeTableEntryIdErrorSet && op2->value.type->id == TypeTableEntryIdErrorSet)); + op_id == IrBinOpBitShiftLeftExact || op_id == IrBinOpBitShiftRightLossy || + op_id == IrBinOpBitShiftRightExact || + (op1->value.type->id == TypeTableEntryIdErrorSet && op2->value.type->id == TypeTableEntryIdErrorSet) || + (op1->value.type->id == TypeTableEntryIdPointer && + (op_id == IrBinOpAdd || op_id == IrBinOpSub) && + op1->value.type->data.pointer.ptr_len == PtrLenUnknown) + ); TypeTableEntry *type_entry = op1->value.type; bool want_runtime_safety = bin_op_instruction->safety_check_on && @@ -2169,6 +2252,8 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, IrExecutable *executable, LLVMValueRef op1_value = ir_llvm_value(g, op1); LLVMValueRef op2_value = ir_llvm_value(g, op2); + + switch (op_id) { case IrBinOpInvalid: case IrBinOpArrayCat: @@ -2193,12 +2278,10 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, IrExecutable *executable, } else if (type_entry->id == TypeTableEntryIdInt) { LLVMIntPredicate pred = cmp_op_to_int_predicate(op_id, type_entry->data.integral.is_signed); return LLVMBuildICmp(g->builder, pred, op1_value, op2_value, ""); - } else if (type_entry->id == TypeTableEntryIdEnum) { - LLVMIntPredicate pred = 
cmp_op_to_int_predicate(op_id, false); - return LLVMBuildICmp(g->builder, pred, op1_value, op2_value, ""); - } else if (type_entry->id == TypeTableEntryIdErrorSet || - type_entry->id == TypeTableEntryIdPointer || - type_entry->id == TypeTableEntryIdBool) + } else if (type_entry->id == TypeTableEntryIdEnum || + type_entry->id == TypeTableEntryIdErrorSet || + type_entry->id == TypeTableEntryIdBool || + get_codegen_ptr_type(type_entry) != nullptr) { LLVMIntPredicate pred = cmp_op_to_int_predicate(op_id, false); return LLVMBuildICmp(g->builder, pred, op1_value, op2_value, ""); @@ -2207,7 +2290,11 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, IrExecutable *executable, } case IrBinOpAdd: case IrBinOpAddWrap: - if (type_entry->id == TypeTableEntryIdFloat) { + if (type_entry->id == TypeTableEntryIdPointer) { + assert(type_entry->data.pointer.ptr_len == PtrLenUnknown); + // TODO runtime safety + return LLVMBuildInBoundsGEP(g->builder, op1_value, &op2_value, 1, ""); + } else if (type_entry->id == TypeTableEntryIdFloat) { ZigLLVMSetFastMath(g->builder, ir_want_fast_math(g, &bin_op_instruction->base)); return LLVMBuildFAdd(g->builder, op1_value, op2_value, ""); } else if (type_entry->id == TypeTableEntryIdInt) { @@ -2270,7 +2357,12 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, IrExecutable *executable, } case IrBinOpSub: case IrBinOpSubWrap: - if (type_entry->id == TypeTableEntryIdFloat) { + if (type_entry->id == TypeTableEntryIdPointer) { + assert(type_entry->data.pointer.ptr_len == PtrLenUnknown); + // TODO runtime safety + LLVMValueRef subscript_value = LLVMBuildNeg(g->builder, op2_value, ""); + return LLVMBuildInBoundsGEP(g->builder, op1_value, &subscript_value, 1, ""); + } else if (type_entry->id == TypeTableEntryIdFloat) { ZigLLVMSetFastMath(g->builder, ir_want_fast_math(g, &bin_op_instruction->base)); return LLVMBuildFSub(g->builder, op1_value, op2_value, ""); } else if (type_entry->id == TypeTableEntryIdInt) { @@ -2460,7 +2552,7 @@ static LLVMValueRef ir_render_cast(CodeGen *g, IrExecutable *executable, assert(wanted_type->data.structure.is_slice); assert(actual_type->id == TypeTableEntryIdArray); - TypeTableEntry *wanted_pointer_type = wanted_type->data.structure.fields[0].type_entry; + TypeTableEntry *wanted_pointer_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry; TypeTableEntry *wanted_child_type = wanted_pointer_type->data.pointer.child_type; @@ -2486,15 +2578,41 @@ static LLVMValueRef ir_render_cast(CodeGen *g, IrExecutable *executable, } else { return LLVMBuildUIToFP(g->builder, expr_val, wanted_type->type_ref, ""); } - case CastOpFloatToInt: + case CastOpFloatToInt: { assert(wanted_type->id == TypeTableEntryIdInt); ZigLLVMSetFastMath(g->builder, ir_want_fast_math(g, &cast_instruction->base)); + + bool want_safety = ir_want_runtime_safety(g, &cast_instruction->base); + + LLVMValueRef result; if (wanted_type->data.integral.is_signed) { - return LLVMBuildFPToSI(g->builder, expr_val, wanted_type->type_ref, ""); + result = LLVMBuildFPToSI(g->builder, expr_val, wanted_type->type_ref, ""); } else { - return LLVMBuildFPToUI(g->builder, expr_val, wanted_type->type_ref, ""); + result = LLVMBuildFPToUI(g->builder, expr_val, wanted_type->type_ref, ""); } + if (want_safety) { + LLVMValueRef back_to_float; + if (wanted_type->data.integral.is_signed) { + back_to_float = LLVMBuildSIToFP(g->builder, result, LLVMTypeOf(expr_val), ""); + } else { + back_to_float = LLVMBuildUIToFP(g->builder, result, LLVMTypeOf(expr_val), ""); + } + LLVMValueRef difference = 
LLVMBuildFSub(g->builder, expr_val, back_to_float, ""); + LLVMValueRef one_pos = LLVMConstReal(LLVMTypeOf(expr_val), 1.0f); + LLVMValueRef one_neg = LLVMConstReal(LLVMTypeOf(expr_val), -1.0f); + LLVMValueRef ok_bit_pos = LLVMBuildFCmp(g->builder, LLVMRealOLT, difference, one_pos, ""); + LLVMValueRef ok_bit_neg = LLVMBuildFCmp(g->builder, LLVMRealOGT, difference, one_neg, ""); + LLVMValueRef ok_bit = LLVMBuildAnd(g->builder, ok_bit_pos, ok_bit_neg, ""); + LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "FloatCheckOk"); + LLVMBasicBlockRef bad_block = LLVMAppendBasicBlock(g->cur_fn_val, "FloatCheckFail"); + LLVMBuildCondBr(g->builder, ok_bit, ok_block, bad_block); + LLVMPositionBuilderAtEnd(g->builder, bad_block); + gen_safety_crash(g, PanicMsgIdFloatToInt); + LLVMPositionBuilderAtEnd(g->builder, ok_block); + } + return result; + } case CastOpBoolToInt: assert(wanted_type->id == TypeTableEntryIdInt); assert(actual_type->id == TypeTableEntryIdBool); @@ -2504,6 +2622,31 @@ static LLVMValueRef ir_render_cast(CodeGen *g, IrExecutable *executable, add_error_range_check(g, wanted_type, g->err_tag_type, expr_val); } return expr_val; + case CastOpBitCast: + return LLVMBuildBitCast(g->builder, expr_val, wanted_type->type_ref, ""); + case CastOpPtrOfArrayToSlice: { + assert(cast_instruction->tmp_ptr); + assert(actual_type->id == TypeTableEntryIdPointer); + TypeTableEntry *array_type = actual_type->data.pointer.child_type; + assert(array_type->id == TypeTableEntryIdArray); + + LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, cast_instruction->tmp_ptr, + slice_ptr_index, ""); + LLVMValueRef indices[] = { + LLVMConstNull(g->builtin_types.entry_usize->type_ref), + LLVMConstInt(g->builtin_types.entry_usize->type_ref, 0, false), + }; + LLVMValueRef slice_start_ptr = LLVMBuildInBoundsGEP(g->builder, expr_val, indices, 2, ""); + gen_store_untyped(g, slice_start_ptr, ptr_field_ptr, 0, false); + + LLVMValueRef len_field_ptr = LLVMBuildStructGEP(g->builder, cast_instruction->tmp_ptr, + slice_len_index, ""); + LLVMValueRef len_value = LLVMConstInt(g->builtin_types.entry_usize->type_ref, + array_type->data.array.len, false); + gen_store_untyped(g, len_value, len_field_ptr, 0, false); + + return cast_instruction->tmp_ptr; + } } zig_unreachable(); } @@ -2559,8 +2702,25 @@ static LLVMValueRef ir_render_int_to_enum(CodeGen *g, IrExecutable *executable, TypeTableEntry *tag_int_type = wanted_type->data.enumeration.tag_int_type; LLVMValueRef target_val = ir_llvm_value(g, instruction->target); - return gen_widen_or_shorten(g, ir_want_runtime_safety(g, &instruction->base), + LLVMValueRef tag_int_value = gen_widen_or_shorten(g, ir_want_runtime_safety(g, &instruction->base), instruction->target->value.type, tag_int_type, target_val); + + if (ir_want_runtime_safety(g, &instruction->base)) { + LLVMBasicBlockRef bad_value_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadValue"); + LLVMBasicBlockRef ok_value_block = LLVMAppendBasicBlock(g->cur_fn_val, "OkValue"); + size_t field_count = wanted_type->data.enumeration.src_field_count; + LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, tag_int_value, bad_value_block, field_count); + for (size_t field_i = 0; field_i < field_count; field_i += 1) { + LLVMValueRef this_tag_int_value = bigint_to_llvm_const(tag_int_type->type_ref, + &wanted_type->data.enumeration.fields[field_i].value); + LLVMAddCase(switch_instr, this_tag_int_value, ok_value_block); + } + LLVMPositionBuilderAtEnd(g->builder, bad_value_block); + gen_safety_crash(g, 
PanicMsgIdBadEnumValue); + + LLVMPositionBuilderAtEnd(g->builder, ok_value_block); + } + return tag_int_value; } static LLVMValueRef ir_render_int_to_err(CodeGen *g, IrExecutable *executable, IrInstructionIntToErr *instruction) { @@ -2639,7 +2799,7 @@ static LLVMValueRef ir_render_un_op(CodeGen *g, IrExecutable *executable, IrInst switch (op_id) { case IrUnOpInvalid: - case IrUnOpMaybe: + case IrUnOpOptional: case IrUnOpDereference: zig_unreachable(); case IrUnOpNegation: @@ -2717,7 +2877,7 @@ static LLVMValueRef ir_render_decl_var(CodeGen *g, IrExecutable *executable, if (have_init_expr) { assert(var->value->type == init_value->value.type); TypeTableEntry *var_ptr_type = get_pointer_to_type_extra(g, var->value->type, false, false, - var->align_bytes, 0, 0); + PtrLenSingle, var->align_bytes, 0, 0); gen_assign_raw(g, var->value_ref, var_ptr_type, ir_llvm_value(g, init_value)); } else { bool want_safe = ir_want_runtime_safety(g, &decl_var_instruction->base); @@ -2814,7 +2974,13 @@ static LLVMValueRef ir_render_elem_ptr(CodeGen *g, IrExecutable *executable, IrI bool safety_check_on = ir_want_runtime_safety(g, &instruction->base) && instruction->safety_check_on; - if (array_type->id == TypeTableEntryIdArray) { + if (array_type->id == TypeTableEntryIdArray || + (array_type->id == TypeTableEntryIdPointer && array_type->data.pointer.ptr_len == PtrLenSingle)) + { + if (array_type->id == TypeTableEntryIdPointer) { + assert(array_type->data.pointer.child_type->id == TypeTableEntryIdArray); + array_type = array_type->data.pointer.child_type; + } if (safety_check_on) { LLVMValueRef end = LLVMConstInt(g->builtin_types.entry_usize->type_ref, array_type->data.array.len, false); @@ -2855,18 +3021,26 @@ static LLVMValueRef ir_render_elem_ptr(CodeGen *g, IrExecutable *executable, IrI return LLVMBuildInBoundsGEP(g->builder, array_ptr, indices, 1, ""); } else if (array_type->id == TypeTableEntryIdStruct) { assert(array_type->data.structure.is_slice); + if (!type_has_bits(instruction->base.value.type)) { + if (safety_check_on) { + assert(LLVMGetTypeKind(LLVMTypeOf(array_ptr)) == LLVMIntegerTypeKind); + add_bounds_check(g, subscript_value, LLVMIntEQ, nullptr, LLVMIntULT, array_ptr); + } + return nullptr; + } + assert(LLVMGetTypeKind(LLVMTypeOf(array_ptr)) == LLVMPointerTypeKind); assert(LLVMGetTypeKind(LLVMGetElementType(LLVMTypeOf(array_ptr))) == LLVMStructTypeKind); if (safety_check_on) { - size_t len_index = array_type->data.structure.fields[1].gen_index; + size_t len_index = array_type->data.structure.fields[slice_len_index].gen_index; assert(len_index != SIZE_MAX); LLVMValueRef len_ptr = LLVMBuildStructGEP(g->builder, array_ptr, (unsigned)len_index, ""); LLVMValueRef len = gen_load_untyped(g, len_ptr, 0, false, ""); add_bounds_check(g, subscript_value, LLVMIntEQ, nullptr, LLVMIntULT, len); } - size_t ptr_index = array_type->data.structure.fields[0].gen_index; + size_t ptr_index = array_type->data.structure.fields[slice_ptr_index].gen_index; assert(ptr_index != SIZE_MAX); LLVMValueRef ptr_ptr = LLVMBuildStructGEP(g->builder, array_ptr, (unsigned)ptr_index, ""); LLVMValueRef ptr = gen_load_untyped(g, ptr_ptr, 0, false, ""); @@ -2895,6 +3069,38 @@ static size_t get_async_err_code_arg_index(CodeGen *g, FnTypeId *fn_type_id) { return 1 + get_async_allocator_arg_index(g, fn_type_id); } + +static LLVMValueRef get_new_stack_addr(CodeGen *g, LLVMValueRef new_stack) { + LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, new_stack, (unsigned)slice_ptr_index, ""); + LLVMValueRef len_field_ptr = 
LLVMBuildStructGEP(g->builder, new_stack, (unsigned)slice_len_index, ""); + + LLVMValueRef ptr_value = gen_load_untyped(g, ptr_field_ptr, 0, false, ""); + LLVMValueRef len_value = gen_load_untyped(g, len_field_ptr, 0, false, ""); + + LLVMValueRef ptr_addr = LLVMBuildPtrToInt(g->builder, ptr_value, LLVMTypeOf(len_value), ""); + LLVMValueRef end_addr = LLVMBuildNUWAdd(g->builder, ptr_addr, len_value, ""); + LLVMValueRef align_amt = LLVMConstInt(LLVMTypeOf(end_addr), get_abi_alignment(g, g->builtin_types.entry_usize), false); + LLVMValueRef align_adj = LLVMBuildURem(g->builder, end_addr, align_amt, ""); + return LLVMBuildNUWSub(g->builder, end_addr, align_adj, ""); +} + +static void gen_set_stack_pointer(CodeGen *g, LLVMValueRef aligned_end_addr) { + LLVMValueRef write_register_fn_val = get_write_register_fn_val(g); + + if (g->sp_md_node == nullptr) { + Buf *sp_reg_name = buf_create_from_str(arch_stack_pointer_register_name(&g->zig_target.arch)); + LLVMValueRef str_node = LLVMMDString(buf_ptr(sp_reg_name), buf_len(sp_reg_name) + 1); + g->sp_md_node = LLVMMDNode(&str_node, 1); + } + + LLVMValueRef params[] = { + g->sp_md_node, + aligned_end_addr, + }; + + LLVMBuildCall(g->builder, write_register_fn_val, params, 2, ""); +} + static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstructionCall *instruction) { LLVMValueRef fn_val; TypeTableEntry *fn_type; @@ -2961,18 +3167,24 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr } LLVMCallConv llvm_cc = get_llvm_cc(g, fn_type->data.fn.fn_type_id.cc); - LLVMValueRef result = ZigLLVMBuildCall(g->builder, fn_val, - gen_param_values, (unsigned)gen_param_index, llvm_cc, fn_inline, ""); + LLVMValueRef result; + + if (instruction->new_stack == nullptr) { + result = ZigLLVMBuildCall(g->builder, fn_val, + gen_param_values, (unsigned)gen_param_index, llvm_cc, fn_inline, ""); + } else { + LLVMValueRef stacksave_fn_val = get_stacksave_fn_val(g); + LLVMValueRef stackrestore_fn_val = get_stackrestore_fn_val(g); - for (size_t param_i = 0; param_i < fn_type_id->param_count; param_i += 1) { - FnGenParamInfo *gen_info = &fn_type->data.fn.gen_param_info[param_i]; - // Note: byval is disabled on windows due to an LLVM bug: - // https://github.com/zig-lang/zig/issues/536 - if (gen_info->is_byval && g->zig_target.os != OsWindows) { - addLLVMCallsiteAttr(result, (unsigned)gen_info->gen_index, "byval"); - } + LLVMValueRef new_stack_addr = get_new_stack_addr(g, ir_llvm_value(g, instruction->new_stack)); + LLVMValueRef old_stack_ref = LLVMBuildCall(g->builder, stacksave_fn_val, nullptr, 0, ""); + gen_set_stack_pointer(g, new_stack_addr); + result = ZigLLVMBuildCall(g->builder, fn_val, + gen_param_values, (unsigned)gen_param_index, llvm_cc, fn_inline, ""); + LLVMBuildCall(g->builder, stackrestore_fn_val, &old_stack_ref, 1, ""); } + if (instruction->is_async) { LLVMValueRef payload_ptr = LLVMBuildStructGEP(g->builder, instruction->tmp_ptr, err_union_payload_index, ""); LLVMBuildStore(g->builder, result, payload_ptr); @@ -2985,6 +3197,10 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr return nullptr; } else if (first_arg_ret) { return instruction->tmp_ptr; + } else if (handle_is_ptr(src_return_type)) { + auto store_instr = LLVMBuildStore(g->builder, result, instruction->tmp_ptr); + LLVMSetAlignment(store_instr, LLVMGetAlignment(instruction->tmp_ptr)); + return instruction->tmp_ptr; } else { return result; } @@ -3185,7 +3401,7 @@ static LLVMValueRef ir_render_asm(CodeGen *g, IrExecutable 
*executable, IrInstru } static LLVMValueRef gen_non_null_bit(CodeGen *g, TypeTableEntry *maybe_type, LLVMValueRef maybe_handle) { - assert(maybe_type->id == TypeTableEntryIdMaybe); + assert(maybe_type->id == TypeTableEntryIdOptional); TypeTableEntry *child_type = maybe_type->data.maybe.child_type; if (child_type->zero_bits) { return maybe_handle; @@ -3207,23 +3423,23 @@ static LLVMValueRef ir_render_test_non_null(CodeGen *g, IrExecutable *executable } static LLVMValueRef ir_render_unwrap_maybe(CodeGen *g, IrExecutable *executable, - IrInstructionUnwrapMaybe *instruction) + IrInstructionUnwrapOptional *instruction) { TypeTableEntry *ptr_type = instruction->value->value.type; assert(ptr_type->id == TypeTableEntryIdPointer); TypeTableEntry *maybe_type = ptr_type->data.pointer.child_type; - assert(maybe_type->id == TypeTableEntryIdMaybe); + assert(maybe_type->id == TypeTableEntryIdOptional); TypeTableEntry *child_type = maybe_type->data.maybe.child_type; LLVMValueRef maybe_ptr = ir_llvm_value(g, instruction->value); LLVMValueRef maybe_handle = get_handle_value(g, maybe_ptr, maybe_type, ptr_type); if (ir_want_runtime_safety(g, &instruction->base) && instruction->safety_check_on) { LLVMValueRef non_null_bit = gen_non_null_bit(g, maybe_type, maybe_handle); - LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "UnwrapMaybeOk"); - LLVMBasicBlockRef fail_block = LLVMAppendBasicBlock(g->cur_fn_val, "UnwrapMaybeFail"); + LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "UnwrapOptionalOk"); + LLVMBasicBlockRef fail_block = LLVMAppendBasicBlock(g->cur_fn_val, "UnwrapOptionalFail"); LLVMBuildCondBr(g->builder, non_null_bit, ok_block, fail_block); LLVMPositionBuilderAtEnd(g->builder, fail_block); - gen_safety_crash(g, PanicMsgIdUnwrapMaybeFail); + gen_safety_crash(g, PanicMsgIdUnwrapOptionalFail); LLVMPositionBuilderAtEnd(g->builder, ok_block); } @@ -3243,14 +3459,24 @@ static LLVMValueRef ir_render_unwrap_maybe(CodeGen *g, IrExecutable *executable, static LLVMValueRef get_int_builtin_fn(CodeGen *g, TypeTableEntry *int_type, BuiltinFnId fn_id) { ZigLLVMFnKey key = {}; const char *fn_name; + uint32_t n_args; if (fn_id == BuiltinFnIdCtz) { fn_name = "cttz"; + n_args = 2; key.id = ZigLLVMFnIdCtz; key.data.ctz.bit_count = (uint32_t)int_type->data.integral.bit_count; - } else { + } else if (fn_id == BuiltinFnIdClz) { fn_name = "ctlz"; + n_args = 2; key.id = ZigLLVMFnIdClz; key.data.clz.bit_count = (uint32_t)int_type->data.integral.bit_count; + } else if (fn_id == BuiltinFnIdPopCount) { + fn_name = "ctpop"; + n_args = 1; + key.id = ZigLLVMFnIdPopCount; + key.data.pop_count.bit_count = (uint32_t)int_type->data.integral.bit_count; + } else { + zig_unreachable(); } auto existing_entry = g->llvm_fn_table.maybe_get(key); @@ -3263,7 +3489,7 @@ static LLVMValueRef get_int_builtin_fn(CodeGen *g, TypeTableEntry *int_type, Bui int_type->type_ref, LLVMInt1Type(), }; - LLVMTypeRef fn_type = LLVMFunctionType(int_type->type_ref, param_types, 2, false); + LLVMTypeRef fn_type = LLVMFunctionType(int_type->type_ref, param_types, n_args, false); LLVMValueRef fn_val = LLVMAddFunction(g->module, llvm_name, fn_type); assert(LLVMGetIntrinsicID(fn_val)); @@ -3296,6 +3522,14 @@ static LLVMValueRef ir_render_ctz(CodeGen *g, IrExecutable *executable, IrInstru return gen_widen_or_shorten(g, false, int_type, instruction->base.value.type, wrong_size_int); } +static LLVMValueRef ir_render_pop_count(CodeGen *g, IrExecutable *executable, IrInstructionPopCount *instruction) { + TypeTableEntry *int_type = 
instruction->value->value.type; + LLVMValueRef fn_val = get_int_builtin_fn(g, int_type, BuiltinFnIdPopCount); + LLVMValueRef operand = ir_llvm_value(g, instruction->value); + LLVMValueRef wrong_size_int = LLVMBuildCall(g->builder, fn_val, &operand, 1, ""); + return gen_widen_or_shorten(g, false, int_type, instruction->base.value.type, wrong_size_int); +} + static LLVMValueRef ir_render_switch_br(CodeGen *g, IrExecutable *executable, IrInstructionSwitchBr *instruction) { LLVMValueRef target_value = ir_llvm_value(g, instruction->target_value); LLVMBasicBlockRef else_block = instruction->else_block->llvm_block; @@ -3366,34 +3600,112 @@ static LLVMValueRef ir_render_err_name(CodeGen *g, IrExecutable *executable, IrI return LLVMBuildInBoundsGEP(g->builder, g->err_name_table, indices, 2, ""); } +static LLVMValueRef get_enum_tag_name_function(CodeGen *g, TypeTableEntry *enum_type) { + assert(enum_type->id == TypeTableEntryIdEnum); + if (enum_type->data.enumeration.name_function) + return enum_type->data.enumeration.name_function; + + TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, false, false, + PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0); + TypeTableEntry *u8_slice_type = get_slice_type(g, u8_ptr_type); + TypeTableEntry *tag_int_type = enum_type->data.enumeration.tag_int_type; + + LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMPointerType(u8_slice_type->type_ref, 0), + &tag_int_type->type_ref, 1, false); + + Buf *fn_name = get_mangled_name(g, buf_sprintf("__zig_tag_name_%s", buf_ptr(&enum_type->name)), false); + LLVMValueRef fn_val = LLVMAddFunction(g->module, buf_ptr(fn_name), fn_type_ref); + LLVMSetLinkage(fn_val, LLVMInternalLinkage); + LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified)); + addLLVMFnAttr(fn_val, "nounwind"); + add_uwtable_attr(g, fn_val); + if (g->build_mode == BuildModeDebug) { + ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim", "true"); + ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim-non-leaf", nullptr); + } + + LLVMBasicBlockRef prev_block = LLVMGetInsertBlock(g->builder); + LLVMValueRef prev_debug_location = LLVMGetCurrentDebugLocation(g->builder); + FnTableEntry *prev_cur_fn = g->cur_fn; + LLVMValueRef prev_cur_fn_val = g->cur_fn_val; + + LLVMBasicBlockRef entry_block = LLVMAppendBasicBlock(fn_val, "Entry"); + LLVMPositionBuilderAtEnd(g->builder, entry_block); + ZigLLVMClearCurrentDebugLocation(g->builder); + g->cur_fn = nullptr; + g->cur_fn_val = fn_val; + + size_t field_count = enum_type->data.enumeration.src_field_count; + LLVMBasicBlockRef bad_value_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadValue"); + LLVMValueRef tag_int_value = LLVMGetParam(fn_val, 0); + LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, tag_int_value, bad_value_block, field_count); + + + TypeTableEntry *usize = g->builtin_types.entry_usize; + LLVMValueRef array_ptr_indices[] = { + LLVMConstNull(usize->type_ref), + LLVMConstNull(usize->type_ref), + }; + + for (size_t field_i = 0; field_i < field_count; field_i += 1) { + Buf *name = enum_type->data.enumeration.fields[field_i].name; + LLVMValueRef str_init = LLVMConstString(buf_ptr(name), (unsigned)buf_len(name), true); + LLVMValueRef str_global = LLVMAddGlobal(g->module, LLVMTypeOf(str_init), ""); + LLVMSetInitializer(str_global, str_init); + LLVMSetLinkage(str_global, LLVMPrivateLinkage); + LLVMSetGlobalConstant(str_global, true); + LLVMSetUnnamedAddr(str_global, true); + LLVMSetAlignment(str_global, 
LLVMABIAlignmentOfType(g->target_data_ref, LLVMTypeOf(str_init))); + + LLVMValueRef fields[] = { + LLVMConstGEP(str_global, array_ptr_indices, 2), + LLVMConstInt(g->builtin_types.entry_usize->type_ref, buf_len(name), false), + }; + LLVMValueRef slice_init_value = LLVMConstNamedStruct(u8_slice_type->type_ref, fields, 2); + + LLVMValueRef slice_global = LLVMAddGlobal(g->module, LLVMTypeOf(slice_init_value), ""); + LLVMSetInitializer(slice_global, slice_init_value); + LLVMSetLinkage(slice_global, LLVMPrivateLinkage); + LLVMSetGlobalConstant(slice_global, true); + LLVMSetUnnamedAddr(slice_global, true); + LLVMSetAlignment(slice_global, LLVMABIAlignmentOfType(g->target_data_ref, LLVMTypeOf(slice_init_value))); + + LLVMBasicBlockRef return_block = LLVMAppendBasicBlock(g->cur_fn_val, "Name"); + LLVMValueRef this_tag_int_value = bigint_to_llvm_const(tag_int_type->type_ref, + &enum_type->data.enumeration.fields[field_i].value); + LLVMAddCase(switch_instr, this_tag_int_value, return_block); + + LLVMPositionBuilderAtEnd(g->builder, return_block); + LLVMBuildRet(g->builder, slice_global); + } + + LLVMPositionBuilderAtEnd(g->builder, bad_value_block); + if (g->build_mode == BuildModeDebug || g->build_mode == BuildModeSafeRelease) { + gen_safety_crash(g, PanicMsgIdBadEnumValue); + } else { + LLVMBuildUnreachable(g->builder); + } + + g->cur_fn = prev_cur_fn; + g->cur_fn_val = prev_cur_fn_val; + LLVMPositionBuilderAtEnd(g->builder, prev_block); + LLVMSetCurrentDebugLocation(g->builder, prev_debug_location); + + enum_type->data.enumeration.name_function = fn_val; + return fn_val; +} + static LLVMValueRef ir_render_enum_tag_name(CodeGen *g, IrExecutable *executable, IrInstructionTagName *instruction) { TypeTableEntry *enum_type = instruction->target->value.type; assert(enum_type->id == TypeTableEntryIdEnum); - assert(enum_type->data.enumeration.generate_name_table); - TypeTableEntry *tag_int_type = enum_type->data.enumeration.tag_int_type; - LLVMValueRef enum_tag_value = ir_llvm_value(g, instruction->target); - if (ir_want_runtime_safety(g, &instruction->base)) { - size_t field_count = enum_type->data.enumeration.src_field_count; - - // if the field_count can't fit in the bits of the enum_type, then it can't possibly - // be the wrong value - BigInt field_bi; - bigint_init_unsigned(&field_bi, field_count); - if (bigint_fits_in_bits(&field_bi, tag_int_type->data.integral.bit_count, false)) { - LLVMValueRef end_val = LLVMConstInt(LLVMTypeOf(enum_tag_value), field_count, false); - add_bounds_check(g, enum_tag_value, LLVMIntEQ, nullptr, LLVMIntULT, end_val); - } - } + LLVMValueRef enum_name_function = get_enum_tag_name_function(g, enum_type); - LLVMValueRef indices[] = { - LLVMConstNull(g->builtin_types.entry_usize->type_ref), - gen_widen_or_shorten(g, false, tag_int_type, - g->builtin_types.entry_usize, enum_tag_value), - }; - return LLVMBuildInBoundsGEP(g->builder, enum_type->data.enumeration.name_table, indices, 2, ""); + LLVMValueRef enum_tag_value = ir_llvm_value(g, instruction->target); + return ZigLLVMBuildCall(g->builder, enum_name_function, &enum_tag_value, 1, + get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, ""); } static LLVMValueRef ir_render_field_parent_ptr(CodeGen *g, IrExecutable *executable, @@ -3443,17 +3755,17 @@ static LLVMValueRef ir_render_align_cast(CodeGen *g, IrExecutable *executable, I } else if (target_type->id == TypeTableEntryIdFn) { align_bytes = target_type->data.fn.fn_type_id.alignment; ptr_val = target_val; - } else if (target_type->id == 
TypeTableEntryIdMaybe && + } else if (target_type->id == TypeTableEntryIdOptional && target_type->data.maybe.child_type->id == TypeTableEntryIdPointer) { align_bytes = target_type->data.maybe.child_type->data.pointer.alignment; ptr_val = target_val; - } else if (target_type->id == TypeTableEntryIdMaybe && + } else if (target_type->id == TypeTableEntryIdOptional && target_type->data.maybe.child_type->id == TypeTableEntryIdFn) { align_bytes = target_type->data.maybe.child_type->data.fn.fn_type_id.alignment; ptr_val = target_val; - } else if (target_type->id == TypeTableEntryIdMaybe && + } else if (target_type->id == TypeTableEntryIdOptional && target_type->data.maybe.child_type->id == TypeTableEntryIdPromise) { zig_panic("TODO audit this function"); @@ -3552,9 +3864,30 @@ static LLVMValueRef ir_render_cmpxchg(CodeGen *g, IrExecutable *executable, IrIn LLVMAtomicOrdering failure_order = to_LLVMAtomicOrdering(instruction->failure_order); LLVMValueRef result_val = ZigLLVMBuildCmpXchg(g->builder, ptr_val, cmp_val, new_val, - success_order, failure_order); + success_order, failure_order, instruction->is_weak); + + TypeTableEntry *maybe_type = instruction->base.value.type; + assert(maybe_type->id == TypeTableEntryIdOptional); + TypeTableEntry *child_type = maybe_type->data.maybe.child_type; - return LLVMBuildExtractValue(g->builder, result_val, 1, ""); + if (type_is_codegen_pointer(child_type)) { + LLVMValueRef payload_val = LLVMBuildExtractValue(g->builder, result_val, 0, ""); + LLVMValueRef success_bit = LLVMBuildExtractValue(g->builder, result_val, 1, ""); + return LLVMBuildSelect(g->builder, success_bit, LLVMConstNull(child_type->type_ref), payload_val, ""); + } + + assert(instruction->tmp_ptr != nullptr); + assert(type_has_bits(instruction->type)); + + LLVMValueRef payload_val = LLVMBuildExtractValue(g->builder, result_val, 0, ""); + LLVMValueRef val_ptr = LLVMBuildStructGEP(g->builder, instruction->tmp_ptr, maybe_child_index, ""); + gen_assign_raw(g, val_ptr, get_pointer_to_type(g, instruction->type, false), payload_val); + + LLVMValueRef success_bit = LLVMBuildExtractValue(g->builder, result_val, 1, ""); + LLVMValueRef nonnull_bit = LLVMBuildNot(g->builder, success_bit, ""); + LLVMValueRef maybe_ptr = LLVMBuildStructGEP(g->builder, instruction->tmp_ptr, maybe_null_index, ""); + gen_store_untyped(g, nonnull_bit, maybe_ptr, 0, false); + return instruction->tmp_ptr; } static LLVMValueRef ir_render_fence(CodeGen *g, IrExecutable *executable, IrInstructionFence *instruction) { @@ -3654,7 +3987,12 @@ static LLVMValueRef ir_render_slice(CodeGen *g, IrExecutable *executable, IrInst bool want_runtime_safety = instruction->safety_check_on && ir_want_runtime_safety(g, &instruction->base); - if (array_type->id == TypeTableEntryIdArray) { + if (array_type->id == TypeTableEntryIdArray || + (array_type->id == TypeTableEntryIdPointer && array_type->data.pointer.ptr_len == PtrLenSingle)) + { + if (array_type->id == TypeTableEntryIdPointer) { + array_type = array_type->data.pointer.child_type; + } LLVMValueRef start_val = ir_llvm_value(g, instruction->start); LLVMValueRef end_val; if (instruction->end) { @@ -3662,7 +4000,6 @@ static LLVMValueRef ir_render_slice(CodeGen *g, IrExecutable *executable, IrInst } else { end_val = LLVMConstInt(g->builtin_types.entry_usize->type_ref, array_type->data.array.len, false); } - if (want_runtime_safety) { add_bounds_check(g, start_val, LLVMIntEQ, nullptr, LLVMIntULE, end_val); if (instruction->end) { @@ -3695,6 +4032,7 @@ static LLVMValueRef ir_render_slice(CodeGen *g, 
IrExecutable *executable, IrInst return tmp_struct_ptr; } else if (array_type->id == TypeTableEntryIdPointer) { + assert(array_type->data.pointer.ptr_len == PtrLenUnknown); LLVMValueRef start_val = ir_llvm_value(g, instruction->start); LLVMValueRef end_val = ir_llvm_value(g, instruction->end); @@ -3702,11 +4040,15 @@ static LLVMValueRef ir_render_slice(CodeGen *g, IrExecutable *executable, IrInst add_bounds_check(g, start_val, LLVMIntEQ, nullptr, LLVMIntULE, end_val); } - LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, tmp_struct_ptr, slice_ptr_index, ""); - LLVMValueRef slice_start_ptr = LLVMBuildInBoundsGEP(g->builder, array_ptr, &start_val, 1, ""); - gen_store_untyped(g, slice_start_ptr, ptr_field_ptr, 0, false); + if (type_has_bits(array_type)) { + size_t gen_ptr_index = instruction->base.value.type->data.structure.fields[slice_ptr_index].gen_index; + LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, tmp_struct_ptr, gen_ptr_index, ""); + LLVMValueRef slice_start_ptr = LLVMBuildInBoundsGEP(g->builder, array_ptr, &start_val, 1, ""); + gen_store_untyped(g, slice_start_ptr, ptr_field_ptr, 0, false); + } - LLVMValueRef len_field_ptr = LLVMBuildStructGEP(g->builder, tmp_struct_ptr, slice_len_index, ""); + size_t gen_len_index = instruction->base.value.type->data.structure.fields[slice_len_index].gen_index; + LLVMValueRef len_field_ptr = LLVMBuildStructGEP(g->builder, tmp_struct_ptr, gen_len_index, ""); LLVMValueRef len_value = LLVMBuildNSWSub(g->builder, end_val, start_val, ""); gen_store_untyped(g, len_value, len_field_ptr, 0, false); @@ -3804,6 +4146,26 @@ static LLVMValueRef ir_render_frame_address(CodeGen *g, IrExecutable *executable return LLVMBuildCall(g->builder, get_frame_address_fn_val(g), &zero, 1, ""); } +static LLVMValueRef get_handle_fn_val(CodeGen *g) { + if (g->coro_frame_fn_val) + return g->coro_frame_fn_val; + + LLVMTypeRef fn_type = LLVMFunctionType( LLVMPointerType(LLVMInt8Type(), 0) + , nullptr, 0, false); + Buf *name = buf_sprintf("llvm.coro.frame"); + g->coro_frame_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type); + assert(LLVMGetIntrinsicID(g->coro_frame_fn_val)); + + return g->coro_frame_fn_val; +} + +static LLVMValueRef ir_render_handle(CodeGen *g, IrExecutable *executable, + IrInstructionHandle *instruction) +{ + LLVMValueRef zero = LLVMConstNull(g->builtin_types.entry_promise->type_ref); + return LLVMBuildCall(g->builder, get_handle_fn_val(g), &zero, 0, ""); +} + static LLVMValueRef render_shl_with_overflow(CodeGen *g, IrInstructionOverflowOp *instruction) { TypeTableEntry *int_type = instruction->result_ptr_type; assert(int_type->id == TypeTableEntryIdInt); @@ -3939,10 +4301,10 @@ static LLVMValueRef ir_render_unwrap_err_payload(CodeGen *g, IrExecutable *execu } } -static LLVMValueRef ir_render_maybe_wrap(CodeGen *g, IrExecutable *executable, IrInstructionMaybeWrap *instruction) { +static LLVMValueRef ir_render_maybe_wrap(CodeGen *g, IrExecutable *executable, IrInstructionOptionalWrap *instruction) { TypeTableEntry *wanted_type = instruction->base.value.type; - assert(wanted_type->id == TypeTableEntryIdMaybe); + assert(wanted_type->id == TypeTableEntryIdOptional); TypeTableEntry *child_type = wanted_type->data.maybe.child_type; @@ -4049,7 +4411,7 @@ static LLVMValueRef ir_render_struct_init(CodeGen *g, IrExecutable *executable, uint32_t field_align_bytes = get_abi_alignment(g, type_struct_field->type_entry); TypeTableEntry *ptr_type = get_pointer_to_type_extra(g, type_struct_field->type_entry, - false, false, field_align_bytes, + 
false, false, PtrLenSingle, field_align_bytes, (uint32_t)type_struct_field->packed_bits_offset, (uint32_t)type_struct_field->unaligned_bit_count); gen_assign_raw(g, field_ptr, ptr_type, value); @@ -4065,7 +4427,7 @@ static LLVMValueRef ir_render_union_init(CodeGen *g, IrExecutable *executable, I uint32_t field_align_bytes = get_abi_alignment(g, type_union_field->type_entry); TypeTableEntry *ptr_type = get_pointer_to_type_extra(g, type_union_field->type_entry, - false, false, field_align_bytes, + false, false, PtrLenSingle, field_align_bytes, 0, 0); LLVMValueRef uncasted_union_ptr; @@ -4312,7 +4674,8 @@ static LLVMValueRef get_coro_alloc_helper_fn_val(CodeGen *g, LLVMTypeRef alloc_f LLVMPositionBuilderAtEnd(g->builder, ok_block); LLVMValueRef payload_ptr = LLVMBuildStructGEP(g->builder, sret_ptr, err_union_payload_index, ""); - TypeTableEntry *u8_ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, false); + TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, false, false, + PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0); TypeTableEntry *slice_type = get_slice_type(g, u8_ptr_type); size_t ptr_field_index = slice_type->data.structure.fields[slice_ptr_index].gen_index; LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, payload_ptr, ptr_field_index, ""); @@ -4381,6 +4744,16 @@ static LLVMValueRef ir_render_atomic_rmw(CodeGen *g, IrExecutable *executable, return LLVMBuildIntToPtr(g->builder, uncasted_result, operand_type->type_ref, ""); } +static LLVMValueRef ir_render_atomic_load(CodeGen *g, IrExecutable *executable, + IrInstructionAtomicLoad *instruction) +{ + LLVMAtomicOrdering ordering = to_LLVMAtomicOrdering(instruction->resolved_ordering); + LLVMValueRef ptr = ir_llvm_value(g, instruction->ptr); + LLVMValueRef load_inst = gen_load(g, ptr, instruction->ptr->value.type, ""); + LLVMSetOrdering(load_inst, ordering); + return load_inst; +} + static LLVMValueRef ir_render_merge_err_ret_traces(CodeGen *g, IrExecutable *executable, IrInstructionMergeErrRetTraces *instruction) { @@ -4402,6 +4775,13 @@ static LLVMValueRef ir_render_mark_err_ret_trace_ptr(CodeGen *g, IrExecutable *e return nullptr; } +static LLVMValueRef ir_render_sqrt(CodeGen *g, IrExecutable *executable, IrInstructionSqrt *instruction) { + LLVMValueRef op = ir_llvm_value(g, instruction->op); + assert(instruction->base.value.type->id == TypeTableEntryIdFloat); + LLVMValueRef fn_val = get_float_fn(g, instruction->base.value.type, ZigLLVMFnIdSqrt); + return LLVMBuildCall(g->builder, fn_val, &op, 1, ""); +} + static void set_debug_location(CodeGen *g, IrInstruction *instruction) { AstNode *source_node = instruction->source_node; Scope *scope = instruction->scope; @@ -4453,13 +4833,13 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, case IrInstructionIdCheckSwitchProngs: case IrInstructionIdCheckStatementIsVoid: case IrInstructionIdTypeName: - case IrInstructionIdCanImplicitCast: case IrInstructionIdDeclRef: case IrInstructionIdSwitchVar: case IrInstructionIdOffsetOf: + case IrInstructionIdTypeInfo: case IrInstructionIdTypeId: case IrInstructionIdSetEvalBranchQuota: - case IrInstructionIdPtrTypeOf: + case IrInstructionIdPtrType: case IrInstructionIdOpaqueType: case IrInstructionIdSetAlignStack: case IrInstructionIdArgType: @@ -4469,6 +4849,15 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, case IrInstructionIdPromiseResultType: case IrInstructionIdAwaitBookkeeping: case 
IrInstructionIdAddImplicitReturnType: + case IrInstructionIdIntCast: + case IrInstructionIdFloatCast: + case IrInstructionIdIntToFloat: + case IrInstructionIdFloatToInt: + case IrInstructionIdBoolToInt: + case IrInstructionIdErrSetCast: + case IrInstructionIdFromBytes: + case IrInstructionIdToBytes: + case IrInstructionIdEnumToInt: zig_unreachable(); case IrInstructionIdReturn: @@ -4505,12 +4894,14 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_asm(g, executable, (IrInstructionAsm *)instruction); case IrInstructionIdTestNonNull: return ir_render_test_non_null(g, executable, (IrInstructionTestNonNull *)instruction); - case IrInstructionIdUnwrapMaybe: - return ir_render_unwrap_maybe(g, executable, (IrInstructionUnwrapMaybe *)instruction); + case IrInstructionIdUnwrapOptional: + return ir_render_unwrap_maybe(g, executable, (IrInstructionUnwrapOptional *)instruction); case IrInstructionIdClz: return ir_render_clz(g, executable, (IrInstructionClz *)instruction); case IrInstructionIdCtz: return ir_render_ctz(g, executable, (IrInstructionCtz *)instruction); + case IrInstructionIdPopCount: + return ir_render_pop_count(g, executable, (IrInstructionPopCount *)instruction); case IrInstructionIdSwitchBr: return ir_render_switch_br(g, executable, (IrInstructionSwitchBr *)instruction); case IrInstructionIdPhi: @@ -4539,6 +4930,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_return_address(g, executable, (IrInstructionReturnAddress *)instruction); case IrInstructionIdFrameAddress: return ir_render_frame_address(g, executable, (IrInstructionFrameAddress *)instruction); + case IrInstructionIdHandle: + return ir_render_handle(g, executable, (IrInstructionHandle *)instruction); case IrInstructionIdOverflowOp: return ir_render_overflow_op(g, executable, (IrInstructionOverflowOp *)instruction); case IrInstructionIdTestErr: @@ -4547,8 +4940,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_unwrap_err_code(g, executable, (IrInstructionUnwrapErrCode *)instruction); case IrInstructionIdUnwrapErrPayload: return ir_render_unwrap_err_payload(g, executable, (IrInstructionUnwrapErrPayload *)instruction); - case IrInstructionIdMaybeWrap: - return ir_render_maybe_wrap(g, executable, (IrInstructionMaybeWrap *)instruction); + case IrInstructionIdOptionalWrap: + return ir_render_maybe_wrap(g, executable, (IrInstructionOptionalWrap *)instruction); case IrInstructionIdErrWrapCode: return ir_render_err_wrap_code(g, executable, (IrInstructionErrWrapCode *)instruction); case IrInstructionIdErrWrapPayload: @@ -4617,12 +5010,16 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_coro_alloc_helper(g, executable, (IrInstructionCoroAllocHelper *)instruction); case IrInstructionIdAtomicRmw: return ir_render_atomic_rmw(g, executable, (IrInstructionAtomicRmw *)instruction); + case IrInstructionIdAtomicLoad: + return ir_render_atomic_load(g, executable, (IrInstructionAtomicLoad *)instruction); case IrInstructionIdSaveErrRetAddr: return ir_render_save_err_ret_addr(g, executable, (IrInstructionSaveErrRetAddr *)instruction); case IrInstructionIdMergeErrRetTraces: return ir_render_merge_err_ret_traces(g, executable, (IrInstructionMergeErrRetTraces *)instruction); case IrInstructionIdMarkErrRetTracePtr: return ir_render_mark_err_ret_trace_ptr(g, executable, (IrInstructionMarkErrRetTracePtr *)instruction); + case IrInstructionIdSqrt: + 
return ir_render_sqrt(g, executable, (IrInstructionSqrt *)instruction); } zig_unreachable(); } @@ -4649,7 +5046,7 @@ static void ir_render(CodeGen *g, FnTableEntry *fn_entry) { static LLVMValueRef gen_const_ptr_struct_recursive(CodeGen *g, ConstExprValue *struct_const_val, size_t field_index); static LLVMValueRef gen_const_ptr_array_recursive(CodeGen *g, ConstExprValue *array_const_val, size_t index); -static LLVMValueRef gen_const_ptr_union_recursive(CodeGen *g, ConstExprValue *array_const_val); +static LLVMValueRef gen_const_ptr_union_recursive(CodeGen *g, ConstExprValue *union_const_val); static LLVMValueRef gen_parent_ptr(CodeGen *g, ConstExprValue *val, ConstParent *parent) { switch (parent->id) { @@ -4665,6 +5062,10 @@ static LLVMValueRef gen_parent_ptr(CodeGen *g, ConstExprValue *val, ConstParent parent->data.p_array.elem_index); case ConstParentIdUnion: return gen_const_ptr_union_recursive(g, parent->data.p_union.union_val); + case ConstParentIdScalar: + render_const_val(g, parent->data.p_scalar.scalar_val, ""); + render_const_val_global(g, parent->data.p_scalar.scalar_val, ""); + return parent->data.p_scalar.scalar_val->global_refs->llvm_global; } zig_unreachable(); } @@ -4690,7 +5091,8 @@ static LLVMValueRef gen_const_ptr_array_recursive(CodeGen *g, ConstExprValue *ar }; return LLVMConstInBoundsGEP(base_ptr, indices, 2); } else { - zig_unreachable(); + assert(parent->id == ConstParentIdScalar); + return base_ptr; } } @@ -4734,10 +5136,10 @@ static LLVMValueRef pack_const_int(CodeGen *g, LLVMTypeRef big_int_type_ref, Con case TypeTableEntryIdInvalid: case TypeTableEntryIdMetaType: case TypeTableEntryIdUnreachable: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdNamespace: @@ -4769,7 +5171,7 @@ static LLVMValueRef pack_const_int(CodeGen *g, LLVMTypeRef big_int_type_ref, Con } case TypeTableEntryIdPointer: case TypeTableEntryIdFn: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdPromise: { LLVMValueRef ptr_val = gen_const_val(g, const_val, ""); @@ -4817,6 +5219,79 @@ static bool is_llvm_value_unnamed_type(TypeTableEntry *type_entry, LLVMValueRef return LLVMTypeOf(val) != type_entry->type_ref; } +static LLVMValueRef gen_const_val_ptr(CodeGen *g, ConstExprValue *const_val, const char *name) { + render_const_val_global(g, const_val, name); + switch (const_val->data.x_ptr.special) { + case ConstPtrSpecialInvalid: + case ConstPtrSpecialDiscard: + zig_unreachable(); + case ConstPtrSpecialRef: + { + ConstExprValue *pointee = const_val->data.x_ptr.data.ref.pointee; + render_const_val(g, pointee, ""); + render_const_val_global(g, pointee, ""); + ConstExprValue *other_val = pointee; + const_val->global_refs->llvm_value = LLVMConstBitCast(other_val->global_refs->llvm_global, const_val->type->type_ref); + render_const_val_global(g, const_val, ""); + return const_val->global_refs->llvm_value; + } + case ConstPtrSpecialBaseArray: + { + ConstExprValue *array_const_val = const_val->data.x_ptr.data.base_array.array_val; + size_t elem_index = const_val->data.x_ptr.data.base_array.elem_index; + assert(array_const_val->type->id == TypeTableEntryIdArray); + if (array_const_val->type->zero_bits) { + // make this a null pointer + TypeTableEntry *usize = 
g->builtin_types.entry_usize; + const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstNull(usize->type_ref), + const_val->type->type_ref); + render_const_val_global(g, const_val, ""); + return const_val->global_refs->llvm_value; + } + LLVMValueRef uncasted_ptr_val = gen_const_ptr_array_recursive(g, array_const_val, + elem_index); + LLVMValueRef ptr_val = LLVMConstBitCast(uncasted_ptr_val, const_val->type->type_ref); + const_val->global_refs->llvm_value = ptr_val; + render_const_val_global(g, const_val, ""); + return ptr_val; + } + case ConstPtrSpecialBaseStruct: + { + ConstExprValue *struct_const_val = const_val->data.x_ptr.data.base_struct.struct_val; + assert(struct_const_val->type->id == TypeTableEntryIdStruct); + if (struct_const_val->type->zero_bits) { + // make this a null pointer + TypeTableEntry *usize = g->builtin_types.entry_usize; + const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstNull(usize->type_ref), + const_val->type->type_ref); + render_const_val_global(g, const_val, ""); + return const_val->global_refs->llvm_value; + } + size_t src_field_index = const_val->data.x_ptr.data.base_struct.field_index; + size_t gen_field_index = + struct_const_val->type->data.structure.fields[src_field_index].gen_index; + LLVMValueRef uncasted_ptr_val = gen_const_ptr_struct_recursive(g, struct_const_val, + gen_field_index); + LLVMValueRef ptr_val = LLVMConstBitCast(uncasted_ptr_val, const_val->type->type_ref); + const_val->global_refs->llvm_value = ptr_val; + render_const_val_global(g, const_val, ""); + return ptr_val; + } + case ConstPtrSpecialHardCodedAddr: + { + uint64_t addr_value = const_val->data.x_ptr.data.hard_coded_addr.addr; + TypeTableEntry *usize = g->builtin_types.entry_usize; + const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstInt(usize->type_ref, addr_value, false), + const_val->type->type_ref); + render_const_val_global(g, const_val, ""); + return const_val->global_refs->llvm_value; + } + case ConstPtrSpecialFunction: + return LLVMConstBitCast(fn_llvm_value(g, const_val->data.x_ptr.data.fn.fn_entry), const_val->type->type_ref); + } + zig_unreachable(); +} + static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const char *name) { TypeTableEntry *type_entry = const_val->type; assert(!type_entry->zero_bits); @@ -4839,6 +5314,8 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c const_val->data.x_err_set->value, false); case TypeTableEntryIdFloat: switch (type_entry->data.floating.bit_count) { + case 16: + return LLVMConstReal(type_entry->type_ref, zig_f16_to_double(const_val->data.x_f16)); case 32: return LLVMConstReal(type_entry->type_ref, const_val->data.x_f32); case 64: @@ -4861,23 +5338,19 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c } else { return LLVMConstNull(LLVMInt1Type()); } - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: { TypeTableEntry *child_type = type_entry->data.maybe.child_type; if (child_type->zero_bits) { - return LLVMConstInt(LLVMInt1Type(), const_val->data.x_maybe ? 1 : 0, false); + return LLVMConstInt(LLVMInt1Type(), const_val->data.x_optional ? 
1 : 0, false); } else if (type_is_codegen_pointer(child_type)) { - if (const_val->data.x_maybe) { - return gen_const_val(g, const_val->data.x_maybe, ""); - } else { - return LLVMConstNull(child_type->type_ref); - } + return gen_const_val_ptr(g, const_val, name); } else { LLVMValueRef child_val; LLVMValueRef maybe_val; bool make_unnamed_struct; - if (const_val->data.x_maybe) { - child_val = gen_const_val(g, const_val->data.x_maybe, ""); + if (const_val->data.x_optional) { + child_val = gen_const_val(g, const_val->data.x_optional, ""); maybe_val = LLVMConstAllOnes(LLVMInt1Type()); make_unnamed_struct = is_llvm_value_unnamed_type(const_val->type, child_val); @@ -5067,78 +5540,7 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c assert(const_val->data.x_ptr.mut == ConstPtrMutComptimeConst); return fn_llvm_value(g, const_val->data.x_ptr.data.fn.fn_entry); case TypeTableEntryIdPointer: - { - render_const_val_global(g, const_val, name); - switch (const_val->data.x_ptr.special) { - case ConstPtrSpecialInvalid: - case ConstPtrSpecialDiscard: - zig_unreachable(); - case ConstPtrSpecialRef: - { - ConstExprValue *pointee = const_val->data.x_ptr.data.ref.pointee; - render_const_val(g, pointee, ""); - render_const_val_global(g, pointee, ""); - ConstExprValue *other_val = pointee; - const_val->global_refs->llvm_value = LLVMConstBitCast(other_val->global_refs->llvm_global, const_val->type->type_ref); - render_const_val_global(g, const_val, ""); - return const_val->global_refs->llvm_value; - } - case ConstPtrSpecialBaseArray: - { - ConstExprValue *array_const_val = const_val->data.x_ptr.data.base_array.array_val; - size_t elem_index = const_val->data.x_ptr.data.base_array.elem_index; - assert(array_const_val->type->id == TypeTableEntryIdArray); - if (array_const_val->type->zero_bits) { - // make this a null pointer - TypeTableEntry *usize = g->builtin_types.entry_usize; - const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstNull(usize->type_ref), - const_val->type->type_ref); - render_const_val_global(g, const_val, ""); - return const_val->global_refs->llvm_value; - } - LLVMValueRef uncasted_ptr_val = gen_const_ptr_array_recursive(g, array_const_val, - elem_index); - LLVMValueRef ptr_val = LLVMConstBitCast(uncasted_ptr_val, const_val->type->type_ref); - const_val->global_refs->llvm_value = ptr_val; - render_const_val_global(g, const_val, ""); - return ptr_val; - } - case ConstPtrSpecialBaseStruct: - { - ConstExprValue *struct_const_val = const_val->data.x_ptr.data.base_struct.struct_val; - assert(struct_const_val->type->id == TypeTableEntryIdStruct); - if (struct_const_val->type->zero_bits) { - // make this a null pointer - TypeTableEntry *usize = g->builtin_types.entry_usize; - const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstNull(usize->type_ref), - const_val->type->type_ref); - render_const_val_global(g, const_val, ""); - return const_val->global_refs->llvm_value; - } - size_t src_field_index = const_val->data.x_ptr.data.base_struct.field_index; - size_t gen_field_index = - struct_const_val->type->data.structure.fields[src_field_index].gen_index; - LLVMValueRef uncasted_ptr_val = gen_const_ptr_struct_recursive(g, struct_const_val, - gen_field_index); - LLVMValueRef ptr_val = LLVMConstBitCast(uncasted_ptr_val, const_val->type->type_ref); - const_val->global_refs->llvm_value = ptr_val; - render_const_val_global(g, const_val, ""); - return ptr_val; - } - case ConstPtrSpecialHardCodedAddr: - { - uint64_t addr_value = 
const_val->data.x_ptr.data.hard_coded_addr.addr; - TypeTableEntry *usize = g->builtin_types.entry_usize; - const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstInt(usize->type_ref, addr_value, false), - const_val->type->type_ref); - render_const_val_global(g, const_val, ""); - return const_val->global_refs->llvm_value; - } - case ConstPtrSpecialFunction: - return LLVMConstBitCast(fn_llvm_value(g, const_val->data.x_ptr.data.fn.fn_entry), const_val->type->type_ref); - } - } - zig_unreachable(); + return gen_const_val_ptr(g, const_val, name); case TypeTableEntryIdErrorUnion: { TypeTableEntry *payload_type = type_entry->data.error_union.payload_type; @@ -5180,10 +5582,10 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c case TypeTableEntryIdInvalid: case TypeTableEntryIdMetaType: case TypeTableEntryIdUnreachable: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: case TypeTableEntryIdBoundFn: @@ -5232,7 +5634,8 @@ static void generate_error_name_table(CodeGen *g) { assert(g->errors_by_index.length > 0); - TypeTableEntry *u8_ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, true); + TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, true, false, + PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0); TypeTableEntry *str_type = get_slice_type(g, u8_ptr_type); LLVMValueRef *values = allocate<LLVMValueRef>(g->errors_by_index.length); @@ -5269,54 +5672,6 @@ static void generate_error_name_table(CodeGen *g) { LLVMSetAlignment(g->err_name_table, LLVMABIAlignmentOfType(g->target_data_ref, LLVMTypeOf(err_name_table_init))); } -static void generate_enum_name_tables(CodeGen *g) { - TypeTableEntry *u8_ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, true); - TypeTableEntry *str_type = get_slice_type(g, u8_ptr_type); - - TypeTableEntry *usize = g->builtin_types.entry_usize; - LLVMValueRef array_ptr_indices[] = { - LLVMConstNull(usize->type_ref), - LLVMConstNull(usize->type_ref), - }; - - - for (size_t enum_i = 0; enum_i < g->name_table_enums.length; enum_i += 1) { - TypeTableEntry *enum_type = g->name_table_enums.at(enum_i); - assert(enum_type->id == TypeTableEntryIdEnum); - - size_t field_count = enum_type->data.enumeration.src_field_count; - LLVMValueRef *values = allocate<LLVMValueRef>(field_count); - for (size_t field_i = 0; field_i < field_count; field_i += 1) { - Buf *name = enum_type->data.enumeration.fields[field_i].name; - - LLVMValueRef str_init = LLVMConstString(buf_ptr(name), (unsigned)buf_len(name), true); - LLVMValueRef str_global = LLVMAddGlobal(g->module, LLVMTypeOf(str_init), ""); - LLVMSetInitializer(str_global, str_init); - LLVMSetLinkage(str_global, LLVMPrivateLinkage); - LLVMSetGlobalConstant(str_global, true); - LLVMSetUnnamedAddr(str_global, true); - LLVMSetAlignment(str_global, LLVMABIAlignmentOfType(g->target_data_ref, LLVMTypeOf(str_init))); - - LLVMValueRef fields[] = { - LLVMConstGEP(str_global, array_ptr_indices, 2), - LLVMConstInt(g->builtin_types.entry_usize->type_ref, buf_len(name), false), - }; - values[field_i] = LLVMConstNamedStruct(str_type->type_ref, fields, 2); - } - - LLVMValueRef name_table_init = LLVMConstArray(str_type->type_ref, values, (unsigned)field_count); - - Buf 
*table_name = get_mangled_name(g, buf_sprintf("%s_name_table", buf_ptr(&enum_type->name)), false); - LLVMValueRef name_table = LLVMAddGlobal(g->module, LLVMTypeOf(name_table_init), buf_ptr(table_name)); - LLVMSetInitializer(name_table, name_table_init); - LLVMSetLinkage(name_table, LLVMPrivateLinkage); - LLVMSetGlobalConstant(name_table, true); - LLVMSetUnnamedAddr(name_table, true); - LLVMSetAlignment(name_table, LLVMABIAlignmentOfType(g->target_data_ref, LLVMTypeOf(name_table_init))); - enum_type->data.enumeration.name_table = name_table; - } -} - static void build_all_basic_blocks(CodeGen *g, FnTableEntry *fn) { IrExecutable *executable = &fn->analyzed_executable; assert(executable->basic_block_list.length > 0); @@ -5413,14 +5768,13 @@ static void do_code_gen(CodeGen *g) { } generate_error_name_table(g); - generate_enum_name_tables(g); // Generate module level variables for (size_t i = 0; i < g->global_vars.length; i += 1) { TldVar *tld_var = g->global_vars.at(i); VariableTableEntry *var = tld_var->var; - if (var->value->type->id == TypeTableEntryIdNumLitFloat) { + if (var->value->type->id == TypeTableEntryIdComptimeFloat) { // Generate debug info for it but that's it. ConstExprValue *const_val = var->value; assert(const_val->special != ConstValSpecialRuntime); @@ -5434,7 +5788,7 @@ static void do_code_gen(CodeGen *g) { continue; } - if (var->value->type->id == TypeTableEntryIdNumLitInt) { + if (var->value->type->id == TypeTableEntryIdComptimeInt) { // Generate debug info for it but that's it. ConstExprValue *const_val = var->value; assert(const_val->special != ConstValSpecialRuntime); @@ -5552,8 +5906,8 @@ static void do_code_gen(CodeGen *g) { } else if (instruction->id == IrInstructionIdSlice) { IrInstructionSlice *slice_instruction = (IrInstructionSlice *)instruction; slot = &slice_instruction->tmp_ptr; - } else if (instruction->id == IrInstructionIdMaybeWrap) { - IrInstructionMaybeWrap *maybe_wrap_instruction = (IrInstructionMaybeWrap *)instruction; + } else if (instruction->id == IrInstructionIdOptionalWrap) { + IrInstructionOptionalWrap *maybe_wrap_instruction = (IrInstructionOptionalWrap *)instruction; slot = &maybe_wrap_instruction->tmp_ptr; } else if (instruction->id == IrInstructionIdErrWrapPayload) { IrInstructionErrWrapPayload *err_wrap_payload_instruction = (IrInstructionErrWrapPayload *)instruction; @@ -5561,6 +5915,9 @@ static void do_code_gen(CodeGen *g) { } else if (instruction->id == IrInstructionIdErrWrapCode) { IrInstructionErrWrapCode *err_wrap_code_instruction = (IrInstructionErrWrapCode *)instruction; slot = &err_wrap_code_instruction->tmp_ptr; + } else if (instruction->id == IrInstructionIdCmpxchg) { + IrInstructionCmpxchg *cmpxchg_instruction = (IrInstructionCmpxchg *)instruction; + slot = &cmpxchg_instruction->tmp_ptr; } else { zig_unreachable(); } @@ -5670,6 +6027,7 @@ static void do_code_gen(CodeGen *g) { ir_render(g, fn_table_entry); } + assert(!g->errors.length); if (buf_len(&g->global_asm) != 0) { @@ -5722,10 +6080,12 @@ static void do_code_gen(CodeGen *g) { os_path_join(g->cache_dir, o_basename, output_path); ensure_cache_dir(g); + bool is_small = g->build_mode == BuildModeSmallRelease; + switch (g->emit_file_type) { case EmitFileTypeBinary: if (ZigLLVMTargetMachineEmitToFile(g->target_machine, g->module, buf_ptr(output_path), - ZigLLVM_EmitBinary, &err_msg, g->build_mode == BuildModeDebug)) + ZigLLVM_EmitBinary, &err_msg, g->build_mode == BuildModeDebug, is_small)) { zig_panic("unable to write object file %s: %s", buf_ptr(output_path), err_msg); } @@ 
-5735,7 +6095,7 @@ static void do_code_gen(CodeGen *g) { case EmitFileTypeAssembly: if (ZigLLVMTargetMachineEmitToFile(g->target_machine, g->module, buf_ptr(output_path), - ZigLLVM_EmitAssembly, &err_msg, g->build_mode == BuildModeDebug)) + ZigLLVM_EmitAssembly, &err_msg, g->build_mode == BuildModeDebug, is_small)) { zig_panic("unable to write assembly file %s: %s", buf_ptr(output_path), err_msg); } @@ -5744,7 +6104,7 @@ static void do_code_gen(CodeGen *g) { case EmitFileTypeLLVMIr: if (ZigLLVMTargetMachineEmitToFile(g->target_machine, g->module, buf_ptr(output_path), - ZigLLVM_EmitLLVMIr, &err_msg, g->build_mode == BuildModeDebug)) + ZigLLVM_EmitLLVMIr, &err_msg, g->build_mode == BuildModeDebug, is_small)) { zig_panic("unable to write llvm-ir file %s: %s", buf_ptr(output_path), err_msg); } @@ -5756,21 +6116,6 @@ static void do_code_gen(CodeGen *g) { } } -static const uint8_t int_sizes_in_bits[] = { - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 16, - 29, - 32, - 64, - 128, -}; - struct CIntTypeInfo { CIntType id; const char *name; @@ -5823,25 +6168,27 @@ static void define_builtin_types(CodeGen *g) { g->builtin_types.entry_block = entry; } { - TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdNumLitFloat); - buf_init_from_str(&entry->name, "(float literal)"); + TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdComptimeFloat); + buf_init_from_str(&entry->name, "comptime_float"); entry->zero_bits = true; g->builtin_types.entry_num_lit_float = entry; + g->primitive_type_table.put(&entry->name, entry); } { - TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdNumLitInt); - buf_init_from_str(&entry->name, "(integer literal)"); + TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdComptimeInt); + buf_init_from_str(&entry->name, "comptime_int"); entry->zero_bits = true; g->builtin_types.entry_num_lit_int = entry; + g->primitive_type_table.put(&entry->name, entry); } { - TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdUndefLit); + TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdUndefined); buf_init_from_str(&entry->name, "(undefined)"); entry->zero_bits = true; g->builtin_types.entry_undef = entry; } { - TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdNullLit); + TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdNull); buf_init_from_str(&entry->name, "(null)"); entry->zero_bits = true; g->builtin_types.entry_null = entry; @@ -5853,16 +6200,6 @@ static void define_builtin_types(CodeGen *g) { g->builtin_types.entry_arg_tuple = entry; } - for (size_t int_size_i = 0; int_size_i < array_length(int_sizes_in_bits); int_size_i += 1) { - uint8_t size_in_bits = int_sizes_in_bits[int_size_i]; - for (size_t is_sign_i = 0; is_sign_i < array_length(is_signed_list); is_sign_i += 1) { - bool is_signed = is_signed_list[is_sign_i]; - TypeTableEntry *entry = make_int_type(g, is_signed, size_in_bits); - g->primitive_type_table.put(&entry->name, entry); - get_int_type_ptr(g, is_signed, size_in_bits)[0] = entry; - } - } - for (size_t i = 0; i < array_length(c_int_type_infos); i += 1) { const CIntTypeInfo *info = &c_int_type_infos[i]; uint32_t size_in_bits = target_c_type_size_in_bits(&g->zig_target, info->id); @@ -5921,58 +6258,30 @@ static void define_builtin_types(CodeGen *g) { g->builtin_types.entry_usize = entry; } } - { - TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdFloat); - entry->type_ref = LLVMFloatType(); - buf_init_from_str(&entry->name, "f32"); - entry->data.floating.bit_count = 32; - uint64_t debug_size_in_bits 
= 8*LLVMStoreSizeOfType(g->target_data_ref, entry->type_ref); - entry->di_type = ZigLLVMCreateDebugBasicType(g->dbuilder, buf_ptr(&entry->name), - debug_size_in_bits, - ZigLLVMEncoding_DW_ATE_float()); - g->builtin_types.entry_f32 = entry; - g->primitive_type_table.put(&entry->name, entry); - } - { + auto add_fp_entry = [] (CodeGen *g, + const char *name, + uint32_t bit_count, + LLVMTypeRef type_ref, + TypeTableEntry **field) { TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdFloat); - entry->type_ref = LLVMDoubleType(); - buf_init_from_str(&entry->name, "f64"); - entry->data.floating.bit_count = 64; + entry->type_ref = type_ref; + buf_init_from_str(&entry->name, name); + entry->data.floating.bit_count = bit_count; uint64_t debug_size_in_bits = 8*LLVMStoreSizeOfType(g->target_data_ref, entry->type_ref); entry->di_type = ZigLLVMCreateDebugBasicType(g->dbuilder, buf_ptr(&entry->name), debug_size_in_bits, ZigLLVMEncoding_DW_ATE_float()); - g->builtin_types.entry_f64 = entry; + *field = entry; g->primitive_type_table.put(&entry->name, entry); - } - { - TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdFloat); - entry->type_ref = LLVMFP128Type(); - buf_init_from_str(&entry->name, "f128"); - entry->data.floating.bit_count = 128; - - uint64_t debug_size_in_bits = 8*LLVMStoreSizeOfType(g->target_data_ref, entry->type_ref); - entry->di_type = ZigLLVMCreateDebugBasicType(g->dbuilder, buf_ptr(&entry->name), - debug_size_in_bits, - ZigLLVMEncoding_DW_ATE_float()); - g->builtin_types.entry_f128 = entry; - g->primitive_type_table.put(&entry->name, entry); - } - { - TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdFloat); - entry->type_ref = LLVMX86FP80Type(); - buf_init_from_str(&entry->name, "c_longdouble"); - entry->data.floating.bit_count = 80; + }; + add_fp_entry(g, "f16", 16, LLVMHalfType(), &g->builtin_types.entry_f16); + add_fp_entry(g, "f32", 32, LLVMFloatType(), &g->builtin_types.entry_f32); + add_fp_entry(g, "f64", 64, LLVMDoubleType(), &g->builtin_types.entry_f64); + add_fp_entry(g, "f128", 128, LLVMFP128Type(), &g->builtin_types.entry_f128); + add_fp_entry(g, "c_longdouble", 80, LLVMX86FP80Type(), &g->builtin_types.entry_c_longdouble); - uint64_t debug_size_in_bits = 8*LLVMStoreSizeOfType(g->target_data_ref, entry->type_ref); - entry->di_type = ZigLLVMCreateDebugBasicType(g->dbuilder, buf_ptr(&entry->name), - debug_size_in_bits, - ZigLLVMEncoding_DW_ATE_float()); - g->builtin_types.entry_c_longdouble = entry; - g->primitive_type_table.put(&entry->name, entry); - } { TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdVoid); entry->type_ref = LLVMVoidType(); @@ -6006,12 +6315,9 @@ static void define_builtin_types(CodeGen *g) { g->builtin_types.entry_u29 = get_int_type(g, false, 29); g->builtin_types.entry_u32 = get_int_type(g, false, 32); g->builtin_types.entry_u64 = get_int_type(g, false, 64); - g->builtin_types.entry_u128 = get_int_type(g, false, 128); g->builtin_types.entry_i8 = get_int_type(g, true, 8); - g->builtin_types.entry_i16 = get_int_type(g, true, 16); g->builtin_types.entry_i32 = get_int_type(g, true, 32); g->builtin_types.entry_i64 = get_int_type(g, true, 64); - g->builtin_types.entry_i128 = get_int_type(g, true, 128); { g->builtin_types.entry_c_void = get_opaque_type(g, nullptr, nullptr, "c_void"); @@ -6061,6 +6367,7 @@ static void define_builtin_fns(CodeGen *g) { create_builtin_fn(g, BuiltinFnIdBreakpoint, "breakpoint", 0); create_builtin_fn(g, BuiltinFnIdReturnAddress, "returnAddress", 0); create_builtin_fn(g, 
BuiltinFnIdFrameAddress, "frameAddress", 0); + create_builtin_fn(g, BuiltinFnIdHandle, "handle", 0); create_builtin_fn(g, BuiltinFnIdMemcpy, "memcpy", 3); create_builtin_fn(g, BuiltinFnIdMemset, "memset", 3); create_builtin_fn(g, BuiltinFnIdSizeof, "sizeOf", 1); @@ -6070,6 +6377,8 @@ static void define_builtin_fns(CodeGen *g) { create_builtin_fn(g, BuiltinFnIdMemberCount, "memberCount", 1); create_builtin_fn(g, BuiltinFnIdMemberType, "memberType", 2); create_builtin_fn(g, BuiltinFnIdMemberName, "memberName", 2); + create_builtin_fn(g, BuiltinFnIdField, "field", 2); + create_builtin_fn(g, BuiltinFnIdTypeInfo, "typeInfo", 1); create_builtin_fn(g, BuiltinFnIdTypeof, "typeOf", 1); // TODO rename to TypeOf create_builtin_fn(g, BuiltinFnIdAddWithOverflow, "addWithOverflow", 4); create_builtin_fn(g, BuiltinFnIdSubWithOverflow, "subWithOverflow", 4); @@ -6080,15 +6389,25 @@ static void define_builtin_fns(CodeGen *g) { create_builtin_fn(g, BuiltinFnIdCUndef, "cUndef", 1); create_builtin_fn(g, BuiltinFnIdCtz, "ctz", 1); create_builtin_fn(g, BuiltinFnIdClz, "clz", 1); + create_builtin_fn(g, BuiltinFnIdPopCount, "popCount", 1); create_builtin_fn(g, BuiltinFnIdImport, "import", 1); create_builtin_fn(g, BuiltinFnIdCImport, "cImport", 1); create_builtin_fn(g, BuiltinFnIdErrName, "errorName", 1); create_builtin_fn(g, BuiltinFnIdTypeName, "typeName", 1); - create_builtin_fn(g, BuiltinFnIdCanImplicitCast, "canImplicitCast", 2); create_builtin_fn(g, BuiltinFnIdEmbedFile, "embedFile", 1); - create_builtin_fn(g, BuiltinFnIdCmpExchange, "cmpxchg", 5); + create_builtin_fn(g, BuiltinFnIdCmpxchgWeak, "cmpxchgWeak", 6); + create_builtin_fn(g, BuiltinFnIdCmpxchgStrong, "cmpxchgStrong", 6); create_builtin_fn(g, BuiltinFnIdFence, "fence", 1); create_builtin_fn(g, BuiltinFnIdTruncate, "truncate", 2); + create_builtin_fn(g, BuiltinFnIdIntCast, "intCast", 2); + create_builtin_fn(g, BuiltinFnIdFloatCast, "floatCast", 2); + create_builtin_fn(g, BuiltinFnIdIntToFloat, "intToFloat", 2); + create_builtin_fn(g, BuiltinFnIdFloatToInt, "floatToInt", 2); + create_builtin_fn(g, BuiltinFnIdBoolToInt, "boolToInt", 1); + create_builtin_fn(g, BuiltinFnIdErrToInt, "errorToInt", 1); + create_builtin_fn(g, BuiltinFnIdIntToErr, "intToError", 1); + create_builtin_fn(g, BuiltinFnIdEnumToInt, "enumToInt", 1); + create_builtin_fn(g, BuiltinFnIdIntToEnum, "intToEnum", 2); create_builtin_fn(g, BuiltinFnIdCompileErr, "compileError", 1); create_builtin_fn(g, BuiltinFnIdCompileLog, "compileLog", SIZE_MAX); create_builtin_fn(g, BuiltinFnIdIntType, "IntType", 2); // TODO rename to Int @@ -6109,8 +6428,10 @@ static void define_builtin_fns(CodeGen *g) { create_builtin_fn(g, BuiltinFnIdDivFloor, "divFloor", 2); create_builtin_fn(g, BuiltinFnIdRem, "rem", 2); create_builtin_fn(g, BuiltinFnIdMod, "mod", 2); + create_builtin_fn(g, BuiltinFnIdSqrt, "sqrt", 2); create_builtin_fn(g, BuiltinFnIdInlineCall, "inlineCall", SIZE_MAX); create_builtin_fn(g, BuiltinFnIdNoInlineCall, "noInlineCall", SIZE_MAX); + create_builtin_fn(g, BuiltinFnIdNewStackCall, "newStackCall", SIZE_MAX); create_builtin_fn(g, BuiltinFnIdTypeId, "typeId", 1); create_builtin_fn(g, BuiltinFnIdShlExact, "shlExact", 2); create_builtin_fn(g, BuiltinFnIdShrExact, "shrExact", 2); @@ -6122,6 +6443,10 @@ static void define_builtin_fns(CodeGen *g) { create_builtin_fn(g, BuiltinFnIdExport, "export", 3); create_builtin_fn(g, BuiltinFnIdErrorReturnTrace, "errorReturnTrace", 0); create_builtin_fn(g, BuiltinFnIdAtomicRmw, "atomicRmw", 5); + create_builtin_fn(g, BuiltinFnIdAtomicLoad, "atomicLoad", 3); + 
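A quick illustrative sketch of how the cast and math builtins registered above are called from Zig source of this period; the destination type coming first in the two-argument forms is an assumption based on the arities registered here, not something shown directly in this hunk:

    const assert = @import("std").debug.assert;

    test "explicit cast builtins" {
        const big: u32 = 300;
        const small = @intCast(u8, big & 0xff);  // destination type is the first argument
        assert(small == 44);

        const f = @intToFloat(f32, small);       // integer to float
        const back = @floatToInt(u8, f);         // float back to integer
        assert(back == small);

        assert(@boolToInt(true) == 1);           // bool to integer

        const bits: u8 = 0b1011;
        assert(@popCount(bits) == 3);            // single-argument form, as registered above
        assert(@sqrt(f64, 9.0) == 3.0);          // @sqrt takes the float type plus the operand
    }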
create_builtin_fn(g, BuiltinFnIdErrSetCast, "errSetCast", 2); + create_builtin_fn(g, BuiltinFnIdToBytes, "sliceToBytes", 1); + create_builtin_fn(g, BuiltinFnIdFromBytes, "bytesToSlice", 2); } static const char *bool_to_str(bool b) { @@ -6133,17 +6458,12 @@ static const char *build_mode_to_str(BuildMode build_mode) { case BuildModeDebug: return "Mode.Debug"; case BuildModeSafeRelease: return "Mode.ReleaseSafe"; case BuildModeFastRelease: return "Mode.ReleaseFast"; + case BuildModeSmallRelease: return "Mode.ReleaseSmall"; } zig_unreachable(); } -static void define_builtin_compile_vars(CodeGen *g) { - if (g->std_package == nullptr) - return; - - const char *builtin_zig_basename = "builtin.zig"; - Buf *builtin_zig_path = buf_alloc(); - os_path_join(g->cache_dir, buf_create_from_str(builtin_zig_basename), builtin_zig_path); +Buf *codegen_generate_builtin_source(CodeGen *g) { Buf *contents = buf_alloc(); // Modifications to this struct must be coordinated with code that does anything with @@ -6273,6 +6593,7 @@ static void define_builtin_compile_vars(CodeGen *g) { " Debug,\n" " ReleaseSafe,\n" " ReleaseFast,\n" + " ReleaseSmall,\n" "};\n\n"); } { @@ -6286,6 +6607,197 @@ static void define_builtin_compile_vars(CodeGen *g) { } { buf_appendf(contents, + "pub const TypeInfo = union(TypeId) {\n" + " Type: void,\n" + " Void: void,\n" + " Bool: void,\n" + " NoReturn: void,\n" + " Int: Int,\n" + " Float: Float,\n" + " Pointer: Pointer,\n" + " Array: Array,\n" + " Struct: Struct,\n" + " ComptimeFloat: void,\n" + " ComptimeInt: void,\n" + " Undefined: void,\n" + " Null: void,\n" + " Optional: Optional,\n" + " ErrorUnion: ErrorUnion,\n" + " ErrorSet: ErrorSet,\n" + " Enum: Enum,\n" + " Union: Union,\n" + " Fn: Fn,\n" + " Namespace: void,\n" + " Block: void,\n" + " BoundFn: Fn,\n" + " ArgTuple: void,\n" + " Opaque: void,\n" + " Promise: Promise,\n" + "\n\n" + " pub const Int = struct {\n" + " is_signed: bool,\n" + " bits: u8,\n" + " };\n" + "\n" + " pub const Float = struct {\n" + " bits: u8,\n" + " };\n" + "\n" + " pub const Pointer = struct {\n" + " size: Size,\n" + " is_const: bool,\n" + " is_volatile: bool,\n" + " alignment: u32,\n" + " child: type,\n" + "\n" + " pub const Size = enum {\n" + " One,\n" + " Many,\n" + " Slice,\n" + " };\n" + " };\n" + "\n" + " pub const Array = struct {\n" + " len: usize,\n" + " child: type,\n" + " };\n" + "\n" + " pub const ContainerLayout = enum {\n" + " Auto,\n" + " Extern,\n" + " Packed,\n" + " };\n" + "\n" + " pub const StructField = struct {\n" + " name: []const u8,\n" + " offset: ?usize,\n" + " field_type: type,\n" + " };\n" + "\n" + " pub const Struct = struct {\n" + " layout: ContainerLayout,\n" + " fields: []StructField,\n" + " defs: []Definition,\n" + " };\n" + "\n" + " pub const Optional = struct {\n" + " child: type,\n" + " };\n" + "\n" + " pub const ErrorUnion = struct {\n" + " error_set: type,\n" + " payload: type,\n" + " };\n" + "\n" + " pub const Error = struct {\n" + " name: []const u8,\n" + " value: usize,\n" + " };\n" + "\n" + " pub const ErrorSet = struct {\n" + " errors: []Error,\n" + " };\n" + "\n" + " pub const EnumField = struct {\n" + " name: []const u8,\n" + " value: usize,\n" + " };\n" + "\n" + " pub const Enum = struct {\n" + " layout: ContainerLayout,\n" + " tag_type: type,\n" + " fields: []EnumField,\n" + " defs: []Definition,\n" + " };\n" + "\n" + " pub const UnionField = struct {\n" + " name: []const u8,\n" + " enum_field: ?EnumField,\n" + " field_type: type,\n" + " };\n" + "\n" + " pub const Union = struct {\n" + " layout: 
ContainerLayout,\n" + " tag_type: ?type,\n" + " fields: []UnionField,\n" + " defs: []Definition,\n" + " };\n" + "\n" + " pub const CallingConvention = enum {\n" + " Unspecified,\n" + " C,\n" + " Cold,\n" + " Naked,\n" + " Stdcall,\n" + " Async,\n" + " };\n" + "\n" + " pub const FnArg = struct {\n" + " is_generic: bool,\n" + " is_noalias: bool,\n" + " arg_type: ?type,\n" + " };\n" + "\n" + " pub const Fn = struct {\n" + " calling_convention: CallingConvention,\n" + " is_generic: bool,\n" + " is_var_args: bool,\n" + " return_type: ?type,\n" + " async_allocator_type: ?type,\n" + " args: []FnArg,\n" + " };\n" + "\n" + " pub const Promise = struct {\n" + " child: ?type,\n" + " };\n" + "\n" + " pub const Definition = struct {\n" + " name: []const u8,\n" + " is_pub: bool,\n" + " data: Data,\n" + "\n" + " pub const Data = union(enum) {\n" + " Type: type,\n" + " Var: type,\n" + " Fn: FnDef,\n" + "\n" + " pub const FnDef = struct {\n" + " fn_type: type,\n" + " inline_type: Inline,\n" + " calling_convention: CallingConvention,\n" + " is_var_args: bool,\n" + " is_extern: bool,\n" + " is_export: bool,\n" + " lib_name: ?[]const u8,\n" + " return_type: type,\n" + " arg_names: [][] const u8,\n" + "\n" + " pub const Inline = enum {\n" + " Auto,\n" + " Always,\n" + " Never,\n" + " };\n" + " };\n" + " };\n" + " };\n" + "};\n\n"); + assert(ContainerLayoutAuto == 0); + assert(ContainerLayoutExtern == 1); + assert(ContainerLayoutPacked == 2); + + assert(CallingConventionUnspecified == 0); + assert(CallingConventionC == 1); + assert(CallingConventionCold == 2); + assert(CallingConventionNaked == 3); + assert(CallingConventionStdcall == 4); + assert(CallingConventionAsync == 5); + + assert(FnInlineAuto == 0); + assert(FnInlineAlways == 1); + assert(FnInlineNever == 2); + } + { + buf_appendf(contents, "pub const FloatMode = enum {\n" " Optimized,\n" " Strict,\n" @@ -6317,13 +6829,27 @@ static void define_builtin_compile_vars(CodeGen *g) { buf_appendf(contents, "pub const __zig_test_fn_slice = {}; // overwritten later\n"); + + return contents; +} + +static void define_builtin_compile_vars(CodeGen *g) { + if (g->std_package == nullptr) + return; + + const char *builtin_zig_basename = "builtin.zig"; + Buf *builtin_zig_path = buf_alloc(); + os_path_join(g->cache_dir, buf_create_from_str(builtin_zig_basename), builtin_zig_path); + + Buf *contents = codegen_generate_builtin_source(g); ensure_cache_dir(g); os_write_file(builtin_zig_path, contents); int err; Buf *abs_full_path = buf_alloc(); if ((err = os_path_real(builtin_zig_path, abs_full_path))) { - zig_panic("unable to open '%s': %s", buf_ptr(builtin_zig_path), err_str(err)); + fprintf(stderr, "unable to open '%s': %s\n", buf_ptr(builtin_zig_path), err_str(err)); + exit(1); } assert(g->root_package); @@ -6383,7 +6909,7 @@ static void init(CodeGen *g) { const char *target_specific_features; if (g->is_native_target) { // LLVM creates invalid binaries on Windows sometimes. - // See https://github.com/zig-lang/zig/issues/508 + // See https://github.com/ziglang/zig/issues/508 // As a workaround we do not use target native features on Windows. 
if (g->zig_target.os == OsWindows) { target_specific_cpu_args = ""; @@ -6443,7 +6969,7 @@ static void init(CodeGen *g) { } } - g->have_err_ret_tracing = g->build_mode != BuildModeFastRelease; + g->have_err_ret_tracing = g->build_mode != BuildModeFastRelease && g->build_mode != BuildModeSmallRelease; define_builtin_fns(g); define_builtin_compile_vars(g); @@ -6490,11 +7016,11 @@ static ImportTableEntry *add_special_code(CodeGen *g, PackageTableEntry *package Buf *abs_full_path = buf_alloc(); int err; if ((err = os_path_real(&path_to_code_src, abs_full_path))) { - zig_panic("unable to open '%s': %s", buf_ptr(&path_to_code_src), err_str(err)); + zig_panic("unable to open '%s': %s\n", buf_ptr(&path_to_code_src), err_str(err)); } Buf *import_code = buf_alloc(); if ((err = os_fetch_file_path(abs_full_path, import_code, false))) { - zig_panic("unable to open '%s': %s", buf_ptr(&path_to_code_src), err_str(err)); + zig_panic("unable to open '%s': %s\n", buf_ptr(&path_to_code_src), err_str(err)); } return add_source_file(g, package, abs_full_path, import_code); @@ -6522,7 +7048,8 @@ static void create_test_compile_var_and_add_test_runner(CodeGen *g) { exit(0); } - TypeTableEntry *u8_ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, true); + TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, true, false, + PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0); TypeTableEntry *str_type = get_slice_type(g, u8_ptr_type); TypeTableEntry *fn_type = get_test_fn_type(g); @@ -6577,12 +7104,14 @@ static void gen_root_source(CodeGen *g) { Buf *abs_full_path = buf_alloc(); int err; if ((err = os_path_real(rel_full_path, abs_full_path))) { - zig_panic("unable to open '%s': %s", buf_ptr(rel_full_path), err_str(err)); + fprintf(stderr, "unable to open '%s': %s\n", buf_ptr(rel_full_path), err_str(err)); + exit(1); } Buf *source_code = buf_alloc(); if ((err = os_fetch_file_path(rel_full_path, source_code, true))) { - zig_panic("unable to open '%s': %s", buf_ptr(rel_full_path), err_str(err)); + fprintf(stderr, "unable to open '%s': %s\n", buf_ptr(rel_full_path), err_str(err)); + exit(1); } g->root_import = add_source_file(g, g->root_package, abs_full_path, source_code); @@ -6681,10 +7210,10 @@ static void prepend_c_type_to_decl_list(CodeGen *g, GenH *gen_h, TypeTableEntry switch (type_entry->id) { case TypeTableEntryIdInvalid: case TypeTableEntryIdMetaType: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: case TypeTableEntryIdBoundFn: @@ -6726,7 +7255,7 @@ static void prepend_c_type_to_decl_list(CodeGen *g, GenH *gen_h, TypeTableEntry case TypeTableEntryIdArray: prepend_c_type_to_decl_list(g, gen_h, type_entry->data.array.child_type); return; - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: prepend_c_type_to_decl_list(g, gen_h, type_entry->data.maybe.child_type); return; case TypeTableEntryIdFn: @@ -6815,7 +7344,7 @@ static void get_c_type(CodeGen *g, GenH *gen_h, TypeTableEntry *type_entry, Buf buf_appendf(out_buf, "%s%s *", const_str, buf_ptr(&child_buf)); break; } - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: { TypeTableEntry *child_type = type_entry->data.maybe.child_type; if (child_type->zero_bits) { @@ -6866,10 +7395,10 @@ static void 
get_c_type(CodeGen *g, GenH *gen_h, TypeTableEntry *type_entry, Buf case TypeTableEntryIdBoundFn: case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: case TypeTableEntryIdArgTuple: case TypeTableEntryIdPromise: zig_unreachable(); @@ -6925,7 +7454,7 @@ static void gen_h_file(CodeGen *g) { FILE *out_h = fopen(buf_ptr(g->out_h_path), "wb"); if (!out_h) - zig_panic("unable to open %s: %s", buf_ptr(g->out_h_path), strerror(errno)); + zig_panic("unable to open %s: %s\n", buf_ptr(g->out_h_path), strerror(errno)); Buf *export_macro = preprocessor_mangle(buf_sprintf("%s_EXPORT", buf_ptr(g->root_out_name))); buf_upcase(export_macro); @@ -7018,67 +7547,76 @@ static void gen_h_file(CodeGen *g) { case TypeTableEntryIdInt: case TypeTableEntryIdFloat: case TypeTableEntryIdPointer: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdArray: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: case TypeTableEntryIdBoundFn: case TypeTableEntryIdArgTuple: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdFn: case TypeTableEntryIdPromise: zig_unreachable(); case TypeTableEntryIdEnum: - assert(type_entry->data.enumeration.layout == ContainerLayoutExtern); - fprintf(out_h, "enum %s {\n", buf_ptr(&type_entry->name)); - for (uint32_t field_i = 0; field_i < type_entry->data.enumeration.src_field_count; field_i += 1) { - TypeEnumField *enum_field = &type_entry->data.enumeration.fields[field_i]; - Buf *value_buf = buf_alloc(); - bigint_append_buf(value_buf, &enum_field->value, 10); - fprintf(out_h, " %s = %s", buf_ptr(enum_field->name), buf_ptr(value_buf)); - if (field_i != type_entry->data.enumeration.src_field_count - 1) { - fprintf(out_h, ","); + if (type_entry->data.enumeration.layout == ContainerLayoutExtern) { + fprintf(out_h, "enum %s {\n", buf_ptr(&type_entry->name)); + for (uint32_t field_i = 0; field_i < type_entry->data.enumeration.src_field_count; field_i += 1) { + TypeEnumField *enum_field = &type_entry->data.enumeration.fields[field_i]; + Buf *value_buf = buf_alloc(); + bigint_append_buf(value_buf, &enum_field->value, 10); + fprintf(out_h, " %s = %s", buf_ptr(enum_field->name), buf_ptr(value_buf)); + if (field_i != type_entry->data.enumeration.src_field_count - 1) { + fprintf(out_h, ","); + } + fprintf(out_h, "\n"); } - fprintf(out_h, "\n"); + fprintf(out_h, "};\n\n"); + } else { + fprintf(out_h, "enum %s;\n", buf_ptr(&type_entry->name)); } - fprintf(out_h, "};\n\n"); break; case TypeTableEntryIdStruct: - assert(type_entry->data.structure.layout == ContainerLayoutExtern); - fprintf(out_h, "struct %s {\n", buf_ptr(&type_entry->name)); - for (uint32_t field_i = 0; field_i < type_entry->data.structure.src_field_count; field_i += 1) { - TypeStructField *struct_field = &type_entry->data.structure.fields[field_i]; - - Buf *type_name_buf = buf_alloc(); - get_c_type(g, gen_h, struct_field->type_entry, type_name_buf); - - if (struct_field->type_entry->id == TypeTableEntryIdArray) { - fprintf(out_h, 
" %s %s[%" ZIG_PRI_u64 "];\n", buf_ptr(type_name_buf), - buf_ptr(struct_field->name), - struct_field->type_entry->data.array.len); - } else { - fprintf(out_h, " %s %s;\n", buf_ptr(type_name_buf), buf_ptr(struct_field->name)); - } + if (type_entry->data.structure.layout == ContainerLayoutExtern) { + fprintf(out_h, "struct %s {\n", buf_ptr(&type_entry->name)); + for (uint32_t field_i = 0; field_i < type_entry->data.structure.src_field_count; field_i += 1) { + TypeStructField *struct_field = &type_entry->data.structure.fields[field_i]; + + Buf *type_name_buf = buf_alloc(); + get_c_type(g, gen_h, struct_field->type_entry, type_name_buf); + + if (struct_field->type_entry->id == TypeTableEntryIdArray) { + fprintf(out_h, " %s %s[%" ZIG_PRI_u64 "];\n", buf_ptr(type_name_buf), + buf_ptr(struct_field->name), + struct_field->type_entry->data.array.len); + } else { + fprintf(out_h, " %s %s;\n", buf_ptr(type_name_buf), buf_ptr(struct_field->name)); + } + } + fprintf(out_h, "};\n\n"); + } else { + fprintf(out_h, "struct %s;\n", buf_ptr(&type_entry->name)); } - fprintf(out_h, "};\n\n"); break; case TypeTableEntryIdUnion: - assert(type_entry->data.unionation.layout == ContainerLayoutExtern); - fprintf(out_h, "union %s {\n", buf_ptr(&type_entry->name)); - for (uint32_t field_i = 0; field_i < type_entry->data.unionation.src_field_count; field_i += 1) { - TypeUnionField *union_field = &type_entry->data.unionation.fields[field_i]; - - Buf *type_name_buf = buf_alloc(); - get_c_type(g, gen_h, union_field->type_entry, type_name_buf); - fprintf(out_h, " %s %s;\n", buf_ptr(type_name_buf), buf_ptr(union_field->name)); + if (type_entry->data.unionation.layout == ContainerLayoutExtern) { + fprintf(out_h, "union %s {\n", buf_ptr(&type_entry->name)); + for (uint32_t field_i = 0; field_i < type_entry->data.unionation.src_field_count; field_i += 1) { + TypeUnionField *union_field = &type_entry->data.unionation.fields[field_i]; + + Buf *type_name_buf = buf_alloc(); + get_c_type(g, gen_h, union_field->type_entry, type_name_buf); + fprintf(out_h, " %s %s;\n", buf_ptr(type_name_buf), buf_ptr(union_field->name)); + } + fprintf(out_h, "};\n\n"); + } else { + fprintf(out_h, "union %s;\n", buf_ptr(&type_entry->name)); } - fprintf(out_h, "};\n\n"); break; case TypeTableEntryIdOpaque: fprintf(out_h, "struct %s;\n\n", buf_ptr(&type_entry->name)); @@ -7135,4 +7673,3 @@ PackageTableEntry *codegen_create_package(CodeGen *g, const char *root_src_dir, } return pkg; } - diff --git a/src/codegen.hpp b/src/codegen.hpp index a7a4b748c4..b5f3374ec4 100644 --- a/src/codegen.hpp +++ b/src/codegen.hpp @@ -59,5 +59,7 @@ void codegen_add_object(CodeGen *g, Buf *object_path); void codegen_translate_c(CodeGen *g, Buf *path); +Buf *codegen_generate_builtin_source(CodeGen *g); + #endif diff --git a/src/ir.cpp b/src/ir.cpp index 3ba58a09bd..3e423487aa 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -11,9 +11,10 @@ #include "ir.hpp" #include "ir_print.hpp" #include "os.hpp" -#include "translate_c.hpp" #include "range_set.hpp" #include "softfloat.hpp" +#include "translate_c.hpp" +#include "util.hpp" struct IrExecContext { ConstExprValue *mem_slot_list; @@ -38,20 +39,13 @@ struct IrAnalyze { IrBasicBlock *const_predecessor_bb; }; -static const LVal LVAL_NONE = { false, false, false }; -static const LVal LVAL_PTR = { true, false, false }; - -static LVal make_lval_addr(bool is_const, bool is_volatile) { - return { true, is_const, is_volatile }; -} - enum ConstCastResultId { ConstCastResultIdOk, ConstCastResultIdErrSet, ConstCastResultIdErrSetGlobal, 
ConstCastResultIdPointerChild, ConstCastResultIdSliceChild, - ConstCastResultIdNullableChild, + ConstCastResultIdOptionalChild, ConstCastResultIdErrorUnionPayload, ConstCastResultIdErrorUnionErrorSet, ConstCastResultIdFnAlign, @@ -66,14 +60,10 @@ enum ConstCastResultId { ConstCastResultIdType, ConstCastResultIdUnresolvedInferredErrSet, ConstCastResultIdAsyncAllocatorType, -}; - -struct ConstCastErrSetMismatch { - ZigList<ErrorTableEntry *> missing_errors; + ConstCastResultIdNullWrapPtr, }; struct ConstCastOnly; - struct ConstCastArg { size_t arg_index; ConstCastOnly *child; @@ -83,22 +73,70 @@ struct ConstCastArgNoAlias { size_t arg_index; }; +struct ConstCastOptionalMismatch; +struct ConstCastPointerMismatch; +struct ConstCastSliceMismatch; +struct ConstCastErrUnionErrSetMismatch; +struct ConstCastErrUnionPayloadMismatch; +struct ConstCastErrSetMismatch; +struct ConstCastTypeMismatch; + struct ConstCastOnly { ConstCastResultId id; union { - ConstCastErrSetMismatch error_set; - ConstCastOnly *pointer_child; - ConstCastOnly *slice_child; - ConstCastOnly *nullable_child; - ConstCastOnly *error_union_payload; - ConstCastOnly *error_union_error_set; + ConstCastErrSetMismatch *error_set_mismatch; + ConstCastPointerMismatch *pointer_mismatch; + ConstCastSliceMismatch *slice_mismatch; + ConstCastOptionalMismatch *optional; + ConstCastErrUnionPayloadMismatch *error_union_payload; + ConstCastErrUnionErrSetMismatch *error_union_error_set; + ConstCastTypeMismatch *type_mismatch; ConstCastOnly *return_type; ConstCastOnly *async_allocator_type; + ConstCastOnly *null_wrap_ptr_child; ConstCastArg fn_arg; ConstCastArgNoAlias arg_no_alias; } data; }; +struct ConstCastTypeMismatch { + TypeTableEntry *wanted_type; + TypeTableEntry *actual_type; +}; + +struct ConstCastOptionalMismatch { + ConstCastOnly child; + TypeTableEntry *wanted_child; + TypeTableEntry *actual_child; +}; + +struct ConstCastPointerMismatch { + ConstCastOnly child; + TypeTableEntry *wanted_child; + TypeTableEntry *actual_child; +}; + +struct ConstCastSliceMismatch { + ConstCastOnly child; + TypeTableEntry *wanted_child; + TypeTableEntry *actual_child; +}; + +struct ConstCastErrUnionErrSetMismatch { + ConstCastOnly child; + TypeTableEntry *wanted_err_set; + TypeTableEntry *actual_err_set; +}; + +struct ConstCastErrUnionPayloadMismatch { + ConstCastOnly child; + TypeTableEntry *wanted_payload; + TypeTableEntry *actual_payload; +}; + +struct ConstCastErrSetMismatch { + ZigList<ErrorTableEntry *> missing_errors; +}; static IrInstruction *ir_gen_node(IrBuilder *irb, AstNode *node, Scope *scope); static IrInstruction *ir_gen_node_extra(IrBuilder *irb, AstNode *node, Scope *scope, LVal lval); @@ -108,11 +146,14 @@ static IrInstruction *ir_get_deref(IrAnalyze *ira, IrInstruction *source_instruc static ErrorMsg *exec_add_error_node(CodeGen *codegen, IrExecutable *exec, AstNode *source_node, Buf *msg); static IrInstruction *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_name, IrInstruction *source_instr, IrInstruction *container_ptr, TypeTableEntry *container_type); -static IrInstruction *ir_get_var_ptr(IrAnalyze *ira, IrInstruction *instruction, - VariableTableEntry *var, bool is_const_ptr, bool is_volatile_ptr); +static IrInstruction *ir_get_var_ptr(IrAnalyze *ira, IrInstruction *instruction, VariableTableEntry *var); +static TypeTableEntry *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstruction *op); +static IrInstruction *ir_lval_wrap(IrBuilder *irb, Scope *scope, IrInstruction *value, LVal lval); +static TypeTableEntry 
*adjust_ptr_align(CodeGen *g, TypeTableEntry *ptr_type, uint32_t new_align); +static TypeTableEntry *adjust_slice_align(CodeGen *g, TypeTableEntry *slice_type, uint32_t new_align); ConstExprValue *const_ptr_pointee(CodeGen *g, ConstExprValue *const_val) { - assert(const_val->type->id == TypeTableEntryIdPointer); + assert(get_codegen_ptr_type(const_val->type) != nullptr); assert(const_val->special == ConstValSpecialStatic); switch (const_val->data.x_ptr.special) { case ConstPtrSpecialInvalid: @@ -143,6 +184,8 @@ static bool ir_should_inline(IrExecutable *exec, Scope *scope) { while (scope != nullptr) { if (scope->id == ScopeIdCompTime) return true; + if (scope->id == ScopeIdFnDef) + break; scope = scope->parent; } return false; @@ -369,8 +412,8 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionTestNonNull *) { return IrInstructionIdTestNonNull; } -static constexpr IrInstructionId ir_instruction_id(IrInstructionUnwrapMaybe *) { - return IrInstructionIdUnwrapMaybe; +static constexpr IrInstructionId ir_instruction_id(IrInstructionUnwrapOptional *) { + return IrInstructionIdUnwrapOptional; } static constexpr IrInstructionId ir_instruction_id(IrInstructionClz *) { @@ -381,6 +424,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionCtz *) { return IrInstructionIdCtz; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionPopCount *) { + return IrInstructionIdPopCount; +} + static constexpr IrInstructionId ir_instruction_id(IrInstructionUnionTag *) { return IrInstructionIdUnionTag; } @@ -457,6 +504,38 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionTruncate *) { return IrInstructionIdTruncate; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionIntCast *) { + return IrInstructionIdIntCast; +} + +static constexpr IrInstructionId ir_instruction_id(IrInstructionFloatCast *) { + return IrInstructionIdFloatCast; +} + +static constexpr IrInstructionId ir_instruction_id(IrInstructionErrSetCast *) { + return IrInstructionIdErrSetCast; +} + +static constexpr IrInstructionId ir_instruction_id(IrInstructionToBytes *) { + return IrInstructionIdToBytes; +} + +static constexpr IrInstructionId ir_instruction_id(IrInstructionFromBytes *) { + return IrInstructionIdFromBytes; +} + +static constexpr IrInstructionId ir_instruction_id(IrInstructionIntToFloat *) { + return IrInstructionIdIntToFloat; +} + +static constexpr IrInstructionId ir_instruction_id(IrInstructionFloatToInt *) { + return IrInstructionIdFloatToInt; +} + +static constexpr IrInstructionId ir_instruction_id(IrInstructionBoolToInt *) { + return IrInstructionIdBoolToInt; +} + static constexpr IrInstructionId ir_instruction_id(IrInstructionIntType *) { return IrInstructionIdIntType; } @@ -501,6 +580,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionFrameAddress *) return IrInstructionIdFrameAddress; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionHandle *) { + return IrInstructionIdHandle; +} + static constexpr IrInstructionId ir_instruction_id(IrInstructionAlignOf *) { return IrInstructionIdAlignOf; } @@ -521,8 +604,8 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionUnwrapErrPayload return IrInstructionIdUnwrapErrPayload; } -static constexpr IrInstructionId ir_instruction_id(IrInstructionMaybeWrap *) { - return IrInstructionIdMaybeWrap; +static constexpr IrInstructionId ir_instruction_id(IrInstructionOptionalWrap *) { + return IrInstructionIdOptionalWrap; } static constexpr IrInstructionId 
ir_instruction_id(IrInstructionErrWrapPayload *) { @@ -565,6 +648,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionIntToEnum *) { return IrInstructionIdIntToEnum; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionEnumToInt *) { + return IrInstructionIdEnumToInt; +} + static constexpr IrInstructionId ir_instruction_id(IrInstructionIntToErr *) { return IrInstructionIdIntToErr; } @@ -585,10 +672,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionTypeName *) { return IrInstructionIdTypeName; } -static constexpr IrInstructionId ir_instruction_id(IrInstructionCanImplicitCast *) { - return IrInstructionIdCanImplicitCast; -} - static constexpr IrInstructionId ir_instruction_id(IrInstructionDeclRef *) { return IrInstructionIdDeclRef; } @@ -613,6 +696,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionOffsetOf *) { return IrInstructionIdOffsetOf; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionTypeInfo *) { + return IrInstructionIdTypeInfo; +} + static constexpr IrInstructionId ir_instruction_id(IrInstructionTypeId *) { return IrInstructionIdTypeId; } @@ -621,8 +708,8 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionSetEvalBranchQuo return IrInstructionIdSetEvalBranchQuota; } -static constexpr IrInstructionId ir_instruction_id(IrInstructionPtrTypeOf *) { - return IrInstructionIdPtrTypeOf; +static constexpr IrInstructionId ir_instruction_id(IrInstructionPtrType *) { + return IrInstructionIdPtrType; } static constexpr IrInstructionId ir_instruction_id(IrInstructionAlignCast *) { @@ -709,6 +796,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionAtomicRmw *) { return IrInstructionIdAtomicRmw; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionAtomicLoad *) { + return IrInstructionIdAtomicLoad; +} + static constexpr IrInstructionId ir_instruction_id(IrInstructionPromiseResultType *) { return IrInstructionIdPromiseResultType; } @@ -733,6 +824,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionMarkErrRetTraceP return IrInstructionIdMarkErrRetTracePtr; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionSqrt *) { + return IrInstructionIdSqrt; +} + template<typename T> static T *ir_create_instruction(IrBuilder *irb, Scope *scope, AstNode *source_node) { T *special_instruction = allocate<T>(1); @@ -988,13 +1083,9 @@ static IrInstruction *ir_build_bin_op_from(IrBuilder *irb, IrInstruction *old_in return new_instruction; } -static IrInstruction *ir_build_var_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node, - VariableTableEntry *var, bool is_const, bool is_volatile) -{ +static IrInstruction *ir_build_var_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node, VariableTableEntry *var) { IrInstructionVarPtr *instruction = ir_build_instruction<IrInstructionVarPtr>(irb, scope, source_node); instruction->var = var; - instruction->is_const = is_const; - instruction->is_volatile = is_volatile; ir_ref_var(var); @@ -1002,12 +1093,13 @@ static IrInstruction *ir_build_var_ptr(IrBuilder *irb, Scope *scope, AstNode *so } static IrInstruction *ir_build_elem_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *array_ptr, - IrInstruction *elem_index, bool safety_check_on) + IrInstruction *elem_index, bool safety_check_on, PtrLen ptr_len) { IrInstructionElemPtr *instruction = ir_build_instruction<IrInstructionElemPtr>(irb, scope, source_node); instruction->array_ptr = array_ptr; instruction->elem_index = elem_index; 
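ir_build_elem_ptr now records a PtrLen, mirroring the One/Many/Slice pointer sizes exposed through TypeInfo.Pointer.Size earlier in this diff. A short Zig sketch of the three flavors, assuming the [*]T syntax and the slice .ptr field behave as in compilers of this period:

    const assert = @import("std").debug.assert;

    test "pointer sizes" {
        var array = []u8{ 1, 2, 3, 4 };
        const slice: []u8 = array[0..];  // Slice: pointer plus length, bounds checked
        const many: [*]u8 = slice.ptr;   // Many: unknown length, indexable, no bounds checking
        const one: *u8 = &array[0];      // One: points at exactly one item
        assert(slice.len == 4);
        assert(many[3] == 4);
        assert(one == &slice[0]);
    }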
instruction->safety_check_on = safety_check_on; + instruction->ptr_len = ptr_len; ir_ref_instruction(array_ptr, irb->current_basic_block); ir_ref_instruction(elem_index, irb->current_basic_block); @@ -1015,13 +1107,18 @@ static IrInstruction *ir_build_elem_ptr(IrBuilder *irb, Scope *scope, AstNode *s return &instruction->base; } -static IrInstruction *ir_build_elem_ptr_from(IrBuilder *irb, IrInstruction *old_instruction, - IrInstruction *array_ptr, IrInstruction *elem_index, bool safety_check_on) +static IrInstruction *ir_build_field_ptr_instruction(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction *container_ptr, IrInstruction *field_name_expr) { - IrInstruction *new_instruction = ir_build_elem_ptr(irb, old_instruction->scope, - old_instruction->source_node, array_ptr, elem_index, safety_check_on); - ir_link_new_instruction(new_instruction, old_instruction); - return new_instruction; + IrInstructionFieldPtr *instruction = ir_build_instruction<IrInstructionFieldPtr>(irb, scope, source_node); + instruction->container_ptr = container_ptr; + instruction->field_name_buffer = nullptr; + instruction->field_name_expr = field_name_expr; + + ir_ref_instruction(container_ptr, irb->current_basic_block); + ir_ref_instruction(field_name_expr, irb->current_basic_block); + + return &instruction->base; } static IrInstruction *ir_build_field_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node, @@ -1029,7 +1126,8 @@ static IrInstruction *ir_build_field_ptr(IrBuilder *irb, Scope *scope, AstNode * { IrInstructionFieldPtr *instruction = ir_build_instruction<IrInstructionFieldPtr>(irb, scope, source_node); instruction->container_ptr = container_ptr; - instruction->field_name = field_name; + instruction->field_name_buffer = field_name; + instruction->field_name_expr = nullptr; ir_ref_instruction(container_ptr, irb->current_basic_block); @@ -1071,7 +1169,8 @@ static IrInstruction *ir_build_union_field_ptr_from(IrBuilder *irb, IrInstructio static IrInstruction *ir_build_call(IrBuilder *irb, Scope *scope, AstNode *source_node, FnTableEntry *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args, - bool is_comptime, FnInline fn_inline, bool is_async, IrInstruction *async_allocator) + bool is_comptime, FnInline fn_inline, bool is_async, IrInstruction *async_allocator, + IrInstruction *new_stack) { IrInstructionCall *call_instruction = ir_build_instruction<IrInstructionCall>(irb, scope, source_node); call_instruction->fn_entry = fn_entry; @@ -1082,6 +1181,7 @@ static IrInstruction *ir_build_call(IrBuilder *irb, Scope *scope, AstNode *sourc call_instruction->arg_count = arg_count; call_instruction->is_async = is_async; call_instruction->async_allocator = async_allocator; + call_instruction->new_stack = new_stack; if (fn_ref) ir_ref_instruction(fn_ref, irb->current_basic_block); @@ -1089,16 +1189,19 @@ static IrInstruction *ir_build_call(IrBuilder *irb, Scope *scope, AstNode *sourc ir_ref_instruction(args[i], irb->current_basic_block); if (async_allocator) ir_ref_instruction(async_allocator, irb->current_basic_block); + if (new_stack != nullptr) + ir_ref_instruction(new_stack, irb->current_basic_block); return &call_instruction->base; } static IrInstruction *ir_build_call_from(IrBuilder *irb, IrInstruction *old_instruction, FnTableEntry *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args, - bool is_comptime, FnInline fn_inline, bool is_async, IrInstruction *async_allocator) + bool is_comptime, FnInline fn_inline, bool is_async, IrInstruction *async_allocator, + 
IrInstruction *new_stack) { IrInstruction *new_instruction = ir_build_call(irb, old_instruction->scope, - old_instruction->source_node, fn_entry, fn_ref, arg_count, args, is_comptime, fn_inline, is_async, async_allocator); + old_instruction->source_node, fn_entry, fn_ref, arg_count, args, is_comptime, fn_inline, is_async, async_allocator, new_stack); ir_link_new_instruction(new_instruction, old_instruction); return new_instruction; } @@ -1160,15 +1263,16 @@ static IrInstruction *ir_build_br_from(IrBuilder *irb, IrInstruction *old_instru return new_instruction; } -static IrInstruction *ir_build_ptr_type_of(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *child_type, bool is_const, bool is_volatile, IrInstruction *align_value, - uint32_t bit_offset_start, uint32_t bit_offset_end) +static IrInstruction *ir_build_ptr_type(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction *child_type, bool is_const, bool is_volatile, PtrLen ptr_len, + IrInstruction *align_value, uint32_t bit_offset_start, uint32_t bit_offset_end) { - IrInstructionPtrTypeOf *ptr_type_of_instruction = ir_build_instruction<IrInstructionPtrTypeOf>(irb, scope, source_node); + IrInstructionPtrType *ptr_type_of_instruction = ir_build_instruction<IrInstructionPtrType>(irb, scope, source_node); ptr_type_of_instruction->align_value = align_value; ptr_type_of_instruction->child_type = child_type; ptr_type_of_instruction->is_const = is_const; ptr_type_of_instruction->is_volatile = is_volatile; + ptr_type_of_instruction->ptr_len = ptr_len; ptr_type_of_instruction->bit_offset_start = bit_offset_start; ptr_type_of_instruction->bit_offset_end = bit_offset_end; @@ -1551,7 +1655,7 @@ static IrInstruction *ir_build_test_nonnull_from(IrBuilder *irb, IrInstruction * static IrInstruction *ir_build_unwrap_maybe(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *value, bool safety_check_on) { - IrInstructionUnwrapMaybe *instruction = ir_build_instruction<IrInstructionUnwrapMaybe>(irb, scope, source_node); + IrInstructionUnwrapOptional *instruction = ir_build_instruction<IrInstructionUnwrapOptional>(irb, scope, source_node); instruction->value = value; instruction->safety_check_on = safety_check_on; @@ -1570,7 +1674,7 @@ static IrInstruction *ir_build_unwrap_maybe_from(IrBuilder *irb, IrInstruction * } static IrInstruction *ir_build_maybe_wrap(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *value) { - IrInstructionMaybeWrap *instruction = ir_build_instruction<IrInstructionMaybeWrap>(irb, scope, source_node); + IrInstructionOptionalWrap *instruction = ir_build_instruction<IrInstructionOptionalWrap>(irb, scope, source_node); instruction->value = value; ir_ref_instruction(value, irb->current_basic_block); @@ -1626,8 +1730,18 @@ static IrInstruction *ir_build_ctz_from(IrBuilder *irb, IrInstruction *old_instr return new_instruction; } +static IrInstruction *ir_build_pop_count(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *value) { + IrInstructionPopCount *instruction = ir_build_instruction<IrInstructionPopCount>(irb, scope, source_node); + instruction->value = value; + + ir_ref_instruction(value, irb->current_basic_block); + + return &instruction->base; +} + static IrInstruction *ir_build_switch_br(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *target_value, - IrBasicBlock *else_block, size_t case_count, IrInstructionSwitchBrCase *cases, IrInstruction *is_comptime) + IrBasicBlock *else_block, size_t case_count, IrInstructionSwitchBrCase *cases, 
IrInstruction *is_comptime, + IrInstruction *switch_prongs_void) { IrInstructionSwitchBr *instruction = ir_build_instruction<IrInstructionSwitchBr>(irb, scope, source_node); instruction->base.value.type = irb->codegen->builtin_types.entry_unreachable; @@ -1637,10 +1751,12 @@ static IrInstruction *ir_build_switch_br(IrBuilder *irb, Scope *scope, AstNode * instruction->case_count = case_count; instruction->cases = cases; instruction->is_comptime = is_comptime; + instruction->switch_prongs_void = switch_prongs_void; ir_ref_instruction(target_value, irb->current_basic_block); if (is_comptime) ir_ref_instruction(is_comptime, irb->current_basic_block); ir_ref_bb(else_block); + if (switch_prongs_void) ir_ref_instruction(switch_prongs_void, irb->current_basic_block); for (size_t i = 0; i < case_count; i += 1) { ir_ref_instruction(cases[i].value, irb->current_basic_block); @@ -1652,10 +1768,10 @@ static IrInstruction *ir_build_switch_br(IrBuilder *irb, Scope *scope, AstNode * static IrInstruction *ir_build_switch_br_from(IrBuilder *irb, IrInstruction *old_instruction, IrInstruction *target_value, IrBasicBlock *else_block, size_t case_count, - IrInstructionSwitchBrCase *cases, IrInstruction *is_comptime) + IrInstructionSwitchBrCase *cases, IrInstruction *is_comptime, IrInstruction *switch_prongs_void) { IrInstruction *new_instruction = ir_build_switch_br(irb, old_instruction->scope, old_instruction->source_node, - target_value, else_block, case_count, cases, is_comptime); + target_value, else_block, case_count, cases, is_comptime, switch_prongs_void); ir_link_new_instruction(new_instruction, old_instruction); return new_instruction; } @@ -1824,38 +1940,34 @@ static IrInstruction *ir_build_embed_file(IrBuilder *irb, Scope *scope, AstNode return &instruction->base; } -static IrInstruction *ir_build_cmpxchg(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *ptr, - IrInstruction *cmp_value, IrInstruction *new_value, IrInstruction *success_order_value, IrInstruction *failure_order_value, - AtomicOrder success_order, AtomicOrder failure_order) +static IrInstruction *ir_build_cmpxchg(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *type_value, + IrInstruction *ptr, IrInstruction *cmp_value, IrInstruction *new_value, + IrInstruction *success_order_value, IrInstruction *failure_order_value, + bool is_weak, + TypeTableEntry *type, AtomicOrder success_order, AtomicOrder failure_order) { IrInstructionCmpxchg *instruction = ir_build_instruction<IrInstructionCmpxchg>(irb, scope, source_node); + instruction->type_value = type_value; instruction->ptr = ptr; instruction->cmp_value = cmp_value; instruction->new_value = new_value; instruction->success_order_value = success_order_value; instruction->failure_order_value = failure_order_value; + instruction->is_weak = is_weak; + instruction->type = type; instruction->success_order = success_order; instruction->failure_order = failure_order; + if (type_value != nullptr) ir_ref_instruction(type_value, irb->current_basic_block); ir_ref_instruction(ptr, irb->current_basic_block); ir_ref_instruction(cmp_value, irb->current_basic_block); ir_ref_instruction(new_value, irb->current_basic_block); - ir_ref_instruction(success_order_value, irb->current_basic_block); - ir_ref_instruction(failure_order_value, irb->current_basic_block); + if (type_value != nullptr) ir_ref_instruction(success_order_value, irb->current_basic_block); + if (type_value != nullptr) ir_ref_instruction(failure_order_value, irb->current_basic_block); return &instruction->base; 
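ir_build_cmpxchg now carries the operand type and an is_weak flag, matching the six-argument @cmpxchgWeak and @cmpxchgStrong builtins registered earlier in this diff. A hedged sketch of the strong form, assuming it returns null on success and the observed value on failure:

    const builtin = @import("builtin");
    const assert = @import("std").debug.assert;

    test "cmpxchgStrong" {
        var value: i32 = 5;
        // Arguments: operand type, pointer, expected value, new value, success order, failure order.
        const ok = @cmpxchgStrong(i32, &value, 5, 10,
            builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst);
        assert(ok == null);     // null means the exchange succeeded
        assert(value == 10);

        const failed = @cmpxchgStrong(i32, &value, 5, 99,
            builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst);
        assert(failed != null); // expected 5 but the value is 10, so the swap did not happen
        assert(value == 10);
    }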
} -static IrInstruction *ir_build_cmpxchg_from(IrBuilder *irb, IrInstruction *old_instruction, IrInstruction *ptr, - IrInstruction *cmp_value, IrInstruction *new_value, IrInstruction *success_order_value, IrInstruction *failure_order_value, - AtomicOrder success_order, AtomicOrder failure_order) -{ - IrInstruction *new_instruction = ir_build_cmpxchg(irb, old_instruction->scope, old_instruction->source_node, - ptr, cmp_value, new_value, success_order_value, failure_order_value, success_order, failure_order); - ir_link_new_instruction(new_instruction, old_instruction); - return new_instruction; -} - static IrInstruction *ir_build_fence(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *order_value, AtomicOrder order) { IrInstructionFence *instruction = ir_build_instruction<IrInstructionFence>(irb, scope, source_node); instruction->order_value = order_value; @@ -1883,10 +1995,88 @@ static IrInstruction *ir_build_truncate(IrBuilder *irb, Scope *scope, AstNode *s return &instruction->base; } -static IrInstruction *ir_build_truncate_from(IrBuilder *irb, IrInstruction *old_instruction, IrInstruction *dest_type, IrInstruction *target) { - IrInstruction *new_instruction = ir_build_truncate(irb, old_instruction->scope, old_instruction->source_node, dest_type, target); - ir_link_new_instruction(new_instruction, old_instruction); - return new_instruction; +static IrInstruction *ir_build_int_cast(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *dest_type, IrInstruction *target) { + IrInstructionIntCast *instruction = ir_build_instruction<IrInstructionIntCast>(irb, scope, source_node); + instruction->dest_type = dest_type; + instruction->target = target; + + ir_ref_instruction(dest_type, irb->current_basic_block); + ir_ref_instruction(target, irb->current_basic_block); + + return &instruction->base; +} + +static IrInstruction *ir_build_float_cast(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *dest_type, IrInstruction *target) { + IrInstructionFloatCast *instruction = ir_build_instruction<IrInstructionFloatCast>(irb, scope, source_node); + instruction->dest_type = dest_type; + instruction->target = target; + + ir_ref_instruction(dest_type, irb->current_basic_block); + ir_ref_instruction(target, irb->current_basic_block); + + return &instruction->base; +} + +static IrInstruction *ir_build_err_set_cast(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *dest_type, IrInstruction *target) { + IrInstructionErrSetCast *instruction = ir_build_instruction<IrInstructionErrSetCast>(irb, scope, source_node); + instruction->dest_type = dest_type; + instruction->target = target; + + ir_ref_instruction(dest_type, irb->current_basic_block); + ir_ref_instruction(target, irb->current_basic_block); + + return &instruction->base; +} + +static IrInstruction *ir_build_to_bytes(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *target) { + IrInstructionToBytes *instruction = ir_build_instruction<IrInstructionToBytes>(irb, scope, source_node); + instruction->target = target; + + ir_ref_instruction(target, irb->current_basic_block); + + return &instruction->base; +} + +static IrInstruction *ir_build_from_bytes(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *dest_child_type, IrInstruction *target) { + IrInstructionFromBytes *instruction = ir_build_instruction<IrInstructionFromBytes>(irb, scope, source_node); + instruction->dest_child_type = dest_child_type; + instruction->target = target; + + 
ir_ref_instruction(dest_child_type, irb->current_basic_block); + ir_ref_instruction(target, irb->current_basic_block); + + return &instruction->base; +} + +static IrInstruction *ir_build_int_to_float(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *dest_type, IrInstruction *target) { + IrInstructionIntToFloat *instruction = ir_build_instruction<IrInstructionIntToFloat>(irb, scope, source_node); + instruction->dest_type = dest_type; + instruction->target = target; + + ir_ref_instruction(dest_type, irb->current_basic_block); + ir_ref_instruction(target, irb->current_basic_block); + + return &instruction->base; +} + +static IrInstruction *ir_build_float_to_int(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *dest_type, IrInstruction *target) { + IrInstructionFloatToInt *instruction = ir_build_instruction<IrInstructionFloatToInt>(irb, scope, source_node); + instruction->dest_type = dest_type; + instruction->target = target; + + ir_ref_instruction(dest_type, irb->current_basic_block); + ir_ref_instruction(target, irb->current_basic_block); + + return &instruction->base; +} + +static IrInstruction *ir_build_bool_to_int(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *target) { + IrInstructionBoolToInt *instruction = ir_build_instruction<IrInstructionBoolToInt>(irb, scope, source_node); + instruction->target = target; + + ir_ref_instruction(target, irb->current_basic_block); + + return &instruction->base; } static IrInstruction *ir_build_int_type(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *is_signed, IrInstruction *bit_count) { @@ -2054,6 +2244,17 @@ static IrInstruction *ir_build_frame_address_from(IrBuilder *irb, IrInstruction return new_instruction; } +static IrInstruction *ir_build_handle(IrBuilder *irb, Scope *scope, AstNode *source_node) { + IrInstructionHandle *instruction = ir_build_instruction<IrInstructionHandle>(irb, scope, source_node); + return &instruction->base; +} + +static IrInstruction *ir_build_handle_from(IrBuilder *irb, IrInstruction *old_instruction) { + IrInstruction *new_instruction = ir_build_handle(irb, old_instruction->scope, old_instruction->source_node); + ir_link_new_instruction(new_instruction, old_instruction); + return new_instruction; +} + static IrInstruction *ir_build_overflow_op(IrBuilder *irb, Scope *scope, AstNode *source_node, IrOverflowOp op, IrInstruction *type_value, IrInstruction *op1, IrInstruction *op2, IrInstruction *result_ptr, TypeTableEntry *result_ptr_type) @@ -2252,10 +2453,26 @@ static IrInstruction *ir_build_ptr_to_int(IrBuilder *irb, Scope *scope, AstNode } static IrInstruction *ir_build_int_to_enum(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *target) + IrInstruction *dest_type, IrInstruction *target) { IrInstructionIntToEnum *instruction = ir_build_instruction<IrInstructionIntToEnum>( irb, scope, source_node); + instruction->dest_type = dest_type; + instruction->target = target; + + if (dest_type) ir_ref_instruction(dest_type, irb->current_basic_block); + ir_ref_instruction(target, irb->current_basic_block); + + return &instruction->base; +} + + + +static IrInstruction *ir_build_enum_to_int(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction *target) +{ + IrInstructionEnumToInt *instruction = ir_build_instruction<IrInstructionEnumToInt>( + irb, scope, source_node); instruction->target = target; ir_ref_instruction(target, irb->current_basic_block); @@ -2331,20 +2548,6 @@ static IrInstruction *ir_build_type_name(IrBuilder *irb, 
Scope *scope, AstNode * return &instruction->base; } -static IrInstruction *ir_build_can_implicit_cast(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *type_value, IrInstruction *target_value) -{ - IrInstructionCanImplicitCast *instruction = ir_build_instruction<IrInstructionCanImplicitCast>( - irb, scope, source_node); - instruction->type_value = type_value; - instruction->target_value = target_value; - - ir_ref_instruction(type_value, irb->current_basic_block); - ir_ref_instruction(target_value, irb->current_basic_block); - - return &instruction->base; -} - static IrInstruction *ir_build_decl_ref(IrBuilder *irb, Scope *scope, AstNode *source_node, Tld *tld, LVal lval) { @@ -2419,6 +2622,16 @@ static IrInstruction *ir_build_offset_of(IrBuilder *irb, Scope *scope, AstNode * return &instruction->base; } +static IrInstruction *ir_build_type_info(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction *type_value) { + IrInstructionTypeInfo *instruction = ir_build_instruction<IrInstructionTypeInfo>(irb, scope, source_node); + instruction->type_value = type_value; + + ir_ref_instruction(type_value, irb->current_basic_block); + + return &instruction->base; +} + static IrInstruction *ir_build_type_id(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *type_value) { @@ -2484,9 +2697,9 @@ static IrInstruction *ir_build_arg_type(IrBuilder *irb, Scope *scope, AstNode *s return &instruction->base; } -static IrInstruction *ir_build_error_return_trace(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstructionErrorReturnTrace::Nullable nullable) { +static IrInstruction *ir_build_error_return_trace(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstructionErrorReturnTrace::Optional optional) { IrInstructionErrorReturnTrace *instruction = ir_build_instruction<IrInstructionErrorReturnTrace>(irb, scope, source_node); - instruction->nullable = nullable; + instruction->optional = optional; return &instruction->base; } @@ -2669,6 +2882,23 @@ static IrInstruction *ir_build_atomic_rmw(IrBuilder *irb, Scope *scope, AstNode return &instruction->base; } +static IrInstruction *ir_build_atomic_load(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction *operand_type, IrInstruction *ptr, + IrInstruction *ordering, AtomicOrder resolved_ordering) +{ + IrInstructionAtomicLoad *instruction = ir_build_instruction<IrInstructionAtomicLoad>(irb, scope, source_node); + instruction->operand_type = operand_type; + instruction->ptr = ptr; + instruction->ordering = ordering; + instruction->resolved_ordering = resolved_ordering; + + if (operand_type != nullptr) ir_ref_instruction(operand_type, irb->current_basic_block); + ir_ref_instruction(ptr, irb->current_basic_block); + if (ordering != nullptr) ir_ref_instruction(ordering, irb->current_basic_block); + + return &instruction->base; +} + static IrInstruction *ir_build_promise_result_type(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *promise_type) { @@ -2731,20 +2961,49 @@ static IrInstruction *ir_build_mark_err_ret_trace_ptr(IrBuilder *irb, Scope *sco return &instruction->base; } +static IrInstruction *ir_build_sqrt(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *type, IrInstruction *op) { + IrInstructionSqrt *instruction = ir_build_instruction<IrInstructionSqrt>(irb, scope, source_node); + instruction->type = type; + instruction->op = op; + + if (type != nullptr) ir_ref_instruction(type, irb->current_basic_block); + ir_ref_instruction(op, irb->current_basic_block); + + 
return &instruction->base;
+}
+
 static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_scope, size_t *results) {
     results[ReturnKindUnconditional] = 0;
     results[ReturnKindError] = 0;
 
-    while (inner_scope != outer_scope) {
-        assert(inner_scope);
-        if (inner_scope->id == ScopeIdDefer) {
-            AstNode *defer_node = inner_scope->source_node;
-            assert(defer_node->type == NodeTypeDefer);
-            ReturnKind defer_kind = defer_node->data.defer.kind;
-            results[defer_kind] += 1;
+    Scope *scope = inner_scope;
+    while (scope != outer_scope) {
+        assert(scope);
+        switch (scope->id) {
+            case ScopeIdDefer: {
+                AstNode *defer_node = scope->source_node;
+                assert(defer_node->type == NodeTypeDefer);
+                ReturnKind defer_kind = defer_node->data.defer.kind;
+                results[defer_kind] += 1;
+                scope = scope->parent;
+                continue;
+            }
+            case ScopeIdDecls:
+            case ScopeIdFnDef:
+                return;
+            case ScopeIdBlock:
+            case ScopeIdVarDecl:
+            case ScopeIdLoop:
+            case ScopeIdSuspend:
+            case ScopeIdCompTime:
+                scope = scope->parent;
+                continue;
+            case ScopeIdDeferExpr:
+            case ScopeIdCImport:
+            case ScopeIdCoroPrelude:
+                zig_unreachable();
         }
-        inner_scope = inner_scope->parent;
     }
 }
@@ -2760,27 +3019,43 @@ static bool ir_gen_defers_for_block(IrBuilder *irb, Scope *inner_scope, Scope *o
         if (!scope)
             return is_noreturn;
 
-        if (scope->id == ScopeIdDefer) {
-            AstNode *defer_node = scope->source_node;
-            assert(defer_node->type == NodeTypeDefer);
-            ReturnKind defer_kind = defer_node->data.defer.kind;
-            if (defer_kind == ReturnKindUnconditional ||
-                (gen_error_defers && defer_kind == ReturnKindError))
-            {
-                AstNode *defer_expr_node = defer_node->data.defer.expr;
-                Scope *defer_expr_scope = defer_node->data.defer.expr_scope;
-                IrInstruction *defer_expr_value = ir_gen_node(irb, defer_expr_node, defer_expr_scope);
-                if (defer_expr_value != irb->codegen->invalid_instruction) {
-                    if (defer_expr_value->value.type != nullptr && defer_expr_value->value.type->id == TypeTableEntryIdUnreachable) {
-                        is_noreturn = true;
-                    } else {
-                        ir_mark_gen(ir_build_check_statement_is_void(irb, defer_expr_scope, defer_expr_node, defer_expr_value));
+        switch (scope->id) {
+            case ScopeIdDefer: {
+                AstNode *defer_node = scope->source_node;
+                assert(defer_node->type == NodeTypeDefer);
+                ReturnKind defer_kind = defer_node->data.defer.kind;
+                if (defer_kind == ReturnKindUnconditional ||
+                    (gen_error_defers && defer_kind == ReturnKindError))
+                {
+                    AstNode *defer_expr_node = defer_node->data.defer.expr;
+                    Scope *defer_expr_scope = defer_node->data.defer.expr_scope;
+                    IrInstruction *defer_expr_value = ir_gen_node(irb, defer_expr_node, defer_expr_scope);
+                    if (defer_expr_value != irb->codegen->invalid_instruction) {
+                        if (defer_expr_value->value.type != nullptr && defer_expr_value->value.type->id == TypeTableEntryIdUnreachable) {
+                            is_noreturn = true;
+                        } else {
+                            ir_mark_gen(ir_build_check_statement_is_void(irb, defer_expr_scope, defer_expr_node, defer_expr_value));
+                        }
                     }
                 }
+                scope = scope->parent;
+                continue;
             }
-
+            case ScopeIdDecls:
+            case ScopeIdFnDef:
+                return is_noreturn;
+            case ScopeIdBlock:
+            case ScopeIdVarDecl:
+            case ScopeIdLoop:
+            case ScopeIdSuspend:
+            case ScopeIdCompTime:
+                scope = scope->parent;
+                continue;
+            case ScopeIdDeferExpr:
+            case ScopeIdCImport:
+            case ScopeIdCoroPrelude:
+                zig_unreachable();
         }
-        scope = scope->parent;
     }
     return is_noreturn;
 }
@@ -2796,6 +3071,18 @@ static void ir_set_cursor_at_end_and_append_block(IrBuilder *irb, IrBasicBlock *
     ir_set_cursor_at_end(irb, basic_block);
 }
 
+static ScopeSuspend *get_scope_suspend(Scope *scope) {
+    while (scope) {
+        if (scope->id == ScopeIdSuspend)
+            return (ScopeSuspend *)scope;
+        if (scope->id == ScopeIdFnDef)
+            return nullptr;
+
+        scope = scope->parent;
+    }
+    return nullptr;
+}
+
 static ScopeDeferExpr *get_scope_defer_expr(Scope *scope) {
     while (scope) {
         if (scope->id == ScopeIdDeferExpr)
@@ -2825,20 +3112,47 @@ static IrInstruction *ir_gen_async_return(IrBuilder *irb, Scope *scope, AstNode
         return return_inst;
     }
 
-    ir_build_store_ptr(irb, scope, node, irb->exec->coro_result_field_ptr, return_value);
-    IrInstruction *promise_type_val = ir_build_const_type(irb, scope, node,
-            get_maybe_type(irb->codegen, irb->codegen->builtin_types.entry_promise));
-    // TODO replace replacement_value with @intToPtr(?promise, 0x1) when it doesn't crash zig
-    IrInstruction *replacement_value = irb->exec->coro_handle;
-    IrInstruction *maybe_await_handle = ir_build_atomic_rmw(irb, scope, node,
-            promise_type_val, irb->exec->coro_awaiter_field_ptr, nullptr, replacement_value, nullptr,
-            AtomicRmwOp_xchg, AtomicOrderSeqCst);
-    ir_build_store_ptr(irb, scope, node, irb->exec->await_handle_var_ptr, maybe_await_handle);
-    IrInstruction *is_non_null = ir_build_test_nonnull(irb, scope, node, maybe_await_handle);
+    IrBasicBlock *suspended_block = ir_create_basic_block(irb, scope, "Suspended");
+    IrBasicBlock *not_suspended_block = ir_create_basic_block(irb, scope, "NotSuspended");
+    IrBasicBlock *store_awaiter_block = ir_create_basic_block(irb, scope, "StoreAwaiter");
+    IrBasicBlock *check_canceled_block = ir_create_basic_block(irb, scope, "CheckCanceled");
+
+    IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, scope, node, 0x7); // 0b111
+    IrInstruction *ptr_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
+    IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001
+    IrInstruction *is_suspended_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010
+    IrInstruction *promise_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_promise);
     IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false);
-    return ir_build_cond_br(irb, scope, node, is_non_null, irb->exec->coro_normal_final, irb->exec->coro_early_final,
-            is_comptime);
-    // the above blocks are rendered by ir_gen after the rest of codegen
+    IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
+
+    ir_build_store_ptr(irb, scope, node, irb->exec->coro_result_field_ptr, return_value);
+    IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize);
+    IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
+            usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, ptr_mask, nullptr,
+            AtomicRmwOp_or, AtomicOrderSeqCst);
+
+    IrInstruction *is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false);
+    IrInstruction *is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false);
+    ir_build_cond_br(irb, scope, node, is_suspended_bool, suspended_block, not_suspended_block, is_comptime);
+
+    ir_set_cursor_at_end_and_append_block(irb, suspended_block);
+    ir_build_unreachable(irb, scope, node);
+
+    ir_set_cursor_at_end_and_append_block(irb, not_suspended_block);
+    IrInstruction *await_handle_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
+    // if we ever add null checking safety to the ptrtoint instruction, it needs to be disabled here
+    IrInstruction *have_await_handle = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false);
+    ir_build_cond_br(irb, scope, node, have_await_handle, store_awaiter_block, check_canceled_block, is_comptime);
+
+    ir_set_cursor_at_end_and_append_block(irb, store_awaiter_block);
+    IrInstruction *await_handle = ir_build_int_to_ptr(irb, scope, node, promise_type_val, await_handle_addr);
+    ir_build_store_ptr(irb, scope, node, irb->exec->await_handle_var_ptr, await_handle);
+    ir_build_br(irb, scope, node, irb->exec->coro_normal_final, is_comptime);
+
+    ir_set_cursor_at_end_and_append_block(irb, check_canceled_block);
+    IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
+    IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
+    return ir_build_cond_br(irb, scope, node, is_canceled_bool, irb->exec->coro_final_cleanup_block, irb->exec->coro_early_final, is_comptime);
 }
 
 static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval) {
@@ -2923,7 +3237,7 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
         case ReturnKindError:
             {
                 assert(expr_node);
-                IrInstruction *err_union_ptr = ir_gen_node_extra(irb, expr_node, scope, LVAL_PTR);
+                IrInstruction *err_union_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr);
                 if (err_union_ptr == irb->codegen->invalid_instruction)
                     return irb->codegen->invalid_instruction;
                 IrInstruction *err_union_val = ir_build_load_ptr(irb, scope, node, err_union_ptr);
@@ -2951,7 +3265,7 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
 
                 ir_set_cursor_at_end_and_append_block(irb, continue_block);
                 IrInstruction *unwrapped_ptr = ir_build_unwrap_err_payload(irb, scope, node, err_union_ptr, false);
-                if (lval.is_ptr)
+                if (lval == LValPtr)
                     return unwrapped_ptr;
                 else
                     return ir_build_load_ptr(irb, scope, node, unwrapped_ptr);
@@ -2981,9 +3295,8 @@ static VariableTableEntry *create_local_var(CodeGen *codegen, AstNode *node, Sco
             add_error_note(codegen, msg, existing_var->decl_node, buf_sprintf("previous declaration is here"));
             variable_entry->value->type = codegen->builtin_types.entry_invalid;
         } else {
-            auto primitive_table_entry = codegen->primitive_type_table.maybe_get(name);
-            if (primitive_table_entry) {
-                TypeTableEntry *type = primitive_table_entry->value;
+            TypeTableEntry *type = get_primitive_type(codegen, name);
+            if (type != nullptr) {
                 add_node_error(codegen, node,
                         buf_sprintf("variable shadows type '%s'", buf_ptr(&type->name)));
                 variable_entry->value->type = codegen->builtin_types.entry_invalid;
@@ -3019,7 +3332,15 @@ static VariableTableEntry *ir_create_var(IrBuilder *irb, AstNode *node, Scope *scope, Buf *name,
         bool src_is_const, bool gen_is_const, bool is_shadowable, IrInstruction *is_comptime)
 {
-    VariableTableEntry *var = create_local_var(irb->codegen, node, scope, name, src_is_const, gen_is_const, is_shadowable, is_comptime);
+    bool is_underscored = name ? buf_eql_str(name, "_") : false;
+    VariableTableEntry *var = create_local_var( irb->codegen
+            , node
+            , scope
+            , (is_underscored ? nullptr : name)
+            , src_is_const
+            , gen_is_const
+            , (is_underscored ?
true : is_shadowable) + , is_comptime ); if (is_comptime != nullptr || gen_is_const) { var->mem_slot_index = exec_next_mem_slot(irb->exec); var->owner_exec = irb->exec; @@ -3086,6 +3407,9 @@ static IrInstruction *ir_gen_block(IrBuilder *irb, Scope *parent_scope, AstNode if (block_node->data.block.name == nullptr || incoming_blocks.length == 0) { return noreturn_return_value; } + + ir_set_cursor_at_end_and_append_block(irb, scope_block->end_block); + return ir_build_phi(irb, parent_scope, block_node, incoming_blocks.length, incoming_blocks.items, incoming_values.items); } else { incoming_blocks.append(irb->current_basic_block); incoming_values.append(ir_mark_gen(ir_build_const_void(irb, parent_scope, block_node))); @@ -3113,7 +3437,7 @@ static IrInstruction *ir_gen_bin_op_id(IrBuilder *irb, Scope *scope, AstNode *no } static IrInstruction *ir_gen_assign(IrBuilder *irb, Scope *scope, AstNode *node) { - IrInstruction *lvalue = ir_gen_node_extra(irb, node->data.bin_op_expr.op1, scope, LVAL_PTR); + IrInstruction *lvalue = ir_gen_node_extra(irb, node->data.bin_op_expr.op1, scope, LValPtr); IrInstruction *rvalue = ir_gen_node(irb, node->data.bin_op_expr.op2, scope); if (lvalue == irb->codegen->invalid_instruction || rvalue == irb->codegen->invalid_instruction) @@ -3124,7 +3448,7 @@ static IrInstruction *ir_gen_assign(IrBuilder *irb, Scope *scope, AstNode *node) } static IrInstruction *ir_gen_assign_op(IrBuilder *irb, Scope *scope, AstNode *node, IrBinOp op_id) { - IrInstruction *lvalue = ir_gen_node_extra(irb, node->data.bin_op_expr.op1, scope, LVAL_PTR); + IrInstruction *lvalue = ir_gen_node_extra(irb, node->data.bin_op_expr.op1, scope, LValPtr); if (lvalue == irb->codegen->invalid_instruction) return lvalue; IrInstruction *op1 = ir_build_load_ptr(irb, scope, node->data.bin_op_expr.op1, lvalue); @@ -3226,7 +3550,7 @@ static IrInstruction *ir_gen_maybe_ok_or(IrBuilder *irb, Scope *parent_scope, As AstNode *op1_node = node->data.bin_op_expr.op1; AstNode *op2_node = node->data.bin_op_expr.op2; - IrInstruction *maybe_ptr = ir_gen_node_extra(irb, op1_node, parent_scope, LVAL_PTR); + IrInstruction *maybe_ptr = ir_gen_node_extra(irb, op1_node, parent_scope, LValPtr); if (maybe_ptr == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; @@ -3240,9 +3564,9 @@ static IrInstruction *ir_gen_maybe_ok_or(IrBuilder *irb, Scope *parent_scope, As is_comptime = ir_build_test_comptime(irb, parent_scope, node, is_non_null); } - IrBasicBlock *ok_block = ir_create_basic_block(irb, parent_scope, "MaybeNonNull"); - IrBasicBlock *null_block = ir_create_basic_block(irb, parent_scope, "MaybeNull"); - IrBasicBlock *end_block = ir_create_basic_block(irb, parent_scope, "MaybeEnd"); + IrBasicBlock *ok_block = ir_create_basic_block(irb, parent_scope, "OptionalNonNull"); + IrBasicBlock *null_block = ir_create_basic_block(irb, parent_scope, "OptionalNull"); + IrBasicBlock *end_block = ir_create_basic_block(irb, parent_scope, "OptionalEnd"); ir_build_cond_br(irb, parent_scope, node, is_non_null, ok_block, null_block, is_comptime); ir_set_cursor_at_end_and_append_block(irb, null_block); @@ -3371,7 +3695,7 @@ static IrInstruction *ir_gen_bin_op(IrBuilder *irb, Scope *scope, AstNode *node) return ir_gen_bin_op_id(irb, scope, node, IrBinOpArrayMult); case BinOpTypeMergeErrorSets: return ir_gen_bin_op_id(irb, scope, node, IrBinOpMergeErrorSets); - case BinOpTypeUnwrapMaybe: + case BinOpTypeUnwrapOptional: return ir_gen_maybe_ok_or(irb, scope, node); case BinOpTypeErrorUnion: return ir_gen_error_union(irb, 
scope, node); @@ -3413,7 +3737,7 @@ static IrInstruction *ir_gen_symbol(IrBuilder *irb, Scope *scope, AstNode *node, Buf *variable_name = node->data.symbol_expr.symbol; - if (buf_eql_str(variable_name, "_") && lval.is_ptr) { + if (buf_eql_str(variable_name, "_") && lval == LValPtr) { IrInstructionConst *const_instruction = ir_build_instruction<IrInstructionConst>(irb, scope, node); const_instruction->base.value.type = get_pointer_to_type(irb->codegen, irb->codegen->builtin_types.entry_void, false); @@ -3422,11 +3746,11 @@ static IrInstruction *ir_gen_symbol(IrBuilder *irb, Scope *scope, AstNode *node, return &const_instruction->base; } - auto primitive_table_entry = irb->codegen->primitive_type_table.maybe_get(variable_name); - if (primitive_table_entry) { - IrInstruction *value = ir_build_const_type(irb, scope, node, primitive_table_entry->value); - if (lval.is_ptr) { - return ir_build_ref(irb, scope, node, value, lval.is_const, lval.is_volatile); + TypeTableEntry *primitive_type = get_primitive_type(irb->codegen, variable_name); + if (primitive_type != nullptr) { + IrInstruction *value = ir_build_const_type(irb, scope, node, primitive_type); + if (lval == LValPtr) { + return ir_build_ref(irb, scope, node, value, false, false); } else { return value; } @@ -3434,9 +3758,8 @@ static IrInstruction *ir_gen_symbol(IrBuilder *irb, Scope *scope, AstNode *node, VariableTableEntry *var = find_variable(irb->codegen, scope, variable_name); if (var) { - IrInstruction *var_ptr = ir_build_var_ptr(irb, scope, node, var, - !lval.is_ptr || lval.is_const, lval.is_ptr && lval.is_volatile); - if (lval.is_ptr) + IrInstruction *var_ptr = ir_build_var_ptr(irb, scope, node, var); + if (lval == LValPtr) return var_ptr; else return ir_build_load_ptr(irb, scope, node, var_ptr); @@ -3462,7 +3785,7 @@ static IrInstruction *ir_gen_array_access(IrBuilder *irb, Scope *scope, AstNode assert(node->type == NodeTypeArrayAccessExpr); AstNode *array_ref_node = node->data.array_access_expr.array_ref_expr; - IrInstruction *array_ref_instruction = ir_gen_node_extra(irb, array_ref_node, scope, LVAL_PTR); + IrInstruction *array_ref_instruction = ir_gen_node_extra(irb, array_ref_node, scope, LValPtr); if (array_ref_instruction == irb->codegen->invalid_instruction) return array_ref_instruction; @@ -3472,28 +3795,24 @@ static IrInstruction *ir_gen_array_access(IrBuilder *irb, Scope *scope, AstNode return subscript_instruction; IrInstruction *ptr_instruction = ir_build_elem_ptr(irb, scope, node, array_ref_instruction, - subscript_instruction, true); - if (lval.is_ptr) + subscript_instruction, true, PtrLenSingle); + if (lval == LValPtr) return ptr_instruction; return ir_build_load_ptr(irb, scope, node, ptr_instruction); } -static IrInstruction *ir_gen_field_access(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval) { +static IrInstruction *ir_gen_field_access(IrBuilder *irb, Scope *scope, AstNode *node) { assert(node->type == NodeTypeFieldAccessExpr); AstNode *container_ref_node = node->data.field_access_expr.struct_expr; Buf *field_name = node->data.field_access_expr.field_name; - IrInstruction *container_ref_instruction = ir_gen_node_extra(irb, container_ref_node, scope, LVAL_PTR); + IrInstruction *container_ref_instruction = ir_gen_node_extra(irb, container_ref_node, scope, LValPtr); if (container_ref_instruction == irb->codegen->invalid_instruction) return container_ref_instruction; - IrInstruction *ptr_instruction = ir_build_field_ptr(irb, scope, node, container_ref_instruction, field_name); - if (lval.is_ptr) - return 
ptr_instruction; - - return ir_build_load_ptr(irb, scope, node, ptr_instruction); + return ir_build_field_ptr(irb, scope, node, container_ref_instruction, field_name); } static IrInstruction *ir_gen_overflow_op(IrBuilder *irb, Scope *scope, AstNode *node, IrOverflowOp op) { @@ -3524,7 +3843,7 @@ static IrInstruction *ir_gen_overflow_op(IrBuilder *irb, Scope *scope, AstNode * return ir_build_overflow_op(irb, scope, node, op, type_value, op1, op2, result_ptr, nullptr); } -static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNode *node) { +static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval) { assert(node->type == NodeTypeFnCallExpr); AstNode *fn_ref_expr = node->data.fn_call_expr.fn_ref_expr; @@ -3547,6 +3866,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return irb->codegen->invalid_instruction; } + bool is_async = exec_is_async(irb->exec); + switch (builtin_fn->id) { case BuiltinFnIdInvalid: zig_unreachable(); @@ -3556,7 +3877,9 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo IrInstruction *arg = ir_gen_node(irb, arg_node, scope); if (arg == irb->codegen->invalid_instruction) return arg; - return ir_build_typeof(irb, scope, node, arg); + + IrInstruction *type_of = ir_build_typeof(irb, scope, node, arg); + return ir_lval_wrap(irb, scope, type_of, lval); } case BuiltinFnIdSetCold: { @@ -3565,7 +3888,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg0_value == irb->codegen->invalid_instruction) return arg0_value; - return ir_build_set_cold(irb, scope, node, arg0_value); + IrInstruction *set_cold = ir_build_set_cold(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, set_cold, lval); } case BuiltinFnIdSetRuntimeSafety: { @@ -3574,7 +3898,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg0_value == irb->codegen->invalid_instruction) return arg0_value; - return ir_build_set_runtime_safety(irb, scope, node, arg0_value); + IrInstruction *set_safety = ir_build_set_runtime_safety(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, set_safety, lval); } case BuiltinFnIdSetFloatMode: { @@ -3588,7 +3913,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg1_value == irb->codegen->invalid_instruction) return arg1_value; - return ir_build_set_float_mode(irb, scope, node, arg0_value, arg1_value); + IrInstruction *set_float_mode = ir_build_set_float_mode(irb, scope, node, arg0_value, arg1_value); + return ir_lval_wrap(irb, scope, set_float_mode, lval); } case BuiltinFnIdSizeof: { @@ -3597,7 +3923,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg0_value == irb->codegen->invalid_instruction) return arg0_value; - return ir_build_size_of(irb, scope, node, arg0_value); + IrInstruction *size_of = ir_build_size_of(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, size_of, lval); } case BuiltinFnIdCtz: { @@ -3606,7 +3933,18 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg0_value == irb->codegen->invalid_instruction) return arg0_value; - return ir_build_ctz(irb, scope, node, arg0_value); + IrInstruction *ctz = ir_build_ctz(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, ctz, lval); + } + case BuiltinFnIdPopCount: + { + AstNode *arg0_node = node->data.fn_call_expr.params.at(0); + IrInstruction *arg0_value 
= ir_gen_node(irb, arg0_node, scope); + if (arg0_value == irb->codegen->invalid_instruction) + return arg0_value; + + IrInstruction *instr = ir_build_pop_count(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, instr, lval); } case BuiltinFnIdClz: { @@ -3615,7 +3953,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg0_value == irb->codegen->invalid_instruction) return arg0_value; - return ir_build_clz(irb, scope, node, arg0_value); + IrInstruction *clz = ir_build_clz(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, clz, lval); } case BuiltinFnIdImport: { @@ -3624,11 +3963,13 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg0_value == irb->codegen->invalid_instruction) return arg0_value; - return ir_build_import(irb, scope, node, arg0_value); + IrInstruction *import = ir_build_import(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, import, lval); } case BuiltinFnIdCImport: { - return ir_build_c_import(irb, scope, node); + IrInstruction *c_import = ir_build_c_import(irb, scope, node); + return ir_lval_wrap(irb, scope, c_import, lval); } case BuiltinFnIdCInclude: { @@ -3642,7 +3983,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return irb->codegen->invalid_instruction; } - return ir_build_c_include(irb, scope, node, arg0_value); + IrInstruction *c_include = ir_build_c_include(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, c_include, lval); } case BuiltinFnIdCDefine: { @@ -3661,7 +4003,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return irb->codegen->invalid_instruction; } - return ir_build_c_define(irb, scope, node, arg0_value, arg1_value); + IrInstruction *c_define = ir_build_c_define(irb, scope, node, arg0_value, arg1_value); + return ir_lval_wrap(irb, scope, c_define, lval); } case BuiltinFnIdCUndef: { @@ -3675,7 +4018,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return irb->codegen->invalid_instruction; } - return ir_build_c_undef(irb, scope, node, arg0_value); + IrInstruction *c_undef = ir_build_c_undef(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, c_undef, lval); } case BuiltinFnIdMaxValue: { @@ -3684,7 +4028,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg0_value == irb->codegen->invalid_instruction) return arg0_value; - return ir_build_max_value(irb, scope, node, arg0_value); + IrInstruction *max_value = ir_build_max_value(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, max_value, lval); } case BuiltinFnIdMinValue: { @@ -3693,7 +4038,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg0_value == irb->codegen->invalid_instruction) return arg0_value; - return ir_build_min_value(irb, scope, node, arg0_value); + IrInstruction *min_value = ir_build_min_value(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, min_value, lval); } case BuiltinFnIdCompileErr: { @@ -3702,7 +4048,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg0_value == irb->codegen->invalid_instruction) return arg0_value; - return ir_build_compile_err(irb, scope, node, arg0_value); + IrInstruction *compile_err = ir_build_compile_err(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, compile_err, lval); } case BuiltinFnIdCompileLog: { @@ -3715,7 +4062,8 @@ static 
IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return irb->codegen->invalid_instruction; } - return ir_build_compile_log(irb, scope, node, actual_param_count, args); + IrInstruction *compile_log = ir_build_compile_log(irb, scope, node, actual_param_count, args); + return ir_lval_wrap(irb, scope, compile_log, lval); } case BuiltinFnIdErrName: { @@ -3724,7 +4072,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg0_value == irb->codegen->invalid_instruction) return arg0_value; - return ir_build_err_name(irb, scope, node, arg0_value); + IrInstruction *err_name = ir_build_err_name(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, err_name, lval); } case BuiltinFnIdEmbedFile: { @@ -3733,9 +4082,11 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg0_value == irb->codegen->invalid_instruction) return arg0_value; - return ir_build_embed_file(irb, scope, node, arg0_value); + IrInstruction *embed_file = ir_build_embed_file(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, embed_file, lval); } - case BuiltinFnIdCmpExchange: + case BuiltinFnIdCmpxchgWeak: + case BuiltinFnIdCmpxchgStrong: { AstNode *arg0_node = node->data.fn_call_expr.params.at(0); IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope); @@ -3762,9 +4113,15 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg4_value == irb->codegen->invalid_instruction) return arg4_value; - return ir_build_cmpxchg(irb, scope, node, arg0_value, arg1_value, - arg2_value, arg3_value, arg4_value, - AtomicOrderUnordered, AtomicOrderUnordered); + AstNode *arg5_node = node->data.fn_call_expr.params.at(5); + IrInstruction *arg5_value = ir_gen_node(irb, arg5_node, scope); + if (arg5_value == irb->codegen->invalid_instruction) + return arg5_value; + + IrInstruction *cmpxchg = ir_build_cmpxchg(irb, scope, node, arg0_value, arg1_value, + arg2_value, arg3_value, arg4_value, arg5_value, (builtin_fn->id == BuiltinFnIdCmpxchgWeak), + nullptr, AtomicOrderUnordered, AtomicOrderUnordered); + return ir_lval_wrap(irb, scope, cmpxchg, lval); } case BuiltinFnIdFence: { @@ -3773,7 +4130,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg0_value == irb->codegen->invalid_instruction) return arg0_value; - return ir_build_fence(irb, scope, node, arg0_value, AtomicOrderUnordered); + IrInstruction *fence = ir_build_fence(irb, scope, node, arg0_value, AtomicOrderUnordered); + return ir_lval_wrap(irb, scope, fence, lval); } case BuiltinFnIdDivExact: { @@ -3787,7 +4145,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg1_value == irb->codegen->invalid_instruction) return arg1_value; - return ir_build_bin_op(irb, scope, node, IrBinOpDivExact, arg0_value, arg1_value, true); + IrInstruction *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpDivExact, arg0_value, arg1_value, true); + return ir_lval_wrap(irb, scope, bin_op, lval); } case BuiltinFnIdDivTrunc: { @@ -3801,7 +4160,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg1_value == irb->codegen->invalid_instruction) return arg1_value; - return ir_build_bin_op(irb, scope, node, IrBinOpDivTrunc, arg0_value, arg1_value, true); + IrInstruction *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpDivTrunc, arg0_value, arg1_value, true); + return ir_lval_wrap(irb, scope, bin_op, lval); } case BuiltinFnIdDivFloor: { @@ -3815,7 +4175,8 @@ 
static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg1_value == irb->codegen->invalid_instruction) return arg1_value; - return ir_build_bin_op(irb, scope, node, IrBinOpDivFloor, arg0_value, arg1_value, true); + IrInstruction *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpDivFloor, arg0_value, arg1_value, true); + return ir_lval_wrap(irb, scope, bin_op, lval); } case BuiltinFnIdRem: { @@ -3829,7 +4190,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg1_value == irb->codegen->invalid_instruction) return arg1_value; - return ir_build_bin_op(irb, scope, node, IrBinOpRemRem, arg0_value, arg1_value, true); + IrInstruction *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpRemRem, arg0_value, arg1_value, true); + return ir_lval_wrap(irb, scope, bin_op, lval); } case BuiltinFnIdMod: { @@ -3843,7 +4205,23 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg1_value == irb->codegen->invalid_instruction) return arg1_value; - return ir_build_bin_op(irb, scope, node, IrBinOpRemMod, arg0_value, arg1_value, true); + IrInstruction *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpRemMod, arg0_value, arg1_value, true); + return ir_lval_wrap(irb, scope, bin_op, lval); + } + case BuiltinFnIdSqrt: + { + AstNode *arg0_node = node->data.fn_call_expr.params.at(0); + IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope); + if (arg0_value == irb->codegen->invalid_instruction) + return arg0_value; + + AstNode *arg1_node = node->data.fn_call_expr.params.at(1); + IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope); + if (arg1_value == irb->codegen->invalid_instruction) + return arg1_value; + + IrInstruction *ir_sqrt = ir_build_sqrt(irb, scope, node, arg0_value, arg1_value); + return ir_lval_wrap(irb, scope, ir_sqrt, lval); } case BuiltinFnIdTruncate: { @@ -3857,7 +4235,138 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg1_value == irb->codegen->invalid_instruction) return arg1_value; - return ir_build_truncate(irb, scope, node, arg0_value, arg1_value); + IrInstruction *truncate = ir_build_truncate(irb, scope, node, arg0_value, arg1_value); + return ir_lval_wrap(irb, scope, truncate, lval); + } + case BuiltinFnIdIntCast: + { + AstNode *arg0_node = node->data.fn_call_expr.params.at(0); + IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope); + if (arg0_value == irb->codegen->invalid_instruction) + return arg0_value; + + AstNode *arg1_node = node->data.fn_call_expr.params.at(1); + IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope); + if (arg1_value == irb->codegen->invalid_instruction) + return arg1_value; + + IrInstruction *result = ir_build_int_cast(irb, scope, node, arg0_value, arg1_value); + return ir_lval_wrap(irb, scope, result, lval); + } + case BuiltinFnIdFloatCast: + { + AstNode *arg0_node = node->data.fn_call_expr.params.at(0); + IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope); + if (arg0_value == irb->codegen->invalid_instruction) + return arg0_value; + + AstNode *arg1_node = node->data.fn_call_expr.params.at(1); + IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope); + if (arg1_value == irb->codegen->invalid_instruction) + return arg1_value; + + IrInstruction *result = ir_build_float_cast(irb, scope, node, arg0_value, arg1_value); + return ir_lval_wrap(irb, scope, result, lval); + } + case BuiltinFnIdErrSetCast: + { + AstNode *arg0_node = node->data.fn_call_expr.params.at(0); + 
IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope); + if (arg0_value == irb->codegen->invalid_instruction) + return arg0_value; + + AstNode *arg1_node = node->data.fn_call_expr.params.at(1); + IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope); + if (arg1_value == irb->codegen->invalid_instruction) + return arg1_value; + + IrInstruction *result = ir_build_err_set_cast(irb, scope, node, arg0_value, arg1_value); + return ir_lval_wrap(irb, scope, result, lval); + } + case BuiltinFnIdFromBytes: + { + AstNode *arg0_node = node->data.fn_call_expr.params.at(0); + IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope); + if (arg0_value == irb->codegen->invalid_instruction) + return arg0_value; + + AstNode *arg1_node = node->data.fn_call_expr.params.at(1); + IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope); + if (arg1_value == irb->codegen->invalid_instruction) + return arg1_value; + + IrInstruction *result = ir_build_from_bytes(irb, scope, node, arg0_value, arg1_value); + return ir_lval_wrap(irb, scope, result, lval); + } + case BuiltinFnIdToBytes: + { + AstNode *arg0_node = node->data.fn_call_expr.params.at(0); + IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope); + if (arg0_value == irb->codegen->invalid_instruction) + return arg0_value; + + IrInstruction *result = ir_build_to_bytes(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, result, lval); + } + case BuiltinFnIdIntToFloat: + { + AstNode *arg0_node = node->data.fn_call_expr.params.at(0); + IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope); + if (arg0_value == irb->codegen->invalid_instruction) + return arg0_value; + + AstNode *arg1_node = node->data.fn_call_expr.params.at(1); + IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope); + if (arg1_value == irb->codegen->invalid_instruction) + return arg1_value; + + IrInstruction *result = ir_build_int_to_float(irb, scope, node, arg0_value, arg1_value); + return ir_lval_wrap(irb, scope, result, lval); + } + case BuiltinFnIdFloatToInt: + { + AstNode *arg0_node = node->data.fn_call_expr.params.at(0); + IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope); + if (arg0_value == irb->codegen->invalid_instruction) + return arg0_value; + + AstNode *arg1_node = node->data.fn_call_expr.params.at(1); + IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope); + if (arg1_value == irb->codegen->invalid_instruction) + return arg1_value; + + IrInstruction *result = ir_build_float_to_int(irb, scope, node, arg0_value, arg1_value); + return ir_lval_wrap(irb, scope, result, lval); + } + case BuiltinFnIdErrToInt: + { + AstNode *arg0_node = node->data.fn_call_expr.params.at(0); + IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope); + if (arg0_value == irb->codegen->invalid_instruction) + return arg0_value; + + IrInstruction *result = ir_build_err_to_int(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, result, lval); + } + case BuiltinFnIdIntToErr: + { + AstNode *arg0_node = node->data.fn_call_expr.params.at(0); + IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope); + if (arg0_value == irb->codegen->invalid_instruction) + return arg0_value; + + IrInstruction *result = ir_build_int_to_err(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, result, lval); + } + case BuiltinFnIdBoolToInt: + { + AstNode *arg0_node = node->data.fn_call_expr.params.at(0); + IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope); + if (arg0_value == 
irb->codegen->invalid_instruction) + return arg0_value; + + IrInstruction *result = ir_build_bool_to_int(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, result, lval); } case BuiltinFnIdIntType: { @@ -3871,7 +4380,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg1_value == irb->codegen->invalid_instruction) return arg1_value; - return ir_build_int_type(irb, scope, node, arg0_value, arg1_value); + IrInstruction *int_type = ir_build_int_type(irb, scope, node, arg0_value, arg1_value); + return ir_lval_wrap(irb, scope, int_type, lval); } case BuiltinFnIdMemcpy: { @@ -3890,7 +4400,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg2_value == irb->codegen->invalid_instruction) return arg2_value; - return ir_build_memcpy(irb, scope, node, arg0_value, arg1_value, arg2_value); + IrInstruction *ir_memcpy = ir_build_memcpy(irb, scope, node, arg0_value, arg1_value, arg2_value); + return ir_lval_wrap(irb, scope, ir_memcpy, lval); } case BuiltinFnIdMemset: { @@ -3909,7 +4420,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg2_value == irb->codegen->invalid_instruction) return arg2_value; - return ir_build_memset(irb, scope, node, arg0_value, arg1_value, arg2_value); + IrInstruction *ir_memset = ir_build_memset(irb, scope, node, arg0_value, arg1_value, arg2_value); + return ir_lval_wrap(irb, scope, ir_memset, lval); } case BuiltinFnIdMemberCount: { @@ -3918,7 +4430,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg0_value == irb->codegen->invalid_instruction) return arg0_value; - return ir_build_member_count(irb, scope, node, arg0_value); + IrInstruction *member_count = ir_build_member_count(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, member_count, lval); } case BuiltinFnIdMemberType: { @@ -3933,7 +4446,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg1_value; - return ir_build_member_type(irb, scope, node, arg0_value, arg1_value); + IrInstruction *member_type = ir_build_member_type(irb, scope, node, arg0_value, arg1_value); + return ir_lval_wrap(irb, scope, member_type, lval); } case BuiltinFnIdMemberName: { @@ -3948,14 +4462,54 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg1_value; - return ir_build_member_name(irb, scope, node, arg0_value, arg1_value); + IrInstruction *member_name = ir_build_member_name(irb, scope, node, arg0_value, arg1_value); + return ir_lval_wrap(irb, scope, member_name, lval); + } + case BuiltinFnIdField: + { + AstNode *arg0_node = node->data.fn_call_expr.params.at(0); + IrInstruction *arg0_value = ir_gen_node_extra(irb, arg0_node, scope, LValPtr); + if (arg0_value == irb->codegen->invalid_instruction) + return arg0_value; + + AstNode *arg1_node = node->data.fn_call_expr.params.at(1); + IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope); + if (arg1_value == irb->codegen->invalid_instruction) + return arg1_value; + + IrInstruction *ptr_instruction = ir_build_field_ptr_instruction(irb, scope, node, arg0_value, arg1_value); + + if (lval == LValPtr) + return ptr_instruction; + + return ir_build_load_ptr(irb, scope, node, ptr_instruction); + } + case BuiltinFnIdTypeInfo: + { + AstNode *arg0_node = node->data.fn_call_expr.params.at(0); + IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope); + if (arg0_value == irb->codegen->invalid_instruction) + return arg0_value; 
+ + IrInstruction *type_info = ir_build_type_info(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, type_info, lval); } case BuiltinFnIdBreakpoint: - return ir_build_breakpoint(irb, scope, node); + return ir_lval_wrap(irb, scope, ir_build_breakpoint(irb, scope, node), lval); case BuiltinFnIdReturnAddress: - return ir_build_return_address(irb, scope, node); + return ir_lval_wrap(irb, scope, ir_build_return_address(irb, scope, node), lval); case BuiltinFnIdFrameAddress: - return ir_build_frame_address(irb, scope, node); + return ir_lval_wrap(irb, scope, ir_build_frame_address(irb, scope, node), lval); + case BuiltinFnIdHandle: + if (!irb->exec->fn_entry) { + add_node_error(irb->codegen, node, buf_sprintf("@handle() called outside of function definition")); + return irb->codegen->invalid_instruction; + } + if (!is_async) { + add_node_error(irb->codegen, node, buf_sprintf("@handle() in non-async function")); + return irb->codegen->invalid_instruction; + } + return ir_lval_wrap(irb, scope, ir_build_handle(irb, scope, node), lval); case BuiltinFnIdAlignOf: { AstNode *arg0_node = node->data.fn_call_expr.params.at(0); @@ -3963,16 +4517,17 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg0_value == irb->codegen->invalid_instruction) return arg0_value; - return ir_build_align_of(irb, scope, node, arg0_value); + IrInstruction *align_of = ir_build_align_of(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, align_of, lval); } case BuiltinFnIdAddWithOverflow: - return ir_gen_overflow_op(irb, scope, node, IrOverflowOpAdd); + return ir_lval_wrap(irb, scope, ir_gen_overflow_op(irb, scope, node, IrOverflowOpAdd), lval); case BuiltinFnIdSubWithOverflow: - return ir_gen_overflow_op(irb, scope, node, IrOverflowOpSub); + return ir_lval_wrap(irb, scope, ir_gen_overflow_op(irb, scope, node, IrOverflowOpSub), lval); case BuiltinFnIdMulWithOverflow: - return ir_gen_overflow_op(irb, scope, node, IrOverflowOpMul); + return ir_lval_wrap(irb, scope, ir_gen_overflow_op(irb, scope, node, IrOverflowOpMul), lval); case BuiltinFnIdShlWithOverflow: - return ir_gen_overflow_op(irb, scope, node, IrOverflowOpShl); + return ir_lval_wrap(irb, scope, ir_gen_overflow_op(irb, scope, node, IrOverflowOpShl), lval); case BuiltinFnIdTypeName: { AstNode *arg0_node = node->data.fn_call_expr.params.at(0); @@ -3980,21 +4535,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg0_value == irb->codegen->invalid_instruction) return arg0_value; - return ir_build_type_name(irb, scope, node, arg0_value); - } - case BuiltinFnIdCanImplicitCast: - { - AstNode *arg0_node = node->data.fn_call_expr.params.at(0); - IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope); - if (arg0_value == irb->codegen->invalid_instruction) - return arg0_value; - - AstNode *arg1_node = node->data.fn_call_expr.params.at(1); - IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope); - if (arg1_value == irb->codegen->invalid_instruction) - return arg1_value; - - return ir_build_can_implicit_cast(irb, scope, node, arg0_value, arg1_value); + IrInstruction *type_name = ir_build_type_name(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, type_name, lval); } case BuiltinFnIdPanic: { @@ -4003,7 +4545,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg0_value == irb->codegen->invalid_instruction) return arg0_value; - return ir_build_panic(irb, scope, node, arg0_value); + IrInstruction *panic = 
ir_build_panic(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, panic, lval); } case BuiltinFnIdPtrCast: { @@ -4017,7 +4560,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg1_value == irb->codegen->invalid_instruction) return arg1_value; - return ir_build_ptr_cast(irb, scope, node, arg0_value, arg1_value); + IrInstruction *ptr_cast = ir_build_ptr_cast(irb, scope, node, arg0_value, arg1_value); + return ir_lval_wrap(irb, scope, ptr_cast, lval); } case BuiltinFnIdBitCast: { @@ -4031,7 +4575,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg1_value == irb->codegen->invalid_instruction) return arg1_value; - return ir_build_bit_cast(irb, scope, node, arg0_value, arg1_value); + IrInstruction *bit_cast = ir_build_bit_cast(irb, scope, node, arg0_value, arg1_value); + return ir_lval_wrap(irb, scope, bit_cast, lval); } case BuiltinFnIdIntToPtr: { @@ -4045,7 +4590,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg1_value == irb->codegen->invalid_instruction) return arg1_value; - return ir_build_int_to_ptr(irb, scope, node, arg0_value, arg1_value); + IrInstruction *int_to_ptr = ir_build_int_to_ptr(irb, scope, node, arg0_value, arg1_value); + return ir_lval_wrap(irb, scope, int_to_ptr, lval); } case BuiltinFnIdPtrToInt: { @@ -4054,7 +4600,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg0_value == irb->codegen->invalid_instruction) return arg0_value; - return ir_build_ptr_to_int(irb, scope, node, arg0_value); + IrInstruction *ptr_to_int = ir_build_ptr_to_int(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, ptr_to_int, lval); } case BuiltinFnIdTagName: { @@ -4064,7 +4611,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg0_value; IrInstruction *actual_tag = ir_build_union_tag(irb, scope, node, arg0_value); - return ir_build_tag_name(irb, scope, node, actual_tag); + IrInstruction *tag_name = ir_build_tag_name(irb, scope, node, actual_tag); + return ir_lval_wrap(irb, scope, tag_name, lval); } case BuiltinFnIdTagType: { @@ -4073,7 +4621,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg0_value == irb->codegen->invalid_instruction) return arg0_value; - return ir_build_tag_type(irb, scope, node, arg0_value); + IrInstruction *tag_type = ir_build_tag_type(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, tag_type, lval); } case BuiltinFnIdFieldParentPtr: { @@ -4092,7 +4641,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg2_value == irb->codegen->invalid_instruction) return arg2_value; - return ir_build_field_parent_ptr(irb, scope, node, arg0_value, arg1_value, arg2_value, nullptr); + IrInstruction *field_parent_ptr = ir_build_field_parent_ptr(irb, scope, node, arg0_value, arg1_value, arg2_value, nullptr); + return ir_lval_wrap(irb, scope, field_parent_ptr, lval); } case BuiltinFnIdOffsetOf: { @@ -4106,7 +4656,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg1_value == irb->codegen->invalid_instruction) return arg1_value; - return ir_build_offset_of(irb, scope, node, arg0_value, arg1_value); + IrInstruction *offset_of = ir_build_offset_of(irb, scope, node, arg0_value, arg1_value); + return ir_lval_wrap(irb, scope, offset_of, lval); } case BuiltinFnIdInlineCall: case BuiltinFnIdNoInlineCall: @@ -4132,7 +4683,38 @@ static 
IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo } FnInline fn_inline = (builtin_fn->id == BuiltinFnIdInlineCall) ? FnInlineAlways : FnInlineNever; - return ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, fn_inline, false, nullptr); + IrInstruction *call = ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, fn_inline, false, nullptr, nullptr); + return ir_lval_wrap(irb, scope, call, lval); + } + case BuiltinFnIdNewStackCall: + { + if (node->data.fn_call_expr.params.length == 0) { + add_node_error(irb->codegen, node, buf_sprintf("expected at least 1 argument, found 0")); + return irb->codegen->invalid_instruction; + } + + AstNode *new_stack_node = node->data.fn_call_expr.params.at(0); + IrInstruction *new_stack = ir_gen_node(irb, new_stack_node, scope); + if (new_stack == irb->codegen->invalid_instruction) + return new_stack; + + AstNode *fn_ref_node = node->data.fn_call_expr.params.at(1); + IrInstruction *fn_ref = ir_gen_node(irb, fn_ref_node, scope); + if (fn_ref == irb->codegen->invalid_instruction) + return fn_ref; + + size_t arg_count = node->data.fn_call_expr.params.length - 2; + + IrInstruction **args = allocate<IrInstruction*>(arg_count); + for (size_t i = 0; i < arg_count; i += 1) { + AstNode *arg_node = node->data.fn_call_expr.params.at(i + 2); + args[i] = ir_gen_node(irb, arg_node, scope); + if (args[i] == irb->codegen->invalid_instruction) + return args[i]; + } + + IrInstruction *call = ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, FnInlineAuto, false, nullptr, new_stack); + return ir_lval_wrap(irb, scope, call, lval); } case BuiltinFnIdTypeId: { @@ -4141,7 +4723,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg0_value == irb->codegen->invalid_instruction) return arg0_value; - return ir_build_type_id(irb, scope, node, arg0_value); + IrInstruction *type_id = ir_build_type_id(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, type_id, lval); } case BuiltinFnIdShlExact: { @@ -4155,7 +4738,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg1_value == irb->codegen->invalid_instruction) return arg1_value; - return ir_build_bin_op(irb, scope, node, IrBinOpBitShiftLeftExact, arg0_value, arg1_value, true); + IrInstruction *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpBitShiftLeftExact, arg0_value, arg1_value, true); + return ir_lval_wrap(irb, scope, bin_op, lval); } case BuiltinFnIdShrExact: { @@ -4169,7 +4753,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg1_value == irb->codegen->invalid_instruction) return arg1_value; - return ir_build_bin_op(irb, scope, node, IrBinOpBitShiftRightExact, arg0_value, arg1_value, true); + IrInstruction *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpBitShiftRightExact, arg0_value, arg1_value, true); + return ir_lval_wrap(irb, scope, bin_op, lval); } case BuiltinFnIdSetEvalBranchQuota: { @@ -4178,7 +4763,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg0_value == irb->codegen->invalid_instruction) return arg0_value; - return ir_build_set_eval_branch_quota(irb, scope, node, arg0_value); + IrInstruction *set_eval_branch_quota = ir_build_set_eval_branch_quota(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, set_eval_branch_quota, lval); } case BuiltinFnIdAlignCast: { @@ -4192,10 +4778,14 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, 
Scope *scope, AstNo if (arg1_value == irb->codegen->invalid_instruction) return arg1_value; - return ir_build_align_cast(irb, scope, node, arg0_value, arg1_value); + IrInstruction *align_cast = ir_build_align_cast(irb, scope, node, arg0_value, arg1_value); + return ir_lval_wrap(irb, scope, align_cast, lval); } case BuiltinFnIdOpaqueType: - return ir_build_opaque_type(irb, scope, node); + { + IrInstruction *opaque_type = ir_build_opaque_type(irb, scope, node); + return ir_lval_wrap(irb, scope, opaque_type, lval); + } case BuiltinFnIdSetAlignStack: { AstNode *arg0_node = node->data.fn_call_expr.params.at(0); @@ -4203,7 +4793,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg0_value == irb->codegen->invalid_instruction) return arg0_value; - return ir_build_set_align_stack(irb, scope, node, arg0_value); + IrInstruction *set_align_stack = ir_build_set_align_stack(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, set_align_stack, lval); } case BuiltinFnIdArgType: { @@ -4217,7 +4808,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg1_value == irb->codegen->invalid_instruction) return arg1_value; - return ir_build_arg_type(irb, scope, node, arg0_value, arg1_value); + IrInstruction *arg_type = ir_build_arg_type(irb, scope, node, arg0_value, arg1_value); + return ir_lval_wrap(irb, scope, arg_type, lval); } case BuiltinFnIdExport: { @@ -4236,11 +4828,13 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg2_value == irb->codegen->invalid_instruction) return arg2_value; - return ir_build_export(irb, scope, node, arg0_value, arg1_value, arg2_value); + IrInstruction *ir_export = ir_build_export(irb, scope, node, arg0_value, arg1_value, arg2_value); + return ir_lval_wrap(irb, scope, ir_export, lval); } case BuiltinFnIdErrorReturnTrace: { - return ir_build_error_return_trace(irb, scope, node, IrInstructionErrorReturnTrace::Null); + IrInstruction *error_return_trace = ir_build_error_return_trace(irb, scope, node, IrInstructionErrorReturnTrace::Null); + return ir_lval_wrap(irb, scope, error_return_trace, lval); } case BuiltinFnIdAtomicRmw: { @@ -4274,15 +4868,61 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo // these 2 values don't mean anything since we passed non-null values for other args AtomicRmwOp_xchg, AtomicOrderMonotonic); } + case BuiltinFnIdAtomicLoad: + { + AstNode *arg0_node = node->data.fn_call_expr.params.at(0); + IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope); + if (arg0_value == irb->codegen->invalid_instruction) + return arg0_value; + + AstNode *arg1_node = node->data.fn_call_expr.params.at(1); + IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope); + if (arg1_value == irb->codegen->invalid_instruction) + return arg1_value; + + AstNode *arg2_node = node->data.fn_call_expr.params.at(2); + IrInstruction *arg2_value = ir_gen_node(irb, arg2_node, scope); + if (arg2_value == irb->codegen->invalid_instruction) + return arg2_value; + + return ir_build_atomic_load(irb, scope, node, arg0_value, arg1_value, arg2_value, + // this value does not mean anything since we passed non-null values for other arg + AtomicOrderMonotonic); + } + case BuiltinFnIdIntToEnum: + { + AstNode *arg0_node = node->data.fn_call_expr.params.at(0); + IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope); + if (arg0_value == irb->codegen->invalid_instruction) + return arg0_value; + + AstNode *arg1_node = 
node->data.fn_call_expr.params.at(1); + IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope); + if (arg1_value == irb->codegen->invalid_instruction) + return arg1_value; + + IrInstruction *result = ir_build_int_to_enum(irb, scope, node, arg0_value, arg1_value); + return ir_lval_wrap(irb, scope, result, lval); + } + case BuiltinFnIdEnumToInt: + { + AstNode *arg0_node = node->data.fn_call_expr.params.at(0); + IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope); + if (arg0_value == irb->codegen->invalid_instruction) + return arg0_value; + + IrInstruction *result = ir_build_enum_to_int(irb, scope, node, arg0_value); + return ir_lval_wrap(irb, scope, result, lval); + } } zig_unreachable(); } -static IrInstruction *ir_gen_fn_call(IrBuilder *irb, Scope *scope, AstNode *node) { +static IrInstruction *ir_gen_fn_call(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval) { assert(node->type == NodeTypeFnCallExpr); if (node->data.fn_call_expr.is_builtin) - return ir_gen_builtin_fn_call(irb, scope, node); + return ir_gen_builtin_fn_call(irb, scope, node, lval); AstNode *fn_ref_node = node->data.fn_call_expr.fn_ref_expr; IrInstruction *fn_ref = ir_gen_node(irb, fn_ref_node, scope); @@ -4308,7 +4948,8 @@ static IrInstruction *ir_gen_fn_call(IrBuilder *irb, Scope *scope, AstNode *node } } - return ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, FnInlineAuto, is_async, async_allocator); + IrInstruction *fn_call = ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, FnInlineAuto, is_async, async_allocator, nullptr); + return ir_lval_wrap(irb, scope, fn_call, lval); } static IrInstruction *ir_gen_if_bool_expr(IrBuilder *irb, Scope *scope, AstNode *node) { @@ -4379,30 +5020,28 @@ static IrInstruction *ir_gen_prefix_op_id_lval(IrBuilder *irb, Scope *scope, Ast } static IrInstruction *ir_gen_prefix_op_id(IrBuilder *irb, Scope *scope, AstNode *node, IrUnOp op_id) { - return ir_gen_prefix_op_id_lval(irb, scope, node, op_id, LVAL_NONE); + return ir_gen_prefix_op_id_lval(irb, scope, node, op_id, LValNone); } static IrInstruction *ir_lval_wrap(IrBuilder *irb, Scope *scope, IrInstruction *value, LVal lval) { - if (!lval.is_ptr) + if (lval != LValPtr) return value; if (value == irb->codegen->invalid_instruction) return value; // We needed a pointer to a value, but we got a value. So we create // an instruction which just makes a const pointer of it. - return ir_build_ref(irb, scope, value->source_node, value, lval.is_const, lval.is_volatile); + return ir_build_ref(irb, scope, value->source_node, value, false, false); } -static IrInstruction *ir_gen_address_of(IrBuilder *irb, Scope *scope, AstNode *node) { - assert(node->type == NodeTypeAddrOfExpr); - bool is_const = node->data.addr_of_expr.is_const; - bool is_volatile = node->data.addr_of_expr.is_volatile; - AstNode *expr_node = node->data.addr_of_expr.op_expr; - AstNode *align_expr = node->data.addr_of_expr.align_expr; - - if (align_expr == nullptr && !is_const && !is_volatile) { - return ir_gen_node_extra(irb, expr_node, scope, make_lval_addr(is_const, is_volatile)); - } +static IrInstruction *ir_gen_pointer_type(IrBuilder *irb, Scope *scope, AstNode *node) { + assert(node->type == NodeTypePointerType); + PtrLen ptr_len = (node->data.pointer_type.star_token->id == TokenIdStar || + node->data.pointer_type.star_token->id == TokenIdStarStar) ? 
PtrLenSingle : PtrLenUnknown; + bool is_const = node->data.pointer_type.is_const; + bool is_volatile = node->data.pointer_type.is_volatile; + AstNode *expr_node = node->data.pointer_type.op_expr; + AstNode *align_expr = node->data.pointer_type.align_expr; IrInstruction *align_value; if (align_expr != nullptr) { @@ -4418,27 +5057,27 @@ static IrInstruction *ir_gen_address_of(IrBuilder *irb, Scope *scope, AstNode *n return child_type; uint32_t bit_offset_start = 0; - if (node->data.addr_of_expr.bit_offset_start != nullptr) { - if (!bigint_fits_in_bits(node->data.addr_of_expr.bit_offset_start, 32, false)) { + if (node->data.pointer_type.bit_offset_start != nullptr) { + if (!bigint_fits_in_bits(node->data.pointer_type.bit_offset_start, 32, false)) { Buf *val_buf = buf_alloc(); - bigint_append_buf(val_buf, node->data.addr_of_expr.bit_offset_start, 10); + bigint_append_buf(val_buf, node->data.pointer_type.bit_offset_start, 10); exec_add_error_node(irb->codegen, irb->exec, node, buf_sprintf("value %s too large for u32 bit offset", buf_ptr(val_buf))); return irb->codegen->invalid_instruction; } - bit_offset_start = bigint_as_unsigned(node->data.addr_of_expr.bit_offset_start); + bit_offset_start = bigint_as_unsigned(node->data.pointer_type.bit_offset_start); } uint32_t bit_offset_end = 0; - if (node->data.addr_of_expr.bit_offset_end != nullptr) { - if (!bigint_fits_in_bits(node->data.addr_of_expr.bit_offset_end, 32, false)) { + if (node->data.pointer_type.bit_offset_end != nullptr) { + if (!bigint_fits_in_bits(node->data.pointer_type.bit_offset_end, 32, false)) { Buf *val_buf = buf_alloc(); - bigint_append_buf(val_buf, node->data.addr_of_expr.bit_offset_end, 10); + bigint_append_buf(val_buf, node->data.pointer_type.bit_offset_end, 10); exec_add_error_node(irb->codegen, irb->exec, node, buf_sprintf("value %s too large for u32 bit offset", buf_ptr(val_buf))); return irb->codegen->invalid_instruction; } - bit_offset_end = bigint_as_unsigned(node->data.addr_of_expr.bit_offset_end); + bit_offset_end = bigint_as_unsigned(node->data.pointer_type.bit_offset_end); } if ((bit_offset_start != 0 || bit_offset_end != 0) && bit_offset_start >= bit_offset_end) { @@ -4447,14 +5086,14 @@ static IrInstruction *ir_gen_address_of(IrBuilder *irb, Scope *scope, AstNode *n return irb->codegen->invalid_instruction; } - return ir_build_ptr_type_of(irb, scope, node, child_type, is_const, is_volatile, - align_value, bit_offset_start, bit_offset_end); + return ir_build_ptr_type(irb, scope, node, child_type, is_const, is_volatile, + ptr_len, align_value, bit_offset_start, bit_offset_end); } static IrInstruction *ir_gen_err_assert_ok(IrBuilder *irb, Scope *scope, AstNode *source_node, AstNode *expr_node, LVal lval) { - IrInstruction *err_union_ptr = ir_gen_node_extra(irb, expr_node, scope, LVAL_PTR); + IrInstruction *err_union_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr); if (err_union_ptr == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; @@ -4462,27 +5101,12 @@ static IrInstruction *ir_gen_err_assert_ok(IrBuilder *irb, Scope *scope, AstNode if (payload_ptr == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; - if (lval.is_ptr) + if (lval == LValPtr) return payload_ptr; return ir_build_load_ptr(irb, scope, source_node, payload_ptr); } -static IrInstruction *ir_gen_maybe_assert_ok(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval) { - assert(node->type == NodeTypePrefixOpExpr); - AstNode *expr_node = node->data.prefix_op_expr.primary_expr; - - IrInstruction 
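The bit-offset handling in ir_gen_pointer_type narrows an arbitrary-precision literal to u32 only after checking that it fits, then rejects a non-empty range whose start is not below its end. The same two checks in plain C++, with u64 standing in for the compiler's BigInt (a sketch, not the real validation path):

// Illustrative narrowing check: refuse values that do not fit in u32,
// then validate the start/end ordering of an explicit bit range.
#include <cstdint>
#include <optional>

static std::optional<uint32_t> narrow_to_u32(uint64_t value) {
    if (value > UINT32_MAX)
        return std::nullopt;        // "value ... too large for u32 bit offset"
    return static_cast<uint32_t>(value);
}

static bool bit_range_valid(uint32_t start, uint32_t end) {
    if (start == 0 && end == 0)
        return true;                // no explicit bit range was given
    return start < end;             // a non-empty range must be properly ordered
}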
*maybe_ptr = ir_gen_node_extra(irb, expr_node, scope, LVAL_PTR); - if (maybe_ptr == irb->codegen->invalid_instruction) - return irb->codegen->invalid_instruction; - - IrInstruction *unwrapped_ptr = ir_build_unwrap_maybe(irb, scope, node, maybe_ptr, true); - if (lval.is_ptr) - return unwrapped_ptr; - - return ir_build_load_ptr(irb, scope, node, unwrapped_ptr); -} - static IrInstruction *ir_gen_bool_not(IrBuilder *irb, Scope *scope, AstNode *node) { assert(node->type == NodeTypePrefixOpExpr); AstNode *expr_node = node->data.prefix_op_expr.primary_expr; @@ -4510,12 +5134,12 @@ static IrInstruction *ir_gen_prefix_op_expr(IrBuilder *irb, Scope *scope, AstNod return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpNegation), lval); case PrefixOpNegationWrap: return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpNegationWrap), lval); - case PrefixOpDereference: - return ir_gen_prefix_op_id_lval(irb, scope, node, IrUnOpDereference, lval); - case PrefixOpMaybe: - return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpMaybe), lval); - case PrefixOpUnwrapMaybe: - return ir_gen_maybe_assert_ok(irb, scope, node, lval); + case PrefixOpOptional: + return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpOptional), lval); + case PrefixOpAddrOf: { + AstNode *expr_node = node->data.prefix_op_expr.primary_expr; + return ir_lval_wrap(irb, scope, ir_gen_node_extra(irb, expr_node, scope, LValPtr), lval); + } } zig_unreachable(); } @@ -4570,6 +5194,11 @@ static IrInstruction *ir_gen_var_decl(IrBuilder *irb, Scope *scope, AstNode *nod AstNodeVariableDeclaration *variable_declaration = &node->data.variable_declaration; + if (buf_eql_str(variable_declaration->symbol, "_")) { + add_node_error(irb->codegen, node, buf_sprintf("`_` is not a declarable symbol")); + return irb->codegen->invalid_instruction; + } + IrInstruction *type_instruction; if (variable_declaration->type != nullptr) { type_instruction = ir_gen_node(irb, variable_declaration->type, scope); @@ -4582,6 +5211,7 @@ static IrInstruction *ir_gen_var_decl(IrBuilder *irb, Scope *scope, AstNode *nod bool is_shadowable = false; bool is_const = variable_declaration->is_const; bool is_extern = variable_declaration->is_extern; + IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, ir_should_inline(irb->exec, scope) || variable_declaration->is_comptime); VariableTableEntry *var = ir_create_var(irb, node, scope, variable_declaration->symbol, @@ -4654,7 +5284,7 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n } else { payload_scope = scope; } - IrInstruction *err_val_ptr = ir_gen_node_extra(irb, node->data.while_expr.condition, scope, LVAL_PTR); + IrInstruction *err_val_ptr = ir_gen_node_extra(irb, node->data.while_expr.condition, scope, LValPtr); if (err_val_ptr == irb->codegen->invalid_instruction) return err_val_ptr; IrInstruction *err_val = ir_build_load_ptr(irb, scope, node->data.while_expr.condition, err_val_ptr); @@ -4689,8 +5319,10 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n if (body_result == irb->codegen->invalid_instruction) return body_result; - if (!instr_is_unreachable(body_result)) + if (!instr_is_unreachable(body_result)) { + ir_mark_gen(ir_build_check_statement_is_void(irb, payload_scope, node->data.while_expr.body, body_result)); ir_mark_gen(ir_build_br(irb, payload_scope, node, continue_block, is_comptime)); + } if (continue_expr_node) { ir_set_cursor_at_end_and_append_block(irb, 
continue_block); @@ -4737,7 +5369,7 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n VariableTableEntry *payload_var = ir_create_var(irb, symbol_node, scope, var_symbol, true, false, false, is_comptime); Scope *child_scope = payload_var->child_scope; - IrInstruction *maybe_val_ptr = ir_gen_node_extra(irb, node->data.while_expr.condition, scope, LVAL_PTR); + IrInstruction *maybe_val_ptr = ir_gen_node_extra(irb, node->data.while_expr.condition, scope, LValPtr); if (maybe_val_ptr == irb->codegen->invalid_instruction) return maybe_val_ptr; IrInstruction *maybe_val = ir_build_load_ptr(irb, scope, node->data.while_expr.condition, maybe_val_ptr); @@ -4769,8 +5401,10 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n if (body_result == irb->codegen->invalid_instruction) return body_result; - if (!instr_is_unreachable(body_result)) + if (!instr_is_unreachable(body_result)) { + ir_mark_gen(ir_build_check_statement_is_void(irb, child_scope, node->data.while_expr.body, body_result)); ir_mark_gen(ir_build_br(irb, child_scope, node, continue_block, is_comptime)); + } if (continue_expr_node) { ir_set_cursor_at_end_and_append_block(irb, continue_block); @@ -4830,8 +5464,10 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n if (body_result == irb->codegen->invalid_instruction) return body_result; - if (!instr_is_unreachable(body_result)) + if (!instr_is_unreachable(body_result)) { + ir_mark_gen(ir_build_check_statement_is_void(irb, scope, node->data.while_expr.body, body_result)); ir_mark_gen(ir_build_br(irb, scope, node, continue_block, is_comptime)); + } if (continue_expr_node) { ir_set_cursor_at_end_and_append_block(irb, continue_block); @@ -4881,7 +5517,7 @@ static IrInstruction *ir_gen_for_expr(IrBuilder *irb, Scope *parent_scope, AstNo } assert(elem_node->type == NodeTypeSymbol); - IrInstruction *array_val_ptr = ir_gen_node_extra(irb, array_node, parent_scope, LVAL_PTR); + IrInstruction *array_val_ptr = ir_gen_node_extra(irb, array_node, parent_scope, LValPtr); if (array_val_ptr == irb->codegen->invalid_instruction) return array_val_ptr; @@ -4905,7 +5541,7 @@ static IrInstruction *ir_gen_for_expr(IrBuilder *irb, Scope *parent_scope, AstNo IrInstruction *undefined_value = ir_build_const_undefined(irb, child_scope, elem_node); ir_build_var_decl(irb, child_scope, elem_node, elem_var, elem_var_type, nullptr, undefined_value); - IrInstruction *elem_var_ptr = ir_build_var_ptr(irb, child_scope, node, elem_var, false, false); + IrInstruction *elem_var_ptr = ir_build_var_ptr(irb, child_scope, node, elem_var); AstNode *index_var_source_node; VariableTableEntry *index_var; @@ -4923,7 +5559,7 @@ static IrInstruction *ir_gen_for_expr(IrBuilder *irb, Scope *parent_scope, AstNo IrInstruction *zero = ir_build_const_usize(irb, child_scope, node, 0); IrInstruction *one = ir_build_const_usize(irb, child_scope, node, 1); ir_build_var_decl(irb, child_scope, index_var_source_node, index_var, usize, nullptr, zero); - IrInstruction *index_ptr = ir_build_var_ptr(irb, child_scope, node, index_var, false, false); + IrInstruction *index_ptr = ir_build_var_ptr(irb, child_scope, node, index_var); IrBasicBlock *cond_block = ir_create_basic_block(irb, child_scope, "ForCond"); @@ -4943,7 +5579,7 @@ static IrInstruction *ir_gen_for_expr(IrBuilder *irb, Scope *parent_scope, AstNo ir_mark_gen(ir_build_cond_br(irb, child_scope, node, cond, body_block, else_block, is_comptime)); ir_set_cursor_at_end_and_append_block(irb, body_block); - 
IrInstruction *elem_ptr = ir_build_elem_ptr(irb, child_scope, node, array_val_ptr, index_val, false); + IrInstruction *elem_ptr = ir_build_elem_ptr(irb, child_scope, node, array_val_ptr, index_val, false, PtrLenSingle); IrInstruction *elem_val; if (node->data.for_expr.elem_is_ptr) { elem_val = elem_ptr; @@ -5168,16 +5804,16 @@ static IrInstruction *ir_gen_test_expr(IrBuilder *irb, Scope *scope, AstNode *no AstNode *else_node = node->data.test_expr.else_node; bool var_is_ptr = node->data.test_expr.var_is_ptr; - IrInstruction *maybe_val_ptr = ir_gen_node_extra(irb, expr_node, scope, LVAL_PTR); + IrInstruction *maybe_val_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr); if (maybe_val_ptr == irb->codegen->invalid_instruction) return maybe_val_ptr; IrInstruction *maybe_val = ir_build_load_ptr(irb, scope, node, maybe_val_ptr); IrInstruction *is_non_null = ir_build_test_nonnull(irb, scope, node, maybe_val); - IrBasicBlock *then_block = ir_create_basic_block(irb, scope, "MaybeThen"); - IrBasicBlock *else_block = ir_create_basic_block(irb, scope, "MaybeElse"); - IrBasicBlock *endif_block = ir_create_basic_block(irb, scope, "MaybeEndIf"); + IrBasicBlock *then_block = ir_create_basic_block(irb, scope, "OptionalThen"); + IrBasicBlock *else_block = ir_create_basic_block(irb, scope, "OptionalElse"); + IrBasicBlock *endif_block = ir_create_basic_block(irb, scope, "OptionalEndIf"); IrInstruction *is_comptime; if (ir_should_inline(irb->exec, scope)) { @@ -5246,7 +5882,7 @@ static IrInstruction *ir_gen_if_err_expr(IrBuilder *irb, Scope *scope, AstNode * Buf *var_symbol = node->data.if_err_expr.var_symbol; Buf *err_symbol = node->data.if_err_expr.err_symbol; - IrInstruction *err_val_ptr = ir_gen_node_extra(irb, target_node, scope, LVAL_PTR); + IrInstruction *err_val_ptr = ir_gen_node_extra(irb, target_node, scope, LValPtr); if (err_val_ptr == irb->codegen->invalid_instruction) return err_val_ptr; @@ -5372,7 +6008,7 @@ static IrInstruction *ir_gen_switch_expr(IrBuilder *irb, Scope *scope, AstNode * assert(node->type == NodeTypeSwitchExpr); AstNode *target_node = node->data.switch_expr.expr; - IrInstruction *target_value_ptr = ir_gen_node_extra(irb, target_node, scope, LVAL_PTR); + IrInstruction *target_value_ptr = ir_gen_node_extra(irb, target_node, scope, LValPtr); if (target_value_ptr == irb->codegen->invalid_instruction) return target_value_ptr; IrInstruction *target_value = ir_build_switch_target(irb, scope, node, target_value_ptr); @@ -5536,13 +6172,13 @@ static IrInstruction *ir_gen_switch_expr(IrBuilder *irb, Scope *scope, AstNode * } - ir_build_check_switch_prongs(irb, scope, node, target_value, check_ranges.items, check_ranges.length, + IrInstruction *switch_prongs_void = ir_build_check_switch_prongs(irb, scope, node, target_value, check_ranges.items, check_ranges.length, else_prong != nullptr); if (cases.length == 0) { ir_build_br(irb, scope, node, else_block, is_comptime); } else { - ir_build_switch_br(irb, scope, node, target_value, else_block, cases.length, cases.items, is_comptime); + ir_build_switch_br(irb, scope, node, target_value, else_block, cases.length, cases.items, is_comptime, switch_prongs_void); } if (!else_prong) { @@ -5630,6 +6266,9 @@ static IrInstruction *ir_gen_break(IrBuilder *irb, Scope *break_scope, AstNode * assert(this_block_scope->end_block != nullptr); return ir_gen_return_from_block(irb, break_scope, node, this_block_scope); } + } else if (search_scope->id == ScopeIdSuspend) { + add_node_error(irb->codegen, node, buf_sprintf("cannot break out of suspend block")); + 
return irb->codegen->invalid_instruction; } search_scope = search_scope->parent; } @@ -5729,7 +6368,7 @@ static IrInstruction *ir_gen_slice(IrBuilder *irb, Scope *scope, AstNode *node) AstNode *start_node = slice_expr->start; AstNode *end_node = slice_expr->end; - IrInstruction *ptr_value = ir_gen_node_extra(irb, array_node, scope, LVAL_PTR); + IrInstruction *ptr_value = ir_gen_node_extra(irb, array_node, scope, LValPtr); if (ptr_value == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; @@ -5763,11 +6402,11 @@ static IrInstruction *ir_gen_err_ok_or(IrBuilder *irb, Scope *parent_scope, AstN add_node_error(irb->codegen, var_node, buf_sprintf("unused variable: '%s'", buf_ptr(var_name))); return irb->codegen->invalid_instruction; } - return ir_gen_err_assert_ok(irb, parent_scope, node, op1_node, LVAL_NONE); + return ir_gen_err_assert_ok(irb, parent_scope, node, op1_node, LValNone); } - IrInstruction *err_union_ptr = ir_gen_node_extra(irb, op1_node, parent_scope, LVAL_PTR); + IrInstruction *err_union_ptr = ir_gen_node_extra(irb, op1_node, parent_scope, LValPtr); if (err_union_ptr == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; @@ -5970,16 +6609,10 @@ static IrInstruction *ir_gen_err_set_decl(IrBuilder *irb, Scope *parent_scope, A buf_init_from_buf(&err_set_type->name, type_name); err_set_type->is_copyable = true; err_set_type->data.error_set.err_count = err_count; - - if (err_count == 0) { - err_set_type->zero_bits = true; - err_set_type->di_type = irb->codegen->builtin_types.entry_void->di_type; - } else { - err_set_type->type_ref = irb->codegen->builtin_types.entry_global_error_set->type_ref; - err_set_type->di_type = irb->codegen->builtin_types.entry_global_error_set->di_type; - irb->codegen->error_di_types.append(&err_set_type->di_type); - err_set_type->data.error_set.errors = allocate<ErrorTableEntry *>(err_count); - } + err_set_type->type_ref = irb->codegen->builtin_types.entry_global_error_set->type_ref; + err_set_type->di_type = irb->codegen->builtin_types.entry_global_error_set->di_type; + irb->codegen->error_di_types.append(&err_set_type->di_type); + err_set_type->data.error_set.errors = allocate<ErrorTableEntry *>(err_count); ErrorTableEntry **errors = allocate<ErrorTableEntry *>(irb->codegen->errors_by_index.length + err_count); @@ -6071,30 +6704,150 @@ static IrInstruction *ir_gen_fn_proto(IrBuilder *irb, Scope *parent_scope, AstNo async_allocator_type_value, is_var_args); } -static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *parent_scope, AstNode *node) { +static IrInstruction *ir_gen_cancel_target(IrBuilder *irb, Scope *scope, AstNode *node, + IrInstruction *target_inst, bool cancel_non_suspended, bool cancel_awaited) +{ + IrBasicBlock *done_block = ir_create_basic_block(irb, scope, "CancelDone"); + IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled"); + IrBasicBlock *pre_return_block = ir_create_basic_block(irb, scope, "PreReturn"); + IrBasicBlock *post_return_block = ir_create_basic_block(irb, scope, "PostReturn"); + IrBasicBlock *do_cancel_block = ir_create_basic_block(irb, scope, "DoCancel"); + + IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0); + IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize); + IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false); + IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001 + IrInstruction *promise_T_type_val = 
ir_build_const_type(irb, scope, node, + get_promise_type(irb->codegen, irb->codegen->builtin_types.entry_void)); + IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, scope, node, 0x7); // 0b111 + IrInstruction *ptr_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000 + IrInstruction *await_mask = ir_build_const_usize(irb, scope, node, 0x4); // 0b100 + IrInstruction *is_suspended_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010 + + // TODO relies on Zig not re-ordering fields + IrInstruction *casted_target_inst = ir_build_ptr_cast(irb, scope, node, promise_T_type_val, target_inst); + IrInstruction *coro_promise_ptr = ir_build_coro_promise(irb, scope, node, casted_target_inst); + Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME); + IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, + atomic_state_field_name); + + // set the is_canceled bit + IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node, + usize_type_val, atomic_state_ptr, nullptr, is_canceled_mask, nullptr, + AtomicRmwOp_or, AtomicOrderSeqCst); + + IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false); + IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false); + ir_build_cond_br(irb, scope, node, is_canceled_bool, done_block, not_canceled_block, is_comptime); + + ir_set_cursor_at_end_and_append_block(irb, not_canceled_block); + IrInstruction *awaiter_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false); + IrInstruction *is_returned_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpEq, awaiter_addr, ptr_mask, false); + ir_build_cond_br(irb, scope, node, is_returned_bool, post_return_block, pre_return_block, is_comptime); + + ir_set_cursor_at_end_and_append_block(irb, post_return_block); + if (cancel_awaited) { + ir_build_br(irb, scope, node, do_cancel_block, is_comptime); + } else { + IrInstruction *is_awaited_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, await_mask, false); + IrInstruction *is_awaited_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_awaited_value, zero, false); + ir_build_cond_br(irb, scope, node, is_awaited_bool, done_block, do_cancel_block, is_comptime); + } + + ir_set_cursor_at_end_and_append_block(irb, pre_return_block); + if (cancel_awaited) { + if (cancel_non_suspended) { + ir_build_br(irb, scope, node, do_cancel_block, is_comptime); + } else { + IrInstruction *is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false); + IrInstruction *is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false); + ir_build_cond_br(irb, scope, node, is_suspended_bool, do_cancel_block, done_block, is_comptime); + } + } else { + ir_build_br(irb, scope, node, done_block, is_comptime); + } + + ir_set_cursor_at_end_and_append_block(irb, do_cancel_block); + ir_build_cancel(irb, scope, node, target_inst); + ir_build_br(irb, scope, node, done_block, is_comptime); + + ir_set_cursor_at_end_and_append_block(irb, done_block); + return ir_build_const_void(irb, scope, node); +} + +static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node) { assert(node->type == NodeTypeCancel); - IrInstruction *target_inst = ir_gen_node(irb, node->data.cancel_expr.expr, parent_scope); + 
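ir_gen_cancel_target works on a single word stored in the promise: the low three bits are flags (0b001 canceled, 0b010 suspended, 0b100 awaited, as the comments above note) and the remaining bits hold the awaiter's handle address, which the promise alignment leaves free. A standalone sketch of the cancel side of that scheme using std::atomic instead of IR builders; the constant and enum names here are mine, and the branch structure is simplified:

// Sketch of the cancel handshake: set the canceled bit atomically, then let
// the previous value of the packed state word decide what the canceller does.
#include <atomic>
#include <cstdint>

constexpr uintptr_t kCanceled  = 0x1;               // 0b001
constexpr uintptr_t kSuspended = 0x2;               // 0b010
constexpr uintptr_t kAwaited   = 0x4;               // 0b100
constexpr uintptr_t kPtrMask   = ~uintptr_t(0x7);   // awaiter-handle bits

enum class CancelAction { AlreadyCanceled, TargetReturned, TargetSuspended, LeaveIt };

static CancelAction cancel(std::atomic<uintptr_t> &state) {
    uintptr_t prev = state.fetch_or(kCanceled, std::memory_order_seq_cst);
    if (prev & kCanceled)
        return CancelAction::AlreadyCanceled;   // someone already canceled it
    if ((prev & kPtrMask) == kPtrMask)
        return CancelAction::TargetReturned;    // all pointer bits set: it already returned
    if (prev & kSuspended)
        return CancelAction::TargetSuspended;   // suspended: the canceller may destroy it
    return CancelAction::LeaveIt;               // still running: it will observe the bit itself
}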
IrInstruction *target_inst = ir_gen_node(irb, node->data.cancel_expr.expr, scope); if (target_inst == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; - return ir_build_cancel(irb, parent_scope, node, target_inst); + return ir_gen_cancel_target(irb, scope, node, target_inst, false, true); +} + +static IrInstruction *ir_gen_resume_target(IrBuilder *irb, Scope *scope, AstNode *node, + IrInstruction *target_inst) +{ + IrBasicBlock *done_block = ir_create_basic_block(irb, scope, "ResumeDone"); + IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled"); + IrBasicBlock *suspended_block = ir_create_basic_block(irb, scope, "IsSuspended"); + IrBasicBlock *not_suspended_block = ir_create_basic_block(irb, scope, "IsNotSuspended"); + + IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0); + IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001 + IrInstruction *is_suspended_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010 + IrInstruction *and_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, is_suspended_mask); + IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false); + IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize); + IrInstruction *promise_T_type_val = ir_build_const_type(irb, scope, node, + get_promise_type(irb->codegen, irb->codegen->builtin_types.entry_void)); + + // TODO relies on Zig not re-ordering fields + IrInstruction *casted_target_inst = ir_build_ptr_cast(irb, scope, node, promise_T_type_val, target_inst); + IrInstruction *coro_promise_ptr = ir_build_coro_promise(irb, scope, node, casted_target_inst); + Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME); + IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, + atomic_state_field_name); + + // clear the is_suspended bit + IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node, + usize_type_val, atomic_state_ptr, nullptr, and_mask, nullptr, + AtomicRmwOp_and, AtomicOrderSeqCst); + + IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false); + IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false); + ir_build_cond_br(irb, scope, node, is_canceled_bool, done_block, not_canceled_block, is_comptime); + + ir_set_cursor_at_end_and_append_block(irb, not_canceled_block); + IrInstruction *is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false); + IrInstruction *is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false); + ir_build_cond_br(irb, scope, node, is_suspended_bool, suspended_block, not_suspended_block, is_comptime); + + ir_set_cursor_at_end_and_append_block(irb, not_suspended_block); + ir_build_unreachable(irb, scope, node); + + ir_set_cursor_at_end_and_append_block(irb, suspended_block); + ir_build_coro_resume(irb, scope, node, target_inst); + ir_build_br(irb, scope, node, done_block, is_comptime); + + ir_set_cursor_at_end_and_append_block(irb, done_block); + return ir_build_const_void(irb, scope, node); } -static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *parent_scope, AstNode *node) { +static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node) { assert(node->type == NodeTypeResume); - IrInstruction 
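ir_gen_resume_target is the mirror image: it atomically clears the is_suspended bit and only resumes the target if the previous value shows it really was suspended and has not been canceled. A short sketch under the same assumed constants:

// Sketch of the resume side: clear the suspended bit and inspect what was there.
#include <atomic>
#include <cstdint>

constexpr uintptr_t kCanceled  = 0x1;   // 0b001
constexpr uintptr_t kSuspended = 0x2;   // 0b010

// Returns true when the caller should actually resume the target coroutine.
static bool should_resume(std::atomic<uintptr_t> &state) {
    uintptr_t prev = state.fetch_and(~kSuspended, std::memory_order_seq_cst);
    if (prev & kCanceled)
        return false;                    // canceled frames are never resumed
    // Resuming a frame that was not suspended is a bug (unreachable in the generated IR).
    return (prev & kSuspended) != 0;
}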
*target_inst = ir_gen_node(irb, node->data.resume_expr.expr, parent_scope); + IrInstruction *target_inst = ir_gen_node(irb, node->data.resume_expr.expr, scope); if (target_inst == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; - return ir_build_coro_resume(irb, parent_scope, node, target_inst); + return ir_gen_resume_target(irb, scope, node, target_inst); } -static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *parent_scope, AstNode *node) { +static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *node) { assert(node->type == NodeTypeAwaitExpr); - IrInstruction *target_inst = ir_gen_node(irb, node->data.await_expr.expr, parent_scope); + IrInstruction *target_inst = ir_gen_node(irb, node->data.await_expr.expr, scope); if (target_inst == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; @@ -6108,7 +6861,7 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *parent_scope, Ast return irb->codegen->invalid_instruction; } - ScopeDeferExpr *scope_defer_expr = get_scope_defer_expr(parent_scope); + ScopeDeferExpr *scope_defer_expr = get_scope_defer_expr(scope); if (scope_defer_expr) { if (!scope_defer_expr->reported_err) { add_node_error(irb->codegen, node, buf_sprintf("cannot await inside defer expression")); @@ -6119,85 +6872,157 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *parent_scope, Ast Scope *outer_scope = irb->exec->begin_scope; - IrInstruction *coro_promise_ptr = ir_build_coro_promise(irb, parent_scope, node, target_inst); + IrInstruction *coro_promise_ptr = ir_build_coro_promise(irb, scope, node, target_inst); Buf *result_ptr_field_name = buf_create_from_str(RESULT_PTR_FIELD_NAME); - IrInstruction *result_ptr_field_ptr = ir_build_field_ptr(irb, parent_scope, node, coro_promise_ptr, result_ptr_field_name); + IrInstruction *result_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_ptr_field_name); if (irb->codegen->have_err_ret_tracing) { - IrInstruction *err_ret_trace_ptr = ir_build_error_return_trace(irb, parent_scope, node, IrInstructionErrorReturnTrace::NonNull); + IrInstruction *err_ret_trace_ptr = ir_build_error_return_trace(irb, scope, node, IrInstructionErrorReturnTrace::NonNull); Buf *err_ret_trace_ptr_field_name = buf_create_from_str(ERR_RET_TRACE_PTR_FIELD_NAME); - IrInstruction *err_ret_trace_ptr_field_ptr = ir_build_field_ptr(irb, parent_scope, node, coro_promise_ptr, err_ret_trace_ptr_field_name); - ir_build_store_ptr(irb, parent_scope, node, err_ret_trace_ptr_field_ptr, err_ret_trace_ptr); - } - - Buf *awaiter_handle_field_name = buf_create_from_str(AWAITER_HANDLE_FIELD_NAME); - IrInstruction *awaiter_field_ptr = ir_build_field_ptr(irb, parent_scope, node, coro_promise_ptr, - awaiter_handle_field_name); - - IrInstruction *const_bool_false = ir_build_const_bool(irb, parent_scope, node, false); - VariableTableEntry *result_var = ir_create_var(irb, node, parent_scope, nullptr, + IrInstruction *err_ret_trace_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_ptr_field_name); + ir_build_store_ptr(irb, scope, node, err_ret_trace_ptr_field_ptr, err_ret_trace_ptr); + } + + IrBasicBlock *already_awaited_block = ir_create_basic_block(irb, scope, "AlreadyAwaited"); + IrBasicBlock *not_awaited_block = ir_create_basic_block(irb, scope, "NotAwaited"); + IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled"); + IrBasicBlock *yes_suspend_block = ir_create_basic_block(irb, scope, 
"YesSuspend"); + IrBasicBlock *no_suspend_block = ir_create_basic_block(irb, scope, "NoSuspend"); + IrBasicBlock *merge_block = ir_create_basic_block(irb, scope, "MergeSuspend"); + IrBasicBlock *cleanup_block = ir_create_basic_block(irb, scope, "SuspendCleanup"); + IrBasicBlock *resume_block = ir_create_basic_block(irb, scope, "SuspendResume"); + IrBasicBlock *cancel_target_block = ir_create_basic_block(irb, scope, "CancelTarget"); + IrBasicBlock *do_cancel_block = ir_create_basic_block(irb, scope, "DoCancel"); + IrBasicBlock *do_defers_block = ir_create_basic_block(irb, scope, "DoDefers"); + IrBasicBlock *destroy_block = ir_create_basic_block(irb, scope, "DestroyBlock"); + IrBasicBlock *my_suspended_block = ir_create_basic_block(irb, scope, "AlreadySuspended"); + IrBasicBlock *my_not_suspended_block = ir_create_basic_block(irb, scope, "NotAlreadySuspended"); + IrBasicBlock *do_suspend_block = ir_create_basic_block(irb, scope, "DoSuspend"); + + Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME); + IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, + atomic_state_field_name); + + IrInstruction *promise_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_promise); + IrInstruction *const_bool_false = ir_build_const_bool(irb, scope, node, false); + IrInstruction *undefined_value = ir_build_const_undefined(irb, scope, node); + IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize); + IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0); + IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, scope, node, 0x7); // 0b111 + IrInstruction *ptr_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000 + IrInstruction *await_mask = ir_build_const_usize(irb, scope, node, 0x4); // 0b100 + IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001 + IrInstruction *is_suspended_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010 + + VariableTableEntry *result_var = ir_create_var(irb, node, scope, nullptr, false, false, true, const_bool_false); - IrInstruction *undefined_value = ir_build_const_undefined(irb, parent_scope, node); - IrInstruction *target_promise_type = ir_build_typeof(irb, parent_scope, node, target_inst); - IrInstruction *promise_result_type = ir_build_promise_result_type(irb, parent_scope, node, target_promise_type); - ir_build_await_bookkeeping(irb, parent_scope, node, promise_result_type); - ir_build_var_decl(irb, parent_scope, node, result_var, promise_result_type, nullptr, undefined_value); - IrInstruction *my_result_var_ptr = ir_build_var_ptr(irb, parent_scope, node, result_var, false, false); - ir_build_store_ptr(irb, parent_scope, node, result_ptr_field_ptr, my_result_var_ptr); - IrInstruction *save_token = ir_build_coro_save(irb, parent_scope, node, irb->exec->coro_handle); - IrInstruction *promise_type_val = ir_build_const_type(irb, parent_scope, node, - get_maybe_type(irb->codegen, irb->codegen->builtin_types.entry_promise)); - IrInstruction *maybe_await_handle = ir_build_atomic_rmw(irb, parent_scope, node, - promise_type_val, awaiter_field_ptr, nullptr, irb->exec->coro_handle, nullptr, - AtomicRmwOp_xchg, AtomicOrderSeqCst); - IrInstruction *is_non_null = ir_build_test_nonnull(irb, parent_scope, node, maybe_await_handle); - IrBasicBlock *yes_suspend_block = ir_create_basic_block(irb, parent_scope, "YesSuspend"); - IrBasicBlock *no_suspend_block 
= ir_create_basic_block(irb, parent_scope, "NoSuspend"); - IrBasicBlock *merge_block = ir_create_basic_block(irb, parent_scope, "MergeSuspend"); - ir_build_cond_br(irb, parent_scope, node, is_non_null, no_suspend_block, yes_suspend_block, const_bool_false); + IrInstruction *target_promise_type = ir_build_typeof(irb, scope, node, target_inst); + IrInstruction *promise_result_type = ir_build_promise_result_type(irb, scope, node, target_promise_type); + ir_build_await_bookkeeping(irb, scope, node, promise_result_type); + ir_build_var_decl(irb, scope, node, result_var, promise_result_type, nullptr, undefined_value); + IrInstruction *my_result_var_ptr = ir_build_var_ptr(irb, scope, node, result_var); + ir_build_store_ptr(irb, scope, node, result_ptr_field_ptr, my_result_var_ptr); + IrInstruction *save_token = ir_build_coro_save(irb, scope, node, irb->exec->coro_handle); + + IrInstruction *coro_handle_addr = ir_build_ptr_to_int(irb, scope, node, irb->exec->coro_handle); + IrInstruction *mask_bits = ir_build_bin_op(irb, scope, node, IrBinOpBinOr, coro_handle_addr, await_mask, false); + IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node, + usize_type_val, atomic_state_ptr, nullptr, mask_bits, nullptr, + AtomicRmwOp_or, AtomicOrderSeqCst); + + IrInstruction *is_awaited_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, await_mask, false); + IrInstruction *is_awaited_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_awaited_value, zero, false); + ir_build_cond_br(irb, scope, node, is_awaited_bool, already_awaited_block, not_awaited_block, const_bool_false); + + ir_set_cursor_at_end_and_append_block(irb, already_awaited_block); + ir_build_unreachable(irb, scope, node); + + ir_set_cursor_at_end_and_append_block(irb, not_awaited_block); + IrInstruction *await_handle_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false); + IrInstruction *is_non_null = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false); + IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false); + IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false); + ir_build_cond_br(irb, scope, node, is_canceled_bool, cancel_target_block, not_canceled_block, const_bool_false); + + ir_set_cursor_at_end_and_append_block(irb, not_canceled_block); + ir_build_cond_br(irb, scope, node, is_non_null, no_suspend_block, yes_suspend_block, const_bool_false); + + ir_set_cursor_at_end_and_append_block(irb, cancel_target_block); + ir_build_cancel(irb, scope, node, target_inst); + ir_mark_gen(ir_build_br(irb, scope, node, cleanup_block, const_bool_false)); ir_set_cursor_at_end_and_append_block(irb, no_suspend_block); if (irb->codegen->have_err_ret_tracing) { Buf *err_ret_trace_field_name = buf_create_from_str(ERR_RET_TRACE_FIELD_NAME); - IrInstruction *src_err_ret_trace_ptr = ir_build_field_ptr(irb, parent_scope, node, coro_promise_ptr, err_ret_trace_field_name); - IrInstruction *dest_err_ret_trace_ptr = ir_build_error_return_trace(irb, parent_scope, node, IrInstructionErrorReturnTrace::NonNull); - ir_build_merge_err_ret_traces(irb, parent_scope, node, coro_promise_ptr, src_err_ret_trace_ptr, dest_err_ret_trace_ptr); + IrInstruction *src_err_ret_trace_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_field_name); + IrInstruction *dest_err_ret_trace_ptr = 
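The await lowering publishes the awaiter in one atomic OR: the current coroutine's handle address (whose low bits are free thanks to the promise alignment) is combined with the 0b100 await flag and merged into the target's state word, and the previous value decides between cancel, no-suspend, and suspend. A standalone sketch of that handshake; the names are mine and the surrounding bookkeeping is omitted:

// Sketch of the await handshake: publish our handle and the await bit together,
// then decide from the previous packed state whether we must suspend.
#include <atomic>
#include <cassert>
#include <cstdint>

constexpr uintptr_t kCanceled = 0x1, kAwaited = 0x4;
constexpr uintptr_t kPtrMask  = ~uintptr_t(0x7);

enum class AwaitPath { CancelTarget, NoSuspend, Suspend };

static AwaitPath publish_awaiter(std::atomic<uintptr_t> &target_state, void *my_handle) {
    uintptr_t handle_bits = reinterpret_cast<uintptr_t>(my_handle);
    assert((handle_bits & ~kPtrMask) == 0);   // alignment keeps the flag bits free
    uintptr_t prev = target_state.fetch_or(handle_bits | kAwaited,
                                           std::memory_order_seq_cst);
    assert((prev & kAwaited) == 0);           // awaiting twice is a bug (unreachable in the IR)
    if (prev & kCanceled)
        return AwaitPath::CancelTarget;       // target was canceled before we awaited it
    if (prev & kPtrMask)
        return AwaitPath::NoSuspend;          // target already finished: read the result now
    return AwaitPath::Suspend;                // target still running: suspend until resumed
}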
ir_build_error_return_trace(irb, scope, node, IrInstructionErrorReturnTrace::NonNull); + ir_build_merge_err_ret_traces(irb, scope, node, coro_promise_ptr, src_err_ret_trace_ptr, dest_err_ret_trace_ptr); } Buf *result_field_name = buf_create_from_str(RESULT_FIELD_NAME); - IrInstruction *promise_result_ptr = ir_build_field_ptr(irb, parent_scope, node, coro_promise_ptr, result_field_name); - IrInstruction *no_suspend_result = ir_build_load_ptr(irb, parent_scope, node, promise_result_ptr); - ir_build_cancel(irb, parent_scope, node, target_inst); - ir_build_br(irb, parent_scope, node, merge_block, const_bool_false); + IrInstruction *promise_result_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_field_name); + // If the type of the result handle_is_ptr then this does not actually perform a load. But we need it to, + // because we're about to destroy the memory. So we store it into our result variable. + IrInstruction *no_suspend_result = ir_build_load_ptr(irb, scope, node, promise_result_ptr); + ir_build_store_ptr(irb, scope, node, my_result_var_ptr, no_suspend_result); + ir_build_cancel(irb, scope, node, target_inst); + ir_build_br(irb, scope, node, merge_block, const_bool_false); + ir_set_cursor_at_end_and_append_block(irb, yes_suspend_block); - IrInstruction *suspend_code = ir_build_coro_suspend(irb, parent_scope, node, save_token, const_bool_false); - IrBasicBlock *cleanup_block = ir_create_basic_block(irb, parent_scope, "SuspendCleanup"); - IrBasicBlock *resume_block = ir_create_basic_block(irb, parent_scope, "SuspendResume"); + IrInstruction *my_prev_atomic_value = ir_build_atomic_rmw(irb, scope, node, + usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, is_suspended_mask, nullptr, + AtomicRmwOp_or, AtomicOrderSeqCst); + IrInstruction *my_is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, my_prev_atomic_value, is_suspended_mask, false); + IrInstruction *my_is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, my_is_suspended_value, zero, false); + ir_build_cond_br(irb, scope, node, my_is_suspended_bool, my_suspended_block, my_not_suspended_block, const_bool_false); + + ir_set_cursor_at_end_and_append_block(irb, my_suspended_block); + ir_build_unreachable(irb, scope, node); + + ir_set_cursor_at_end_and_append_block(irb, my_not_suspended_block); + IrInstruction *my_is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, my_prev_atomic_value, is_canceled_mask, false); + IrInstruction *my_is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, my_is_canceled_value, zero, false); + ir_build_cond_br(irb, scope, node, my_is_canceled_bool, cleanup_block, do_suspend_block, const_bool_false); + + ir_set_cursor_at_end_and_append_block(irb, do_suspend_block); + IrInstruction *suspend_code = ir_build_coro_suspend(irb, scope, node, save_token, const_bool_false); IrInstructionSwitchBrCase *cases = allocate<IrInstructionSwitchBrCase>(2); - cases[0].value = ir_build_const_u8(irb, parent_scope, node, 0); + cases[0].value = ir_build_const_u8(irb, scope, node, 0); cases[0].block = resume_block; - cases[1].value = ir_build_const_u8(irb, parent_scope, node, 1); - cases[1].block = cleanup_block; - ir_build_switch_br(irb, parent_scope, node, suspend_code, irb->exec->coro_suspend_block, - 2, cases, const_bool_false); + cases[1].value = ir_build_const_u8(irb, scope, node, 1); + cases[1].block = destroy_block; + ir_build_switch_br(irb, scope, node, suspend_code, irb->exec->coro_suspend_block, + 2, cases, const_bool_false, 
nullptr); + + ir_set_cursor_at_end_and_append_block(irb, destroy_block); + ir_gen_cancel_target(irb, scope, node, target_inst, false, true); + ir_mark_gen(ir_build_br(irb, scope, node, cleanup_block, const_bool_false)); ir_set_cursor_at_end_and_append_block(irb, cleanup_block); - ir_gen_defers_for_block(irb, parent_scope, outer_scope, true); - ir_mark_gen(ir_build_br(irb, parent_scope, node, irb->exec->coro_final_cleanup_block, const_bool_false)); + IrInstruction *my_mask_bits = ir_build_bin_op(irb, scope, node, IrBinOpBinOr, ptr_mask, is_canceled_mask, false); + IrInstruction *b_my_prev_atomic_value = ir_build_atomic_rmw(irb, scope, node, + usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, my_mask_bits, nullptr, + AtomicRmwOp_or, AtomicOrderSeqCst); + IrInstruction *my_await_handle_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, b_my_prev_atomic_value, ptr_mask, false); + IrInstruction *dont_have_my_await_handle = ir_build_bin_op(irb, scope, node, IrBinOpCmpEq, my_await_handle_addr, zero, false); + IrInstruction *dont_destroy_ourselves = ir_build_bin_op(irb, scope, node, IrBinOpBoolAnd, dont_have_my_await_handle, is_canceled_bool, false); + ir_build_cond_br(irb, scope, node, dont_have_my_await_handle, do_defers_block, do_cancel_block, const_bool_false); + + ir_set_cursor_at_end_and_append_block(irb, do_cancel_block); + IrInstruction *my_await_handle = ir_build_int_to_ptr(irb, scope, node, promise_type_val, my_await_handle_addr); + ir_gen_cancel_target(irb, scope, node, my_await_handle, true, false); + ir_mark_gen(ir_build_br(irb, scope, node, do_defers_block, const_bool_false)); + + ir_set_cursor_at_end_and_append_block(irb, do_defers_block); + ir_gen_defers_for_block(irb, scope, outer_scope, true); + ir_mark_gen(ir_build_cond_br(irb, scope, node, dont_destroy_ourselves, irb->exec->coro_early_final, irb->exec->coro_final_cleanup_block, const_bool_false)); ir_set_cursor_at_end_and_append_block(irb, resume_block); - IrInstruction *yes_suspend_result = ir_build_load_ptr(irb, parent_scope, node, my_result_var_ptr); - ir_build_br(irb, parent_scope, node, merge_block, const_bool_false); + ir_build_br(irb, scope, node, merge_block, const_bool_false); ir_set_cursor_at_end_and_append_block(irb, merge_block); - IrBasicBlock **incoming_blocks = allocate<IrBasicBlock *>(2); - IrInstruction **incoming_values = allocate<IrInstruction *>(2); - incoming_blocks[0] = resume_block; - incoming_values[0] = yes_suspend_result; - incoming_blocks[1] = no_suspend_block; - incoming_values[1] = no_suspend_result; - return ir_build_phi(irb, parent_scope, node, 2, incoming_blocks, incoming_values); + return ir_build_load_ptr(irb, scope, node, my_result_var_ptr); } static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNode *node) { @@ -6216,54 +7041,105 @@ static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNod ScopeDeferExpr *scope_defer_expr = get_scope_defer_expr(parent_scope); if (scope_defer_expr) { if (!scope_defer_expr->reported_err) { - add_node_error(irb->codegen, node, buf_sprintf("cannot suspend inside defer expression")); + ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot suspend inside defer expression")); + add_error_note(irb->codegen, msg, scope_defer_expr->base.source_node, buf_sprintf("defer here")); scope_defer_expr->reported_err = true; } return irb->codegen->invalid_instruction; } + ScopeSuspend *existing_suspend_scope = get_scope_suspend(parent_scope); + if (existing_suspend_scope) { + if 
(!existing_suspend_scope->reported_err) { + ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot suspend inside suspend block")); + add_error_note(irb->codegen, msg, existing_suspend_scope->base.source_node, buf_sprintf("other suspend block here")); + existing_suspend_scope->reported_err = true; + } + return irb->codegen->invalid_instruction; + } Scope *outer_scope = irb->exec->begin_scope; - - IrInstruction *suspend_code; + IrBasicBlock *cleanup_block = ir_create_basic_block(irb, parent_scope, "SuspendCleanup"); + IrBasicBlock *resume_block = ir_create_basic_block(irb, parent_scope, "SuspendResume"); + IrBasicBlock *suspended_block = ir_create_basic_block(irb, parent_scope, "AlreadySuspended"); + IrBasicBlock *canceled_block = ir_create_basic_block(irb, parent_scope, "IsCanceled"); + IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, parent_scope, "NotCanceled"); + IrBasicBlock *not_suspended_block = ir_create_basic_block(irb, parent_scope, "NotAlreadySuspended"); + IrBasicBlock *cancel_awaiter_block = ir_create_basic_block(irb, parent_scope, "CancelAwaiter"); + + IrInstruction *promise_type_val = ir_build_const_type(irb, parent_scope, node, irb->codegen->builtin_types.entry_promise); + IrInstruction *const_bool_true = ir_build_const_bool(irb, parent_scope, node, true); IrInstruction *const_bool_false = ir_build_const_bool(irb, parent_scope, node, false); + IrInstruction *usize_type_val = ir_build_const_type(irb, parent_scope, node, irb->codegen->builtin_types.entry_usize); + IrInstruction *is_canceled_mask = ir_build_const_usize(irb, parent_scope, node, 0x1); // 0b001 + IrInstruction *is_suspended_mask = ir_build_const_usize(irb, parent_scope, node, 0x2); // 0b010 + IrInstruction *zero = ir_build_const_usize(irb, parent_scope, node, 0); + IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, parent_scope, node, 0x7); // 0b111 + IrInstruction *ptr_mask = ir_build_un_op(irb, parent_scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000 + + IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, parent_scope, node, + usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, is_suspended_mask, nullptr, + AtomicRmwOp_or, AtomicOrderSeqCst); + + IrInstruction *is_canceled_value = ir_build_bin_op(irb, parent_scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false); + IrInstruction *is_canceled_bool = ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false); + ir_build_cond_br(irb, parent_scope, node, is_canceled_bool, canceled_block, not_canceled_block, const_bool_false); + + ir_set_cursor_at_end_and_append_block(irb, canceled_block); + IrInstruction *await_handle_addr = ir_build_bin_op(irb, parent_scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false); + IrInstruction *have_await_handle = ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false); + IrBasicBlock *post_canceled_block = irb->current_basic_block; + ir_build_cond_br(irb, parent_scope, node, have_await_handle, cancel_awaiter_block, cleanup_block, const_bool_false); + + ir_set_cursor_at_end_and_append_block(irb, cancel_awaiter_block); + IrInstruction *await_handle = ir_build_int_to_ptr(irb, parent_scope, node, promise_type_val, await_handle_addr); + ir_gen_cancel_target(irb, parent_scope, node, await_handle, true, false); + IrBasicBlock *post_cancel_awaiter_block = irb->current_basic_block; + ir_build_br(irb, parent_scope, node, cleanup_block, const_bool_false); + + 
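The "cannot suspend inside suspend block" diagnostic above relies on finding an enclosing ScopeSuspend by walking the parent scope chain. A rough sketch of that kind of lookup, assuming (my guess, not shown in the diff) that the walk stops at the function boundary:

// Rough sketch of an enclosing-scope lookup over a parent-linked scope chain.
enum ScopeKind { ScopeKindSuspend, ScopeKindFnDef, ScopeKindOther };

struct Scope {
    ScopeKind kind;
    Scope *parent;
};

static Scope *find_enclosing_suspend(Scope *scope) {
    for (; scope != nullptr; scope = scope->parent) {
        if (scope->kind == ScopeKindSuspend)
            return scope;        // caller reports: cannot suspend inside suspend block
        if (scope->kind == ScopeKindFnDef)
            break;               // assumed stopping point: the enclosing function
    }
    return nullptr;
}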
ir_set_cursor_at_end_and_append_block(irb, not_canceled_block); + IrInstruction *is_suspended_value = ir_build_bin_op(irb, parent_scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false); + IrInstruction *is_suspended_bool = ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false); + ir_build_cond_br(irb, parent_scope, node, is_suspended_bool, suspended_block, not_suspended_block, const_bool_false); + + ir_set_cursor_at_end_and_append_block(irb, suspended_block); + ir_build_unreachable(irb, parent_scope, node); + + ir_set_cursor_at_end_and_append_block(irb, not_suspended_block); + IrInstruction *suspend_code; if (node->data.suspend.block == nullptr) { suspend_code = ir_build_coro_suspend(irb, parent_scope, node, nullptr, const_bool_false); } else { - assert(node->data.suspend.promise_symbol != nullptr); - assert(node->data.suspend.promise_symbol->type == NodeTypeSymbol); - Buf *promise_symbol_name = node->data.suspend.promise_symbol->data.symbol_expr.symbol; Scope *child_scope; - if (!buf_eql_str(promise_symbol_name, "_")) { - VariableTableEntry *promise_var = ir_create_var(irb, node, parent_scope, promise_symbol_name, - true, true, false, const_bool_false); - ir_build_var_decl(irb, parent_scope, node, promise_var, nullptr, nullptr, irb->exec->coro_handle); - child_scope = promise_var->child_scope; - } else { - child_scope = parent_scope; - } + ScopeSuspend *suspend_scope = create_suspend_scope(node, parent_scope); + suspend_scope->resume_block = resume_block; + child_scope = &suspend_scope->base; IrInstruction *save_token = ir_build_coro_save(irb, child_scope, node, irb->exec->coro_handle); ir_gen_node(irb, node->data.suspend.block, child_scope); - suspend_code = ir_build_coro_suspend(irb, parent_scope, node, save_token, const_bool_false); + suspend_code = ir_mark_gen(ir_build_coro_suspend(irb, parent_scope, node, save_token, const_bool_false)); } - IrBasicBlock *cleanup_block = ir_create_basic_block(irb, parent_scope, "SuspendCleanup"); - IrBasicBlock *resume_block = ir_create_basic_block(irb, parent_scope, "SuspendResume"); - IrInstructionSwitchBrCase *cases = allocate<IrInstructionSwitchBrCase>(2); - cases[0].value = ir_build_const_u8(irb, parent_scope, node, 0); + cases[0].value = ir_mark_gen(ir_build_const_u8(irb, parent_scope, node, 0)); cases[0].block = resume_block; - cases[1].value = ir_build_const_u8(irb, parent_scope, node, 1); - cases[1].block = cleanup_block; - ir_build_switch_br(irb, parent_scope, node, suspend_code, irb->exec->coro_suspend_block, - 2, cases, const_bool_false); + cases[1].value = ir_mark_gen(ir_build_const_u8(irb, parent_scope, node, 1)); + cases[1].block = canceled_block; + ir_mark_gen(ir_build_switch_br(irb, parent_scope, node, suspend_code, irb->exec->coro_suspend_block, + 2, cases, const_bool_false, nullptr)); ir_set_cursor_at_end_and_append_block(irb, cleanup_block); + IrBasicBlock **incoming_blocks = allocate<IrBasicBlock *>(2); + IrInstruction **incoming_values = allocate<IrInstruction *>(2); + incoming_blocks[0] = post_canceled_block; + incoming_values[0] = const_bool_true; + incoming_blocks[1] = post_cancel_awaiter_block; + incoming_values[1] = const_bool_false; + IrInstruction *destroy_ourselves = ir_build_phi(irb, parent_scope, node, 2, incoming_blocks, incoming_values); ir_gen_defers_for_block(irb, parent_scope, outer_scope, true); - ir_mark_gen(ir_build_br(irb, parent_scope, node, irb->exec->coro_final_cleanup_block, const_bool_false)); + ir_mark_gen(ir_build_cond_br(irb, parent_scope, node, 
destroy_ourselves, irb->exec->coro_final_cleanup_block, irb->exec->coro_early_final, const_bool_false)); ir_set_cursor_at_end_and_append_block(irb, resume_block); - return ir_build_const_void(irb, parent_scope, node); + return ir_mark_gen(ir_build_const_void(irb, parent_scope, node)); } static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scope, @@ -6296,13 +7172,11 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop case NodeTypeSymbol: return ir_gen_symbol(irb, scope, node, lval); case NodeTypeFnCallExpr: - return ir_lval_wrap(irb, scope, ir_gen_fn_call(irb, scope, node), lval); + return ir_gen_fn_call(irb, scope, node, lval); case NodeTypeIfBoolExpr: return ir_lval_wrap(irb, scope, ir_gen_if_bool_expr(irb, scope, node), lval); case NodeTypePrefixOpExpr: return ir_gen_prefix_op_expr(irb, scope, node, lval); - case NodeTypeAddrOfExpr: - return ir_lval_wrap(irb, scope, ir_gen_address_of(irb, scope, node), lval); case NodeTypeContainerInitExpr: return ir_lval_wrap(irb, scope, ir_gen_container_init_expr(irb, scope, node), lval); case NodeTypeVariableDeclaration: @@ -6316,13 +7190,44 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop case NodeTypeReturnExpr: return ir_gen_return(irb, scope, node, lval); case NodeTypeFieldAccessExpr: - return ir_gen_field_access(irb, scope, node, lval); + { + IrInstruction *ptr_instruction = ir_gen_field_access(irb, scope, node); + if (ptr_instruction == irb->codegen->invalid_instruction) + return ptr_instruction; + if (lval == LValPtr) + return ptr_instruction; + + return ir_build_load_ptr(irb, scope, node, ptr_instruction); + } + case NodeTypePtrDeref: { + AstNode *expr_node = node->data.ptr_deref_expr.target; + IrInstruction *value = ir_gen_node_extra(irb, expr_node, scope, lval); + if (value == irb->codegen->invalid_instruction) + return value; + + return ir_build_un_op(irb, scope, node, IrUnOpDereference, value); + } + case NodeTypeUnwrapOptional: { + AstNode *expr_node = node->data.unwrap_optional.expr; + + IrInstruction *maybe_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr); + if (maybe_ptr == irb->codegen->invalid_instruction) + return irb->codegen->invalid_instruction; + + IrInstruction *unwrapped_ptr = ir_build_unwrap_maybe(irb, scope, node, maybe_ptr, true); + if (lval == LValPtr) + return unwrapped_ptr; + + return ir_build_load_ptr(irb, scope, node, unwrapped_ptr); + } case NodeTypeThisLiteral: return ir_lval_wrap(irb, scope, ir_gen_this_literal(irb, scope, node), lval); case NodeTypeBoolLiteral: return ir_lval_wrap(irb, scope, ir_gen_bool_literal(irb, scope, node), lval); case NodeTypeArrayType: return ir_lval_wrap(irb, scope, ir_gen_array_type(irb, scope, node), lval); + case NodeTypePointerType: + return ir_lval_wrap(irb, scope, ir_gen_pointer_type(irb, scope, node), lval); case NodeTypePromiseType: return ir_lval_wrap(irb, scope, ir_gen_promise_type(irb, scope, node), lval); case NodeTypeStringLiteral: @@ -6380,7 +7285,7 @@ static IrInstruction *ir_gen_node_extra(IrBuilder *irb, AstNode *node, Scope *sc } static IrInstruction *ir_gen_node(IrBuilder *irb, AstNode *node, Scope *scope) { - return ir_gen_node_extra(irb, node, scope, LVAL_NONE); + return ir_gen_node_extra(irb, node, scope, LValNone); } static void invalidate_exec(IrExecutable *exec) { @@ -6434,15 +7339,14 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec IrInstruction *coro_frame_type_value = ir_build_const_type(irb, coro_scope, node, coro_frame_type); // 
TODO mark this var decl as "no safety" e.g. disable initializing the undef value to 0xaa ir_build_var_decl(irb, coro_scope, node, promise_var, coro_frame_type_value, nullptr, undef); - coro_promise_ptr = ir_build_var_ptr(irb, coro_scope, node, promise_var, false, false); + coro_promise_ptr = ir_build_var_ptr(irb, coro_scope, node, promise_var); VariableTableEntry *await_handle_var = ir_create_var(irb, node, coro_scope, nullptr, false, false, true, const_bool_false); IrInstruction *null_value = ir_build_const_null(irb, coro_scope, node); IrInstruction *await_handle_type_val = ir_build_const_type(irb, coro_scope, node, - get_maybe_type(irb->codegen, irb->codegen->builtin_types.entry_promise)); + get_optional_type(irb->codegen, irb->codegen->builtin_types.entry_promise)); ir_build_var_decl(irb, coro_scope, node, await_handle_var, await_handle_type_val, nullptr, null_value); - irb->exec->await_handle_var_ptr = ir_build_var_ptr(irb, coro_scope, node, - await_handle_var, false, false); + irb->exec->await_handle_var_ptr = ir_build_var_ptr(irb, coro_scope, node, await_handle_var); u8_ptr_type = ir_build_const_type(irb, coro_scope, node, get_pointer_to_type(irb->codegen, irb->codegen->builtin_types.entry_u8, false)); @@ -6473,10 +7377,11 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec IrInstruction *coro_mem_ptr = ir_build_ptr_cast(irb, coro_scope, node, u8_ptr_type, maybe_coro_mem_ptr); irb->exec->coro_handle = ir_build_coro_begin(irb, coro_scope, node, coro_id, coro_mem_ptr); - Buf *awaiter_handle_field_name = buf_create_from_str(AWAITER_HANDLE_FIELD_NAME); - irb->exec->coro_awaiter_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, - awaiter_handle_field_name); - ir_build_store_ptr(irb, scope, node, irb->exec->coro_awaiter_field_ptr, null_value); + Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME); + irb->exec->atomic_state_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, + atomic_state_field_name); + IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0); + ir_build_store_ptr(irb, scope, node, irb->exec->atomic_state_field_ptr, zero); Buf *result_field_name = buf_create_from_str(RESULT_FIELD_NAME); irb->exec->coro_result_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_field_name); result_ptr_field_name = buf_create_from_str(RESULT_PTR_FIELD_NAME); @@ -6494,7 +7399,6 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec // coordinate with builtin.zig Buf *index_name = buf_create_from_str("index"); IrInstruction *index_ptr = ir_build_field_ptr(irb, scope, node, err_ret_trace_ptr, index_name); - IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0); ir_build_store_ptr(irb, scope, node, index_ptr, zero); Buf *instruction_addresses_name = buf_create_from_str("instruction_addresses"); @@ -6511,7 +7415,7 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec irb->exec->coro_final_cleanup_block = ir_create_basic_block(irb, scope, "FinalCleanup"); } - IrInstruction *result = ir_gen_node_extra(irb, node, scope, LVAL_NONE); + IrInstruction *result = ir_gen_node_extra(irb, node, scope, LValNone); assert(result); if (irb->exec->invalid) return false; @@ -6533,7 +7437,7 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec cases[0].block = invalid_resume_block; cases[1].value = ir_build_const_u8(irb, scope, node, 1); cases[1].block = irb->exec->coro_final_cleanup_block; - 
ir_build_switch_br(irb, scope, node, suspend_code, irb->exec->coro_suspend_block, 2, cases, const_bool_false); + ir_build_switch_br(irb, scope, node, suspend_code, irb->exec->coro_suspend_block, 2, cases, const_bool_false, nullptr); ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_suspend_block); ir_build_coro_end(irb, scope, node); @@ -6544,9 +7448,13 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_normal_final); if (type_has_bits(return_type)) { + IrInstruction *u8_ptr_type_unknown_len = ir_build_const_type(irb, scope, node, + get_pointer_to_type_extra(irb->codegen, irb->codegen->builtin_types.entry_u8, + false, false, PtrLenUnknown, get_abi_alignment(irb->codegen, irb->codegen->builtin_types.entry_u8), + 0, 0)); IrInstruction *result_ptr = ir_build_load_ptr(irb, scope, node, irb->exec->coro_result_ptr_field_ptr); - IrInstruction *result_ptr_as_u8_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type, result_ptr); - IrInstruction *return_value_ptr_as_u8_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type, + IrInstruction *result_ptr_as_u8_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type_unknown_len, result_ptr); + IrInstruction *return_value_ptr_as_u8_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type_unknown_len, irb->exec->coro_result_field_ptr); IrInstruction *return_type_inst = ir_build_const_type(irb, scope, node, fn_entry->type_entry->data.fn.fn_type_id.return_type); @@ -6559,6 +7467,12 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec IrInstruction *dest_err_ret_trace_ptr = ir_build_load_ptr(irb, scope, node, err_ret_trace_ptr_field_ptr); ir_build_merge_err_ret_traces(irb, scope, node, coro_promise_ptr, err_ret_trace_ptr, dest_err_ret_trace_ptr); } + // Before we destroy the coroutine frame, we need to load the target promise into + // a register or local variable which does not get spilled into the frame, + // otherwise llvm tries to access memory inside the destroyed frame. 
+ IrInstruction *unwrapped_await_handle_ptr = ir_build_unwrap_maybe(irb, scope, node, + irb->exec->await_handle_var_ptr, false); + IrInstruction *await_handle_in_block = ir_build_load_ptr(irb, scope, node, unwrapped_await_handle_ptr); ir_build_br(irb, scope, node, check_free_block, const_bool_false); ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_final_cleanup_block); @@ -6573,6 +7487,14 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec incoming_values[1] = const_bool_true; IrInstruction *resume_awaiter = ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values); + IrBasicBlock **merge_incoming_blocks = allocate<IrBasicBlock *>(2); + IrInstruction **merge_incoming_values = allocate<IrInstruction *>(2); + merge_incoming_blocks[0] = irb->exec->coro_final_cleanup_block; + merge_incoming_values[0] = ir_build_const_undefined(irb, scope, node); + merge_incoming_blocks[1] = irb->exec->coro_normal_final; + merge_incoming_values[1] = await_handle_in_block; + IrInstruction *awaiter_handle = ir_build_phi(irb, scope, node, 2, merge_incoming_blocks, merge_incoming_values); + Buf *free_field_name = buf_create_from_str(ASYNC_FREE_FIELD_NAME); IrInstruction *implicit_allocator_ptr = ir_build_get_implicit_allocator(irb, scope, node, ImplicitAllocatorIdLocalVar); @@ -6580,25 +7502,26 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec IrInstruction *free_fn = ir_build_load_ptr(irb, scope, node, free_fn_ptr); IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0); IrInstruction *coro_mem_ptr_maybe = ir_build_coro_free(irb, scope, node, coro_id, irb->exec->coro_handle); - IrInstruction *coro_mem_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type, coro_mem_ptr_maybe); + IrInstruction *u8_ptr_type_unknown_len = ir_build_const_type(irb, scope, node, + get_pointer_to_type_extra(irb->codegen, irb->codegen->builtin_types.entry_u8, + false, false, PtrLenUnknown, get_abi_alignment(irb->codegen, irb->codegen->builtin_types.entry_u8), + 0, 0)); + IrInstruction *coro_mem_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type_unknown_len, coro_mem_ptr_maybe); IrInstruction *coro_mem_ptr_ref = ir_build_ref(irb, scope, node, coro_mem_ptr, true, false); - IrInstruction *coro_size_ptr = ir_build_var_ptr(irb, scope, node, coro_size_var, true, false); + IrInstruction *coro_size_ptr = ir_build_var_ptr(irb, scope, node, coro_size_var); IrInstruction *coro_size = ir_build_load_ptr(irb, scope, node, coro_size_ptr); IrInstruction *mem_slice = ir_build_slice(irb, scope, node, coro_mem_ptr_ref, zero, coro_size, false); size_t arg_count = 2; IrInstruction **args = allocate<IrInstruction *>(arg_count); args[0] = implicit_allocator_ptr; // self args[1] = mem_slice; // old_mem - ir_build_call(irb, scope, node, nullptr, free_fn, arg_count, args, false, FnInlineAuto, false, nullptr); + ir_build_call(irb, scope, node, nullptr, free_fn, arg_count, args, false, FnInlineAuto, false, nullptr, nullptr); IrBasicBlock *resume_block = ir_create_basic_block(irb, scope, "Resume"); ir_build_cond_br(irb, scope, node, resume_awaiter, resume_block, irb->exec->coro_suspend_block, const_bool_false); ir_set_cursor_at_end_and_append_block(irb, resume_block); - IrInstruction *unwrapped_await_handle_ptr = ir_build_unwrap_maybe(irb, scope, node, - irb->exec->await_handle_var_ptr, false); - IrInstruction *awaiter_handle = ir_build_load_ptr(irb, scope, node, unwrapped_await_handle_ptr); - ir_build_coro_resume(irb, scope, node, awaiter_handle); + 
ir_gen_resume_target(irb, scope, node, awaiter_handle); ir_build_br(irb, scope, node, irb->exec->coro_suspend_block, const_bool_false); } @@ -6671,17 +7594,22 @@ static bool ir_emit_global_runtime_side_effect(IrAnalyze *ira, IrInstruction *so } static bool const_val_fits_in_num_lit(ConstExprValue *const_val, TypeTableEntry *num_lit_type) { - return ((num_lit_type->id == TypeTableEntryIdNumLitFloat && - (const_val->type->id == TypeTableEntryIdFloat || const_val->type->id == TypeTableEntryIdNumLitFloat)) || - (num_lit_type->id == TypeTableEntryIdNumLitInt && - (const_val->type->id == TypeTableEntryIdInt || const_val->type->id == TypeTableEntryIdNumLitInt))); + return ((num_lit_type->id == TypeTableEntryIdComptimeFloat && + (const_val->type->id == TypeTableEntryIdFloat || const_val->type->id == TypeTableEntryIdComptimeFloat)) || + (num_lit_type->id == TypeTableEntryIdComptimeInt && + (const_val->type->id == TypeTableEntryIdInt || const_val->type->id == TypeTableEntryIdComptimeInt))); } static bool float_has_fraction(ConstExprValue *const_val) { - if (const_val->type->id == TypeTableEntryIdNumLitFloat) { + if (const_val->type->id == TypeTableEntryIdComptimeFloat) { return bigfloat_has_fraction(&const_val->data.x_bigfloat); } else if (const_val->type->id == TypeTableEntryIdFloat) { switch (const_val->type->data.floating.bit_count) { + case 16: + { + float16_t floored = f16_roundToInt(const_val->data.x_f16, softfloat_round_minMag, false); + return !f16_eq(floored, const_val->data.x_f16); + } case 32: return floorf(const_val->data.x_f32) != const_val->data.x_f32; case 64: @@ -6701,10 +7629,13 @@ static bool float_has_fraction(ConstExprValue *const_val) { } static void float_append_buf(Buf *buf, ConstExprValue *const_val) { - if (const_val->type->id == TypeTableEntryIdNumLitFloat) { + if (const_val->type->id == TypeTableEntryIdComptimeFloat) { bigfloat_append_buf(buf, &const_val->data.x_bigfloat); } else if (const_val->type->id == TypeTableEntryIdFloat) { switch (const_val->type->data.floating.bit_count) { + case 16: + buf_appendf(buf, "%f", zig_f16_to_double(const_val->data.x_f16)); + break; case 32: buf_appendf(buf, "%f", const_val->data.x_f32); break; @@ -6736,10 +7667,21 @@ static void float_append_buf(Buf *buf, ConstExprValue *const_val) { } static void float_init_bigint(BigInt *bigint, ConstExprValue *const_val) { - if (const_val->type->id == TypeTableEntryIdNumLitFloat) { + if (const_val->type->id == TypeTableEntryIdComptimeFloat) { bigint_init_bigfloat(bigint, &const_val->data.x_bigfloat); } else if (const_val->type->id == TypeTableEntryIdFloat) { switch (const_val->type->data.floating.bit_count) { + case 16: + { + double x = zig_f16_to_double(const_val->data.x_f16); + if (x >= 0) { + bigint_init_unsigned(bigint, (uint64_t)x); + } else { + bigint_init_unsigned(bigint, (uint64_t)-x); + bigint->is_negative = true; + } + break; + } case 32: if (const_val->data.x_f32 >= 0) { bigint_init_unsigned(bigint, (uint64_t)(const_val->data.x_f32)); @@ -6772,10 +7714,13 @@ static void float_init_bigint(BigInt *bigint, ConstExprValue *const_val) { } static void float_init_bigfloat(ConstExprValue *dest_val, BigFloat *bigfloat) { - if (dest_val->type->id == TypeTableEntryIdNumLitFloat) { + if (dest_val->type->id == TypeTableEntryIdComptimeFloat) { bigfloat_init_bigfloat(&dest_val->data.x_bigfloat, bigfloat); } else if (dest_val->type->id == TypeTableEntryIdFloat) { switch (dest_val->type->data.floating.bit_count) { + case 16: + dest_val->data.x_f16 = bigfloat_to_f16(bigfloat); + break; case 32: 
dest_val->data.x_f32 = bigfloat_to_f32(bigfloat); break; @@ -6793,11 +7738,39 @@ static void float_init_bigfloat(ConstExprValue *dest_val, BigFloat *bigfloat) { } } +static void float_init_f16(ConstExprValue *dest_val, float16_t x) { + if (dest_val->type->id == TypeTableEntryIdComptimeFloat) { + bigfloat_init_16(&dest_val->data.x_bigfloat, x); + } else if (dest_val->type->id == TypeTableEntryIdFloat) { + switch (dest_val->type->data.floating.bit_count) { + case 16: + dest_val->data.x_f16 = x; + break; + case 32: + dest_val->data.x_f32 = zig_f16_to_double(x); + break; + case 64: + dest_val->data.x_f64 = zig_f16_to_double(x); + break; + case 128: + f16_to_f128M(x, &dest_val->data.x_f128); + break; + default: + zig_unreachable(); + } + } else { + zig_unreachable(); + } +} + static void float_init_f32(ConstExprValue *dest_val, float x) { - if (dest_val->type->id == TypeTableEntryIdNumLitFloat) { + if (dest_val->type->id == TypeTableEntryIdComptimeFloat) { bigfloat_init_32(&dest_val->data.x_bigfloat, x); } else if (dest_val->type->id == TypeTableEntryIdFloat) { switch (dest_val->type->data.floating.bit_count) { + case 16: + dest_val->data.x_f16 = zig_double_to_f16(x); + break; case 32: dest_val->data.x_f32 = x; break; @@ -6820,10 +7793,13 @@ static void float_init_f32(ConstExprValue *dest_val, float x) { } static void float_init_f64(ConstExprValue *dest_val, double x) { - if (dest_val->type->id == TypeTableEntryIdNumLitFloat) { + if (dest_val->type->id == TypeTableEntryIdComptimeFloat) { bigfloat_init_64(&dest_val->data.x_bigfloat, x); } else if (dest_val->type->id == TypeTableEntryIdFloat) { switch (dest_val->type->data.floating.bit_count) { + case 16: + dest_val->data.x_f16 = zig_double_to_f16(x); + break; case 32: dest_val->data.x_f32 = x; break; @@ -6846,10 +7822,13 @@ static void float_init_f64(ConstExprValue *dest_val, double x) { } static void float_init_f128(ConstExprValue *dest_val, float128_t x) { - if (dest_val->type->id == TypeTableEntryIdNumLitFloat) { + if (dest_val->type->id == TypeTableEntryIdComptimeFloat) { bigfloat_init_128(&dest_val->data.x_bigfloat, x); } else if (dest_val->type->id == TypeTableEntryIdFloat) { switch (dest_val->type->data.floating.bit_count) { + case 16: + dest_val->data.x_f16 = f128M_to_f16(&x); + break; case 32: { float32_t f32_val = f128M_to_f32(&x); @@ -6876,10 +7855,13 @@ static void float_init_f128(ConstExprValue *dest_val, float128_t x) { } static void float_init_float(ConstExprValue *dest_val, ConstExprValue *src_val) { - if (src_val->type->id == TypeTableEntryIdNumLitFloat) { + if (src_val->type->id == TypeTableEntryIdComptimeFloat) { float_init_bigfloat(dest_val, &src_val->data.x_bigfloat); } else if (src_val->type->id == TypeTableEntryIdFloat) { switch (src_val->type->data.floating.bit_count) { + case 16: + float_init_f16(dest_val, src_val->data.x_f16); + break; case 32: float_init_f32(dest_val, src_val->data.x_f32); break; @@ -6899,10 +7881,18 @@ static void float_init_float(ConstExprValue *dest_val, ConstExprValue *src_val) static Cmp float_cmp(ConstExprValue *op1, ConstExprValue *op2) { assert(op1->type == op2->type); - if (op1->type->id == TypeTableEntryIdNumLitFloat) { + if (op1->type->id == TypeTableEntryIdComptimeFloat) { return bigfloat_cmp(&op1->data.x_bigfloat, &op2->data.x_bigfloat); } else if (op1->type->id == TypeTableEntryIdFloat) { switch (op1->type->data.floating.bit_count) { + case 16: + if (f16_lt(op1->data.x_f16, op2->data.x_f16)) { + return CmpLT; + } else if (f16_lt(op2->data.x_f16, op1->data.x_f16)) { + return CmpGT; + } 
else { + return CmpEQ; + } case 32: if (op1->data.x_f32 > op2->data.x_f32) { return CmpGT; @@ -6936,10 +7926,21 @@ static Cmp float_cmp(ConstExprValue *op1, ConstExprValue *op2) { } static Cmp float_cmp_zero(ConstExprValue *op) { - if (op->type->id == TypeTableEntryIdNumLitFloat) { + if (op->type->id == TypeTableEntryIdComptimeFloat) { return bigfloat_cmp_zero(&op->data.x_bigfloat); } else if (op->type->id == TypeTableEntryIdFloat) { switch (op->type->data.floating.bit_count) { + case 16: + { + const float16_t zero = zig_double_to_f16(0); + if (f16_lt(op->data.x_f16, zero)) { + return CmpLT; + } else if (f16_lt(zero, op->data.x_f16)) { + return CmpGT; + } else { + return CmpEQ; + } + } case 32: if (op->data.x_f32 < 0.0) { return CmpLT; @@ -6977,10 +7978,13 @@ static Cmp float_cmp_zero(ConstExprValue *op) { static void float_add(ConstExprValue *out_val, ConstExprValue *op1, ConstExprValue *op2) { assert(op1->type == op2->type); out_val->type = op1->type; - if (op1->type->id == TypeTableEntryIdNumLitFloat) { + if (op1->type->id == TypeTableEntryIdComptimeFloat) { bigfloat_add(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat); } else if (op1->type->id == TypeTableEntryIdFloat) { switch (op1->type->data.floating.bit_count) { + case 16: + out_val->data.x_f16 = f16_add(op1->data.x_f16, op2->data.x_f16); + return; case 32: out_val->data.x_f32 = op1->data.x_f32 + op2->data.x_f32; return; @@ -7001,10 +8005,13 @@ static void float_add(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal static void float_sub(ConstExprValue *out_val, ConstExprValue *op1, ConstExprValue *op2) { assert(op1->type == op2->type); out_val->type = op1->type; - if (op1->type->id == TypeTableEntryIdNumLitFloat) { + if (op1->type->id == TypeTableEntryIdComptimeFloat) { bigfloat_sub(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat); } else if (op1->type->id == TypeTableEntryIdFloat) { switch (op1->type->data.floating.bit_count) { + case 16: + out_val->data.x_f16 = f16_sub(op1->data.x_f16, op2->data.x_f16); + return; case 32: out_val->data.x_f32 = op1->data.x_f32 - op2->data.x_f32; return; @@ -7025,10 +8032,13 @@ static void float_sub(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal static void float_mul(ConstExprValue *out_val, ConstExprValue *op1, ConstExprValue *op2) { assert(op1->type == op2->type); out_val->type = op1->type; - if (op1->type->id == TypeTableEntryIdNumLitFloat) { + if (op1->type->id == TypeTableEntryIdComptimeFloat) { bigfloat_mul(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat); } else if (op1->type->id == TypeTableEntryIdFloat) { switch (op1->type->data.floating.bit_count) { + case 16: + out_val->data.x_f16 = f16_mul(op1->data.x_f16, op2->data.x_f16); + return; case 32: out_val->data.x_f32 = op1->data.x_f32 * op2->data.x_f32; return; @@ -7049,10 +8059,13 @@ static void float_mul(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal static void float_div(ConstExprValue *out_val, ConstExprValue *op1, ConstExprValue *op2) { assert(op1->type == op2->type); out_val->type = op1->type; - if (op1->type->id == TypeTableEntryIdNumLitFloat) { + if (op1->type->id == TypeTableEntryIdComptimeFloat) { bigfloat_div(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat); } else if (op1->type->id == TypeTableEntryIdFloat) { switch (op1->type->data.floating.bit_count) { + case 16: + out_val->data.x_f16 = f16_div(op1->data.x_f16, op2->data.x_f16); + return; case 32: out_val->data.x_f32 = op1->data.x_f32 / 
op2->data.x_f32; return; @@ -7073,25 +8086,19 @@ static void float_div(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal static void float_div_trunc(ConstExprValue *out_val, ConstExprValue *op1, ConstExprValue *op2) { assert(op1->type == op2->type); out_val->type = op1->type; - if (op1->type->id == TypeTableEntryIdNumLitFloat) { + if (op1->type->id == TypeTableEntryIdComptimeFloat) { bigfloat_div_trunc(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat); } else if (op1->type->id == TypeTableEntryIdFloat) { switch (op1->type->data.floating.bit_count) { + case 16: + out_val->data.x_f16 = f16_div(op1->data.x_f16, op2->data.x_f16); + out_val->data.x_f16 = f16_roundToInt(out_val->data.x_f16, softfloat_round_minMag, false); + return; case 32: - out_val->data.x_f32 = op1->data.x_f32 / op2->data.x_f32; - if (out_val->data.x_f32 >= 0.0) { - out_val->data.x_f32 = floorf(out_val->data.x_f32); - } else { - out_val->data.x_f32 = ceilf(out_val->data.x_f32); - } + out_val->data.x_f32 = truncf(op1->data.x_f32 / op2->data.x_f32); return; case 64: - out_val->data.x_f64 = op1->data.x_f64 / op2->data.x_f64; - if (out_val->data.x_f64 >= 0.0) { - out_val->data.x_f64 = floor(out_val->data.x_f64); - } else { - out_val->data.x_f64 = ceil(out_val->data.x_f64); - } + out_val->data.x_f64 = trunc(op1->data.x_f64 / op2->data.x_f64); return; case 128: f128M_div(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128); @@ -7108,10 +8115,14 @@ static void float_div_trunc(ConstExprValue *out_val, ConstExprValue *op1, ConstE static void float_div_floor(ConstExprValue *out_val, ConstExprValue *op1, ConstExprValue *op2) { assert(op1->type == op2->type); out_val->type = op1->type; - if (op1->type->id == TypeTableEntryIdNumLitFloat) { + if (op1->type->id == TypeTableEntryIdComptimeFloat) { bigfloat_div_floor(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat); } else if (op1->type->id == TypeTableEntryIdFloat) { switch (op1->type->data.floating.bit_count) { + case 16: + out_val->data.x_f16 = f16_div(op1->data.x_f16, op2->data.x_f16); + out_val->data.x_f16 = f16_roundToInt(out_val->data.x_f16, softfloat_round_min, false); + return; case 32: out_val->data.x_f32 = floorf(op1->data.x_f32 / op2->data.x_f32); return; @@ -7133,10 +8144,13 @@ static void float_div_floor(ConstExprValue *out_val, ConstExprValue *op1, ConstE static void float_rem(ConstExprValue *out_val, ConstExprValue *op1, ConstExprValue *op2) { assert(op1->type == op2->type); out_val->type = op1->type; - if (op1->type->id == TypeTableEntryIdNumLitFloat) { + if (op1->type->id == TypeTableEntryIdComptimeFloat) { bigfloat_rem(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat); } else if (op1->type->id == TypeTableEntryIdFloat) { switch (op1->type->data.floating.bit_count) { + case 16: + out_val->data.x_f16 = f16_rem(op1->data.x_f16, op2->data.x_f16); + return; case 32: out_val->data.x_f32 = fmodf(op1->data.x_f32, op2->data.x_f32); return; @@ -7154,13 +8168,34 @@ static void float_rem(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal } } +// c = a - b * trunc(a / b) +static float16_t zig_f16_mod(float16_t a, float16_t b) { + float16_t c; + c = f16_div(a, b); + c = f16_roundToInt(c, softfloat_round_min, true); + c = f16_mul(b, c); + c = f16_sub(a, c); + return c; +} + +// c = a - b * trunc(a / b) +static void zig_f128M_mod(const float128_t* a, const float128_t* b, float128_t* c) { + f128M_div(a, b, c); + f128M_roundToInt(c, softfloat_round_min, true, c); + f128M_mul(b, c, c); + 
f128M_sub(a, c, c); +} + static void float_mod(ConstExprValue *out_val, ConstExprValue *op1, ConstExprValue *op2) { assert(op1->type == op2->type); out_val->type = op1->type; - if (op1->type->id == TypeTableEntryIdNumLitFloat) { + if (op1->type->id == TypeTableEntryIdComptimeFloat) { bigfloat_mod(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat); } else if (op1->type->id == TypeTableEntryIdFloat) { switch (op1->type->data.floating.bit_count) { + case 16: + out_val->data.x_f16 = zig_f16_mod(op1->data.x_f16, op2->data.x_f16); + return; case 32: out_val->data.x_f32 = fmodf(fmodf(op1->data.x_f32, op2->data.x_f32) + op2->data.x_f32, op2->data.x_f32); return; @@ -7168,9 +8203,7 @@ static void float_mod(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal out_val->data.x_f64 = fmod(fmod(op1->data.x_f64, op2->data.x_f64) + op2->data.x_f64, op2->data.x_f64); return; case 128: - f128M_rem(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128); - f128M_add(&out_val->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128); - f128M_rem(&out_val->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128); + zig_f128M_mod(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128); return; default: zig_unreachable(); @@ -7182,10 +8215,16 @@ static void float_mod(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal static void float_negate(ConstExprValue *out_val, ConstExprValue *op) { out_val->type = op->type; - if (op->type->id == TypeTableEntryIdNumLitFloat) { + if (op->type->id == TypeTableEntryIdComptimeFloat) { bigfloat_negate(&out_val->data.x_bigfloat, &op->data.x_bigfloat); } else if (op->type->id == TypeTableEntryIdFloat) { switch (op->type->data.floating.bit_count) { + case 16: + { + const float16_t zero = zig_double_to_f16(0); + out_val->data.x_f16 = f16_sub(zero, op->data.x_f16); + return; + } case 32: out_val->data.x_f32 = -op->data.x_f32; return; @@ -7208,6 +8247,9 @@ static void float_negate(ConstExprValue *out_val, ConstExprValue *op) { void float_write_ieee597(ConstExprValue *op, uint8_t *buf, bool is_big_endian) { if (op->type->id == TypeTableEntryIdFloat) { switch (op->type->data.floating.bit_count) { + case 16: + memcpy(buf, &op->data.x_f16, 2); // TODO wrong when compiler is big endian + return; case 32: memcpy(buf, &op->data.x_f32, 4); // TODO wrong when compiler is big endian return; @@ -7228,6 +8270,9 @@ void float_write_ieee597(ConstExprValue *op, uint8_t *buf, bool is_big_endian) { void float_read_ieee597(ConstExprValue *val, uint8_t *buf, bool is_big_endian) { if (val->type->id == TypeTableEntryIdFloat) { switch (val->type->data.floating.bit_count) { + case 16: + memcpy(&val->data.x_f16, buf, 2); // TODO wrong when compiler is big endian + return; case 32: memcpy(&val->data.x_f32, buf, 4); // TODO wrong when compiler is big endian return; @@ -7256,9 +8301,9 @@ static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, IrInstruction *instruc assert(const_val->special != ConstValSpecialRuntime); bool const_val_is_int = (const_val->type->id == TypeTableEntryIdInt || - const_val->type->id == TypeTableEntryIdNumLitInt); + const_val->type->id == TypeTableEntryIdComptimeInt); bool const_val_is_float = (const_val->type->id == TypeTableEntryIdFloat || - const_val->type->id == TypeTableEntryIdNumLitFloat); + const_val->type->id == TypeTableEntryIdComptimeFloat); if (other_type->id == TypeTableEntryIdFloat) { return true; } else if (other_type->id == TypeTableEntryIdInt && const_val_is_int) { @@ -7278,7 +8323,7 @@ static bool 
ir_num_lit_fits_in_other_type(IrAnalyze *ira, IrInstruction *instruc } } else if (const_val_fits_in_num_lit(const_val, other_type)) { return true; - } else if (other_type->id == TypeTableEntryIdMaybe) { + } else if (other_type->id == TypeTableEntryIdOptional) { TypeTableEntry *child_type = other_type->data.maybe.child_type; if (const_val_fits_in_num_lit(const_val, child_type)) { return true; @@ -7302,7 +8347,7 @@ static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, IrInstruction *instruc return true; } } - if (explicit_cast && (other_type->id == TypeTableEntryIdInt || other_type->id == TypeTableEntryIdNumLitInt) && + if (explicit_cast && (other_type->id == TypeTableEntryIdInt || other_type->id == TypeTableEntryIdComptimeInt) && const_val_is_float) { if (float_has_fraction(const_val)) { @@ -7315,7 +8360,7 @@ static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, IrInstruction *instruc buf_ptr(&other_type->name))); return false; } else { - if (other_type->id == TypeTableEntryIdNumLitInt) { + if (other_type->id == TypeTableEntryIdComptimeInt) { return true; } else { BigInt bigint; @@ -7356,38 +8401,16 @@ static bool slice_is_const(TypeTableEntry *type) { return type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.is_const; } -static bool resolve_inferred_error_set(IrAnalyze *ira, TypeTableEntry *err_set_type, AstNode *source_node) { - assert(err_set_type->id == TypeTableEntryIdErrorSet); - FnTableEntry *infer_fn = err_set_type->data.error_set.infer_fn; - if (infer_fn != nullptr) { - if (infer_fn->anal_state == FnAnalStateInvalid) { - return false; - } else if (infer_fn->anal_state == FnAnalStateReady) { - analyze_fn_body(ira->codegen, infer_fn); - if (err_set_type->data.error_set.infer_fn != nullptr) { - assert(ira->codegen->errors.length != 0); - return false; - } - } else { - ir_add_error_node(ira, source_node, - buf_sprintf("cannot resolve inferred error set '%s': function '%s' not fully analyzed yet", - buf_ptr(&err_set_type->name), buf_ptr(&err_set_type->data.error_set.infer_fn->symbol_name))); - return false; - } - } - return true; -} - static TypeTableEntry *get_error_set_intersection(IrAnalyze *ira, TypeTableEntry *set1, TypeTableEntry *set2, AstNode *source_node) { assert(set1->id == TypeTableEntryIdErrorSet); assert(set2->id == TypeTableEntryIdErrorSet); - if (!resolve_inferred_error_set(ira, set1, source_node)) { + if (!resolve_inferred_error_set(ira->codegen, set1, source_node)) { return ira->codegen->builtin_types.entry_invalid; } - if (!resolve_inferred_error_set(ira, set2, source_node)) { + if (!resolve_inferred_error_set(ira->codegen, set2, source_node)) { return ira->codegen->builtin_types.entry_invalid; } if (type_is_global_error_set(set1)) { @@ -7433,89 +8456,122 @@ static TypeTableEntry *get_error_set_intersection(IrAnalyze *ira, TypeTableEntry } -static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry *expected_type, - TypeTableEntry *actual_type, AstNode *source_node) +static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry *wanted_type, + TypeTableEntry *actual_type, AstNode *source_node, bool wanted_is_mutable) { CodeGen *g = ira->codegen; ConstCastOnly result = {}; result.id = ConstCastResultIdOk; - if (expected_type == actual_type) + if (wanted_type == actual_type) return result; - // pointer const - if (expected_type->id == TypeTableEntryIdPointer && - actual_type->id == TypeTableEntryIdPointer && - (!actual_type->data.pointer.is_const || expected_type->data.pointer.is_const) && - 
(!actual_type->data.pointer.is_volatile || expected_type->data.pointer.is_volatile) && - actual_type->data.pointer.bit_offset == expected_type->data.pointer.bit_offset && - actual_type->data.pointer.unaligned_bit_count == expected_type->data.pointer.unaligned_bit_count && - actual_type->data.pointer.alignment >= expected_type->data.pointer.alignment) + // * and [*] can do a const-cast-only to ?* and ?[*], respectively + // but not if there is a mutable parent pointer + // and not if the pointer is zero bits + if (!wanted_is_mutable && wanted_type->id == TypeTableEntryIdOptional && + wanted_type->data.maybe.child_type->id == TypeTableEntryIdPointer && + actual_type->id == TypeTableEntryIdPointer && type_has_bits(actual_type)) { - ConstCastOnly child = types_match_const_cast_only(ira, expected_type->data.pointer.child_type, actual_type->data.pointer.child_type, source_node); + ConstCastOnly child = types_match_const_cast_only(ira, + wanted_type->data.maybe.child_type, actual_type, source_node, wanted_is_mutable); if (child.id != ConstCastResultIdOk) { - result.id = ConstCastResultIdPointerChild; - result.data.pointer_child = allocate_nonzero<ConstCastOnly>(1); - *result.data.pointer_child = child; + result.id = ConstCastResultIdNullWrapPtr; + result.data.null_wrap_ptr_child = allocate_nonzero<ConstCastOnly>(1); + *result.data.null_wrap_ptr_child = child; } return result; } + // pointer const + if (wanted_type->id == TypeTableEntryIdPointer && actual_type->id == TypeTableEntryIdPointer) { + ConstCastOnly child = types_match_const_cast_only(ira, wanted_type->data.pointer.child_type, + actual_type->data.pointer.child_type, source_node, !wanted_type->data.pointer.is_const); + if (child.id != ConstCastResultIdOk) { + result.id = ConstCastResultIdPointerChild; + result.data.pointer_mismatch = allocate_nonzero<ConstCastPointerMismatch>(1); + result.data.pointer_mismatch->child = child; + result.data.pointer_mismatch->wanted_child = wanted_type->data.pointer.child_type; + result.data.pointer_mismatch->actual_child = actual_type->data.pointer.child_type; + return result; + } + if ((actual_type->data.pointer.ptr_len == wanted_type->data.pointer.ptr_len) && + (!actual_type->data.pointer.is_const || wanted_type->data.pointer.is_const) && + (!actual_type->data.pointer.is_volatile || wanted_type->data.pointer.is_volatile) && + actual_type->data.pointer.bit_offset == wanted_type->data.pointer.bit_offset && + actual_type->data.pointer.unaligned_bit_count == wanted_type->data.pointer.unaligned_bit_count && + actual_type->data.pointer.alignment >= wanted_type->data.pointer.alignment) + { + return result; + } + } + // slice const - if (is_slice(expected_type) && is_slice(actual_type)) { + if (is_slice(wanted_type) && is_slice(actual_type)) { TypeTableEntry *actual_ptr_type = actual_type->data.structure.fields[slice_ptr_index].type_entry; - TypeTableEntry *expected_ptr_type = expected_type->data.structure.fields[slice_ptr_index].type_entry; - if ((!actual_ptr_type->data.pointer.is_const || expected_ptr_type->data.pointer.is_const) && - (!actual_ptr_type->data.pointer.is_volatile || expected_ptr_type->data.pointer.is_volatile) && - actual_ptr_type->data.pointer.bit_offset == expected_ptr_type->data.pointer.bit_offset && - actual_ptr_type->data.pointer.unaligned_bit_count == expected_ptr_type->data.pointer.unaligned_bit_count && - actual_ptr_type->data.pointer.alignment >= expected_ptr_type->data.pointer.alignment) + TypeTableEntry *wanted_ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry; 
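The "mutable parent pointer" restriction in the new comment above mirrors the classic C++ rule: a T* may implicitly gain const (T* to const T*), but T** must not convert to const T**, because a mutable parent pointer would let const data be reached through a non-const pointer. A small illustration, not compiler code, with the unsound lines left commented out:

int main() {
    const int c = 42;
    int *p = nullptr;
    int **pp = &p;               // mutable parent pointer

    // const int **cpp = pp;     // rejected by the compiler: if it were allowed...
    // *cpp = &c;                // ...p would then point at the const data,
    // *p = 0;                   // ...and this would mutate c.

    (void)c;
    (void)pp;
    return 0;
}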
+ if ((!actual_ptr_type->data.pointer.is_const || wanted_ptr_type->data.pointer.is_const) && + (!actual_ptr_type->data.pointer.is_volatile || wanted_ptr_type->data.pointer.is_volatile) && + actual_ptr_type->data.pointer.bit_offset == wanted_ptr_type->data.pointer.bit_offset && + actual_ptr_type->data.pointer.unaligned_bit_count == wanted_ptr_type->data.pointer.unaligned_bit_count && + actual_ptr_type->data.pointer.alignment >= wanted_ptr_type->data.pointer.alignment) { - ConstCastOnly child = types_match_const_cast_only(ira, expected_ptr_type->data.pointer.child_type, - actual_ptr_type->data.pointer.child_type, source_node); + ConstCastOnly child = types_match_const_cast_only(ira, wanted_ptr_type->data.pointer.child_type, + actual_ptr_type->data.pointer.child_type, source_node, !wanted_ptr_type->data.pointer.is_const); if (child.id != ConstCastResultIdOk) { result.id = ConstCastResultIdSliceChild; - result.data.slice_child = allocate_nonzero<ConstCastOnly>(1); - *result.data.slice_child = child; + result.data.slice_mismatch = allocate_nonzero<ConstCastSliceMismatch>(1); + result.data.slice_mismatch->child = child; + result.data.slice_mismatch->actual_child = actual_ptr_type->data.pointer.child_type; + result.data.slice_mismatch->wanted_child = wanted_ptr_type->data.pointer.child_type; } return result; } } // maybe - if (expected_type->id == TypeTableEntryIdMaybe && actual_type->id == TypeTableEntryIdMaybe) { - ConstCastOnly child = types_match_const_cast_only(ira, expected_type->data.maybe.child_type, actual_type->data.maybe.child_type, source_node); + if (wanted_type->id == TypeTableEntryIdOptional && actual_type->id == TypeTableEntryIdOptional) { + ConstCastOnly child = types_match_const_cast_only(ira, wanted_type->data.maybe.child_type, + actual_type->data.maybe.child_type, source_node, wanted_is_mutable); if (child.id != ConstCastResultIdOk) { - result.id = ConstCastResultIdNullableChild; - result.data.nullable_child = allocate_nonzero<ConstCastOnly>(1); - *result.data.nullable_child = child; + result.id = ConstCastResultIdOptionalChild; + result.data.optional = allocate_nonzero<ConstCastOptionalMismatch>(1); + result.data.optional->child = child; + result.data.optional->wanted_child = wanted_type->data.maybe.child_type; + result.data.optional->actual_child = actual_type->data.maybe.child_type; } return result; } // error union - if (expected_type->id == TypeTableEntryIdErrorUnion && actual_type->id == TypeTableEntryIdErrorUnion) { - ConstCastOnly payload_child = types_match_const_cast_only(ira, expected_type->data.error_union.payload_type, actual_type->data.error_union.payload_type, source_node); + if (wanted_type->id == TypeTableEntryIdErrorUnion && actual_type->id == TypeTableEntryIdErrorUnion) { + ConstCastOnly payload_child = types_match_const_cast_only(ira, wanted_type->data.error_union.payload_type, + actual_type->data.error_union.payload_type, source_node, wanted_is_mutable); if (payload_child.id != ConstCastResultIdOk) { result.id = ConstCastResultIdErrorUnionPayload; - result.data.error_union_payload = allocate_nonzero<ConstCastOnly>(1); - *result.data.error_union_payload = payload_child; + result.data.error_union_payload = allocate_nonzero<ConstCastErrUnionPayloadMismatch>(1); + result.data.error_union_payload->child = payload_child; + result.data.error_union_payload->wanted_payload = wanted_type->data.error_union.payload_type; + result.data.error_union_payload->actual_payload = actual_type->data.error_union.payload_type; return result; } - ConstCastOnly error_set_child = 
types_match_const_cast_only(ira, expected_type->data.error_union.err_set_type, actual_type->data.error_union.err_set_type, source_node); + ConstCastOnly error_set_child = types_match_const_cast_only(ira, wanted_type->data.error_union.err_set_type, + actual_type->data.error_union.err_set_type, source_node, wanted_is_mutable); if (error_set_child.id != ConstCastResultIdOk) { result.id = ConstCastResultIdErrorUnionErrorSet; - result.data.error_union_error_set = allocate_nonzero<ConstCastOnly>(1); - *result.data.error_union_error_set = error_set_child; + result.data.error_union_error_set = allocate_nonzero<ConstCastErrUnionErrSetMismatch>(1); + result.data.error_union_error_set->child = error_set_child; + result.data.error_union_error_set->wanted_err_set = wanted_type->data.error_union.err_set_type; + result.data.error_union_error_set->actual_err_set = actual_type->data.error_union.err_set_type; return result; } return result; } // error set - if (expected_type->id == TypeTableEntryIdErrorSet && actual_type->id == TypeTableEntryIdErrorSet) { + if (wanted_type->id == TypeTableEntryIdErrorSet && actual_type->id == TypeTableEntryIdErrorSet) { TypeTableEntry *contained_set = actual_type; - TypeTableEntry *container_set = expected_type; + TypeTableEntry *container_set = wanted_type; // if the container set is inferred, then this will always work. if (container_set->data.error_set.infer_fn != nullptr) { @@ -7526,7 +8582,7 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry return result; } - if (!resolve_inferred_error_set(ira, contained_set, source_node)) { + if (!resolve_inferred_error_set(ira->codegen, contained_set, source_node)) { result.id = ConstCastResultIdUnresolvedInferredErrSet; return result; } @@ -7548,44 +8604,46 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry if (error_entry == nullptr) { if (result.id == ConstCastResultIdOk) { result.id = ConstCastResultIdErrSet; + result.data.error_set_mismatch = allocate<ConstCastErrSetMismatch>(1); } - result.data.error_set.missing_errors.append(contained_error_entry); + result.data.error_set_mismatch->missing_errors.append(contained_error_entry); } } free(errors); return result; } - if (expected_type == ira->codegen->builtin_types.entry_promise && + if (wanted_type == ira->codegen->builtin_types.entry_promise && actual_type->id == TypeTableEntryIdPromise) { return result; } // fn - if (expected_type->id == TypeTableEntryIdFn && + if (wanted_type->id == TypeTableEntryIdFn && actual_type->id == TypeTableEntryIdFn) { - if (expected_type->data.fn.fn_type_id.alignment > actual_type->data.fn.fn_type_id.alignment) { + if (wanted_type->data.fn.fn_type_id.alignment > actual_type->data.fn.fn_type_id.alignment) { result.id = ConstCastResultIdFnAlign; return result; } - if (expected_type->data.fn.fn_type_id.cc != actual_type->data.fn.fn_type_id.cc) { + if (wanted_type->data.fn.fn_type_id.cc != actual_type->data.fn.fn_type_id.cc) { result.id = ConstCastResultIdFnCC; return result; } - if (expected_type->data.fn.fn_type_id.is_var_args != actual_type->data.fn.fn_type_id.is_var_args) { + if (wanted_type->data.fn.fn_type_id.is_var_args != actual_type->data.fn.fn_type_id.is_var_args) { result.id = ConstCastResultIdFnVarArgs; return result; } - if (expected_type->data.fn.is_generic != actual_type->data.fn.is_generic) { + if (wanted_type->data.fn.is_generic != actual_type->data.fn.is_generic) { result.id = ConstCastResultIdFnIsGeneric; return result; } - if (!expected_type->data.fn.is_generic && + if 
(!wanted_type->data.fn.is_generic && actual_type->data.fn.fn_type_id.return_type->id != TypeTableEntryIdUnreachable) { - ConstCastOnly child = types_match_const_cast_only(ira, expected_type->data.fn.fn_type_id.return_type, actual_type->data.fn.fn_type_id.return_type, source_node); + ConstCastOnly child = types_match_const_cast_only(ira, wanted_type->data.fn.fn_type_id.return_type, + actual_type->data.fn.fn_type_id.return_type, source_node, false); if (child.id != ConstCastResultIdOk) { result.id = ConstCastResultIdFnReturnType; result.data.return_type = allocate_nonzero<ConstCastOnly>(1); @@ -7593,9 +8651,11 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry return result; } } - if (!expected_type->data.fn.is_generic && expected_type->data.fn.fn_type_id.cc == CallingConventionAsync) { - ConstCastOnly child = types_match_const_cast_only(ira, actual_type->data.fn.fn_type_id.async_allocator_type, - expected_type->data.fn.fn_type_id.async_allocator_type, source_node); + if (!wanted_type->data.fn.is_generic && wanted_type->data.fn.fn_type_id.cc == CallingConventionAsync) { + ConstCastOnly child = types_match_const_cast_only(ira, + actual_type->data.fn.fn_type_id.async_allocator_type, + wanted_type->data.fn.fn_type_id.async_allocator_type, + source_node, false); if (child.id != ConstCastResultIdOk) { result.id = ConstCastResultIdAsyncAllocatorType; result.data.async_allocator_type = allocate_nonzero<ConstCastOnly>(1); @@ -7603,22 +8663,23 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry return result; } } - if (expected_type->data.fn.fn_type_id.param_count != actual_type->data.fn.fn_type_id.param_count) { + if (wanted_type->data.fn.fn_type_id.param_count != actual_type->data.fn.fn_type_id.param_count) { result.id = ConstCastResultIdFnArgCount; return result; } - if (expected_type->data.fn.fn_type_id.next_param_index != actual_type->data.fn.fn_type_id.next_param_index) { + if (wanted_type->data.fn.fn_type_id.next_param_index != actual_type->data.fn.fn_type_id.next_param_index) { result.id = ConstCastResultIdFnGenericArgCount; return result; } - assert(expected_type->data.fn.is_generic || - expected_type->data.fn.fn_type_id.next_param_index == expected_type->data.fn.fn_type_id.param_count); - for (size_t i = 0; i < expected_type->data.fn.fn_type_id.next_param_index; i += 1) { + assert(wanted_type->data.fn.is_generic || + wanted_type->data.fn.fn_type_id.next_param_index == wanted_type->data.fn.fn_type_id.param_count); + for (size_t i = 0; i < wanted_type->data.fn.fn_type_id.next_param_index; i += 1) { // note it's reversed for parameters FnTypeParamInfo *actual_param_info = &actual_type->data.fn.fn_type_id.param_info[i]; - FnTypeParamInfo *expected_param_info = &expected_type->data.fn.fn_type_id.param_info[i]; + FnTypeParamInfo *expected_param_info = &wanted_type->data.fn.fn_type_id.param_info[i]; - ConstCastOnly arg_child = types_match_const_cast_only(ira, actual_param_info->type, expected_param_info->type, source_node); + ConstCastOnly arg_child = types_match_const_cast_only(ira, actual_param_info->type, + expected_param_info->type, source_node, false); if (arg_child.id != ConstCastResultIdOk) { result.id = ConstCastResultIdFnArg; result.data.fn_arg.arg_index = i; @@ -7637,271 +8698,19 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry } result.id = ConstCastResultIdType; + result.data.type_mismatch = allocate_nonzero<ConstCastTypeMismatch>(1); + result.data.type_mismatch->wanted_type = wanted_type; 
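The parameter loop above is checked in the opposite direction from the return type ("note it's reversed for parameters"): return types are treated covariantly and parameter types contravariantly. A hedged C++ sketch of the same variance idea using std::function, purely illustrative and not how the compiler models function types:

#include <functional>

int global_value;

int main() {
    // Covariant return: a callable returning int* is acceptable where a
    // const int* result is wanted; the caller only loses mutability.
    std::function<const int *()> get = []() -> int * { return &global_value; };

    // Contravariant parameter: a callable accepting const int* is acceptable
    // where a function taking int* is wanted; it demands less of its argument.
    std::function<void(int *)> put = [](const int *) {};

    const int *p = get();
    put(&global_value);
    (void)p;
    return 0;
}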
+ result.data.type_mismatch->actual_type = actual_type; return result; } -enum ImplicitCastMatchResult { - ImplicitCastMatchResultNo, - ImplicitCastMatchResultYes, - ImplicitCastMatchResultReportedError, -}; - -static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira, TypeTableEntry *expected_type, - TypeTableEntry *actual_type, IrInstruction *value) -{ - AstNode *source_node = value->source_node; - ConstCastOnly const_cast_result = types_match_const_cast_only(ira, expected_type, actual_type, source_node); - if (const_cast_result.id == ConstCastResultIdOk) { - return ImplicitCastMatchResultYes; - } - - // if we got here with error sets, make an error showing the incompatibilities - ZigList<ErrorTableEntry *> *missing_errors = nullptr; - if (const_cast_result.id == ConstCastResultIdErrSet) { - missing_errors = &const_cast_result.data.error_set.missing_errors; - } - if (const_cast_result.id == ConstCastResultIdErrorUnionErrorSet) { - if (const_cast_result.data.error_union_error_set->id == ConstCastResultIdErrSet) { - missing_errors = &const_cast_result.data.error_union_error_set->data.error_set.missing_errors; - } else if (const_cast_result.data.error_union_error_set->id == ConstCastResultIdErrSetGlobal) { - ErrorMsg *msg = ir_add_error(ira, value, - buf_sprintf("expected '%s', found '%s'", buf_ptr(&expected_type->name), buf_ptr(&actual_type->name))); - add_error_note(ira->codegen, msg, value->source_node, - buf_sprintf("unable to cast global error set into smaller set")); - return ImplicitCastMatchResultReportedError; - } - } else if (const_cast_result.id == ConstCastResultIdErrSetGlobal) { - ErrorMsg *msg = ir_add_error(ira, value, - buf_sprintf("expected '%s', found '%s'", buf_ptr(&expected_type->name), buf_ptr(&actual_type->name))); - add_error_note(ira->codegen, msg, value->source_node, - buf_sprintf("unable to cast global error set into smaller set")); - return ImplicitCastMatchResultReportedError; - } - if (missing_errors != nullptr) { - ErrorMsg *msg = ir_add_error(ira, value, - buf_sprintf("expected '%s', found '%s'", buf_ptr(&expected_type->name), buf_ptr(&actual_type->name))); - for (size_t i = 0; i < missing_errors->length; i += 1) { - ErrorTableEntry *error_entry = missing_errors->at(i); - add_error_note(ira->codegen, msg, error_entry->decl_node, - buf_sprintf("'error.%s' not a member of destination error set", buf_ptr(&error_entry->name))); - } - - return ImplicitCastMatchResultReportedError; - } - - // implicit conversion from non maybe type to maybe type - if (expected_type->id == TypeTableEntryIdMaybe && - ir_types_match_with_implicit_cast(ira, expected_type->data.maybe.child_type, actual_type, value)) - { - return ImplicitCastMatchResultYes; - } - - // implicit conversion from null literal to maybe type - if (expected_type->id == TypeTableEntryIdMaybe && - actual_type->id == TypeTableEntryIdNullLit) - { - return ImplicitCastMatchResultYes; - } - - // implicit T to U!T - if (expected_type->id == TypeTableEntryIdErrorUnion && - ir_types_match_with_implicit_cast(ira, expected_type->data.error_union.payload_type, actual_type, value)) - { - return ImplicitCastMatchResultYes; - } - - // implicit conversion from error set to error union type - if (expected_type->id == TypeTableEntryIdErrorUnion && - actual_type->id == TypeTableEntryIdErrorSet) - { - return ImplicitCastMatchResultYes; - } - - // implicit conversion from T to U!?T - if (expected_type->id == TypeTableEntryIdErrorUnion && - expected_type->data.error_union.payload_type->id == 
TypeTableEntryIdMaybe && - ir_types_match_with_implicit_cast(ira, - expected_type->data.error_union.payload_type->data.maybe.child_type, - actual_type, value)) - { - return ImplicitCastMatchResultYes; - } - - // implicit widening conversion - if (expected_type->id == TypeTableEntryIdInt && - actual_type->id == TypeTableEntryIdInt && - expected_type->data.integral.is_signed == actual_type->data.integral.is_signed && - expected_type->data.integral.bit_count >= actual_type->data.integral.bit_count) - { - return ImplicitCastMatchResultYes; - } - - // small enough unsigned ints can get casted to large enough signed ints - if (expected_type->id == TypeTableEntryIdInt && expected_type->data.integral.is_signed && - actual_type->id == TypeTableEntryIdInt && !actual_type->data.integral.is_signed && - expected_type->data.integral.bit_count > actual_type->data.integral.bit_count) - { - return ImplicitCastMatchResultYes; - } - - // implicit float widening conversion - if (expected_type->id == TypeTableEntryIdFloat && - actual_type->id == TypeTableEntryIdFloat && - expected_type->data.floating.bit_count >= actual_type->data.floating.bit_count) - { - return ImplicitCastMatchResultYes; - } - - // implicit [N]T to []const T - if (is_slice(expected_type) && actual_type->id == TypeTableEntryIdArray) { - TypeTableEntry *ptr_type = expected_type->data.structure.fields[slice_ptr_index].type_entry; - assert(ptr_type->id == TypeTableEntryIdPointer); - - if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) && - types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node).id == ConstCastResultIdOk) - { - return ImplicitCastMatchResultYes; - } - } - - // implicit &const [N]T to []const T - if (is_slice(expected_type) && - actual_type->id == TypeTableEntryIdPointer && - actual_type->data.pointer.is_const && - actual_type->data.pointer.child_type->id == TypeTableEntryIdArray) - { - TypeTableEntry *ptr_type = expected_type->data.structure.fields[slice_ptr_index].type_entry; - assert(ptr_type->id == TypeTableEntryIdPointer); - - TypeTableEntry *array_type = actual_type->data.pointer.child_type; - - if ((ptr_type->data.pointer.is_const || array_type->data.array.len == 0) && - types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, array_type->data.array.child_type, source_node).id == ConstCastResultIdOk) - { - return ImplicitCastMatchResultYes; - } - } - - // implicit [N]T to &const []const T - if (expected_type->id == TypeTableEntryIdPointer && - expected_type->data.pointer.is_const && - is_slice(expected_type->data.pointer.child_type) && - actual_type->id == TypeTableEntryIdArray) - { - TypeTableEntry *ptr_type = - expected_type->data.pointer.child_type->data.structure.fields[slice_ptr_index].type_entry; - assert(ptr_type->id == TypeTableEntryIdPointer); - if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) && - types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node).id == ConstCastResultIdOk) - { - return ImplicitCastMatchResultYes; - } - } - - // implicit [N]T to ?[]const T - if (expected_type->id == TypeTableEntryIdMaybe && - is_slice(expected_type->data.maybe.child_type) && - actual_type->id == TypeTableEntryIdArray) - { - TypeTableEntry *ptr_type = - expected_type->data.maybe.child_type->data.structure.fields[slice_ptr_index].type_entry; - assert(ptr_type->id == TypeTableEntryIdPointer); - if ((ptr_type->data.pointer.is_const || 
actual_type->data.array.len == 0) && - types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node).id == ConstCastResultIdOk) - { - return ImplicitCastMatchResultYes; - } - } - - - // implicit number literal to typed number - // implicit number literal to &const integer - if (actual_type->id == TypeTableEntryIdNumLitFloat || - actual_type->id == TypeTableEntryIdNumLitInt) - { - if (expected_type->id == TypeTableEntryIdPointer && - expected_type->data.pointer.is_const) - { - if (ir_num_lit_fits_in_other_type(ira, value, expected_type->data.pointer.child_type, false)) { - return ImplicitCastMatchResultYes; - } else { - return ImplicitCastMatchResultReportedError; - } - } else if (ir_num_lit_fits_in_other_type(ira, value, expected_type, false)) { - return ImplicitCastMatchResultYes; - } else { - return ImplicitCastMatchResultReportedError; - } - } - - // implicit typed number to integer or float literal. - // works when the number is known - if (value->value.special == ConstValSpecialStatic) { - if (actual_type->id == TypeTableEntryIdInt && expected_type->id == TypeTableEntryIdNumLitInt) { - return ImplicitCastMatchResultYes; - } else if (actual_type->id == TypeTableEntryIdFloat && expected_type->id == TypeTableEntryIdNumLitFloat) { - return ImplicitCastMatchResultYes; - } - } - - // implicit union to its enum tag type - if (expected_type->id == TypeTableEntryIdEnum && actual_type->id == TypeTableEntryIdUnion && - (actual_type->data.unionation.decl_node->data.container_decl.auto_enum || - actual_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr)) - { - type_ensure_zero_bits_known(ira->codegen, actual_type); - if (actual_type->data.unionation.tag_type == expected_type) { - return ImplicitCastMatchResultYes; - } - } - - // implicit enum to union which has the enum as the tag type - if (expected_type->id == TypeTableEntryIdUnion && actual_type->id == TypeTableEntryIdEnum && - (expected_type->data.unionation.decl_node->data.container_decl.auto_enum || - expected_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr)) - { - type_ensure_zero_bits_known(ira->codegen, expected_type); - if (expected_type->data.unionation.tag_type == actual_type) { - return ImplicitCastMatchResultYes; - } - } - - // implicit enum to &const union which has the enum as the tag type - if (actual_type->id == TypeTableEntryIdEnum && expected_type->id == TypeTableEntryIdPointer) { - TypeTableEntry *union_type = expected_type->data.pointer.child_type; - if (union_type->data.unionation.decl_node->data.container_decl.auto_enum || - union_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr) - { - type_ensure_zero_bits_known(ira->codegen, union_type); - if (union_type->data.unionation.tag_type == actual_type) { - return ImplicitCastMatchResultYes; - } - } - } - - // implicit undefined literal to anything - if (actual_type->id == TypeTableEntryIdUndefLit) { - return ImplicitCastMatchResultYes; - } - - // implicitly take a const pointer to something - if (!type_requires_comptime(actual_type)) { - TypeTableEntry *const_ptr_actual = get_pointer_to_type(ira->codegen, actual_type, true); - if (types_match_const_cast_only(ira, expected_type, const_ptr_actual, source_node).id == ConstCastResultIdOk) { - return ImplicitCastMatchResultYes; - } - } - - return ImplicitCastMatchResultNo; -} - static void update_errors_helper(CodeGen *g, ErrorTableEntry ***errors, size_t *errors_count) { size_t old_errors_count 
= *errors_count; *errors_count = g->errors_by_index.length; *errors = reallocate(*errors, old_errors_count, *errors_count); } -static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_node, IrInstruction **instructions, size_t instruction_count) { +static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_node, TypeTableEntry *expected_type, IrInstruction **instructions, size_t instruction_count) { assert(instruction_count >= 1); IrInstruction *prev_inst = instructions[0]; if (type_is_invalid(prev_inst->value.type)) { @@ -7915,7 +8724,7 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod err_set_type = ira->codegen->builtin_types.entry_global_error_set; } else { err_set_type = prev_inst->value.type; - if (!resolve_inferred_error_set(ira, err_set_type, prev_inst->source_node)) { + if (!resolve_inferred_error_set(ira->codegen, err_set_type, prev_inst->source_node)) { return ira->codegen->builtin_types.entry_invalid; } update_errors_helper(ira->codegen, &errors, &errors_count); @@ -7928,7 +8737,7 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod } } - bool any_are_null = (prev_inst->value.type->id == TypeTableEntryIdNullLit); + bool any_are_null = (prev_inst->value.type->id == TypeTableEntryIdNull); bool convert_to_const_slice = false; for (size_t i = 1; i < instruction_count; i += 1) { IrInstruction *cur_inst = instructions[i]; @@ -7948,23 +8757,13 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod continue; } - if (prev_type->id == TypeTableEntryIdNullLit) { - prev_inst = cur_inst; - continue; - } - - if (cur_type->id == TypeTableEntryIdNullLit) { - any_are_null = true; - continue; - } - if (prev_type->id == TypeTableEntryIdErrorSet) { assert(err_set_type != nullptr); if (cur_type->id == TypeTableEntryIdErrorSet) { if (type_is_global_error_set(err_set_type)) { continue; } - if (!resolve_inferred_error_set(ira, cur_type, cur_inst->source_node)) { + if (!resolve_inferred_error_set(ira->codegen, cur_type, cur_inst->source_node)) { return ira->codegen->builtin_types.entry_invalid; } if (type_is_global_error_set(cur_type)) { @@ -8030,7 +8829,7 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod continue; } TypeTableEntry *cur_err_set_type = cur_type->data.error_union.err_set_type; - if (!resolve_inferred_error_set(ira, cur_err_set_type, cur_inst->source_node)) { + if (!resolve_inferred_error_set(ira->codegen, cur_err_set_type, cur_inst->source_node)) { return ira->codegen->builtin_types.entry_invalid; } if (type_is_global_error_set(cur_err_set_type)) { @@ -8093,7 +8892,7 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod if (err_set_type != nullptr && type_is_global_error_set(err_set_type)) { continue; } - if (!resolve_inferred_error_set(ira, cur_type, cur_inst->source_node)) { + if (!resolve_inferred_error_set(ira->codegen, cur_type, cur_inst->source_node)) { return ira->codegen->builtin_types.entry_invalid; } @@ -8138,9 +8937,9 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod TypeTableEntry *cur_payload_type = cur_type->data.error_union.payload_type; bool const_cast_prev = types_match_const_cast_only(ira, prev_payload_type, cur_payload_type, - source_node).id == ConstCastResultIdOk; + source_node, false).id == ConstCastResultIdOk; bool const_cast_cur = types_match_const_cast_only(ira, cur_payload_type, prev_payload_type, - source_node).id == 
ConstCastResultIdOk; + source_node, false).id == ConstCastResultIdOk; if (const_cast_prev || const_cast_cur) { if (const_cast_cur) { @@ -8150,11 +8949,11 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod TypeTableEntry *prev_err_set_type = (err_set_type == nullptr) ? prev_type->data.error_union.err_set_type : err_set_type; TypeTableEntry *cur_err_set_type = cur_type->data.error_union.err_set_type; - if (!resolve_inferred_error_set(ira, prev_err_set_type, cur_inst->source_node)) { + if (!resolve_inferred_error_set(ira->codegen, prev_err_set_type, cur_inst->source_node)) { return ira->codegen->builtin_types.entry_invalid; } - if (!resolve_inferred_error_set(ira, cur_err_set_type, cur_inst->source_node)) { + if (!resolve_inferred_error_set(ira->codegen, cur_err_set_type, cur_inst->source_node)) { return ira->codegen->builtin_types.entry_invalid; } @@ -8217,11 +9016,21 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod } } - if (types_match_const_cast_only(ira, prev_type, cur_type, source_node).id == ConstCastResultIdOk) { + if (prev_type->id == TypeTableEntryIdNull) { + prev_inst = cur_inst; + continue; + } + + if (cur_type->id == TypeTableEntryIdNull) { + any_are_null = true; + continue; + } + + if (types_match_const_cast_only(ira, prev_type, cur_type, source_node, false).id == ConstCastResultIdOk) { continue; } - if (types_match_const_cast_only(ira, cur_type, prev_type, source_node).id == ConstCastResultIdOk) { + if (types_match_const_cast_only(ira, cur_type, prev_type, source_node, false).id == ConstCastResultIdOk) { prev_inst = cur_inst; continue; } @@ -8244,17 +9053,19 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod } if (prev_type->id == TypeTableEntryIdErrorUnion && - types_match_const_cast_only(ira, prev_type->data.error_union.payload_type, cur_type, source_node).id == ConstCastResultIdOk) + types_match_const_cast_only(ira, prev_type->data.error_union.payload_type, cur_type, + source_node, false).id == ConstCastResultIdOk) { continue; } if (cur_type->id == TypeTableEntryIdErrorUnion && - types_match_const_cast_only(ira, cur_type->data.error_union.payload_type, prev_type, source_node).id == ConstCastResultIdOk) + types_match_const_cast_only(ira, cur_type->data.error_union.payload_type, prev_type, + source_node, false).id == ConstCastResultIdOk) { if (err_set_type != nullptr) { TypeTableEntry *cur_err_set_type = cur_type->data.error_union.err_set_type; - if (!resolve_inferred_error_set(ira, cur_err_set_type, cur_inst->source_node)) { + if (!resolve_inferred_error_set(ira->codegen, cur_err_set_type, cur_inst->source_node)) { return ira->codegen->builtin_types.entry_invalid; } if (type_is_global_error_set(cur_err_set_type) || type_is_global_error_set(err_set_type)) { @@ -8271,30 +9082,32 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod continue; } - if (prev_type->id == TypeTableEntryIdMaybe && - types_match_const_cast_only(ira, prev_type->data.maybe.child_type, cur_type, source_node).id == ConstCastResultIdOk) + if (prev_type->id == TypeTableEntryIdOptional && + types_match_const_cast_only(ira, prev_type->data.maybe.child_type, cur_type, + source_node, false).id == ConstCastResultIdOk) { continue; } - if (cur_type->id == TypeTableEntryIdMaybe && - types_match_const_cast_only(ira, cur_type->data.maybe.child_type, prev_type, source_node).id == ConstCastResultIdOk) + if (cur_type->id == TypeTableEntryIdOptional && + types_match_const_cast_only(ira, 
cur_type->data.maybe.child_type, prev_type, + source_node, false).id == ConstCastResultIdOk) { prev_inst = cur_inst; continue; } - if (cur_type->id == TypeTableEntryIdUndefLit) { + if (cur_type->id == TypeTableEntryIdUndefined) { continue; } - if (prev_type->id == TypeTableEntryIdUndefLit) { + if (prev_type->id == TypeTableEntryIdUndefined) { prev_inst = cur_inst; continue; } - if (prev_type->id == TypeTableEntryIdNumLitInt || - prev_type->id == TypeTableEntryIdNumLitFloat) + if (prev_type->id == TypeTableEntryIdComptimeInt || + prev_type->id == TypeTableEntryIdComptimeFloat) { if (ir_num_lit_fits_in_other_type(ira, prev_inst, cur_type, false)) { prev_inst = cur_inst; @@ -8304,8 +9117,8 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod } } - if (cur_type->id == TypeTableEntryIdNumLitInt || - cur_type->id == TypeTableEntryIdNumLitFloat) + if (cur_type->id == TypeTableEntryIdComptimeInt || + cur_type->id == TypeTableEntryIdComptimeFloat) { if (ir_num_lit_fits_in_other_type(ira, cur_inst, prev_type, false)) { continue; @@ -8315,8 +9128,9 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod } if (cur_type->id == TypeTableEntryIdArray && prev_type->id == TypeTableEntryIdArray && - cur_type->data.array.len != prev_type->data.array.len && - types_match_const_cast_only(ira, cur_type->data.array.child_type, prev_type->data.array.child_type, source_node).id == ConstCastResultIdOk) + cur_type->data.array.len != prev_type->data.array.len && + types_match_const_cast_only(ira, cur_type->data.array.child_type, prev_type->data.array.child_type, + source_node, false).id == ConstCastResultIdOk) { convert_to_const_slice = true; prev_inst = cur_inst; @@ -8324,8 +9138,9 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod } if (cur_type->id == TypeTableEntryIdArray && prev_type->id == TypeTableEntryIdArray && - cur_type->data.array.len != prev_type->data.array.len && - types_match_const_cast_only(ira, prev_type->data.array.child_type, cur_type->data.array.child_type, source_node).id == ConstCastResultIdOk) + cur_type->data.array.len != prev_type->data.array.len && + types_match_const_cast_only(ira, prev_type->data.array.child_type, cur_type->data.array.child_type, + source_node, false).id == ConstCastResultIdOk) { convert_to_const_slice = true; continue; @@ -8334,8 +9149,9 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod if (cur_type->id == TypeTableEntryIdArray && is_slice(prev_type) && (prev_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.is_const || cur_type->data.array.len == 0) && - types_match_const_cast_only(ira, prev_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.child_type, - cur_type->data.array.child_type, source_node).id == ConstCastResultIdOk) + types_match_const_cast_only(ira, + prev_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.child_type, + cur_type->data.array.child_type, source_node, false).id == ConstCastResultIdOk) { convert_to_const_slice = false; continue; @@ -8344,8 +9160,9 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod if (prev_type->id == TypeTableEntryIdArray && is_slice(cur_type) && (cur_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.is_const || prev_type->data.array.len == 0) && - types_match_const_cast_only(ira, cur_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.child_type, - 
prev_type->data.array.child_type, source_node).id == ConstCastResultIdOk) + types_match_const_cast_only(ira, + cur_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.child_type, + prev_type->data.array.child_type, source_node, false).id == ConstCastResultIdOk) { prev_inst = cur_inst; convert_to_const_slice = false; @@ -8390,7 +9207,11 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod if (convert_to_const_slice) { assert(prev_inst->value.type->id == TypeTableEntryIdArray); - TypeTableEntry *ptr_type = get_pointer_to_type(ira->codegen, prev_inst->value.type->data.array.child_type, true); + TypeTableEntry *ptr_type = get_pointer_to_type_extra( + ira->codegen, prev_inst->value.type->data.array.child_type, + true, false, PtrLenUnknown, + get_abi_alignment(ira->codegen, prev_inst->value.type->data.array.child_type), + 0, 0); TypeTableEntry *slice_type = get_slice_type(ira->codegen, ptr_type); if (err_set_type != nullptr) { return get_error_union_type(ira->codegen, err_set_type, slice_type); @@ -8400,34 +9221,36 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod } else if (err_set_type != nullptr) { if (prev_inst->value.type->id == TypeTableEntryIdErrorSet) { return err_set_type; + } else if (prev_inst->value.type->id == TypeTableEntryIdErrorUnion) { + return get_error_union_type(ira->codegen, err_set_type, prev_inst->value.type->data.error_union.payload_type); + } else if (expected_type != nullptr && expected_type->id == TypeTableEntryIdErrorUnion) { + return get_error_union_type(ira->codegen, err_set_type, expected_type->data.error_union.payload_type); } else { - if (prev_inst->value.type->id == TypeTableEntryIdNumLitInt || - prev_inst->value.type->id == TypeTableEntryIdNumLitFloat) + if (prev_inst->value.type->id == TypeTableEntryIdComptimeInt || + prev_inst->value.type->id == TypeTableEntryIdComptimeFloat) { ir_add_error_node(ira, source_node, buf_sprintf("unable to make error union out of number literal")); return ira->codegen->builtin_types.entry_invalid; - } else if (prev_inst->value.type->id == TypeTableEntryIdNullLit) { + } else if (prev_inst->value.type->id == TypeTableEntryIdNull) { ir_add_error_node(ira, source_node, buf_sprintf("unable to make error union out of null literal")); return ira->codegen->builtin_types.entry_invalid; - } else if (prev_inst->value.type->id == TypeTableEntryIdErrorUnion) { - return get_error_union_type(ira->codegen, err_set_type, prev_inst->value.type->data.error_union.payload_type); } else { return get_error_union_type(ira->codegen, err_set_type, prev_inst->value.type); } } - } else if (any_are_null && prev_inst->value.type->id != TypeTableEntryIdNullLit) { - if (prev_inst->value.type->id == TypeTableEntryIdNumLitInt || - prev_inst->value.type->id == TypeTableEntryIdNumLitFloat) + } else if (any_are_null && prev_inst->value.type->id != TypeTableEntryIdNull) { + if (prev_inst->value.type->id == TypeTableEntryIdComptimeInt || + prev_inst->value.type->id == TypeTableEntryIdComptimeFloat) { ir_add_error_node(ira, source_node, buf_sprintf("unable to make maybe out of number literal")); return ira->codegen->builtin_types.entry_invalid; - } else if (prev_inst->value.type->id == TypeTableEntryIdMaybe) { + } else if (prev_inst->value.type->id == TypeTableEntryIdOptional) { return prev_inst->value.type; } else { - return get_maybe_type(ira->codegen, prev_inst->value.type); + return get_optional_type(ira->codegen, prev_inst->value.type); } } else { return prev_inst->value.type; @@ 
-8448,10 +9271,15 @@ static void copy_const_val(ConstExprValue *dest, ConstExprValue *src, bool same_ *dest = *src; if (!same_global_refs) { dest->global_refs = global_refs; + if (dest->type->id == TypeTableEntryIdStruct) { + dest->data.x_struct.fields = allocate_nonzero<ConstExprValue>(dest->type->data.structure.src_field_count); + memcpy(dest->data.x_struct.fields, src->data.x_struct.fields, sizeof(ConstExprValue) * dest->type->data.structure.src_field_count); + } } } -static void eval_const_expr_implicit_cast(CastOp cast_op, +static bool eval_const_expr_implicit_cast(IrAnalyze *ira, IrInstruction *source_instr, + CastOp cast_op, ConstExprValue *other_val, TypeTableEntry *other_type, ConstExprValue *const_val, TypeTableEntry *new_type) { @@ -8462,17 +9290,23 @@ static void eval_const_expr_implicit_cast(CastOp cast_op, case CastOpNoCast: zig_unreachable(); case CastOpErrSet: + case CastOpBitCast: + case CastOpPtrOfArrayToSlice: zig_panic("TODO"); case CastOpNoop: { - copy_const_val(const_val, other_val, other_val->special == ConstValSpecialStatic); + bool same_global_refs = other_val->special == ConstValSpecialStatic; + copy_const_val(const_val, other_val, same_global_refs); const_val->type = new_type; break; } case CastOpNumLitToConcrete: - if (other_val->type->id == TypeTableEntryIdNumLitFloat) { + if (other_val->type->id == TypeTableEntryIdComptimeFloat) { assert(new_type->id == TypeTableEntryIdFloat); switch (new_type->data.floating.bit_count) { + case 16: + const_val->data.x_f16 = bigfloat_to_f16(&other_val->data.x_bigfloat); + break; case 32: const_val->data.x_f32 = bigfloat_to_f32(&other_val->data.x_bigfloat); break; @@ -8485,7 +9319,7 @@ static void eval_const_expr_implicit_cast(CastOp cast_op, default: zig_unreachable(); } - } else if (other_val->type->id == TypeTableEntryIdNumLitInt) { + } else if (other_val->type->id == TypeTableEntryIdComptimeInt) { bigint_init_bigint(&const_val->data.x_bigint, &other_val->data.x_bigint); } else { zig_unreachable(); @@ -8503,6 +9337,9 @@ static void eval_const_expr_implicit_cast(CastOp cast_op, BigFloat bigfloat; bigfloat_init_bigint(&bigfloat, &other_val->data.x_bigint); switch (new_type->data.floating.bit_count) { + case 16: + const_val->data.x_f16 = bigfloat_to_f16(&bigfloat); + break; case 32: const_val->data.x_f32 = bigfloat_to_f32(&bigfloat); break; @@ -8520,6 +9357,20 @@ static void eval_const_expr_implicit_cast(CastOp cast_op, } case CastOpFloatToInt: float_init_bigint(&const_val->data.x_bigint, other_val); + if (new_type->id == TypeTableEntryIdInt) { + if (!bigint_fits_in_bits(&const_val->data.x_bigint, new_type->data.integral.bit_count, + new_type->data.integral.is_signed)) + { + Buf *int_buf = buf_alloc(); + bigint_append_buf(int_buf, &const_val->data.x_bigint, 10); + + ir_add_error(ira, source_instr, + buf_sprintf("integer value '%s' cannot be stored in type '%s'", + buf_ptr(int_buf), buf_ptr(&new_type->name))); + return false; + } + } + const_val->special = ConstValSpecialStatic; break; case CastOpBoolToInt: @@ -8527,17 +9378,21 @@ static void eval_const_expr_implicit_cast(CastOp cast_op, const_val->special = ConstValSpecialStatic; break; } + return true; } static IrInstruction *ir_resolve_cast(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *value, TypeTableEntry *wanted_type, CastOp cast_op, bool need_alloca) { - if (value->value.special != ConstValSpecialRuntime && + if ((instr_is_comptime(value) || !type_has_bits(wanted_type)) && cast_op != CastOpResizeSlice && cast_op != CastOpBytesToSlice) { IrInstruction *result 
= ir_create_const(&ira->new_irb, source_instr->scope, source_instr->source_node, wanted_type); - eval_const_expr_implicit_cast(cast_op, &value->value, value->value.type, - &result->value, wanted_type); + if (!eval_const_expr_implicit_cast(ira, source_instr, cast_op, &value->value, value->value.type, + &result->value, wanted_type)) + { + return ira->codegen->invalid_instruction; + } return result; } else { IrInstruction *result = ir_build_cast(&ira->new_irb, source_instr->scope, source_instr->source_node, wanted_type, value, cast_op); @@ -8549,17 +9404,69 @@ static IrInstruction *ir_resolve_cast(IrAnalyze *ira, IrInstruction *source_inst } } +static IrInstruction *ir_resolve_ptr_of_array_to_unknown_len_ptr(IrAnalyze *ira, IrInstruction *source_instr, + IrInstruction *value, TypeTableEntry *wanted_type) +{ + assert(value->value.type->id == TypeTableEntryIdPointer); + wanted_type = adjust_ptr_align(ira->codegen, wanted_type, value->value.type->data.pointer.alignment); + + if (instr_is_comptime(value)) { + ConstExprValue *pointee = const_ptr_pointee(ira->codegen, &value->value); + if (pointee->special != ConstValSpecialRuntime) { + IrInstruction *result = ir_create_const(&ira->new_irb, source_instr->scope, + source_instr->source_node, wanted_type); + result->value.type = wanted_type; + result->value.data.x_ptr.special = ConstPtrSpecialBaseArray; + result->value.data.x_ptr.mut = value->value.data.x_ptr.mut; + result->value.data.x_ptr.data.base_array.array_val = pointee; + result->value.data.x_ptr.data.base_array.elem_index = 0; + result->value.data.x_ptr.data.base_array.is_cstr = false; + return result; + } + } + + IrInstruction *result = ir_build_cast(&ira->new_irb, source_instr->scope, source_instr->source_node, + wanted_type, value, CastOpBitCast); + result->value.type = wanted_type; + return result; +} + +static IrInstruction *ir_resolve_ptr_of_array_to_slice(IrAnalyze *ira, IrInstruction *source_instr, + IrInstruction *value, TypeTableEntry *wanted_type) +{ + wanted_type = adjust_slice_align(ira->codegen, wanted_type, value->value.type->data.pointer.alignment); + + if (instr_is_comptime(value)) { + ConstExprValue *pointee = const_ptr_pointee(ira->codegen, &value->value); + if (pointee->special != ConstValSpecialRuntime) { + assert(value->value.type->id == TypeTableEntryIdPointer); + TypeTableEntry *array_type = value->value.type->data.pointer.child_type; + assert(is_slice(wanted_type)); + bool is_const = wanted_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.is_const; + + IrInstruction *result = ir_create_const(&ira->new_irb, source_instr->scope, + source_instr->source_node, wanted_type); + init_const_slice(ira->codegen, &result->value, pointee, 0, array_type->data.array.len, is_const); + result->value.data.x_struct.fields[slice_ptr_index].data.x_ptr.mut = + value->value.data.x_ptr.mut; + result->value.type = wanted_type; + return result; + } + } + + IrInstruction *result = ir_build_cast(&ira->new_irb, source_instr->scope, source_instr->source_node, + wanted_type, value, CastOpPtrOfArrayToSlice); + result->value.type = wanted_type; + ir_add_alloca(ira, result, wanted_type); + return result; +} + static bool is_container(TypeTableEntry *type) { return type->id == TypeTableEntryIdStruct || type->id == TypeTableEntryIdEnum || type->id == TypeTableEntryIdUnion; } -static bool is_u8(TypeTableEntry *type) { - return type->id == TypeTableEntryIdInt && - !type->data.integral.is_signed && type->data.integral.bit_count == 8; -} - static IrBasicBlock *ir_get_new_bb(IrAnalyze 
*ira, IrBasicBlock *old_bb, IrInstruction *ref_old_instruction) { assert(old_bb); @@ -8661,26 +9568,9 @@ static TypeTableEntry *ir_finish_anal(IrAnalyze *ira, TypeTableEntry *result_typ } static IrInstruction *ir_get_const(IrAnalyze *ira, IrInstruction *old_instruction) { - IrInstruction *new_instruction; - if (old_instruction->id == IrInstructionIdVarPtr) { - IrInstructionVarPtr *old_var_ptr_instruction = (IrInstructionVarPtr *)old_instruction; - IrInstructionVarPtr *var_ptr_instruction = ir_create_instruction<IrInstructionVarPtr>(&ira->new_irb, - old_instruction->scope, old_instruction->source_node); - var_ptr_instruction->var = old_var_ptr_instruction->var; - new_instruction = &var_ptr_instruction->base; - } else if (old_instruction->id == IrInstructionIdFieldPtr) { - IrInstructionFieldPtr *field_ptr_instruction = ir_create_instruction<IrInstructionFieldPtr>(&ira->new_irb, - old_instruction->scope, old_instruction->source_node); - new_instruction = &field_ptr_instruction->base; - } else if (old_instruction->id == IrInstructionIdElemPtr) { - IrInstructionElemPtr *elem_ptr_instruction = ir_create_instruction<IrInstructionElemPtr>(&ira->new_irb, - old_instruction->scope, old_instruction->source_node); - new_instruction = &elem_ptr_instruction->base; - } else { - IrInstructionConst *const_instruction = ir_create_instruction<IrInstructionConst>(&ira->new_irb, - old_instruction->scope, old_instruction->source_node); - new_instruction = &const_instruction->base; - } + IrInstructionConst *const_instruction = ir_create_instruction<IrInstructionConst>(&ira->new_irb, + old_instruction->scope, old_instruction->source_node); + IrInstruction *new_instruction = &const_instruction->base; new_instruction->value.special = ConstValSpecialStatic; return new_instruction; } @@ -8700,34 +9590,15 @@ static IrInstruction *ir_get_const_ptr(IrAnalyze *ira, IrInstruction *instructio ConstExprValue *pointee, TypeTableEntry *pointee_type, ConstPtrMut ptr_mut, bool ptr_is_const, bool ptr_is_volatile, uint32_t ptr_align) { - if (pointee_type->id == TypeTableEntryIdMetaType) { - TypeTableEntry *type_entry = pointee->data.x_type; - if (type_entry->id == TypeTableEntryIdUnreachable) { - ir_add_error(ira, instruction, buf_sprintf("pointer to noreturn not allowed")); - return ira->codegen->invalid_instruction; - } - - IrInstruction *const_instr = ir_get_const(ira, instruction); - ConstExprValue *const_val = &const_instr->value; - const_val->type = pointee_type; - type_ensure_zero_bits_known(ira->codegen, type_entry); - if (type_is_invalid(type_entry)) { - return ira->codegen->invalid_instruction; - } - const_val->data.x_type = get_pointer_to_type_extra(ira->codegen, type_entry, - ptr_is_const, ptr_is_volatile, get_abi_alignment(ira->codegen, type_entry), 0, 0); - return const_instr; - } else { - TypeTableEntry *ptr_type = get_pointer_to_type_extra(ira->codegen, pointee_type, - ptr_is_const, ptr_is_volatile, ptr_align, 0, 0); - IrInstruction *const_instr = ir_get_const(ira, instruction); - ConstExprValue *const_val = &const_instr->value; - const_val->type = ptr_type; - const_val->data.x_ptr.special = ConstPtrSpecialRef; - const_val->data.x_ptr.mut = ptr_mut; - const_val->data.x_ptr.data.ref.pointee = pointee; - return const_instr; - } + TypeTableEntry *ptr_type = get_pointer_to_type_extra(ira->codegen, pointee_type, + ptr_is_const, ptr_is_volatile, PtrLenSingle, ptr_align, 0, 0); + IrInstruction *const_instr = ir_get_const(ira, instruction); + ConstExprValue *const_val = &const_instr->value; + const_val->type = ptr_type; + 
const_val->data.x_ptr.special = ConstPtrSpecialRef; + const_val->data.x_ptr.mut = ptr_mut; + const_val->data.x_ptr.data.ref.pointee = pointee; + return const_instr; } static TypeTableEntry *ir_analyze_const_ptr(IrAnalyze *ira, IrInstruction *instruction, @@ -8757,6 +9628,9 @@ static ConstExprValue *ir_resolve_const(IrAnalyze *ira, IrInstruction *value, Un case ConstValSpecialStatic: return &value->value; case ConstValSpecialRuntime: + if (!type_has_bits(value->value.type)) { + return &value->value; + } ir_add_error(ira, value, buf_sprintf("unable to evaluate constant expression")); return nullptr; case ConstValSpecialUndef: @@ -8861,7 +9735,7 @@ static FnTableEntry *ir_resolve_fn(IrAnalyze *ira, IrInstruction *fn_value) { } static IrInstruction *ir_analyze_maybe_wrap(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *value, TypeTableEntry *wanted_type) { - assert(wanted_type->id == TypeTableEntryIdMaybe); + assert(wanted_type->id == TypeTableEntryIdOptional); if (instr_is_comptime(value)) { TypeTableEntry *payload_type = wanted_type->data.maybe.child_type; @@ -8869,21 +9743,25 @@ static IrInstruction *ir_analyze_maybe_wrap(IrAnalyze *ira, IrInstruction *sourc if (type_is_invalid(casted_payload->value.type)) return ira->codegen->invalid_instruction; - ConstExprValue *val = ir_resolve_const(ira, casted_payload, UndefBad); + ConstExprValue *val = ir_resolve_const(ira, casted_payload, UndefOk); if (!val) return ira->codegen->invalid_instruction; IrInstructionConst *const_instruction = ir_create_instruction<IrInstructionConst>(&ira->new_irb, source_instr->scope, source_instr->source_node); - const_instruction->base.value.type = wanted_type; const_instruction->base.value.special = ConstValSpecialStatic; - const_instruction->base.value.data.x_maybe = val; + if (get_codegen_ptr_type(wanted_type) != nullptr) { + copy_const_val(&const_instruction->base.value, val, val->data.x_ptr.mut == ConstPtrMutComptimeConst); + } else { + const_instruction->base.value.data.x_optional = val; + } + const_instruction->base.value.type = wanted_type; return &const_instruction->base; } IrInstruction *result = ir_build_maybe_wrap(&ira->new_irb, source_instr->scope, source_instr->source_node, value); result->value.type = wanted_type; - result->value.data.rh_maybe = RuntimeHintMaybeNonNull; + result->value.data.rh_maybe = RuntimeHintOptionalNonNull; ir_add_alloca(ira, result, wanted_type); return result; } @@ -8930,7 +9808,7 @@ static IrInstruction *ir_analyze_err_set_cast(IrAnalyze *ira, IrInstruction *sou if (!val) return ira->codegen->invalid_instruction; - if (!resolve_inferred_error_set(ira, wanted_type, source_instr->source_node)) { + if (!resolve_inferred_error_set(ira->codegen, wanted_type, source_instr->source_node)) { return ira->codegen->invalid_instruction; } if (!type_is_global_error_set(wanted_type)) { @@ -9025,16 +9903,21 @@ static IrInstruction *ir_analyze_cast_ref(IrAnalyze *ira, IrInstruction *source_ } static IrInstruction *ir_analyze_null_to_maybe(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *value, TypeTableEntry *wanted_type) { - assert(wanted_type->id == TypeTableEntryIdMaybe); + assert(wanted_type->id == TypeTableEntryIdOptional); assert(instr_is_comptime(value)); ConstExprValue *val = ir_resolve_const(ira, value, UndefBad); assert(val); IrInstructionConst *const_instruction = ir_create_instruction<IrInstructionConst>(&ira->new_irb, source_instr->scope, source_instr->source_node); - const_instruction->base.value.type = wanted_type; const_instruction->base.value.special = 
ConstValSpecialStatic; - const_instruction->base.value.data.x_maybe = nullptr; + if (get_codegen_ptr_type(wanted_type) != nullptr) { + const_instruction->base.value.data.x_ptr.special = ConstPtrSpecialHardCodedAddr; + const_instruction->base.value.data.x_ptr.data.hard_coded_addr.addr = 0; + } else { + const_instruction->base.value.data.x_optional = nullptr; + } + const_instruction->base.value.type = wanted_type; return &const_instruction->base; } @@ -9044,25 +9927,17 @@ static IrInstruction *ir_get_ref(IrAnalyze *ira, IrInstruction *source_instructi if (type_is_invalid(value->value.type)) return ira->codegen->invalid_instruction; - if (value->id == IrInstructionIdLoadPtr) { - IrInstructionLoadPtr *load_ptr_inst = (IrInstructionLoadPtr *) value; - if (load_ptr_inst->ptr->value.type->data.pointer.is_const) { - return load_ptr_inst->ptr; - } - } - if (instr_is_comptime(value)) { ConstExprValue *val = ir_resolve_const(ira, value, UndefOk); if (!val) return ira->codegen->invalid_instruction; - bool final_is_const = (value->value.type->id == TypeTableEntryIdMetaType) ? is_const : true; return ir_get_const_ptr(ira, source_instruction, val, value->value.type, - ConstPtrMutComptimeConst, final_is_const, is_volatile, + ConstPtrMutComptimeConst, is_const, is_volatile, get_abi_alignment(ira->codegen, value->value.type)); } TypeTableEntry *ptr_type = get_pointer_to_type_extra(ira->codegen, value->value.type, - is_const, is_volatile, get_abi_alignment(ira->codegen, value->value.type), 0, 0); + is_const, is_volatile, PtrLenSingle, get_abi_alignment(ira->codegen, value->value.type), 0, 0); IrInstruction *new_instruction = ir_build_ref(&ira->new_irb, source_instruction->scope, source_instruction->source_node, value, is_const, is_volatile); new_instruction->value.type = ptr_type; @@ -9114,6 +9989,8 @@ static IrInstruction *ir_analyze_array_to_slice(IrAnalyze *ira, IrInstruction *s IrInstruction *result = ir_build_slice(&ira->new_irb, source_instr->scope, source_instr->source_node, array_ptr, start, end, false); result->value.type = wanted_type; + result->value.data.rh_slice.id = RuntimeHintSliceIdLen; + result->value.data.rh_slice.len = array_type->data.array.len; ir_add_alloca(ira, result, result->value.type); return result; @@ -9202,6 +10079,8 @@ static IrInstruction *ir_analyze_enum_to_union(IrAnalyze *ira, IrInstruction *so TypeUnionField *union_field = find_union_field_by_tag(wanted_type, &val->data.x_enum_tag); assert(union_field != nullptr); type_ensure_zero_bits_known(ira->codegen, union_field->type_entry); + if (type_is_invalid(union_field->type_entry)) + return ira->codegen->invalid_instruction; if (!union_field->type_entry->zero_bits) { AstNode *field_node = wanted_type->data.unionation.decl_node->data.container_decl.fields.at( union_field->enum_field->decl_index); @@ -9332,7 +10211,7 @@ static IrInstruction *ir_analyze_int_to_enum(IrAnalyze *ira, IrInstruction *sour } IrInstruction *result = ir_build_int_to_enum(&ira->new_irb, source_instr->scope, - source_instr->source_node, target); + source_instr->source_node, nullptr, target); result->value.type = wanted_type; return result; } @@ -9346,9 +10225,9 @@ static IrInstruction *ir_analyze_number_to_literal(IrAnalyze *ira, IrInstruction IrInstruction *result = ir_create_const(&ira->new_irb, source_instr->scope, source_instr->source_node, wanted_type); - if (wanted_type->id == TypeTableEntryIdNumLitFloat) { + if (wanted_type->id == TypeTableEntryIdComptimeFloat) { float_init_float(&result->value, val); - } else if (wanted_type->id == 
TypeTableEntryIdNumLitInt) { + } else if (wanted_type->id == TypeTableEntryIdComptimeInt) { bigint_init_bigint(&result->value.data.x_bigint, &val->data.x_bigint); } else { zig_unreachable(); @@ -9371,7 +10250,7 @@ static IrInstruction *ir_analyze_int_to_err(IrAnalyze *ira, IrInstruction *sourc IrInstruction *result = ir_create_const(&ira->new_irb, source_instr->scope, source_instr->source_node, wanted_type); - if (!resolve_inferred_error_set(ira, wanted_type, source_instr->source_node)) { + if (!resolve_inferred_error_set(ira->codegen, wanted_type, source_instr->source_node)) { return ira->codegen->invalid_instruction; } @@ -9469,7 +10348,7 @@ static IrInstruction *ir_analyze_err_to_int(IrAnalyze *ira, IrInstruction *sourc zig_unreachable(); } if (!type_is_global_error_set(err_set_type)) { - if (!resolve_inferred_error_set(ira, err_set_type, source_instr->source_node)) { + if (!resolve_inferred_error_set(ira->codegen, err_set_type, source_instr->source_node)) { return ira->codegen->invalid_instruction; } if (err_set_type->data.error_set.err_count == 0) { @@ -9501,6 +10380,140 @@ static IrInstruction *ir_analyze_err_to_int(IrAnalyze *ira, IrInstruction *sourc return result; } +static IrInstruction *ir_analyze_ptr_to_array(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *target, + TypeTableEntry *wanted_type) +{ + assert(wanted_type->id == TypeTableEntryIdPointer); + wanted_type = adjust_ptr_align(ira->codegen, wanted_type, target->value.type->data.pointer.alignment); + TypeTableEntry *array_type = wanted_type->data.pointer.child_type; + assert(array_type->id == TypeTableEntryIdArray); + assert(array_type->data.array.len == 1); + + if (instr_is_comptime(target)) { + ConstExprValue *val = ir_resolve_const(ira, target, UndefBad); + if (!val) + return ira->codegen->invalid_instruction; + + assert(val->type->id == TypeTableEntryIdPointer); + ConstExprValue *pointee = const_ptr_pointee(ira->codegen, val); + if (pointee->special != ConstValSpecialRuntime) { + ConstExprValue *array_val = create_const_vals(1); + array_val->special = ConstValSpecialStatic; + array_val->type = array_type; + array_val->data.x_array.special = ConstArraySpecialNone; + array_val->data.x_array.s_none.elements = pointee; + array_val->data.x_array.s_none.parent.id = ConstParentIdScalar; + array_val->data.x_array.s_none.parent.data.p_scalar.scalar_val = pointee; + + IrInstructionConst *const_instruction = ir_create_instruction<IrInstructionConst>(&ira->new_irb, + source_instr->scope, source_instr->source_node); + const_instruction->base.value.type = wanted_type; + const_instruction->base.value.special = ConstValSpecialStatic; + const_instruction->base.value.data.x_ptr.special = ConstPtrSpecialRef; + const_instruction->base.value.data.x_ptr.data.ref.pointee = array_val; + const_instruction->base.value.data.x_ptr.mut = val->data.x_ptr.mut; + return &const_instruction->base; + } + } + + // pointer to array and pointer to single item are represented the same way at runtime + IrInstruction *result = ir_build_cast(&ira->new_irb, target->scope, target->source_node, + wanted_type, target, CastOpBitCast); + result->value.type = wanted_type; + return result; +} + +static void report_recursive_error(IrAnalyze *ira, AstNode *source_node, ConstCastOnly *cast_result, + ErrorMsg *parent_msg) +{ + switch (cast_result->id) { + case ConstCastResultIdOk: + zig_unreachable(); + case ConstCastResultIdOptionalChild: { + ErrorMsg *msg = add_error_note(ira->codegen, parent_msg, source_node, + buf_sprintf("optional type child '%s' cannot 
cast into optional type child '%s'", + buf_ptr(&cast_result->data.optional->actual_child->name), + buf_ptr(&cast_result->data.optional->wanted_child->name))); + report_recursive_error(ira, source_node, &cast_result->data.optional->child, msg); + break; + } + case ConstCastResultIdErrorUnionErrorSet: { + ErrorMsg *msg = add_error_note(ira->codegen, parent_msg, source_node, + buf_sprintf("error set '%s' cannot cast into error set '%s'", + buf_ptr(&cast_result->data.error_union_error_set->actual_err_set->name), + buf_ptr(&cast_result->data.error_union_error_set->wanted_err_set->name))); + report_recursive_error(ira, source_node, &cast_result->data.error_union_error_set->child, msg); + break; + } + case ConstCastResultIdErrSet: { + ZigList<ErrorTableEntry *> *missing_errors = &cast_result->data.error_set_mismatch->missing_errors; + for (size_t i = 0; i < missing_errors->length; i += 1) { + ErrorTableEntry *error_entry = missing_errors->at(i); + add_error_note(ira->codegen, parent_msg, error_entry->decl_node, + buf_sprintf("'error.%s' not a member of destination error set", buf_ptr(&error_entry->name))); + } + break; + } + case ConstCastResultIdErrSetGlobal: { + add_error_note(ira->codegen, parent_msg, source_node, + buf_sprintf("cannot cast global error set into smaller set")); + break; + } + case ConstCastResultIdPointerChild: { + ErrorMsg *msg = add_error_note(ira->codegen, parent_msg, source_node, + buf_sprintf("pointer type child '%s' cannot cast into pointer type child '%s'", + buf_ptr(&cast_result->data.pointer_mismatch->actual_child->name), + buf_ptr(&cast_result->data.pointer_mismatch->wanted_child->name))); + report_recursive_error(ira, source_node, &cast_result->data.pointer_mismatch->child, msg); + break; + } + case ConstCastResultIdSliceChild: { + ErrorMsg *msg = add_error_note(ira->codegen, parent_msg, source_node, + buf_sprintf("slice type child '%s' cannot cast into slice type child '%s'", + buf_ptr(&cast_result->data.slice_mismatch->actual_child->name), + buf_ptr(&cast_result->data.slice_mismatch->wanted_child->name))); + report_recursive_error(ira, source_node, &cast_result->data.slice_mismatch->child, msg); + break; + } + case ConstCastResultIdErrorUnionPayload: { + ErrorMsg *msg = add_error_note(ira->codegen, parent_msg, source_node, + buf_sprintf("error union payload '%s' cannot cast into error union payload '%s'", + buf_ptr(&cast_result->data.error_union_payload->actual_payload->name), + buf_ptr(&cast_result->data.error_union_payload->wanted_payload->name))); + report_recursive_error(ira, source_node, &cast_result->data.error_union_payload->child, msg); + break; + } + case ConstCastResultIdType: { + AstNode *wanted_decl_node = type_decl_node(cast_result->data.type_mismatch->wanted_type); + AstNode *actual_decl_node = type_decl_node(cast_result->data.type_mismatch->actual_type); + if (wanted_decl_node != nullptr) { + add_error_note(ira->codegen, parent_msg, wanted_decl_node, + buf_sprintf("%s declared here", + buf_ptr(&cast_result->data.type_mismatch->wanted_type->name))); + } + if (actual_decl_node != nullptr) { + add_error_note(ira->codegen, parent_msg, actual_decl_node, + buf_sprintf("%s declared here", + buf_ptr(&cast_result->data.type_mismatch->actual_type->name))); + } + break; + } + case ConstCastResultIdFnAlign: // TODO + case ConstCastResultIdFnCC: // TODO + case ConstCastResultIdFnVarArgs: // TODO + case ConstCastResultIdFnIsGeneric: // TODO + case ConstCastResultIdFnReturnType: // TODO + case ConstCastResultIdFnArgCount: // TODO + case 
ConstCastResultIdFnGenericArgCount: // TODO + case ConstCastResultIdFnArg: // TODO + case ConstCastResultIdFnArgNoAlias: // TODO + case ConstCastResultIdUnresolvedInferredErrSet: // TODO + case ConstCastResultIdAsyncAllocatorType: // TODO + case ConstCastResultIdNullWrapPtr: // TODO + break; + } +} + static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_instr, TypeTableEntry *wanted_type, IrInstruction *value) { @@ -9511,60 +10524,52 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst return ira->codegen->invalid_instruction; } - // explicit match or non-const to const - if (types_match_const_cast_only(ira, wanted_type, actual_type, source_node).id == ConstCastResultIdOk) { + // perfect match or non-const to const + ConstCastOnly const_cast_result = types_match_const_cast_only(ira, wanted_type, actual_type, + source_node, false); + if (const_cast_result.id == ConstCastResultIdOk) { return ir_resolve_cast(ira, source_instr, value, wanted_type, CastOpNoop, false); } - // explicit cast from bool to int + // widening conversion if (wanted_type->id == TypeTableEntryIdInt && - actual_type->id == TypeTableEntryIdBool) - { - return ir_resolve_cast(ira, source_instr, value, wanted_type, CastOpBoolToInt, false); - } - - // explicit widening or shortening cast - if ((wanted_type->id == TypeTableEntryIdInt && - actual_type->id == TypeTableEntryIdInt) || - (wanted_type->id == TypeTableEntryIdFloat && - actual_type->id == TypeTableEntryIdFloat)) + actual_type->id == TypeTableEntryIdInt && + wanted_type->data.integral.is_signed == actual_type->data.integral.is_signed && + wanted_type->data.integral.bit_count >= actual_type->data.integral.bit_count) { return ir_analyze_widen_or_shorten(ira, source_instr, value, wanted_type); } - // explicit error set cast - if (wanted_type->id == TypeTableEntryIdErrorSet && - actual_type->id == TypeTableEntryIdErrorSet) + // small enough unsigned ints can get casted to large enough signed ints + if (wanted_type->id == TypeTableEntryIdInt && wanted_type->data.integral.is_signed && + actual_type->id == TypeTableEntryIdInt && !actual_type->data.integral.is_signed && + wanted_type->data.integral.bit_count > actual_type->data.integral.bit_count) { - return ir_analyze_err_set_cast(ira, source_instr, value, wanted_type); + return ir_analyze_widen_or_shorten(ira, source_instr, value, wanted_type); } - // explicit cast from int to float + // float widening conversion if (wanted_type->id == TypeTableEntryIdFloat && - actual_type->id == TypeTableEntryIdInt) + actual_type->id == TypeTableEntryIdFloat && + wanted_type->data.floating.bit_count >= actual_type->data.floating.bit_count) { - return ir_resolve_cast(ira, source_instr, value, wanted_type, CastOpIntToFloat, false); + return ir_analyze_widen_or_shorten(ira, source_instr, value, wanted_type); } - // explicit cast from float to int - if (wanted_type->id == TypeTableEntryIdInt && - actual_type->id == TypeTableEntryIdFloat) - { - return ir_resolve_cast(ira, source_instr, value, wanted_type, CastOpFloatToInt, false); - } - // explicit cast from [N]T to []const T + // cast from [N]T to []const T if (is_slice(wanted_type) && actual_type->id == TypeTableEntryIdArray) { TypeTableEntry *ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry; assert(ptr_type->id == TypeTableEntryIdPointer); if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) && - types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, 
actual_type->data.array.child_type, source_node).id == ConstCastResultIdOk) + types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, + source_node, false).id == ConstCastResultIdOk) { return ir_analyze_array_to_slice(ira, source_instr, value, wanted_type); } } - // explicit cast from &const [N]T to []const T + // cast from *const [N]T to []const T if (is_slice(wanted_type) && actual_type->id == TypeTableEntryIdPointer && actual_type->data.pointer.is_const && @@ -9576,13 +10581,14 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst TypeTableEntry *array_type = actual_type->data.pointer.child_type; if ((ptr_type->data.pointer.is_const || array_type->data.array.len == 0) && - types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, array_type->data.array.child_type, source_node).id == ConstCastResultIdOk) + types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, array_type->data.array.child_type, + source_node, false).id == ConstCastResultIdOk) { return ir_analyze_array_to_slice(ira, source_instr, value, wanted_type); } } - // explicit cast from [N]T to &const []const N + // cast from [N]T to *const []const T if (wanted_type->id == TypeTableEntryIdPointer && wanted_type->data.pointer.is_const && is_slice(wanted_type->data.pointer.child_type) && @@ -9592,7 +10598,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst wanted_type->data.pointer.child_type->data.structure.fields[slice_ptr_index].type_entry; assert(ptr_type->id == TypeTableEntryIdPointer); if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) && - types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node).id == ConstCastResultIdOk) + types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, + source_node, false).id == ConstCastResultIdOk) { IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.pointer.child_type, value); if (type_is_invalid(cast1->value.type)) @@ -9606,8 +10613,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst } } - // explicit cast from [N]T to ?[]const N - if (wanted_type->id == TypeTableEntryIdMaybe && + // cast from [N]T to ?[]const T + if (wanted_type->id == TypeTableEntryIdOptional && is_slice(wanted_type->data.maybe.child_type) && actual_type->id == TypeTableEntryIdArray) { @@ -9615,7 +10622,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst wanted_type->data.maybe.child_type->data.structure.fields[slice_ptr_index].type_entry; assert(ptr_type->id == TypeTableEntryIdPointer); if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) && - types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node).id == ConstCastResultIdOk) + types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, + source_node, false).id == ConstCastResultIdOk) { IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.maybe.child_type, value); if (type_is_invalid(cast1->value.type)) @@ -9629,59 +10637,47 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst } } - // explicit cast from []T to []u8 or []u8 to []T - if (is_slice(wanted_type) && is_slice(actual_type)) { - TypeTableEntry *wanted_ptr_type = 
wanted_type->data.structure.fields[slice_ptr_index].type_entry; - TypeTableEntry *actual_ptr_type = actual_type->data.structure.fields[slice_ptr_index].type_entry; - if ((is_u8(wanted_ptr_type->data.pointer.child_type) || is_u8(actual_ptr_type->data.pointer.child_type)) && - (wanted_ptr_type->data.pointer.is_const || !actual_ptr_type->data.pointer.is_const)) - { - uint32_t src_align_bytes = get_ptr_align(actual_ptr_type); - uint32_t dest_align_bytes = get_ptr_align(wanted_ptr_type); - - if (dest_align_bytes > src_align_bytes) { - ErrorMsg *msg = ir_add_error(ira, source_instr, - buf_sprintf("cast increases pointer alignment")); - add_error_note(ira->codegen, msg, source_instr->source_node, - buf_sprintf("'%s' has alignment %" PRIu32, buf_ptr(&actual_type->name), src_align_bytes)); - add_error_note(ira->codegen, msg, source_instr->source_node, - buf_sprintf("'%s' has alignment %" PRIu32, buf_ptr(&wanted_type->name), dest_align_bytes)); - return ira->codegen->invalid_instruction; - } - - if (!ir_emit_global_runtime_side_effect(ira, source_instr)) - return ira->codegen->invalid_instruction; - return ir_resolve_cast(ira, source_instr, value, wanted_type, CastOpResizeSlice, true); - } + // *[N]T to [*]T + if (wanted_type->id == TypeTableEntryIdPointer && + wanted_type->data.pointer.ptr_len == PtrLenUnknown && + actual_type->id == TypeTableEntryIdPointer && + actual_type->data.pointer.ptr_len == PtrLenSingle && + actual_type->data.pointer.child_type->id == TypeTableEntryIdArray && + actual_type->data.pointer.alignment >= wanted_type->data.pointer.alignment && + types_match_const_cast_only(ira, wanted_type->data.pointer.child_type, + actual_type->data.pointer.child_type->data.array.child_type, source_node, + !wanted_type->data.pointer.is_const).id == ConstCastResultIdOk) + { + return ir_resolve_ptr_of_array_to_unknown_len_ptr(ira, source_instr, value, wanted_type); } - // explicit cast from [N]u8 to []const T + // *[N]T to []T if (is_slice(wanted_type) && - wanted_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.is_const && - actual_type->id == TypeTableEntryIdArray && - is_u8(actual_type->data.array.child_type)) + actual_type->id == TypeTableEntryIdPointer && + actual_type->data.pointer.ptr_len == PtrLenSingle && + actual_type->data.pointer.child_type->id == TypeTableEntryIdArray) { - if (!ir_emit_global_runtime_side_effect(ira, source_instr)) - return ira->codegen->invalid_instruction; - uint64_t child_type_size = type_size(ira->codegen, - wanted_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.child_type); - if (actual_type->data.array.len % child_type_size == 0) { - return ir_resolve_cast(ira, source_instr, value, wanted_type, CastOpBytesToSlice, true); - } else { - ir_add_error_node(ira, source_instr->source_node, - buf_sprintf("unable to convert %s to %s: size mismatch", - buf_ptr(&actual_type->name), buf_ptr(&wanted_type->name))); - return ira->codegen->invalid_instruction; + TypeTableEntry *slice_ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry; + assert(slice_ptr_type->id == TypeTableEntryIdPointer); + if (types_match_const_cast_only(ira, slice_ptr_type->data.pointer.child_type, + actual_type->data.pointer.child_type->data.array.child_type, source_node, + !slice_ptr_type->data.pointer.is_const).id == ConstCastResultIdOk) + { + return ir_resolve_ptr_of_array_to_slice(ira, source_instr, value, wanted_type); } } - // explicit cast from child type of maybe type to maybe type - if (wanted_type->id == TypeTableEntryIdMaybe) { + 
+ // cast from T to ?T + // note that the *T to ?*T case is handled via the "ConstCastOnly" mechanism + if (wanted_type->id == TypeTableEntryIdOptional) { TypeTableEntry *wanted_child_type = wanted_type->data.maybe.child_type; - if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node).id == ConstCastResultIdOk) { + if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node, + false).id == ConstCastResultIdOk) + { return ir_analyze_maybe_wrap(ira, source_instr, value, wanted_type); - } else if (actual_type->id == TypeTableEntryIdNumLitInt || - actual_type->id == TypeTableEntryIdNumLitFloat) + } else if (actual_type->id == TypeTableEntryIdComptimeInt || + actual_type->id == TypeTableEntryIdComptimeFloat) { if (ir_num_lit_fits_in_other_type(ira, value, wanted_child_type, true)) { return ir_analyze_maybe_wrap(ira, source_instr, value, wanted_type); @@ -9704,19 +10700,21 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst } } - // explicit cast from null literal to maybe type - if (wanted_type->id == TypeTableEntryIdMaybe && - actual_type->id == TypeTableEntryIdNullLit) + // cast from null literal to maybe type + if (wanted_type->id == TypeTableEntryIdOptional && + actual_type->id == TypeTableEntryIdNull) { return ir_analyze_null_to_maybe(ira, source_instr, value, wanted_type); } - // explicit cast from child type of error type to error type + // cast from child type of error type to error type if (wanted_type->id == TypeTableEntryIdErrorUnion) { - if (types_match_const_cast_only(ira, wanted_type->data.error_union.payload_type, actual_type, source_node).id == ConstCastResultIdOk) { + if (types_match_const_cast_only(ira, wanted_type->data.error_union.payload_type, actual_type, + source_node, false).id == ConstCastResultIdOk) + { return ir_analyze_err_wrap_payload(ira, source_instr, value, wanted_type); - } else if (actual_type->id == TypeTableEntryIdNumLitInt || - actual_type->id == TypeTableEntryIdNumLitFloat) + } else if (actual_type->id == TypeTableEntryIdComptimeInt || + actual_type->id == TypeTableEntryIdComptimeFloat) { if (ir_num_lit_fits_in_other_type(ira, value, wanted_type->data.error_union.payload_type, true)) { return ir_analyze_err_wrap_payload(ira, source_instr, value, wanted_type); @@ -9726,7 +10724,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst } } - // explicit cast from [N]T to %[]const T + // cast from [N]T to E![]const T if (wanted_type->id == TypeTableEntryIdErrorUnion && is_slice(wanted_type->data.error_union.payload_type) && actual_type->id == TypeTableEntryIdArray) @@ -9735,7 +10733,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst wanted_type->data.error_union.payload_type->data.structure.fields[slice_ptr_index].type_entry; assert(ptr_type->id == TypeTableEntryIdPointer); if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) && - types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node).id == ConstCastResultIdOk) + types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, + source_node, false).id == ConstCastResultIdOk) { IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.error_union.payload_type, value); if (type_is_invalid(cast1->value.type)) @@ -9749,23 +10748,23 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst } } - // explicit cast 
from error set to error union type + // cast from error set to error union type if (wanted_type->id == TypeTableEntryIdErrorUnion && actual_type->id == TypeTableEntryIdErrorSet) { return ir_analyze_err_wrap_code(ira, source_instr, value, wanted_type); } - // explicit cast from T to %?T + // cast from T to E!?T if (wanted_type->id == TypeTableEntryIdErrorUnion && - wanted_type->data.error_union.payload_type->id == TypeTableEntryIdMaybe && - actual_type->id != TypeTableEntryIdMaybe) + wanted_type->data.error_union.payload_type->id == TypeTableEntryIdOptional && + actual_type->id != TypeTableEntryIdOptional) { TypeTableEntry *wanted_child_type = wanted_type->data.error_union.payload_type->data.maybe.child_type; - if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node).id == ConstCastResultIdOk || - actual_type->id == TypeTableEntryIdNullLit || - actual_type->id == TypeTableEntryIdNumLitInt || - actual_type->id == TypeTableEntryIdNumLitFloat) + if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node, false).id == ConstCastResultIdOk || + actual_type->id == TypeTableEntryIdNull || + actual_type->id == TypeTableEntryIdComptimeInt || + actual_type->id == TypeTableEntryIdComptimeFloat) { IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.error_union.payload_type, value); if (type_is_invalid(cast1->value.type)) @@ -9779,11 +10778,14 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst } } - // explicit cast from number literal to another type - // explicit cast from number literal to &const integer - if (actual_type->id == TypeTableEntryIdNumLitFloat || - actual_type->id == TypeTableEntryIdNumLitInt) + // cast from number literal to another type + // cast from number literal to *const integer + if (actual_type->id == TypeTableEntryIdComptimeFloat || + actual_type->id == TypeTableEntryIdComptimeInt) { + ensure_complete_type(ira->codegen, wanted_type); + if (type_is_invalid(wanted_type)) + return ira->codegen->invalid_instruction; if (wanted_type->id == TypeTableEntryIdEnum) { IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.enumeration.tag_int_type, value); if (type_is_invalid(cast1->value.type)) @@ -9808,9 +10810,9 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst return cast2; } else if (ir_num_lit_fits_in_other_type(ira, value, wanted_type, true)) { CastOp op; - if ((actual_type->id == TypeTableEntryIdNumLitFloat && + if ((actual_type->id == TypeTableEntryIdComptimeFloat && wanted_type->id == TypeTableEntryIdFloat) || - (actual_type->id == TypeTableEntryIdNumLitInt && + (actual_type->id == TypeTableEntryIdComptimeInt && wanted_type->id == TypeTableEntryIdInt)) { op = CastOpNumLitToConcrete; @@ -9827,41 +10829,16 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst } } - // explicit cast from typed number to integer or float literal. + // cast from typed number to integer or float literal. 
// works when the number is known at compile time if (instr_is_comptime(value) && - ((actual_type->id == TypeTableEntryIdInt && wanted_type->id == TypeTableEntryIdNumLitInt) || - (actual_type->id == TypeTableEntryIdFloat && wanted_type->id == TypeTableEntryIdNumLitFloat))) + ((actual_type->id == TypeTableEntryIdInt && wanted_type->id == TypeTableEntryIdComptimeInt) || + (actual_type->id == TypeTableEntryIdFloat && wanted_type->id == TypeTableEntryIdComptimeFloat))) { return ir_analyze_number_to_literal(ira, source_instr, value, wanted_type); } - // explicit cast from T!void to integer type which can fit it - bool actual_type_is_void_err = actual_type->id == TypeTableEntryIdErrorUnion && - !type_has_bits(actual_type->data.error_union.payload_type); - bool actual_type_is_err_set = actual_type->id == TypeTableEntryIdErrorSet; - if ((actual_type_is_void_err || actual_type_is_err_set) && wanted_type->id == TypeTableEntryIdInt) { - return ir_analyze_err_to_int(ira, source_instr, value, wanted_type); - } - - // explicit cast from integer to error set - if (wanted_type->id == TypeTableEntryIdErrorSet && actual_type->id == TypeTableEntryIdInt && - !actual_type->data.integral.is_signed) - { - return ir_analyze_int_to_err(ira, source_instr, value, wanted_type); - } - - // explicit cast from integer to enum type with no payload - if (actual_type->id == TypeTableEntryIdInt && wanted_type->id == TypeTableEntryIdEnum) { - return ir_analyze_int_to_enum(ira, source_instr, value, wanted_type); - } - - // explicit cast from enum type with no payload to integer - if (wanted_type->id == TypeTableEntryIdInt && actual_type->id == TypeTableEntryIdEnum) { - return ir_analyze_enum_to_int(ira, source_instr, value, wanted_type); - } - - // explicit cast from union to the enum type of the union + // cast from union to the enum type of the union if (actual_type->id == TypeTableEntryIdUnion && wanted_type->id == TypeTableEntryIdEnum) { type_ensure_zero_bits_known(ira->codegen, actual_type); if (type_is_invalid(actual_type)) @@ -9872,7 +10849,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst } } - // explicit enum to union which has the enum as the tag type + // enum to union which has the enum as the tag type if (wanted_type->id == TypeTableEntryIdUnion && actual_type->id == TypeTableEntryIdEnum && (wanted_type->data.unionation.decl_node->data.container_decl.auto_enum || wanted_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr)) @@ -9883,7 +10860,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst } } - // explicit enum to &const union which has the enum as the tag type + // enum to &const union which has the enum as the tag type if (actual_type->id == TypeTableEntryIdEnum && wanted_type->id == TypeTableEntryIdPointer) { TypeTableEntry *union_type = wanted_type->data.pointer.child_type; if (union_type->data.unionation.decl_node->data.container_decl.auto_enum || @@ -9904,23 +10881,63 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst } } - // explicit cast from undefined to anything - if (actual_type->id == TypeTableEntryIdUndefLit) { + // cast from *T to *[1]T + if (wanted_type->id == TypeTableEntryIdPointer && wanted_type->data.pointer.ptr_len == PtrLenSingle && + actual_type->id == TypeTableEntryIdPointer && actual_type->data.pointer.ptr_len == PtrLenSingle) + { + TypeTableEntry *array_type = wanted_type->data.pointer.child_type; + if (array_type->id == TypeTableEntryIdArray && 
array_type->data.array.len == 1 && + types_match_const_cast_only(ira, array_type->data.array.child_type, + actual_type->data.pointer.child_type, source_node, + !wanted_type->data.pointer.is_const).id == ConstCastResultIdOk) + { + if (wanted_type->data.pointer.alignment > actual_type->data.pointer.alignment) { + ErrorMsg *msg = ir_add_error(ira, source_instr, buf_sprintf("cast increases pointer alignment")); + add_error_note(ira->codegen, msg, value->source_node, + buf_sprintf("'%s' has alignment %" PRIu32, buf_ptr(&actual_type->name), + actual_type->data.pointer.alignment)); + add_error_note(ira->codegen, msg, source_instr->source_node, + buf_sprintf("'%s' has alignment %" PRIu32, buf_ptr(&wanted_type->name), + wanted_type->data.pointer.alignment)); + return ira->codegen->invalid_instruction; + } + return ir_analyze_ptr_to_array(ira, source_instr, value, wanted_type); + } + } + + // cast from T to *T where T is zero bits + if (wanted_type->id == TypeTableEntryIdPointer && wanted_type->data.pointer.ptr_len == PtrLenSingle && + types_match_const_cast_only(ira, wanted_type->data.pointer.child_type, + actual_type, source_node, !wanted_type->data.pointer.is_const).id == ConstCastResultIdOk) + { + type_ensure_zero_bits_known(ira->codegen, actual_type); + if (type_is_invalid(actual_type)) { + return ira->codegen->invalid_instruction; + } + if (!type_has_bits(actual_type)) { + return ir_get_ref(ira, source_instr, value, false, false); + } + } + + + // cast from undefined to anything + if (actual_type->id == TypeTableEntryIdUndefined) { return ir_analyze_undefined_to_anything(ira, source_instr, value, wanted_type); } - // explicit cast from something to const pointer of it + // cast from something to const pointer of it if (!type_requires_comptime(actual_type)) { TypeTableEntry *const_ptr_actual = get_pointer_to_type(ira->codegen, actual_type, true); - if (types_match_const_cast_only(ira, wanted_type, const_ptr_actual, source_node).id == ConstCastResultIdOk) { + if (types_match_const_cast_only(ira, wanted_type, const_ptr_actual, source_node, false).id == ConstCastResultIdOk) { return ir_analyze_cast_ref(ira, source_instr, value, wanted_type); } } - ir_add_error_node(ira, source_instr->source_node, - buf_sprintf("invalid cast from type '%s' to '%s'", - buf_ptr(&actual_type->name), - buf_ptr(&wanted_type->name))); + ErrorMsg *parent_msg = ir_add_error_node(ira, source_instr->source_node, + buf_sprintf("expected type '%s', found '%s'", + buf_ptr(&wanted_type->name), + buf_ptr(&actual_type->name))); + report_recursive_error(ira, source_instr->source_node, &const_cast_result, parent_msg); return ira->codegen->invalid_instruction; } @@ -9937,29 +10954,7 @@ static IrInstruction *ir_implicit_cast(IrAnalyze *ira, IrInstruction *value, Typ if (value->value.type->id == TypeTableEntryIdUnreachable) return value; - ImplicitCastMatchResult result = ir_types_match_with_implicit_cast(ira, expected_type, value->value.type, value); - switch (result) { - case ImplicitCastMatchResultNo: - ir_add_error(ira, value, - buf_sprintf("expected type '%s', found '%s'", - buf_ptr(&expected_type->name), - buf_ptr(&value->value.type->name))); - return ira->codegen->invalid_instruction; - - case ImplicitCastMatchResultYes: - return ir_analyze_cast(ira, value, expected_type, value); - case ImplicitCastMatchResultReportedError: - return ira->codegen->invalid_instruction; - } - - zig_unreachable(); -} - -static IrInstruction *ir_implicit_byval_const_ref_cast(IrAnalyze *ira, IrInstruction *inst) { - if (type_is_copyable(ira->codegen, 
inst->value.type)) - return inst; - TypeTableEntry *const_ref_type = get_pointer_to_type(ira->codegen, inst->value.type, true); - return ir_implicit_cast(ira, inst, const_ref_type); + return ir_analyze_cast(ira, value, expected_type, value); } static IrInstruction *ir_get_deref(IrAnalyze *ira, IrInstruction *source_instruction, IrInstruction *ptr) { @@ -9977,6 +10972,7 @@ static IrInstruction *ir_get_deref(IrAnalyze *ira, IrInstruction *source_instruc IrInstruction *result = ir_create_const(&ira->new_irb, source_instruction->scope, source_instruction->source_node, child_type); copy_const_val(&result->value, pointee, ptr->value.data.x_ptr.mut == ConstPtrMutComptimeConst); + result->value.type = child_type; return result; } } @@ -9986,21 +10982,6 @@ static IrInstruction *ir_get_deref(IrAnalyze *ira, IrInstruction *source_instruc source_instruction->source_node, ptr); load_ptr_instruction->value.type = child_type; return load_ptr_instruction; - } else if (type_entry->id == TypeTableEntryIdMetaType) { - ConstExprValue *ptr_val = ir_resolve_const(ira, ptr, UndefBad); - if (!ptr_val) - return ira->codegen->invalid_instruction; - - TypeTableEntry *ptr_type = ptr_val->data.x_type; - if (ptr_type->id == TypeTableEntryIdPointer) { - TypeTableEntry *child_type = ptr_type->data.pointer.child_type; - return ir_create_const_type(&ira->new_irb, source_instruction->scope, - source_instruction->source_node, child_type); - } else { - ir_add_error(ira, source_instruction, - buf_sprintf("attempt to dereference non pointer type '%s'", buf_ptr(&ptr_type->name))); - return ira->codegen->invalid_instruction; - } } else { ir_add_error_node(ira, source_instruction->source_node, buf_sprintf("attempt to dereference non pointer type '%s'", @@ -10044,11 +11025,11 @@ static bool ir_resolve_align(IrAnalyze *ira, IrInstruction *value, uint32_t *out return true; } -static bool ir_resolve_usize(IrAnalyze *ira, IrInstruction *value, uint64_t *out) { +static bool ir_resolve_unsigned(IrAnalyze *ira, IrInstruction *value, TypeTableEntry *int_type, uint64_t *out) { if (type_is_invalid(value->value.type)) return false; - IrInstruction *casted_value = ir_implicit_cast(ira, value, ira->codegen->builtin_types.entry_usize); + IrInstruction *casted_value = ir_implicit_cast(ira, value, int_type); if (type_is_invalid(casted_value->value.type)) return false; @@ -10060,6 +11041,10 @@ static bool ir_resolve_usize(IrAnalyze *ira, IrInstruction *value, uint64_t *out return true; } +static bool ir_resolve_usize(IrAnalyze *ira, IrInstruction *value, uint64_t *out) { + return ir_resolve_unsigned(ira, value, ira->codegen->builtin_types.entry_usize, out); +} + static bool ir_resolve_bool(IrAnalyze *ira, IrInstruction *value, bool *out) { if (type_is_invalid(value->value.type)) return false; @@ -10169,7 +11154,9 @@ static Buf *ir_resolve_str(IrAnalyze *ira, IrInstruction *value) { if (type_is_invalid(value->value.type)) return nullptr; - TypeTableEntry *ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, true); + TypeTableEntry *ptr_type = get_pointer_to_type_extra(ira->codegen, ira->codegen->builtin_types.entry_u8, + true, false, PtrLenUnknown, + get_abi_alignment(ira->codegen, ira->codegen->builtin_types.entry_u8), 0, 0); TypeTableEntry *str_type = get_slice_type(ira->codegen, ptr_type); IrInstruction *casted_value = ir_implicit_cast(ira, value, str_type); if (type_is_invalid(casted_value->value.type)) @@ -10267,10 +11254,15 @@ static TypeTableEntry *ir_analyze_bin_op_bool(IrAnalyze *ira, IrInstructionBinOp if 
(casted_op2 == ira->codegen->invalid_instruction) return ira->codegen->builtin_types.entry_invalid; - ConstExprValue *op1_val = &casted_op1->value; - ConstExprValue *op2_val = &casted_op2->value; - if (op1_val->special != ConstValSpecialRuntime && op2_val->special != ConstValSpecialRuntime) { + if (instr_is_comptime(casted_op1) && instr_is_comptime(casted_op2)) { ConstExprValue *out_val = ir_build_const_from(ira, &bin_op_instruction->base); + ConstExprValue *op1_val = ir_resolve_const(ira, casted_op1, UndefBad); + if (op1_val == nullptr) + return ira->codegen->builtin_types.entry_invalid; + + ConstExprValue *op2_val = ir_resolve_const(ira, casted_op2, UndefBad); + if (op2_val == nullptr) + return ira->codegen->builtin_types.entry_invalid; assert(casted_op1->value.type->id == TypeTableEntryIdBool); assert(casted_op2->value.type->id == TypeTableEntryIdBool); @@ -10307,6 +11299,16 @@ static bool resolve_cmp_op_id(IrBinOp op_id, Cmp cmp) { } } +static bool optional_value_is_null(ConstExprValue *val) { + assert(val->special == ConstValSpecialStatic); + if (get_codegen_ptr_type(val->type) != nullptr) { + return val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr && + val->data.x_ptr.data.hard_coded_addr.addr == 0; + } else { + return val->data.x_optional == nullptr; + } +} + static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp *bin_op_instruction) { IrInstruction *op1 = bin_op_instruction->op1->other; IrInstruction *op2 = bin_op_instruction->op2->other; @@ -10315,19 +11317,19 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp IrBinOp op_id = bin_op_instruction->op_id; bool is_equality_cmp = (op_id == IrBinOpCmpEq || op_id == IrBinOpCmpNotEq); if (is_equality_cmp && - ((op1->value.type->id == TypeTableEntryIdNullLit && op2->value.type->id == TypeTableEntryIdMaybe) || - (op2->value.type->id == TypeTableEntryIdNullLit && op1->value.type->id == TypeTableEntryIdMaybe) || - (op1->value.type->id == TypeTableEntryIdNullLit && op2->value.type->id == TypeTableEntryIdNullLit))) + ((op1->value.type->id == TypeTableEntryIdNull && op2->value.type->id == TypeTableEntryIdOptional) || + (op2->value.type->id == TypeTableEntryIdNull && op1->value.type->id == TypeTableEntryIdOptional) || + (op1->value.type->id == TypeTableEntryIdNull && op2->value.type->id == TypeTableEntryIdNull))) { - if (op1->value.type->id == TypeTableEntryIdNullLit && op2->value.type->id == TypeTableEntryIdNullLit) { + if (op1->value.type->id == TypeTableEntryIdNull && op2->value.type->id == TypeTableEntryIdNull) { ConstExprValue *out_val = ir_build_const_from(ira, &bin_op_instruction->base); out_val->data.x_bool = (op_id == IrBinOpCmpEq); return ira->codegen->builtin_types.entry_bool; } IrInstruction *maybe_op; - if (op1->value.type->id == TypeTableEntryIdNullLit) { + if (op1->value.type->id == TypeTableEntryIdNull) { maybe_op = op2; - } else if (op2->value.type->id == TypeTableEntryIdNullLit) { + } else if (op2->value.type->id == TypeTableEntryIdNull) { maybe_op = op1; } else { zig_unreachable(); @@ -10336,7 +11338,7 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp ConstExprValue *maybe_val = ir_resolve_const(ira, maybe_op, UndefBad); if (!maybe_val) return ira->codegen->builtin_types.entry_invalid; - bool is_null = (maybe_val->data.x_maybe == nullptr); + bool is_null = optional_value_is_null(maybe_val); ConstExprValue *out_val = ir_build_const_from(ira, &bin_op_instruction->base); out_val->data.x_bool = (op_id == IrBinOpCmpEq) ? 
is_null : !is_null; return ira->codegen->builtin_types.entry_bool; @@ -10364,7 +11366,7 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp return ira->codegen->builtin_types.entry_invalid; } - if (!resolve_inferred_error_set(ira, intersect_type, source_node)) { + if (!resolve_inferred_error_set(ira->codegen, intersect_type, source_node)) { return ira->codegen->builtin_types.entry_invalid; } @@ -10410,9 +11412,14 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp } } - ConstExprValue *op1_val = &op1->value; - ConstExprValue *op2_val = &op2->value; - if (value_is_comptime(op1_val) && value_is_comptime(op2_val)) { + if (instr_is_comptime(op1) && instr_is_comptime(op2)) { + ConstExprValue *op1_val = ir_resolve_const(ira, op1, UndefBad); + if (op1_val == nullptr) + return ira->codegen->builtin_types.entry_invalid; + ConstExprValue *op2_val = ir_resolve_const(ira, op2, UndefBad); + if (op2_val == nullptr) + return ira->codegen->builtin_types.entry_invalid; + bool answer; bool are_equal = op1_val->data.x_err_set->value == op2_val->data.x_err_set->value; if (op_id == IrBinOpCmpEq) { @@ -10435,22 +11442,23 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp } IrInstruction *instructions[] = {op1, op2}; - TypeTableEntry *resolved_type = ir_resolve_peer_types(ira, source_node, instructions, 2); + TypeTableEntry *resolved_type = ir_resolve_peer_types(ira, source_node, nullptr, instructions, 2); if (type_is_invalid(resolved_type)) return resolved_type; type_ensure_zero_bits_known(ira->codegen, resolved_type); if (type_is_invalid(resolved_type)) return resolved_type; - + bool operator_allowed; switch (resolved_type->id) { case TypeTableEntryIdInvalid: zig_unreachable(); // handled above - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdInt: case TypeTableEntryIdFloat: + operator_allowed = true; break; case TypeTableEntryIdBool: @@ -10465,32 +11473,27 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp case TypeTableEntryIdBoundFn: case TypeTableEntryIdArgTuple: case TypeTableEntryIdPromise: - if (!is_equality_cmp) { - ir_add_error_node(ira, source_node, - buf_sprintf("operator not allowed for type '%s'", buf_ptr(&resolved_type->name))); - return ira->codegen->builtin_types.entry_invalid; - } - break; - case TypeTableEntryIdEnum: - if (!is_equality_cmp) { - ir_add_error_node(ira, source_node, - buf_sprintf("operator not allowed for type '%s'", buf_ptr(&resolved_type->name))); - return ira->codegen->builtin_types.entry_invalid; - } + operator_allowed = is_equality_cmp; break; case TypeTableEntryIdUnreachable: case TypeTableEntryIdArray: case TypeTableEntryIdStruct: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdUnion: - ir_add_error_node(ira, source_node, - buf_sprintf("operator not allowed for type '%s'", buf_ptr(&resolved_type->name))); - return ira->codegen->builtin_types.entry_invalid; + operator_allowed = false; + break; + case TypeTableEntryIdOptional: + operator_allowed = is_equality_cmp && get_codegen_ptr_type(resolved_type) != nullptr; + break; + } + if (!operator_allowed) { + ir_add_error_node(ira, source_node, + buf_sprintf("operator not allowed for type '%s'", buf_ptr(&resolved_type->name))); + 
return ira->codegen->builtin_types.entry_invalid; } IrInstruction *casted_op1 = ir_implicit_cast(ira, op1, resolved_type); @@ -10501,15 +11504,20 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp if (casted_op2 == ira->codegen->invalid_instruction) return ira->codegen->builtin_types.entry_invalid; - ConstExprValue *op1_val = &casted_op1->value; - ConstExprValue *op2_val = &casted_op2->value; bool one_possible_value = !type_requires_comptime(resolved_type) && !type_has_bits(resolved_type); - if (one_possible_value || (value_is_comptime(op1_val) && value_is_comptime(op2_val))) { + if (one_possible_value || (instr_is_comptime(casted_op1) && instr_is_comptime(casted_op2))) { + ConstExprValue *op1_val = one_possible_value ? &casted_op1->value : ir_resolve_const(ira, casted_op1, UndefBad); + if (op1_val == nullptr) + return ira->codegen->builtin_types.entry_invalid; + ConstExprValue *op2_val = one_possible_value ? &casted_op2->value : ir_resolve_const(ira, casted_op2, UndefBad); + if (op2_val == nullptr) + return ira->codegen->builtin_types.entry_invalid; + bool answer; - if (resolved_type->id == TypeTableEntryIdNumLitFloat || resolved_type->id == TypeTableEntryIdFloat) { + if (resolved_type->id == TypeTableEntryIdComptimeFloat || resolved_type->id == TypeTableEntryIdFloat) { Cmp cmp_result = float_cmp(op1_val, op2_val); answer = resolve_cmp_op_id(op_id, cmp_result); - } else if (resolved_type->id == TypeTableEntryIdNumLitInt || resolved_type->id == TypeTableEntryIdInt) { + } else if (resolved_type->id == TypeTableEntryIdComptimeInt || resolved_type->id == TypeTableEntryIdInt) { Cmp cmp_result = bigint_cmp(&op1_val->data.x_bigint, &op2_val->data.x_bigint); answer = resolve_cmp_op_id(op_id, cmp_result); } else { @@ -10532,11 +11540,17 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp if (resolved_type->id == TypeTableEntryIdInt && !resolved_type->data.integral.is_signed) { ConstExprValue *known_left_val; IrBinOp flipped_op_id; - if (value_is_comptime(op1_val)) { - known_left_val = op1_val; + if (instr_is_comptime(casted_op1)) { + known_left_val = ir_resolve_const(ira, casted_op1, UndefBad); + if (known_left_val == nullptr) + return ira->codegen->builtin_types.entry_invalid; + flipped_op_id = op_id; - } else if (value_is_comptime(op2_val)) { - known_left_val = op2_val; + } else if (instr_is_comptime(casted_op2)) { + known_left_val = ir_resolve_const(ira, casted_op2, UndefBad); + if (known_left_val == nullptr) + return ira->codegen->builtin_types.entry_invalid; + if (op_id == IrBinOpCmpLessThan) { flipped_op_id = IrBinOpCmpGreaterThan; } else if (op_id == IrBinOpCmpGreaterThan) { @@ -10573,12 +11587,12 @@ static int ir_eval_math_op(TypeTableEntry *type_entry, ConstExprValue *op1_val, bool is_int; bool is_float; Cmp op2_zcmp; - if (type_entry->id == TypeTableEntryIdInt || type_entry->id == TypeTableEntryIdNumLitInt) { + if (type_entry->id == TypeTableEntryIdInt || type_entry->id == TypeTableEntryIdComptimeInt) { is_int = true; is_float = false; op2_zcmp = bigint_cmp_zero(&op2_val->data.x_bigint); } else if (type_entry->id == TypeTableEntryIdFloat || - type_entry->id == TypeTableEntryIdNumLitFloat) + type_entry->id == TypeTableEntryIdComptimeFloat) { is_int = false; is_float = true; @@ -10752,7 +11766,7 @@ static TypeTableEntry *ir_analyze_bit_shift(IrAnalyze *ira, IrInstructionBinOp * if (type_is_invalid(op1->value.type)) return ira->codegen->builtin_types.entry_invalid; - if (op1->value.type->id != TypeTableEntryIdInt && 
op1->value.type->id != TypeTableEntryIdNumLitInt) { + if (op1->value.type->id != TypeTableEntryIdInt && op1->value.type->id != TypeTableEntryIdComptimeInt) { ir_add_error(ira, &bin_op_instruction->base, buf_sprintf("bit shifting operation expected integer type, found '%s'", buf_ptr(&op1->value.type->name))); @@ -10765,7 +11779,7 @@ static TypeTableEntry *ir_analyze_bit_shift(IrAnalyze *ira, IrInstructionBinOp * IrInstruction *casted_op2; IrBinOp op_id = bin_op_instruction->op_id; - if (op1->value.type->id == TypeTableEntryIdNumLitInt) { + if (op1->value.type->id == TypeTableEntryIdComptimeInt) { casted_op2 = op2; if (op_id == IrBinOpBitShiftLeftLossy) { @@ -10781,6 +11795,26 @@ static TypeTableEntry *ir_analyze_bit_shift(IrAnalyze *ira, IrInstructionBinOp * } else { TypeTableEntry *shift_amt_type = get_smallest_unsigned_int_type(ira->codegen, op1->value.type->data.integral.bit_count - 1); + if (bin_op_instruction->op_id == IrBinOpBitShiftLeftLossy && + op2->value.type->id == TypeTableEntryIdComptimeInt) { + if (!bigint_fits_in_bits(&op2->value.data.x_bigint, + shift_amt_type->data.integral.bit_count, + op2->value.data.x_bigint.is_negative)) { + Buf *val_buf = buf_alloc(); + bigint_append_buf(val_buf, &op2->value.data.x_bigint, 10); + ErrorMsg* msg = ir_add_error(ira, + &bin_op_instruction->base, + buf_sprintf("RHS of shift is too large for LHS type")); + add_error_note( + ira->codegen, + msg, + op2->source_node, + buf_sprintf("value %s cannot fit into type %s", + buf_ptr(val_buf), + buf_ptr(&shift_amt_type->name))); + return ira->codegen->builtin_types.entry_invalid; + } + } casted_op2 = ir_implicit_cast(ira, op2, shift_amt_type); if (casted_op2 == ira->codegen->invalid_instruction) @@ -10788,8 +11822,14 @@ static TypeTableEntry *ir_analyze_bit_shift(IrAnalyze *ira, IrInstructionBinOp * } if (instr_is_comptime(op1) && instr_is_comptime(casted_op2)) { - ConstExprValue *op1_val = &op1->value; - ConstExprValue *op2_val = &casted_op2->value; + ConstExprValue *op1_val = ir_resolve_const(ira, op1, UndefBad); + if (op1_val == nullptr) + return ira->codegen->builtin_types.entry_invalid; + + ConstExprValue *op2_val = ir_resolve_const(ira, casted_op2, UndefBad); + if (op2_val == nullptr) + return ira->codegen->builtin_types.entry_invalid; + IrInstruction *result_instruction = ir_get_const(ira, &bin_op_instruction->base); ir_link_new_instruction(result_instruction, &bin_op_instruction->base); ConstExprValue *out_val = &result_instruction->value; @@ -10810,7 +11850,7 @@ static TypeTableEntry *ir_analyze_bit_shift(IrAnalyze *ira, IrInstructionBinOp * ir_num_lit_fits_in_other_type(ira, result_instruction, op1->value.type, false); return op1->value.type; - } else if (op1->value.type->id == TypeTableEntryIdNumLitInt) { + } else if (op1->value.type->id == TypeTableEntryIdComptimeInt) { ir_add_error(ira, &bin_op_instruction->base, buf_sprintf("LHS of shift must be an integer type, or RHS must be compile-time known")); return ira->codegen->builtin_types.entry_invalid; @@ -10823,22 +11863,44 @@ static TypeTableEntry *ir_analyze_bit_shift(IrAnalyze *ira, IrInstructionBinOp * static TypeTableEntry *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp *bin_op_instruction) { IrInstruction *op1 = bin_op_instruction->op1->other; + if (type_is_invalid(op1->value.type)) + return ira->codegen->builtin_types.entry_invalid; + IrInstruction *op2 = bin_op_instruction->op2->other; + if (type_is_invalid(op2->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + IrBinOp op_id = bin_op_instruction->op_id; 
+ + // look for pointer math + if (op1->value.type->id == TypeTableEntryIdPointer && op1->value.type->data.pointer.ptr_len == PtrLenUnknown && + (op_id == IrBinOpAdd || op_id == IrBinOpSub)) + { + IrInstruction *casted_op2 = ir_implicit_cast(ira, op2, ira->codegen->builtin_types.entry_usize); + if (casted_op2 == ira->codegen->invalid_instruction) + return ira->codegen->builtin_types.entry_invalid; + + IrInstruction *result = ir_build_bin_op(&ira->new_irb, bin_op_instruction->base.scope, + bin_op_instruction->base.source_node, op_id, op1, casted_op2, true); + result->value.type = op1->value.type; + ir_link_new_instruction(result, &bin_op_instruction->base); + return result->value.type; + } + IrInstruction *instructions[] = {op1, op2}; - TypeTableEntry *resolved_type = ir_resolve_peer_types(ira, bin_op_instruction->base.source_node, instructions, 2); + TypeTableEntry *resolved_type = ir_resolve_peer_types(ira, bin_op_instruction->base.source_node, nullptr, instructions, 2); if (type_is_invalid(resolved_type)) return resolved_type; - IrBinOp op_id = bin_op_instruction->op_id; - bool is_int = resolved_type->id == TypeTableEntryIdInt || resolved_type->id == TypeTableEntryIdNumLitInt; - bool is_float = resolved_type->id == TypeTableEntryIdFloat || resolved_type->id == TypeTableEntryIdNumLitFloat; + bool is_int = resolved_type->id == TypeTableEntryIdInt || resolved_type->id == TypeTableEntryIdComptimeInt; + bool is_float = resolved_type->id == TypeTableEntryIdFloat || resolved_type->id == TypeTableEntryIdComptimeFloat; bool is_signed_div = ( (resolved_type->id == TypeTableEntryIdInt && resolved_type->data.integral.is_signed) || resolved_type->id == TypeTableEntryIdFloat || - (resolved_type->id == TypeTableEntryIdNumLitFloat && + (resolved_type->id == TypeTableEntryIdComptimeFloat && ((bigfloat_cmp_zero(&op1->value.data.x_bigfloat) != CmpGT) != (bigfloat_cmp_zero(&op2->value.data.x_bigfloat) != CmpGT))) || - (resolved_type->id == TypeTableEntryIdNumLitInt && + (resolved_type->id == TypeTableEntryIdComptimeInt && ((bigint_cmp_zero(&op1->value.data.x_bigint) != CmpGT) != (bigint_cmp_zero(&op2->value.data.x_bigint) != CmpGT))) ); @@ -10846,7 +11908,15 @@ static TypeTableEntry *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp if (is_signed_div) { bool ok = false; if (instr_is_comptime(op1) && instr_is_comptime(op2)) { - if (bigint_cmp_zero(&op2->value.data.x_bigint) == CmpEQ) { + ConstExprValue *op1_val = ir_resolve_const(ira, op1, UndefBad); + if (op1_val == nullptr) + return ira->codegen->builtin_types.entry_invalid; + + ConstExprValue *op2_val = ir_resolve_const(ira, op2, UndefBad); + if (op2_val == nullptr) + return ira->codegen->builtin_types.entry_invalid; + + if (bigint_cmp_zero(&op2_val->data.x_bigint) == CmpEQ) { // the division by zero error will be caught later, but we don't have a // division function ambiguity problem. 
op_id = IrBinOpDivTrunc; @@ -10854,8 +11924,8 @@ static TypeTableEntry *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp } else { BigInt trunc_result; BigInt floor_result; - bigint_div_trunc(&trunc_result, &op1->value.data.x_bigint, &op2->value.data.x_bigint); - bigint_div_floor(&floor_result, &op1->value.data.x_bigint, &op2->value.data.x_bigint); + bigint_div_trunc(&trunc_result, &op1_val->data.x_bigint, &op2_val->data.x_bigint); + bigint_div_floor(&floor_result, &op1_val->data.x_bigint, &op2_val->data.x_bigint); if (bigint_cmp(&trunc_result, &floor_result) == CmpEQ) { ok = true; op_id = IrBinOpDivTrunc; @@ -10876,7 +11946,15 @@ static TypeTableEntry *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp if (is_signed_div && (is_int || is_float)) { bool ok = false; if (instr_is_comptime(op1) && instr_is_comptime(op2)) { + ConstExprValue *op1_val = ir_resolve_const(ira, op1, UndefBad); + if (op1_val == nullptr) + return ira->codegen->builtin_types.entry_invalid; + if (is_int) { + ConstExprValue *op2_val = ir_resolve_const(ira, op2, UndefBad); + if (op2_val == nullptr) + return ira->codegen->builtin_types.entry_invalid; + if (bigint_cmp_zero(&op2->value.data.x_bigint) == CmpEQ) { // the division by zero error will be caught later, but we don't // have a remainder function ambiguity problem @@ -10884,14 +11962,19 @@ static TypeTableEntry *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp } else { BigInt rem_result; BigInt mod_result; - bigint_rem(&rem_result, &op1->value.data.x_bigint, &op2->value.data.x_bigint); - bigint_mod(&mod_result, &op1->value.data.x_bigint, &op2->value.data.x_bigint); + bigint_rem(&rem_result, &op1_val->data.x_bigint, &op2_val->data.x_bigint); + bigint_mod(&mod_result, &op1_val->data.x_bigint, &op2_val->data.x_bigint); ok = bigint_cmp(&rem_result, &mod_result) == CmpEQ; } } else { IrInstruction *casted_op2 = ir_implicit_cast(ira, op2, resolved_type); if (casted_op2 == ira->codegen->invalid_instruction) return ira->codegen->builtin_types.entry_invalid; + + ConstExprValue *op2_val = ir_resolve_const(ira, casted_op2, UndefBad); + if (op2_val == nullptr) + return ira->codegen->builtin_types.entry_invalid; + if (float_cmp_zero(&casted_op2->value) == CmpEQ) { // the division by zero error will be caught later, but we don't // have a remainder function ambiguity problem @@ -10899,8 +11982,8 @@ static TypeTableEntry *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp } else { ConstExprValue rem_result; ConstExprValue mod_result; - float_rem(&rem_result, &op1->value, &casted_op2->value); - float_mod(&mod_result, &op1->value, &casted_op2->value); + float_rem(&rem_result, op1_val, op2_val); + float_mod(&mod_result, op1_val, op2_val); ok = float_cmp(&rem_result, &mod_result) == CmpEQ; } } @@ -10939,7 +12022,7 @@ static TypeTableEntry *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp return ira->codegen->builtin_types.entry_invalid; } - if (resolved_type->id == TypeTableEntryIdNumLitInt) { + if (resolved_type->id == TypeTableEntryIdComptimeInt) { if (op_id == IrBinOpAddWrap) { op_id = IrBinOpAdd; } else if (op_id == IrBinOpSubWrap) { @@ -10958,8 +12041,13 @@ static TypeTableEntry *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp return ira->codegen->builtin_types.entry_invalid; if (instr_is_comptime(casted_op1) && instr_is_comptime(casted_op2)) { - ConstExprValue *op1_val = &casted_op1->value; - ConstExprValue *op2_val = &casted_op2->value; + ConstExprValue *op1_val = ir_resolve_const(ira, casted_op1, UndefBad); + if (op1_val == 
nullptr) + return ira->codegen->builtin_types.entry_invalid; + ConstExprValue *op2_val = ir_resolve_const(ira, casted_op2, UndefBad); + if (op2_val == nullptr) + return ira->codegen->builtin_types.entry_invalid; + IrInstruction *result_instruction = ir_get_const(ira, &bin_op_instruction->base); ir_link_new_instruction(result_instruction, &bin_op_instruction->base); ConstExprValue *out_val = &result_instruction->value; @@ -11101,7 +12189,8 @@ static TypeTableEntry *ir_analyze_array_cat(IrAnalyze *ira, IrInstructionBinOp * out_array_val = out_val; } else if (is_slice(op1_type) || is_slice(op2_type)) { - TypeTableEntry *ptr_type = get_pointer_to_type(ira->codegen, child_type, true); + TypeTableEntry *ptr_type = get_pointer_to_type_extra(ira->codegen, child_type, + true, false, PtrLenUnknown, get_abi_alignment(ira->codegen, child_type), 0, 0); result_type = get_slice_type(ira->codegen, ptr_type); out_array_val = create_const_vals(1); out_array_val->special = ConstValSpecialStatic; @@ -11121,7 +12210,9 @@ static TypeTableEntry *ir_analyze_array_cat(IrAnalyze *ira, IrInstructionBinOp * } else { new_len += 1; // null byte - result_type = get_pointer_to_type(ira->codegen, child_type, true); + // TODO make this `[*]null T` instead of `[*]T` + result_type = get_pointer_to_type_extra(ira->codegen, child_type, true, false, + PtrLenUnknown, get_abi_alignment(ira->codegen, child_type), 0, 0); out_array_val = create_const_vals(1); out_array_val->special = ConstValSpecialStatic; @@ -11131,9 +12222,16 @@ static TypeTableEntry *ir_analyze_array_cat(IrAnalyze *ira, IrInstructionBinOp * out_val->data.x_ptr.data.base_array.array_val = out_array_val; out_val->data.x_ptr.data.base_array.elem_index = 0; } - out_array_val->data.x_array.s_none.elements = create_const_vals(new_len); + if (op1_array_val->data.x_array.special == ConstArraySpecialUndef && + op2_array_val->data.x_array.special == ConstArraySpecialUndef) { + out_array_val->data.x_array.special = ConstArraySpecialUndef; + return result_type; + } + + out_array_val->data.x_array.s_none.elements = create_const_vals(new_len); expand_undef_array(ira->codegen, op1_array_val); + expand_undef_array(ira->codegen, op2_array_val); size_t next_index = 0; for (size_t i = op1_array_index; i < op1_array_end; i += 1, next_index += 1) { @@ -11185,10 +12283,14 @@ static TypeTableEntry *ir_analyze_array_mult(IrAnalyze *ira, IrInstructionBinOp } ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); + if (array_val->data.x_array.special == ConstArraySpecialUndef) { + out_val->data.x_array.special = ConstArraySpecialUndef; - out_val->data.x_array.s_none.elements = create_const_vals(new_array_len); + TypeTableEntry *child_type = array_type->data.array.child_type; + return get_array_type(ira->codegen, child_type, new_array_len); + } - expand_undef_array(ira->codegen, array_val); + out_val->data.x_array.s_none.elements = create_const_vals(new_array_len); uint64_t i = 0; for (uint64_t x = 0; x < mult_amt; x += 1) { @@ -11220,11 +12322,11 @@ static TypeTableEntry *ir_analyze_merge_error_sets(IrAnalyze *ira, IrInstruction return ira->codegen->builtin_types.entry_type; } - if (!resolve_inferred_error_set(ira, op1_type, instruction->op1->other->source_node)) { + if (!resolve_inferred_error_set(ira->codegen, op1_type, instruction->op1->other->source_node)) { return ira->codegen->builtin_types.entry_invalid; } - if (!resolve_inferred_error_set(ira, op2_type, instruction->op2->other->source_node)) { + if (!resolve_inferred_error_set(ira->codegen, op2_type, 
instruction->op2->other->source_node)) { return ira->codegen->builtin_types.entry_invalid; } @@ -11290,61 +12392,6 @@ static TypeTableEntry *ir_analyze_instruction_bin_op(IrAnalyze *ira, IrInstructi zig_unreachable(); } -enum VarClassRequired { - VarClassRequiredAny, - VarClassRequiredConst, - VarClassRequiredIllegal, -}; - -static VarClassRequired get_var_class_required(TypeTableEntry *type_entry) { - switch (type_entry->id) { - case TypeTableEntryIdInvalid: - zig_unreachable(); - case TypeTableEntryIdUnreachable: - return VarClassRequiredIllegal; - case TypeTableEntryIdBool: - case TypeTableEntryIdInt: - case TypeTableEntryIdFloat: - case TypeTableEntryIdVoid: - case TypeTableEntryIdErrorSet: - case TypeTableEntryIdFn: - case TypeTableEntryIdPromise: - return VarClassRequiredAny; - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdBlock: - case TypeTableEntryIdNullLit: - case TypeTableEntryIdOpaque: - case TypeTableEntryIdMetaType: - case TypeTableEntryIdNamespace: - case TypeTableEntryIdBoundFn: - case TypeTableEntryIdArgTuple: - return VarClassRequiredConst; - - case TypeTableEntryIdPointer: - if (type_entry->data.pointer.child_type->id == TypeTableEntryIdOpaque) { - return VarClassRequiredAny; - } else { - return get_var_class_required(type_entry->data.pointer.child_type); - } - case TypeTableEntryIdArray: - return get_var_class_required(type_entry->data.array.child_type); - case TypeTableEntryIdMaybe: - return get_var_class_required(type_entry->data.maybe.child_type); - case TypeTableEntryIdErrorUnion: - return get_var_class_required(type_entry->data.error_union.payload_type); - - case TypeTableEntryIdStruct: - case TypeTableEntryIdEnum: - case TypeTableEntryIdUnion: - // TODO check the fields of these things and make sure that they don't recursively - // contain any of the other variable classes - return VarClassRequiredAny; - } - zig_unreachable(); -} - static TypeTableEntry *ir_analyze_instruction_decl_var(IrAnalyze *ira, IrInstructionDeclVar *decl_var_instruction) { VariableTableEntry *var = decl_var_instruction->var; @@ -11379,36 +12426,41 @@ static TypeTableEntry *ir_analyze_instruction_decl_var(IrAnalyze *ira, IrInstruc if (type_is_invalid(result_type)) { result_type = ira->codegen->builtin_types.entry_invalid; } else { - switch (get_var_class_required(result_type)) { - case VarClassRequiredIllegal: + type_ensure_zero_bits_known(ira->codegen, result_type); + if (type_is_invalid(result_type)) { + result_type = ira->codegen->builtin_types.entry_invalid; + } + } + + if (!type_is_invalid(result_type)) { + if (result_type->id == TypeTableEntryIdUnreachable || + result_type->id == TypeTableEntryIdOpaque) + { + ir_add_error_node(ira, source_node, + buf_sprintf("variable of type '%s' not allowed", buf_ptr(&result_type->name))); + result_type = ira->codegen->builtin_types.entry_invalid; + } else if (type_requires_comptime(result_type)) { + var_class_requires_const = true; + if (!var->gen_is_const && !is_comptime_var) { ir_add_error_node(ira, source_node, - buf_sprintf("variable of type '%s' not allowed", buf_ptr(&result_type->name))); + buf_sprintf("variable of type '%s' must be const or comptime", + buf_ptr(&result_type->name))); result_type = ira->codegen->builtin_types.entry_invalid; - break; - case VarClassRequiredConst: + } + } else { + if (casted_init_value->value.special == ConstValSpecialStatic && + casted_init_value->value.type->id == TypeTableEntryIdFn && + 
casted_init_value->value.data.x_ptr.data.fn.fn_entry->fn_inline == FnInlineAlways) + { var_class_requires_const = true; if (!var->src_is_const && !is_comptime_var) { - ir_add_error_node(ira, source_node, - buf_sprintf("variable of type '%s' must be const or comptime", - buf_ptr(&result_type->name))); + ErrorMsg *msg = ir_add_error_node(ira, source_node, + buf_sprintf("functions marked inline must be stored in const or comptime var")); + AstNode *proto_node = casted_init_value->value.data.x_ptr.data.fn.fn_entry->proto_node; + add_error_note(ira->codegen, msg, proto_node, buf_sprintf("declared here")); result_type = ira->codegen->builtin_types.entry_invalid; } - break; - case VarClassRequiredAny: - if (casted_init_value->value.special == ConstValSpecialStatic && - casted_init_value->value.type->id == TypeTableEntryIdFn && - casted_init_value->value.data.x_ptr.data.fn.fn_entry->fn_inline == FnInlineAlways) - { - var_class_requires_const = true; - if (!var->src_is_const && !is_comptime_var) { - ErrorMsg *msg = ir_add_error_node(ira, source_node, - buf_sprintf("functions marked inline must be stored in const or comptime var")); - AstNode *proto_node = casted_init_value->value.data.x_ptr.data.fn.fn_entry->proto_node; - add_error_note(ira->codegen, msg, proto_node, buf_sprintf("declared here")); - result_type = ira->codegen->builtin_types.entry_invalid; - } - } - break; + } } } @@ -11432,7 +12484,8 @@ static TypeTableEntry *ir_analyze_instruction_decl_var(IrAnalyze *ira, IrInstruc if (var->mem_slot_index != SIZE_MAX) { assert(var->mem_slot_index < ira->exec_context.mem_slot_count); ConstExprValue *mem_slot = &ira->exec_context.mem_slot_list[var->mem_slot_index]; - *mem_slot = casted_init_value->value; + copy_const_val(mem_slot, &casted_init_value->value, + !is_comptime_var || var->gen_is_const); if (is_comptime_var || (var_class_requires_const && var->gen_is_const)) { ir_build_const_from(ira, &decl_var_instruction->base); @@ -11578,11 +12631,11 @@ static TypeTableEntry *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructi case TypeTableEntryIdMetaType: case TypeTableEntryIdVoid: case TypeTableEntryIdUnreachable: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdNamespace: @@ -11602,11 +12655,11 @@ static TypeTableEntry *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructi case TypeTableEntryIdFloat: case TypeTableEntryIdPointer: case TypeTableEntryIdArray: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: zig_panic("TODO export const value of type %s", buf_ptr(&target->value.type->name)); @@ -11633,22 +12686,24 @@ static bool exec_has_err_ret_trace(CodeGen *g, IrExecutable *exec) { static TypeTableEntry *ir_analyze_instruction_error_return_trace(IrAnalyze *ira, IrInstructionErrorReturnTrace *instruction) { - if (instruction->nullable == IrInstructionErrorReturnTrace::Null) { + if 
(instruction->optional == IrInstructionErrorReturnTrace::Null) { TypeTableEntry *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(ira->codegen); - TypeTableEntry *nullable_type = get_maybe_type(ira->codegen, ptr_to_stack_trace_type); + TypeTableEntry *optional_type = get_optional_type(ira->codegen, ptr_to_stack_trace_type); if (!exec_has_err_ret_trace(ira->codegen, ira->new_irb.exec)) { ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); - out_val->data.x_maybe = nullptr; - return nullable_type; + assert(get_codegen_ptr_type(optional_type) != nullptr); + out_val->data.x_ptr.special = ConstPtrSpecialHardCodedAddr; + out_val->data.x_ptr.data.hard_coded_addr.addr = 0; + return optional_type; } IrInstruction *new_instruction = ir_build_error_return_trace(&ira->new_irb, instruction->base.scope, - instruction->base.source_node, instruction->nullable); + instruction->base.source_node, instruction->optional); ir_link_new_instruction(new_instruction, &instruction->base); - return nullable_type; + return optional_type; } else { assert(ira->codegen->have_err_ret_tracing); IrInstruction *new_instruction = ir_build_error_return_trace(&ira->new_irb, instruction->base.scope, - instruction->base.source_node, instruction->nullable); + instruction->base.source_node, instruction->optional); ir_link_new_instruction(new_instruction, &instruction->base); return get_ptr_to_stack_trace_type(ira->codegen); } @@ -11706,7 +12761,7 @@ IrInstruction *ir_get_implicit_allocator(IrAnalyze *ira, IrInstruction *source_i { VariableTableEntry *coro_allocator_var = ira->old_irb.exec->coro_allocator_var; assert(coro_allocator_var != nullptr); - IrInstruction *var_ptr_inst = ir_get_var_ptr(ira, source_instr, coro_allocator_var, true, false); + IrInstruction *var_ptr_inst = ir_get_var_ptr(ira, source_instr, coro_allocator_var); IrInstruction *result = ir_get_deref(ira, source_instr, var_ptr_inst); assert(result->value.type != nullptr); return result; @@ -11749,7 +12804,7 @@ static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCall *c TypeTableEntry *async_return_type = get_error_union_type(ira->codegen, alloc_fn_error_set_type, promise_type); IrInstruction *result = ir_build_call(&ira->new_irb, call_instruction->base.scope, call_instruction->base.source_node, - fn_entry, fn_ref, arg_count, casted_args, false, FnInlineAuto, true, async_allocator_inst); + fn_entry, fn_ref, arg_count, casted_args, false, FnInlineAuto, true, async_allocator_inst, nullptr); result->value.type = async_return_type; return result; } @@ -11799,7 +12854,7 @@ static bool ir_analyze_fn_call_generic_arg(IrAnalyze *ira, AstNode *fn_proto_nod IrInstruction *casted_arg; if (is_var_args) { arg_part_of_generic_id = true; - casted_arg = ir_implicit_byval_const_ref_cast(ira, arg); + casted_arg = arg; } else { if (param_decl_node->data.param_decl.var_token == nullptr) { AstNode *param_type_node = param_decl_node->data.param_decl.type; @@ -11812,12 +12867,12 @@ static bool ir_analyze_fn_call_generic_arg(IrAnalyze *ira, AstNode *fn_proto_nod return false; } else { arg_part_of_generic_id = true; - casted_arg = ir_implicit_byval_const_ref_cast(ira, arg); + casted_arg = arg; } } bool comptime_arg = param_decl_node->data.param_decl.is_inline || - casted_arg->value.type->id == TypeTableEntryIdNumLitInt || casted_arg->value.type->id == TypeTableEntryIdNumLitFloat; + casted_arg->value.type->id == TypeTableEntryIdComptimeInt || casted_arg->value.type->id == TypeTableEntryIdComptimeFloat; ConstExprValue *arg_val; @@ -11835,6 
+12890,7 @@ static bool ir_analyze_fn_call_generic_arg(IrAnalyze *ira, AstNode *fn_proto_nod } Buf *param_name = param_decl_node->data.param_decl.name; + if (!param_name) return false; if (!is_var_args) { VariableTableEntry *var = add_variable(ira->codegen, param_decl_node, *child_scope, param_name, true, arg_val, nullptr); @@ -11842,11 +12898,11 @@ static bool ir_analyze_fn_call_generic_arg(IrAnalyze *ira, AstNode *fn_proto_nod var->shadowable = !comptime_arg; *next_proto_i += 1; - } else if (casted_arg->value.type->id == TypeTableEntryIdNumLitInt || - casted_arg->value.type->id == TypeTableEntryIdNumLitFloat) + } else if (casted_arg->value.type->id == TypeTableEntryIdComptimeInt || + casted_arg->value.type->id == TypeTableEntryIdComptimeFloat) { ir_add_error(ira, casted_arg, - buf_sprintf("compiler bug: integer and float literals in var args function must be casted. https://github.com/zig-lang/zig/issues/557")); + buf_sprintf("compiler bug: integer and float literals in var args function must be casted. https://github.com/ziglang/zig/issues/557")); return false; } @@ -11887,7 +12943,7 @@ static VariableTableEntry *get_fn_var_by_index(FnTableEntry *fn_entry, size_t in } static IrInstruction *ir_get_var_ptr(IrAnalyze *ira, IrInstruction *instruction, - VariableTableEntry *var, bool is_const_ptr, bool is_volatile_ptr) + VariableTableEntry *var) { if (var->mem_slot_index != SIZE_MAX && var->owner_exec->analysis == nullptr) { assert(ira->codegen->errors.length != 0); @@ -11913,8 +12969,8 @@ static IrInstruction *ir_get_var_ptr(IrAnalyze *ira, IrInstruction *instruction, } } - bool is_const = (var->value->type->id == TypeTableEntryIdMetaType) ? is_const_ptr : var->src_is_const; - bool is_volatile = (var->value->type->id == TypeTableEntryIdMetaType) ? is_volatile_ptr : false; + bool is_const = var->src_is_const; + bool is_volatile = false; if (mem_slot != nullptr) { switch (mem_slot->special) { case ConstValSpecialRuntime: @@ -11940,9 +12996,9 @@ static IrInstruction *ir_get_var_ptr(IrAnalyze *ira, IrInstruction *instruction, no_mem_slot: IrInstruction *var_ptr_instruction = ir_build_var_ptr(&ira->new_irb, - instruction->scope, instruction->source_node, var, is_const, is_volatile); + instruction->scope, instruction->source_node, var); var_ptr_instruction->value.type = get_pointer_to_type_extra(ira->codegen, var->value->type, - var->src_is_const, is_volatile, var->align_bytes, 0, 0); + var->src_is_const, is_volatile, PtrLenSingle, var->align_bytes, 0, 0); type_ensure_zero_bits_known(ira->codegen, var->value->type); bool in_fn_scope = (scope_fn_entry(var->parent_scope) != nullptr); @@ -11961,14 +13017,22 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal // for extern functions, the var args argument is not counted. // for zig functions, it is. size_t var_args_1_or_0; - if (fn_type_id->cc == CallingConventionUnspecified) { - var_args_1_or_0 = fn_type_id->is_var_args ? 1 : 0; - } else { + if (fn_type_id->cc == CallingConventionC) { var_args_1_or_0 = 0; + } else { + var_args_1_or_0 = fn_type_id->is_var_args ? 
1 : 0; } size_t src_param_count = fn_type_id->param_count - var_args_1_or_0; size_t call_param_count = call_instruction->arg_count + first_arg_1_or_0; + for (size_t i = 0; i < call_instruction->arg_count; i += 1) { + ConstExprValue *arg_tuple_value = &call_instruction->args[i]->other->value; + if (arg_tuple_value->type->id == TypeTableEntryIdArgTuple) { + call_param_count -= 1; + call_param_count += arg_tuple_value->data.x_arg_tuple.end_index - + arg_tuple_value->data.x_arg_tuple.start_index; + } + } AstNode *source_node = call_instruction->base.source_node; AstNode *fn_proto_node = fn_entry ? fn_entry->proto_node : nullptr;; @@ -12031,9 +13095,18 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal size_t next_proto_i = 0; if (first_arg_ptr) { - IrInstruction *first_arg; assert(first_arg_ptr->value.type->id == TypeTableEntryIdPointer); - if (handle_is_ptr(first_arg_ptr->value.type->data.pointer.child_type)) { + + bool first_arg_known_bare = false; + if (fn_type_id->next_param_index >= 1) { + TypeTableEntry *param_type = fn_type_id->param_info[next_proto_i].type; + if (type_is_invalid(param_type)) + return ira->codegen->builtin_types.entry_invalid; + first_arg_known_bare = param_type->id != TypeTableEntryIdPointer; + } + + IrInstruction *first_arg; + if (!first_arg_known_bare && handle_is_ptr(first_arg_ptr->value.type->data.pointer.child_type)) { first_arg = first_arg_ptr; } else { first_arg = ir_get_deref(ira, first_arg_ptr, first_arg_ptr); @@ -12047,7 +13120,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal if (fn_proto_node->data.fn_proto.is_var_args) { ir_add_error(ira, &call_instruction->base, - buf_sprintf("compiler bug: unable to call var args function at compile time. https://github.com/zig-lang/zig/issues/313")); + buf_sprintf("compiler bug: unable to call var args function at compile time. https://github.com/ziglang/zig/issues/313")); return ira->codegen->builtin_types.entry_invalid; } @@ -12119,17 +13192,27 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal return ir_finish_anal(ira, return_type); } + IrInstruction *casted_new_stack = nullptr; + if (call_instruction->new_stack != nullptr) { + TypeTableEntry *u8_ptr = get_pointer_to_type_extra(ira->codegen, ira->codegen->builtin_types.entry_u8, + false, false, PtrLenUnknown, + get_abi_alignment(ira->codegen, ira->codegen->builtin_types.entry_u8), 0, 0); + TypeTableEntry *u8_slice = get_slice_type(ira->codegen, u8_ptr); + IrInstruction *new_stack = call_instruction->new_stack->other; + if (type_is_invalid(new_stack->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + casted_new_stack = ir_implicit_cast(ira, new_stack, u8_slice); + if (type_is_invalid(casted_new_stack->value.type)) + return ira->codegen->builtin_types.entry_invalid; + } + if (fn_type->data.fn.is_generic) { if (!fn_entry) { ir_add_error(ira, call_instruction->fn_ref, buf_sprintf("calling a generic function requires compile-time known function value")); return ira->codegen->builtin_types.entry_invalid; } - if (call_instruction->is_async && fn_type_id->is_var_args) { - ir_add_error(ira, call_instruction->fn_ref, - buf_sprintf("compiler bug: TODO: implement var args async functions. 
https://github.com/zig-lang/zig/issues/557")); - return ira->codegen->builtin_types.entry_invalid; - } // Count the arguments of the function type id we are creating size_t new_fn_arg_count = first_arg_1_or_0; @@ -12168,9 +13251,18 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal size_t next_proto_i = 0; if (first_arg_ptr) { - IrInstruction *first_arg; assert(first_arg_ptr->value.type->id == TypeTableEntryIdPointer); - if (handle_is_ptr(first_arg_ptr->value.type->data.pointer.child_type)) { + + bool first_arg_known_bare = false; + if (fn_type_id->next_param_index >= 1) { + TypeTableEntry *param_type = fn_type_id->param_info[next_proto_i].type; + if (type_is_invalid(param_type)) + return ira->codegen->builtin_types.entry_invalid; + first_arg_known_bare = param_type->id != TypeTableEntryIdPointer; + } + + IrInstruction *first_arg; + if (!first_arg_known_bare && handle_is_ptr(first_arg_ptr->value.type->data.pointer.child_type)) { first_arg = first_arg_ptr; } else { first_arg = ir_get_deref(ira, first_arg_ptr, first_arg_ptr); @@ -12195,25 +13287,25 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal if (type_is_invalid(arg->value.type)) return ira->codegen->builtin_types.entry_invalid; - AstNode *param_decl_node = fn_proto_node->data.fn_proto.params.at(next_proto_i); - assert(param_decl_node->type == NodeTypeParamDecl); - bool is_var_args = param_decl_node->data.param_decl.is_var_args; - if (is_var_args && !found_first_var_arg) { - first_var_arg = inst_fn_type_id.param_count; - found_first_var_arg = true; - } - if (arg->value.type->id == TypeTableEntryIdArgTuple) { for (size_t arg_tuple_i = arg->value.data.x_arg_tuple.start_index; arg_tuple_i < arg->value.data.x_arg_tuple.end_index; arg_tuple_i += 1) { + AstNode *param_decl_node = fn_proto_node->data.fn_proto.params.at(next_proto_i); + assert(param_decl_node->type == NodeTypeParamDecl); + bool is_var_args = param_decl_node->data.param_decl.is_var_args; + if (is_var_args && !found_first_var_arg) { + first_var_arg = inst_fn_type_id.param_count; + found_first_var_arg = true; + } + VariableTableEntry *arg_var = get_fn_var_by_index(parent_fn_entry, arg_tuple_i); if (arg_var == nullptr) { ir_add_error(ira, arg, - buf_sprintf("compiler bug: var args can't handle void. https://github.com/zig-lang/zig/issues/557")); + buf_sprintf("compiler bug: var args can't handle void. 
https://github.com/ziglang/zig/issues/557")); return ira->codegen->builtin_types.entry_invalid; } - IrInstruction *arg_var_ptr_inst = ir_get_var_ptr(ira, arg, arg_var, true, false); + IrInstruction *arg_var_ptr_inst = ir_get_var_ptr(ira, arg, arg_var); if (type_is_invalid(arg_var_ptr_inst->value.type)) return ira->codegen->builtin_types.entry_invalid; @@ -12227,10 +13319,20 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal return ira->codegen->builtin_types.entry_invalid; } } - } else if (!ir_analyze_fn_call_generic_arg(ira, fn_proto_node, arg, &impl_fn->child_scope, - &next_proto_i, generic_id, &inst_fn_type_id, casted_args, impl_fn)) - { - return ira->codegen->builtin_types.entry_invalid; + } else { + AstNode *param_decl_node = fn_proto_node->data.fn_proto.params.at(next_proto_i); + assert(param_decl_node->type == NodeTypeParamDecl); + bool is_var_args = param_decl_node->data.param_decl.is_var_args; + if (is_var_args && !found_first_var_arg) { + first_var_arg = inst_fn_type_id.param_count; + found_first_var_arg = true; + } + + if (!ir_analyze_fn_call_generic_arg(ira, fn_proto_node, arg, &impl_fn->child_scope, + &next_proto_i, generic_id, &inst_fn_type_id, casted_args, impl_fn)) + { + return ira->codegen->builtin_types.entry_invalid; + } } } @@ -12273,6 +13375,10 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal inst_fn_type_id.return_type = specified_return_type; } + type_ensure_zero_bits_known(ira->codegen, specified_return_type); + if (type_is_invalid(specified_return_type)) + return ira->codegen->builtin_types.entry_invalid; + if (type_requires_comptime(specified_return_type)) { // Throw out our work and call the function as if it were comptime. return ir_analyze_fn_call(ira, call_instruction, fn_entry, fn_type, fn_ref, first_arg_ptr, true, FnInlineAuto); @@ -12299,10 +13405,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal return ira->codegen->builtin_types.entry_invalid; } if (inst_fn_type_id.async_allocator_type == nullptr) { - IrInstruction *casted_inst = ir_implicit_byval_const_ref_cast(ira, uncasted_async_allocator_inst); - if (type_is_invalid(casted_inst->value.type)) - return ira->codegen->builtin_types.entry_invalid; - inst_fn_type_id.async_allocator_type = casted_inst->value.type; + inst_fn_type_id.async_allocator_type = uncasted_async_allocator_inst->value.type; } async_allocator_inst = ir_implicit_cast(ira, uncasted_async_allocator_inst, inst_fn_type_id.async_allocator_type); if (type_is_invalid(async_allocator_inst->value.type)) @@ -12323,6 +13426,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal impl_fn->ir_executable.parent_exec = ira->new_irb.exec; impl_fn->analyzed_executable.source_node = call_instruction->base.source_node; impl_fn->analyzed_executable.parent_exec = ira->new_irb.exec; + impl_fn->analyzed_executable.backward_branch_quota = ira->new_irb.exec->backward_branch_quota; impl_fn->analyzed_executable.is_generic_instantiation = true; ira->codegen->fn_defs.append(impl_fn); @@ -12345,7 +13449,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal assert(async_allocator_inst == nullptr); IrInstruction *new_call_instruction = ir_build_call_from(&ira->new_irb, &call_instruction->base, impl_fn, nullptr, impl_param_count, casted_args, false, fn_inline, - call_instruction->is_async, nullptr); + call_instruction->is_async, nullptr, casted_new_stack); ir_add_alloca(ira, new_call_instruction, 
return_type); @@ -12363,9 +13467,16 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal IrInstruction **casted_args = allocate<IrInstruction *>(call_param_count); size_t next_arg_index = 0; if (first_arg_ptr) { - IrInstruction *first_arg; assert(first_arg_ptr->value.type->id == TypeTableEntryIdPointer); - if (handle_is_ptr(first_arg_ptr->value.type->data.pointer.child_type)) { + + TypeTableEntry *param_type = fn_type_id->param_info[next_arg_index].type; + if (type_is_invalid(param_type)) + return ira->codegen->builtin_types.entry_invalid; + + IrInstruction *first_arg; + if (param_type->id == TypeTableEntryIdPointer && + handle_is_ptr(first_arg_ptr->value.type->data.pointer.child_type)) + { first_arg = first_arg_ptr; } else { first_arg = ir_get_deref(ira, first_arg_ptr, first_arg_ptr); @@ -12373,10 +13484,6 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal return ira->codegen->builtin_types.entry_invalid; } - TypeTableEntry *param_type = fn_type_id->param_info[next_arg_index].type; - if (type_is_invalid(param_type)) - return ira->codegen->builtin_types.entry_invalid; - IrInstruction *casted_arg = ir_implicit_cast(ira, first_arg, param_type); if (type_is_invalid(casted_arg->value.type)) return ira->codegen->builtin_types.entry_invalid; @@ -12436,7 +13543,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal IrInstruction *new_call_instruction = ir_build_call_from(&ira->new_irb, &call_instruction->base, - fn_entry, fn_ref, call_param_count, casted_args, false, fn_inline, false, nullptr); + fn_entry, fn_ref, call_param_count, casted_args, false, fn_inline, false, nullptr, casted_new_stack); ir_add_alloca(ira, new_call_instruction, return_type); return ir_finish_anal(ira, return_type); @@ -12474,6 +13581,8 @@ static TypeTableEntry *ir_analyze_instruction_call(IrAnalyze *ira, IrInstruction return ir_finish_anal(ira, cast_instruction->value.type); } else if (fn_ref->value.type->id == TypeTableEntryIdFn) { FnTableEntry *fn_table_entry = ir_resolve_fn(ira, fn_ref); + if (fn_table_entry == nullptr) + return ira->codegen->builtin_types.entry_invalid; return ir_analyze_fn_call(ira, call_instruction, fn_table_entry, fn_table_entry->type_entry, fn_ref, nullptr, is_comptime, call_instruction->fn_inline); } else if (fn_ref->value.type->id == TypeTableEntryIdBoundFn) { @@ -12481,7 +13590,7 @@ static TypeTableEntry *ir_analyze_instruction_call(IrAnalyze *ira, IrInstruction FnTableEntry *fn_table_entry = fn_ref->value.data.x_bound_fn.fn; IrInstruction *first_arg_ptr = fn_ref->value.data.x_bound_fn.first_arg; return ir_analyze_fn_call(ira, call_instruction, fn_table_entry, fn_table_entry->type_entry, - nullptr, first_arg_ptr, is_comptime, call_instruction->fn_inline); + fn_ref, first_arg_ptr, is_comptime, call_instruction->fn_inline); } else { ir_add_error_node(ira, fn_ref->source_node, buf_sprintf("type '%s' not a function", buf_ptr(&fn_ref->value.type->name))); @@ -12507,6 +13616,12 @@ static TypeTableEntry *ir_analyze_dereference(IrAnalyze *ira, IrInstructionUnOp if (type_is_invalid(ptr_type)) { return ira->codegen->builtin_types.entry_invalid; } else if (ptr_type->id == TypeTableEntryIdPointer) { + if (ptr_type->data.pointer.ptr_len == PtrLenUnknown) { + ir_add_error_node(ira, un_op_instruction->base.source_node, + buf_sprintf("index syntax required for unknown-length pointer type '%s'", + buf_ptr(&ptr_type->name))); + return ira->codegen->builtin_types.entry_invalid; + } child_type = 
ptr_type->data.pointer.child_type; } else { ir_add_error_node(ira, un_op_instruction->base.source_node, @@ -12519,7 +13634,11 @@ static TypeTableEntry *ir_analyze_dereference(IrAnalyze *ira, IrInstructionUnOp // one of the ptr instructions if (instr_is_comptime(value)) { - ConstExprValue *pointee = const_ptr_pointee(ira->codegen, &value->value); + ConstExprValue *comptime_value = ir_resolve_const(ira, value, UndefBad); + if (comptime_value == nullptr) + return ira->codegen->builtin_types.entry_invalid; + + ConstExprValue *pointee = const_ptr_pointee(ira->codegen, comptime_value); if (pointee->type == child_type) { ConstExprValue *out_val = ir_build_const_from(ira, &un_op_instruction->base); copy_const_val(out_val, pointee, value->value.data.x_ptr.mut == ConstPtrMutComptimeConst); @@ -12536,6 +13655,10 @@ static TypeTableEntry *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op TypeTableEntry *type_entry = ir_resolve_type(ira, value); if (type_is_invalid(type_entry)) return ira->codegen->builtin_types.entry_invalid; + ensure_complete_type(ira->codegen, type_entry); + if (type_is_invalid(type_entry)) + return ira->codegen->builtin_types.entry_invalid; + switch (type_entry->id) { case TypeTableEntryIdInvalid: zig_unreachable(); @@ -12547,11 +13670,11 @@ static TypeTableEntry *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op case TypeTableEntryIdPointer: case TypeTableEntryIdArray: case TypeTableEntryIdStruct: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdEnum: @@ -12564,13 +13687,13 @@ static TypeTableEntry *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op case TypeTableEntryIdPromise: { ConstExprValue *out_val = ir_build_const_from(ira, &un_op_instruction->base); - out_val->data.x_type = get_maybe_type(ira->codegen, type_entry); + out_val->data.x_type = get_optional_type(ira->codegen, type_entry); return ira->codegen->builtin_types.entry_type; } case TypeTableEntryIdUnreachable: case TypeTableEntryIdOpaque: ir_add_error_node(ira, un_op_instruction->base.source_node, - buf_sprintf("type '%s' not nullable", buf_ptr(&type_entry->name))); + buf_sprintf("type '%s' not optional", buf_ptr(&type_entry->name))); return ira->codegen->builtin_types.entry_invalid; } zig_unreachable(); @@ -12584,10 +13707,10 @@ static TypeTableEntry *ir_analyze_negation(IrAnalyze *ira, IrInstructionUnOp *un bool is_wrap_op = (un_op_instruction->op_id == IrUnOpNegationWrap); - bool is_float = (expr_type->id == TypeTableEntryIdFloat || expr_type->id == TypeTableEntryIdNumLitFloat); + bool is_float = (expr_type->id == TypeTableEntryIdFloat || expr_type->id == TypeTableEntryIdComptimeFloat); if ((expr_type->id == TypeTableEntryIdInt && expr_type->data.integral.is_signed) || - expr_type->id == TypeTableEntryIdNumLitInt || (is_float && !is_wrap_op)) + expr_type->id == TypeTableEntryIdComptimeInt || (is_float && !is_wrap_op)) { if (instr_is_comptime(value)) { ConstExprValue *target_const_val = ir_resolve_const(ira, value, UndefBad); @@ -12603,7 +13726,7 @@ static TypeTableEntry *ir_analyze_negation(IrAnalyze *ira, IrInstructionUnOp *un } else { bigint_negate(&out_val->data.x_bigint, &target_const_val->data.x_bigint); } - if (is_wrap_op 
|| is_float || expr_type->id == TypeTableEntryIdNumLitInt) { + if (is_wrap_op || is_float || expr_type->id == TypeTableEntryIdComptimeInt) { return expr_type; } @@ -12632,7 +13755,7 @@ static TypeTableEntry *ir_analyze_bin_not(IrAnalyze *ira, IrInstructionUnOp *ins if (expr_type->id == TypeTableEntryIdInt) { if (instr_is_comptime(value)) { ConstExprValue *target_const_val = ir_resolve_const(ira, value, UndefBad); - if (!target_const_val) + if (target_const_val == nullptr) return ira->codegen->builtin_types.entry_invalid; ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); @@ -12662,7 +13785,7 @@ static TypeTableEntry *ir_analyze_instruction_un_op(IrAnalyze *ira, IrInstructio return ir_analyze_negation(ira, un_op_instruction); case IrUnOpDereference: return ir_analyze_dereference(ira, un_op_instruction); - case IrUnOpMaybe: + case IrUnOpOptional: return ir_analyze_maybe(ira, un_op_instruction); } zig_unreachable(); @@ -12794,15 +13917,15 @@ static TypeTableEntry *ir_analyze_instruction_phi(IrAnalyze *ira, IrInstructionP return first_value->value.type; } - TypeTableEntry *resolved_type = ir_resolve_peer_types(ira, phi_instruction->base.source_node, + TypeTableEntry *resolved_type = ir_resolve_peer_types(ira, phi_instruction->base.source_node, nullptr, new_incoming_values.items, new_incoming_values.length); if (type_is_invalid(resolved_type)) return resolved_type; - if (resolved_type->id == TypeTableEntryIdNumLitFloat || - resolved_type->id == TypeTableEntryIdNumLitInt || - resolved_type->id == TypeTableEntryIdNullLit || - resolved_type->id == TypeTableEntryIdUndefLit) + if (resolved_type->id == TypeTableEntryIdComptimeFloat || + resolved_type->id == TypeTableEntryIdComptimeInt || + resolved_type->id == TypeTableEntryIdNull || + resolved_type->id == TypeTableEntryIdUndefined) { ir_add_error_node(ira, phi_instruction->base.source_node, buf_sprintf("unable to infer expression type")); @@ -12847,17 +13970,16 @@ static TypeTableEntry *ir_analyze_instruction_phi(IrAnalyze *ira, IrInstructionP } static TypeTableEntry *ir_analyze_var_ptr(IrAnalyze *ira, IrInstruction *instruction, - VariableTableEntry *var, bool is_const_ptr, bool is_volatile_ptr) + VariableTableEntry *var) { - IrInstruction *result = ir_get_var_ptr(ira, instruction, var, is_const_ptr, is_volatile_ptr); + IrInstruction *result = ir_get_var_ptr(ira, instruction, var); ir_link_new_instruction(result, instruction); return result->value.type; } static TypeTableEntry *ir_analyze_instruction_var_ptr(IrAnalyze *ira, IrInstructionVarPtr *var_ptr_instruction) { VariableTableEntry *var = var_ptr_instruction->var; - return ir_analyze_var_ptr(ira, &var_ptr_instruction->base, var, var_ptr_instruction->is_const, - var_ptr_instruction->is_volatile); + return ir_analyze_var_ptr(ira, &var_ptr_instruction->base, var); } static TypeTableEntry *adjust_ptr_align(CodeGen *g, TypeTableEntry *ptr_type, uint32_t new_align) { @@ -12865,25 +13987,40 @@ static TypeTableEntry *adjust_ptr_align(CodeGen *g, TypeTableEntry *ptr_type, ui return get_pointer_to_type_extra(g, ptr_type->data.pointer.child_type, ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile, + ptr_type->data.pointer.ptr_len, new_align, ptr_type->data.pointer.bit_offset, ptr_type->data.pointer.unaligned_bit_count); } +static TypeTableEntry *adjust_slice_align(CodeGen *g, TypeTableEntry *slice_type, uint32_t new_align) { + assert(is_slice(slice_type)); + TypeTableEntry *ptr_type = adjust_ptr_align(g, slice_type->data.structure.fields[slice_ptr_index].type_entry, + 
new_align); + return get_slice_type(g, ptr_type); +} + +static TypeTableEntry *adjust_ptr_len(CodeGen *g, TypeTableEntry *ptr_type, PtrLen ptr_len) { + assert(ptr_type->id == TypeTableEntryIdPointer); + return get_pointer_to_type_extra(g, + ptr_type->data.pointer.child_type, + ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile, + ptr_len, + ptr_type->data.pointer.alignment, + ptr_type->data.pointer.bit_offset, ptr_type->data.pointer.unaligned_bit_count); +} + static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstructionElemPtr *elem_ptr_instruction) { IrInstruction *array_ptr = elem_ptr_instruction->array_ptr->other; if (type_is_invalid(array_ptr->value.type)) return ira->codegen->builtin_types.entry_invalid; + ConstExprValue *orig_array_ptr_val = &array_ptr->value; + IrInstruction *elem_index = elem_ptr_instruction->elem_index->other; if (type_is_invalid(elem_index->value.type)) return ira->codegen->builtin_types.entry_invalid; - TypeTableEntry *ptr_type = array_ptr->value.type; - if (ptr_type->id == TypeTableEntryIdMetaType) { - ir_add_error(ira, &elem_ptr_instruction->base, - buf_sprintf("array access of non-array type '%s'", buf_ptr(&ptr_type->name))); - return ira->codegen->builtin_types.entry_invalid; - } + TypeTableEntry *ptr_type = orig_array_ptr_val->type; assert(ptr_type->id == TypeTableEntryIdPointer); TypeTableEntry *array_type = ptr_type->data.pointer.child_type; @@ -12894,7 +14031,18 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc if (type_is_invalid(array_type)) { return array_type; - } else if (array_type->id == TypeTableEntryIdArray) { + } else if (array_type->id == TypeTableEntryIdArray || + (array_type->id == TypeTableEntryIdPointer && + array_type->data.pointer.ptr_len == PtrLenSingle && + array_type->data.pointer.child_type->id == TypeTableEntryIdArray)) + { + if (array_type->id == TypeTableEntryIdPointer) { + array_type = array_type->data.pointer.child_type; + ptr_type = ptr_type->data.pointer.child_type; + if (orig_array_ptr_val->special != ConstValSpecialRuntime) { + orig_array_ptr_val = const_ptr_pointee(ira->codegen, orig_array_ptr_val); + } + } if (array_type->data.array.len == 0) { ir_add_error_node(ira, elem_ptr_instruction->base.source_node, buf_sprintf("index 0 outside array of size 0")); @@ -12904,6 +14052,7 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc if (ptr_type->data.pointer.unaligned_bit_count == 0) { return_type = get_pointer_to_type_extra(ira->codegen, child_type, ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile, + elem_ptr_instruction->ptr_len, ptr_type->data.pointer.alignment, 0, 0); } else { uint64_t elem_val_scalar; @@ -12915,12 +14064,19 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc return_type = get_pointer_to_type_extra(ira->codegen, child_type, ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile, + elem_ptr_instruction->ptr_len, 1, (uint32_t)bit_offset, (uint32_t)bit_width); } } else if (array_type->id == TypeTableEntryIdPointer) { - return_type = array_type; + if (array_type->data.pointer.ptr_len == PtrLenSingle) { + ir_add_error_node(ira, elem_ptr_instruction->base.source_node, + buf_sprintf("index of single-item pointer")); + return ira->codegen->builtin_types.entry_invalid; + } + return_type = adjust_ptr_len(ira->codegen, array_type, elem_ptr_instruction->ptr_len); } else if (is_slice(array_type)) { - return_type = 
array_type->data.structure.fields[slice_ptr_index].type_entry; + return_type = adjust_ptr_len(ira->codegen, array_type->data.structure.fields[slice_ptr_index].type_entry, + elem_ptr_instruction->ptr_len); } else if (array_type->id == TypeTableEntryIdArgTuple) { ConstExprValue *ptr_val = ir_resolve_const(ira, array_ptr, UndefBad); if (!ptr_val) @@ -12945,8 +14101,7 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc bool is_const = true; bool is_volatile = false; if (var) { - return ir_analyze_var_ptr(ira, &elem_ptr_instruction->base, var, - is_const, is_volatile); + return ir_analyze_var_ptr(ira, &elem_ptr_instruction->base, var); } else { return ir_analyze_const_ptr(ira, &elem_ptr_instruction->base, &ira->codegen->const_void_val, ira->codegen->builtin_types.entry_void, ConstPtrMutComptimeConst, is_const, is_volatile); @@ -12964,6 +14119,9 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc bool safety_check_on = elem_ptr_instruction->safety_check_on; ensure_complete_type(ira->codegen, return_type->data.pointer.child_type); + if (type_is_invalid(return_type->data.pointer.child_type)) + return ira->codegen->builtin_types.entry_invalid; + uint64_t elem_size = type_size(ira->codegen, return_type->data.pointer.child_type); uint64_t abi_align = get_abi_alignment(ira->codegen, return_type->data.pointer.child_type); uint64_t ptr_align = return_type->data.pointer.alignment; @@ -13001,9 +14159,9 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc } ConstExprValue *array_ptr_val; - if (array_ptr->value.special != ConstValSpecialRuntime && - (array_ptr->value.data.x_ptr.mut != ConstPtrMutRuntimeVar || array_type->id == TypeTableEntryIdArray) && - (array_ptr_val = const_ptr_pointee(ira->codegen, &array_ptr->value)) && + if (orig_array_ptr_val->special != ConstValSpecialRuntime && + (orig_array_ptr_val->data.x_ptr.mut != ConstPtrMutRuntimeVar || array_type->id == TypeTableEntryIdArray) && + (array_ptr_val = const_ptr_pointee(ira->codegen, orig_array_ptr_val)) && array_ptr_val->special != ConstValSpecialRuntime && (array_type->id != TypeTableEntryIdPointer || array_ptr_val->data.x_ptr.special != ConstPtrSpecialHardCodedAddr)) @@ -13060,8 +14218,10 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc } else if (is_slice(array_type)) { ConstExprValue *ptr_field = &array_ptr_val->data.x_struct.fields[slice_ptr_index]; if (ptr_field->data.x_ptr.special == ConstPtrSpecialHardCodedAddr) { - ir_build_elem_ptr_from(&ira->new_irb, &elem_ptr_instruction->base, array_ptr, - casted_elem_index, false); + IrInstruction *result = ir_build_elem_ptr(&ira->new_irb, elem_ptr_instruction->base.scope, elem_ptr_instruction->base.source_node, + array_ptr, casted_elem_index, false, elem_ptr_instruction->ptr_len); + result->value.type = return_type; + ir_link_new_instruction(result, &elem_ptr_instruction->base); return return_type; } ConstExprValue *len_field = &array_ptr_val->data.x_struct.fields[slice_len_index]; @@ -13106,7 +14266,7 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc } else if (array_type->id == TypeTableEntryIdArray) { ConstExprValue *out_val = ir_build_const_from(ira, &elem_ptr_instruction->base); out_val->data.x_ptr.special = ConstPtrSpecialBaseArray; - out_val->data.x_ptr.mut = array_ptr->value.data.x_ptr.mut; + out_val->data.x_ptr.mut = orig_array_ptr_val->data.x_ptr.mut; out_val->data.x_ptr.data.base_array.array_val = array_ptr_val; 
out_val->data.x_ptr.data.base_array.elem_index = index; return return_type; @@ -13129,8 +14289,10 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc } } - ir_build_elem_ptr_from(&ira->new_irb, &elem_ptr_instruction->base, array_ptr, - casted_elem_index, safety_check_on); + IrInstruction *result = ir_build_elem_ptr(&ira->new_irb, elem_ptr_instruction->base.scope, elem_ptr_instruction->base.source_node, + array_ptr, casted_elem_index, safety_check_on, elem_ptr_instruction->ptr_len); + result->value.type = return_type; + ir_link_new_instruction(result, &elem_ptr_instruction->base); return return_type; } @@ -13174,7 +14336,6 @@ static IrInstruction *ir_analyze_container_member_access_inner(IrAnalyze *ira, return ira->codegen->invalid_instruction; } - static IrInstruction *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_name, IrInstruction *source_instr, IrInstruction *container_ptr, TypeTableEntry *container_type) { @@ -13206,7 +14367,7 @@ static IrInstruction *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_ return ira->codegen->invalid_instruction; ConstExprValue *field_val = &struct_val->data.x_struct.fields[field->src_index]; TypeTableEntry *ptr_type = get_pointer_to_type_extra(ira->codegen, field_val->type, - is_const, is_volatile, align_bytes, + is_const, is_volatile, PtrLenSingle, align_bytes, (uint32_t)(ptr_bit_offset + field->packed_bits_offset), (uint32_t)unaligned_bit_count_for_result_type); IrInstruction *result = ir_get_const(ira, source_instr); @@ -13222,6 +14383,7 @@ static IrInstruction *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_ IrInstruction *result = ir_build_struct_field_ptr(&ira->new_irb, source_instr->scope, source_instr->source_node, container_ptr, field); result->value.type = get_pointer_to_type_extra(ira->codegen, field->type_entry, is_const, is_volatile, + PtrLenSingle, align_bytes, (uint32_t)(ptr_bit_offset + field->packed_bits_offset), (uint32_t)unaligned_bit_count_for_result_type); @@ -13236,9 +14398,56 @@ static IrInstruction *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_ } else if (bare_type->id == TypeTableEntryIdUnion) { TypeUnionField *field = find_union_type_field(bare_type, field_name); if (field) { + if (instr_is_comptime(container_ptr)) { + ConstExprValue *ptr_val = ir_resolve_const(ira, container_ptr, UndefBad); + if (!ptr_val) + return ira->codegen->invalid_instruction; + + if (ptr_val->data.x_ptr.special != ConstPtrSpecialHardCodedAddr) { + ConstExprValue *union_val = const_ptr_pointee(ira->codegen, ptr_val); + if (type_is_invalid(union_val->type)) + return ira->codegen->invalid_instruction; + + TypeUnionField *actual_field = find_union_field_by_tag(bare_type, &union_val->data.x_union.tag); + if (actual_field == nullptr) + zig_unreachable(); + + if (field != actual_field) { + ir_add_error_node(ira, source_instr->source_node, + buf_sprintf("accessing union field '%s' while field '%s' is set", buf_ptr(field_name), + buf_ptr(actual_field->name))); + return ira->codegen->invalid_instruction; + } + + ConstExprValue *payload_val = union_val->data.x_union.payload; + + TypeTableEntry *field_type = field->type_entry; + if (field_type->id == TypeTableEntryIdVoid) + { + assert(payload_val == nullptr); + payload_val = create_const_vals(1); + payload_val->special = ConstValSpecialStatic; + payload_val->type = field_type; + } + + TypeTableEntry *ptr_type = get_pointer_to_type_extra(ira->codegen, field_type, + is_const, is_volatile, + PtrLenSingle, + get_abi_alignment(ira->codegen, 
field_type), 0, 0); + + IrInstruction *result = ir_get_const(ira, source_instr); + ConstExprValue *const_val = &result->value; + const_val->data.x_ptr.special = ConstPtrSpecialRef; + const_val->data.x_ptr.mut = container_ptr->value.data.x_ptr.mut; + const_val->data.x_ptr.data.ref.pointee = payload_val; + const_val->type = ptr_type; + return result; + } + } + IrInstruction *result = ir_build_union_field_ptr(&ira->new_irb, source_instr->scope, source_instr->source_node, container_ptr, field); result->value.type = get_pointer_to_type_extra(ira->codegen, field->type_entry, is_const, is_volatile, - get_abi_alignment(ira->codegen, field->type_entry), 0, 0); + PtrLenSingle, get_abi_alignment(ira->codegen, field->type_entry), 0, 0); return result; } else { return ir_analyze_container_member_access_inner(ira, bare_type, field_name, @@ -13286,7 +14495,7 @@ static TypeTableEntry *ir_analyze_decl_ref(IrAnalyze *ira, IrInstruction *source add_link_lib_symbol(ira, tld_var->extern_lib_name, &var->name, source_instruction->source_node); } - return ir_analyze_var_ptr(ira, source_instruction, var, false, false); + return ir_analyze_var_ptr(ira, source_instruction, var); } case TldIdFn: { @@ -13335,16 +14544,18 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru if (type_is_invalid(container_ptr->value.type)) return ira->codegen->builtin_types.entry_invalid; - TypeTableEntry *container_type; - if (container_ptr->value.type->id == TypeTableEntryIdPointer) { - container_type = container_ptr->value.type->data.pointer.child_type; - } else if (container_ptr->value.type->id == TypeTableEntryIdMetaType) { - container_type = container_ptr->value.type; - } else { - zig_unreachable(); + TypeTableEntry *container_type = container_ptr->value.type->data.pointer.child_type; + assert(container_ptr->value.type->id == TypeTableEntryIdPointer); + + Buf *field_name = field_ptr_instruction->field_name_buffer; + if (!field_name) { + IrInstruction *field_name_expr = field_ptr_instruction->field_name_expr->other; + field_name = ir_resolve_str(ira, field_name_expr); + if (!field_name) + return ira->codegen->builtin_types.entry_invalid; } - Buf *field_name = field_ptr_instruction->field_name; + AstNode *source_node = field_ptr_instruction->base.source_node; if (type_is_invalid(container_type)) { @@ -13362,10 +14573,14 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru ir_link_new_instruction(result, &field_ptr_instruction->base); return result->value.type; } - } else if (container_type->id == TypeTableEntryIdArray) { + } else if (is_array_ref(container_type)) { if (buf_eql_str(field_name, "len")) { ConstExprValue *len_val = create_const_vals(1); - init_const_usize(ira->codegen, len_val, container_type->data.array.len); + if (container_type->id == TypeTableEntryIdPointer) { + init_const_usize(ira->codegen, len_val, container_type->data.pointer.child_type->data.array.len); + } else { + init_const_usize(ira->codegen, len_val, container_type->data.array.len); + } TypeTableEntry *usize = ira->codegen->builtin_types.entry_usize; bool ptr_is_const = true; @@ -13407,17 +14622,9 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru if (!container_ptr_val) return ira->codegen->builtin_types.entry_invalid; - TypeTableEntry *child_type; - if (container_ptr->value.type->id == TypeTableEntryIdMetaType) { - TypeTableEntry *ptr_type = container_ptr_val->data.x_type; - assert(ptr_type->id == TypeTableEntryIdPointer); - child_type = 
ptr_type->data.pointer.child_type; - } else if (container_ptr->value.type->id == TypeTableEntryIdPointer) { - ConstExprValue *child_val = const_ptr_pointee(ira->codegen, container_ptr_val); - child_type = child_val->data.x_type; - } else { - zig_unreachable(); - } + assert(container_ptr->value.type->id == TypeTableEntryIdPointer); + ConstExprValue *child_val = const_ptr_pointee(ira->codegen, container_ptr_val); + TypeTableEntry *child_type = child_val->data.x_type; if (type_is_invalid(child_type)) { return ira->codegen->builtin_types.entry_invalid; @@ -13435,7 +14642,7 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru } if (child_type->id == TypeTableEntryIdEnum) { ensure_complete_type(ira->codegen, child_type); - if (child_type->data.enumeration.is_invalid) + if (type_is_invalid(child_type)) return ira->codegen->builtin_types.entry_invalid; TypeEnumField *field = find_enum_type_field(child_type, field_name); @@ -13446,7 +14653,16 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru create_const_enum(child_type, &field->value), child_type, ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile); } - } else if (child_type->id == TypeTableEntryIdUnion && + } + ScopeDecls *container_scope = get_container_scope(child_type); + if (container_scope != nullptr) { + auto entry = container_scope->decl_table.maybe_get(field_name); + Tld *tld = entry ? entry->value : nullptr; + if (tld) { + return ir_analyze_decl_ref(ira, &field_ptr_instruction->base, tld); + } + } + if (child_type->id == TypeTableEntryIdUnion && (child_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr || child_type->data.unionation.decl_node->data.container_decl.auto_enum)) { @@ -13463,14 +14679,6 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile); } } - ScopeDecls *container_scope = get_container_scope(child_type); - if (container_scope != nullptr) { - auto entry = container_scope->decl_table.maybe_get(field_name); - Tld *tld = entry ? 
entry->value : nullptr; - if (tld) { - return ir_analyze_decl_ref(ira, &field_ptr_instruction->base, tld); - } - } ir_add_error(ira, &field_ptr_instruction->base, buf_sprintf("container '%s' has no member called '%s'", buf_ptr(&child_type->name), buf_ptr(field_name))); @@ -13501,7 +14709,7 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru } err_set_type = err_entry->set_with_only_this_in_it; } else { - if (!resolve_inferred_error_set(ira, child_type, field_ptr_instruction->base.source_node)) { + if (!resolve_inferred_error_set(ira->codegen, child_type, field_ptr_instruction->base.source_node)) { return ira->codegen->builtin_types.entry_invalid; } err_entry = find_err_table_entry(child_type, field_name); @@ -13623,7 +14831,7 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru buf_ptr(&child_type->name), buf_ptr(field_name))); return ira->codegen->builtin_types.entry_invalid; } - } else if (child_type->id == TypeTableEntryIdMaybe) { + } else if (child_type->id == TypeTableEntryIdOptional) { if (buf_eql_str(field_name, "Child")) { bool ptr_is_const = true; bool ptr_is_volatile = false; @@ -13639,6 +14847,15 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru } } else if (child_type->id == TypeTableEntryIdFn) { if (buf_eql_str(field_name, "ReturnType")) { + if (child_type->data.fn.fn_type_id.return_type == nullptr) { + // Return type can only ever be null, if the function is generic + assert(child_type->data.fn.is_generic); + + ir_add_error(ira, &field_ptr_instruction->base, + buf_sprintf("ReturnType has not been resolved because '%s' is generic", buf_ptr(&child_type->name))); + return ira->codegen->builtin_types.entry_invalid; + } + bool ptr_is_const = true; bool ptr_is_volatile = false; return ir_analyze_const_ptr(ira, &field_ptr_instruction->base, @@ -13707,6 +14924,9 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru static TypeTableEntry *ir_analyze_instruction_load_ptr(IrAnalyze *ira, IrInstructionLoadPtr *load_ptr_instruction) { IrInstruction *ptr = load_ptr_instruction->ptr->other; + if (type_is_invalid(ptr->value.type)) + return ira->codegen->builtin_types.entry_invalid; + IrInstruction *result = ir_get_deref(ira, &load_ptr_instruction->base, ptr); ir_link_new_instruction(result, &load_ptr_instruction->base); assert(result->value.type); @@ -13779,10 +14999,10 @@ static TypeTableEntry *ir_analyze_instruction_typeof(IrAnalyze *ira, IrInstructi switch (type_entry->id) { case TypeTableEntryIdInvalid: zig_unreachable(); // handled above - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: case TypeTableEntryIdBoundFn: @@ -13795,7 +15015,7 @@ static TypeTableEntry *ir_analyze_instruction_typeof(IrAnalyze *ira, IrInstructi case TypeTableEntryIdPointer: case TypeTableEntryIdArray: case TypeTableEntryIdStruct: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdEnum: @@ -13827,7 +15047,7 @@ static TypeTableEntry *ir_analyze_instruction_to_ptr_type(IrAnalyze *ira, if (type_entry->id == TypeTableEntryIdArray) { ptr_type = get_pointer_to_type(ira->codegen, type_entry->data.array.child_type, false); } 
else if (is_slice(type_entry)) { - ptr_type = type_entry->data.structure.fields[0].type_entry; + ptr_type = adjust_ptr_len(ira->codegen, type_entry->data.structure.fields[0].type_entry, PtrLenSingle); } else if (type_entry->id == TypeTableEntryIdArgTuple) { ConstExprValue *arg_tuple_val = ir_resolve_const(ira, value, UndefBad); if (!arg_tuple_val) @@ -14045,8 +15265,8 @@ static TypeTableEntry *ir_analyze_instruction_slice_type(IrAnalyze *ira, case TypeTableEntryIdInvalid: // handled above zig_unreachable(); case TypeTableEntryIdUnreachable: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: case TypeTableEntryIdBlock: case TypeTableEntryIdArgTuple: case TypeTableEntryIdOpaque: @@ -14061,9 +15281,9 @@ static TypeTableEntry *ir_analyze_instruction_slice_type(IrAnalyze *ira, case TypeTableEntryIdPointer: case TypeTableEntryIdArray: case TypeTableEntryIdStruct: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdEnum: @@ -14075,7 +15295,7 @@ static TypeTableEntry *ir_analyze_instruction_slice_type(IrAnalyze *ira, { type_ensure_zero_bits_known(ira->codegen, child_type); TypeTableEntry *slice_ptr_type = get_pointer_to_type_extra(ira->codegen, child_type, - is_const, is_volatile, align_bytes, 0, 0); + is_const, is_volatile, PtrLenUnknown, align_bytes, 0, 0); TypeTableEntry *result_type = get_slice_type(ira->codegen, slice_ptr_type); ConstExprValue *out_val = ir_build_const_from(ira, &slice_type_instruction->base); out_val->data.x_type = result_type; @@ -14153,8 +15373,8 @@ static TypeTableEntry *ir_analyze_instruction_array_type(IrAnalyze *ira, case TypeTableEntryIdInvalid: // handled above zig_unreachable(); case TypeTableEntryIdUnreachable: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: case TypeTableEntryIdBlock: case TypeTableEntryIdArgTuple: case TypeTableEntryIdOpaque: @@ -14169,9 +15389,9 @@ static TypeTableEntry *ir_analyze_instruction_array_type(IrAnalyze *ira, case TypeTableEntryIdPointer: case TypeTableEntryIdArray: case TypeTableEntryIdStruct: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdEnum: @@ -14222,11 +15442,11 @@ static TypeTableEntry *ir_analyze_instruction_size_of(IrAnalyze *ira, case TypeTableEntryIdInvalid: // handled above zig_unreachable(); case TypeTableEntryIdUnreachable: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: case TypeTableEntryIdBlock: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdBoundFn: case TypeTableEntryIdMetaType: case TypeTableEntryIdNamespace: @@ -14242,7 +15462,7 @@ static TypeTableEntry *ir_analyze_instruction_size_of(IrAnalyze *ira, case TypeTableEntryIdPointer: case TypeTableEntryIdArray: case TypeTableEntryIdStruct: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case 
TypeTableEntryIdErrorSet: case TypeTableEntryIdEnum: @@ -14266,20 +15486,20 @@ static TypeTableEntry *ir_analyze_instruction_test_non_null(IrAnalyze *ira, IrIn TypeTableEntry *type_entry = value->value.type; - if (type_entry->id == TypeTableEntryIdMaybe) { + if (type_entry->id == TypeTableEntryIdOptional) { if (instr_is_comptime(value)) { ConstExprValue *maybe_val = ir_resolve_const(ira, value, UndefBad); if (!maybe_val) return ira->codegen->builtin_types.entry_invalid; ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); - out_val->data.x_bool = (maybe_val->data.x_maybe != nullptr); + out_val->data.x_bool = !optional_value_is_null(maybe_val); return ira->codegen->builtin_types.entry_bool; } ir_build_test_nonnull_from(&ira->new_irb, &instruction->base, value); return ira->codegen->builtin_types.entry_bool; - } else if (type_entry->id == TypeTableEntryIdNullLit) { + } else if (type_entry->id == TypeTableEntryIdNull) { ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); out_val->data.x_bool = false; return ira->codegen->builtin_types.entry_bool; @@ -14291,42 +15511,27 @@ static TypeTableEntry *ir_analyze_instruction_test_non_null(IrAnalyze *ira, IrIn } static TypeTableEntry *ir_analyze_instruction_unwrap_maybe(IrAnalyze *ira, - IrInstructionUnwrapMaybe *unwrap_maybe_instruction) + IrInstructionUnwrapOptional *unwrap_maybe_instruction) { IrInstruction *value = unwrap_maybe_instruction->value->other; if (type_is_invalid(value->value.type)) return ira->codegen->builtin_types.entry_invalid; TypeTableEntry *ptr_type = value->value.type; - if (ptr_type->id == TypeTableEntryIdMetaType) { - // surprise! actually this is just ??T not an unwrap maybe instruction - TypeTableEntry *ptr_type_ptr = ir_resolve_type(ira, value); - assert(ptr_type_ptr->id == TypeTableEntryIdPointer); - TypeTableEntry *child_type = ptr_type_ptr->data.pointer.child_type; - type_ensure_zero_bits_known(ira->codegen, child_type); - TypeTableEntry *layer1 = get_maybe_type(ira->codegen, child_type); - TypeTableEntry *layer2 = get_maybe_type(ira->codegen, layer1); - TypeTableEntry *result_type = get_pointer_to_type(ira->codegen, layer2, true); - - IrInstruction *const_instr = ir_build_const_type(&ira->new_irb, unwrap_maybe_instruction->base.scope, - unwrap_maybe_instruction->base.source_node, result_type); - ir_link_new_instruction(const_instr, &unwrap_maybe_instruction->base); - return const_instr->value.type; - } - assert(ptr_type->id == TypeTableEntryIdPointer); TypeTableEntry *type_entry = ptr_type->data.pointer.child_type; if (type_is_invalid(type_entry)) { return ira->codegen->builtin_types.entry_invalid; - } else if (type_entry->id != TypeTableEntryIdMaybe) { + } else if (type_entry->id != TypeTableEntryIdOptional) { ir_add_error_node(ira, unwrap_maybe_instruction->value->source_node, - buf_sprintf("expected nullable type, found '%s'", buf_ptr(&type_entry->name))); + buf_sprintf("expected optional type, found '%s'", buf_ptr(&type_entry->name))); return ira->codegen->builtin_types.entry_invalid; } TypeTableEntry *child_type = type_entry->data.maybe.child_type; TypeTableEntry *result_type = get_pointer_to_type_extra(ira->codegen, child_type, ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile, + PtrLenSingle, get_abi_alignment(ira->codegen, child_type), 0, 0); if (instr_is_comptime(value)) { @@ -14336,13 +15541,18 @@ static TypeTableEntry *ir_analyze_instruction_unwrap_maybe(IrAnalyze *ira, ConstExprValue *maybe_val = const_ptr_pointee(ira->codegen, val); if 
(val->data.x_ptr.mut != ConstPtrMutRuntimeVar) {
-            if (!maybe_val->data.x_maybe) {
+            if (optional_value_is_null(maybe_val)) {
                 ir_add_error(ira, &unwrap_maybe_instruction->base, buf_sprintf("unable to unwrap null"));
                 return ira->codegen->builtin_types.entry_invalid;
             }
             ConstExprValue *out_val = ir_build_const_from(ira, &unwrap_maybe_instruction->base);
             out_val->data.x_ptr.special = ConstPtrSpecialRef;
-            out_val->data.x_ptr.data.ref.pointee = maybe_val->data.x_maybe;
+            out_val->data.x_ptr.mut = val->data.x_ptr.mut;
+            if (type_is_codegen_pointer(child_type)) {
+                out_val->data.x_ptr.data.ref.pointee = maybe_val;
+            } else {
+                out_val->data.x_ptr.data.ref.pointee = maybe_val->data.x_optional;
+            }
             return result_type;
         }
     }
@@ -14400,6 +15610,48 @@ static TypeTableEntry *ir_analyze_instruction_clz(IrAnalyze *ira, IrInstructionC
     }
 }

+static TypeTableEntry *ir_analyze_instruction_pop_count(IrAnalyze *ira, IrInstructionPopCount *instruction) {
+    IrInstruction *value = instruction->value->other;
+    if (type_is_invalid(value->value.type))
+        return ira->codegen->builtin_types.entry_invalid;
+
+    if (value->value.type->id != TypeTableEntryIdInt && value->value.type->id != TypeTableEntryIdComptimeInt) {
+        ir_add_error(ira, value,
+            buf_sprintf("expected integer type, found '%s'", buf_ptr(&value->value.type->name)));
+        return ira->codegen->builtin_types.entry_invalid;
+    }
+
+    if (instr_is_comptime(value)) {
+        ConstExprValue *val = ir_resolve_const(ira, value, UndefBad);
+        if (!val)
+            return ira->codegen->builtin_types.entry_invalid;
+        if (bigint_cmp_zero(&val->data.x_bigint) != CmpLT) {
+            size_t result = bigint_popcount_unsigned(&val->data.x_bigint);
+            ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
+            bigint_init_unsigned(&out_val->data.x_bigint, result);
+            return ira->codegen->builtin_types.entry_num_lit_int;
+        }
+        if (value->value.type->id == TypeTableEntryIdComptimeInt) {
+            Buf *val_buf = buf_alloc();
+            bigint_append_buf(val_buf, &val->data.x_bigint, 10);
+            ir_add_error(ira, &instruction->base,
+                buf_sprintf("@popCount on negative %s value %s",
+                    buf_ptr(&value->value.type->name), buf_ptr(val_buf)));
+            return ira->codegen->builtin_types.entry_invalid;
+        }
+        size_t result = bigint_popcount_signed(&val->data.x_bigint, value->value.type->data.integral.bit_count);
+        ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
+        bigint_init_unsigned(&out_val->data.x_bigint, result);
+        return ira->codegen->builtin_types.entry_num_lit_int;
+    }
+
+    IrInstruction *result = ir_build_pop_count(&ira->new_irb, instruction->base.scope,
+            instruction->base.source_node, value);
+    result->value.type = get_smallest_unsigned_int_type(ira->codegen, value->value.type->data.integral.bit_count);
+    ir_link_new_instruction(result, &instruction->base);
+    return result->value.type;
+}
+
 static IrInstruction *ir_analyze_union_tag(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *value) {
     if (type_is_invalid(value->value.type))
         return ira->codegen->invalid_instruction;
@@ -14409,7 +15661,7 @@ static IrInstruction *ir_analyze_union_tag(IrAnalyze *ira, IrInstruction *source
     }

     if (value->value.type->id != TypeTableEntryIdUnion) {
-        ir_add_error(ira, source_instr,
+        ir_add_error(ira, value,
             buf_sprintf("expected enum or union type, found '%s'", buf_ptr(&value->value.type->name)));
         return ira->codegen->invalid_instruction;
     }
@@ -14450,6 +15702,13 @@ static TypeTableEntry *ir_analyze_instruction_switch_br(IrAnalyze *ira,
     if (type_is_invalid(target_value->value.type))
         return ir_unreach_error(ira);

+
if (switch_br_instruction->switch_prongs_void != nullptr) { + if (type_is_invalid(switch_br_instruction->switch_prongs_void->other->value.type)) { + return ir_unreach_error(ira); + } + } + + size_t case_count = switch_br_instruction->case_count; bool is_comptime; @@ -14540,7 +15799,7 @@ static TypeTableEntry *ir_analyze_instruction_switch_br(IrAnalyze *ira, IrBasicBlock *new_else_block = ir_get_new_bb(ira, switch_br_instruction->else_block, &switch_br_instruction->base); ir_build_switch_br_from(&ira->new_irb, &switch_br_instruction->base, - target_value, new_else_block, case_count, cases, nullptr); + target_value, new_else_block, case_count, cases, nullptr, nullptr); return ir_finish_anal(ira, ira->codegen->builtin_types.entry_unreachable); } @@ -14561,7 +15820,10 @@ static TypeTableEntry *ir_analyze_instruction_switch_target(IrAnalyze *ira, return out_val->type; } - assert(target_value_ptr->value.type->id == TypeTableEntryIdPointer); + if (target_value_ptr->value.type->id != TypeTableEntryIdPointer) { + ir_add_error(ira, target_value_ptr, buf_sprintf("invalid deref on switch target")); + return ira->codegen->builtin_types.entry_invalid; + } TypeTableEntry *target_type = target_value_ptr->value.type->data.pointer.child_type; ConstExprValue *pointee_val = nullptr; @@ -14582,8 +15844,8 @@ static TypeTableEntry *ir_analyze_instruction_switch_target(IrAnalyze *ira, case TypeTableEntryIdBool: case TypeTableEntryIdInt: case TypeTableEntryIdFloat: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdPointer: case TypeTableEntryIdPromise: case TypeTableEntryIdFn: @@ -14661,9 +15923,9 @@ static TypeTableEntry *ir_analyze_instruction_switch_target(IrAnalyze *ira, case TypeTableEntryIdUnreachable: case TypeTableEntryIdArray: case TypeTableEntryIdStruct: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: + case TypeTableEntryIdOptional: case TypeTableEntryIdBlock: case TypeTableEntryIdBoundFn: case TypeTableEntryIdArgTuple: @@ -14841,6 +16103,8 @@ static TypeTableEntry *ir_analyze_container_init_fields_union(IrAnalyze *ira, Ir assert(container_type->id == TypeTableEntryIdUnion); ensure_complete_type(ira->codegen, container_type); + if (type_is_invalid(container_type)) + return ira->codegen->builtin_types.entry_invalid; if (instr_field_count != 1) { ir_add_error(ira, instruction, @@ -14868,8 +16132,14 @@ static TypeTableEntry *ir_analyze_container_init_fields_union(IrAnalyze *ira, Ir if (casted_field_value == ira->codegen->invalid_instruction) return ira->codegen->builtin_types.entry_invalid; + type_ensure_zero_bits_known(ira->codegen, casted_field_value->value.type); + if (type_is_invalid(casted_field_value->value.type)) + return ira->codegen->builtin_types.entry_invalid; + bool is_comptime = ir_should_inline(ira->new_irb.exec, instruction->scope); - if (is_comptime || casted_field_value->value.special != ConstValSpecialRuntime) { + if (is_comptime || casted_field_value->value.special != ConstValSpecialRuntime || + !type_has_bits(casted_field_value->value.type)) + { ConstExprValue *field_val = ir_resolve_const(ira, casted_field_value, UndefOk); if (!field_val) return ira->codegen->builtin_types.entry_invalid; @@ -14908,6 +16178,8 @@ static TypeTableEntry *ir_analyze_container_init_fields(IrAnalyze *ira, IrInstru } ensure_complete_type(ira->codegen, container_type); + if 
(type_is_invalid(container_type)) + return ira->codegen->builtin_types.entry_invalid; size_t actual_field_count = container_type->data.structure.src_field_count; @@ -15176,11 +16448,11 @@ static TypeTableEntry *ir_analyze_min_max(IrAnalyze *ira, IrInstruction *source_ case TypeTableEntryIdPromise: case TypeTableEntryIdArray: case TypeTableEntryIdStruct: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdUnion: @@ -15267,7 +16539,8 @@ static TypeTableEntry *ir_analyze_instruction_err_name(IrAnalyze *ira, IrInstruc if (type_is_invalid(casted_value->value.type)) return ira->codegen->builtin_types.entry_invalid; - TypeTableEntry *u8_ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, true); + TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra(ira->codegen, ira->codegen->builtin_types.entry_u8, + true, false, PtrLenUnknown, get_abi_alignment(ira->codegen, ira->codegen->builtin_types.entry_u8), 0, 0); TypeTableEntry *str_type = get_slice_type(ira->codegen, u8_ptr_type); if (casted_value->value.special == ConstValSpecialStatic) { ErrorTableEntry *err = casted_value->value.data.x_err_set; @@ -15293,6 +16566,9 @@ static TypeTableEntry *ir_analyze_instruction_enum_tag_name(IrAnalyze *ira, IrIn assert(target->value.type->id == TypeTableEntryIdEnum); if (instr_is_comptime(target)) { + type_ensure_zero_bits_known(ira->codegen, target->value.type); + if (type_is_invalid(target->value.type)) + return ira->codegen->builtin_types.entry_invalid; TypeEnumField *field = find_enum_field_by_tag(target->value.type, &target->value.data.x_bigint); ConstExprValue *array_val = create_const_str_lit(ira->codegen, field->name); ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); @@ -15300,15 +16576,14 @@ static TypeTableEntry *ir_analyze_instruction_enum_tag_name(IrAnalyze *ira, IrIn return out_val->type; } - if (!target->value.type->data.enumeration.generate_name_table) { - target->value.type->data.enumeration.generate_name_table = true; - ira->codegen->name_table_enums.append(target->value.type); - } - IrInstruction *result = ir_build_tag_name(&ira->new_irb, instruction->base.scope, instruction->base.source_node, target); ir_link_new_instruction(result, &instruction->base); - TypeTableEntry *u8_ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, true); + TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra( + ira->codegen, ira->codegen->builtin_types.entry_u8, + true, false, PtrLenUnknown, + get_abi_alignment(ira->codegen, ira->codegen->builtin_types.entry_u8), + 0, 0); result->value.type = get_slice_type(ira->codegen, u8_ptr_type); return result->value.type; } @@ -15337,6 +16612,8 @@ static TypeTableEntry *ir_analyze_instruction_field_parent_ptr(IrAnalyze *ira, } ensure_complete_type(ira->codegen, container_type); + if (type_is_invalid(container_type)) + return ira->codegen->builtin_types.entry_invalid; TypeStructField *field = find_struct_type_field(container_type, field_name); if (field == nullptr) { @@ -15359,6 +16636,7 @@ static TypeTableEntry *ir_analyze_instruction_field_parent_ptr(IrAnalyze *ira, TypeTableEntry *field_ptr_type = 
get_pointer_to_type_extra(ira->codegen, field->type_entry,
             field_ptr->value.type->data.pointer.is_const, field_ptr->value.type->data.pointer.is_volatile,
+            PtrLenSingle,
             field_ptr_align, 0, 0);
     IrInstruction *casted_field_ptr = ir_implicit_cast(ira, field_ptr, field_ptr_type);
     if (type_is_invalid(casted_field_ptr->value.type))
@@ -15367,6 +16645,7 @@ static TypeTableEntry *ir_analyze_instruction_field_parent_ptr(IrAnalyze *ira,
     TypeTableEntry *result_type = get_pointer_to_type_extra(ira->codegen, container_type,
             casted_field_ptr->value.type->data.pointer.is_const, casted_field_ptr->value.type->data.pointer.is_volatile,
+            PtrLenSingle,
             parent_ptr_align, 0, 0);

     if (instr_is_comptime(casted_field_ptr)) {
@@ -15411,6 +16690,8 @@ static TypeTableEntry *ir_analyze_instruction_offset_of(IrAnalyze *ira,
         return ira->codegen->builtin_types.entry_invalid;

     ensure_complete_type(ira->codegen, container_type);
+    if (type_is_invalid(container_type))
+        return ira->codegen->builtin_types.entry_invalid;

     IrInstruction *field_name_value = instruction->field_name->other;
     Buf *field_name = ir_resolve_str(ira, field_name_value);
@@ -15443,6 +16724,974 @@ static TypeTableEntry *ir_analyze_instruction_offset_of(IrAnalyze *ira,
     return ira->codegen->builtin_types.entry_num_lit_int;
 }

+static void ensure_field_index(TypeTableEntry *type, const char *field_name, size_t index)
+{
+    Buf *field_name_buf;
+
+    assert(type != nullptr && !type_is_invalid(type));
+    // Check for our field by creating a buffer in place then using the comma operator to free it so that we don't
+    // leak memory in debug mode.
+    assert(find_struct_type_field(type, field_name_buf = buf_create_from_str(field_name))->src_index == index &&
+            (buf_deinit(field_name_buf), true));
+}
+
+static TypeTableEntry *ir_type_info_get_type(IrAnalyze *ira, const char *type_name, TypeTableEntry *root = nullptr)
+{
+    static ConstExprValue *type_info_var = nullptr;
+    static TypeTableEntry *type_info_type = nullptr;
+    if (type_info_var == nullptr)
+    {
+        type_info_var = get_builtin_value(ira->codegen, "TypeInfo");
+        assert(type_info_var->type->id == TypeTableEntryIdMetaType);
+
+        ensure_complete_type(ira->codegen, type_info_var->data.x_type);
+        if (type_is_invalid(type_info_var->data.x_type))
+            return ira->codegen->builtin_types.entry_invalid;
+
+        type_info_type = type_info_var->data.x_type;
+        assert(type_info_type->id == TypeTableEntryIdUnion);
+    }
+
+    if (type_name == nullptr && root == nullptr)
+        return type_info_type;
+    else if (type_name == nullptr)
+        return root;
+
+    TypeTableEntry *root_type = (root == nullptr) ?
type_info_type : root; + + ScopeDecls *type_info_scope = get_container_scope(root_type); + assert(type_info_scope != nullptr); + + Buf field_name = BUF_INIT; + buf_init_from_str(&field_name, type_name); + auto entry = type_info_scope->decl_table.get(&field_name); + buf_deinit(&field_name); + + TldVar *tld = (TldVar *)entry; + assert(tld->base.id == TldIdVar); + + VariableTableEntry *var = tld->var; + + ensure_complete_type(ira->codegen, var->value->type); + if (type_is_invalid(var->value->type)) + return ira->codegen->builtin_types.entry_invalid; + assert(var->value->type->id == TypeTableEntryIdMetaType); + return var->value->data.x_type; +} + +static bool ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, ScopeDecls *decls_scope) +{ + TypeTableEntry *type_info_definition_type = ir_type_info_get_type(ira, "Definition"); + ensure_complete_type(ira->codegen, type_info_definition_type); + if (type_is_invalid(type_info_definition_type)) + return false; + + ensure_field_index(type_info_definition_type, "name", 0); + ensure_field_index(type_info_definition_type, "is_pub", 1); + ensure_field_index(type_info_definition_type, "data", 2); + + TypeTableEntry *type_info_definition_data_type = ir_type_info_get_type(ira, "Data", type_info_definition_type); + ensure_complete_type(ira->codegen, type_info_definition_data_type); + if (type_is_invalid(type_info_definition_data_type)) + return false; + + TypeTableEntry *type_info_fn_def_type = ir_type_info_get_type(ira, "FnDef", type_info_definition_data_type); + ensure_complete_type(ira->codegen, type_info_fn_def_type); + if (type_is_invalid(type_info_fn_def_type)) + return false; + + TypeTableEntry *type_info_fn_def_inline_type = ir_type_info_get_type(ira, "Inline", type_info_fn_def_type); + ensure_complete_type(ira->codegen, type_info_fn_def_inline_type); + if (type_is_invalid(type_info_fn_def_inline_type)) + return false; + + // Loop through our definitions once to figure out how many definitions we will generate info for. + auto decl_it = decls_scope->decl_table.entry_iterator(); + decltype(decls_scope->decl_table)::Entry *curr_entry = nullptr; + int definition_count = 0; + + while ((curr_entry = decl_it.next()) != nullptr) + { + // If the definition is unresolved, force it to be resolved again. + if (curr_entry->value->resolution == TldResolutionUnresolved) + { + resolve_top_level_decl(ira->codegen, curr_entry->value, false, curr_entry->value->source_node); + if (curr_entry->value->resolution != TldResolutionOk) + { + return false; + } + } + + // Skip comptime blocks and test functions. + if (curr_entry->value->id != TldIdCompTime) + { + if (curr_entry->value->id == TldIdFn) + { + FnTableEntry *fn_entry = ((TldFn *)curr_entry->value)->fn_entry; + if (fn_entry->is_test) + continue; + } + + definition_count += 1; + } + } + + ConstExprValue *definition_array = create_const_vals(1); + definition_array->special = ConstValSpecialStatic; + definition_array->type = get_array_type(ira->codegen, type_info_definition_type, definition_count); + definition_array->data.x_array.special = ConstArraySpecialNone; + definition_array->data.x_array.s_none.parent.id = ConstParentIdNone; + definition_array->data.x_array.s_none.elements = create_const_vals(definition_count); + init_const_slice(ira->codegen, out_val, definition_array, 0, definition_count, false); + + // Loop through the definitions and generate info. 
+ decl_it = decls_scope->decl_table.entry_iterator(); + curr_entry = nullptr; + int definition_index = 0; + while ((curr_entry = decl_it.next()) != nullptr) + { + // Skip comptime blocks and test functions. + if (curr_entry->value->id == TldIdCompTime) + continue; + else if (curr_entry->value->id == TldIdFn) + { + FnTableEntry *fn_entry = ((TldFn *)curr_entry->value)->fn_entry; + if (fn_entry->is_test) + continue; + } + + ConstExprValue *definition_val = &definition_array->data.x_array.s_none.elements[definition_index]; + + definition_val->special = ConstValSpecialStatic; + definition_val->type = type_info_definition_type; + + ConstExprValue *inner_fields = create_const_vals(3); + ConstExprValue *name = create_const_str_lit(ira->codegen, curr_entry->key); + init_const_slice(ira->codegen, &inner_fields[0], name, 0, buf_len(curr_entry->key), true); + inner_fields[1].special = ConstValSpecialStatic; + inner_fields[1].type = ira->codegen->builtin_types.entry_bool; + inner_fields[1].data.x_bool = curr_entry->value->visib_mod == VisibModPub; + inner_fields[2].special = ConstValSpecialStatic; + inner_fields[2].type = type_info_definition_data_type; + inner_fields[2].data.x_union.parent.id = ConstParentIdStruct; + inner_fields[2].data.x_union.parent.data.p_struct.struct_val = definition_val; + inner_fields[2].data.x_union.parent.data.p_struct.field_index = 1; + + switch (curr_entry->value->id) + { + case TldIdVar: + { + VariableTableEntry *var = ((TldVar *)curr_entry->value)->var; + ensure_complete_type(ira->codegen, var->value->type); + if (type_is_invalid(var->value->type)) + return false; + + if (var->value->type->id == TypeTableEntryIdMetaType) + { + // We have a variable of type 'type', so it's actually a type definition. + // 0: Data.Type: type + bigint_init_unsigned(&inner_fields[2].data.x_union.tag, 0); + inner_fields[2].data.x_union.payload = var->value; + } + else + { + // We have a variable of another type, so we store the type of the variable. 
+ // 1: Data.Var: type + bigint_init_unsigned(&inner_fields[2].data.x_union.tag, 1); + + ConstExprValue *payload = create_const_vals(1); + payload->type = ira->codegen->builtin_types.entry_type; + payload->data.x_type = var->value->type; + + inner_fields[2].data.x_union.payload = payload; + } + + break; + } + case TldIdFn: + { + // 2: Data.Fn: Data.FnDef + bigint_init_unsigned(&inner_fields[2].data.x_union.tag, 2); + + FnTableEntry *fn_entry = ((TldFn *)curr_entry->value)->fn_entry; + assert(!fn_entry->is_test); + + AstNodeFnProto *fn_node = (AstNodeFnProto *)(fn_entry->proto_node); + + ConstExprValue *fn_def_val = create_const_vals(1); + fn_def_val->special = ConstValSpecialStatic; + fn_def_val->type = type_info_fn_def_type; + fn_def_val->data.x_struct.parent.id = ConstParentIdUnion; + fn_def_val->data.x_struct.parent.data.p_union.union_val = &inner_fields[2]; + + ConstExprValue *fn_def_fields = create_const_vals(9); + fn_def_val->data.x_struct.fields = fn_def_fields; + + // fn_type: type + ensure_field_index(fn_def_val->type, "fn_type", 0); + fn_def_fields[0].special = ConstValSpecialStatic; + fn_def_fields[0].type = ira->codegen->builtin_types.entry_type; + fn_def_fields[0].data.x_type = fn_entry->type_entry; + // inline_type: Data.FnDef.Inline + ensure_field_index(fn_def_val->type, "inline_type", 1); + fn_def_fields[1].special = ConstValSpecialStatic; + fn_def_fields[1].type = type_info_fn_def_inline_type; + bigint_init_unsigned(&fn_def_fields[1].data.x_enum_tag, fn_entry->fn_inline); + // calling_convention: TypeInfo.CallingConvention + ensure_field_index(fn_def_val->type, "calling_convention", 2); + fn_def_fields[2].special = ConstValSpecialStatic; + fn_def_fields[2].type = ir_type_info_get_type(ira, "CallingConvention"); + bigint_init_unsigned(&fn_def_fields[2].data.x_enum_tag, fn_node->cc); + // is_var_args: bool + ensure_field_index(fn_def_val->type, "is_var_args", 3); + bool is_varargs = fn_node->is_var_args; + fn_def_fields[3].special = ConstValSpecialStatic; + fn_def_fields[3].type = ira->codegen->builtin_types.entry_bool; + fn_def_fields[3].data.x_bool = is_varargs; + // is_extern: bool + ensure_field_index(fn_def_val->type, "is_extern", 4); + fn_def_fields[4].special = ConstValSpecialStatic; + fn_def_fields[4].type = ira->codegen->builtin_types.entry_bool; + fn_def_fields[4].data.x_bool = fn_node->is_extern; + // is_export: bool + ensure_field_index(fn_def_val->type, "is_export", 5); + fn_def_fields[5].special = ConstValSpecialStatic; + fn_def_fields[5].type = ira->codegen->builtin_types.entry_bool; + fn_def_fields[5].data.x_bool = fn_node->is_export; + // lib_name: ?[]const u8 + ensure_field_index(fn_def_val->type, "lib_name", 6); + fn_def_fields[6].special = ConstValSpecialStatic; + TypeTableEntry *u8_ptr = get_pointer_to_type_extra( + ira->codegen, ira->codegen->builtin_types.entry_u8, + true, false, PtrLenUnknown, + get_abi_alignment(ira->codegen, ira->codegen->builtin_types.entry_u8), + 0, 0); + fn_def_fields[6].type = get_optional_type(ira->codegen, get_slice_type(ira->codegen, u8_ptr)); + if (fn_node->is_extern && buf_len(fn_node->lib_name) > 0) { + fn_def_fields[6].data.x_optional = create_const_vals(1); + ConstExprValue *lib_name = create_const_str_lit(ira->codegen, fn_node->lib_name); + init_const_slice(ira->codegen, fn_def_fields[6].data.x_optional, lib_name, 0, buf_len(fn_node->lib_name), true); + } else { + fn_def_fields[6].data.x_optional = nullptr; + } + // return_type: type + ensure_field_index(fn_def_val->type, "return_type", 7); + fn_def_fields[7].special = 
ConstValSpecialStatic; + fn_def_fields[7].type = ira->codegen->builtin_types.entry_type; + if (fn_entry->src_implicit_return_type != nullptr) + fn_def_fields[7].data.x_type = fn_entry->src_implicit_return_type; + else if (fn_entry->type_entry->data.fn.gen_return_type != nullptr) + fn_def_fields[7].data.x_type = fn_entry->type_entry->data.fn.gen_return_type; + else + fn_def_fields[7].data.x_type = fn_entry->type_entry->data.fn.fn_type_id.return_type; + // arg_names: [][] const u8 + ensure_field_index(fn_def_val->type, "arg_names", 8); + size_t fn_arg_count = fn_entry->variable_list.length; + ConstExprValue *fn_arg_name_array = create_const_vals(1); + fn_arg_name_array->special = ConstValSpecialStatic; + fn_arg_name_array->type = get_array_type(ira->codegen, + get_slice_type(ira->codegen, u8_ptr), fn_arg_count); + fn_arg_name_array->data.x_array.special = ConstArraySpecialNone; + fn_arg_name_array->data.x_array.s_none.parent.id = ConstParentIdNone; + fn_arg_name_array->data.x_array.s_none.elements = create_const_vals(fn_arg_count); + + init_const_slice(ira->codegen, &fn_def_fields[8], fn_arg_name_array, 0, fn_arg_count, false); + + for (size_t fn_arg_index = 0; fn_arg_index < fn_arg_count; fn_arg_index++) + { + VariableTableEntry *arg_var = fn_entry->variable_list.at(fn_arg_index); + ConstExprValue *fn_arg_name_val = &fn_arg_name_array->data.x_array.s_none.elements[fn_arg_index]; + ConstExprValue *arg_name = create_const_str_lit(ira->codegen, &arg_var->name); + init_const_slice(ira->codegen, fn_arg_name_val, arg_name, 0, buf_len(&arg_var->name), true); + fn_arg_name_val->data.x_struct.parent.id = ConstParentIdArray; + fn_arg_name_val->data.x_struct.parent.data.p_array.array_val = fn_arg_name_array; + fn_arg_name_val->data.x_struct.parent.data.p_array.elem_index = fn_arg_index; + } + + inner_fields[2].data.x_union.payload = fn_def_val; + break; + } + case TldIdContainer: + { + TypeTableEntry *type_entry = ((TldContainer *)curr_entry->value)->type_entry; + ensure_complete_type(ira->codegen, type_entry); + if (type_is_invalid(type_entry)) + return false; + + // This is a type. 
+ bigint_init_unsigned(&inner_fields[2].data.x_union.tag, 0); + + ConstExprValue *payload = create_const_vals(1); + payload->type = ira->codegen->builtin_types.entry_type; + payload->data.x_type = type_entry; + + inner_fields[2].data.x_union.payload = payload; + + break; + } + default: + zig_unreachable(); + } + + definition_val->data.x_struct.fields = inner_fields; + definition_index++; + } + + assert(definition_index == definition_count); + return true; +} + +static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *type_entry) { + assert(type_entry != nullptr); + assert(!type_is_invalid(type_entry)); + + ensure_complete_type(ira->codegen, type_entry); + if (type_is_invalid(type_entry)) + return nullptr; + + const auto make_enum_field_val = [ira](ConstExprValue *enum_field_val, TypeEnumField *enum_field, + TypeTableEntry *type_info_enum_field_type) { + enum_field_val->special = ConstValSpecialStatic; + enum_field_val->type = type_info_enum_field_type; + + ConstExprValue *inner_fields = create_const_vals(2); + inner_fields[1].special = ConstValSpecialStatic; + inner_fields[1].type = ira->codegen->builtin_types.entry_usize; + + ConstExprValue *name = create_const_str_lit(ira->codegen, enum_field->name); + init_const_slice(ira->codegen, &inner_fields[0], name, 0, buf_len(enum_field->name), true); + + bigint_init_bigint(&inner_fields[1].data.x_bigint, &enum_field->value); + + enum_field_val->data.x_struct.fields = inner_fields; + }; + + const auto create_ptr_like_type_info = [ira](TypeTableEntry *ptr_type_entry) { + TypeTableEntry *attrs_type; + uint32_t size_enum_index; + if (is_slice(ptr_type_entry)) { + attrs_type = ptr_type_entry->data.structure.fields[slice_ptr_index].type_entry; + size_enum_index = 2; + } else if (ptr_type_entry->id == TypeTableEntryIdPointer) { + attrs_type = ptr_type_entry; + size_enum_index = (ptr_type_entry->data.pointer.ptr_len == PtrLenSingle) ? 
0 : 1; + } else { + zig_unreachable(); + } + + TypeTableEntry *type_info_pointer_type = ir_type_info_get_type(ira, "Pointer"); + ensure_complete_type(ira->codegen, type_info_pointer_type); + assert(!type_is_invalid(type_info_pointer_type)); + + ConstExprValue *result = create_const_vals(1); + result->special = ConstValSpecialStatic; + result->type = type_info_pointer_type; + + ConstExprValue *fields = create_const_vals(5); + result->data.x_struct.fields = fields; + + // size: Size + ensure_field_index(result->type, "size", 0); + TypeTableEntry *type_info_pointer_size_type = ir_type_info_get_type(ira, "Size", type_info_pointer_type); + ensure_complete_type(ira->codegen, type_info_pointer_size_type); + assert(!type_is_invalid(type_info_pointer_size_type)); + fields[0].special = ConstValSpecialStatic; + fields[0].type = type_info_pointer_size_type; + bigint_init_unsigned(&fields[0].data.x_enum_tag, size_enum_index); + + // is_const: bool + ensure_field_index(result->type, "is_const", 1); + fields[1].special = ConstValSpecialStatic; + fields[1].type = ira->codegen->builtin_types.entry_bool; + fields[1].data.x_bool = attrs_type->data.pointer.is_const; + // is_volatile: bool + ensure_field_index(result->type, "is_volatile", 2); + fields[2].special = ConstValSpecialStatic; + fields[2].type = ira->codegen->builtin_types.entry_bool; + fields[2].data.x_bool = attrs_type->data.pointer.is_volatile; + // alignment: u32 + ensure_field_index(result->type, "alignment", 3); + fields[3].special = ConstValSpecialStatic; + fields[3].type = ira->codegen->builtin_types.entry_u32; + bigint_init_unsigned(&fields[3].data.x_bigint, attrs_type->data.pointer.alignment); + // child: type + ensure_field_index(result->type, "child", 4); + fields[4].special = ConstValSpecialStatic; + fields[4].type = ira->codegen->builtin_types.entry_type; + fields[4].data.x_type = attrs_type->data.pointer.child_type; + + return result; + }; + + if (type_entry == ira->codegen->builtin_types.entry_global_error_set) { + zig_panic("TODO implement @typeInfo for global error set"); + } + + ConstExprValue *result = nullptr; + switch (type_entry->id) + { + case TypeTableEntryIdInvalid: + zig_unreachable(); + case TypeTableEntryIdMetaType: + case TypeTableEntryIdVoid: + case TypeTableEntryIdBool: + case TypeTableEntryIdUnreachable: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: + case TypeTableEntryIdNamespace: + case TypeTableEntryIdBlock: + case TypeTableEntryIdArgTuple: + case TypeTableEntryIdOpaque: + return nullptr; + default: + { + // Lookup an available value in our cache. + auto entry = ira->codegen->type_info_cache.maybe_get(type_entry); + if (entry != nullptr) + return entry->value; + + // Fallthrough if we don't find one. 
+ } + case TypeTableEntryIdInt: + { + result = create_const_vals(1); + result->special = ConstValSpecialStatic; + result->type = ir_type_info_get_type(ira, "Int"); + + ConstExprValue *fields = create_const_vals(2); + result->data.x_struct.fields = fields; + + // is_signed: bool + ensure_field_index(result->type, "is_signed", 0); + fields[0].special = ConstValSpecialStatic; + fields[0].type = ira->codegen->builtin_types.entry_bool; + fields[0].data.x_bool = type_entry->data.integral.is_signed; + // bits: u8 + ensure_field_index(result->type, "bits", 1); + fields[1].special = ConstValSpecialStatic; + fields[1].type = ira->codegen->builtin_types.entry_u8; + bigint_init_unsigned(&fields[1].data.x_bigint, type_entry->data.integral.bit_count); + + break; + } + case TypeTableEntryIdFloat: + { + result = create_const_vals(1); + result->special = ConstValSpecialStatic; + result->type = ir_type_info_get_type(ira, "Float"); + + ConstExprValue *fields = create_const_vals(1); + result->data.x_struct.fields = fields; + + // bits: u8 + ensure_field_index(result->type, "bits", 0); + fields[0].special = ConstValSpecialStatic; + fields[0].type = ira->codegen->builtin_types.entry_u8; + bigint_init_unsigned(&fields->data.x_bigint, type_entry->data.floating.bit_count); + + break; + } + case TypeTableEntryIdPointer: + { + result = create_ptr_like_type_info(type_entry); + break; + } + case TypeTableEntryIdArray: + { + result = create_const_vals(1); + result->special = ConstValSpecialStatic; + result->type = ir_type_info_get_type(ira, "Array"); + + ConstExprValue *fields = create_const_vals(2); + result->data.x_struct.fields = fields; + + // len: usize + ensure_field_index(result->type, "len", 0); + fields[0].special = ConstValSpecialStatic; + fields[0].type = ira->codegen->builtin_types.entry_usize; + bigint_init_unsigned(&fields[0].data.x_bigint, type_entry->data.array.len); + // child: type + ensure_field_index(result->type, "child", 1); + fields[1].special = ConstValSpecialStatic; + fields[1].type = ira->codegen->builtin_types.entry_type; + fields[1].data.x_type = type_entry->data.array.child_type; + + break; + } + case TypeTableEntryIdOptional: + { + result = create_const_vals(1); + result->special = ConstValSpecialStatic; + result->type = ir_type_info_get_type(ira, "Optional"); + + ConstExprValue *fields = create_const_vals(1); + result->data.x_struct.fields = fields; + + // child: type + ensure_field_index(result->type, "child", 0); + fields[0].special = ConstValSpecialStatic; + fields[0].type = ira->codegen->builtin_types.entry_type; + fields[0].data.x_type = type_entry->data.maybe.child_type; + + break; + } + case TypeTableEntryIdPromise: + { + result = create_const_vals(1); + result->special = ConstValSpecialStatic; + result->type = ir_type_info_get_type(ira, "Promise"); + + ConstExprValue *fields = create_const_vals(1); + result->data.x_struct.fields = fields; + + // child: ?type + ensure_field_index(result->type, "child", 0); + fields[0].special = ConstValSpecialStatic; + fields[0].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type); + + if (type_entry->data.promise.result_type == nullptr) + fields[0].data.x_optional = nullptr; + else { + ConstExprValue *child_type = create_const_vals(1); + child_type->special = ConstValSpecialStatic; + child_type->type = ira->codegen->builtin_types.entry_type; + child_type->data.x_type = type_entry->data.promise.result_type; + fields[0].data.x_optional = child_type; + } + + break; + } + case TypeTableEntryIdEnum: + { + result = 
create_const_vals(1); + result->special = ConstValSpecialStatic; + result->type = ir_type_info_get_type(ira, "Enum"); + + ConstExprValue *fields = create_const_vals(4); + result->data.x_struct.fields = fields; + + // layout: ContainerLayout + ensure_field_index(result->type, "layout", 0); + fields[0].special = ConstValSpecialStatic; + fields[0].type = ir_type_info_get_type(ira, "ContainerLayout"); + bigint_init_unsigned(&fields[0].data.x_enum_tag, type_entry->data.enumeration.layout); + // tag_type: type + ensure_field_index(result->type, "tag_type", 1); + fields[1].special = ConstValSpecialStatic; + fields[1].type = ira->codegen->builtin_types.entry_type; + fields[1].data.x_type = type_entry->data.enumeration.tag_int_type; + // fields: []TypeInfo.EnumField + ensure_field_index(result->type, "fields", 2); + + TypeTableEntry *type_info_enum_field_type = ir_type_info_get_type(ira, "EnumField"); + uint32_t enum_field_count = type_entry->data.enumeration.src_field_count; + + ConstExprValue *enum_field_array = create_const_vals(1); + enum_field_array->special = ConstValSpecialStatic; + enum_field_array->type = get_array_type(ira->codegen, type_info_enum_field_type, enum_field_count); + enum_field_array->data.x_array.special = ConstArraySpecialNone; + enum_field_array->data.x_array.s_none.parent.id = ConstParentIdNone; + enum_field_array->data.x_array.s_none.elements = create_const_vals(enum_field_count); + + init_const_slice(ira->codegen, &fields[2], enum_field_array, 0, enum_field_count, false); + + for (uint32_t enum_field_index = 0; enum_field_index < enum_field_count; enum_field_index++) + { + TypeEnumField *enum_field = &type_entry->data.enumeration.fields[enum_field_index]; + ConstExprValue *enum_field_val = &enum_field_array->data.x_array.s_none.elements[enum_field_index]; + make_enum_field_val(enum_field_val, enum_field, type_info_enum_field_type); + enum_field_val->data.x_struct.parent.id = ConstParentIdArray; + enum_field_val->data.x_struct.parent.data.p_array.array_val = enum_field_array; + enum_field_val->data.x_struct.parent.data.p_array.elem_index = enum_field_index; + } + // defs: []TypeInfo.Definition + ensure_field_index(result->type, "defs", 3); + if (!ir_make_type_info_defs(ira, &fields[3], type_entry->data.enumeration.decls_scope)) + return nullptr; + + break; + } + case TypeTableEntryIdErrorSet: + { + result = create_const_vals(1); + result->special = ConstValSpecialStatic; + result->type = ir_type_info_get_type(ira, "ErrorSet"); + + ConstExprValue *fields = create_const_vals(1); + result->data.x_struct.fields = fields; + + // errors: []TypeInfo.Error + ensure_field_index(result->type, "errors", 0); + + TypeTableEntry *type_info_error_type = ir_type_info_get_type(ira, "Error"); + uint32_t error_count = type_entry->data.error_set.err_count; + ConstExprValue *error_array = create_const_vals(1); + error_array->special = ConstValSpecialStatic; + error_array->type = get_array_type(ira->codegen, type_info_error_type, error_count); + error_array->data.x_array.special = ConstArraySpecialNone; + error_array->data.x_array.s_none.parent.id = ConstParentIdNone; + error_array->data.x_array.s_none.elements = create_const_vals(error_count); + + init_const_slice(ira->codegen, &fields[0], error_array, 0, error_count, false); + for (uint32_t error_index = 0; error_index < error_count; error_index++) + { + ErrorTableEntry *error = type_entry->data.error_set.errors[error_index]; + ConstExprValue *error_val = &error_array->data.x_array.s_none.elements[error_index]; + + error_val->special = 
ConstValSpecialStatic; + error_val->type = type_info_error_type; + + ConstExprValue *inner_fields = create_const_vals(2); + inner_fields[1].special = ConstValSpecialStatic; + inner_fields[1].type = ira->codegen->builtin_types.entry_usize; + + ConstExprValue *name = nullptr; + if (error->cached_error_name_val != nullptr) + name = error->cached_error_name_val; + if (name == nullptr) + name = create_const_str_lit(ira->codegen, &error->name); + init_const_slice(ira->codegen, &inner_fields[0], name, 0, buf_len(&error->name), true); + bigint_init_unsigned(&inner_fields[1].data.x_bigint, error->value); + + error_val->data.x_struct.fields = inner_fields; + error_val->data.x_struct.parent.id = ConstParentIdArray; + error_val->data.x_struct.parent.data.p_array.array_val = error_array; + error_val->data.x_struct.parent.data.p_array.elem_index = error_index; + } + + break; + } + case TypeTableEntryIdErrorUnion: + { + result = create_const_vals(1); + result->special = ConstValSpecialStatic; + result->type = ir_type_info_get_type(ira, "ErrorUnion"); + + ConstExprValue *fields = create_const_vals(2); + result->data.x_struct.fields = fields; + + // error_set: type + ensure_field_index(result->type, "error_set", 0); + fields[0].special = ConstValSpecialStatic; + fields[0].type = ira->codegen->builtin_types.entry_type; + fields[0].data.x_type = type_entry->data.error_union.err_set_type; + + // payload: type + ensure_field_index(result->type, "payload", 1); + fields[1].special = ConstValSpecialStatic; + fields[1].type = ira->codegen->builtin_types.entry_type; + fields[1].data.x_type = type_entry->data.error_union.payload_type; + + break; + } + case TypeTableEntryIdUnion: + { + result = create_const_vals(1); + result->special = ConstValSpecialStatic; + result->type = ir_type_info_get_type(ira, "Union"); + + ConstExprValue *fields = create_const_vals(4); + result->data.x_struct.fields = fields; + + // layout: ContainerLayout + ensure_field_index(result->type, "layout", 0); + fields[0].special = ConstValSpecialStatic; + fields[0].type = ir_type_info_get_type(ira, "ContainerLayout"); + bigint_init_unsigned(&fields[0].data.x_enum_tag, type_entry->data.unionation.layout); + // tag_type: ?type + ensure_field_index(result->type, "tag_type", 1); + fields[1].special = ConstValSpecialStatic; + fields[1].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type); + + AstNode *union_decl_node = type_entry->data.unionation.decl_node; + if (union_decl_node->data.container_decl.auto_enum || + union_decl_node->data.container_decl.init_arg_expr != nullptr) + { + ConstExprValue *tag_type = create_const_vals(1); + tag_type->special = ConstValSpecialStatic; + tag_type->type = ira->codegen->builtin_types.entry_type; + tag_type->data.x_type = type_entry->data.unionation.tag_type; + fields[1].data.x_optional = tag_type; + } + else + fields[1].data.x_optional = nullptr; + // fields: []TypeInfo.UnionField + ensure_field_index(result->type, "fields", 2); + + TypeTableEntry *type_info_union_field_type = ir_type_info_get_type(ira, "UnionField"); + uint32_t union_field_count = type_entry->data.unionation.src_field_count; + + ConstExprValue *union_field_array = create_const_vals(1); + union_field_array->special = ConstValSpecialStatic; + union_field_array->type = get_array_type(ira->codegen, type_info_union_field_type, union_field_count); + union_field_array->data.x_array.special = ConstArraySpecialNone; + union_field_array->data.x_array.s_none.parent.id = ConstParentIdNone; + 
union_field_array->data.x_array.s_none.elements = create_const_vals(union_field_count); + + init_const_slice(ira->codegen, &fields[2], union_field_array, 0, union_field_count, false); + + TypeTableEntry *type_info_enum_field_type = ir_type_info_get_type(ira, "EnumField"); + + for (uint32_t union_field_index = 0; union_field_index < union_field_count; union_field_index++) { + TypeUnionField *union_field = &type_entry->data.unionation.fields[union_field_index]; + ConstExprValue *union_field_val = &union_field_array->data.x_array.s_none.elements[union_field_index]; + + union_field_val->special = ConstValSpecialStatic; + union_field_val->type = type_info_union_field_type; + + ConstExprValue *inner_fields = create_const_vals(3); + inner_fields[1].special = ConstValSpecialStatic; + inner_fields[1].type = get_optional_type(ira->codegen, type_info_enum_field_type); + + if (fields[1].data.x_optional == nullptr) { + inner_fields[1].data.x_optional = nullptr; + } else { + inner_fields[1].data.x_optional = create_const_vals(1); + make_enum_field_val(inner_fields[1].data.x_optional, union_field->enum_field, type_info_enum_field_type); + } + + inner_fields[2].special = ConstValSpecialStatic; + inner_fields[2].type = ira->codegen->builtin_types.entry_type; + inner_fields[2].data.x_type = union_field->type_entry; + + ConstExprValue *name = create_const_str_lit(ira->codegen, union_field->name); + init_const_slice(ira->codegen, &inner_fields[0], name, 0, buf_len(union_field->name), true); + + union_field_val->data.x_struct.fields = inner_fields; + union_field_val->data.x_struct.parent.id = ConstParentIdArray; + union_field_val->data.x_struct.parent.data.p_array.array_val = union_field_array; + union_field_val->data.x_struct.parent.data.p_array.elem_index = union_field_index; + } + // defs: []TypeInfo.Definition + ensure_field_index(result->type, "defs", 3); + if (!ir_make_type_info_defs(ira, &fields[3], type_entry->data.unionation.decls_scope)) + return nullptr; + + break; + } + case TypeTableEntryIdStruct: + { + if (type_entry->data.structure.is_slice) { + result = create_ptr_like_type_info(type_entry); + break; + } + + result = create_const_vals(1); + result->special = ConstValSpecialStatic; + result->type = ir_type_info_get_type(ira, "Struct"); + + ConstExprValue *fields = create_const_vals(3); + result->data.x_struct.fields = fields; + + // layout: ContainerLayout + ensure_field_index(result->type, "layout", 0); + fields[0].special = ConstValSpecialStatic; + fields[0].type = ir_type_info_get_type(ira, "ContainerLayout"); + bigint_init_unsigned(&fields[0].data.x_enum_tag, type_entry->data.structure.layout); + // fields: []TypeInfo.StructField + ensure_field_index(result->type, "fields", 1); + + TypeTableEntry *type_info_struct_field_type = ir_type_info_get_type(ira, "StructField"); + uint32_t struct_field_count = type_entry->data.structure.src_field_count; + + ConstExprValue *struct_field_array = create_const_vals(1); + struct_field_array->special = ConstValSpecialStatic; + struct_field_array->type = get_array_type(ira->codegen, type_info_struct_field_type, struct_field_count); + struct_field_array->data.x_array.special = ConstArraySpecialNone; + struct_field_array->data.x_array.s_none.parent.id = ConstParentIdNone; + struct_field_array->data.x_array.s_none.elements = create_const_vals(struct_field_count); + + init_const_slice(ira->codegen, &fields[1], struct_field_array, 0, struct_field_count, false); + + for (uint32_t struct_field_index = 0; struct_field_index < struct_field_count; 
struct_field_index++) { + TypeStructField *struct_field = &type_entry->data.structure.fields[struct_field_index]; + ConstExprValue *struct_field_val = &struct_field_array->data.x_array.s_none.elements[struct_field_index]; + + struct_field_val->special = ConstValSpecialStatic; + struct_field_val->type = type_info_struct_field_type; + + ConstExprValue *inner_fields = create_const_vals(3); + inner_fields[1].special = ConstValSpecialStatic; + inner_fields[1].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_usize); + + if (!type_has_bits(struct_field->type_entry)) { + inner_fields[1].data.x_optional = nullptr; + } else { + size_t byte_offset = LLVMOffsetOfElement(ira->codegen->target_data_ref, type_entry->type_ref, struct_field->gen_index); + inner_fields[1].data.x_optional = create_const_vals(1); + inner_fields[1].data.x_optional->special = ConstValSpecialStatic; + inner_fields[1].data.x_optional->type = ira->codegen->builtin_types.entry_usize; + bigint_init_unsigned(&inner_fields[1].data.x_optional->data.x_bigint, byte_offset); + } + + inner_fields[2].special = ConstValSpecialStatic; + inner_fields[2].type = ira->codegen->builtin_types.entry_type; + inner_fields[2].data.x_type = struct_field->type_entry; + + ConstExprValue *name = create_const_str_lit(ira->codegen, struct_field->name); + init_const_slice(ira->codegen, &inner_fields[0], name, 0, buf_len(struct_field->name), true); + + struct_field_val->data.x_struct.fields = inner_fields; + struct_field_val->data.x_struct.parent.id = ConstParentIdArray; + struct_field_val->data.x_struct.parent.data.p_array.array_val = struct_field_array; + struct_field_val->data.x_struct.parent.data.p_array.elem_index = struct_field_index; + } + // defs: []TypeInfo.Definition + ensure_field_index(result->type, "defs", 2); + if (!ir_make_type_info_defs(ira, &fields[2], type_entry->data.structure.decls_scope)) + return nullptr; + + break; + } + case TypeTableEntryIdFn: + { + result = create_const_vals(1); + result->special = ConstValSpecialStatic; + result->type = ir_type_info_get_type(ira, "Fn"); + + ConstExprValue *fields = create_const_vals(6); + result->data.x_struct.fields = fields; + + // calling_convention: TypeInfo.CallingConvention + ensure_field_index(result->type, "calling_convention", 0); + fields[0].special = ConstValSpecialStatic; + fields[0].type = ir_type_info_get_type(ira, "CallingConvention"); + bigint_init_unsigned(&fields[0].data.x_enum_tag, type_entry->data.fn.fn_type_id.cc); + // is_generic: bool + ensure_field_index(result->type, "is_generic", 1); + bool is_generic = type_entry->data.fn.is_generic; + fields[1].special = ConstValSpecialStatic; + fields[1].type = ira->codegen->builtin_types.entry_bool; + fields[1].data.x_bool = is_generic; + // is_varargs: bool + ensure_field_index(result->type, "is_var_args", 2); + bool is_varargs = type_entry->data.fn.fn_type_id.is_var_args; + fields[2].special = ConstValSpecialStatic; + fields[2].type = ira->codegen->builtin_types.entry_bool; + fields[2].data.x_bool = type_entry->data.fn.fn_type_id.is_var_args; + // return_type: ?type + ensure_field_index(result->type, "return_type", 3); + fields[3].special = ConstValSpecialStatic; + fields[3].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type); + if (type_entry->data.fn.fn_type_id.return_type == nullptr) + fields[3].data.x_optional = nullptr; + else { + ConstExprValue *return_type = create_const_vals(1); + return_type->special = ConstValSpecialStatic; + return_type->type = 
ira->codegen->builtin_types.entry_type; + return_type->data.x_type = type_entry->data.fn.fn_type_id.return_type; + fields[3].data.x_optional = return_type; + } + // async_allocator_type: type + ensure_field_index(result->type, "async_allocator_type", 4); + fields[4].special = ConstValSpecialStatic; + fields[4].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type); + if (type_entry->data.fn.fn_type_id.async_allocator_type == nullptr) + fields[4].data.x_optional = nullptr; + else { + ConstExprValue *async_alloc_type = create_const_vals(1); + async_alloc_type->special = ConstValSpecialStatic; + async_alloc_type->type = ira->codegen->builtin_types.entry_type; + async_alloc_type->data.x_type = type_entry->data.fn.fn_type_id.async_allocator_type; + fields[4].data.x_optional = async_alloc_type; + } + // args: []TypeInfo.FnArg + TypeTableEntry *type_info_fn_arg_type = ir_type_info_get_type(ira, "FnArg"); + size_t fn_arg_count = type_entry->data.fn.fn_type_id.param_count - + (is_varargs && type_entry->data.fn.fn_type_id.cc != CallingConventionC); + + ConstExprValue *fn_arg_array = create_const_vals(1); + fn_arg_array->special = ConstValSpecialStatic; + fn_arg_array->type = get_array_type(ira->codegen, type_info_fn_arg_type, fn_arg_count); + fn_arg_array->data.x_array.special = ConstArraySpecialNone; + fn_arg_array->data.x_array.s_none.parent.id = ConstParentIdNone; + fn_arg_array->data.x_array.s_none.elements = create_const_vals(fn_arg_count); + + init_const_slice(ira->codegen, &fields[5], fn_arg_array, 0, fn_arg_count, false); + + for (size_t fn_arg_index = 0; fn_arg_index < fn_arg_count; fn_arg_index++) + { + FnTypeParamInfo *fn_param_info = &type_entry->data.fn.fn_type_id.param_info[fn_arg_index]; + ConstExprValue *fn_arg_val = &fn_arg_array->data.x_array.s_none.elements[fn_arg_index]; + + fn_arg_val->special = ConstValSpecialStatic; + fn_arg_val->type = type_info_fn_arg_type; + + bool arg_is_generic = fn_param_info->type == nullptr; + if (arg_is_generic) assert(is_generic); + + ConstExprValue *inner_fields = create_const_vals(3); + inner_fields[0].special = ConstValSpecialStatic; + inner_fields[0].type = ira->codegen->builtin_types.entry_bool; + inner_fields[0].data.x_bool = arg_is_generic; + inner_fields[1].special = ConstValSpecialStatic; + inner_fields[1].type = ira->codegen->builtin_types.entry_bool; + inner_fields[1].data.x_bool = fn_param_info->is_noalias; + inner_fields[2].special = ConstValSpecialStatic; + inner_fields[2].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type); + + if (arg_is_generic) + inner_fields[2].data.x_optional = nullptr; + else { + ConstExprValue *arg_type = create_const_vals(1); + arg_type->special = ConstValSpecialStatic; + arg_type->type = ira->codegen->builtin_types.entry_type; + arg_type->data.x_type = fn_param_info->type; + inner_fields[2].data.x_optional = arg_type; + } + + fn_arg_val->data.x_struct.fields = inner_fields; + fn_arg_val->data.x_struct.parent.id = ConstParentIdArray; + fn_arg_val->data.x_struct.parent.data.p_array.array_val = fn_arg_array; + fn_arg_val->data.x_struct.parent.data.p_array.elem_index = fn_arg_index; + } + + break; + } + case TypeTableEntryIdBoundFn: + { + TypeTableEntry *fn_type = type_entry->data.bound_fn.fn_type; + assert(fn_type->id == TypeTableEntryIdFn); + result = ir_make_type_info_value(ira, fn_type); + + break; + } + } + + assert(result != nullptr); + ira->codegen->type_info_cache.put(type_entry, result); + return result; +} + +static TypeTableEntry 
*ir_analyze_instruction_type_info(IrAnalyze *ira, + IrInstructionTypeInfo *instruction) +{ + IrInstruction *type_value = instruction->type_value->other; + TypeTableEntry *type_entry = ir_resolve_type(ira, type_value); + if (type_is_invalid(type_entry)) + return ira->codegen->builtin_types.entry_invalid; + + TypeTableEntry *result_type = ir_type_info_get_type(ira, nullptr); + + ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); + out_val->type = result_type; + bigint_init_unsigned(&out_val->data.x_union.tag, type_id_index(type_entry)); + + ConstExprValue *payload = ir_make_type_info_value(ira, type_entry); + out_val->data.x_union.payload = payload; + + if (payload != nullptr) + { + assert(payload->type->id == TypeTableEntryIdStruct); + payload->data.x_struct.parent.id = ConstParentIdUnion; + payload->data.x_struct.parent.data.p_union.union_val = out_val; + } + + return result_type; +} + static TypeTableEntry *ir_analyze_instruction_type_id(IrAnalyze *ira, IrInstructionTypeId *instruction) { @@ -15456,7 +17705,7 @@ static TypeTableEntry *ir_analyze_instruction_type_id(IrAnalyze *ira, TypeTableEntry *result_type = var_value->data.x_type; ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); - bigint_init_unsigned(&out_val->data.x_enum_tag, type_id_index(type_entry->id)); + bigint_init_unsigned(&out_val->data.x_enum_tag, type_id_index(type_entry)); return result_type; } @@ -15659,10 +17908,20 @@ static TypeTableEntry *ir_analyze_instruction_embed_file(IrAnalyze *ira, IrInstr } static TypeTableEntry *ir_analyze_instruction_cmpxchg(IrAnalyze *ira, IrInstructionCmpxchg *instruction) { + TypeTableEntry *operand_type = ir_resolve_atomic_operand_type(ira, instruction->type_value->other); + if (type_is_invalid(operand_type)) + return ira->codegen->builtin_types.entry_invalid; + IrInstruction *ptr = instruction->ptr->other; if (type_is_invalid(ptr->value.type)) return ira->codegen->builtin_types.entry_invalid; + // TODO let this be volatile + TypeTableEntry *ptr_type = get_pointer_to_type(ira->codegen, operand_type, false); + IrInstruction *casted_ptr = ir_implicit_cast(ira, ptr, ptr_type); + if (type_is_invalid(casted_ptr->value.type)) + return ira->codegen->builtin_types.entry_invalid; + IrInstruction *cmp_value = instruction->cmp_value->other; if (type_is_invalid(cmp_value->value.type)) return ira->codegen->builtin_types.entry_invalid; @@ -15687,28 +17946,11 @@ static TypeTableEntry *ir_analyze_instruction_cmpxchg(IrAnalyze *ira, IrInstruct if (!ir_resolve_atomic_order(ira, failure_order_value, &failure_order)) return ira->codegen->builtin_types.entry_invalid; - if (ptr->value.type->id != TypeTableEntryIdPointer) { - ir_add_error(ira, instruction->ptr, - buf_sprintf("expected pointer argument, found '%s'", buf_ptr(&ptr->value.type->name))); - return ira->codegen->builtin_types.entry_invalid; - } - - TypeTableEntry *child_type = ptr->value.type->data.pointer.child_type; - - uint32_t align_bytes = ptr->value.type->data.pointer.alignment; - uint64_t size_bytes = type_size(ira->codegen, child_type); - if (align_bytes < size_bytes) { - ir_add_error(ira, instruction->ptr, - buf_sprintf("expected pointer alignment of at least %" ZIG_PRI_u64 ", found %" PRIu32, - size_bytes, align_bytes)); - return ira->codegen->builtin_types.entry_invalid; - } - - IrInstruction *casted_cmp_value = ir_implicit_cast(ira, cmp_value, child_type); + IrInstruction *casted_cmp_value = ir_implicit_cast(ira, cmp_value, operand_type); if (type_is_invalid(casted_cmp_value->value.type)) return 
ira->codegen->builtin_types.entry_invalid; - IrInstruction *casted_new_value = ir_implicit_cast(ira, new_value, child_type); + IrInstruction *casted_new_value = ir_implicit_cast(ira, new_value, operand_type); if (type_is_invalid(casted_new_value->value.type)) return ira->codegen->builtin_types.entry_invalid; @@ -15733,9 +17975,17 @@ static TypeTableEntry *ir_analyze_instruction_cmpxchg(IrAnalyze *ira, IrInstruct return ira->codegen->builtin_types.entry_invalid; } - ir_build_cmpxchg_from(&ira->new_irb, &instruction->base, ptr, casted_cmp_value, casted_new_value, - success_order_value, failure_order_value, success_order, failure_order); - return ira->codegen->builtin_types.entry_bool; + if (instr_is_comptime(casted_ptr) && instr_is_comptime(casted_cmp_value) && instr_is_comptime(casted_new_value)) { + zig_panic("TODO compile-time execution of cmpxchg"); + } + + IrInstruction *result = ir_build_cmpxchg(&ira->new_irb, instruction->base.scope, instruction->base.source_node, + nullptr, casted_ptr, casted_cmp_value, casted_new_value, nullptr, nullptr, instruction->is_weak, + operand_type, success_order, failure_order); + result->value.type = get_optional_type(ira->codegen, operand_type); + ir_link_new_instruction(result, &instruction->base); + ir_add_alloca(ira, result, result->value.type); + return result->value.type; } static TypeTableEntry *ir_analyze_instruction_fence(IrAnalyze *ira, IrInstructionFence *instruction) { @@ -15758,7 +18008,7 @@ static TypeTableEntry *ir_analyze_instruction_truncate(IrAnalyze *ira, IrInstruc return ira->codegen->builtin_types.entry_invalid; if (dest_type->id != TypeTableEntryIdInt && - dest_type->id != TypeTableEntryIdNumLitInt) + dest_type->id != TypeTableEntryIdComptimeInt) { ir_add_error(ira, dest_type_value, buf_sprintf("expected integer type, found '%s'", buf_ptr(&dest_type->name))); return ira->codegen->builtin_types.entry_invalid; @@ -15770,7 +18020,7 @@ static TypeTableEntry *ir_analyze_instruction_truncate(IrAnalyze *ira, IrInstruc return ira->codegen->builtin_types.entry_invalid; if (src_type->id != TypeTableEntryIdInt && - src_type->id != TypeTableEntryIdNumLitInt) + src_type->id != TypeTableEntryIdComptimeInt) { ir_add_error(ira, target, buf_sprintf("expected integer type, found '%s'", buf_ptr(&src_type->name))); return ira->codegen->builtin_types.entry_invalid; @@ -15793,10 +18043,326 @@ static TypeTableEntry *ir_analyze_instruction_truncate(IrAnalyze *ira, IrInstruc return dest_type; } - ir_build_truncate_from(&ira->new_irb, &instruction->base, dest_type_value, target); + IrInstruction *new_instruction = ir_build_truncate(&ira->new_irb, instruction->base.scope, + instruction->base.source_node, dest_type_value, target); + ir_link_new_instruction(new_instruction, &instruction->base); return dest_type; } +static TypeTableEntry *ir_analyze_instruction_int_cast(IrAnalyze *ira, IrInstructionIntCast *instruction) { + TypeTableEntry *dest_type = ir_resolve_type(ira, instruction->dest_type->other); + if (type_is_invalid(dest_type)) + return ira->codegen->builtin_types.entry_invalid; + + if (dest_type->id != TypeTableEntryIdInt) { + ir_add_error(ira, instruction->dest_type, buf_sprintf("expected integer type, found '%s'", buf_ptr(&dest_type->name))); + return ira->codegen->builtin_types.entry_invalid; + } + + IrInstruction *target = instruction->target->other; + if (type_is_invalid(target->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + if (target->value.type->id == TypeTableEntryIdComptimeInt) { + if (ir_num_lit_fits_in_other_type(ira, 
target, dest_type, true)) { + IrInstruction *result = ir_resolve_cast(ira, &instruction->base, target, dest_type, + CastOpNumLitToConcrete, false); + if (type_is_invalid(result->value.type)) + return ira->codegen->builtin_types.entry_invalid; + ir_link_new_instruction(result, &instruction->base); + return dest_type; + } else { + return ira->codegen->builtin_types.entry_invalid; + } + } + + if (target->value.type->id != TypeTableEntryIdInt) { + ir_add_error(ira, instruction->target, buf_sprintf("expected integer type, found '%s'", + buf_ptr(&target->value.type->name))); + return ira->codegen->builtin_types.entry_invalid; + } + + IrInstruction *result = ir_analyze_widen_or_shorten(ira, &instruction->base, target, dest_type); + if (type_is_invalid(result->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + ir_link_new_instruction(result, &instruction->base); + return dest_type; +} + +static TypeTableEntry *ir_analyze_instruction_float_cast(IrAnalyze *ira, IrInstructionFloatCast *instruction) { + TypeTableEntry *dest_type = ir_resolve_type(ira, instruction->dest_type->other); + if (type_is_invalid(dest_type)) + return ira->codegen->builtin_types.entry_invalid; + + if (dest_type->id != TypeTableEntryIdFloat) { + ir_add_error(ira, instruction->dest_type, + buf_sprintf("expected float type, found '%s'", buf_ptr(&dest_type->name))); + return ira->codegen->builtin_types.entry_invalid; + } + + IrInstruction *target = instruction->target->other; + if (type_is_invalid(target->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + if (target->value.type->id == TypeTableEntryIdComptimeInt || + target->value.type->id == TypeTableEntryIdComptimeFloat) + { + if (ir_num_lit_fits_in_other_type(ira, target, dest_type, true)) { + CastOp op; + if (target->value.type->id == TypeTableEntryIdComptimeInt) { + op = CastOpIntToFloat; + } else { + op = CastOpNumLitToConcrete; + } + IrInstruction *result = ir_resolve_cast(ira, &instruction->base, target, dest_type, op, false); + if (type_is_invalid(result->value.type)) + return ira->codegen->builtin_types.entry_invalid; + ir_link_new_instruction(result, &instruction->base); + return dest_type; + } else { + return ira->codegen->builtin_types.entry_invalid; + } + } + + if (target->value.type->id != TypeTableEntryIdFloat) { + ir_add_error(ira, instruction->target, buf_sprintf("expected float type, found '%s'", + buf_ptr(&target->value.type->name))); + return ira->codegen->builtin_types.entry_invalid; + } + + IrInstruction *result = ir_analyze_widen_or_shorten(ira, &instruction->base, target, dest_type); + if (type_is_invalid(result->value.type)) + return ira->codegen->builtin_types.entry_invalid; + ir_link_new_instruction(result, &instruction->base); + return dest_type; +} + +static TypeTableEntry *ir_analyze_instruction_err_set_cast(IrAnalyze *ira, IrInstructionErrSetCast *instruction) { + TypeTableEntry *dest_type = ir_resolve_type(ira, instruction->dest_type->other); + if (type_is_invalid(dest_type)) + return ira->codegen->builtin_types.entry_invalid; + + if (dest_type->id != TypeTableEntryIdErrorSet) { + ir_add_error(ira, instruction->dest_type, + buf_sprintf("expected error set type, found '%s'", buf_ptr(&dest_type->name))); + return ira->codegen->builtin_types.entry_invalid; + } + + IrInstruction *target = instruction->target->other; + if (type_is_invalid(target->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + if (target->value.type->id != TypeTableEntryIdErrorSet) { + ir_add_error(ira, instruction->target, + 
buf_sprintf("expected error set type, found '%s'", buf_ptr(&target->value.type->name))); + return ira->codegen->builtin_types.entry_invalid; + } + + IrInstruction *result = ir_analyze_err_set_cast(ira, &instruction->base, target, dest_type); + if (type_is_invalid(result->value.type)) + return ira->codegen->builtin_types.entry_invalid; + ir_link_new_instruction(result, &instruction->base); + return dest_type; +} + +static TypeTableEntry *ir_analyze_instruction_from_bytes(IrAnalyze *ira, IrInstructionFromBytes *instruction) { + TypeTableEntry *dest_child_type = ir_resolve_type(ira, instruction->dest_child_type->other); + if (type_is_invalid(dest_child_type)) + return ira->codegen->builtin_types.entry_invalid; + + IrInstruction *target = instruction->target->other; + if (type_is_invalid(target->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + bool src_ptr_const; + bool src_ptr_volatile; + uint32_t src_ptr_align; + if (target->value.type->id == TypeTableEntryIdPointer) { + src_ptr_const = target->value.type->data.pointer.is_const; + src_ptr_volatile = target->value.type->data.pointer.is_volatile; + src_ptr_align = target->value.type->data.pointer.alignment; + } else if (is_slice(target->value.type)) { + TypeTableEntry *src_ptr_type = target->value.type->data.structure.fields[slice_ptr_index].type_entry; + src_ptr_const = src_ptr_type->data.pointer.is_const; + src_ptr_volatile = src_ptr_type->data.pointer.is_volatile; + src_ptr_align = src_ptr_type->data.pointer.alignment; + } else { + src_ptr_const = true; + src_ptr_volatile = false; + src_ptr_align = get_abi_alignment(ira->codegen, target->value.type); + } + + TypeTableEntry *dest_ptr_type = get_pointer_to_type_extra(ira->codegen, dest_child_type, + src_ptr_const, src_ptr_volatile, PtrLenUnknown, + src_ptr_align, 0, 0); + TypeTableEntry *dest_slice_type = get_slice_type(ira->codegen, dest_ptr_type); + + TypeTableEntry *u8_ptr = get_pointer_to_type_extra(ira->codegen, ira->codegen->builtin_types.entry_u8, + src_ptr_const, src_ptr_volatile, PtrLenUnknown, + src_ptr_align, 0, 0); + TypeTableEntry *u8_slice = get_slice_type(ira->codegen, u8_ptr); + + IrInstruction *casted_value = ir_implicit_cast(ira, target, u8_slice); + if (type_is_invalid(casted_value->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + bool have_known_len = false; + uint64_t known_len; + + if (instr_is_comptime(casted_value)) { + ConstExprValue *val = ir_resolve_const(ira, casted_value, UndefBad); + if (!val) + return ira->codegen->builtin_types.entry_invalid; + + ConstExprValue *len_val = &val->data.x_struct.fields[slice_len_index]; + if (value_is_comptime(len_val)) { + known_len = bigint_as_unsigned(&len_val->data.x_bigint); + have_known_len = true; + } + } + + if (casted_value->value.data.rh_slice.id == RuntimeHintSliceIdLen) { + known_len = casted_value->value.data.rh_slice.len; + have_known_len = true; + } + + if (have_known_len) { + uint64_t child_type_size = type_size(ira->codegen, dest_child_type); + uint64_t remainder = known_len % child_type_size; + if (remainder != 0) { + ErrorMsg *msg = ir_add_error(ira, &instruction->base, + buf_sprintf("unable to convert [%" ZIG_PRI_u64 "]u8 to %s: size mismatch", + known_len, buf_ptr(&dest_slice_type->name))); + add_error_note(ira->codegen, msg, instruction->dest_child_type->source_node, + buf_sprintf("%s has size %" ZIG_PRI_u64 "; remaining bytes: %" ZIG_PRI_u64, + buf_ptr(&dest_child_type->name), child_type_size, remainder)); + return ira->codegen->builtin_types.entry_invalid; + } + } + + 
IrInstruction *result = ir_resolve_cast(ira, &instruction->base, casted_value, dest_slice_type, CastOpResizeSlice, true); + ir_link_new_instruction(result, &instruction->base); + return dest_slice_type; +} + +static TypeTableEntry *ir_analyze_instruction_to_bytes(IrAnalyze *ira, IrInstructionToBytes *instruction) { + IrInstruction *target = instruction->target->other; + if (type_is_invalid(target->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + if (!is_slice(target->value.type)) { + ir_add_error(ira, instruction->target, + buf_sprintf("expected slice, found '%s'", buf_ptr(&target->value.type->name))); + return ira->codegen->builtin_types.entry_invalid; + } + + TypeTableEntry *src_ptr_type = target->value.type->data.structure.fields[slice_ptr_index].type_entry; + + TypeTableEntry *dest_ptr_type = get_pointer_to_type_extra(ira->codegen, ira->codegen->builtin_types.entry_u8, + src_ptr_type->data.pointer.is_const, src_ptr_type->data.pointer.is_volatile, PtrLenUnknown, + src_ptr_type->data.pointer.alignment, 0, 0); + TypeTableEntry *dest_slice_type = get_slice_type(ira->codegen, dest_ptr_type); + + IrInstruction *result = ir_resolve_cast(ira, &instruction->base, target, dest_slice_type, CastOpResizeSlice, true); + ir_link_new_instruction(result, &instruction->base); + return dest_slice_type; +} + +static TypeTableEntry *ir_analyze_instruction_int_to_float(IrAnalyze *ira, IrInstructionIntToFloat *instruction) { + TypeTableEntry *dest_type = ir_resolve_type(ira, instruction->dest_type->other); + if (type_is_invalid(dest_type)) + return ira->codegen->builtin_types.entry_invalid; + + IrInstruction *target = instruction->target->other; + if (type_is_invalid(target->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + if (target->value.type->id != TypeTableEntryIdInt && target->value.type->id != TypeTableEntryIdComptimeInt) { + ir_add_error(ira, instruction->target, buf_sprintf("expected int type, found '%s'", + buf_ptr(&target->value.type->name))); + return ira->codegen->builtin_types.entry_invalid; + } + + IrInstruction *result = ir_resolve_cast(ira, &instruction->base, target, dest_type, CastOpIntToFloat, false); + ir_link_new_instruction(result, &instruction->base); + return dest_type; +} + +static TypeTableEntry *ir_analyze_instruction_float_to_int(IrAnalyze *ira, IrInstructionFloatToInt *instruction) { + TypeTableEntry *dest_type = ir_resolve_type(ira, instruction->dest_type->other); + if (type_is_invalid(dest_type)) + return ira->codegen->builtin_types.entry_invalid; + + IrInstruction *target = instruction->target->other; + if (type_is_invalid(target->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + IrInstruction *result = ir_resolve_cast(ira, &instruction->base, target, dest_type, CastOpFloatToInt, false); + ir_link_new_instruction(result, &instruction->base); + return dest_type; +} + +static TypeTableEntry *ir_analyze_instruction_err_to_int(IrAnalyze *ira, IrInstructionErrToInt *instruction) { + IrInstruction *target = instruction->target->other; + if (type_is_invalid(target->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + IrInstruction *casted_target; + if (target->value.type->id == TypeTableEntryIdErrorSet) { + casted_target = target; + } else { + casted_target = ir_implicit_cast(ira, target, ira->codegen->builtin_types.entry_global_error_set); + if (type_is_invalid(casted_target->value.type)) + return ira->codegen->builtin_types.entry_invalid; + } + + IrInstruction *result = ir_analyze_err_to_int(ira, 
&instruction->base, casted_target, ira->codegen->err_tag_type); + ir_link_new_instruction(result, &instruction->base); + return result->value.type; +} + +static TypeTableEntry *ir_analyze_instruction_int_to_err(IrAnalyze *ira, IrInstructionIntToErr *instruction) { + IrInstruction *target = instruction->target->other; + if (type_is_invalid(target->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + IrInstruction *casted_target = ir_implicit_cast(ira, target, ira->codegen->err_tag_type); + if (type_is_invalid(casted_target->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + IrInstruction *result = ir_analyze_int_to_err(ira, &instruction->base, casted_target, ira->codegen->builtin_types.entry_global_error_set); + ir_link_new_instruction(result, &instruction->base); + return result->value.type; +} + +static TypeTableEntry *ir_analyze_instruction_bool_to_int(IrAnalyze *ira, IrInstructionBoolToInt *instruction) { + IrInstruction *target = instruction->target->other; + if (type_is_invalid(target->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + if (target->value.type->id != TypeTableEntryIdBool) { + ir_add_error(ira, instruction->target, buf_sprintf("expected bool, found '%s'", + buf_ptr(&target->value.type->name))); + return ira->codegen->builtin_types.entry_invalid; + } + + if (instr_is_comptime(target)) { + bool is_true; + if (!ir_resolve_bool(ira, target, &is_true)) + return ira->codegen->builtin_types.entry_invalid; + + ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); + bigint_init_unsigned(&out_val->data.x_bigint, is_true ? 1 : 0); + return ira->codegen->builtin_types.entry_num_lit_int; + } + + TypeTableEntry *u1_type = get_int_type(ira->codegen, false, 1); + IrInstruction *result = ir_resolve_cast(ira, &instruction->base, target, u1_type, CastOpBoolToInt, false); + ir_link_new_instruction(result, &instruction->base); + return u1_type; +} + static TypeTableEntry *ir_analyze_instruction_int_type(IrAnalyze *ira, IrInstructionIntType *instruction) { IrInstruction *is_signed_value = instruction->is_signed->other; bool is_signed; @@ -15805,7 +18371,7 @@ static TypeTableEntry *ir_analyze_instruction_int_type(IrAnalyze *ira, IrInstruc IrInstruction *bit_count_value = instruction->bit_count->other; uint64_t bit_count; - if (!ir_resolve_usize(ira, bit_count_value, &bit_count)) + if (!ir_resolve_unsigned(ira, bit_count_value, ira->codegen->builtin_types.entry_u32, &bit_count)) return ira->codegen->builtin_types.entry_invalid; ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); @@ -15824,9 +18390,13 @@ static TypeTableEntry *ir_analyze_instruction_bool_not(IrAnalyze *ira, IrInstruc if (type_is_invalid(casted_value->value.type)) return ira->codegen->builtin_types.entry_invalid; - if (casted_value->value.special != ConstValSpecialRuntime) { + if (instr_is_comptime(casted_value)) { + ConstExprValue *value = ir_resolve_const(ira, casted_value, UndefBad); + if (value == nullptr) + return ira->codegen->builtin_types.entry_invalid; + ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); - out_val->data.x_bool = !casted_value->value.data.x_bool; + out_val->data.x_bool = !value->data.x_bool; return bool_type; } @@ -15855,7 +18425,8 @@ static TypeTableEntry *ir_analyze_instruction_memset(IrAnalyze *ira, IrInstructi TypeTableEntry *u8 = ira->codegen->builtin_types.entry_u8; uint32_t dest_align = (dest_uncasted_type->id == TypeTableEntryIdPointer) ? 
dest_uncasted_type->data.pointer.alignment : get_abi_alignment(ira->codegen, u8); - TypeTableEntry *u8_ptr = get_pointer_to_type_extra(ira->codegen, u8, false, dest_is_volatile, dest_align, 0, 0); + TypeTableEntry *u8_ptr = get_pointer_to_type_extra(ira->codegen, u8, false, dest_is_volatile, + PtrLenUnknown, dest_align, 0, 0); IrInstruction *casted_dest_ptr = ir_implicit_cast(ira, dest_ptr, u8_ptr); if (type_is_invalid(casted_dest_ptr->value.type)) @@ -15951,8 +18522,10 @@ static TypeTableEntry *ir_analyze_instruction_memcpy(IrAnalyze *ira, IrInstructi src_uncasted_type->data.pointer.alignment : get_abi_alignment(ira->codegen, u8); TypeTableEntry *usize = ira->codegen->builtin_types.entry_usize; - TypeTableEntry *u8_ptr_mut = get_pointer_to_type_extra(ira->codegen, u8, false, dest_is_volatile, dest_align, 0, 0); - TypeTableEntry *u8_ptr_const = get_pointer_to_type_extra(ira->codegen, u8, true, src_is_volatile, src_align, 0, 0); + TypeTableEntry *u8_ptr_mut = get_pointer_to_type_extra(ira->codegen, u8, false, dest_is_volatile, + PtrLenUnknown, dest_align, 0, 0); + TypeTableEntry *u8_ptr_const = get_pointer_to_type_extra(ira->codegen, u8, true, src_is_volatile, + PtrLenUnknown, src_align, 0, 0); IrInstruction *casted_dest_ptr = ir_implicit_cast(ira, dest_ptr, u8_ptr_mut); if (type_is_invalid(casted_dest_ptr->value.type)) @@ -16095,18 +18668,38 @@ static TypeTableEntry *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstructio if (array_type->data.array.len == 0 && byte_alignment == 0) { byte_alignment = get_abi_alignment(ira->codegen, array_type->data.array.child_type); } + bool is_comptime_const = ptr_ptr->value.special == ConstValSpecialStatic && + ptr_ptr->value.data.x_ptr.mut == ConstPtrMutComptimeConst; TypeTableEntry *slice_ptr_type = get_pointer_to_type_extra(ira->codegen, array_type->data.array.child_type, - ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile, + ptr_type->data.pointer.is_const || is_comptime_const, + ptr_type->data.pointer.is_volatile, + PtrLenUnknown, byte_alignment, 0, 0); return_type = get_slice_type(ira->codegen, slice_ptr_type); } else if (array_type->id == TypeTableEntryIdPointer) { - TypeTableEntry *slice_ptr_type = get_pointer_to_type_extra(ira->codegen, array_type->data.pointer.child_type, - array_type->data.pointer.is_const, array_type->data.pointer.is_volatile, - array_type->data.pointer.alignment, 0, 0); - return_type = get_slice_type(ira->codegen, slice_ptr_type); - if (!end) { - ir_add_error(ira, &instruction->base, buf_sprintf("slice of pointer must include end value")); - return ira->codegen->builtin_types.entry_invalid; + if (array_type->data.pointer.ptr_len == PtrLenSingle) { + TypeTableEntry *main_type = array_type->data.pointer.child_type; + if (main_type->id == TypeTableEntryIdArray) { + TypeTableEntry *slice_ptr_type = get_pointer_to_type_extra(ira->codegen, + main_type->data.pointer.child_type, + array_type->data.pointer.is_const, array_type->data.pointer.is_volatile, + PtrLenUnknown, + array_type->data.pointer.alignment, 0, 0); + return_type = get_slice_type(ira->codegen, slice_ptr_type); + } else { + ir_add_error(ira, &instruction->base, buf_sprintf("slice of single-item pointer")); + return ira->codegen->builtin_types.entry_invalid; + } + } else { + TypeTableEntry *slice_ptr_type = get_pointer_to_type_extra(ira->codegen, array_type->data.pointer.child_type, + array_type->data.pointer.is_const, array_type->data.pointer.is_volatile, + PtrLenUnknown, + array_type->data.pointer.alignment, 0, 0); + return_type = 
get_slice_type(ira->codegen, slice_ptr_type); + if (!end) { + ir_add_error(ira, &instruction->base, buf_sprintf("slice of pointer must include end value")); + return ira->codegen->builtin_types.entry_invalid; + } } } else if (is_slice(array_type)) { TypeTableEntry *ptr_type = array_type->data.structure.fields[slice_ptr_index].type_entry; @@ -16126,12 +18719,24 @@ static TypeTableEntry *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstructio size_t abs_offset; size_t rel_end; bool ptr_is_undef = false; - if (array_type->id == TypeTableEntryIdArray) { - array_val = const_ptr_pointee(ira->codegen, &ptr_ptr->value); - abs_offset = 0; - rel_end = array_type->data.array.len; - parent_ptr = nullptr; + if (array_type->id == TypeTableEntryIdArray || + (array_type->id == TypeTableEntryIdPointer && array_type->data.pointer.ptr_len == PtrLenSingle)) + { + if (array_type->id == TypeTableEntryIdPointer) { + TypeTableEntry *child_array_type = array_type->data.pointer.child_type; + assert(child_array_type->id == TypeTableEntryIdArray); + parent_ptr = const_ptr_pointee(ira->codegen, &ptr_ptr->value); + array_val = const_ptr_pointee(ira->codegen, parent_ptr); + rel_end = child_array_type->data.array.len; + abs_offset = 0; + } else { + array_val = const_ptr_pointee(ira->codegen, &ptr_ptr->value); + rel_end = array_type->data.array.len; + parent_ptr = nullptr; + abs_offset = 0; + } } else if (array_type->id == TypeTableEntryIdPointer) { + assert(array_type->data.pointer.ptr_len == PtrLenUnknown); parent_ptr = const_ptr_pointee(ira->codegen, &ptr_ptr->value); if (parent_ptr->special == ConstValSpecialUndef) { array_val = nullptr; @@ -16230,7 +18835,7 @@ static TypeTableEntry *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstructio if (array_val) { size_t index = abs_offset + start_scalar; bool is_const = slice_is_const(return_type); - init_const_ptr_array(ira->codegen, ptr_val, array_val, index, is_const); + init_const_ptr_array(ira->codegen, ptr_val, array_val, index, is_const, PtrLenUnknown); if (array_type->id == TypeTableEntryIdArray) { ptr_val->data.x_ptr.mut = ptr_ptr->value.data.x_ptr.mut; } else if (is_slice(array_type)) { @@ -16283,6 +18888,10 @@ static TypeTableEntry *ir_analyze_instruction_member_count(IrAnalyze *ira, IrIns return ira->codegen->builtin_types.entry_invalid; TypeTableEntry *container_type = ir_resolve_type(ira, container); + ensure_complete_type(ira->codegen, container_type); + if (type_is_invalid(container_type)) + return ira->codegen->builtin_types.entry_invalid; + uint64_t result; if (type_is_invalid(container_type)) { return ira->codegen->builtin_types.entry_invalid; @@ -16293,7 +18902,7 @@ static TypeTableEntry *ir_analyze_instruction_member_count(IrAnalyze *ira, IrIns } else if (container_type->id == TypeTableEntryIdUnion) { result = container_type->data.unionation.src_field_count; } else if (container_type->id == TypeTableEntryIdErrorSet) { - if (!resolve_inferred_error_set(ira, container_type, instruction->base.source_node)) { + if (!resolve_inferred_error_set(ira->codegen, container_type, instruction->base.source_node)) { return ira->codegen->builtin_types.entry_invalid; } if (type_is_global_error_set(container_type)) { @@ -16317,6 +18926,11 @@ static TypeTableEntry *ir_analyze_instruction_member_type(IrAnalyze *ira, IrInst if (type_is_invalid(container_type)) return ira->codegen->builtin_types.entry_invalid; + ensure_complete_type(ira->codegen, container_type); + if (type_is_invalid(container_type)) + return ira->codegen->builtin_types.entry_invalid; + + uint64_t 
member_index; IrInstruction *index_value = instruction->member_index->other; if (!ir_resolve_usize(ira, index_value, &member_index)) @@ -16359,6 +18973,10 @@ static TypeTableEntry *ir_analyze_instruction_member_name(IrAnalyze *ira, IrInst if (type_is_invalid(container_type)) return ira->codegen->builtin_types.entry_invalid; + ensure_complete_type(ira->codegen, container_type); + if (type_is_invalid(container_type)) + return ira->codegen->builtin_types.entry_invalid; + uint64_t member_index; IrInstruction *index_value = instruction->member_index->other; if (!ir_resolve_usize(ira, index_value, &member_index)) @@ -16428,6 +19046,14 @@ static TypeTableEntry *ir_analyze_instruction_frame_address(IrAnalyze *ira, IrIn return u8_ptr_const; } +static TypeTableEntry *ir_analyze_instruction_handle(IrAnalyze *ira, IrInstructionHandle *instruction) { + ir_build_handle_from(&ira->new_irb, &instruction->base); + + FnTableEntry *fn_entry = exec_fn_entry(ira->new_irb.exec); + assert(fn_entry != nullptr); + return get_promise_type(ira->codegen, fn_entry->type_entry->data.fn.fn_type_id.return_type); +} + static TypeTableEntry *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstructionAlignOf *instruction) { IrInstruction *type_value = instruction->type_value->other; if (type_is_invalid(type_value->value.type)) @@ -16443,10 +19069,10 @@ static TypeTableEntry *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstruc zig_unreachable(); case TypeTableEntryIdMetaType: case TypeTableEntryIdUnreachable: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: case TypeTableEntryIdBoundFn: @@ -16463,7 +19089,7 @@ static TypeTableEntry *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstruc case TypeTableEntryIdPromise: case TypeTableEntryIdArray: case TypeTableEntryIdStruct: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdEnum: @@ -16525,6 +19151,7 @@ static TypeTableEntry *ir_analyze_instruction_overflow_op(IrAnalyze *ira, IrInst if (result_ptr->value.type->id == TypeTableEntryIdPointer) { expected_ptr_type = get_pointer_to_type_extra(ira->codegen, dest_type, false, result_ptr->value.type->data.pointer.is_volatile, + PtrLenSingle, result_ptr->value.type->data.pointer.alignment, 0, 0); } else { expected_ptr_type = get_pointer_to_type(ira->codegen, dest_type, false); @@ -16597,7 +19224,7 @@ static TypeTableEntry *ir_analyze_instruction_test_err(IrAnalyze *ira, IrInstruc } TypeTableEntry *err_set_type = type_entry->data.error_union.err_set_type; - if (!resolve_inferred_error_set(ira, err_set_type, instruction->base.source_node)) { + if (!resolve_inferred_error_set(ira->codegen, err_set_type, instruction->base.source_node)) { return ira->codegen->builtin_types.entry_invalid; } if (!type_is_global_error_set(err_set_type) && @@ -16678,8 +19305,12 @@ static TypeTableEntry *ir_analyze_instruction_unwrap_err_payload(IrAnalyze *ira, return ira->codegen->builtin_types.entry_invalid; } else if (type_entry->id == TypeTableEntryIdErrorUnion) { TypeTableEntry *payload_type = type_entry->data.error_union.payload_type; + if (type_is_invalid(payload_type)) { + return ira->codegen->builtin_types.entry_invalid; + } TypeTableEntry *result_type = 
get_pointer_to_type_extra(ira->codegen, payload_type, ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile, + PtrLenSingle, get_abi_alignment(ira->codegen, payload_type), 0, 0); if (instr_is_comptime(value)) { ConstExprValue *ptr_val = ir_resolve_const(ira, value, UndefBad); @@ -16769,6 +19400,11 @@ static TypeTableEntry *ir_analyze_instruction_fn_proto(IrAnalyze *ira, IrInstruc fn_type_id.return_type = ir_resolve_type(ira, return_type_value); if (type_is_invalid(fn_type_id.return_type)) return ira->codegen->builtin_types.entry_invalid; + if (fn_type_id.return_type->id == TypeTableEntryIdOpaque) { + ir_add_error(ira, instruction->return_type, + buf_sprintf("return type cannot be opaque")); + return ira->codegen->builtin_types.entry_invalid; + } if (fn_type_id.cc == CallingConventionAsync) { if (instruction->async_allocator_type_value == nullptr) { @@ -16820,7 +19456,11 @@ static TypeTableEntry *ir_analyze_instruction_check_switch_prongs(IrAnalyze *ira if (type_is_invalid(end_value->value.type)) return ira->codegen->builtin_types.entry_invalid; - assert(start_value->value.type->id == TypeTableEntryIdEnum); + if (start_value->value.type->id != TypeTableEntryIdEnum) { + ir_add_error(ira, range->start, buf_sprintf("not an enum type")); + return ira->codegen->builtin_types.entry_invalid; + } + BigInt start_index; bigint_init_bigint(&start_index, &start_value->value.data.x_enum_tag); @@ -16861,7 +19501,7 @@ static TypeTableEntry *ir_analyze_instruction_check_switch_prongs(IrAnalyze *ira } } } else if (switch_type->id == TypeTableEntryIdErrorSet) { - if (!resolve_inferred_error_set(ira, switch_type, target_value->source_node)) { + if (!resolve_inferred_error_set(ira->codegen, switch_type, target_value->source_node)) { return ira->codegen->builtin_types.entry_invalid; } @@ -16925,21 +19565,27 @@ static TypeTableEntry *ir_analyze_instruction_check_switch_prongs(IrAnalyze *ira IrInstruction *start_value = range->start->other; if (type_is_invalid(start_value->value.type)) return ira->codegen->builtin_types.entry_invalid; + IrInstruction *casted_start_value = ir_implicit_cast(ira, start_value, switch_type); + if (type_is_invalid(casted_start_value->value.type)) + return ira->codegen->builtin_types.entry_invalid; IrInstruction *end_value = range->end->other; if (type_is_invalid(end_value->value.type)) return ira->codegen->builtin_types.entry_invalid; + IrInstruction *casted_end_value = ir_implicit_cast(ira, end_value, switch_type); + if (type_is_invalid(casted_end_value->value.type)) + return ira->codegen->builtin_types.entry_invalid; - ConstExprValue *start_val = ir_resolve_const(ira, start_value, UndefBad); + ConstExprValue *start_val = ir_resolve_const(ira, casted_start_value, UndefBad); if (!start_val) return ira->codegen->builtin_types.entry_invalid; - ConstExprValue *end_val = ir_resolve_const(ira, end_value, UndefBad); + ConstExprValue *end_val = ir_resolve_const(ira, casted_end_value, UndefBad); if (!end_val) return ira->codegen->builtin_types.entry_invalid; - assert(start_val->type->id == TypeTableEntryIdInt || start_val->type->id == TypeTableEntryIdNumLitInt); - assert(end_val->type->id == TypeTableEntryIdInt || end_val->type->id == TypeTableEntryIdNumLitInt); + assert(start_val->type->id == TypeTableEntryIdInt || start_val->type->id == TypeTableEntryIdComptimeInt); + assert(end_val->type->id == TypeTableEntryIdInt || end_val->type->id == TypeTableEntryIdComptimeInt); AstNode *prev_node = rangeset_add_range(&rs, &start_val->data.x_bigint, &end_val->data.x_bigint, 
start_value->source_node); if (prev_node != nullptr) { @@ -16983,30 +19629,6 @@ static TypeTableEntry *ir_analyze_instruction_check_statement_is_void(IrAnalyze return ira->codegen->builtin_types.entry_void; } -static TypeTableEntry *ir_analyze_instruction_can_implicit_cast(IrAnalyze *ira, - IrInstructionCanImplicitCast *instruction) -{ - IrInstruction *type_value = instruction->type_value->other; - TypeTableEntry *type_entry = ir_resolve_type(ira, type_value); - if (type_is_invalid(type_entry)) - return ira->codegen->builtin_types.entry_invalid; - - IrInstruction *target_value = instruction->target_value->other; - if (type_is_invalid(target_value->value.type)) - return ira->codegen->builtin_types.entry_invalid; - - ImplicitCastMatchResult result = ir_types_match_with_implicit_cast(ira, type_entry, target_value->value.type, - target_value); - - if (result == ImplicitCastMatchResultReportedError) { - zig_panic("TODO refactor implicit cast tester to return bool without reporting errors"); - } - - ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); - out_val->data.x_bool = (result == ImplicitCastMatchResultYes); - return ira->codegen->builtin_types.entry_bool; -} - static TypeTableEntry *ir_analyze_instruction_panic(IrAnalyze *ira, IrInstructionPanic *instruction) { IrInstruction *msg = instruction->msg->other; if (type_is_invalid(msg->value.type)) @@ -17017,7 +19639,8 @@ static TypeTableEntry *ir_analyze_instruction_panic(IrAnalyze *ira, IrInstructio return ir_unreach_error(ira); } - TypeTableEntry *u8_ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, true); + TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra(ira->codegen, ira->codegen->builtin_types.entry_u8, + true, false, PtrLenUnknown, get_abi_alignment(ira->codegen, ira->codegen->builtin_types.entry_u8), 0, 0); TypeTableEntry *str_type = get_slice_type(ira->codegen, u8_ptr_type); IrInstruction *casted_msg = ir_implicit_cast(ira, msg, str_type); if (type_is_invalid(casted_msg->value.type)) @@ -17044,22 +19667,22 @@ static IrInstruction *ir_align_cast(IrAnalyze *ira, IrInstruction *target, uint3 old_align_bytes = fn_type_id.alignment; fn_type_id.alignment = align_bytes; result_type = get_fn_type(ira->codegen, &fn_type_id); - } else if (target_type->id == TypeTableEntryIdMaybe && + } else if (target_type->id == TypeTableEntryIdOptional && target_type->data.maybe.child_type->id == TypeTableEntryIdPointer) { TypeTableEntry *ptr_type = target_type->data.maybe.child_type; old_align_bytes = ptr_type->data.pointer.alignment; TypeTableEntry *better_ptr_type = adjust_ptr_align(ira->codegen, ptr_type, align_bytes); - result_type = get_maybe_type(ira->codegen, better_ptr_type); - } else if (target_type->id == TypeTableEntryIdMaybe && + result_type = get_optional_type(ira->codegen, better_ptr_type); + } else if (target_type->id == TypeTableEntryIdOptional && target_type->data.maybe.child_type->id == TypeTableEntryIdFn) { FnTypeId fn_type_id = target_type->data.maybe.child_type->data.fn.fn_type_id; old_align_bytes = fn_type_id.alignment; fn_type_id.alignment = align_bytes; TypeTableEntry *fn_type = get_fn_type(ira->codegen, &fn_type_id); - result_type = get_maybe_type(ira->codegen, fn_type); + result_type = get_optional_type(ira->codegen, fn_type); } else if (is_slice(target_type)) { TypeTableEntry *slice_ptr_type = target_type->data.structure.fields[slice_ptr_index].type_entry; old_align_bytes = slice_ptr_type->data.pointer.alignment; @@ -17076,6 +19699,15 @@ static IrInstruction 
*ir_align_cast(IrAnalyze *ira, IrInstruction *target, uint3 if (!val) return ira->codegen->invalid_instruction; + if (val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr && + val->data.x_ptr.data.hard_coded_addr.addr % align_bytes != 0) + { + ir_add_error(ira, target, + buf_sprintf("pointer address 0x%" ZIG_PRI_x64 " is not aligned to %" PRIu32 " bytes", + val->data.x_ptr.data.hard_coded_addr.addr, align_bytes)); + return ira->codegen->invalid_instruction; + } + IrInstruction *result = ir_create_const(&ira->new_irb, target->scope, target->source_node, result_type); copy_const_val(&result->value, val, false); result->value.type = result_type; @@ -17170,10 +19802,10 @@ static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: case TypeTableEntryIdUnreachable: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: case TypeTableEntryIdPromise: zig_unreachable(); case TypeTableEntryIdVoid: @@ -17210,7 +19842,7 @@ static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue return; case TypeTableEntryIdStruct: zig_panic("TODO buf_write_value_bytes struct type"); - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: zig_panic("TODO buf_write_value_bytes maybe type"); case TypeTableEntryIdErrorUnion: zig_panic("TODO buf_write_value_bytes error union"); @@ -17237,10 +19869,10 @@ static void buf_read_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: case TypeTableEntryIdUnreachable: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: case TypeTableEntryIdPromise: zig_unreachable(); case TypeTableEntryIdVoid: @@ -17268,7 +19900,7 @@ static void buf_read_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue zig_panic("TODO buf_read_value_bytes array type"); case TypeTableEntryIdStruct: zig_panic("TODO buf_read_value_bytes struct type"); - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: zig_panic("TODO buf_read_value_bytes maybe type"); case TypeTableEntryIdErrorUnion: zig_panic("TODO buf_read_value_bytes error union"); @@ -17296,7 +19928,12 @@ static TypeTableEntry *ir_analyze_instruction_bit_cast(IrAnalyze *ira, IrInstruc return ira->codegen->builtin_types.entry_invalid; ensure_complete_type(ira->codegen, dest_type); + if (type_is_invalid(dest_type)) + return ira->codegen->builtin_types.entry_invalid; + ensure_complete_type(ira->codegen, src_type); + if (type_is_invalid(src_type)) + return ira->codegen->builtin_types.entry_invalid; if (get_codegen_ptr_type(src_type) != nullptr) { ir_add_error(ira, value, @@ -17313,10 +19950,10 @@ static TypeTableEntry *ir_analyze_instruction_bit_cast(IrAnalyze *ira, IrInstruc case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: case TypeTableEntryIdUnreachable: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: 
ir_add_error(ira, dest_type_value, buf_sprintf("unable to @bitCast from type '%s'", buf_ptr(&src_type->name))); return ira->codegen->builtin_types.entry_invalid; @@ -17339,10 +19976,10 @@ static TypeTableEntry *ir_analyze_instruction_bit_cast(IrAnalyze *ira, IrInstruc case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: case TypeTableEntryIdUnreachable: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: - case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: ir_add_error(ira, dest_type_value, buf_sprintf("unable to @bitCast to type '%s'", buf_ptr(&dest_type->name))); return ira->codegen->builtin_types.entry_invalid; @@ -17429,7 +20066,7 @@ static TypeTableEntry *ir_analyze_instruction_decl_ref(IrAnalyze *ira, Tld *tld = instruction->tld; LVal lval = instruction->lval; - resolve_top_level_decl(ira->codegen, tld, lval.is_ptr, instruction->base.source_node); + resolve_top_level_decl(ira->codegen, tld, lval == LValPtr, instruction->base.source_node); if (tld->resolution == TldResolutionInvalid) return ira->codegen->builtin_types.entry_invalid; @@ -17442,8 +20079,7 @@ static TypeTableEntry *ir_analyze_instruction_decl_ref(IrAnalyze *ira, TldVar *tld_var = (TldVar *)tld; VariableTableEntry *var = tld_var->var; - IrInstruction *var_ptr = ir_get_var_ptr(ira, &instruction->base, var, - !lval.is_ptr || lval.is_const, lval.is_ptr && lval.is_volatile); + IrInstruction *var_ptr = ir_get_var_ptr(ira, &instruction->base, var); if (type_is_invalid(var_ptr->value.type)) return ira->codegen->builtin_types.entry_invalid; @@ -17451,7 +20087,7 @@ static TypeTableEntry *ir_analyze_instruction_decl_ref(IrAnalyze *ira, add_link_lib_symbol(ira, tld_var->extern_lib_name, &var->name, instruction->base.source_node); } - if (lval.is_ptr) { + if (lval == LValPtr) { ir_link_new_instruction(var_ptr, &instruction->base); return var_ptr->value.type; } else { @@ -17472,7 +20108,7 @@ static TypeTableEntry *ir_analyze_instruction_decl_ref(IrAnalyze *ira, IrInstruction *ref_instruction = ir_create_const_fn(&ira->new_irb, instruction->base.scope, instruction->base.source_node, fn_entry); - if (lval.is_ptr) { + if (lval == LValPtr) { IrInstruction *ptr_instr = ir_get_ref(ira, &instruction->base, ref_instruction, true, false); ir_link_new_instruction(ptr_instr, &instruction->base); return ptr_instr->value.type; @@ -17498,13 +20134,16 @@ static TypeTableEntry *ir_analyze_instruction_ptr_to_int(IrAnalyze *ira, IrInstr return ira->codegen->builtin_types.entry_invalid; } + if (!type_has_bits(target->value.type)) { + ir_add_error(ira, target, + buf_sprintf("pointer to size 0 type has no address")); + return ira->codegen->builtin_types.entry_invalid; + } + if (instr_is_comptime(target)) { ConstExprValue *val = ir_resolve_const(ira, target, UndefBad); if (!val) return ira->codegen->builtin_types.entry_invalid; - if (target->value.type->id == TypeTableEntryIdMaybe) { - val = val->data.x_maybe; - } if (val->type->id == TypeTableEntryIdPointer && val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr) { IrInstruction *result = ir_create_const(&ira->new_irb, instruction->base.scope, instruction->base.source_node, usize); @@ -17521,22 +20160,34 @@ static TypeTableEntry *ir_analyze_instruction_ptr_to_int(IrAnalyze *ira, IrInstr return usize; } -static TypeTableEntry *ir_analyze_instruction_ptr_type_of(IrAnalyze *ira, IrInstructionPtrTypeOf *instruction) { +static 
TypeTableEntry *ir_analyze_instruction_ptr_type(IrAnalyze *ira, IrInstructionPtrType *instruction) { TypeTableEntry *child_type = ir_resolve_type(ira, instruction->child_type->other); if (type_is_invalid(child_type)) return ira->codegen->builtin_types.entry_invalid; + if (child_type->id == TypeTableEntryIdUnreachable) { + ir_add_error(ira, &instruction->base, buf_sprintf("pointer to noreturn not allowed")); + return ira->codegen->builtin_types.entry_invalid; + } else if (child_type->id == TypeTableEntryIdOpaque && instruction->ptr_len == PtrLenUnknown) { + ir_add_error(ira, &instruction->base, buf_sprintf("unknown-length pointer to opaque")); + return ira->codegen->builtin_types.entry_invalid; + } + uint32_t align_bytes; if (instruction->align_value != nullptr) { if (!ir_resolve_align(ira, instruction->align_value->other, &align_bytes)) return ira->codegen->builtin_types.entry_invalid; } else { + type_ensure_zero_bits_known(ira->codegen, child_type); + if (type_is_invalid(child_type)) + return ira->codegen->builtin_types.entry_invalid; align_bytes = get_abi_alignment(ira->codegen, child_type); } ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); out_val->data.x_type = get_pointer_to_type_extra(ira->codegen, child_type, - instruction->is_const, instruction->is_volatile, align_bytes, + instruction->is_const, instruction->is_volatile, + instruction->ptr_len, align_bytes, instruction->bit_offset_start, instruction->bit_offset_end - instruction->bit_offset_start); return ira->codegen->builtin_types.entry_type; @@ -17634,6 +20285,16 @@ static TypeTableEntry *ir_analyze_instruction_arg_type(IrAnalyze *ira, IrInstruc ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); out_val->data.x_type = fn_type_id->param_info[arg_index].type; + if (out_val->data.x_type == nullptr) { + // Args are only unresolved if our function is generic. 
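// [editor's note] Illustrative sketch, not part of the patch: the ptr_type analysis above enforces
// two new rules -- no pointer to noreturn, and no unknown-length pointer ([*]T) to an opaque type --
// and falls back to the child type's ABI alignment when no explicit align(N) is written. A
// standalone model of the validation step (all names are hypothetical stand-ins):
#include <string>

enum class PtrLen { Single, Unknown };             // *T vs [*]T, as in the diff's PtrLenUnknown
enum class ChildKind { Normal, NoReturn, Opaque };

// Returns an empty string on success, otherwise the error message the compiler would emit.
static std::string check_ptr_type(ChildKind child, PtrLen len) {
    if (child == ChildKind::NoReturn)
        return "pointer to noreturn not allowed";
    if (child == ChildKind::Opaque && len == PtrLen::Unknown)
        return "unknown-length pointer to opaque";
    return "";
}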
+ assert(fn_type->data.fn.is_generic); + + ir_add_error(ira, arg_index_inst, + buf_sprintf("@ArgType could not resolve the type of arg %" ZIG_PRI_u64 " because '%s' is generic", + arg_index, buf_ptr(&fn_type->name))); + return ira->codegen->builtin_types.entry_invalid; + } + return ira->codegen->builtin_types.entry_type; } @@ -17797,7 +20458,7 @@ static TypeTableEntry *ir_analyze_instruction_coro_free(IrAnalyze *ira, IrInstru instruction->base.source_node, coro_id, coro_handle); ir_link_new_instruction(result, &instruction->base); TypeTableEntry *ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, false); - result->value.type = get_maybe_type(ira->codegen, ptr_type); + result->value.type = get_optional_type(ira->codegen, ptr_type); return result->value.type; } @@ -17865,43 +20526,52 @@ static TypeTableEntry *ir_analyze_instruction_coro_alloc_helper(IrAnalyze *ira, instruction->base.source_node, alloc_fn, coro_size); ir_link_new_instruction(result, &instruction->base); TypeTableEntry *u8_ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, false); - result->value.type = get_maybe_type(ira->codegen, u8_ptr_type); + result->value.type = get_optional_type(ira->codegen, u8_ptr_type); return result->value.type; } -static TypeTableEntry *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstructionAtomicRmw *instruction) { - TypeTableEntry *operand_type = ir_resolve_type(ira, instruction->operand_type->other); - if (type_is_invalid(operand_type)) { +static TypeTableEntry *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstruction *op) { + TypeTableEntry *operand_type = ir_resolve_type(ira, op); + if (type_is_invalid(operand_type)) return ira->codegen->builtin_types.entry_invalid; - } + if (operand_type->id == TypeTableEntryIdInt) { if (operand_type->data.integral.bit_count < 8) { - ir_add_error(ira, &instruction->base, + ir_add_error(ira, op, buf_sprintf("expected integer type 8 bits or larger, found %" PRIu32 "-bit integer type", operand_type->data.integral.bit_count)); return ira->codegen->builtin_types.entry_invalid; } if (operand_type->data.integral.bit_count > ira->codegen->pointer_size_bytes * 8) { - ir_add_error(ira, &instruction->base, + ir_add_error(ira, op, buf_sprintf("expected integer type pointer size or smaller, found %" PRIu32 "-bit integer type", operand_type->data.integral.bit_count)); return ira->codegen->builtin_types.entry_invalid; } if (!is_power_of_2(operand_type->data.integral.bit_count)) { - ir_add_error(ira, &instruction->base, + ir_add_error(ira, op, buf_sprintf("%" PRIu32 "-bit integer type is not a power of 2", operand_type->data.integral.bit_count)); return ira->codegen->builtin_types.entry_invalid; } } else if (get_codegen_ptr_type(operand_type) == nullptr) { - ir_add_error(ira, &instruction->base, + ir_add_error(ira, op, buf_sprintf("expected integer or pointer type, found '%s'", buf_ptr(&operand_type->name))); return ira->codegen->builtin_types.entry_invalid; } + return operand_type; +} + +static TypeTableEntry *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstructionAtomicRmw *instruction) { + TypeTableEntry *operand_type = ir_resolve_atomic_operand_type(ira, instruction->operand_type->other); + if (type_is_invalid(operand_type)) + return ira->codegen->builtin_types.entry_invalid; + IrInstruction *ptr_inst = instruction->ptr->other; if (type_is_invalid(ptr_inst->value.type)) return ira->codegen->builtin_types.entry_invalid; + // TODO let this be volatile TypeTableEntry *ptr_type = 
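// [editor's note] Illustrative sketch, not part of the patch: ir_resolve_atomic_operand_type above
// is factored out so @atomicRmw and the new @atomicLoad share one operand check -- the type must be
// a pointer, or an integer whose bit count is at least 8, at most the target pointer size, and a
// power of two. (Separately, the hunks below reject Unordered for @atomicRmw and Release/AcqRel for
// @atomicLoad.) A standalone model of the integer rule (names hypothetical; pointer_size_bits
// stands in for pointer_size_bytes * 8):
#include <cstdint>

static bool is_power_of_2(uint32_t x) { return x != 0 && (x & (x - 1)) == 0; }

static bool int_ok_for_atomic(uint32_t bit_count, uint32_t pointer_size_bits) {
    return bit_count >= 8 &&
           bit_count <= pointer_size_bits &&
           is_power_of_2(bit_count);
}
// e.g. on a 64-bit target: 8/16/32/64-bit integers are accepted, 7-bit and 128-bit are rejected.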
get_pointer_to_type(ira->codegen, operand_type, false); IrInstruction *casted_ptr = ir_implicit_cast(ira, ptr_inst, ptr_type); if (type_is_invalid(casted_ptr->value.type)) @@ -17930,6 +20600,11 @@ static TypeTableEntry *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstr } else { if (!ir_resolve_atomic_order(ira, instruction->ordering->other, &ordering)) return ira->codegen->builtin_types.entry_invalid; + if (ordering == AtomicOrderUnordered) { + ir_add_error(ira, instruction->ordering, + buf_sprintf("@atomicRmw atomic ordering must not be Unordered")); + return ira->codegen->builtin_types.entry_invalid; + } } if (instr_is_comptime(casted_operand) && instr_is_comptime(casted_ptr) && casted_ptr->value.data.x_ptr.mut == ConstPtrMutComptimeVar) @@ -17945,6 +20620,49 @@ static TypeTableEntry *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstr return result->value.type; } +static TypeTableEntry *ir_analyze_instruction_atomic_load(IrAnalyze *ira, IrInstructionAtomicLoad *instruction) { + TypeTableEntry *operand_type = ir_resolve_atomic_operand_type(ira, instruction->operand_type->other); + if (type_is_invalid(operand_type)) + return ira->codegen->builtin_types.entry_invalid; + + IrInstruction *ptr_inst = instruction->ptr->other; + if (type_is_invalid(ptr_inst->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + TypeTableEntry *ptr_type = get_pointer_to_type(ira->codegen, operand_type, true); + IrInstruction *casted_ptr = ir_implicit_cast(ira, ptr_inst, ptr_type); + if (type_is_invalid(casted_ptr->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + AtomicOrder ordering; + if (instruction->ordering == nullptr) { + ordering = instruction->resolved_ordering; + } else { + if (!ir_resolve_atomic_order(ira, instruction->ordering->other, &ordering)) + return ira->codegen->builtin_types.entry_invalid; + } + + if (ordering == AtomicOrderRelease || ordering == AtomicOrderAcqRel) { + assert(instruction->ordering != nullptr); + ir_add_error(ira, instruction->ordering, + buf_sprintf("@atomicLoad atomic ordering must not be Release or AcqRel")); + return ira->codegen->builtin_types.entry_invalid; + } + + if (instr_is_comptime(casted_ptr)) { + IrInstruction *result = ir_get_deref(ira, &instruction->base, casted_ptr); + ir_link_new_instruction(result, &instruction->base); + assert(result->value.type != nullptr); + return result->value.type; + } + + IrInstruction *result = ir_build_atomic_load(&ira->new_irb, instruction->base.scope, + instruction->base.source_node, nullptr, casted_ptr, nullptr, ordering); + ir_link_new_instruction(result, &instruction->base); + result->value.type = operand_type; + return result->value.type; +} + static TypeTableEntry *ir_analyze_instruction_promise_result_type(IrAnalyze *ira, IrInstructionPromiseResultType *instruction) { TypeTableEntry *promise_type = ir_resolve_type(ira, instruction->promise_type->other); if (type_is_invalid(promise_type)) @@ -18031,18 +20749,135 @@ static TypeTableEntry *ir_analyze_instruction_mark_err_ret_trace_ptr(IrAnalyze * return result->value.type; } +static TypeTableEntry *ir_analyze_instruction_sqrt(IrAnalyze *ira, IrInstructionSqrt *instruction) { + TypeTableEntry *float_type = ir_resolve_type(ira, instruction->type->other); + if (type_is_invalid(float_type)) + return ira->codegen->builtin_types.entry_invalid; + + IrInstruction *op = instruction->op->other; + if (type_is_invalid(op->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + bool ok_type = float_type->id == 
TypeTableEntryIdComptimeFloat || float_type->id == TypeTableEntryIdFloat; + if (!ok_type) { + ir_add_error(ira, instruction->type, buf_sprintf("@sqrt does not support type '%s'", buf_ptr(&float_type->name))); + return ira->codegen->builtin_types.entry_invalid; + } + + IrInstruction *casted_op = ir_implicit_cast(ira, op, float_type); + if (type_is_invalid(casted_op->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + if (instr_is_comptime(casted_op)) { + ConstExprValue *val = ir_resolve_const(ira, casted_op, UndefBad); + if (!val) + return ira->codegen->builtin_types.entry_invalid; + + ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); + + if (float_type->id == TypeTableEntryIdComptimeFloat) { + bigfloat_sqrt(&out_val->data.x_bigfloat, &val->data.x_bigfloat); + } else if (float_type->id == TypeTableEntryIdFloat) { + switch (float_type->data.floating.bit_count) { + case 16: + out_val->data.x_f16 = f16_sqrt(val->data.x_f16); + break; + case 32: + out_val->data.x_f32 = sqrtf(val->data.x_f32); + break; + case 64: + out_val->data.x_f64 = sqrt(val->data.x_f64); + break; + case 128: + f128M_sqrt(&val->data.x_f128, &out_val->data.x_f128); + break; + default: + zig_unreachable(); + } + } else { + zig_unreachable(); + } + + return float_type; + } + + assert(float_type->id == TypeTableEntryIdFloat); + if (float_type->data.floating.bit_count != 16 && + float_type->data.floating.bit_count != 32 && + float_type->data.floating.bit_count != 64) { + ir_add_error(ira, instruction->type, buf_sprintf("compiler TODO: add implementation of sqrt for '%s'", buf_ptr(&float_type->name))); + return ira->codegen->builtin_types.entry_invalid; + } + + IrInstruction *result = ir_build_sqrt(&ira->new_irb, instruction->base.scope, + instruction->base.source_node, nullptr, casted_op); + ir_link_new_instruction(result, &instruction->base); + result->value.type = float_type; + return result->value.type; +} + +static TypeTableEntry *ir_analyze_instruction_enum_to_int(IrAnalyze *ira, IrInstructionEnumToInt *instruction) { + IrInstruction *target = instruction->target->other; + if (type_is_invalid(target->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + if (target->value.type->id != TypeTableEntryIdEnum) { + ir_add_error(ira, instruction->target, + buf_sprintf("expected enum, found type '%s'", buf_ptr(&target->value.type->name))); + return ira->codegen->builtin_types.entry_invalid; + } + + type_ensure_zero_bits_known(ira->codegen, target->value.type); + if (type_is_invalid(target->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + TypeTableEntry *tag_type = target->value.type->data.enumeration.tag_int_type; + + IrInstruction *result = ir_analyze_enum_to_int(ira, &instruction->base, target, tag_type); + ir_link_new_instruction(result, &instruction->base); + return result->value.type; +} + +static TypeTableEntry *ir_analyze_instruction_int_to_enum(IrAnalyze *ira, IrInstructionIntToEnum *instruction) { + IrInstruction *dest_type_value = instruction->dest_type->other; + TypeTableEntry *dest_type = ir_resolve_type(ira, dest_type_value); + if (type_is_invalid(dest_type)) + return ira->codegen->builtin_types.entry_invalid; + + if (dest_type->id != TypeTableEntryIdEnum) { + ir_add_error(ira, instruction->dest_type, + buf_sprintf("expected enum, found type '%s'", buf_ptr(&dest_type->name))); + return ira->codegen->builtin_types.entry_invalid; + } + + type_ensure_zero_bits_known(ira->codegen, dest_type); + if (type_is_invalid(dest_type)) + return 
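// [editor's note] Illustrative sketch, not part of the patch: the @sqrt analysis above folds the
// operation at comptime by dispatching on the float bit width (16-bit and 128-bit values go through
// the bundled softfloat routines f16_sqrt / f128M_sqrt, 32- and 64-bit use libm), and the runtime
// lowering currently only covers 16/32/64-bit floats. A reduced standalone model of the libm cases
// only (names hypothetical):
#include <cmath>
#include <cstdint>
#include <stdexcept>

static double comptime_sqrt(double value, uint32_t bit_count) {
    switch (bit_count) {
        case 32: return std::sqrt(static_cast<float>(value)); // fold at f32 precision
        case 64: return std::sqrt(value);                     // fold at f64 precision
        default: // 16- and 128-bit need softfloat support, as in the real implementation
            throw std::runtime_error("unhandled float width in this sketch");
    }
}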
ira->codegen->builtin_types.entry_invalid; + + TypeTableEntry *tag_type = dest_type->data.enumeration.tag_int_type; + + IrInstruction *target = instruction->target->other; + if (type_is_invalid(target->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + IrInstruction *casted_target = ir_implicit_cast(ira, target, tag_type); + if (type_is_invalid(casted_target->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + IrInstruction *result = ir_analyze_int_to_enum(ira, &instruction->base, casted_target, dest_type); + ir_link_new_instruction(result, &instruction->base); + return result->value.type; +} + static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstruction *instruction) { switch (instruction->id) { case IrInstructionIdInvalid: case IrInstructionIdWidenOrShorten: - case IrInstructionIdIntToEnum: - case IrInstructionIdIntToErr: - case IrInstructionIdErrToInt: case IrInstructionIdStructInit: case IrInstructionIdUnionInit: case IrInstructionIdStructFieldPtr: case IrInstructionIdUnionFieldPtr: - case IrInstructionIdMaybeWrap: + case IrInstructionIdOptionalWrap: case IrInstructionIdErrWrapCode: case IrInstructionIdErrWrapPayload: case IrInstructionIdCast: @@ -18102,12 +20937,14 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi return ir_analyze_instruction_size_of(ira, (IrInstructionSizeOf *)instruction); case IrInstructionIdTestNonNull: return ir_analyze_instruction_test_non_null(ira, (IrInstructionTestNonNull *)instruction); - case IrInstructionIdUnwrapMaybe: - return ir_analyze_instruction_unwrap_maybe(ira, (IrInstructionUnwrapMaybe *)instruction); + case IrInstructionIdUnwrapOptional: + return ir_analyze_instruction_unwrap_maybe(ira, (IrInstructionUnwrapOptional *)instruction); case IrInstructionIdClz: return ir_analyze_instruction_clz(ira, (IrInstructionClz *)instruction); case IrInstructionIdCtz: return ir_analyze_instruction_ctz(ira, (IrInstructionCtz *)instruction); + case IrInstructionIdPopCount: + return ir_analyze_instruction_pop_count(ira, (IrInstructionPopCount *)instruction); case IrInstructionIdSwitchBr: return ir_analyze_instruction_switch_br(ira, (IrInstructionSwitchBr *)instruction); case IrInstructionIdSwitchTarget: @@ -18154,6 +20991,22 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi return ir_analyze_instruction_fence(ira, (IrInstructionFence *)instruction); case IrInstructionIdTruncate: return ir_analyze_instruction_truncate(ira, (IrInstructionTruncate *)instruction); + case IrInstructionIdIntCast: + return ir_analyze_instruction_int_cast(ira, (IrInstructionIntCast *)instruction); + case IrInstructionIdFloatCast: + return ir_analyze_instruction_float_cast(ira, (IrInstructionFloatCast *)instruction); + case IrInstructionIdErrSetCast: + return ir_analyze_instruction_err_set_cast(ira, (IrInstructionErrSetCast *)instruction); + case IrInstructionIdFromBytes: + return ir_analyze_instruction_from_bytes(ira, (IrInstructionFromBytes *)instruction); + case IrInstructionIdToBytes: + return ir_analyze_instruction_to_bytes(ira, (IrInstructionToBytes *)instruction); + case IrInstructionIdIntToFloat: + return ir_analyze_instruction_int_to_float(ira, (IrInstructionIntToFloat *)instruction); + case IrInstructionIdFloatToInt: + return ir_analyze_instruction_float_to_int(ira, (IrInstructionFloatToInt *)instruction); + case IrInstructionIdBoolToInt: + return ir_analyze_instruction_bool_to_int(ira, (IrInstructionBoolToInt *)instruction); case IrInstructionIdIntType: 
return ir_analyze_instruction_int_type(ira, (IrInstructionIntType *)instruction); case IrInstructionIdBoolNot: @@ -18176,6 +21029,8 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi return ir_analyze_instruction_return_address(ira, (IrInstructionReturnAddress *)instruction); case IrInstructionIdFrameAddress: return ir_analyze_instruction_frame_address(ira, (IrInstructionFrameAddress *)instruction); + case IrInstructionIdHandle: + return ir_analyze_instruction_handle(ira, (IrInstructionHandle *)instruction); case IrInstructionIdAlignOf: return ir_analyze_instruction_align_of(ira, (IrInstructionAlignOf *)instruction); case IrInstructionIdOverflowOp: @@ -18194,8 +21049,6 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi return ir_analyze_instruction_check_switch_prongs(ira, (IrInstructionCheckSwitchProngs *)instruction); case IrInstructionIdCheckStatementIsVoid: return ir_analyze_instruction_check_statement_is_void(ira, (IrInstructionCheckStatementIsVoid *)instruction); - case IrInstructionIdCanImplicitCast: - return ir_analyze_instruction_can_implicit_cast(ira, (IrInstructionCanImplicitCast *)instruction); case IrInstructionIdDeclRef: return ir_analyze_instruction_decl_ref(ira, (IrInstructionDeclRef *)instruction); case IrInstructionIdPanic: @@ -18214,12 +21067,14 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi return ir_analyze_instruction_field_parent_ptr(ira, (IrInstructionFieldParentPtr *)instruction); case IrInstructionIdOffsetOf: return ir_analyze_instruction_offset_of(ira, (IrInstructionOffsetOf *)instruction); + case IrInstructionIdTypeInfo: + return ir_analyze_instruction_type_info(ira, (IrInstructionTypeInfo *) instruction); case IrInstructionIdTypeId: return ir_analyze_instruction_type_id(ira, (IrInstructionTypeId *)instruction); case IrInstructionIdSetEvalBranchQuota: return ir_analyze_instruction_set_eval_branch_quota(ira, (IrInstructionSetEvalBranchQuota *)instruction); - case IrInstructionIdPtrTypeOf: - return ir_analyze_instruction_ptr_type_of(ira, (IrInstructionPtrTypeOf *)instruction); + case IrInstructionIdPtrType: + return ir_analyze_instruction_ptr_type(ira, (IrInstructionPtrType *)instruction); case IrInstructionIdAlignCast: return ir_analyze_instruction_align_cast(ira, (IrInstructionAlignCast *)instruction); case IrInstructionIdOpaqueType: @@ -18266,6 +21121,8 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi return ir_analyze_instruction_coro_alloc_helper(ira, (IrInstructionCoroAllocHelper *)instruction); case IrInstructionIdAtomicRmw: return ir_analyze_instruction_atomic_rmw(ira, (IrInstructionAtomicRmw *)instruction); + case IrInstructionIdAtomicLoad: + return ir_analyze_instruction_atomic_load(ira, (IrInstructionAtomicLoad *)instruction); case IrInstructionIdPromiseResultType: return ir_analyze_instruction_promise_result_type(ira, (IrInstructionPromiseResultType *)instruction); case IrInstructionIdAwaitBookkeeping: @@ -18278,6 +21135,16 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi return ir_analyze_instruction_merge_err_ret_traces(ira, (IrInstructionMergeErrRetTraces *)instruction); case IrInstructionIdMarkErrRetTracePtr: return ir_analyze_instruction_mark_err_ret_trace_ptr(ira, (IrInstructionMarkErrRetTracePtr *)instruction); + case IrInstructionIdSqrt: + return ir_analyze_instruction_sqrt(ira, (IrInstructionSqrt *)instruction); + case IrInstructionIdIntToErr: + return 
ir_analyze_instruction_int_to_err(ira, (IrInstructionIntToErr *)instruction); + case IrInstructionIdErrToInt: + return ir_analyze_instruction_err_to_int(ira, (IrInstructionErrToInt *)instruction); + case IrInstructionIdIntToEnum: + return ir_analyze_instruction_int_to_enum(ira, (IrInstructionIntToEnum *)instruction); + case IrInstructionIdEnumToInt: + return ir_analyze_instruction_enum_to_int(ira, (IrInstructionEnumToInt *)instruction); } zig_unreachable(); } @@ -18285,6 +21152,7 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi static TypeTableEntry *ir_analyze_instruction(IrAnalyze *ira, IrInstruction *instruction) { TypeTableEntry *instruction_type = ir_analyze_instruction_nocast(ira, instruction); instruction->value.type = instruction_type; + if (instruction->other) { instruction->other->value.type = instruction_type; } else { @@ -18354,7 +21222,7 @@ TypeTableEntry *ir_analyze(CodeGen *codegen, IrExecutable *old_exec, IrExecutabl } else if (ira->src_implicit_return_type_list.length == 0) { return codegen->builtin_types.entry_unreachable; } else { - return ir_resolve_peer_types(ira, expected_type_source_node, ira->src_implicit_return_type_list.items, + return ir_resolve_peer_types(ira, expected_type_source_node, expected_type, ira->src_implicit_return_type_list.items, ira->src_implicit_return_type_list.length); } } @@ -18391,7 +21259,7 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdCheckStatementIsVoid: case IrInstructionIdPanic: case IrInstructionIdSetEvalBranchQuota: - case IrInstructionIdPtrTypeOf: + case IrInstructionIdPtrType: case IrInstructionIdSetAlignStack: case IrInstructionIdExport: case IrInstructionIdCancel: @@ -18407,6 +21275,7 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdAddImplicitReturnType: case IrInstructionIdMergeErrRetTraces: case IrInstructionIdMarkErrRetTracePtr: + case IrInstructionIdAtomicRmw: return true; case IrInstructionIdPhi: @@ -18433,9 +21302,10 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdSliceType: case IrInstructionIdSizeOf: case IrInstructionIdTestNonNull: - case IrInstructionIdUnwrapMaybe: + case IrInstructionIdUnwrapOptional: case IrInstructionIdClz: case IrInstructionIdCtz: + case IrInstructionIdPopCount: case IrInstructionIdSwitchVar: case IrInstructionIdSwitchTarget: case IrInstructionIdUnionTag: @@ -18453,9 +21323,10 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdAlignOf: case IrInstructionIdReturnAddress: case IrInstructionIdFrameAddress: + case IrInstructionIdHandle: case IrInstructionIdTestErr: case IrInstructionIdUnwrapErrCode: - case IrInstructionIdMaybeWrap: + case IrInstructionIdOptionalWrap: case IrInstructionIdErrWrapCode: case IrInstructionIdErrWrapPayload: case IrInstructionIdFnProto: @@ -18468,13 +21339,13 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdIntToEnum: case IrInstructionIdIntToErr: case IrInstructionIdErrToInt: - case IrInstructionIdCanImplicitCast: case IrInstructionIdDeclRef: case IrInstructionIdErrName: case IrInstructionIdTypeName: case IrInstructionIdTagName: case IrInstructionIdFieldParentPtr: case IrInstructionIdOffsetOf: + case IrInstructionIdTypeInfo: case IrInstructionIdTypeId: case IrInstructionIdAlignCast: case IrInstructionIdOpaqueType: @@ -18487,9 +21358,19 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdCoroSize: case IrInstructionIdCoroSuspend: case IrInstructionIdCoroFree: - case 
IrInstructionIdAtomicRmw: case IrInstructionIdCoroPromise: case IrInstructionIdPromiseResultType: + case IrInstructionIdSqrt: + case IrInstructionIdAtomicLoad: + case IrInstructionIdIntCast: + case IrInstructionIdFloatCast: + case IrInstructionIdErrSetCast: + case IrInstructionIdIntToFloat: + case IrInstructionIdFloatToInt: + case IrInstructionIdBoolToInt: + case IrInstructionIdFromBytes: + case IrInstructionIdToBytes: + case IrInstructionIdEnumToInt: return false; case IrInstructionIdAsm: diff --git a/src/ir_print.cpp b/src/ir_print.cpp index 99f79ff75e..77c7ef47b6 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -45,6 +45,10 @@ static void ir_print_var_instruction(IrPrint *irp, IrInstruction *instruction) { } static void ir_print_other_instruction(IrPrint *irp, IrInstruction *instruction) { + if (instruction == nullptr) { + fprintf(irp->f, "(null)"); + return; + } if (instruction->value.special != ConstValSpecialRuntime) { ir_print_const_value(irp, &instruction->value); } else { @@ -148,7 +152,7 @@ static const char *ir_un_op_id_str(IrUnOp op_id) { return "-%"; case IrUnOpDereference: return "*"; - case IrUnOpMaybe: + case IrUnOpOptional: return "?"; } zig_unreachable(); @@ -358,9 +362,18 @@ static void ir_print_ptr_type_child(IrPrint *irp, IrInstructionPtrTypeChild *ins } static void ir_print_field_ptr(IrPrint *irp, IrInstructionFieldPtr *instruction) { - fprintf(irp->f, "fieldptr "); - ir_print_other_instruction(irp, instruction->container_ptr); - fprintf(irp->f, ".%s", buf_ptr(instruction->field_name)); + if (instruction->field_name_buffer) { + fprintf(irp->f, "fieldptr "); + ir_print_other_instruction(irp, instruction->container_ptr); + fprintf(irp->f, ".%s", buf_ptr(instruction->field_name_buffer)); + } else { + assert(instruction->field_name_expr); + fprintf(irp->f, "@field("); + ir_print_other_instruction(irp, instruction->container_ptr); + fprintf(irp->f, ", "); + ir_print_other_instruction(irp, instruction->field_name_expr); + fprintf(irp->f, ")"); + } } static void ir_print_struct_field_ptr(IrPrint *irp, IrInstructionStructFieldPtr *instruction) { @@ -472,7 +485,7 @@ static void ir_print_test_null(IrPrint *irp, IrInstructionTestNonNull *instructi fprintf(irp->f, " != null"); } -static void ir_print_unwrap_maybe(IrPrint *irp, IrInstructionUnwrapMaybe *instruction) { +static void ir_print_unwrap_maybe(IrPrint *irp, IrInstructionUnwrapOptional *instruction) { fprintf(irp->f, "&??*"); ir_print_other_instruction(irp, instruction->value); if (!instruction->safety_check_on) { @@ -492,6 +505,12 @@ static void ir_print_ctz(IrPrint *irp, IrInstructionCtz *instruction) { fprintf(irp->f, ")"); } +static void ir_print_pop_count(IrPrint *irp, IrInstructionPopCount *instruction) { + fprintf(irp->f, "@popCount("); + ir_print_other_instruction(irp, instruction->value); + fprintf(irp->f, ")"); +} + static void ir_print_switch_br(IrPrint *irp, IrInstructionSwitchBr *instruction) { fprintf(irp->f, "switch ("); ir_print_other_instruction(irp, instruction->target_value); @@ -639,6 +658,66 @@ static void ir_print_truncate(IrPrint *irp, IrInstructionTruncate *instruction) fprintf(irp->f, ")"); } +static void ir_print_int_cast(IrPrint *irp, IrInstructionIntCast *instruction) { + fprintf(irp->f, "@intCast("); + ir_print_other_instruction(irp, instruction->dest_type); + fprintf(irp->f, ", "); + ir_print_other_instruction(irp, instruction->target); + fprintf(irp->f, ")"); +} + +static void ir_print_float_cast(IrPrint *irp, IrInstructionFloatCast *instruction) { + fprintf(irp->f, "@floatCast("); + 
ir_print_other_instruction(irp, instruction->dest_type); + fprintf(irp->f, ", "); + ir_print_other_instruction(irp, instruction->target); + fprintf(irp->f, ")"); +} + +static void ir_print_err_set_cast(IrPrint *irp, IrInstructionErrSetCast *instruction) { + fprintf(irp->f, "@errSetCast("); + ir_print_other_instruction(irp, instruction->dest_type); + fprintf(irp->f, ", "); + ir_print_other_instruction(irp, instruction->target); + fprintf(irp->f, ")"); +} + +static void ir_print_from_bytes(IrPrint *irp, IrInstructionFromBytes *instruction) { + fprintf(irp->f, "@bytesToSlice("); + ir_print_other_instruction(irp, instruction->dest_child_type); + fprintf(irp->f, ", "); + ir_print_other_instruction(irp, instruction->target); + fprintf(irp->f, ")"); +} + +static void ir_print_to_bytes(IrPrint *irp, IrInstructionToBytes *instruction) { + fprintf(irp->f, "@sliceToBytes("); + ir_print_other_instruction(irp, instruction->target); + fprintf(irp->f, ")"); +} + +static void ir_print_int_to_float(IrPrint *irp, IrInstructionIntToFloat *instruction) { + fprintf(irp->f, "@intToFloat("); + ir_print_other_instruction(irp, instruction->dest_type); + fprintf(irp->f, ", "); + ir_print_other_instruction(irp, instruction->target); + fprintf(irp->f, ")"); +} + +static void ir_print_float_to_int(IrPrint *irp, IrInstructionFloatToInt *instruction) { + fprintf(irp->f, "@floatToInt("); + ir_print_other_instruction(irp, instruction->dest_type); + fprintf(irp->f, ", "); + ir_print_other_instruction(irp, instruction->target); + fprintf(irp->f, ")"); +} + +static void ir_print_bool_to_int(IrPrint *irp, IrInstructionBoolToInt *instruction) { + fprintf(irp->f, "@boolToInt("); + ir_print_other_instruction(irp, instruction->target); + fprintf(irp->f, ")"); +} + static void ir_print_int_type(IrPrint *irp, IrInstructionIntType *instruction) { fprintf(irp->f, "@IntType("); ir_print_other_instruction(irp, instruction->is_signed); @@ -712,6 +791,10 @@ static void ir_print_frame_address(IrPrint *irp, IrInstructionFrameAddress *inst fprintf(irp->f, "@frameAddress()"); } +static void ir_print_handle(IrPrint *irp, IrInstructionHandle *instruction) { + fprintf(irp->f, "@handle()"); +} + static void ir_print_return_address(IrPrint *irp, IrInstructionReturnAddress *instruction) { fprintf(irp->f, "@returnAddress()"); } @@ -768,7 +851,7 @@ static void ir_print_unwrap_err_payload(IrPrint *irp, IrInstructionUnwrapErrPayl } } -static void ir_print_maybe_wrap(IrPrint *irp, IrInstructionMaybeWrap *instruction) { +static void ir_print_maybe_wrap(IrPrint *irp, IrInstructionOptionalWrap *instruction) { fprintf(irp->f, "@maybeWrap("); ir_print_other_instruction(irp, instruction->value); fprintf(irp->f, ")"); @@ -859,6 +942,17 @@ static void ir_print_int_to_ptr(IrPrint *irp, IrInstructionIntToPtr *instruction static void ir_print_int_to_enum(IrPrint *irp, IrInstructionIntToEnum *instruction) { fprintf(irp->f, "@intToEnum("); + if (instruction->dest_type == nullptr) { + fprintf(irp->f, "(null)"); + } else { + ir_print_other_instruction(irp, instruction->dest_type); + } + ir_print_other_instruction(irp, instruction->target); + fprintf(irp->f, ")"); +} + +static void ir_print_enum_to_int(IrPrint *irp, IrInstructionEnumToInt *instruction) { + fprintf(irp->f, "@enumToInt("); ir_print_other_instruction(irp, instruction->target); fprintf(irp->f, ")"); } @@ -904,15 +998,7 @@ static void ir_print_tag_name(IrPrint *irp, IrInstructionTagName *instruction) { ir_print_other_instruction(irp, instruction->target); } -static void ir_print_can_implicit_cast(IrPrint 
*irp, IrInstructionCanImplicitCast *instruction) { - fprintf(irp->f, "@canImplicitCast("); - ir_print_other_instruction(irp, instruction->type_value); - fprintf(irp->f, ","); - ir_print_other_instruction(irp, instruction->target_value); - fprintf(irp->f, ")"); -} - -static void ir_print_ptr_type_of(IrPrint *irp, IrInstructionPtrTypeOf *instruction) { +static void ir_print_ptr_type(IrPrint *irp, IrInstructionPtrType *instruction) { fprintf(irp->f, "&"); if (instruction->align_value != nullptr) { fprintf(irp->f, "align("); @@ -927,10 +1013,8 @@ static void ir_print_ptr_type_of(IrPrint *irp, IrInstructionPtrTypeOf *instructi } static void ir_print_decl_ref(IrPrint *irp, IrInstructionDeclRef *instruction) { - const char *ptr_str = instruction->lval.is_ptr ? "ptr " : ""; - const char *const_str = instruction->lval.is_const ? "const " : ""; - const char *volatile_str = instruction->lval.is_volatile ? "volatile " : ""; - fprintf(irp->f, "declref %s%s%s%s", const_str, volatile_str, ptr_str, buf_ptr(instruction->tld->name)); + const char *ptr_str = (instruction->lval == LValPtr) ? "ptr " : ""; + fprintf(irp->f, "declref %s%s", ptr_str, buf_ptr(instruction->tld->name)); } static void ir_print_panic(IrPrint *irp, IrInstructionPanic *instruction) { @@ -957,6 +1041,12 @@ static void ir_print_offset_of(IrPrint *irp, IrInstructionOffsetOf *instruction) fprintf(irp->f, ")"); } +static void ir_print_type_info(IrPrint *irp, IrInstructionTypeInfo *instruction) { + fprintf(irp->f, "@typeInfo("); + ir_print_other_instruction(irp, instruction->type_value); + fprintf(irp->f, ")"); +} + static void ir_print_type_id(IrPrint *irp, IrInstructionTypeId *instruction) { fprintf(irp->f, "@typeId("); ir_print_other_instruction(irp, instruction->type_value); @@ -1025,7 +1115,7 @@ static void ir_print_export(IrPrint *irp, IrInstructionExport *instruction) { static void ir_print_error_return_trace(IrPrint *irp, IrInstructionErrorReturnTrace *instruction) { fprintf(irp->f, "@errorReturnTrace("); - switch (instruction->nullable) { + switch (instruction->optional) { case IrInstructionErrorReturnTrace::Null: fprintf(irp->f, "Null"); break; @@ -1172,6 +1262,24 @@ static void ir_print_atomic_rmw(IrPrint *irp, IrInstructionAtomicRmw *instructio fprintf(irp->f, ")"); } +static void ir_print_atomic_load(IrPrint *irp, IrInstructionAtomicLoad *instruction) { + fprintf(irp->f, "@atomicLoad("); + if (instruction->operand_type != nullptr) { + ir_print_other_instruction(irp, instruction->operand_type); + } else { + fprintf(irp->f, "[TODO print]"); + } + fprintf(irp->f, ","); + ir_print_other_instruction(irp, instruction->ptr); + fprintf(irp->f, ","); + if (instruction->ordering != nullptr) { + ir_print_other_instruction(irp, instruction->ordering); + } else { + fprintf(irp->f, "[TODO print]"); + } + fprintf(irp->f, ")"); +} + static void ir_print_await_bookkeeping(IrPrint *irp, IrInstructionAwaitBookkeeping *instruction) { fprintf(irp->f, "@awaitBookkeeping("); ir_print_other_instruction(irp, instruction->promise_result_type); @@ -1204,6 +1312,18 @@ static void ir_print_mark_err_ret_trace_ptr(IrPrint *irp, IrInstructionMarkErrRe fprintf(irp->f, ")"); } +static void ir_print_sqrt(IrPrint *irp, IrInstructionSqrt *instruction) { + fprintf(irp->f, "@sqrt("); + if (instruction->type != nullptr) { + ir_print_other_instruction(irp, instruction->type); + } else { + fprintf(irp->f, "null"); + } + fprintf(irp->f, ","); + ir_print_other_instruction(irp, instruction->op); + fprintf(irp->f, ")"); +} + static void ir_print_instruction(IrPrint *irp, 
IrInstruction *instruction) { ir_print_prefix(irp, instruction); switch (instruction->id) { @@ -1311,12 +1431,15 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdTestNonNull: ir_print_test_null(irp, (IrInstructionTestNonNull *)instruction); break; - case IrInstructionIdUnwrapMaybe: - ir_print_unwrap_maybe(irp, (IrInstructionUnwrapMaybe *)instruction); + case IrInstructionIdUnwrapOptional: + ir_print_unwrap_maybe(irp, (IrInstructionUnwrapOptional *)instruction); break; case IrInstructionIdCtz: ir_print_ctz(irp, (IrInstructionCtz *)instruction); break; + case IrInstructionIdPopCount: + ir_print_pop_count(irp, (IrInstructionPopCount *)instruction); + break; case IrInstructionIdClz: ir_print_clz(irp, (IrInstructionClz *)instruction); break; @@ -1380,6 +1503,30 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdTruncate: ir_print_truncate(irp, (IrInstructionTruncate *)instruction); break; + case IrInstructionIdIntCast: + ir_print_int_cast(irp, (IrInstructionIntCast *)instruction); + break; + case IrInstructionIdFloatCast: + ir_print_float_cast(irp, (IrInstructionFloatCast *)instruction); + break; + case IrInstructionIdErrSetCast: + ir_print_err_set_cast(irp, (IrInstructionErrSetCast *)instruction); + break; + case IrInstructionIdFromBytes: + ir_print_from_bytes(irp, (IrInstructionFromBytes *)instruction); + break; + case IrInstructionIdToBytes: + ir_print_to_bytes(irp, (IrInstructionToBytes *)instruction); + break; + case IrInstructionIdIntToFloat: + ir_print_int_to_float(irp, (IrInstructionIntToFloat *)instruction); + break; + case IrInstructionIdFloatToInt: + ir_print_float_to_int(irp, (IrInstructionFloatToInt *)instruction); + break; + case IrInstructionIdBoolToInt: + ir_print_bool_to_int(irp, (IrInstructionBoolToInt *)instruction); + break; case IrInstructionIdIntType: ir_print_int_type(irp, (IrInstructionIntType *)instruction); break; @@ -1413,6 +1560,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdFrameAddress: ir_print_frame_address(irp, (IrInstructionFrameAddress *)instruction); break; + case IrInstructionIdHandle: + ir_print_handle(irp, (IrInstructionHandle *)instruction); + break; case IrInstructionIdAlignOf: ir_print_align_of(irp, (IrInstructionAlignOf *)instruction); break; @@ -1428,8 +1578,8 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdUnwrapErrPayload: ir_print_unwrap_err_payload(irp, (IrInstructionUnwrapErrPayload *)instruction); break; - case IrInstructionIdMaybeWrap: - ir_print_maybe_wrap(irp, (IrInstructionMaybeWrap *)instruction); + case IrInstructionIdOptionalWrap: + ir_print_maybe_wrap(irp, (IrInstructionOptionalWrap *)instruction); break; case IrInstructionIdErrWrapCode: ir_print_err_wrap_code(irp, (IrInstructionErrWrapCode *)instruction); @@ -1479,11 +1629,8 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdTagName: ir_print_tag_name(irp, (IrInstructionTagName *)instruction); break; - case IrInstructionIdCanImplicitCast: - ir_print_can_implicit_cast(irp, (IrInstructionCanImplicitCast *)instruction); - break; - case IrInstructionIdPtrTypeOf: - ir_print_ptr_type_of(irp, (IrInstructionPtrTypeOf *)instruction); + case IrInstructionIdPtrType: + ir_print_ptr_type(irp, (IrInstructionPtrType *)instruction); break; case IrInstructionIdDeclRef: ir_print_decl_ref(irp, (IrInstructionDeclRef *)instruction); @@ -1497,6 +1644,9 @@ 
static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdOffsetOf: ir_print_offset_of(irp, (IrInstructionOffsetOf *)instruction); break; + case IrInstructionIdTypeInfo: + ir_print_type_info(irp, (IrInstructionTypeInfo *)instruction); + break; case IrInstructionIdTypeId: ir_print_type_id(irp, (IrInstructionTypeId *)instruction); break; @@ -1590,6 +1740,15 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdMarkErrRetTracePtr: ir_print_mark_err_ret_trace_ptr(irp, (IrInstructionMarkErrRetTracePtr *)instruction); break; + case IrInstructionIdSqrt: + ir_print_sqrt(irp, (IrInstructionSqrt *)instruction); + break; + case IrInstructionIdAtomicLoad: + ir_print_atomic_load(irp, (IrInstructionAtomicLoad *)instruction); + break; + case IrInstructionIdEnumToInt: + ir_print_enum_to_int(irp, (IrInstructionEnumToInt *)instruction); + break; } fprintf(irp->f, "\n"); } diff --git a/src/link.cpp b/src/link.cpp index 3c6e27e331..f65c072bac 100644 --- a/src/link.cpp +++ b/src/link.cpp @@ -208,7 +208,7 @@ static Buf *get_dynamic_linker_path(CodeGen *g) { static void construct_linker_job_elf(LinkJob *lj) { CodeGen *g = lj->codegen; - if (lj->link_in_crt) { + if (g->libc_link_lib != nullptr) { find_libc_lib_path(g); } @@ -217,6 +217,9 @@ static void construct_linker_job_elf(LinkJob *lj) { lj->args.append(g->linker_script); } + if (g->no_rosegment_workaround) { + lj->args.append("--no-rosegment"); + } lj->args.append("--gc-sections"); lj->args.append("-m"); @@ -322,10 +325,13 @@ static void construct_linker_job_elf(LinkJob *lj) { lj->args.append((const char *)buf_ptr(g->link_objects.at(i))); } - if (g->libc_link_lib == nullptr && (g->out_type == OutTypeExe || g->out_type == OutTypeLib)) { - Buf *builtin_o_path = build_o(g, "builtin"); - lj->args.append(buf_ptr(builtin_o_path)); + if (g->out_type == OutTypeExe || g->out_type == OutTypeLib) { + if (g->libc_link_lib == nullptr) { + Buf *builtin_o_path = build_o(g, "builtin"); + lj->args.append(buf_ptr(builtin_o_path)); + } + // sometimes libgcc is missing stuff, so we still build compiler_rt and rely on weak linkage Buf *compiler_rt_o_path = build_compiler_rt(g); lj->args.append(buf_ptr(compiler_rt_o_path)); } @@ -388,6 +394,19 @@ static void construct_linker_job_elf(LinkJob *lj) { } } +static void construct_linker_job_wasm(LinkJob *lj) { + CodeGen *g = lj->codegen; + + lj->args.append("--relocatable"); // So lld doesn't look for _start. 
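// [editor's note] Illustrative sketch, not part of the patch: construct_linker_job_wasm above
// assembles a minimal wasm-lld argument list -- `--relocatable` (so lld does not look for _start),
// `-o <output>`, then every queued object file. The same assembly stated standalone (names
// hypothetical):
#include <string>
#include <vector>

static std::vector<std::string> wasm_linker_args(const std::string &out_file,
                                                 const std::vector<std::string> &objects) {
    std::vector<std::string> args = {"--relocatable", "-o", out_file};
    for (const std::string &obj : objects)
        args.push_back(obj);  // .o files, in the order they were added
    return args;
}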
+ lj->args.append("-o"); + lj->args.append(buf_ptr(&lj->out_file)); + + // .o files + for (size_t i = 0; i < g->link_objects.length; i += 1) { + lj->args.append((const char *)buf_ptr(g->link_objects.at(i))); + } +} + //static bool is_target_cyg_mingw(const ZigTarget *target) { // return (target->os == ZigLLVM_Win32 && target->env_type == ZigLLVM_Cygnus) || // (target->os == ZigLLVM_Win32 && target->env_type == ZigLLVM_GNU); @@ -416,7 +435,7 @@ static bool zig_lld_link(ZigLLVM_ObjectFormatType oformat, const char **args, si static void construct_linker_job_coff(LinkJob *lj) { CodeGen *g = lj->codegen; - if (lj->link_in_crt) { + if (g->libc_link_lib != nullptr) { find_libc_lib_path(g); } @@ -538,7 +557,7 @@ static void construct_linker_job_coff(LinkJob *lj) { lj->args.append(buf_ptr(builtin_o_path)); } - // msvc compiler_rt is missing some stuff, so we still build it and rely on LinkOnce + // msvc compiler_rt is missing some stuff, so we still build it and rely on weak linkage Buf *compiler_rt_o_path = build_compiler_rt(g); lj->args.append(buf_ptr(compiler_rt_o_path)); } @@ -882,7 +901,7 @@ static void construct_linker_job_macho(LinkJob *lj) { if (strchr(buf_ptr(link_lib->name), '/') == nullptr) { Buf *arg = buf_sprintf("-l%s", buf_ptr(link_lib->name)); lj->args.append(buf_ptr(arg)); - } else { + } else { lj->args.append(buf_ptr(link_lib->name)); } } @@ -921,7 +940,7 @@ static void construct_linker_job(LinkJob *lj) { case ZigLLVM_MachO: return construct_linker_job_macho(lj); case ZigLLVM_Wasm: - zig_panic("TODO link wasm"); + return construct_linker_job_wasm(lj); } } diff --git a/src/main.cpp b/src/main.cpp index 35c7462f4b..5f96953f21 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -23,6 +23,7 @@ static int usage(const char *arg0) { " build-exe [source] create executable from source or object files\n" " build-lib [source] create library from source or object files\n" " build-obj [source] create object from source or assembly\n" + " builtin show the source code of that @import(\"builtin\")\n" " run [source] create executable and run immediately\n" " translate-c [source] convert c code to zig code\n" " targets list available compilation targets\n" @@ -33,7 +34,7 @@ static int usage(const char *arg0) { " --assembly [source] add assembly file to build\n" " --cache-dir [path] override the cache directory\n" " --color [auto|off|on] enable or disable colored error messages\n" - " --emit [filetype] emit a specific file format as compilation output\n" + " --emit [asm|bin|llvm-ir] emit a specific file format as compilation output\n" " --enable-timing-info print timing diagnostics\n" " --libc-include-dir [path] directory where libc stdlib.h resides\n" " --name [name] override output name\n" @@ -43,6 +44,7 @@ static int usage(const char *arg0) { " --pkg-end pop current pkg\n" " --release-fast build with optimizations on and safety off\n" " --release-safe build with optimizations on and safety on\n" + " --release-small build with size optimizations on and safety off\n" " --static output will be statically linked\n" " --strip exclude debug symbols\n" " --target-arch [name] specify target architecture\n" @@ -73,6 +75,7 @@ static int usage(const char *arg0) { " -L[dir] alias for --library-path\n" " -rdynamic add all symbols to the dynamic symbol table\n" " -rpath [path] add directory to the runtime library search path\n" + " --no-rosegment compromise security to workaround valgrind bug\n" " -mconsole (windows) --subsystem console to the linker\n" " -mwindows (windows) --subsystem windows to the linker\n" " 
-framework [name] (darwin) link against framework\n" @@ -212,6 +215,7 @@ static Buf *resolve_zig_lib_dir(void) { enum Cmd { CmdInvalid, CmdBuild, + CmdBuiltin, CmdRun, CmdTest, CmdVersion, @@ -323,6 +327,7 @@ int main(int argc, char **argv) { ZigList<const char *> test_exec_args = {0}; int comptime_args_end = 0; int runtime_args_start = argc; + bool no_rosegment_workaround = false; if (argc >= 2 && strcmp(argv[1], "build") == 0) { const char *zig_exe_path = arg0; @@ -482,6 +487,8 @@ int main(int argc, char **argv) { build_mode = BuildModeFastRelease; } else if (strcmp(arg, "--release-safe") == 0) { build_mode = BuildModeSafeRelease; + } else if (strcmp(arg, "--release-small") == 0) { + build_mode = BuildModeSmallRelease; } else if (strcmp(arg, "--strip") == 0) { strip = true; } else if (strcmp(arg, "--static") == 0) { @@ -504,6 +511,8 @@ int main(int argc, char **argv) { mconsole = true; } else if (strcmp(arg, "-rdynamic") == 0) { rdynamic = true; + } else if (strcmp(arg, "--no-rosegment") == 0) { + no_rosegment_workaround = true; } else if (strcmp(arg, "--each-lib-rpath") == 0) { each_lib_rpath = true; } else if (strcmp(arg, "--enable-timing-info") == 0) { @@ -657,6 +666,8 @@ int main(int argc, char **argv) { out_type = OutTypeExe; } else if (strcmp(arg, "targets") == 0) { cmd = CmdTargets; + } else if (strcmp(arg, "builtin") == 0) { + cmd = CmdBuiltin; } else { fprintf(stderr, "Unrecognized command: %s\n", arg); return usage(arg0); @@ -674,6 +685,7 @@ int main(int argc, char **argv) { return usage(arg0); } break; + case CmdBuiltin: case CmdVersion: case CmdZen: case CmdTargets: @@ -720,6 +732,16 @@ int main(int argc, char **argv) { } switch (cmd) { + case CmdBuiltin: { + Buf *zig_lib_dir_buf = resolve_zig_lib_dir(); + CodeGen *g = codegen_create(nullptr, target, out_type, build_mode, zig_lib_dir_buf); + Buf *builtin_source = codegen_generate_builtin_source(g); + if (fwrite(buf_ptr(builtin_source), 1, buf_len(builtin_source), stdout) != buf_len(builtin_source)) { + fprintf(stderr, "unable to write to stdout: %s\n", strerror(ferror(stdout))); + return EXIT_FAILURE; + } + return EXIT_SUCCESS; + } case CmdRun: case CmdBuild: case CmdTranslateC: @@ -841,6 +863,7 @@ int main(int argc, char **argv) { codegen_set_windows_subsystem(g, mwindows, mconsole); codegen_set_rdynamic(g, rdynamic); + g->no_rosegment_workaround = no_rosegment_workaround; if (mmacosx_version_min && mios_version_min) { fprintf(stderr, "-mmacosx-version-min and -mios-version-min options not allowed together\n"); return EXIT_FAILURE; @@ -868,15 +891,19 @@ int main(int argc, char **argv) { add_package(g, cur_pkg, g->root_package); - if (cmd == CmdBuild || cmd == CmdRun) { - codegen_set_emit_file_type(g, emit_file_type); - + if (cmd == CmdBuild || cmd == CmdRun || cmd == CmdTest) { for (size_t i = 0; i < objects.length; i += 1) { codegen_add_object(g, buf_create_from_str(objects.at(i))); } for (size_t i = 0; i < asm_files.length; i += 1) { codegen_add_assembly(g, buf_create_from_str(asm_files.at(i))); } + } + + + if (cmd == CmdBuild || cmd == CmdRun) { + codegen_set_emit_file_type(g, emit_file_type); + codegen_build(g); codegen_link(g, out_file); if (timing_info) @@ -901,6 +928,8 @@ int main(int argc, char **argv) { codegen_print_timing_report(g, stdout); return EXIT_SUCCESS; } else if (cmd == CmdTest) { + codegen_set_emit_file_type(g, emit_file_type); + ZigTarget native; get_native_target(&native); diff --git a/src/os.cpp b/src/os.cpp index e0491b21de..91a591a7b6 100644 --- a/src/os.cpp +++ b/src/os.cpp @@ -26,7 +26,6 @@ 
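// [editor's note] Illustrative sketch, not part of the patch: the new CmdBuiltin branch above
// generates the @import("builtin") source and streams it to stdout, treating a short write as a
// fatal error. The same write-and-check pattern stated standalone (error reporting simplified to
// perror here; the patch itself formats the message with strerror):
#include <cstdio>
#include <string>

static int dump_to_stdout(const std::string &source) {
    if (fwrite(source.data(), 1, source.size(), stdout) != source.size()) {
        perror("unable to write to stdout");
        return 1;  // EXIT_FAILURE in the diff
    }
    return 0;
}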
#include <windows.h> #include <io.h> #include <fcntl.h> -#include "windows_com.hpp" typedef SSIZE_T ssize_t; #else @@ -225,6 +224,11 @@ void os_path_extname(Buf *full_path, Buf *out_basename, Buf *out_extname) { } void os_path_join(Buf *dirname, Buf *basename, Buf *out_full_path) { + if (buf_len(dirname) == 0) { + buf_init_from_buf(out_full_path, basename); + return; + } + buf_init_from_buf(out_full_path, dirname); uint8_t c = *(buf_ptr(out_full_path) + buf_len(out_full_path) - 1); if (!os_is_sep(c)) @@ -989,12 +993,29 @@ int os_self_exe_path(Buf *out_path) { } #elif defined(ZIG_OS_DARWIN) + // How long is the executable's path? uint32_t u32_len = 0; int ret1 = _NSGetExecutablePath(nullptr, &u32_len); assert(ret1 != 0); - buf_resize(out_path, u32_len); - int ret2 = _NSGetExecutablePath(buf_ptr(out_path), &u32_len); + + Buf *tmp = buf_alloc_fixed(u32_len); + + // Fill the executable path. + int ret2 = _NSGetExecutablePath(buf_ptr(tmp), &u32_len); assert(ret2 == 0); + + // According to libuv project, PATH_MAX*2 works around a libc bug where + // the resolved path is sometimes bigger than PATH_MAX. + buf_resize(out_path, PATH_MAX*2); + char *real_path = realpath(buf_ptr(tmp), buf_ptr(out_path)); + if (!real_path) { + buf_init_from_buf(out_path, tmp); + return 0; + } + + // Resize out_path for the correct length. + buf_resize(out_path, strlen(buf_ptr(out_path))); + return 0; #elif defined(ZIG_OS_LINUX) buf_resize(out_path, 256); @@ -1007,6 +1028,7 @@ int os_self_exe_path(Buf *out_path) { buf_resize(out_path, buf_len(out_path) * 2); continue; } + buf_resize(out_path, amt); return 0; } #endif @@ -1092,249 +1114,10 @@ void os_stderr_set_color(TermColor color) { #endif } -int os_find_windows_sdk(ZigWindowsSDK **out_sdk) { -#if defined(ZIG_OS_WINDOWS) - ZigWindowsSDK *result_sdk = allocate<ZigWindowsSDK>(1); - buf_resize(&result_sdk->path10, 0); - buf_resize(&result_sdk->path81, 0); - - HKEY key; - HRESULT rc; - rc = RegOpenKeyEx(HKEY_LOCAL_MACHINE, "SOFTWARE\\Microsoft\\Windows Kits\\Installed Roots", 0, KEY_QUERY_VALUE | KEY_WOW64_32KEY | KEY_ENUMERATE_SUB_KEYS, &key); - if (rc != ERROR_SUCCESS) { - return ErrorFileNotFound; - } - - { - DWORD tmp_buf_len = MAX_PATH; - buf_resize(&result_sdk->path10, tmp_buf_len); - rc = RegQueryValueEx(key, "KitsRoot10", NULL, NULL, (LPBYTE)buf_ptr(&result_sdk->path10), &tmp_buf_len); - if (rc == ERROR_FILE_NOT_FOUND) { - buf_resize(&result_sdk->path10, 0); - } else { - buf_resize(&result_sdk->path10, tmp_buf_len); - } - } - { - DWORD tmp_buf_len = MAX_PATH; - buf_resize(&result_sdk->path81, tmp_buf_len); - rc = RegQueryValueEx(key, "KitsRoot81", NULL, NULL, (LPBYTE)buf_ptr(&result_sdk->path81), &tmp_buf_len); - if (rc == ERROR_FILE_NOT_FOUND) { - buf_resize(&result_sdk->path81, 0); - } else { - buf_resize(&result_sdk->path81, tmp_buf_len); - } - } - - if (buf_len(&result_sdk->path10) != 0) { - Buf *sdk_lib_dir = buf_sprintf("%s\\Lib\\*", buf_ptr(&result_sdk->path10)); - - // enumerate files in sdk path looking for latest version - WIN32_FIND_DATA ffd; - HANDLE hFind = FindFirstFileA(buf_ptr(sdk_lib_dir), &ffd); - if (hFind == INVALID_HANDLE_VALUE) { - return ErrorFileNotFound; - } - int v0 = 0, v1 = 0, v2 = 0, v3 = 0; - bool found_version_dir = false; - for (;;) { - if (ffd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) { - int c0 = 0, c1 = 0, c2 = 0, c3 = 0; - sscanf(ffd.cFileName, "%d.%d.%d.%d", &c0, &c1, &c2, &c3); - if (c0 == 10 && c1 == 0 && c2 == 10240 && c3 == 0) { - // Microsoft released 26624 as 10240 accidentally. 
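// [editor's note] Illustrative sketch, not part of the patch: the Darwin branch of os_self_exe_path
// above now calls _NSGetExecutablePath twice (first with a null buffer to learn the required
// length), then canonicalizes the result with realpath into a PATH_MAX*2 buffer (per the libuv note,
// the resolved path can exceed PATH_MAX), falling back to the unresolved path when realpath fails.
// Roughly the same flow with std::string, macOS only:
#include <mach-o/dyld.h>   // _NSGetExecutablePath
#include <climits>         // PATH_MAX
#include <cstdint>
#include <cstdlib>         // realpath
#include <cstring>         // strlen
#include <string>

static std::string self_exe_path() {
    uint32_t len = 0;
    _NSGetExecutablePath(nullptr, &len);         // first call: reports the required buffer size
    std::string raw(len, '\0');
    _NSGetExecutablePath(&raw[0], &len);         // second call: fills the buffer
    raw.resize(strlen(raw.c_str()));

    std::string resolved(PATH_MAX * 2, '\0');
    if (realpath(raw.c_str(), &resolved[0]) == nullptr)
        return raw;                              // fall back to the unresolved path, as the patch does
    resolved.resize(strlen(resolved.c_str()));
    return resolved;
}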
- // https://developer.microsoft.com/en-us/windows/downloads/sdk-archive - c2 = 26624; - } - if ((c0 > v0) || (c1 > v1) || (c2 > v2) || (c3 > v3)) { - v0 = c0, v1 = c1, v2 = c2, v3 = c3; - buf_init_from_str(&result_sdk->version10, ffd.cFileName); - found_version_dir = true; - } - } - if (FindNextFile(hFind, &ffd) == 0) { - FindClose(hFind); - break; - } - } - if (!found_version_dir) { - buf_resize(&result_sdk->path10, 0); - } - } - - if (buf_len(&result_sdk->path81) != 0) { - Buf *sdk_lib_dir = buf_sprintf("%s\\Lib\\winv*", buf_ptr(&result_sdk->path81)); - - // enumerate files in sdk path looking for latest version - WIN32_FIND_DATA ffd; - HANDLE hFind = FindFirstFileA(buf_ptr(sdk_lib_dir), &ffd); - if (hFind == INVALID_HANDLE_VALUE) { - return ErrorFileNotFound; - } - int v0 = 0, v1 = 0; - bool found_version_dir = false; - for (;;) { - if (ffd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) { - int c0 = 0, c1 = 0; - sscanf(ffd.cFileName, "winv%d.%d", &c0, &c1); - if ((c0 > v0) || (c1 > v1)) { - v0 = c0, v1 = c1; - buf_init_from_str(&result_sdk->version81, ffd.cFileName); - found_version_dir = true; - } - } - if (FindNextFile(hFind, &ffd) == 0) { - FindClose(hFind); - break; - } - } - if (!found_version_dir) { - buf_resize(&result_sdk->path81, 0); - } - } - - *out_sdk = result_sdk; - return 0; -#else - return ErrorFileNotFound; -#endif -} - -int os_get_win32_vcruntime_path(Buf* output_buf, ZigLLVM_ArchType platform_type) { -#if defined(ZIG_OS_WINDOWS) - buf_resize(output_buf, 0); - //COM Smart Pointerse requires explicit scope - { - HRESULT rc; - rc = CoInitializeEx(NULL, COINIT_MULTITHREADED); - if (rc != S_OK) { - goto com_done; - } - - //This COM class is installed when a VS2017 - ISetupConfigurationPtr setup_config; - rc = setup_config.CreateInstance(__uuidof(SetupConfiguration)); - if (rc != S_OK) { - goto com_done; - } - - IEnumSetupInstancesPtr all_instances; - rc = setup_config->EnumInstances(&all_instances); - if (rc != S_OK) { - goto com_done; - } - - ISetupInstance* curr_instance; - ULONG found_inst; - while ((rc = all_instances->Next(1, &curr_instance, &found_inst) == S_OK)) { - BSTR bstr_inst_path; - rc = curr_instance->GetInstallationPath(&bstr_inst_path); - if (rc != S_OK) { - goto com_done; - } - //BSTRs are UTF-16 encoded, so we need to convert the string & adjust the length - UINT bstr_path_len = *((UINT*)bstr_inst_path - 1); - ULONG tmp_path_len = bstr_path_len / 2 + 1; - char* conv_path = (char*)bstr_inst_path; - char *tmp_path = (char*)alloca(tmp_path_len); - memset(tmp_path, 0, tmp_path_len); - uint32_t c = 0; - for (uint32_t i = 0; i < bstr_path_len; i += 2) { - tmp_path[c] = conv_path[i]; - ++c; - assert(c != tmp_path_len); - } - - buf_append_str(output_buf, tmp_path); - buf_append_char(output_buf, '\\'); - - Buf* tmp_buf = buf_alloc(); - buf_append_buf(tmp_buf, output_buf); - buf_append_str(tmp_buf, "VC\\Auxiliary\\Build\\Microsoft.VCToolsVersion.default.txt"); - FILE* tools_file = fopen(buf_ptr(tmp_buf), "r"); - if (!tools_file) { - goto com_done; - } - memset(tmp_path, 0, tmp_path_len); - fgets(tmp_path, tmp_path_len, tools_file); - strtok(tmp_path, " \r\n"); - fclose(tools_file); - buf_appendf(output_buf, "VC\\Tools\\MSVC\\%s\\lib\\", tmp_path); - switch (platform_type) { - case ZigLLVM_x86: - buf_append_str(output_buf, "x86\\"); - break; - case ZigLLVM_x86_64: - buf_append_str(output_buf, "x64\\"); - break; - case ZigLLVM_arm: - buf_append_str(output_buf, "arm\\"); - break; - default: - zig_panic("Attemped to use vcruntime for non-supported platform."); - } - 
buf_resize(tmp_buf, 0); - buf_append_buf(tmp_buf, output_buf); - buf_append_str(tmp_buf, "vcruntime.lib"); - - if (GetFileAttributesA(buf_ptr(tmp_buf)) != INVALID_FILE_ATTRIBUTES) { - return 0; - } - } - } - -com_done:; - HKEY key; - HRESULT rc; - rc = RegOpenKeyEx(HKEY_LOCAL_MACHINE, "SOFTWARE\\Microsoft\\VisualStudio\\SxS\\VS7", 0, KEY_QUERY_VALUE | KEY_WOW64_32KEY, &key); - if (rc != ERROR_SUCCESS) { - return ErrorFileNotFound; - } - - DWORD dw_type = 0; - DWORD cb_data = 0; - rc = RegQueryValueEx(key, "14.0", NULL, &dw_type, NULL, &cb_data); - if ((rc == ERROR_FILE_NOT_FOUND) || (REG_SZ != dw_type)) { - return ErrorFileNotFound; - } - - Buf* tmp_buf = buf_alloc_fixed(cb_data); - RegQueryValueExA(key, "14.0", NULL, NULL, (LPBYTE)buf_ptr(tmp_buf), &cb_data); - //RegQueryValueExA returns the length of the string INCLUDING the null terminator - buf_resize(tmp_buf, cb_data-1); - buf_append_str(tmp_buf, "VC\\Lib\\"); - switch (platform_type) { - case ZigLLVM_x86: - //x86 is in the root of the Lib folder - break; - case ZigLLVM_x86_64: - buf_append_str(tmp_buf, "amd64\\"); - break; - case ZigLLVM_arm: - buf_append_str(tmp_buf, "arm\\"); - break; - default: - zig_panic("Attemped to use vcruntime for non-supported platform."); - } - - buf_append_buf(output_buf, tmp_buf); - buf_append_str(tmp_buf, "vcruntime.lib"); - - if (GetFileAttributesA(buf_ptr(tmp_buf)) != INVALID_FILE_ATTRIBUTES) { - return 0; - } else { - buf_resize(output_buf, 0); - return ErrorFileNotFound; - } -#else - return ErrorFileNotFound; -#endif -} - int os_get_win32_ucrt_lib_path(ZigWindowsSDK *sdk, Buf* output_buf, ZigLLVM_ArchType platform_type) { #if defined(ZIG_OS_WINDOWS) buf_resize(output_buf, 0); - buf_appendf(output_buf, "%s\\Lib\\%s\\ucrt\\", buf_ptr(&sdk->path10), buf_ptr(&sdk->version10)); + buf_appendf(output_buf, "%s\\Lib\\%s\\ucrt\\", sdk->path10_ptr, sdk->version10_ptr); switch (platform_type) { case ZigLLVM_x86: buf_append_str(output_buf, "x86\\"); @@ -1366,7 +1149,7 @@ int os_get_win32_ucrt_lib_path(ZigWindowsSDK *sdk, Buf* output_buf, ZigLLVM_Arch int os_get_win32_ucrt_include_path(ZigWindowsSDK *sdk, Buf* output_buf) { #if defined(ZIG_OS_WINDOWS) buf_resize(output_buf, 0); - buf_appendf(output_buf, "%s\\Include\\%s\\ucrt", buf_ptr(&sdk->path10), buf_ptr(&sdk->version10)); + buf_appendf(output_buf, "%s\\Include\\%s\\ucrt", sdk->path10_ptr, sdk->version10_ptr); if (GetFileAttributesA(buf_ptr(output_buf)) != INVALID_FILE_ATTRIBUTES) { return 0; } @@ -1383,7 +1166,7 @@ int os_get_win32_kern32_path(ZigWindowsSDK *sdk, Buf* output_buf, ZigLLVM_ArchTy #if defined(ZIG_OS_WINDOWS) { buf_resize(output_buf, 0); - buf_appendf(output_buf, "%s\\Lib\\%s\\um\\", buf_ptr(&sdk->path10), buf_ptr(&sdk->version10)); + buf_appendf(output_buf, "%s\\Lib\\%s\\um\\", sdk->path10_ptr, sdk->version10_ptr); switch (platform_type) { case ZigLLVM_x86: buf_append_str(output_buf, "x86\\"); @@ -1406,7 +1189,7 @@ int os_get_win32_kern32_path(ZigWindowsSDK *sdk, Buf* output_buf, ZigLLVM_ArchTy } { buf_resize(output_buf, 0); - buf_appendf(output_buf, "%s\\Lib\\%s\\um\\", buf_ptr(&sdk->path81), buf_ptr(&sdk->version81)); + buf_appendf(output_buf, "%s\\Lib\\%s\\um\\", sdk->path81_ptr, sdk->version81_ptr); switch (platform_type) { case ZigLLVM_x86: buf_append_str(output_buf, "x86\\"); diff --git a/src/os.hpp b/src/os.hpp index b94e98ec3d..cfe4e8f3a2 100644 --- a/src/os.hpp +++ b/src/os.hpp @@ -12,6 +12,7 @@ #include "buffer.hpp" #include "error.hpp" #include "zig_llvm.h" +#include "windows_sdk.h" #include <stdio.h> #include <inttypes.h> @@ -79,15 
+80,6 @@ bool os_is_sep(uint8_t c); int os_self_exe_path(Buf *out_path); -struct ZigWindowsSDK { - Buf path10; - Buf version10; - Buf path81; - Buf version81; -}; - -int os_find_windows_sdk(ZigWindowsSDK **out_sdk); -int os_get_win32_vcruntime_path(Buf *output_buf, ZigLLVM_ArchType platform_type); int os_get_win32_ucrt_include_path(ZigWindowsSDK *sdk, Buf *output_buf); int os_get_win32_ucrt_lib_path(ZigWindowsSDK *sdk, Buf *output_buf, ZigLLVM_ArchType platform_type); int os_get_win32_kern32_path(ZigWindowsSDK *sdk, Buf *output_buf, ZigLLVM_ArchType platform_type); diff --git a/src/parser.cpp b/src/parser.cpp index 2bd94033cc..453ab7ce2c 100644 --- a/src/parser.cpp +++ b/src/parser.cpp @@ -648,12 +648,11 @@ static AstNode *ast_parse_asm_expr(ParseContext *pc, size_t *token_index, bool m } /* -SuspendExpression(body) = "suspend" "|" Symbol "|" body +SuspendExpression(body) = "suspend" option( body ) */ static AstNode *ast_parse_suspend_block(ParseContext *pc, size_t *token_index, bool mandatory) { - size_t orig_token_index = *token_index; - Token *suspend_token = &pc->tokens->at(*token_index); + if (suspend_token->id == TokenIdKeywordSuspend) { *token_index += 1; } else if (mandatory) { @@ -663,23 +662,18 @@ static AstNode *ast_parse_suspend_block(ParseContext *pc, size_t *token_index, b return nullptr; } - Token *bar_token = &pc->tokens->at(*token_index); - if (bar_token->id == TokenIdBinOr) { - *token_index += 1; + Token *lbrace = &pc->tokens->at(*token_index); + if (lbrace->id == TokenIdLBrace) { + AstNode *node = ast_create_node(pc, NodeTypeSuspend, suspend_token); + node->data.suspend.block = ast_parse_block(pc, token_index, true); + return node; } else if (mandatory) { - ast_expect_token(pc, suspend_token, TokenIdBinOr); + ast_expect_token(pc, lbrace, TokenIdLBrace); zig_unreachable(); } else { - *token_index = orig_token_index; + *token_index -= 1; return nullptr; } - - AstNode *node = ast_create_node(pc, NodeTypeSuspend, suspend_token); - node->data.suspend.promise_symbol = ast_parse_symbol(pc, token_index); - ast_eat_token(pc, token_index, TokenIdBinOr); - node->data.suspend.block = ast_parse_block(pc, token_index, true); - - return node; } /* @@ -1025,7 +1019,7 @@ static AstNode *ast_parse_fn_proto_partial(ParseContext *pc, size_t *token_index } /* -SuffixOpExpression = ("async" option("<" SuffixOpExpression ">") SuffixOpExpression FnCallExpression) | PrimaryExpression option(FnCallExpression | ArrayAccessExpression | FieldAccessExpression | SliceExpression) +SuffixOpExpression = ("async" option("<" SuffixOpExpression ">") SuffixOpExpression FnCallExpression) | PrimaryExpression option(FnCallExpression | ArrayAccessExpression | FieldAccessExpression | SliceExpression | ".*" | ".?") FnCallExpression : token(LParen) list(Expression, token(Comma)) token(RParen) ArrayAccessExpression : token(LBracket) Expression token(RBracket) SliceExpression = "[" Expression ".." 
option(Expression) "]" @@ -1110,13 +1104,34 @@ static AstNode *ast_parse_suffix_op_expr(ParseContext *pc, size_t *token_index, } else if (first_token->id == TokenIdDot) { *token_index += 1; - Token *name_token = ast_eat_token(pc, token_index, TokenIdSymbol); + Token *token = &pc->tokens->at(*token_index); - AstNode *node = ast_create_node(pc, NodeTypeFieldAccessExpr, first_token); - node->data.field_access_expr.struct_expr = primary_expr; - node->data.field_access_expr.field_name = token_buf(name_token); + if (token->id == TokenIdSymbol) { + *token_index += 1; + + AstNode *node = ast_create_node(pc, NodeTypeFieldAccessExpr, first_token); + node->data.field_access_expr.struct_expr = primary_expr; + node->data.field_access_expr.field_name = token_buf(token); + + primary_expr = node; + } else if (token->id == TokenIdStar) { + *token_index += 1; + + AstNode *node = ast_create_node(pc, NodeTypePtrDeref, first_token); + node->data.ptr_deref_expr.target = primary_expr; + + primary_expr = node; + } else if (token->id == TokenIdQuestion) { + *token_index += 1; + + AstNode *node = ast_create_node(pc, NodeTypeUnwrapOptional, first_token); + node->data.unwrap_optional.expr = primary_expr; + + primary_expr = node; + } else { + ast_invalid_token_error(pc, token); + } - primary_expr = node; } else { return primary_expr; } @@ -1129,24 +1144,21 @@ static PrefixOp tok_to_prefix_op(Token *token) { case TokenIdDash: return PrefixOpNegation; case TokenIdMinusPercent: return PrefixOpNegationWrap; case TokenIdTilde: return PrefixOpBinNot; - case TokenIdStar: return PrefixOpDereference; - case TokenIdMaybe: return PrefixOpMaybe; - case TokenIdDoubleQuestion: return PrefixOpUnwrapMaybe; - case TokenIdStarStar: return PrefixOpDereference; + case TokenIdQuestion: return PrefixOpOptional; + case TokenIdAmpersand: return PrefixOpAddrOf; default: return PrefixOpInvalid; } } -static AstNode *ast_parse_addr_of(ParseContext *pc, size_t *token_index) { - Token *ampersand_tok = ast_eat_token(pc, token_index, TokenIdAmpersand); - - AstNode *node = ast_create_node(pc, NodeTypeAddrOfExpr, ampersand_tok); +static AstNode *ast_parse_pointer_type(ParseContext *pc, size_t *token_index, Token *star_tok) { + AstNode *node = ast_create_node(pc, NodeTypePointerType, star_tok); + node->data.pointer_type.star_token = star_tok; Token *token = &pc->tokens->at(*token_index); if (token->id == TokenIdKeywordAlign) { *token_index += 1; ast_eat_token(pc, token_index, TokenIdLParen); - node->data.addr_of_expr.align_expr = ast_parse_expression(pc, token_index, true); + node->data.pointer_type.align_expr = ast_parse_expression(pc, token_index, true); token = &pc->tokens->at(*token_index); if (token->id == TokenIdColon) { @@ -1155,35 +1167,45 @@ static AstNode *ast_parse_addr_of(ParseContext *pc, size_t *token_index) { ast_eat_token(pc, token_index, TokenIdColon); Token *bit_offset_end_tok = ast_eat_token(pc, token_index, TokenIdIntLiteral); - node->data.addr_of_expr.bit_offset_start = token_bigint(bit_offset_start_tok); - node->data.addr_of_expr.bit_offset_end = token_bigint(bit_offset_end_tok); + node->data.pointer_type.bit_offset_start = token_bigint(bit_offset_start_tok); + node->data.pointer_type.bit_offset_end = token_bigint(bit_offset_end_tok); } ast_eat_token(pc, token_index, TokenIdRParen); token = &pc->tokens->at(*token_index); } if (token->id == TokenIdKeywordConst) { *token_index += 1; - node->data.addr_of_expr.is_const = true; + node->data.pointer_type.is_const = true; token = &pc->tokens->at(*token_index); } if (token->id == 
TokenIdKeywordVolatile) { *token_index += 1; - node->data.addr_of_expr.is_volatile = true; + node->data.pointer_type.is_volatile = true; } - node->data.addr_of_expr.op_expr = ast_parse_prefix_op_expr(pc, token_index, true); + node->data.pointer_type.op_expr = ast_parse_prefix_op_expr(pc, token_index, true); return node; } /* PrefixOpExpression = PrefixOp ErrorSetExpr | SuffixOpExpression -PrefixOp = "!" | "-" | "~" | "*" | ("&" option("align" "(" Expression option(":" Integer ":" Integer) ")" ) option("const") option("volatile")) | "?" | "??" | "-%" | "try" | "await" +PrefixOp = "!" | "-" | "~" | (("*" | "[*]") option("align" "(" Expression option(":" Integer ":" Integer) ")" ) option("const") option("volatile")) | "?" | "??" | "-%" | "try" | "await" */ static AstNode *ast_parse_prefix_op_expr(ParseContext *pc, size_t *token_index, bool mandatory) { Token *token = &pc->tokens->at(*token_index); - if (token->id == TokenIdAmpersand) { - return ast_parse_addr_of(pc, token_index); + if (token->id == TokenIdStar || token->id == TokenIdBracketStarBracket) { + *token_index += 1; + return ast_parse_pointer_type(pc, token_index, token); + } + if (token->id == TokenIdStarStar) { + *token_index += 1; + AstNode *child_node = ast_parse_pointer_type(pc, token_index, token); + child_node->column += 1; + AstNode *parent_node = ast_create_node(pc, NodeTypePointerType, token); + parent_node->data.pointer_type.star_token = token; + parent_node->data.pointer_type.op_expr = child_node; + return parent_node; } if (token->id == TokenIdKeywordTry) { return ast_parse_try_expr(pc, token_index); @@ -1200,22 +1222,12 @@ static AstNode *ast_parse_prefix_op_expr(ParseContext *pc, size_t *token_index, AstNode *node = ast_create_node(pc, NodeTypePrefixOpExpr, token); - AstNode *parent_node = node; - if (token->id == TokenIdStarStar) { - // pretend that we got 2 star tokens - - parent_node = ast_create_node(pc, NodeTypePrefixOpExpr, token); - parent_node->data.prefix_op_expr.primary_expr = node; - parent_node->data.prefix_op_expr.prefix_op = PrefixOpDereference; - - node->column += 1; - } AstNode *prefix_op_expr = ast_parse_error_set_expr(pc, token_index, true); node->data.prefix_op_expr.primary_expr = prefix_op_expr; node->data.prefix_op_expr.prefix_op = prefix_op; - return parent_node; + return node; } @@ -2270,8 +2282,8 @@ static BinOpType ast_parse_ass_op(ParseContext *pc, size_t *token_index, bool ma } /* -UnwrapExpression : BoolOrExpression (UnwrapMaybe | UnwrapError) | BoolOrExpression -UnwrapMaybe : "??" 
BoolOrExpression +UnwrapExpression : BoolOrExpression (UnwrapOptional | UnwrapError) | BoolOrExpression +UnwrapOptional = "orelse" Expression UnwrapError = "catch" option("|" Symbol "|") Expression */ static AstNode *ast_parse_unwrap_expr(ParseContext *pc, size_t *token_index, bool mandatory) { @@ -2281,14 +2293,14 @@ static AstNode *ast_parse_unwrap_expr(ParseContext *pc, size_t *token_index, boo Token *token = &pc->tokens->at(*token_index); - if (token->id == TokenIdDoubleQuestion) { + if (token->id == TokenIdKeywordOrElse) { *token_index += 1; AstNode *rhs = ast_parse_expression(pc, token_index, true); AstNode *node = ast_create_node(pc, NodeTypeBinOpExpr, token); node->data.bin_op_expr.op1 = lhs; - node->data.bin_op_expr.bin_op = BinOpTypeUnwrapMaybe; + node->data.bin_op_expr.bin_op = BinOpTypeUnwrapOptional; node->data.bin_op_expr.op2 = rhs; return node; @@ -2991,6 +3003,12 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont case NodeTypeFieldAccessExpr: visit_field(&node->data.field_access_expr.struct_expr, visit, context); break; + case NodeTypePtrDeref: + visit_field(&node->data.ptr_deref_expr.target, visit, context); + break; + case NodeTypeUnwrapOptional: + visit_field(&node->data.unwrap_optional.expr, visit, context); + break; case NodeTypeUse: visit_field(&node->data.use.expr, visit, context); break; @@ -3093,9 +3111,9 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont case NodeTypeErrorType: // none break; - case NodeTypeAddrOfExpr: - visit_field(&node->data.addr_of_expr.align_expr, visit, context); - visit_field(&node->data.addr_of_expr.op_expr, visit, context); + case NodeTypePointerType: + visit_field(&node->data.pointer_type.align_expr, visit, context); + visit_field(&node->data.pointer_type.op_expr, visit, context); break; case NodeTypeErrorSetDecl: visit_node_list(&node->data.err_set_decl.decls, visit, context); @@ -3110,7 +3128,6 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont visit_field(&node->data.await_expr.expr, visit, context); break; case NodeTypeSuspend: - visit_field(&node->data.suspend.promise_symbol, visit, context); visit_field(&node->data.suspend.block, visit, context); break; } diff --git a/src/target.cpp b/src/target.cpp index 5008b51a09..91d36c5109 100644 --- a/src/target.cpp +++ b/src/target.cpp @@ -597,12 +597,15 @@ void resolve_target_object_format(ZigTarget *target) { case ZigLLVM_tce: case ZigLLVM_tcele: case ZigLLVM_thumbeb: - case ZigLLVM_wasm32: - case ZigLLVM_wasm64: case ZigLLVM_xcore: target->oformat= ZigLLVM_ELF; return; + case ZigLLVM_wasm32: + case ZigLLVM_wasm64: + target->oformat = ZigLLVM_Wasm; + return; + case ZigLLVM_ppc: case ZigLLVM_ppc64: if (is_os_darwin(target)) { @@ -683,25 +686,46 @@ static int get_arch_pointer_bit_width(ZigLLVM_ArchType arch) { uint32_t target_c_type_size_in_bits(const ZigTarget *target, CIntType id) { switch (target->os) { case OsFreestanding: - switch (id) { - case CIntTypeShort: - case CIntTypeUShort: - return 16; - case CIntTypeInt: - case CIntTypeUInt: - return 32; - case CIntTypeLong: - case CIntTypeULong: - return get_arch_pointer_bit_width(target->arch.arch); - case CIntTypeLongLong: - case CIntTypeULongLong: - return 64; - case CIntTypeCount: - zig_unreachable(); + switch (target->arch.arch) { + case ZigLLVM_msp430: + switch (id) { + case CIntTypeShort: + case CIntTypeUShort: + return 16; + case CIntTypeInt: + case CIntTypeUInt: + return 16; + case CIntTypeLong: + case CIntTypeULong: + return 32; + case 
CIntTypeLongLong: + case CIntTypeULongLong: + return 64; + case CIntTypeCount: + zig_unreachable(); + } + default: + switch (id) { + case CIntTypeShort: + case CIntTypeUShort: + return 16; + case CIntTypeInt: + case CIntTypeUInt: + return 32; + case CIntTypeLong: + case CIntTypeULong: + return get_arch_pointer_bit_width(target->arch.arch); + case CIntTypeLongLong: + case CIntTypeULongLong: + return 64; + case CIntTypeCount: + zig_unreachable(); + } } case OsLinux: case OsMacOSX: case OsZen: + case OsOpenBSD: switch (id) { case CIntTypeShort: case CIntTypeUShort: @@ -742,7 +766,6 @@ uint32_t target_c_type_size_in_bits(const ZigTarget *target, CIntType id) { case OsKFreeBSD: case OsLv2: case OsNetBSD: - case OsOpenBSD: case OsSolaris: case OsHaiku: case OsMinix: @@ -896,3 +919,65 @@ bool target_can_exec(const ZigTarget *host_target, const ZigTarget *guest_target return false; } + +const char *arch_stack_pointer_register_name(const ArchType *arch) { + switch (arch->arch) { + case ZigLLVM_UnknownArch: + zig_unreachable(); + case ZigLLVM_x86: + return "sp"; + case ZigLLVM_x86_64: + return "rsp"; + + case ZigLLVM_aarch64: + case ZigLLVM_arm: + case ZigLLVM_thumb: + case ZigLLVM_aarch64_be: + case ZigLLVM_amdgcn: + case ZigLLVM_amdil: + case ZigLLVM_amdil64: + case ZigLLVM_armeb: + case ZigLLVM_arc: + case ZigLLVM_avr: + case ZigLLVM_bpfeb: + case ZigLLVM_bpfel: + case ZigLLVM_hexagon: + case ZigLLVM_lanai: + case ZigLLVM_hsail: + case ZigLLVM_hsail64: + case ZigLLVM_kalimba: + case ZigLLVM_le32: + case ZigLLVM_le64: + case ZigLLVM_mips: + case ZigLLVM_mips64: + case ZigLLVM_mips64el: + case ZigLLVM_mipsel: + case ZigLLVM_msp430: + case ZigLLVM_nios2: + case ZigLLVM_nvptx: + case ZigLLVM_nvptx64: + case ZigLLVM_ppc64le: + case ZigLLVM_r600: + case ZigLLVM_renderscript32: + case ZigLLVM_renderscript64: + case ZigLLVM_riscv32: + case ZigLLVM_riscv64: + case ZigLLVM_shave: + case ZigLLVM_sparc: + case ZigLLVM_sparcel: + case ZigLLVM_sparcv9: + case ZigLLVM_spir: + case ZigLLVM_spir64: + case ZigLLVM_systemz: + case ZigLLVM_tce: + case ZigLLVM_tcele: + case ZigLLVM_thumbeb: + case ZigLLVM_wasm32: + case ZigLLVM_wasm64: + case ZigLLVM_xcore: + case ZigLLVM_ppc: + case ZigLLVM_ppc64: + zig_panic("TODO populate this table with stack pointer register name for this CPU architecture"); + } + zig_unreachable(); +} diff --git a/src/target.hpp b/src/target.hpp index 614b0627d5..5a118f6d8d 100644 --- a/src/target.hpp +++ b/src/target.hpp @@ -77,6 +77,8 @@ size_t target_arch_count(void); const ArchType *get_target_arch(size_t index); void get_arch_name(char *out_str, const ArchType *arch); +const char *arch_stack_pointer_register_name(const ArchType *arch); + size_t target_vendor_count(void); ZigLLVM_VendorType get_target_vendor(size_t index); diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp index 365b35cdfd..1d3db5567a 100644 --- a/src/tokenizer.cpp +++ b/src/tokenizer.cpp @@ -134,6 +134,7 @@ static const struct ZigKeyword zig_keywords[] = { {"noalias", TokenIdKeywordNoAlias}, {"null", TokenIdKeywordNull}, {"or", TokenIdKeywordOr}, + {"orelse", TokenIdKeywordOrElse}, {"packed", TokenIdKeywordPacked}, {"promise", TokenIdKeywordPromise}, {"pub", TokenIdKeywordPub}, @@ -215,10 +216,11 @@ enum TokenizeState { TokenizeStateSawGreaterThanGreaterThan, TokenizeStateSawDot, TokenizeStateSawDotDot, - TokenizeStateSawQuestionMark, TokenizeStateSawAtSign, TokenizeStateCharCode, TokenizeStateError, + TokenizeStateLBracket, + TokenizeStateLBracketStar, }; @@ -355,12 +357,19 @@ static void end_float_token(Tokenize *t) { // 
Mask the sign bit to 0 since always non-negative lex const uint64_t exp_mask = 0xffffull << exp_shift; - if (shift >= 64) { + // must be special-cased to avoid undefined behavior on shift == 64 + if (shift == 128) { + f_bits.repr[0] = 0; + f_bits.repr[1] = sig_bits[0]; + } else if (shift == 0) { + f_bits.repr[0] = sig_bits[0]; + f_bits.repr[1] = sig_bits[1]; + } else if (shift >= 64) { f_bits.repr[0] = 0; f_bits.repr[1] = sig_bits[0] << (shift - 64); } else { f_bits.repr[0] = sig_bits[0] << shift; - f_bits.repr[1] = ((sig_bits[1] << shift) | (sig_bits[0] >> (64 - shift))); + f_bits.repr[1] = (sig_bits[1] << shift) | (sig_bits[0] >> (64 - shift)); } f_bits.repr[1] &= ~exp_mask; @@ -451,16 +460,21 @@ static const char* get_escape_shorthand(uint8_t c) { static void invalid_char_error(Tokenize *t, uint8_t c) { if (c == '\r') { tokenize_error(t, "invalid carriage return, only '\\n' line endings are supported"); - } else if (isprint(c)) { + return; + } + + const char *sh = get_escape_shorthand(c); + if (sh) { + tokenize_error(t, "invalid character: '%s'", sh); + return; + } + + if (isprint(c)) { tokenize_error(t, "invalid character: '%c'", c); - } else { - const char *sh = get_escape_shorthand(c); - if (sh) { - tokenize_error(t, "invalid character: '%s'", sh); - } else { - tokenize_error(t, "invalid character: '\\x%x'", c); - } + return; } + + tokenize_error(t, "invalid character: '\\x%02x'", c); } void tokenize(Buf *buf, Tokenization *out) { @@ -530,6 +544,10 @@ void tokenize(Buf *buf, Tokenization *out) { begin_token(&t, TokenIdComma); end_token(&t); break; + case '?': + begin_token(&t, TokenIdQuestion); + end_token(&t); + break; case '{': begin_token(&t, TokenIdLBrace); end_token(&t); @@ -539,8 +557,8 @@ void tokenize(Buf *buf, Tokenization *out) { end_token(&t); break; case '[': + t.state = TokenizeStateLBracket; begin_token(&t, TokenIdLBracket); - end_token(&t); break; case ']': begin_token(&t, TokenIdRBracket); @@ -622,33 +640,10 @@ void tokenize(Buf *buf, Tokenization *out) { begin_token(&t, TokenIdDot); t.state = TokenizeStateSawDot; break; - case '?': - begin_token(&t, TokenIdMaybe); - t.state = TokenizeStateSawQuestionMark; - break; default: invalid_char_error(&t, c); } break; - case TokenizeStateSawQuestionMark: - switch (c) { - case '?': - set_token_id(&t, t.cur_tok, TokenIdDoubleQuestion); - end_token(&t); - t.state = TokenizeStateStart; - break; - case '=': - set_token_id(&t, t.cur_tok, TokenIdMaybeAssign); - end_token(&t); - t.state = TokenizeStateStart; - break; - default: - t.pos -= 1; - end_token(&t); - t.state = TokenizeStateStart; - continue; - } - break; case TokenizeStateSawDot: switch (c) { case '.': @@ -852,6 +847,30 @@ void tokenize(Buf *buf, Tokenization *out) { continue; } break; + case TokenizeStateLBracket: + switch (c) { + case '*': + t.state = TokenizeStateLBracketStar; + set_token_id(&t, t.cur_tok, TokenIdBracketStarBracket); + break; + default: + // reinterpret as just an lbracket + t.pos -= 1; + end_token(&t); + t.state = TokenizeStateStart; + continue; + } + break; + case TokenizeStateLBracketStar: + switch (c) { + case ']': + end_token(&t); + t.state = TokenizeStateStart; + break; + default: + invalid_char_error(&t, c); + } + break; case TokenizeStateSawPlusPercent: switch (c) { case '=': @@ -1459,7 +1478,6 @@ void tokenize(Buf *buf, Tokenization *out) { case TokenizeStateSawGreaterThan: case TokenizeStateSawGreaterThanGreaterThan: case TokenizeStateSawDot: - case TokenizeStateSawQuestionMark: case TokenizeStateSawAtSign: case TokenizeStateSawStarPercent: case 
TokenizeStateSawPlusPercent: @@ -1467,12 +1485,14 @@ void tokenize(Buf *buf, Tokenization *out) { case TokenizeStateLineString: case TokenizeStateLineStringEnd: case TokenizeStateSawBarBar: + case TokenizeStateLBracket: end_token(&t); break; case TokenizeStateSawDotDot: case TokenizeStateSawBackslash: case TokenizeStateLineStringContinue: case TokenizeStateLineStringContinueC: + case TokenizeStateLBracketStar: tokenize_error(&t, "unexpected EOF"); break; case TokenizeStateLineComment: @@ -1509,6 +1529,7 @@ const char * token_name(TokenId id) { case TokenIdBitShiftRight: return ">>"; case TokenIdBitShiftRightEq: return ">>="; case TokenIdBitXorEq: return "^="; + case TokenIdBracketStarBracket: return "[*]"; case TokenIdCharLiteral: return "CharLiteral"; case TokenIdCmpEq: return "=="; case TokenIdCmpGreaterOrEq: return ">="; @@ -1521,7 +1542,6 @@ const char * token_name(TokenId id) { case TokenIdDash: return "-"; case TokenIdDivEq: return "/="; case TokenIdDot: return "."; - case TokenIdDoubleQuestion: return "??"; case TokenIdEllipsis2: return ".."; case TokenIdEllipsis3: return "..."; case TokenIdEof: return "EOF"; @@ -1558,6 +1578,7 @@ const char * token_name(TokenId id) { case TokenIdKeywordNoAlias: return "noalias"; case TokenIdKeywordNull: return "null"; case TokenIdKeywordOr: return "or"; + case TokenIdKeywordOrElse: return "orelse"; case TokenIdKeywordPacked: return "packed"; case TokenIdKeywordPromise: return "promise"; case TokenIdKeywordPub: return "pub"; @@ -1580,8 +1601,7 @@ const char * token_name(TokenId id) { case TokenIdLBrace: return "{"; case TokenIdLBracket: return "["; case TokenIdLParen: return "("; - case TokenIdMaybe: return "?"; - case TokenIdMaybeAssign: return "?="; + case TokenIdQuestion: return "?"; case TokenIdMinusEq: return "-="; case TokenIdMinusPercent: return "-%"; case TokenIdMinusPercentEq: return "-%="; diff --git a/src/tokenizer.hpp b/src/tokenizer.hpp index b719293704..75c7feb476 100644 --- a/src/tokenizer.hpp +++ b/src/tokenizer.hpp @@ -28,6 +28,7 @@ enum TokenId { TokenIdBitShiftRight, TokenIdBitShiftRightEq, TokenIdBitXorEq, + TokenIdBracketStarBracket, TokenIdCharLiteral, TokenIdCmpEq, TokenIdCmpGreaterOrEq, @@ -40,7 +41,6 @@ enum TokenId { TokenIdDash, TokenIdDivEq, TokenIdDot, - TokenIdDoubleQuestion, TokenIdEllipsis2, TokenIdEllipsis3, TokenIdEof, @@ -75,6 +75,7 @@ enum TokenId { TokenIdKeywordNoAlias, TokenIdKeywordNull, TokenIdKeywordOr, + TokenIdKeywordOrElse, TokenIdKeywordPacked, TokenIdKeywordPromise, TokenIdKeywordPub, @@ -99,8 +100,7 @@ enum TokenId { TokenIdLBrace, TokenIdLBracket, TokenIdLParen, - TokenIdMaybe, - TokenIdMaybeAssign, + TokenIdQuestion, TokenIdMinusEq, TokenIdMinusPercent, TokenIdMinusPercentEq, @@ -169,6 +169,8 @@ struct Token { TokenCharLit char_lit; } data; }; +// work around conflicting name Token which is also found in libclang +typedef Token ZigToken; struct Tokenization { ZigList<Token> *tokens; diff --git a/src/translate_c.cpp b/src/translate_c.cpp index 965a8963bd..735a671bcc 100644 --- a/src/translate_c.cpp +++ b/src/translate_c.cpp @@ -247,6 +247,12 @@ static AstNode *trans_create_node_field_access_str(Context *c, AstNode *containe return trans_create_node_field_access(c, container, buf_create_from_str(field_name)); } +static AstNode *trans_create_node_ptr_deref(Context *c, AstNode *child_node) { + AstNode *node = trans_create_node(c, NodeTypePtrDeref); + node->data.ptr_deref_expr.target = child_node; + return node; +} + static AstNode *trans_create_node_prefix_op(Context *c, PrefixOp op, AstNode *child_node) 
{ AstNode *node = trans_create_node(c, NodeTypePrefixOpExpr); node->data.prefix_op_expr.prefix_op = op; @@ -254,6 +260,12 @@ static AstNode *trans_create_node_prefix_op(Context *c, PrefixOp op, AstNode *ch return node; } +static AstNode *trans_create_node_unwrap_null(Context *c, AstNode *child_node) { + AstNode *node = trans_create_node(c, NodeTypeUnwrapOptional); + node->data.unwrap_optional.expr = child_node; + return node; +} + static AstNode *trans_create_node_bin_op(Context *c, AstNode *lhs_node, BinOpType op, AstNode *rhs_node) { AstNode *node = trans_create_node(c, NodeTypeBinOpExpr); node->data.bin_op_expr.op1 = lhs_node; @@ -270,11 +282,21 @@ static AstNode *maybe_suppress_result(Context *c, ResultUsed result_used, AstNod node); } -static AstNode *trans_create_node_addr_of(Context *c, bool is_const, bool is_volatile, AstNode *child_node) { - AstNode *node = trans_create_node(c, NodeTypeAddrOfExpr); - node->data.addr_of_expr.is_const = is_const; - node->data.addr_of_expr.is_volatile = is_volatile; - node->data.addr_of_expr.op_expr = child_node; +static AstNode *trans_create_node_ptr_type(Context *c, bool is_const, bool is_volatile, AstNode *child_node, PtrLen ptr_len) { + AstNode *node = trans_create_node(c, NodeTypePointerType); + node->data.pointer_type.star_token = allocate<ZigToken>(1); + node->data.pointer_type.star_token->id = (ptr_len == PtrLenSingle) ? TokenIdStar : TokenIdBracketStarBracket; + node->data.pointer_type.is_const = is_const; + node->data.pointer_type.is_volatile = is_volatile; + node->data.pointer_type.op_expr = child_node; + return node; +} + +static AstNode *trans_create_node_addr_of(Context *c, AstNode *child_node) { + AstNode *node = trans_create_node(c, NodeTypePrefixOpExpr); + node->data.prefix_op_expr.prefix_op = PrefixOpAddrOf; + node->data.prefix_op_expr.primary_expr = child_node; return node; } @@ -366,7 +388,7 @@ static AstNode *trans_create_node_inline_fn(Context *c, Buf *fn_name, AstNode *r fn_def->data.fn_def.fn_proto = fn_proto; fn_proto->data.fn_proto.fn_def_node = fn_def; - AstNode *unwrap_node = trans_create_node_prefix_op(c, PrefixOpUnwrapMaybe, ref_node); + AstNode *unwrap_node = trans_create_node_unwrap_null(c, ref_node); AstNode *fn_call_node = trans_create_node(c, NodeTypeFnCallExpr); fn_call_node->data.fn_call_expr.fn_ref_expr = unwrap_node; @@ -393,10 +415,6 @@ static AstNode *trans_create_node_inline_fn(Context *c, Buf *fn_name, AstNode *r return fn_def; } -static AstNode *trans_create_node_unwrap_null(Context *c, AstNode *child) { - return trans_create_node_prefix_op(c, PrefixOpUnwrapMaybe, child); -} - static AstNode *get_global(Context *c, Buf *name) { { auto entry = c->global_table.maybe_get(name); @@ -409,7 +427,7 @@ static AstNode *get_global(Context *c, Buf *name) { if (entry) return entry->value; } - if (c->codegen->primitive_type_table.maybe_get(name) != nullptr) { + if (get_primitive_type(c->codegen, name) != nullptr) { return trans_create_node_symbol(c, name); } return nullptr; } @@ -718,6 +736,30 @@ static bool qual_type_has_wrapping_overflow(Context *c, QualType qt) { } } +static bool type_is_opaque(Context *c, const Type *ty, const SourceLocation &source_loc) { + switch (ty->getTypeClass()) { + case Type::Builtin: { + const BuiltinType *builtin_ty = static_cast<const BuiltinType*>(ty); + return builtin_ty->getKind() == BuiltinType::Void; + } + case Type::Record: { + const RecordType *record_ty = static_cast<const RecordType*>(ty); + return record_ty->getDecl()->getDefinition()
== nullptr; + } + case Type::Elaborated: { + const ElaboratedType *elaborated_ty = static_cast<const ElaboratedType*>(ty); + return type_is_opaque(c, elaborated_ty->getNamedType().getTypePtr(), source_loc); + } + case Type::Typedef: { + const TypedefType *typedef_ty = static_cast<const TypedefType*>(ty); + const TypedefNameDecl *typedef_decl = typedef_ty->getDecl(); + return type_is_opaque(c, typedef_decl->getUnderlyingType().getTypePtr(), source_loc); + } + default: + return false; + } +} + static AstNode *trans_type(Context *c, const Type *ty, const SourceLocation &source_loc) { switch (ty->getTypeClass()) { case Type::Builtin: @@ -839,12 +881,14 @@ static AstNode *trans_type(Context *c, const Type *ty, const SourceLocation &sou } if (qual_type_child_is_fn_proto(child_qt)) { - return trans_create_node_prefix_op(c, PrefixOpMaybe, child_node); + return trans_create_node_prefix_op(c, PrefixOpOptional, child_node); } - AstNode *pointer_node = trans_create_node_addr_of(c, child_qt.isConstQualified(), - child_qt.isVolatileQualified(), child_node); - return trans_create_node_prefix_op(c, PrefixOpMaybe, pointer_node); + PtrLen ptr_len = type_is_opaque(c, child_qt.getTypePtr(), source_loc) ? PtrLenSingle : PtrLenUnknown; + + AstNode *pointer_node = trans_create_node_ptr_type(c, child_qt.isConstQualified(), + child_qt.isVolatileQualified(), child_node, ptr_len); + return trans_create_node_prefix_op(c, PrefixOpOptional, pointer_node); } case Type::Typedef: { @@ -1027,8 +1071,8 @@ static AstNode *trans_type(Context *c, const Type *ty, const SourceLocation &sou emit_warning(c, source_loc, "unresolved array element type"); return nullptr; } - AstNode *pointer_node = trans_create_node_addr_of(c, child_qt.isConstQualified(), - child_qt.isVolatileQualified(), child_type_node); + AstNode *pointer_node = trans_create_node_ptr_type(c, child_qt.isConstQualified(), + child_qt.isVolatileQualified(), child_type_node, PtrLenUnknown); return pointer_node; } case Type::BlockPointer: @@ -1396,7 +1440,7 @@ static AstNode *trans_create_compound_assign_shift(Context *c, ResultUsed result // const _ref = &lhs; AstNode *lhs = trans_expr(c, ResultUsedYes, &child_scope->base, stmt->getLHS(), TransLValue); if (lhs == nullptr) return nullptr; - AstNode *addr_of_lhs = trans_create_node_addr_of(c, false, false, lhs); + AstNode *addr_of_lhs = trans_create_node_addr_of(c, lhs); // TODO: avoid name collisions with generated variable names Buf* tmp_var_name = buf_create_from_str("_ref"); AstNode *tmp_var_decl = trans_create_node_var_decl_local(c, true, tmp_var_name, nullptr, addr_of_lhs); @@ -1412,8 +1456,7 @@ static AstNode *trans_create_compound_assign_shift(Context *c, ResultUsed result AstNode *operation_type_cast = trans_c_cast(c, rhs_location, stmt->getComputationLHSType(), stmt->getLHS()->getType(), - trans_create_node_prefix_op(c, PrefixOpDereference, - trans_create_node_symbol(c, tmp_var_name))); + trans_create_node_ptr_deref(c, trans_create_node_symbol(c, tmp_var_name))); // result_type(... >> u5(rhs)) AstNode *result_type_cast = trans_c_cast(c, rhs_location, @@ -1426,7 +1469,7 @@ static AstNode *trans_create_compound_assign_shift(Context *c, ResultUsed result // *_ref = ... 
AstNode *assign_statement = trans_create_node_bin_op(c, - trans_create_node_prefix_op(c, PrefixOpDereference, + trans_create_node_ptr_deref(c, trans_create_node_symbol(c, tmp_var_name)), BinOpTypeAssign, result_type_cast); @@ -1436,7 +1479,7 @@ static AstNode *trans_create_compound_assign_shift(Context *c, ResultUsed result // break :x *_ref child_scope->node->data.block.statements.append( trans_create_node_break(c, label_name, - trans_create_node_prefix_op(c, PrefixOpDereference, + trans_create_node_ptr_deref(c, trans_create_node_symbol(c, tmp_var_name)))); } @@ -1471,7 +1514,7 @@ static AstNode *trans_create_compound_assign(Context *c, ResultUsed result_used, // const _ref = &lhs; AstNode *lhs = trans_expr(c, ResultUsedYes, &child_scope->base, stmt->getLHS(), TransLValue); if (lhs == nullptr) return nullptr; - AstNode *addr_of_lhs = trans_create_node_addr_of(c, false, false, lhs); + AstNode *addr_of_lhs = trans_create_node_addr_of(c, lhs); // TODO: avoid name collisions with generated variable names Buf* tmp_var_name = buf_create_from_str("_ref"); AstNode *tmp_var_decl = trans_create_node_var_decl_local(c, true, tmp_var_name, nullptr, addr_of_lhs); @@ -1483,11 +1526,11 @@ static AstNode *trans_create_compound_assign(Context *c, ResultUsed result_used, if (rhs == nullptr) return nullptr; AstNode *assign_statement = trans_create_node_bin_op(c, - trans_create_node_prefix_op(c, PrefixOpDereference, + trans_create_node_ptr_deref(c, trans_create_node_symbol(c, tmp_var_name)), BinOpTypeAssign, trans_create_node_bin_op(c, - trans_create_node_prefix_op(c, PrefixOpDereference, + trans_create_node_ptr_deref(c, trans_create_node_symbol(c, tmp_var_name)), bin_op, rhs)); @@ -1496,7 +1539,7 @@ static AstNode *trans_create_compound_assign(Context *c, ResultUsed result_used, // break :x *_ref child_scope->node->data.block.statements.append( trans_create_node_break(c, label_name, - trans_create_node_prefix_op(c, PrefixOpDereference, + trans_create_node_ptr_deref(c, trans_create_node_symbol(c, tmp_var_name)))); return child_scope->node; @@ -1808,7 +1851,7 @@ static AstNode *trans_create_post_crement(Context *c, ResultUsed result_used, Tr // const _ref = &expr; AstNode *expr = trans_expr(c, ResultUsedYes, &child_scope->base, op_expr, TransLValue); if (expr == nullptr) return nullptr; - AstNode *addr_of_expr = trans_create_node_addr_of(c, false, false, expr); + AstNode *addr_of_expr = trans_create_node_addr_of(c, expr); // TODO: avoid name collisions with generated variable names Buf* ref_var_name = buf_create_from_str("_ref"); AstNode *ref_var_decl = trans_create_node_var_decl_local(c, true, ref_var_name, nullptr, addr_of_expr); @@ -1817,13 +1860,13 @@ static AstNode *trans_create_post_crement(Context *c, ResultUsed result_used, Tr // const _tmp = *_ref; Buf* tmp_var_name = buf_create_from_str("_tmp"); AstNode *tmp_var_decl = trans_create_node_var_decl_local(c, true, tmp_var_name, nullptr, - trans_create_node_prefix_op(c, PrefixOpDereference, + trans_create_node_ptr_deref(c, trans_create_node_symbol(c, ref_var_name))); child_scope->node->data.block.statements.append(tmp_var_decl); // *_ref += 1; AstNode *assign_statement = trans_create_node_bin_op(c, - trans_create_node_prefix_op(c, PrefixOpDereference, + trans_create_node_ptr_deref(c, trans_create_node_symbol(c, ref_var_name)), assign_op, trans_create_node_unsigned(c, 1)); @@ -1863,7 +1906,7 @@ static AstNode *trans_create_pre_crement(Context *c, ResultUsed result_used, Tra // const _ref = &expr; AstNode *expr = trans_expr(c, ResultUsedYes, 
&child_scope->base, op_expr, TransLValue); if (expr == nullptr) return nullptr; - AstNode *addr_of_expr = trans_create_node_addr_of(c, false, false, expr); + AstNode *addr_of_expr = trans_create_node_addr_of(c, expr); // TODO: avoid name collisions with generated variable names Buf* ref_var_name = buf_create_from_str("_ref"); AstNode *ref_var_decl = trans_create_node_var_decl_local(c, true, ref_var_name, nullptr, addr_of_expr); @@ -1871,14 +1914,14 @@ static AstNode *trans_create_pre_crement(Context *c, ResultUsed result_used, Tra // *_ref += 1; AstNode *assign_statement = trans_create_node_bin_op(c, - trans_create_node_prefix_op(c, PrefixOpDereference, + trans_create_node_ptr_deref(c, trans_create_node_symbol(c, ref_var_name)), assign_op, trans_create_node_unsigned(c, 1)); child_scope->node->data.block.statements.append(assign_statement); // break :x *_ref - AstNode *deref_expr = trans_create_node_prefix_op(c, PrefixOpDereference, + AstNode *deref_expr = trans_create_node_ptr_deref(c, trans_create_node_symbol(c, ref_var_name)); child_scope->node->data.block.statements.append(trans_create_node_break(c, label_name, deref_expr)); @@ -1912,7 +1955,7 @@ static AstNode *trans_unary_operator(Context *c, ResultUsed result_used, TransSc AstNode *value_node = trans_expr(c, result_used, scope, stmt->getSubExpr(), TransLValue); if (value_node == nullptr) return value_node; - return trans_create_node_addr_of(c, false, false, value_node); + return trans_create_node_addr_of(c, value_node); } case UO_Deref: { @@ -1922,8 +1965,8 @@ static AstNode *trans_unary_operator(Context *c, ResultUsed result_used, TransSc bool is_fn_ptr = qual_type_is_fn_ptr(stmt->getSubExpr()->getType()); if (is_fn_ptr) return value_node; - AstNode *unwrapped = trans_create_node_prefix_op(c, PrefixOpUnwrapMaybe, value_node); - return trans_create_node_prefix_op(c, PrefixOpDereference, unwrapped); + AstNode *unwrapped = trans_create_node_unwrap_null(c, value_node); + return trans_create_node_ptr_deref(c, unwrapped); } case UO_Plus: emit_warning(c, stmt->getLocStart(), "TODO handle C translation UO_Plus"); @@ -2546,7 +2589,7 @@ static AstNode *trans_call_expr(Context *c, ResultUsed result_used, TransScope * } } if (callee_node == nullptr) { - callee_node = trans_create_node_prefix_op(c, PrefixOpUnwrapMaybe, callee_raw_node); + callee_node = trans_create_node_unwrap_null(c, callee_raw_node); } } else { callee_node = callee_raw_node; @@ -2664,7 +2707,9 @@ static AstNode *trans_do_loop(Context *c, TransScope *parent_scope, const DoStmt AstNode *child_statement; child_scope = trans_stmt(c, &child_block_scope->base, stmt->getBody(), &child_statement); if (child_scope == nullptr) return nullptr; - body_node->data.block.statements.append(child_statement); + if (child_statement != nullptr) { + body_node->data.block.statements.append(child_statement); + } } // if (!cond) break; @@ -2674,6 +2719,7 @@ static AstNode *trans_do_loop(Context *c, TransScope *parent_scope, const DoStmt terminator_node->data.if_bool_expr.condition = trans_create_node_prefix_op(c, PrefixOpBoolNot, condition_node); terminator_node->data.if_bool_expr.then_block = trans_create_node(c, NodeTypeBreak); + assert(terminator_node != nullptr); body_node->data.block.statements.append(terminator_node); while_scope->node->data.while_expr.body = body_node; @@ -2737,7 +2783,12 @@ static AstNode *trans_for_loop(Context *c, TransScope *parent_scope, const ForSt TransScope *body_scope = trans_stmt(c, &while_scope->base, stmt->getBody(), &body_statement); if (body_scope == nullptr) 
return nullptr; - while_scope->node->data.while_expr.body = body_statement; + + if (body_statement == nullptr) { + while_scope->node->data.while_expr.body = trans_create_node(c, NodeTypeBlock); + } else { + while_scope->node->data.while_expr.body = body_statement; + } return loop_block_node; } @@ -2972,9 +3023,14 @@ static int trans_stmt_extra(Context *c, TransScope *scope, const Stmt *stmt, trans_unary_operator(c, result_used, scope, (const UnaryOperator *)stmt)); case Stmt::DeclStmtClass: return trans_local_declaration(c, scope, (const DeclStmt *)stmt, out_node, out_child_scope); - case Stmt::WhileStmtClass: - return wrap_stmt(out_node, out_child_scope, scope, - trans_while_loop(c, scope, (const WhileStmt *)stmt)); + case Stmt::WhileStmtClass: { + AstNode *while_node = trans_while_loop(c, scope, (const WhileStmt *)stmt); + assert(while_node->type == NodeTypeWhileExpr); + if (while_node->data.while_expr.body == nullptr) { + while_node->data.while_expr.body = trans_create_node(c, NodeTypeBlock); + } + return wrap_stmt(out_node, out_child_scope, scope, while_node); + } case Stmt::IfStmtClass: return wrap_stmt(out_node, out_child_scope, scope, trans_if_statement(c, scope, (const IfStmt *)stmt)); @@ -2997,12 +3053,18 @@ static int trans_stmt_extra(Context *c, TransScope *scope, const Stmt *stmt, case Stmt::UnaryExprOrTypeTraitExprClass: return wrap_stmt(out_node, out_child_scope, scope, trans_unary_expr_or_type_trait_expr(c, scope, (const UnaryExprOrTypeTraitExpr *)stmt)); - case Stmt::DoStmtClass: - return wrap_stmt(out_node, out_child_scope, scope, - trans_do_loop(c, scope, (const DoStmt *)stmt)); - case Stmt::ForStmtClass: - return wrap_stmt(out_node, out_child_scope, scope, - trans_for_loop(c, scope, (const ForStmt *)stmt)); + case Stmt::DoStmtClass: { + AstNode *while_node = trans_do_loop(c, scope, (const DoStmt *)stmt); + assert(while_node->type == NodeTypeWhileExpr); + if (while_node->data.while_expr.body == nullptr) { + while_node->data.while_expr.body = trans_create_node(c, NodeTypeBlock); + } + return wrap_stmt(out_node, out_child_scope, scope, while_node); + } + case Stmt::ForStmtClass: { + AstNode *node = trans_for_loop(c, scope, (const ForStmt *)stmt); + return wrap_stmt(out_node, out_child_scope, scope, node); + } case Stmt::StringLiteralClass: return wrap_stmt(out_node, out_child_scope, scope, trans_string_literal(c, scope, (const StringLiteral *)stmt)); @@ -3667,6 +3729,7 @@ static AstNode *resolve_typedef_decl(Context *c, const TypedefNameDecl *typedef_ if (existing_entry) { return existing_entry->value; } + QualType child_qt = typedef_decl->getUnderlyingType(); Buf *type_name = buf_create_from_str(decl_name(typedef_decl)); @@ -3700,16 +3763,19 @@ static AstNode *resolve_typedef_decl(Context *c, const TypedefNameDecl *typedef_ // use the name of this typedef // TODO + // trans_qual_type here might cause us to look at this typedef again so we put the item in the map first + AstNode *symbol_node = trans_create_node_symbol(c, type_name); + c->decl_table.put(typedef_decl->getCanonicalDecl(), symbol_node); + AstNode *type_node = trans_qual_type(c, child_qt, typedef_decl->getLocation()); if (type_node == nullptr) { emit_warning(c, typedef_decl->getLocation(), "typedef %s - unresolved child type", buf_ptr(type_name)); c->decl_table.put(typedef_decl, nullptr); + // TODO add global var with type_name equal to @compileError("unable to resolve C type") return nullptr; } add_global_var(c, type_name, type_node); - AstNode *symbol_node = trans_create_node_symbol(c, type_name); - 
c->decl_table.put(typedef_decl->getCanonicalDecl(), symbol_node); return symbol_node; } @@ -3744,6 +3810,7 @@ static AstNode *resolve_enum_decl(Context *c, const EnumDecl *enum_decl) { return demote_enum_to_opaque(c, enum_decl, full_type_name, bare_name); } + bool pure_enum = true; uint32_t field_count = 0; for (auto it = enum_def->enumerator_begin(), @@ -3755,84 +3822,53 @@ static AstNode *resolve_enum_decl(Context *c, const EnumDecl *enum_decl) { pure_enum = false; } } - AstNode *tag_int_type = trans_qual_type(c, enum_decl->getIntegerType(), enum_decl->getLocation()); assert(tag_int_type); - if (pure_enum) { - AstNode *enum_node = trans_create_node(c, NodeTypeContainerDecl); - enum_node->data.container_decl.kind = ContainerKindEnum; - enum_node->data.container_decl.layout = ContainerLayoutExtern; - // TODO only emit this tag type if the enum tag type is not the default. - // I don't know what the default is, need to figure out how clang is deciding. - // it appears to at least be different across gcc/msvc - if (!c_is_builtin_type(c, enum_decl->getIntegerType(), BuiltinType::UInt) && - !c_is_builtin_type(c, enum_decl->getIntegerType(), BuiltinType::Int)) - { - enum_node->data.container_decl.init_arg_expr = tag_int_type; - } - - enum_node->data.container_decl.fields.resize(field_count); - uint32_t i = 0; - for (auto it = enum_def->enumerator_begin(), - it_end = enum_def->enumerator_end(); - it != it_end; ++it, i += 1) - { - const EnumConstantDecl *enum_const = *it; - - Buf *enum_val_name = buf_create_from_str(decl_name(enum_const)); - Buf *field_name; - if (bare_name != nullptr && buf_starts_with_buf(enum_val_name, bare_name)) { - field_name = buf_slice(enum_val_name, buf_len(bare_name), buf_len(enum_val_name)); - } else { - field_name = enum_val_name; - } - - AstNode *field_node = trans_create_node(c, NodeTypeStructField); - field_node->data.struct_field.name = field_name; - field_node->data.struct_field.type = nullptr; - enum_node->data.container_decl.fields.items[i] = field_node; - - // in C each enum value is in the global namespace. so we put them there too. - // at this point we can rely on the enum emitting successfully - if (is_anonymous) { - AstNode *lit_node = trans_create_node_unsigned(c, i); - add_global_var(c, enum_val_name, lit_node); - } else { - AstNode *field_access_node = trans_create_node_field_access(c, - trans_create_node_symbol(c, full_type_name), field_name); - add_global_var(c, enum_val_name, field_access_node); - } - } - - if (is_anonymous) { - c->decl_table.put(enum_decl->getCanonicalDecl(), enum_node); - return enum_node; - } else { - AstNode *symbol_node = trans_create_node_symbol(c, full_type_name); - add_global_weak_alias(c, bare_name, full_type_name); - add_global_var(c, full_type_name, enum_node); - c->decl_table.put(enum_decl->getCanonicalDecl(), symbol_node); - return enum_node; - } + AstNode *enum_node = trans_create_node(c, NodeTypeContainerDecl); + enum_node->data.container_decl.kind = ContainerKindEnum; + enum_node->data.container_decl.layout = ContainerLayoutExtern; + // TODO only emit this tag type if the enum tag type is not the default. + // I don't know what the default is, need to figure out how clang is deciding. 
+ // it appears to at least be different across gcc/msvc + if (!c_is_builtin_type(c, enum_decl->getIntegerType(), BuiltinType::UInt) && + !c_is_builtin_type(c, enum_decl->getIntegerType(), BuiltinType::Int)) + { + enum_node->data.container_decl.init_arg_expr = tag_int_type; } - - // TODO after issue #305 is solved, make this be an enum with tag_int_type - // as the integer type and set the custom enum values - AstNode *enum_node = tag_int_type; - - - // add variables for all the values with enum_node + enum_node->data.container_decl.fields.resize(field_count); + uint32_t i = 0; for (auto it = enum_def->enumerator_begin(), it_end = enum_def->enumerator_end(); - it != it_end; ++it) + it != it_end; ++it, i += 1) { const EnumConstantDecl *enum_const = *it; Buf *enum_val_name = buf_create_from_str(decl_name(enum_const)); - AstNode *int_node = trans_create_node_apint(c, enum_const->getInitVal()); - AstNode *var_node = add_global_var(c, enum_val_name, int_node); - var_node->data.variable_declaration.type = tag_int_type; + Buf *field_name; + if (bare_name != nullptr && buf_starts_with_buf(enum_val_name, bare_name)) { + field_name = buf_slice(enum_val_name, buf_len(bare_name), buf_len(enum_val_name)); + } else { + field_name = enum_val_name; + } + + AstNode *int_node = pure_enum && !is_anonymous ? nullptr : trans_create_node_apint(c, enum_const->getInitVal()); + AstNode *field_node = trans_create_node(c, NodeTypeStructField); + field_node->data.struct_field.name = field_name; + field_node->data.struct_field.type = nullptr; + field_node->data.struct_field.value = int_node; + enum_node->data.container_decl.fields.items[i] = field_node; + + // in C each enum value is in the global namespace. so we put them there too. + // at this point we can rely on the enum emitting successfully + if (is_anonymous) { + Buf *enum_val_name = buf_create_from_str(decl_name(enum_const)); + add_global_var(c, enum_val_name, int_node); + } else { + AstNode *field_access_node = trans_create_node_field_access(c, + trans_create_node_symbol(c, full_type_name), field_name); + add_global_var(c, enum_val_name, field_access_node); + } } if (is_anonymous) { @@ -3843,7 +3879,7 @@ static AstNode *resolve_enum_decl(Context *c, const EnumDecl *enum_decl) { add_global_weak_alias(c, bare_name, full_type_name); add_global_var(c, full_type_name, enum_node); c->decl_table.put(enum_decl->getCanonicalDecl(), symbol_node); - return symbol_node; + return enum_node; } } @@ -4286,7 +4322,7 @@ static AstNode *trans_lookup_ast_maybe_fn(Context *c, AstNode *ref_node) { return nullptr; if (prefix_node->type != NodeTypePrefixOpExpr) return nullptr; - if (prefix_node->data.prefix_op_expr.prefix_op != PrefixOpMaybe) + if (prefix_node->data.prefix_op_expr.prefix_op != PrefixOpOptional) return nullptr; AstNode *fn_proto_node = prefix_node->data.prefix_op_expr.primary_expr; @@ -4462,34 +4498,52 @@ static AstNode *parse_ctok_suffix_op_expr(Context *c, CTokenize *ctok, size_t *t } else if (first_tok->id == CTokIdAsterisk) { *tok_i += 1; - node = trans_create_node_addr_of(c, false, false, node); + node = trans_create_node_ptr_type(c, false, false, node, PtrLenUnknown); } else { return node; } } } -static PrefixOp ctok_to_prefix_op(CTok *token) { - switch (token->id) { - case CTokIdBang: return PrefixOpBoolNot; - case CTokIdMinus: return PrefixOpNegation; - case CTokIdTilde: return PrefixOpBinNot; - case CTokIdAsterisk: return PrefixOpDereference; - default: return PrefixOpInvalid; - } -} static AstNode *parse_ctok_prefix_op_expr(Context *c, CTokenize *ctok, 
size_t *tok_i) { CTok *op_tok = &ctok->tokens.at(*tok_i); - PrefixOp prefix_op = ctok_to_prefix_op(op_tok); - if (prefix_op == PrefixOpInvalid) { - return parse_ctok_suffix_op_expr(c, ctok, tok_i); - } - *tok_i += 1; - AstNode *prefix_op_expr = parse_ctok_prefix_op_expr(c, ctok, tok_i); - if (prefix_op_expr == nullptr) - return nullptr; - return trans_create_node_prefix_op(c, prefix_op, prefix_op_expr); + switch (op_tok->id) { + case CTokIdBang: + { + *tok_i += 1; + AstNode *prefix_op_expr = parse_ctok_prefix_op_expr(c, ctok, tok_i); + if (prefix_op_expr == nullptr) + return nullptr; + return trans_create_node_prefix_op(c, PrefixOpBoolNot, prefix_op_expr); + } + case CTokIdMinus: + { + *tok_i += 1; + AstNode *prefix_op_expr = parse_ctok_prefix_op_expr(c, ctok, tok_i); + if (prefix_op_expr == nullptr) + return nullptr; + return trans_create_node_prefix_op(c, PrefixOpNegation, prefix_op_expr); + } + case CTokIdTilde: + { + *tok_i += 1; + AstNode *prefix_op_expr = parse_ctok_prefix_op_expr(c, ctok, tok_i); + if (prefix_op_expr == nullptr) + return nullptr; + return trans_create_node_prefix_op(c, PrefixOpBinNot, prefix_op_expr); + } + case CTokIdAsterisk: + { + *tok_i += 1; + AstNode *prefix_op_expr = parse_ctok_prefix_op_expr(c, ctok, tok_i); + if (prefix_op_expr == nullptr) + return nullptr; + return trans_create_node_ptr_deref(c, prefix_op_expr); + } + default: + return parse_ctok_suffix_op_expr(c, ctok, tok_i); + } } static void process_macro(Context *c, CTokenize *ctok, Buf *name, const char *char_ptr) { diff --git a/src/util.hpp b/src/util.hpp index ae33cb84af..b0402137bd 100644 --- a/src/util.hpp +++ b/src/util.hpp @@ -31,6 +31,8 @@ #endif +#include "softfloat.hpp" + #define BREAKPOINT __asm("int $0x03") ATTRIBUTE_COLD @@ -38,11 +40,11 @@ ATTRIBUTE_NORETURN ATTRIBUTE_PRINTF(1, 2) void zig_panic(const char *format, ...); -ATTRIBUTE_COLD -ATTRIBUTE_NORETURN -static inline void zig_unreachable(void) { - zig_panic("unreachable"); -} +#ifdef WIN32 +#define __func__ __FUNCTION__ +#endif + +#define zig_unreachable() zig_panic("unreachable: %s:%s:%d", __FILE__, __func__, __LINE__) #if defined(_MSC_VER) static inline int clzll(unsigned long long mask) { @@ -65,6 +67,11 @@ static inline int clzll(unsigned long long mask) { template<typename T> ATTRIBUTE_RETURNS_NOALIAS static inline T *allocate_nonzero(size_t count) { +#ifndef NDEBUG + // make behavior when size == 0 portable + if (count == 0) + return nullptr; +#endif T *ptr = reinterpret_cast<T*>(malloc(count * sizeof(T))); if (!ptr) zig_panic("allocation failed"); @@ -73,6 +80,11 @@ ATTRIBUTE_RETURNS_NOALIAS static inline T *allocate_nonzero(size_t count) { template<typename T> ATTRIBUTE_RETURNS_NOALIAS static inline T *allocate(size_t count) { +#ifndef NDEBUG + // make behavior when size == 0 portable + if (count == 0) + return nullptr; +#endif T *ptr = reinterpret_cast<T*>(calloc(count, sizeof(T))); if (!ptr) zig_panic("allocation failed"); @@ -93,9 +105,7 @@ static inline void safe_memcpy(T *dest, const T *src, size_t count) { template<typename T> static inline T *reallocate(T *old, size_t old_count, size_t new_count) { - T *ptr = reinterpret_cast<T*>(realloc(old, new_count * sizeof(T))); - if (!ptr) - zig_panic("allocation failed"); + T *ptr = reallocate_nonzero(old, old_count, new_count); if (new_count > old_count) { memset(&ptr[old_count], 0, (new_count - old_count) * sizeof(T)); } @@ -104,6 +114,11 @@ static inline T *reallocate(T *old, size_t old_count, size_t new_count) { template<typename T> static inline T *reallocate_nonzero(T 
*old, size_t old_count, size_t new_count) { +#ifndef NDEBUG + // make behavior when size == 0 portable + if (new_count == 0 && old == nullptr) + return nullptr; +#endif T *ptr = reinterpret_cast<T*>(realloc(old, new_count * sizeof(T))); if (!ptr) zig_panic("allocation failed"); @@ -152,4 +167,21 @@ static inline uint8_t log2_u64(uint64_t x) { return (63 - clzll(x)); } +static inline float16_t zig_double_to_f16(double x) { + float64_t y; + static_assert(sizeof(x) == sizeof(y), ""); + memcpy(&y, &x, sizeof(x)); + return f64_to_f16(y); +} + + +// Return value is safe to coerce to float even when |x| is NaN or Infinity. +static inline double zig_f16_to_double(float16_t x) { + float64_t y = f16_to_f64(x); + double z; + static_assert(sizeof(y) == sizeof(z), ""); + memcpy(&z, &y, sizeof(y)); + return z; +} + #endif diff --git a/src/windows_sdk.cpp b/src/windows_sdk.cpp new file mode 100644 index 0000000000..0f9d0fc301 --- /dev/null +++ b/src/windows_sdk.cpp @@ -0,0 +1,352 @@ +/* + * Copyright (c) 2018 Andrew Kelley + * + * This file is part of zig, which is MIT licensed. + * See http://opensource.org/licenses/MIT + */ + +#include "windows_sdk.h" + +#if defined(_WIN32) + +#include "windows_com.hpp" +#include <inttypes.h> +#include <assert.h> + +struct ZigWindowsSDKPrivate { + ZigWindowsSDK base; +}; + +enum NativeArch { + NativeArchArm, + NativeArchi386, + NativeArchx86_64, +}; + +#if defined(_M_ARM) || defined(__arm__) +static const NativeArch native_arch = NativeArchArm; +#endif +#if defined(_M_IX86) || defined(__i386__) +static const NativeArch native_arch = NativeArchi386; +#endif +#if defined(_M_X64) || defined(__x86_64__) +static const NativeArch native_arch = NativeArchx86_64; +#endif + +void zig_free_windows_sdk(struct ZigWindowsSDK *sdk) { + if (sdk == nullptr) { + return; + } + free((void*)sdk->path10_ptr); + free((void*)sdk->version10_ptr); + free((void*)sdk->path81_ptr); + free((void*)sdk->version81_ptr); + free((void*)sdk->msvc_lib_dir_ptr); +} + +static ZigFindWindowsSdkError find_msvc_lib_dir(ZigWindowsSDKPrivate *priv) { + //COM Smart Pointers require explicit scope + { + HRESULT rc = CoInitializeEx(NULL, COINIT_MULTITHREADED); + if (rc != S_OK && rc != S_FALSE) { + goto com_done; + } + + //This COM class is installed when VS2017 is installed + ISetupConfigurationPtr setup_config; + rc = setup_config.CreateInstance(__uuidof(SetupConfiguration)); + if (rc != S_OK) { + goto com_done; + } + + IEnumSetupInstancesPtr all_instances; + rc = setup_config->EnumInstances(&all_instances); + if (rc != S_OK) { + goto com_done; + } + + ISetupInstance* curr_instance; + ULONG found_inst; + while ((rc = all_instances->Next(1, &curr_instance, &found_inst)) == S_OK) { + BSTR bstr_inst_path; + rc = curr_instance->GetInstallationPath(&bstr_inst_path); + if (rc != S_OK) { + goto com_done; + } + //BSTRs are UTF-16 encoded, so we need to convert the string & adjust the length + //TODO call an actual function to do this + UINT bstr_path_len = *((UINT*)bstr_inst_path - 1); + ULONG tmp_path_len = bstr_path_len / 2 + 1; + char* conv_path = (char*)bstr_inst_path; + // TODO don't use alloca + char *tmp_path = (char*)alloca(tmp_path_len); + memset(tmp_path, 0, tmp_path_len); + uint32_t c = 0; + for (uint32_t i = 0; i < bstr_path_len; i += 2) { + tmp_path[c] = conv_path[i]; + ++c; + assert(c != tmp_path_len); + } + char output_path[4096]; + output_path[0] = 0; + char *out_append_ptr = output_path; + + out_append_ptr += sprintf(out_append_ptr, "%s\\", tmp_path); + + char tmp_buf[4096]; + sprintf(tmp_buf, "%s%s", output_path,
"VC\\Auxiliary\\Build\\Microsoft.VCToolsVersion.default.txt"); + FILE* tools_file = fopen(tmp_buf, "rb"); + if (!tools_file) { + goto com_done; + } + memset(tmp_path, 0, tmp_path_len); + fgets(tmp_path, tmp_path_len, tools_file); + strtok(tmp_path, " \r\n"); + fclose(tools_file); + out_append_ptr += sprintf(out_append_ptr, "VC\\Tools\\MSVC\\%s\\lib\\", tmp_path); + switch (native_arch) { + case NativeArchi386: + out_append_ptr += sprintf(out_append_ptr, "x86\\"); + break; + case NativeArchx86_64: + out_append_ptr += sprintf(out_append_ptr, "x64\\"); + break; + case NativeArchArm: + out_append_ptr += sprintf(out_append_ptr, "arm\\"); + break; + } + sprintf(tmp_buf, "%s%s", output_path, "vcruntime.lib"); + + if (GetFileAttributesA(tmp_buf) != INVALID_FILE_ATTRIBUTES) { + priv->base.msvc_lib_dir_ptr = strdup(output_path); + if (priv->base.msvc_lib_dir_ptr == nullptr) { + return ZigFindWindowsSdkErrorOutOfMemory; + } + priv->base.msvc_lib_dir_len = strlen(priv->base.msvc_lib_dir_ptr); + return ZigFindWindowsSdkErrorNone; + } + } + } + +com_done:; + HKEY key; + HRESULT rc = RegOpenKeyEx(HKEY_LOCAL_MACHINE, "SOFTWARE\\Microsoft\\VisualStudio\\SxS\\VS7", 0, + KEY_QUERY_VALUE | KEY_WOW64_32KEY, &key); + if (rc != ERROR_SUCCESS) { + return ZigFindWindowsSdkErrorNotFound; + } + + DWORD dw_type = 0; + DWORD cb_data = 0; + rc = RegQueryValueEx(key, "14.0", NULL, &dw_type, NULL, &cb_data); + if ((rc == ERROR_FILE_NOT_FOUND) || (REG_SZ != dw_type)) { + return ZigFindWindowsSdkErrorNotFound; + } + + char tmp_buf[4096]; + + RegQueryValueExA(key, "14.0", NULL, NULL, (LPBYTE)tmp_buf, &cb_data); + // RegQueryValueExA returns the length of the string INCLUDING the null terminator + char *tmp_buf_append_ptr = tmp_buf + (cb_data - 1); + tmp_buf_append_ptr += sprintf(tmp_buf_append_ptr, "VC\\Lib\\"); + switch (native_arch) { + case NativeArchi386: + //x86 is in the root of the Lib folder + break; + case NativeArchx86_64: + tmp_buf_append_ptr += sprintf(tmp_buf_append_ptr, "amd64\\"); + break; + case NativeArchArm: + tmp_buf_append_ptr += sprintf(tmp_buf_append_ptr, "arm\\"); + break; + } + + char *output_path = strdup(tmp_buf); + if (output_path == nullptr) { + return ZigFindWindowsSdkErrorOutOfMemory; + } + + tmp_buf_append_ptr += sprintf(tmp_buf_append_ptr, "vcruntime.lib"); + + if (GetFileAttributesA(tmp_buf) != INVALID_FILE_ATTRIBUTES) { + priv->base.msvc_lib_dir_ptr = output_path; + priv->base.msvc_lib_dir_len = strlen(output_path); + return ZigFindWindowsSdkErrorNone; + } else { + free(output_path); + return ZigFindWindowsSdkErrorNotFound; + } +} + +static ZigFindWindowsSdkError find_10_version(ZigWindowsSDKPrivate *priv) { + if (priv->base.path10_ptr == nullptr) + return ZigFindWindowsSdkErrorNone; + + char sdk_lib_dir[4096]; + int n = snprintf(sdk_lib_dir, 4096, "%s\\Lib\\*", priv->base.path10_ptr); + if (n < 0 || n >= 4096) { + return ZigFindWindowsSdkErrorPathTooLong; + } + + // enumerate files in sdk path looking for latest version + WIN32_FIND_DATA ffd; + HANDLE hFind = FindFirstFileA(sdk_lib_dir, &ffd); + if (hFind == INVALID_HANDLE_VALUE) { + return ZigFindWindowsSdkErrorNotFound; + } + int v0 = 0, v1 = 0, v2 = 0, v3 = 0; + for (;;) { + if (ffd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) { + int c0 = 0, c1 = 0, c2 = 0, c3 = 0; + sscanf(ffd.cFileName, "%d.%d.%d.%d", &c0, &c1, &c2, &c3); + if (c0 == 10 && c1 == 0 && c2 == 10240 && c3 == 0) { + // Microsoft released 26624 as 10240 accidentally. 
+                // https://developer.microsoft.com/en-us/windows/downloads/sdk-archive
+                c2 = 26624;
+            }
+            if ((c0 > v0) || (c1 > v1) || (c2 > v2) || (c3 > v3)) {
+                v0 = c0, v1 = c1, v2 = c2, v3 = c3;
+                free((void*)priv->base.version10_ptr);
+                priv->base.version10_ptr = strdup(ffd.cFileName);
+                if (priv->base.version10_ptr == nullptr) {
+                    FindClose(hFind);
+                    return ZigFindWindowsSdkErrorOutOfMemory;
+                }
+            }
+        }
+        if (FindNextFile(hFind, &ffd) == 0) {
+            FindClose(hFind);
+            break;
+        }
+    }
+    priv->base.version10_len = strlen(priv->base.version10_ptr);
+    return ZigFindWindowsSdkErrorNone;
+}
+
+static ZigFindWindowsSdkError find_81_version(ZigWindowsSDKPrivate *priv) {
+    if (priv->base.path81_ptr == nullptr)
+        return ZigFindWindowsSdkErrorNone;
+
+    char sdk_lib_dir[4096];
+    int n = snprintf(sdk_lib_dir, 4096, "%s\\Lib\\winv*", priv->base.path81_ptr);
+    if (n < 0 || n >= 4096) {
+        return ZigFindWindowsSdkErrorPathTooLong;
+    }
+
+    // enumerate files in sdk path looking for latest version
+    WIN32_FIND_DATA ffd;
+    HANDLE hFind = FindFirstFileA(sdk_lib_dir, &ffd);
+    if (hFind == INVALID_HANDLE_VALUE) {
+        return ZigFindWindowsSdkErrorNotFound;
+    }
+    int v0 = 0, v1 = 0;
+    for (;;) {
+        if (ffd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
+            int c0 = 0, c1 = 0;
+            sscanf(ffd.cFileName, "winv%d.%d", &c0, &c1);
+            if ((c0 > v0) || (c1 > v1)) {
+                v0 = c0, v1 = c1;
+                free((void*)priv->base.version81_ptr);
+                priv->base.version81_ptr = strdup(ffd.cFileName);
+                if (priv->base.version81_ptr == nullptr) {
+                    FindClose(hFind);
+                    return ZigFindWindowsSdkErrorOutOfMemory;
+                }
+            }
+        }
+        if (FindNextFile(hFind, &ffd) == 0) {
+            FindClose(hFind);
+            break;
+        }
+    }
+    priv->base.version81_len = strlen(priv->base.version81_ptr);
+    return ZigFindWindowsSdkErrorNone;
+}
+
+ZigFindWindowsSdkError zig_find_windows_sdk(struct ZigWindowsSDK **out_sdk) {
+    ZigWindowsSDKPrivate *priv = (ZigWindowsSDKPrivate*)calloc(1, sizeof(ZigWindowsSDKPrivate));
+    if (priv == nullptr) {
+        return ZigFindWindowsSdkErrorOutOfMemory;
+    }
+
+    HKEY key;
+    HRESULT rc;
+    rc = RegOpenKeyEx(HKEY_LOCAL_MACHINE, "SOFTWARE\\Microsoft\\Windows Kits\\Installed Roots", 0,
+        KEY_QUERY_VALUE | KEY_WOW64_32KEY | KEY_ENUMERATE_SUB_KEYS, &key);
+    if (rc != ERROR_SUCCESS) {
+        zig_free_windows_sdk(&priv->base);
+        return ZigFindWindowsSdkErrorNotFound;
+    }
+
+    {
+        DWORD tmp_buf_len = MAX_PATH;
+        priv->base.path10_ptr = (const char *)calloc(tmp_buf_len, 1);
+        if (priv->base.path10_ptr == nullptr) {
+            zig_free_windows_sdk(&priv->base);
+            return ZigFindWindowsSdkErrorOutOfMemory;
+        }
+        rc = RegQueryValueEx(key, "KitsRoot10", NULL, NULL, (LPBYTE)priv->base.path10_ptr, &tmp_buf_len);
+        if (rc == ERROR_SUCCESS) {
+            priv->base.path10_len = tmp_buf_len - 1;
+            if (priv->base.path10_ptr[priv->base.path10_len - 1] == '\\') {
+                priv->base.path10_len -= 1;
+            }
+        } else {
+            free((void*)priv->base.path10_ptr);
+            priv->base.path10_ptr = nullptr;
+        }
+    }
+    {
+        DWORD tmp_buf_len = MAX_PATH;
+        priv->base.path81_ptr = (const char *)calloc(tmp_buf_len, 1);
+        if (priv->base.path81_ptr == nullptr) {
+            zig_free_windows_sdk(&priv->base);
+            return ZigFindWindowsSdkErrorOutOfMemory;
+        }
+        rc = RegQueryValueEx(key, "KitsRoot81", NULL, NULL, (LPBYTE)priv->base.path81_ptr, &tmp_buf_len);
+        if (rc == ERROR_SUCCESS) {
+            priv->base.path81_len = tmp_buf_len - 1;
+            if (priv->base.path81_ptr[priv->base.path81_len - 1] == '\\') {
+                priv->base.path81_len -= 1;
+            }
+        } else {
+            free((void*)priv->base.path81_ptr);
+            priv->base.path81_ptr = nullptr;
+        }
+    }
+
+    {
+        ZigFindWindowsSdkError err = find_10_version(priv);
+        if (err == ZigFindWindowsSdkErrorOutOfMemory) {
+            zig_free_windows_sdk(&priv->base);
+            return err;
+        }
+    }
+    {
+        ZigFindWindowsSdkError err = find_81_version(priv);
+        if (err == ZigFindWindowsSdkErrorOutOfMemory) {
+            zig_free_windows_sdk(&priv->base);
+            return err;
+        }
+    }
+
+    {
+        ZigFindWindowsSdkError err = find_msvc_lib_dir(priv);
+        if (err == ZigFindWindowsSdkErrorOutOfMemory) {
+            zig_free_windows_sdk(&priv->base);
+            return err;
+        }
+    }
+
+    *out_sdk = &priv->base;
+    return ZigFindWindowsSdkErrorNone;
+}
+
+#else
+
+void zig_free_windows_sdk(struct ZigWindowsSDK *sdk) {}
+ZigFindWindowsSdkError zig_find_windows_sdk(struct ZigWindowsSDK **out_sdk) {
+    return ZigFindWindowsSdkErrorNotFound;
+}
+
+#endif
diff --git a/src/windows_sdk.h b/src/windows_sdk.h
new file mode 100644
index 0000000000..2d531ad372
--- /dev/null
+++ b/src/windows_sdk.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2018 Andrew Kelley
+ *
+ * This file is part of zig, which is MIT licensed.
+ * See http://opensource.org/licenses/MIT
+ */
+
+#ifndef ZIG_WINDOWS_SDK_H
+#define ZIG_WINDOWS_SDK_H
+
+#ifdef __cplusplus
+#define ZIG_EXTERN_C extern "C"
+#else
+#define ZIG_EXTERN_C
+#endif
+
+#include <stddef.h>
+
+struct ZigWindowsSDK {
+    const char *path10_ptr;
+    size_t path10_len;
+
+    const char *version10_ptr;
+    size_t version10_len;
+
+    const char *path81_ptr;
+    size_t path81_len;
+
+    const char *version81_ptr;
+    size_t version81_len;
+
+    const char *msvc_lib_dir_ptr;
+    size_t msvc_lib_dir_len;
+};
+
+enum ZigFindWindowsSdkError {
+    ZigFindWindowsSdkErrorNone,
+    ZigFindWindowsSdkErrorOutOfMemory,
+    ZigFindWindowsSdkErrorNotFound,
+    ZigFindWindowsSdkErrorPathTooLong,
+};
+
+ZIG_EXTERN_C enum ZigFindWindowsSdkError zig_find_windows_sdk(struct ZigWindowsSDK **out_sdk);
+
+ZIG_EXTERN_C void zig_free_windows_sdk(struct ZigWindowsSDK *sdk);
+
+#endif
diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp
index b4eef13cc1..a43d2d182c 100644
--- a/src/zig_llvm.cpp
+++ b/src/zig_llvm.cpp
@@ -81,7 +81,7 @@ static const bool assertions_on = false;
 #endif
 
 bool ZigLLVMTargetMachineEmitToFile(LLVMTargetMachineRef targ_machine_ref, LLVMModuleRef module_ref,
-        const char *filename, ZigLLVM_EmitOutputType output_type, char **error_message, bool is_debug)
+        const char *filename, ZigLLVM_EmitOutputType output_type, char **error_message, bool is_debug, bool is_small)
 {
     std::error_code EC;
     raw_fd_ostream dest(filename, EC, sys::fs::F_None);
@@ -100,7 +100,7 @@ bool ZigLLVMTargetMachineEmitToFile(LLVMTargetMachineRef targ_machine_ref, LLVMM
         return true;
     }
 
     PMBuilder->OptLevel = target_machine->getOptLevel();
-    PMBuilder->SizeLevel = 0;
+    PMBuilder->SizeLevel = is_small ? 2 : 0;
     PMBuilder->DisableTailCalls = is_debug;
     PMBuilder->DisableUnitAtATime = is_debug;
@@ -440,6 +440,11 @@ ZigLLVMDIBuilder *ZigLLVMCreateDIBuilder(LLVMModuleRef module, bool allow_unreso
     return reinterpret_cast<ZigLLVMDIBuilder *>(di_builder);
 }
 
+void ZigLLVMDisposeDIBuilder(ZigLLVMDIBuilder *dbuilder) {
+    DIBuilder *di_builder = reinterpret_cast<DIBuilder *>(dbuilder);
+    delete di_builder;
+}
+
 void ZigLLVMSetCurrentDebugLocation(LLVMBuilderRef builder, int line, int column, ZigLLVMDIScope *scope) {
     unwrap(builder)->SetCurrentDebugLocation(DebugLoc::get(
                 line, column, reinterpret_cast<DIScope*>(scope)));
@@ -765,10 +770,12 @@ static AtomicOrdering mapFromLLVMOrdering(LLVMAtomicOrdering Ordering) {
 
 LLVMValueRef ZigLLVMBuildCmpXchg(LLVMBuilderRef builder, LLVMValueRef ptr, LLVMValueRef cmp,
         LLVMValueRef new_val, LLVMAtomicOrdering success_ordering,
-        LLVMAtomicOrdering failure_ordering)
+        LLVMAtomicOrdering failure_ordering, bool is_weak)
 {
-    return wrap(unwrap(builder)->CreateAtomicCmpXchg(unwrap(ptr), unwrap(cmp), unwrap(new_val),
-            mapFromLLVMOrdering(success_ordering), mapFromLLVMOrdering(failure_ordering)));
+    AtomicCmpXchgInst *inst = unwrap(builder)->CreateAtomicCmpXchg(unwrap(ptr), unwrap(cmp),
+            unwrap(new_val), mapFromLLVMOrdering(success_ordering), mapFromLLVMOrdering(failure_ordering));
+    inst->setWeak(is_weak);
+    return wrap(inst);
 }
 
 LLVMValueRef ZigLLVMBuildNSWShl(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS,
@@ -836,7 +843,7 @@ bool ZigLLDLink(ZigLLVM_ObjectFormatType oformat, const char **args, size_t arg_
             return lld::mach_o::link(array_ref_args, diag);
 
         case ZigLLVM_Wasm:
-            assert(false); // TODO ZigLLDLink for Wasm
+            return lld::wasm::link(array_ref_args, false, diag);
     }
     assert(false); // unreachable
     abort();
diff --git a/src/zig_llvm.h b/src/zig_llvm.h
index d6809000ce..63d69bd23e 100644
--- a/src/zig_llvm.h
+++ b/src/zig_llvm.h
@@ -22,6 +22,9 @@
 #define ZIG_EXTERN_C
 #endif
 
+// ATTENTION: If you modify this file, be sure to update the corresponding
+// extern function declarations in the self-hosted compiler.
+
 struct ZigLLVMDIType;
 struct ZigLLVMDIBuilder;
 struct ZigLLVMDICompileUnit;
@@ -39,7 +42,7 @@ struct ZigLLVMInsertionPoint;
 
 ZIG_EXTERN_C void ZigLLVMInitializeLoopStrengthReducePass(LLVMPassRegistryRef R);
 ZIG_EXTERN_C void ZigLLVMInitializeLowerIntrinsicsPass(LLVMPassRegistryRef R);
 
-/// Caller must free memory.
+/// Caller must free memory with LLVMDisposeMessage
 ZIG_EXTERN_C char *ZigLLVMGetHostCPUName(void);
 ZIG_EXTERN_C char *ZigLLVMGetNativeFeatures(void);
@@ -52,7 +55,7 @@ enum ZigLLVM_EmitOutputType {
 };
 
 ZIG_EXTERN_C bool ZigLLVMTargetMachineEmitToFile(LLVMTargetMachineRef targ_machine_ref, LLVMModuleRef module_ref,
-        const char *filename, enum ZigLLVM_EmitOutputType output_type, char **error_message, bool is_debug);
+        const char *filename, enum ZigLLVM_EmitOutputType output_type, char **error_message, bool is_debug, bool is_small);
 
 ZIG_EXTERN_C LLVMTypeRef ZigLLVMTokenTypeInContext(LLVMContextRef context_ref);
 
@@ -66,7 +69,7 @@ ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildCall(LLVMBuilderRef B, LLVMValueRef Fn, LL
 
 ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildCmpXchg(LLVMBuilderRef builder, LLVMValueRef ptr, LLVMValueRef cmp,
         LLVMValueRef new_val, LLVMAtomicOrdering success_ordering,
-        LLVMAtomicOrdering failure_ordering);
+        LLVMAtomicOrdering failure_ordering, bool is_weak);
 
 ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildNSWShl(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS,
         const char *name);
@@ -139,6 +142,7 @@ ZIG_EXTERN_C unsigned ZigLLVMTag_DW_enumeration_type(void);
 ZIG_EXTERN_C unsigned ZigLLVMTag_DW_union_type(void);
 
 ZIG_EXTERN_C struct ZigLLVMDIBuilder *ZigLLVMCreateDIBuilder(LLVMModuleRef module, bool allow_unresolved);
+ZIG_EXTERN_C void ZigLLVMDisposeDIBuilder(struct ZigLLVMDIBuilder *dbuilder);
 ZIG_EXTERN_C void ZigLLVMAddModuleDebugInfoFlag(LLVMModuleRef module);
 ZIG_EXTERN_C void ZigLLVMAddModuleCodeViewFlag(LLVMModuleRef module);
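Editor's note: a minimal sketch of how a caller is expected to drive the new src/windows_sdk.h API, for readers skimming the patch. The print_sdk_info harness and its output format are illustrative only and are not part of the commit; it assumes only the struct and the two entry points declared above (query once, treat nullptr fields as "not detected", then hand the struct back to zig_free_windows_sdk).

    // Hypothetical caller -- illustrative only, not included in this commit.
    #include <stdio.h>
    #include "windows_sdk.h"

    static void print_sdk_info(void) {
        struct ZigWindowsSDK *sdk;
        enum ZigFindWindowsSdkError err = zig_find_windows_sdk(&sdk);
        if (err != ZigFindWindowsSdkErrorNone) {
            // On non-Windows hosts the stub implementation always reports NotFound.
            fprintf(stderr, "unable to find a Windows SDK (error %d)\n", (int)err);
            return;
        }
        // A field is nullptr when that particular SDK flavor was not detected.
        if (sdk->path10_ptr && sdk->version10_ptr)
            printf("SDK 10 root:  %.*s (version %.*s)\n",
                    (int)sdk->path10_len, sdk->path10_ptr,
                    (int)sdk->version10_len, sdk->version10_ptr);
        if (sdk->path81_ptr && sdk->version81_ptr)
            printf("SDK 8.1 root: %.*s (version %.*s)\n",
                    (int)sdk->path81_len, sdk->path81_ptr,
                    (int)sdk->version81_len, sdk->version81_ptr);
        if (sdk->msvc_lib_dir_ptr)
            printf("MSVC libs:    %.*s\n", (int)sdk->msvc_lib_dir_len, sdk->msvc_lib_dir_ptr);
        zig_free_windows_sdk(sdk);
    }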

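Similarly, a short round-trip sketch of the zig_double_to_f16 / zig_f16_to_double helpers added to src/util.hpp above. It is illustrative only: it assumes softfloat's float16_t type and the f64_to_f16/f16_to_f64 conversions are already visible through util.hpp, as the new helpers require.

    // Hypothetical round-trip demo -- illustrative only, not included in this commit.
    #include <stdio.h>
    #include "util.hpp"  // declares zig_double_to_f16 / zig_f16_to_double

    static void f16_round_trip(void) {
        double before = 3.14159;
        float16_t half = zig_double_to_f16(before);  // f64 -> f16 through softfloat
        double after = zig_f16_to_double(half);      // f16 -> f64 through softfloat
        // f16 keeps roughly 3 decimal digits, so expect a small rounding error.
        printf("before=%.6f after=%.6f\n", before, after);
    }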