From f7143e18e368eb89763e9d813b8b7c9c96dd1bd3 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 26 Feb 2024 20:35:26 -0700 Subject: move Zcu.LazySrcLoc to std.zig.LazySrcLoc Part of an effort to ship more of the compiler in source form. --- src/Module.zig | 542 ++++++++++----------------------------------------------- 1 file changed, 96 insertions(+), 446 deletions(-) (limited to 'src/Module.zig') diff --git a/src/Module.zig b/src/Module.zig index a4cedd9077..aef54be700 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -13,6 +13,7 @@ const BigIntConst = std.math.big.int.Const; const BigIntMutable = std.math.big.int.Mutable; const Target = std.Target; const Ast = std.zig.Ast; +const LazySrcLoc = std.zig.LazySrcLoc; /// Deprecated, use `Zcu`. const Module = Zcu; @@ -664,6 +665,101 @@ pub const Decl = struct { if (decl.alignment != .none) return decl.alignment; return decl.ty.abiAlignment(zcu); } + + /// Upgrade a `LazySrcLoc` to a `SrcLoc` based on the `Decl` provided. + pub fn toSrcLoc(decl: *Decl, lazy: LazySrcLoc, mod: *Module) SrcLoc { + return switch (lazy) { + .unneeded, + .entire_file, + .byte_abs, + .token_abs, + .node_abs, + => .{ + .file_scope = decl.getFileScope(mod), + .parent_decl_node = 0, + .lazy = lazy, + }, + + .byte_offset, + .token_offset, + .node_offset, + .node_offset_main_token, + .node_offset_initializer, + .node_offset_var_decl_ty, + .node_offset_var_decl_align, + .node_offset_var_decl_section, + .node_offset_var_decl_addrspace, + .node_offset_var_decl_init, + .node_offset_builtin_call_arg0, + .node_offset_builtin_call_arg1, + .node_offset_builtin_call_arg2, + .node_offset_builtin_call_arg3, + .node_offset_builtin_call_arg4, + .node_offset_builtin_call_arg5, + .node_offset_ptrcast_operand, + .node_offset_array_access_index, + .node_offset_slice_ptr, + .node_offset_slice_start, + .node_offset_slice_end, + .node_offset_slice_sentinel, + .node_offset_call_func, + .node_offset_field_name, + .node_offset_field_name_init, + .node_offset_deref_ptr, + .node_offset_asm_source, + .node_offset_asm_ret_ty, + .node_offset_if_cond, + .node_offset_bin_op, + .node_offset_bin_lhs, + .node_offset_bin_rhs, + .node_offset_switch_operand, + .node_offset_switch_special_prong, + .node_offset_switch_range, + .node_offset_switch_prong_capture, + .node_offset_switch_prong_tag_capture, + .node_offset_fn_type_align, + .node_offset_fn_type_addrspace, + .node_offset_fn_type_section, + .node_offset_fn_type_cc, + .node_offset_fn_type_ret_ty, + .node_offset_param, + .token_offset_param, + .node_offset_anyframe_type, + .node_offset_lib_name, + .node_offset_array_type_len, + .node_offset_array_type_sentinel, + .node_offset_array_type_elem, + .node_offset_un_op, + .node_offset_ptr_elem, + .node_offset_ptr_sentinel, + .node_offset_ptr_align, + .node_offset_ptr_addrspace, + .node_offset_ptr_bitoffset, + .node_offset_ptr_hostsize, + .node_offset_container_tag, + .node_offset_field_default, + .node_offset_init_ty, + .node_offset_store_ptr, + .node_offset_store_operand, + .node_offset_return_operand, + .for_input, + .for_capture_from_input, + .array_cat_lhs, + .array_cat_rhs, + => .{ + .file_scope = decl.getFileScope(mod), + .parent_decl_node = decl.src_node, + .lazy = lazy, + }, + inline .call_arg, + .fn_proto_param, + => |x| .{ + .file_scope = decl.getFileScope(mod), + .parent_decl_node = mod.declPtr(x.decl).src_node, + .lazy = lazy, + }, + }; + } }; /// This state is attached to every Decl when Module emit_h is non-null. 
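The hunk above also swaps the parameter order: `toSrcLoc` is now a method on `Decl` taking the `LazySrcLoc` as an argument, rather than the other way around. A minimal sketch of the call-site change this implies, where `decl`, `lazy_src`, and `mod` are illustrative names rather than lines quoted from the diff:

    // Before: the lazy source location resolved itself against a Decl.
    // const src_loc = lazy_src.toSrcLoc(decl, mod);

    // After: Decl performs the upgrade, since std.zig.LazySrcLoc can no
    // longer reference compiler-internal types such as Decl.
    const src_loc = decl.toSrcLoc(lazy_src, mod);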
@@ -1951,452 +2047,6 @@ pub const SrcLoc = struct { } }; -/// This wraps a simple integer in debug builds so that later on we can find out -/// where in semantic analysis the value got set. -const TracedOffset = struct { - x: i32, - trace: std.debug.Trace = .{}, - - const want_tracing = build_options.value_tracing; -}; - -/// Resolving a source location into a byte offset may require doing work -/// that we would rather not do unless the error actually occurs. -/// Therefore we need a data structure that contains the information necessary -/// to lazily produce a `SrcLoc` as required. -/// Most of the offsets in this data structure are relative to the containing Decl. -/// This makes the source location resolve properly even when a Decl gets -/// shifted up or down in the file, as long as the Decl's contents itself -/// do not change. -pub const LazySrcLoc = union(enum) { - /// When this tag is set, the code that constructed this `LazySrcLoc` is asserting - /// that all code paths which would need to resolve the source location are - /// unreachable. If you are debugging this tag incorrectly being this value, - /// look into using reverse-continue with a memory watchpoint to see where the - /// value is being set to this tag. - unneeded, - /// Means the source location points to an entire file; not any particular - /// location within the file. `file_scope` union field will be active. - entire_file, - /// The source location points to a byte offset within a source file, - /// offset from 0. The source file is determined contextually. - /// Inside a `SrcLoc`, the `file_scope` union field will be active. - byte_abs: u32, - /// The source location points to a token within a source file, - /// offset from 0. The source file is determined contextually. - /// Inside a `SrcLoc`, the `file_scope` union field will be active. - token_abs: u32, - /// The source location points to an AST node within a source file, - /// offset from 0. The source file is determined contextually. - /// Inside a `SrcLoc`, the `file_scope` union field will be active. - node_abs: u32, - /// The source location points to a byte offset within a source file, - /// offset from the byte offset of the Decl within the file. - /// The Decl is determined contextually. - byte_offset: u32, - /// This data is the offset into the token list from the Decl token. - /// The Decl is determined contextually. - token_offset: u32, - /// The source location points to an AST node, which is this value offset - /// from its containing Decl node AST index. - /// The Decl is determined contextually. - node_offset: TracedOffset, - /// The source location points to the main token of an AST node, found - /// by taking this AST node index offset from the containing Decl AST node. - /// The Decl is determined contextually. - node_offset_main_token: i32, - /// The source location points to the beginning of a struct initializer. - /// The Decl is determined contextually. - node_offset_initializer: i32, - /// The source location points to a variable declaration type expression, - /// found by taking this AST node index offset from the containing - /// Decl AST node, which points to a variable declaration AST node. Next, navigate - /// to the type expression. - /// The Decl is determined contextually. - node_offset_var_decl_ty: i32, - /// The source location points to the alignment expression of a var decl. - /// The Decl is determined contextually. 
- node_offset_var_decl_align: i32, - /// The source location points to the linksection expression of a var decl. - /// The Decl is determined contextually. - node_offset_var_decl_section: i32, - /// The source location points to the addrspace expression of a var decl. - /// The Decl is determined contextually. - node_offset_var_decl_addrspace: i32, - /// The source location points to the initializer of a var decl. - /// The Decl is determined contextually. - node_offset_var_decl_init: i32, - /// The source location points to the first parameter of a builtin - /// function call, found by taking this AST node index offset from the containing - /// Decl AST node, which points to a builtin call AST node. Next, navigate - /// to the first parameter. - /// The Decl is determined contextually. - node_offset_builtin_call_arg0: i32, - /// Same as `node_offset_builtin_call_arg0` except arg index 1. - node_offset_builtin_call_arg1: i32, - node_offset_builtin_call_arg2: i32, - node_offset_builtin_call_arg3: i32, - node_offset_builtin_call_arg4: i32, - node_offset_builtin_call_arg5: i32, - /// Like `node_offset_builtin_call_arg0` but recurses through arbitrarily many calls - /// to pointer cast builtins. - node_offset_ptrcast_operand: i32, - /// The source location points to the index expression of an array access - /// expression, found by taking this AST node index offset from the containing - /// Decl AST node, which points to an array access AST node. Next, navigate - /// to the index expression. - /// The Decl is determined contextually. - node_offset_array_access_index: i32, - /// The source location points to the LHS of a slice expression - /// expression, found by taking this AST node index offset from the containing - /// Decl AST node, which points to a slice AST node. Next, navigate - /// to the sentinel expression. - /// The Decl is determined contextually. - node_offset_slice_ptr: i32, - /// The source location points to start expression of a slice expression - /// expression, found by taking this AST node index offset from the containing - /// Decl AST node, which points to a slice AST node. Next, navigate - /// to the sentinel expression. - /// The Decl is determined contextually. - node_offset_slice_start: i32, - /// The source location points to the end expression of a slice - /// expression, found by taking this AST node index offset from the containing - /// Decl AST node, which points to a slice AST node. Next, navigate - /// to the sentinel expression. - /// The Decl is determined contextually. - node_offset_slice_end: i32, - /// The source location points to the sentinel expression of a slice - /// expression, found by taking this AST node index offset from the containing - /// Decl AST node, which points to a slice AST node. Next, navigate - /// to the sentinel expression. - /// The Decl is determined contextually. - node_offset_slice_sentinel: i32, - /// The source location points to the callee expression of a function - /// call expression, found by taking this AST node index offset from the containing - /// Decl AST node, which points to a function call AST node. Next, navigate - /// to the callee expression. - /// The Decl is determined contextually. - node_offset_call_func: i32, - /// The payload is offset from the containing Decl AST node. - /// The source location points to the field name of: - /// * a field access expression (`a.b`), or - /// * the callee of a method call (`a.b()`) - /// The Decl is determined contextually. 
- node_offset_field_name: i32, - /// The payload is offset from the containing Decl AST node. - /// The source location points to the field name of the operand ("b" node) - /// of a field initialization expression (`.a = b`) - /// The Decl is determined contextually. - node_offset_field_name_init: i32, - /// The source location points to the pointer of a pointer deref expression, - /// found by taking this AST node index offset from the containing - /// Decl AST node, which points to a pointer deref AST node. Next, navigate - /// to the pointer expression. - /// The Decl is determined contextually. - node_offset_deref_ptr: i32, - /// The source location points to the assembly source code of an inline assembly - /// expression, found by taking this AST node index offset from the containing - /// Decl AST node, which points to inline assembly AST node. Next, navigate - /// to the asm template source code. - /// The Decl is determined contextually. - node_offset_asm_source: i32, - /// The source location points to the return type of an inline assembly - /// expression, found by taking this AST node index offset from the containing - /// Decl AST node, which points to inline assembly AST node. Next, navigate - /// to the return type expression. - /// The Decl is determined contextually. - node_offset_asm_ret_ty: i32, - /// The source location points to the condition expression of an if - /// expression, found by taking this AST node index offset from the containing - /// Decl AST node, which points to an if expression AST node. Next, navigate - /// to the condition expression. - /// The Decl is determined contextually. - node_offset_if_cond: i32, - /// The source location points to a binary expression, such as `a + b`, found - /// by taking this AST node index offset from the containing Decl AST node. - /// The Decl is determined contextually. - node_offset_bin_op: i32, - /// The source location points to the LHS of a binary expression, found - /// by taking this AST node index offset from the containing Decl AST node, - /// which points to a binary expression AST node. Next, navigate to the LHS. - /// The Decl is determined contextually. - node_offset_bin_lhs: i32, - /// The source location points to the RHS of a binary expression, found - /// by taking this AST node index offset from the containing Decl AST node, - /// which points to a binary expression AST node. Next, navigate to the RHS. - /// The Decl is determined contextually. - node_offset_bin_rhs: i32, - /// The source location points to the operand of a switch expression, found - /// by taking this AST node index offset from the containing Decl AST node, - /// which points to a switch expression AST node. Next, navigate to the operand. - /// The Decl is determined contextually. - node_offset_switch_operand: i32, - /// The source location points to the else/`_` prong of a switch expression, found - /// by taking this AST node index offset from the containing Decl AST node, - /// which points to a switch expression AST node. Next, navigate to the else/`_` prong. - /// The Decl is determined contextually. - node_offset_switch_special_prong: i32, - /// The source location points to all the ranges of a switch expression, found - /// by taking this AST node index offset from the containing Decl AST node, - /// which points to a switch expression AST node. Next, navigate to any of the - /// range nodes. The error applies to all of them. - /// The Decl is determined contextually. 
- node_offset_switch_range: i32, - /// The source location points to the capture of a switch_prong. - /// The Decl is determined contextually. - node_offset_switch_prong_capture: i32, - /// The source location points to the tag capture of a switch_prong. - /// The Decl is determined contextually. - node_offset_switch_prong_tag_capture: i32, - /// The source location points to the align expr of a function type - /// expression, found by taking this AST node index offset from the containing - /// Decl AST node, which points to a function type AST node. Next, navigate to - /// the calling convention node. - /// The Decl is determined contextually. - node_offset_fn_type_align: i32, - /// The source location points to the addrspace expr of a function type - /// expression, found by taking this AST node index offset from the containing - /// Decl AST node, which points to a function type AST node. Next, navigate to - /// the calling convention node. - /// The Decl is determined contextually. - node_offset_fn_type_addrspace: i32, - /// The source location points to the linksection expr of a function type - /// expression, found by taking this AST node index offset from the containing - /// Decl AST node, which points to a function type AST node. Next, navigate to - /// the calling convention node. - /// The Decl is determined contextually. - node_offset_fn_type_section: i32, - /// The source location points to the calling convention of a function type - /// expression, found by taking this AST node index offset from the containing - /// Decl AST node, which points to a function type AST node. Next, navigate to - /// the calling convention node. - /// The Decl is determined contextually. - node_offset_fn_type_cc: i32, - /// The source location points to the return type of a function type - /// expression, found by taking this AST node index offset from the containing - /// Decl AST node, which points to a function type AST node. Next, navigate to - /// the return type node. - /// The Decl is determined contextually. - node_offset_fn_type_ret_ty: i32, - node_offset_param: i32, - token_offset_param: i32, - /// The source location points to the type expression of an `anyframe->T` - /// expression, found by taking this AST node index offset from the containing - /// Decl AST node, which points to a `anyframe->T` expression AST node. Next, navigate - /// to the type expression. - /// The Decl is determined contextually. - node_offset_anyframe_type: i32, - /// The source location points to the string literal of `extern "foo"`, found - /// by taking this AST node index offset from the containing - /// Decl AST node, which points to a function prototype or variable declaration - /// expression AST node. Next, navigate to the string literal of the `extern "foo"`. - /// The Decl is determined contextually. - node_offset_lib_name: i32, - /// The source location points to the len expression of an `[N:S]T` - /// expression, found by taking this AST node index offset from the containing - /// Decl AST node, which points to an `[N:S]T` expression AST node. Next, navigate - /// to the len expression. - /// The Decl is determined contextually. - node_offset_array_type_len: i32, - /// The source location points to the sentinel expression of an `[N:S]T` - /// expression, found by taking this AST node index offset from the containing - /// Decl AST node, which points to an `[N:S]T` expression AST node. Next, navigate - /// to the sentinel expression. - /// The Decl is determined contextually. 
- node_offset_array_type_sentinel: i32, - /// The source location points to the elem expression of an `[N:S]T` - /// expression, found by taking this AST node index offset from the containing - /// Decl AST node, which points to an `[N:S]T` expression AST node. Next, navigate - /// to the elem expression. - /// The Decl is determined contextually. - node_offset_array_type_elem: i32, - /// The source location points to the operand of an unary expression. - /// The Decl is determined contextually. - node_offset_un_op: i32, - /// The source location points to the elem type of a pointer. - /// The Decl is determined contextually. - node_offset_ptr_elem: i32, - /// The source location points to the sentinel of a pointer. - /// The Decl is determined contextually. - node_offset_ptr_sentinel: i32, - /// The source location points to the align expr of a pointer. - /// The Decl is determined contextually. - node_offset_ptr_align: i32, - /// The source location points to the addrspace expr of a pointer. - /// The Decl is determined contextually. - node_offset_ptr_addrspace: i32, - /// The source location points to the bit-offset of a pointer. - /// The Decl is determined contextually. - node_offset_ptr_bitoffset: i32, - /// The source location points to the host size of a pointer. - /// The Decl is determined contextually. - node_offset_ptr_hostsize: i32, - /// The source location points to the tag type of an union or an enum. - /// The Decl is determined contextually. - node_offset_container_tag: i32, - /// The source location points to the default value of a field. - /// The Decl is determined contextually. - node_offset_field_default: i32, - /// The source location points to the type of an array or struct initializer. - /// The Decl is determined contextually. - node_offset_init_ty: i32, - /// The source location points to the LHS of an assignment. - /// The Decl is determined contextually. - node_offset_store_ptr: i32, - /// The source location points to the RHS of an assignment. - /// The Decl is determined contextually. - node_offset_store_operand: i32, - /// The source location points to the operand of a `return` statement, or - /// the `return` itself if there is no explicit operand. - /// The Decl is determined contextually. - node_offset_return_operand: i32, - /// The source location points to a for loop input. - /// The Decl is determined contextually. - for_input: struct { - /// Points to the for loop AST node. - for_node_offset: i32, - /// Picks one of the inputs from the condition. - input_index: u32, - }, - /// The source location points to one of the captures of a for loop, found - /// by taking this AST node index offset from the containing - /// Decl AST node, which points to one of the input nodes of a for loop. - /// Next, navigate to the corresponding capture. - /// The Decl is determined contextually. - for_capture_from_input: i32, - /// The source location points to the argument node of a function call. - call_arg: struct { - decl: Decl.Index, - /// Points to the function call AST node. - call_node_offset: i32, - /// The index of the argument the source location points to. - arg_index: u32, - }, - fn_proto_param: struct { - decl: Decl.Index, - /// Points to the function prototype AST node. - fn_proto_node_offset: i32, - /// The index of the parameter the source location points to. - param_index: u32, - }, - array_cat_lhs: ArrayCat, - array_cat_rhs: ArrayCat, - - const ArrayCat = struct { - /// Points to the array concat AST node. 
- array_cat_offset: i32, - /// The index of the element the source location points to. - elem_index: u32, - }; - - pub const nodeOffset = if (TracedOffset.want_tracing) nodeOffsetDebug else nodeOffsetRelease; - - noinline fn nodeOffsetDebug(node_offset: i32) LazySrcLoc { - var result: LazySrcLoc = .{ .node_offset = .{ .x = node_offset } }; - result.node_offset.trace.addAddr(@returnAddress(), "init"); - return result; - } - - fn nodeOffsetRelease(node_offset: i32) LazySrcLoc { - return .{ .node_offset = .{ .x = node_offset } }; - } - - /// Upgrade to a `SrcLoc` based on the `Decl` provided. - pub fn toSrcLoc(lazy: LazySrcLoc, decl: *Decl, mod: *Module) SrcLoc { - return switch (lazy) { - .unneeded, - .entire_file, - .byte_abs, - .token_abs, - .node_abs, - => .{ - .file_scope = decl.getFileScope(mod), - .parent_decl_node = 0, - .lazy = lazy, - }, - - .byte_offset, - .token_offset, - .node_offset, - .node_offset_main_token, - .node_offset_initializer, - .node_offset_var_decl_ty, - .node_offset_var_decl_align, - .node_offset_var_decl_section, - .node_offset_var_decl_addrspace, - .node_offset_var_decl_init, - .node_offset_builtin_call_arg0, - .node_offset_builtin_call_arg1, - .node_offset_builtin_call_arg2, - .node_offset_builtin_call_arg3, - .node_offset_builtin_call_arg4, - .node_offset_builtin_call_arg5, - .node_offset_ptrcast_operand, - .node_offset_array_access_index, - .node_offset_slice_ptr, - .node_offset_slice_start, - .node_offset_slice_end, - .node_offset_slice_sentinel, - .node_offset_call_func, - .node_offset_field_name, - .node_offset_field_name_init, - .node_offset_deref_ptr, - .node_offset_asm_source, - .node_offset_asm_ret_ty, - .node_offset_if_cond, - .node_offset_bin_op, - .node_offset_bin_lhs, - .node_offset_bin_rhs, - .node_offset_switch_operand, - .node_offset_switch_special_prong, - .node_offset_switch_range, - .node_offset_switch_prong_capture, - .node_offset_switch_prong_tag_capture, - .node_offset_fn_type_align, - .node_offset_fn_type_addrspace, - .node_offset_fn_type_section, - .node_offset_fn_type_cc, - .node_offset_fn_type_ret_ty, - .node_offset_param, - .token_offset_param, - .node_offset_anyframe_type, - .node_offset_lib_name, - .node_offset_array_type_len, - .node_offset_array_type_sentinel, - .node_offset_array_type_elem, - .node_offset_un_op, - .node_offset_ptr_elem, - .node_offset_ptr_sentinel, - .node_offset_ptr_align, - .node_offset_ptr_addrspace, - .node_offset_ptr_bitoffset, - .node_offset_ptr_hostsize, - .node_offset_container_tag, - .node_offset_field_default, - .node_offset_init_ty, - .node_offset_store_ptr, - .node_offset_store_operand, - .node_offset_return_operand, - .for_input, - .for_capture_from_input, - .array_cat_lhs, - .array_cat_rhs, - => .{ - .file_scope = decl.getFileScope(mod), - .parent_decl_node = decl.src_node, - .lazy = lazy, - }, - inline .call_arg, - .fn_proto_param, - => |x| .{ - .file_scope = decl.getFileScope(mod), - .parent_decl_node = mod.declPtr(x.decl).src_node, - .lazy = lazy, - }, - }; - } -}; - pub const SemaError = error{ OutOfMemory, AnalysisFail }; pub const CompileError = error{ OutOfMemory, -- cgit v1.2.3 From 7b37bc771b9a1ed38b06358269bf6a716a38de60 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 26 Feb 2024 20:58:10 -0700 Subject: move Zir to std.zig.Zir Part of an effort to ship more of the compiler in source form. 
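With Zir relocated, compiler sources reach it through the standard library namespace instead of a sibling file. A minimal sketch of the import change implied by the file moves below; the old relative import is an assumption about the prior code, not a line quoted from the diff:

    const std = @import("std");
    // Previously, within src/: const Zir = @import("Zir.zig");
    const Zir = std.zig.Zir;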
--- CMakeLists.txt | 2 +- lib/std/zig.zig | 1 + lib/std/zig/Zir.zig | 4090 +++++++++++++++++++++++++++++++++++++++++++++++++ src/AstGen.zig | 2 +- src/Autodoc.zig | 2 +- src/Compilation.zig | 2 +- src/InternPool.zig | 2 +- src/Module.zig | 2 +- src/Sema.zig | 2 +- src/Zir.zig | 4090 ------------------------------------------------- src/codegen.zig | 2 +- src/codegen/spirv.zig | 1 - src/crash_report.zig | 2 +- src/main.zig | 6 +- src/print_zir.zig | 2 +- src/reduce.zig | 2 +- 16 files changed, 4105 insertions(+), 4105 deletions(-) create mode 100644 lib/std/zig/Zir.zig delete mode 100644 src/Zir.zig (limited to 'src/Module.zig') diff --git a/CMakeLists.txt b/CMakeLists.txt index 9c06d24715..7a62cbe9c0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -515,6 +515,7 @@ set(ZIG_STAGE2_SOURCES "${CMAKE_SOURCE_DIR}/lib/std/zig/system/NativePaths.zig" "${CMAKE_SOURCE_DIR}/lib/std/zig/system/x86.zig" "${CMAKE_SOURCE_DIR}/lib/std/zig/tokenizer.zig" + "${CMAKE_SOURCE_DIR}/lib/std/zig/Zir.zig" "${CMAKE_SOURCE_DIR}/src/Air.zig" "${CMAKE_SOURCE_DIR}/src/AstGen.zig" "${CMAKE_SOURCE_DIR}/src/Compilation.zig" @@ -527,7 +528,6 @@ set(ZIG_STAGE2_SOURCES "${CMAKE_SOURCE_DIR}/src/Sema.zig" "${CMAKE_SOURCE_DIR}/src/TypedValue.zig" "${CMAKE_SOURCE_DIR}/src/Value.zig" - "${CMAKE_SOURCE_DIR}/src/Zir.zig" "${CMAKE_SOURCE_DIR}/src/arch/aarch64/CodeGen.zig" "${CMAKE_SOURCE_DIR}/src/arch/aarch64/Emit.zig" "${CMAKE_SOURCE_DIR}/src/arch/aarch64/Mir.zig" diff --git a/lib/std/zig.zig b/lib/std/zig.zig index f8782d5c9b..38cad68c21 100644 --- a/lib/std/zig.zig +++ b/lib/std/zig.zig @@ -12,6 +12,7 @@ pub const string_literal = @import("zig/string_literal.zig"); pub const number_literal = @import("zig/number_literal.zig"); pub const primitives = @import("zig/primitives.zig"); pub const Ast = @import("zig/Ast.zig"); +pub const Zir = @import("zig/Zir.zig"); pub const system = @import("zig/system.zig"); /// Deprecated: use `std.Target.Query`. pub const CrossTarget = std.Target.Query; diff --git a/lib/std/zig/Zir.zig b/lib/std/zig/Zir.zig new file mode 100644 index 0000000000..117212ccbb --- /dev/null +++ b/lib/std/zig/Zir.zig @@ -0,0 +1,4090 @@ +//! Zig Intermediate Representation. Astgen.zig converts AST nodes to these +//! untyped IR instructions. Next, Sema.zig processes these into AIR. +//! The minimum amount of information needed to represent a list of ZIR instructions. +//! Once this structure is completed, it can be used to generate AIR, followed by +//! machine code, without any memory access into the AST tree token list, node list, +//! or source bytes. Exceptions include: +//! * Compile errors, which may need to reach into these data structures to +//! create a useful report. +//! * In the future, possibly inline assembly, which needs to get parsed and +//! handled by the codegen backend, and errors reported there. However for now, +//! inline assembly is not an exception. + +const std = @import("std"); +const builtin = @import("builtin"); +const mem = std.mem; +const Allocator = std.mem.Allocator; +const assert = std.debug.assert; +const BigIntConst = std.math.big.int.Const; +const BigIntMutable = std.math.big.int.Mutable; +const Ast = std.zig.Ast; + +const Zir = @This(); +const LazySrcLoc = std.zig.LazySrcLoc; + +instructions: std.MultiArrayList(Inst).Slice, +/// In order to store references to strings in fewer bytes, we copy all +/// string bytes into here. String bytes can be null. 
It is up to whomever +/// is referencing the data here whether they want to store both index and length, +/// thus allowing null bytes, or store only index, and use null-termination. The +/// `string_bytes` array is agnostic to either usage. +/// Index 0 is reserved for special cases. +string_bytes: []u8, +/// The meaning of this data is determined by `Inst.Tag` value. +/// The first few indexes are reserved. See `ExtraIndex` for the values. +extra: []u32, + +/// The data stored at byte offset 0 when ZIR is stored in a file. +pub const Header = extern struct { + instructions_len: u32, + string_bytes_len: u32, + extra_len: u32, + /// We could leave this as padding, however it triggers a Valgrind warning because + /// we read and write undefined bytes to the file system. This is harmless, but + /// it's essentially free to have a zero field here and makes the warning go away, + /// making it more likely that following Valgrind warnings will be taken seriously. + unused: u32 = 0, + stat_inode: std.fs.File.INode, + stat_size: u64, + stat_mtime: i128, +}; + +pub const ExtraIndex = enum(u32) { + /// If this is 0, no compile errors. Otherwise there is a `CompileErrors` + /// payload at this index. + compile_errors, + /// If this is 0, this file contains no imports. Otherwise there is a `Imports` + /// payload at this index. + imports, + + _, +}; + +fn ExtraData(comptime T: type) type { + return struct { data: T, end: usize }; +} + +/// Returns the requested data, as well as the new index which is at the start of the +/// trailers for the object. +pub fn extraData(code: Zir, comptime T: type, index: usize) ExtraData(T) { + const fields = @typeInfo(T).Struct.fields; + var i: usize = index; + var result: T = undefined; + inline for (fields) |field| { + @field(result, field.name) = switch (field.type) { + u32 => code.extra[i], + + Inst.Ref, + Inst.Index, + Inst.Declaration.Name, + NullTerminatedString, + => @enumFromInt(code.extra[i]), + + i32, + Inst.Call.Flags, + Inst.BuiltinCall.Flags, + Inst.SwitchBlock.Bits, + Inst.SwitchBlockErrUnion.Bits, + Inst.FuncFancy.Bits, + Inst.Declaration.Flags, + => @bitCast(code.extra[i]), + + else => @compileError("bad field type"), + }; + i += 1; + } + return .{ + .data = result, + .end = i, + }; +} + +pub const NullTerminatedString = enum(u32) { + empty = 0, + _, +}; + +/// Given an index into `string_bytes` returns the null-terminated string found there. +pub fn nullTerminatedString(code: Zir, index: NullTerminatedString) [:0]const u8 { + const start = @intFromEnum(index); + var end: u32 = start; + while (code.string_bytes[end] != 0) { + end += 1; + } + return code.string_bytes[start..end :0]; +} + +pub fn refSlice(code: Zir, start: usize, len: usize) []Inst.Ref { + return @ptrCast(code.extra[start..][0..len]); +} + +pub fn bodySlice(zir: Zir, start: usize, len: usize) []Inst.Index { + return @ptrCast(zir.extra[start..][0..len]); +} + +pub fn hasCompileErrors(code: Zir) bool { + return code.extra[@intFromEnum(ExtraIndex.compile_errors)] != 0; +} + +pub fn deinit(code: *Zir, gpa: Allocator) void { + code.instructions.deinit(gpa); + gpa.free(code.string_bytes); + gpa.free(code.extra); + code.* = undefined; +} + +/// These are untyped instructions generated from an Abstract Syntax Tree. +/// The data here is immutable because it is possible to have multiple +/// analyses on the same ZIR happening at the same time. +pub const Inst = struct { + tag: Tag, + data: Data, + + /// These names are used directly as the instruction names in the text format. 
+ /// See `data_field_map` for a list of which `Data` fields are used by each `Tag`. + pub const Tag = enum(u8) { + /// Arithmetic addition, asserts no integer overflow. + /// Uses the `pl_node` union field. Payload is `Bin`. + add, + /// Twos complement wrapping integer addition. + /// Uses the `pl_node` union field. Payload is `Bin`. + addwrap, + /// Saturating addition. + /// Uses the `pl_node` union field. Payload is `Bin`. + add_sat, + /// The same as `add` except no safety check. + add_unsafe, + /// Arithmetic subtraction. Asserts no integer overflow. + /// Uses the `pl_node` union field. Payload is `Bin`. + sub, + /// Twos complement wrapping integer subtraction. + /// Uses the `pl_node` union field. Payload is `Bin`. + subwrap, + /// Saturating subtraction. + /// Uses the `pl_node` union field. Payload is `Bin`. + sub_sat, + /// Arithmetic multiplication. Asserts no integer overflow. + /// Uses the `pl_node` union field. Payload is `Bin`. + mul, + /// Twos complement wrapping integer multiplication. + /// Uses the `pl_node` union field. Payload is `Bin`. + mulwrap, + /// Saturating multiplication. + /// Uses the `pl_node` union field. Payload is `Bin`. + mul_sat, + /// Implements the `@divExact` builtin. + /// Uses the `pl_node` union field with payload `Bin`. + div_exact, + /// Implements the `@divFloor` builtin. + /// Uses the `pl_node` union field with payload `Bin`. + div_floor, + /// Implements the `@divTrunc` builtin. + /// Uses the `pl_node` union field with payload `Bin`. + div_trunc, + /// Implements the `@mod` builtin. + /// Uses the `pl_node` union field with payload `Bin`. + mod, + /// Implements the `@rem` builtin. + /// Uses the `pl_node` union field with payload `Bin`. + rem, + /// Ambiguously remainder division or modulus. If the computation would possibly have + /// a different value depending on whether the operation is remainder division or modulus, + /// a compile error is emitted. Otherwise the computation is performed. + /// Uses the `pl_node` union field. Payload is `Bin`. + mod_rem, + /// Integer shift-left. Zeroes are shifted in from the right hand side. + /// Uses the `pl_node` union field. Payload is `Bin`. + shl, + /// Implements the `@shlExact` builtin. + /// Uses the `pl_node` union field with payload `Bin`. + shl_exact, + /// Saturating shift-left. + /// Uses the `pl_node` union field. Payload is `Bin`. + shl_sat, + /// Integer shift-right. Arithmetic or logical depending on the signedness of + /// the integer type. + /// Uses the `pl_node` union field. Payload is `Bin`. + shr, + /// Implements the `@shrExact` builtin. + /// Uses the `pl_node` union field with payload `Bin`. + shr_exact, + + /// Declares a parameter of the current function. Used for: + /// * debug info + /// * checking shadowing against declarations in the current namespace + /// * parameter type expressions referencing other parameters + /// These occur in the block outside a function body (the same block as + /// contains the func instruction). + /// Uses the `pl_tok` field. Token is the parameter name, payload is a `Param`. + param, + /// Same as `param` except the parameter is marked comptime. + param_comptime, + /// Same as `param` except the parameter is marked anytype. + /// Uses the `str_tok` field. Token is the parameter name. String is the parameter name. + param_anytype, + /// Same as `param` except the parameter is marked both comptime and anytype. + /// Uses the `str_tok` field. Token is the parameter name. String is the parameter name. 
+        param_anytype_comptime,
+        /// Array concatenation. `a ++ b`
+        /// Uses the `pl_node` union field. Payload is `Bin`.
+        array_cat,
+        /// Array multiplication `a ** b`
+        /// Uses the `pl_node` union field. Payload is `ArrayMul`.
+        array_mul,
+        /// `[N]T` syntax. No source location provided.
+        /// Uses the `pl_node` union field. Payload is `Bin`. lhs is length, rhs is element type.
+        array_type,
+        /// `[N:S]T` syntax. Source location is the array type expression node.
+        /// Uses the `pl_node` union field. Payload is `ArrayTypeSentinel`.
+        array_type_sentinel,
+        /// `@Vector` builtin.
+        /// Uses the `pl_node` union field with `Bin` payload.
+        /// lhs is length, rhs is element type.
+        vector_type,
+        /// Given a pointer type, returns its element type. Reaches through any optional or error
+        /// union types wrapping the pointer. Asserts that the underlying type is a pointer type.
+        /// Returns generic poison if the element type is `anyopaque`.
+        /// Uses the `un_node` field.
+        elem_type,
+        /// Given an indexable pointer (slice, many-ptr, single-ptr-to-array), returns its
+        /// element type. Emits a compile error if the type is not an indexable pointer.
+        /// Uses the `un_node` field.
+        indexable_ptr_elem_type,
+        /// Given a vector type, returns its element type.
+        /// Uses the `un_node` field.
+        vector_elem_type,
+        /// Given a pointer to an indexable object, returns the len property. This is
+        /// used by for loops. This instruction also emits a for-loop specific compile
+        /// error if the indexable object is not indexable.
+        /// Uses the `un_node` field. The AST node is the for loop node.
+        indexable_ptr_len,
+        /// Create an `anyframe->T` type.
+        /// Uses the `un_node` field.
+        anyframe_type,
+        /// Type coercion to the function's return type.
+        /// Uses the `pl_node` field. Payload is `As`. AST node could be many things.
+        as_node,
+        /// Same as `as_node` but ignores runtime to comptime int error.
+        as_shift_operand,
+        /// Bitwise AND. `&`
+        bit_and,
+        /// Reinterpret the memory representation of a value as a different type.
+        /// Uses the `pl_node` field with payload `Bin`.
+        bitcast,
+        /// Bitwise NOT. `~`
+        /// Uses `un_node`.
+        bit_not,
+        /// Bitwise OR. `|`
+        bit_or,
+        /// A labeled block of code, which can return a value.
+        /// Uses the `pl_node` union field. Payload is `Block`.
+        block,
+        /// Like `block`, but forces full evaluation of its contents at compile-time.
+        /// Uses the `pl_node` union field. Payload is `Block`.
+        block_comptime,
+        /// A list of instructions which are analyzed in the parent context, without
+        /// generating a runtime block. Must terminate with an "inline" variant of
+        /// a noreturn instruction.
+        /// Uses the `pl_node` union field. Payload is `Block`.
+        block_inline,
+        /// This instruction may only ever appear in the list of declarations for a
+        /// namespace type, e.g. within a `struct_decl` instruction. It represents a
+        /// single source declaration (`const`/`var`/`fn`), containing the name,
+        /// attributes, type, and value of the declaration.
+        /// Uses the `pl_node` union field. Payload is `Declaration`.
+        declaration,
+        /// Implements `suspend {...}`.
+        /// Uses the `pl_node` union field. Payload is `Block`.
+        suspend_block,
+        /// Boolean NOT. See also `bit_not`.
+        /// Uses the `un_node` field.
+        bool_not,
+        /// Short-circuiting boolean `and`. `lhs` is a boolean `Ref` and the other operand
+        /// is a block, which is evaluated if `lhs` is `true`.
+        /// Uses the `pl_node` union field. Payload is `BoolBr`.
+        bool_br_and,
+        /// Short-circuiting boolean `or`.
`lhs` is a boolean `Ref` and the other operand + /// is a block, which is evaluated if `lhs` is `false`. + /// Uses the `pl_node` union field. Payload is `BoolBr`. + bool_br_or, + /// Return a value from a block. + /// Uses the `break` union field. + /// Uses the source information from previous instruction. + @"break", + /// Return a value from a block. This instruction is used as the terminator + /// of a `block_inline`. It allows using the return value from `Sema.analyzeBody`. + /// This instruction may also be used when it is known that there is only one + /// break instruction in a block, and the target block is the parent. + /// Uses the `break` union field. + break_inline, + /// Checks that comptime control flow does not happen inside a runtime block. + /// Uses the `un_node` union field. + check_comptime_control_flow, + /// Function call. + /// Uses the `pl_node` union field with payload `Call`. + /// AST node is the function call. + call, + /// Function call using `a.b()` syntax. + /// Uses the named field as the callee. If there is no such field, searches in the type for + /// a decl matching the field name. The decl is resolved and we ensure that it's a function + /// which can accept the object as the first parameter, with one pointer fixup. This + /// function is then used as the callee, with the object as an implicit first parameter. + /// Uses the `pl_node` union field with payload `FieldCall`. + /// AST node is the function call. + field_call, + /// Implements the `@call` builtin. + /// Uses the `pl_node` union field with payload `BuiltinCall`. + /// AST node is the builtin call. + builtin_call, + /// `<` + /// Uses the `pl_node` union field. Payload is `Bin`. + cmp_lt, + /// `<=` + /// Uses the `pl_node` union field. Payload is `Bin`. + cmp_lte, + /// `==` + /// Uses the `pl_node` union field. Payload is `Bin`. + cmp_eq, + /// `>=` + /// Uses the `pl_node` union field. Payload is `Bin`. + cmp_gte, + /// `>` + /// Uses the `pl_node` union field. Payload is `Bin`. + cmp_gt, + /// `!=` + /// Uses the `pl_node` union field. Payload is `Bin`. + cmp_neq, + /// Conditional branch. Splits control flow based on a boolean condition value. + /// Uses the `pl_node` union field. AST node is an if, while, for, etc. + /// Payload is `CondBr`. + condbr, + /// Same as `condbr`, except the condition is coerced to a comptime value, and + /// only the taken branch is analyzed. The then block and else block must + /// terminate with an "inline" variant of a noreturn instruction. + condbr_inline, + /// Given an operand which is an error union, splits control flow. In + /// case of error, control flow goes into the block that is part of this + /// instruction, which is guaranteed to end with a return instruction + /// and never breaks out of the block. + /// In the case of non-error, control flow proceeds to the next instruction + /// after the `try`, with the result of this instruction being the unwrapped + /// payload value, as if `err_union_payload_unsafe` was executed on the operand. + /// Uses the `pl_node` union field. Payload is `Try`. + @"try", + /// Same as `try` except the operand is a pointer and the result is a pointer. + try_ptr, + /// An error set type definition. Contains a list of field names. + /// Uses the `pl_node` union field. Payload is `ErrorSetDecl`. + error_set_decl, + error_set_decl_anon, + error_set_decl_func, + /// Declares the beginning of a statement. Used for debug info. + /// Uses the `dbg_stmt` union field. 
The line and column are offset
+        /// from the parent declaration.
+        dbg_stmt,
+        /// Marks a variable declaration. Used for debug info.
+        /// Uses the `str_op` union field. The string is the local variable name,
+        /// and the operand is the pointer to the variable's location. The local
+        /// may be a const or a var.
+        dbg_var_ptr,
+        /// Same as `dbg_var_ptr` but the local is always a const and the operand
+        /// is the local's value.
+        dbg_var_val,
+        /// Uses a name to identify a Decl and takes a pointer to it.
+        /// Uses the `str_tok` union field.
+        decl_ref,
+        /// Uses a name to identify a Decl and uses it as a value.
+        /// Uses the `str_tok` union field.
+        decl_val,
+        /// Load the value from a pointer. Assumes `x.*` syntax.
+        /// Uses `un_node` field. AST node is the `x.*` syntax.
+        load,
+        /// Arithmetic division. Asserts no integer overflow.
+        /// Uses the `pl_node` union field. Payload is `Bin`.
+        div,
+        /// Given a pointer to an array, slice, or pointer, returns a pointer to the element at
+        /// the provided index.
+        /// Uses the `pl_node` union field. AST node is a[b] syntax. Payload is `Bin`.
+        elem_ptr_node,
+        /// Same as `elem_ptr_node` but used only for for loop.
+        /// Uses the `pl_node` union field. AST node is the condition of a for loop.
+        /// Payload is `Bin`.
+        /// No OOB safety check is emitted.
+        elem_ptr,
+        /// Given an array, slice, or pointer, returns the element at the provided index.
+        /// Uses the `pl_node` union field. AST node is a[b] syntax. Payload is `Bin`.
+        elem_val_node,
+        /// Same as `elem_val_node` but used only for for loop.
+        /// Uses the `pl_node` union field. AST node is the condition of a for loop.
+        /// Payload is `Bin`.
+        /// No OOB safety check is emitted.
+        elem_val,
+        /// Same as `elem_val` but takes the index as an immediate value.
+        /// No OOB safety check is emitted. A prior instruction must validate this operation.
+        /// Uses the `elem_val_imm` union field.
+        elem_val_imm,
+        /// Emits a compile error if the operand is not `void`.
+        /// Uses the `un_node` field.
+        ensure_result_used,
+        /// Emits a compile error if an error is ignored.
+        /// Uses the `un_node` field.
+        ensure_result_non_error,
+        /// Emits a compile error if the error union payload is not void.
+        ensure_err_union_payload_void,
+        /// Create an `E!T` type.
+        /// Uses the `pl_node` field with `Bin` payload.
+        error_union_type,
+        /// `error.Foo` syntax. Uses the `str_tok` field of the Data union.
+        error_value,
+        /// Implements the `@export` builtin function, based on either an identifier to a Decl,
+        /// or field access of a Decl. The thing being exported is the Decl.
+        /// Uses the `pl_node` union field. Payload is `Export`.
+        @"export",
+        /// Implements the `@export` builtin function, based on a comptime-known value.
+        /// The thing being exported is the comptime-known value which is the operand.
+        /// Uses the `pl_node` union field. Payload is `ExportValue`.
+        export_value,
+        /// Given a pointer to a struct or object that contains virtual fields, returns a pointer
+        /// to the named field. The field name is stored in string_bytes. Used by a.b syntax.
+        /// Uses `pl_node` field. The AST node is the a.b syntax. Payload is Field.
+        field_ptr,
+        /// Given a struct or object that contains virtual fields, returns the named field.
+        /// The field name is stored in string_bytes. Used by a.b syntax.
+        /// This instruction also accepts a pointer.
+        /// Uses `pl_node` field. The AST node is the a.b syntax. Payload is Field.
+        field_val,
+        /// Given a pointer to a struct or object that contains virtual fields, returns a pointer
+        /// to the named field. The field name is a comptime instruction. Used by @field.
+        /// Uses `pl_node` field. The AST node is the builtin call. Payload is FieldNamed.
+        field_ptr_named,
+        /// Given a struct or object that contains virtual fields, returns the named field.
+        /// The field name is a comptime instruction. Used by @field.
+        /// Uses `pl_node` field. The AST node is the builtin call. Payload is FieldNamed.
+        field_val_named,
+        /// Returns a function type, or a function instance, depending on whether
+        /// the body_len is 0. Calling convention is auto.
+        /// Uses the `pl_node` union field. `payload_index` points to a `Func`.
+        func,
+        /// Same as `func` but has an inferred error set.
+        func_inferred,
+        /// Represents a function declaration or function prototype, depending on
+        /// whether body_len is 0.
+        /// Uses the `pl_node` union field. `payload_index` points to a `FuncFancy`.
+        func_fancy,
+        /// Implements the `@import` builtin.
+        /// Uses the `str_tok` field.
+        import,
+        /// Integer literal that fits in a u64. Uses the `int` union field.
+        int,
+        /// Arbitrary sized integer literal. Uses the `str` union field.
+        int_big,
+        /// A float literal that fits in an f64. Uses the `float` union value.
+        float,
+        /// A float literal that fits in an f128. Uses the `pl_node` union value.
+        /// Payload is `Float128`.
+        float128,
+        /// Make an integer type out of signedness and bit count.
+        /// Payload is `int_type`.
+        int_type,
+        /// Return a boolean false if an optional is null. `x != null`
+        /// Uses the `un_node` field.
+        is_non_null,
+        /// Return a boolean false if an optional is null. `x.* != null`
+        /// Uses the `un_node` field.
+        is_non_null_ptr,
+        /// Return a boolean false if the value is an error.
+        /// Uses the `un_node` field.
+        is_non_err,
+        /// Return a boolean false if the dereferenced pointer is an error.
+        /// Uses the `un_node` field.
+        is_non_err_ptr,
+        /// Same as `is_non_err` but doesn't validate that the type can be an error.
+        /// Uses the `un_node` field.
+        ret_is_non_err,
+        /// A labeled block of code that loops forever. The end of the body will have either
+        /// a `repeat` instruction or a `repeat_inline` instruction.
+        /// Uses the `pl_node` field. The AST node is either a for loop or while loop.
+        /// This ZIR instruction is needed because AIR does not (yet?) match ZIR, and Sema
+        /// needs to emit more than 1 AIR block for this instruction.
+        /// The payload is `Block`.
+        loop,
+        /// Sends runtime control flow back to the beginning of the current block.
+        /// Uses the `node` field.
+        repeat,
+        /// Sends comptime control flow back to the beginning of the current block.
+        /// Uses the `node` field.
+        repeat_inline,
+        /// Asserts that all the lengths provided match. Used to build a for loop.
+        /// Return value is the length as a usize.
+        /// Uses the `pl_node` field with payload `MultiOp`.
+        /// There is exactly one item corresponding to each AST node inside the for
+        /// loop condition. Any item may be `none`, indicating an unbounded range.
+        /// Illegal behaviors:
+        ///  * If all lengths are unbounded ranges (always a compile error).
+        ///  * If any two lengths do not match each other.
+        for_len,
+        /// Merge two error sets into one, `E1 || E2`.
+        /// Uses the `pl_node` field with payload `Bin`.
+        merge_error_sets,
+        /// Turns an R-Value into a const L-Value. In other words, it takes a value,
+        /// stores it in a memory location, and returns a const pointer to it.
If the value + /// is `comptime`, the memory location is global static constant data. Otherwise, + /// the memory location is in the stack frame, local to the scope containing the + /// instruction. + /// Uses the `un_tok` union field. + ref, + /// Sends control flow back to the function's callee. + /// Includes an operand as the return value. + /// Includes an AST node source location. + /// Uses the `un_node` union field. + ret_node, + /// Sends control flow back to the function's callee. + /// The operand is a `ret_ptr` instruction, where the return value can be found. + /// Includes an AST node source location. + /// Uses the `un_node` union field. + ret_load, + /// Sends control flow back to the function's callee. + /// Includes an operand as the return value. + /// Includes a token source location. + /// Uses the `un_tok` union field. + ret_implicit, + /// Sends control flow back to the function's callee. + /// The return operand is `error.foo` where `foo` is given by the string. + /// If the current function has an inferred error set, the error given by the + /// name is added to it. + /// Uses the `str_tok` union field. + ret_err_value, + /// A string name is provided which is an anonymous error set value. + /// If the current function has an inferred error set, the error given by the + /// name is added to it. + /// Results in the error code. Note that control flow is not diverted with + /// this instruction; a following 'ret' instruction will do the diversion. + /// Uses the `str_tok` union field. + ret_err_value_code, + /// Obtains a pointer to the return value. + /// Uses the `node` union field. + ret_ptr, + /// Obtains the return type of the in-scope function. + /// Uses the `node` union field. + ret_type, + /// Create a pointer type which can have a sentinel, alignment, address space, and/or bit range. + /// Uses the `ptr_type` union field. + ptr_type, + /// Slice operation `lhs[rhs..]`. No sentinel and no end offset. + /// Returns a pointer to the subslice. + /// Uses the `pl_node` field. AST node is the slice syntax. Payload is `SliceStart`. + slice_start, + /// Slice operation `array_ptr[start..end]`. No sentinel. + /// Returns a pointer to the subslice. + /// Uses the `pl_node` field. AST node is the slice syntax. Payload is `SliceEnd`. + slice_end, + /// Slice operation `array_ptr[start..end:sentinel]`. + /// Returns a pointer to the subslice. + /// Uses the `pl_node` field. AST node is the slice syntax. Payload is `SliceSentinel`. + slice_sentinel, + /// Slice operation `array_ptr[start..][0..len]`. Optional sentinel. + /// Returns a pointer to the subslice. + /// Uses the `pl_node` field. AST node is the slice syntax. Payload is `SliceLength`. + slice_length, + /// Same as `store` except provides a source location. + /// Uses the `pl_node` union field. Payload is `Bin`. + store_node, + /// Same as `store_node` but the type of the value being stored will be + /// used to infer the pointer type of an `alloc_inferred`. + /// Uses the `pl_node` union field. Payload is `Bin`. + store_to_inferred_ptr, + /// String Literal. Makes an anonymous Decl and then takes a pointer to it. + /// Uses the `str` union field. + str, + /// Arithmetic negation. Asserts no integer overflow. + /// Same as sub with a lhs of 0, split into a separate instruction to save memory. + /// Uses `un_node`. + negate, + /// Twos complement wrapping integer negation. + /// Same as subwrap with a lhs of 0, split into a separate instruction to save memory. + /// Uses `un_node`. 
+        negate_wrap,
+        /// Returns the type of a value.
+        /// Uses the `un_node` field.
+        typeof,
+        /// Implements `@TypeOf` for one operand.
+        /// Uses the `pl_node` field.
+        typeof_builtin,
+        /// Given a value, look at the type of it, which must be an integer type.
+        /// Returns the integer type for the RHS of a shift operation.
+        /// Uses the `un_node` field.
+        typeof_log2_int_type,
+        /// Asserts control-flow will not reach this instruction (`unreachable`).
+        /// Uses the `@"unreachable"` union field.
+        @"unreachable",
+        /// Bitwise XOR. `^`
+        /// Uses the `pl_node` union field. Payload is `Bin`.
+        xor,
+        /// Create an optional type '?T'
+        /// Uses the `un_node` field.
+        optional_type,
+        /// ?T => T with safety.
+        /// Given an optional value, returns the payload value, with a safety check that
+        /// the value is non-null. Used for `orelse`, `if` and `while`.
+        /// Uses the `un_node` field.
+        optional_payload_safe,
+        /// ?T => T without safety.
+        /// Given an optional value, returns the payload value. No safety checks.
+        /// Uses the `un_node` field.
+        optional_payload_unsafe,
+        /// *?T => *T with safety.
+        /// Given a pointer to an optional value, returns a pointer to the payload value,
+        /// with a safety check that the value is non-null. Used for `orelse`, `if` and `while`.
+        /// Uses the `un_node` field.
+        optional_payload_safe_ptr,
+        /// *?T => *T without safety.
+        /// Given a pointer to an optional value, returns a pointer to the payload value.
+        /// No safety checks.
+        /// Uses the `un_node` field.
+        optional_payload_unsafe_ptr,
+        /// E!T => T without safety.
+        /// Given an error union value, returns the payload value. No safety checks.
+        /// Uses the `un_node` field.
+        err_union_payload_unsafe,
+        /// *E!T => *T without safety.
+        /// Given a pointer to an error union value, returns a pointer to the payload value.
+        /// No safety checks.
+        /// Uses the `un_node` field.
+        err_union_payload_unsafe_ptr,
+        /// E!T => E without safety.
+        /// Given an error union value, returns the error code. No safety checks.
+        /// Uses the `un_node` field.
+        err_union_code,
+        /// *E!T => E without safety.
+        /// Given a pointer to an error union value, returns the error code. No safety checks.
+        /// Uses the `un_node` field.
+        err_union_code_ptr,
+        /// An enum literal. Uses the `str_tok` union field.
+        enum_literal,
+        /// A switch expression. Uses the `pl_node` union field.
+        /// AST node is the switch, payload is `SwitchBlock`.
+        switch_block,
+        /// A switch expression. Uses the `pl_node` union field.
+        /// AST node is the switch, payload is `SwitchBlock`. Operand is a pointer.
+        switch_block_ref,
+        /// A switch on an error union `a catch |err| switch (err) {...}`.
+        /// Uses the `pl_node` union field. AST node is the `catch`, payload is `SwitchBlockErrUnion`.
+        switch_block_err_union,
+        /// Check that the operand type supports the dereference operator (`.*`).
+        /// Uses the `un_node` field.
+        validate_deref,
+        /// Check that the operand's type is an array or tuple with the given number of elements.
+        /// Uses the `pl_node` field. Payload is `ValidateDestructure`.
+        validate_destructure,
+        /// Given a struct or union, and a field name as a Ref,
+        /// returns the field type. Uses the `pl_node` field. Payload is `FieldTypeRef`.
+        field_type_ref,
+        /// Given a pointer, initializes all error unions and optionals in the pointee to payloads,
+        /// returning the base payload pointer. For instance, converts *E!?T into a valid *T
+        /// (clobbering any existing error or null value).
+        /// Uses the `un_node` field.
+ opt_eu_base_ptr_init, + /// Coerce a given value such that when a reference is taken, the resulting pointer will be + /// coercible to the given type. For instance, given a value of type 'u32' and the pointer + /// type '*u64', coerces the value to a 'u64'. Asserts that the type is a pointer type. + /// Uses the `pl_node` field. Payload is `Bin`. + /// LHS is the pointer type, RHS is the value. + coerce_ptr_elem_ty, + /// Given a type, validate that it is a pointer type suitable for return from the address-of + /// operator. Emit a compile error if not. + /// Uses the `un_tok` union field. Token is the `&` operator. Operand is the type. + validate_ref_ty, + + // The following tags all relate to struct initialization expressions. + + /// A struct literal with a specified explicit type, with no fields. + /// Uses the `un_node` field. + struct_init_empty, + /// An anonymous struct literal with a known result type, with no fields. + /// Uses the `un_node` field. + struct_init_empty_result, + /// An anonymous struct literal with no fields, returned by reference, with a known result + /// type for the pointer. Asserts that the type is a pointer. + /// Uses the `un_node` field. + struct_init_empty_ref_result, + /// Struct initialization without a type. Creates a value of an anonymous struct type. + /// Uses the `pl_node` field. Payload is `StructInitAnon`. + struct_init_anon, + /// Finalizes a typed struct or union initialization, performs validation, and returns the + /// struct or union value. The given type must be validated prior to this instruction, using + /// `validate_struct_init_ty` or `validate_struct_init_result_ty`. If the given type is + /// generic poison, this is downgraded to an anonymous initialization. + /// Uses the `pl_node` field. Payload is `StructInit`. + struct_init, + /// Struct initialization syntax, make the result a pointer. Equivalent to `struct_init` + /// followed by `ref` - this ZIR tag exists as an optimization for a common pattern. + /// Uses the `pl_node` field. Payload is `StructInit`. + struct_init_ref, + /// Checks that the type supports struct init syntax. Always returns void. + /// Uses the `un_node` field. + validate_struct_init_ty, + /// Like `validate_struct_init_ty`, but additionally accepts types which structs coerce to. + /// Used on the known result type of a struct init expression. Always returns void. + /// Uses the `un_node` field. + validate_struct_init_result_ty, + /// Given a set of `struct_init_field_ptr` instructions, assumes they are all part of a + /// struct initialization expression, and emits compile errors for duplicate fields as well + /// as missing fields, if applicable. + /// This instruction asserts that there is at least one struct_init_field_ptr instruction, + /// because it must use one of them to find out the struct type. + /// Uses the `pl_node` field. Payload is `Block`. + validate_ptr_struct_init, + /// Given a type being used for a struct initialization expression, returns the type of the + /// field with the given name. + /// Uses the `pl_node` field. Payload is `FieldType`. + struct_init_field_type, + /// Given a pointer being used as the result pointer of a struct initialization expression, + /// return a pointer to the field of the given name. + /// Uses the `pl_node` field. The AST node is the field initializer. Payload is Field. + struct_init_field_ptr, + + // The following tags all relate to array initialization expressions. + + /// Array initialization without a type. Creates a value of a tuple type. 
+ /// Uses the `pl_node` field. Payload is `MultiOp`. + array_init_anon, + /// Array initialization syntax with a known type. The given type must be validated prior to + /// this instruction, using some `validate_array_init_*_ty` instruction. + /// Uses the `pl_node` field. Payload is `MultiOp`, where the first operand is the type. + array_init, + /// Array initialization syntax, make the result a pointer. Equivalent to `array_init` + /// followed by `ref` - this ZIR tag exists as an optimization for a common pattern. + /// Uses the `pl_node` field. Payload is `MultiOp`, where the first operand is the type. + array_init_ref, + /// Checks that the type supports array init syntax. Always returns void. + /// Uses the `pl_node` field. Payload is `ArrayInit`. + validate_array_init_ty, + /// Like `validate_array_init_ty`, but additionally accepts types which arrays coerce to. + /// Used on the known result type of an array init expression. Always returns void. + /// Uses the `pl_node` field. Payload is `ArrayInit`. + validate_array_init_result_ty, + /// Given a pointer or slice type and an element count, return the expected type of an array + /// initializer such that a pointer to the initializer has the given pointer type, checking + /// that this type supports array init syntax and emitting a compile error if not. Preserves + /// error union and optional wrappers on the array type, if any. + /// Asserts that the given type is a pointer or slice type. + /// Uses the `pl_node` field. Payload is `ArrayInitRefTy`. + validate_array_init_ref_ty, + /// Given a set of `array_init_elem_ptr` instructions, assumes they are all part of an array + /// initialization expression, and emits a compile error if the number of elements does not + /// match the array type. + /// This instruction asserts that there is at least one `array_init_elem_ptr` instruction, + /// because it must use one of them to find out the array type. + /// Uses the `pl_node` field. Payload is `Block`. + validate_ptr_array_init, + /// Given a type being used for an array initialization expression, returns the type of the + /// element at the given index. + /// Uses the `bin` union field. lhs is the indexable type, rhs is the index. + array_init_elem_type, + /// Given a pointer being used as the result pointer of an array initialization expression, + /// return a pointer to the element at the given index. + /// Uses the `pl_node` union field. AST node is an element inside array initialization + /// syntax. Payload is `ElemPtrImm`. + array_init_elem_ptr, + + /// Implements the `@unionInit` builtin. + /// Uses the `pl_node` field. Payload is `UnionInit`. + union_init, + /// Implements the `@typeInfo` builtin. Uses `un_node`. + type_info, + /// Implements the `@sizeOf` builtin. Uses `un_node`. + size_of, + /// Implements the `@bitSizeOf` builtin. Uses `un_node`. + bit_size_of, + + /// Implement builtin `@intFromPtr`. Uses `un_node`. + /// Convert a pointer to a `usize` integer. + int_from_ptr, + /// Emit an error message and fail compilation. + /// Uses the `un_node` field. + compile_error, + /// Changes the maximum number of backwards branches that compile-time + /// code execution can use before giving up and making a compile error. + /// Uses the `un_node` union field. + set_eval_branch_quota, + /// Converts an enum value into an integer. Resulting type will be the tag type + /// of the enum. Uses `un_node`. + int_from_enum, + /// Implement builtin `@alignOf`. Uses `un_node`. + align_of, + /// Implement builtin `@intFromBool`.
Uses `un_node`. + int_from_bool, + /// Implement builtin `@embedFile`. Uses `un_node`. + embed_file, + /// Implement builtin `@errorName`. Uses `un_node`. + error_name, + /// Implement builtin `@panic`. Uses `un_node`. + panic, + /// Implements `@trap`. + /// Uses the `node` field. + trap, + /// Implement builtin `@setRuntimeSafety`. Uses `un_node`. + set_runtime_safety, + /// Implement builtin `@sqrt`. Uses `un_node`. + sqrt, + /// Implement builtin `@sin`. Uses `un_node`. + sin, + /// Implement builtin `@cos`. Uses `un_node`. + cos, + /// Implement builtin `@tan`. Uses `un_node`. + tan, + /// Implement builtin `@exp`. Uses `un_node`. + exp, + /// Implement builtin `@exp2`. Uses `un_node`. + exp2, + /// Implement builtin `@log`. Uses `un_node`. + log, + /// Implement builtin `@log2`. Uses `un_node`. + log2, + /// Implement builtin `@log10`. Uses `un_node`. + log10, + /// Implement builtin `@abs`. Uses `un_node`. + abs, + /// Implement builtin `@floor`. Uses `un_node`. + floor, + /// Implement builtin `@ceil`. Uses `un_node`. + ceil, + /// Implement builtin `@trunc`. Uses `un_node`. + trunc, + /// Implement builtin `@round`. Uses `un_node`. + round, + /// Implement builtin `@tagName`. Uses `un_node`. + tag_name, + /// Implement builtin `@typeName`. Uses `un_node`. + type_name, + /// Implement builtin `@Frame`. Uses `un_node`. + frame_type, + /// Implement builtin `@frameSize`. Uses `un_node`. + frame_size, + + /// Implements the `@intFromFloat` builtin. + /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand. + int_from_float, + /// Implements the `@floatFromInt` builtin. + /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand. + float_from_int, + /// Implements the `@ptrFromInt` builtin. + /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand. + ptr_from_int, + /// Converts an integer into an enum value. + /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand. + enum_from_int, + /// Convert a larger float type to any other float type, possibly causing + /// a loss of precision. + /// Uses the `pl_node` field. AST is the `@floatCast` syntax. + /// Payload is `Bin` with lhs as the dest type, rhs the operand. + float_cast, + /// Implements the `@intCast` builtin. + /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand. + /// Convert an integer value to another integer type, asserting that the destination type + /// can hold the same mathematical value. + int_cast, + /// Implements the `@ptrCast` builtin. + /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand. + /// Not every `@ptrCast` will correspond to this instruction - see also + /// `ptr_cast_full` in `Extended`. + ptr_cast, + /// Implements the `@truncate` builtin. + /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand. + truncate, + + /// Implements the `@hasDecl` builtin. + /// Uses the `pl_node` union field. Payload is `Bin`. + has_decl, + /// Implements the `@hasField` builtin. + /// Uses the `pl_node` union field. Payload is `Bin`. + has_field, + + /// Implements the `@clz` builtin. Uses the `un_node` union field. + clz, + /// Implements the `@ctz` builtin. Uses the `un_node` union field. + ctz, + /// Implements the `@popCount` builtin. Uses the `un_node` union field. + pop_count, + /// Implements the `@byteSwap` builtin. Uses the `un_node` union field. + byte_swap, + /// Implements the `@bitReverse` builtin. Uses the `un_node` union field. 
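The conversion builtins above share one encoding: a `Bin` payload with `lhs` as the destination type and `rhs` as the operand. At the source level the destination type typically arrives via the result location, as in this small sketch (illustrative only):

```zig
fn demo(x: u64) u8 {
    // `@intCast` asserts the value fits in the destination type...
    const narrowed: u8 = @intCast(x % 200);
    // ...while `@truncate` keeps only the low bits.
    const low_bits: u8 = @truncate(x);
    return narrowed ^ low_bits;
}
```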
+ bit_reverse, + + /// Implements the `@bitOffsetOf` builtin. + /// Uses the `pl_node` union field with payload `Bin`. + bit_offset_of, + /// Implements the `@offsetOf` builtin. + /// Uses the `pl_node` union field with payload `Bin`. + offset_of, + /// Implements the `@splat` builtin. + /// Uses the `pl_node` union field with payload `Bin`. + splat, + /// Implements the `@reduce` builtin. + /// Uses the `pl_node` union field with payload `Bin`. + reduce, + /// Implements the `@shuffle` builtin. + /// Uses the `pl_node` union field with payload `Shuffle`. + shuffle, + /// Implements the `@atomicLoad` builtin. + /// Uses the `pl_node` union field with payload `AtomicLoad`. + atomic_load, + /// Implements the `@atomicRmw` builtin. + /// Uses the `pl_node` union field with payload `AtomicRmw`. + atomic_rmw, + /// Implements the `@atomicStore` builtin. + /// Uses the `pl_node` union field with payload `AtomicStore`. + atomic_store, + /// Implements the `@mulAdd` builtin. + /// Uses the `pl_node` union field with payload `MulAdd`. + /// The addend communicates the type of the builtin. + /// The mulends need to be coerced to the same type. + mul_add, + /// Implements the `@fieldParentPtr` builtin. + /// Uses the `pl_node` union field with payload `FieldParentPtr`. + field_parent_ptr, + /// Implements the `@memcpy` builtin. + /// Uses the `pl_node` union field with payload `Bin`. + memcpy, + /// Implements the `@memset` builtin. + /// Uses the `pl_node` union field with payload `Bin`. + memset, + /// Implements the `@min` builtin for 2 args. + /// Uses the `pl_node` union field with payload `Bin` + min, + /// Implements the `@max` builtin for 2 args. + /// Uses the `pl_node` union field with payload `Bin` + max, + /// Implements the `@cImport` builtin. + /// Uses the `pl_node` union field with payload `Block`. + c_import, + + /// Allocates stack local memory. + /// Uses the `un_node` union field. The operand is the type of the allocated object. + /// The node source location points to a var decl node. + /// A `make_ptr_const` instruction should be used once the value has + /// been stored to the allocation. To ensure comptime value detection + /// functions, there are some restrictions on how this pointer should be + /// used prior to the `make_ptr_const` instruction: no pointer derived + /// from this `alloc` may be returned from a block or stored to another + /// address. In other words, it must be trivial to determine whether any + /// given pointer derives from this one. + alloc, + /// Same as `alloc` except mutable. As such, `make_ptr_const` need not be used, + /// and there are no restrictions on the usage of the pointer. + alloc_mut, + /// Allocates comptime-mutable memory. + /// Uses the `un_node` union field. The operand is the type of the allocated object. + /// The node source location points to a var decl node. + alloc_comptime_mut, + /// Same as `alloc` except the type is inferred. + /// Uses the `node` union field. + alloc_inferred, + /// Same as `alloc_inferred` except mutable. + alloc_inferred_mut, + /// Allocates comptime const memory. + /// Uses the `node` union field. The type of the allocated object is inferred. + /// The node source location points to a var decl node. + alloc_inferred_comptime, + /// Same as `alloc_comptime_mut` except the type is inferred. + alloc_inferred_comptime_mut, + /// Each `store_to_inferred_ptr` puts the type of the stored value into a set, + /// and then `resolve_inferred_alloc` triggers peer type resolution on the set. 
+ /// The operand is an `alloc_inferred` or `alloc_inferred_mut` instruction, which + /// is the allocation that needs to have its type inferred. + /// Uses the `un_node` field. The AST node is the var decl. + resolve_inferred_alloc, + /// Turns a pointer coming from an `alloc` or `Extended.alloc` into a constant + /// version of the same pointer. For inferred allocations this is instead implicitly + /// handled by the `resolve_inferred_alloc` instruction. + /// Uses the `un_node` union field. + make_ptr_const, + + /// Implements `resume` syntax. Uses `un_node` field. + @"resume", + @"await", + + /// When a type or function refers to a comptime value from an outer + /// scope, that forms a closure over the comptime value. The outer scope + /// will record a capture of that value, which encodes its current state + /// and marks it to persist. Uses `un_tok` field. Operand is the + /// instruction value to capture. + closure_capture, + /// The inner scope of a closure uses closure_get to retrieve the value + /// stored by the outer scope. Uses `inst_node` field. Operand is the + /// closure_capture instruction ref. + closure_get, + + /// A defer statement. + /// Uses the `defer` union field. + @"defer", + /// An errdefer statement with a code. + /// Uses the `defer_err_code` union field. + defer_err_code, + + /// Requests that Sema update the saved error return trace index for the enclosing + /// block, if the operand is .none or of an error/error-union type. + /// Uses the `save_err_ret_index` field. + save_err_ret_index, + /// Specialized form of `Extended.restore_err_ret_index`. + /// Unconditionally restores the error return index to its last saved state + /// in the block referred to by `operand`. If `operand` is `none`, restores + /// to the point of function entry. + /// Uses the `un_node` field. + restore_err_ret_index_unconditional, + /// Specialized form of `Extended.restore_err_ret_index`. + /// Restores the error return index to its state at the entry of + /// the current function conditional on `operand` being a non-error. + /// If `operand` is `none`, restores unconditionally. + /// Uses the `un_node` field. + restore_err_ret_index_fn_entry, + + /// The ZIR instruction tag is one of the `Extended` ones. + /// Uses the `extended` union field. + extended, + + /// Returns whether the instruction is one of the control flow "noreturn" types. + /// Function calls do not count.
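Tying the allocation tags together, a hedged sketch of how local declarations relate to them (reconstructed from the doc comments above; the real lowering has more cases):

```zig
fn demo() u32 {
    // An untyped `var` is an inferred allocation: per the comments
    // above, each store feeds a type set that
    // `resolve_inferred_alloc` peer-resolves.
    var x = @as(u32, 1);
    x += 1;
    // A `const` built through an `alloc` is finished with
    // `make_ptr_const` once the value has been stored.
    const y: u32 = x;
    return y;
}
```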
+ pub fn isNoReturn(tag: Tag) bool { + return switch (tag) { + .param, + .param_comptime, + .param_anytype, + .param_anytype_comptime, + .add, + .addwrap, + .add_sat, + .add_unsafe, + .alloc, + .alloc_mut, + .alloc_comptime_mut, + .alloc_inferred, + .alloc_inferred_mut, + .alloc_inferred_comptime, + .alloc_inferred_comptime_mut, + .make_ptr_const, + .array_cat, + .array_mul, + .array_type, + .array_type_sentinel, + .vector_type, + .elem_type, + .indexable_ptr_elem_type, + .vector_elem_type, + .indexable_ptr_len, + .anyframe_type, + .as_node, + .as_shift_operand, + .bit_and, + .bitcast, + .bit_or, + .block, + .block_comptime, + .block_inline, + .declaration, + .suspend_block, + .loop, + .bool_br_and, + .bool_br_or, + .bool_not, + .call, + .field_call, + .cmp_lt, + .cmp_lte, + .cmp_eq, + .cmp_gte, + .cmp_gt, + .cmp_neq, + .error_set_decl, + .error_set_decl_anon, + .error_set_decl_func, + .dbg_stmt, + .dbg_var_ptr, + .dbg_var_val, + .decl_ref, + .decl_val, + .load, + .div, + .elem_ptr, + .elem_val, + .elem_ptr_node, + .elem_val_node, + .elem_val_imm, + .ensure_result_used, + .ensure_result_non_error, + .ensure_err_union_payload_void, + .@"export", + .export_value, + .field_ptr, + .field_val, + .field_ptr_named, + .field_val_named, + .func, + .func_inferred, + .func_fancy, + .has_decl, + .int, + .int_big, + .float, + .float128, + .int_type, + .is_non_null, + .is_non_null_ptr, + .is_non_err, + .is_non_err_ptr, + .ret_is_non_err, + .mod_rem, + .mul, + .mulwrap, + .mul_sat, + .ref, + .shl, + .shl_sat, + .shr, + .store_node, + .store_to_inferred_ptr, + .str, + .sub, + .subwrap, + .sub_sat, + .negate, + .negate_wrap, + .typeof, + .typeof_builtin, + .xor, + .optional_type, + .optional_payload_safe, + .optional_payload_unsafe, + .optional_payload_safe_ptr, + .optional_payload_unsafe_ptr, + .err_union_payload_unsafe, + .err_union_payload_unsafe_ptr, + .err_union_code, + .err_union_code_ptr, + .ptr_type, + .enum_literal, + .merge_error_sets, + .error_union_type, + .bit_not, + .error_value, + .slice_start, + .slice_end, + .slice_sentinel, + .slice_length, + .import, + .typeof_log2_int_type, + .resolve_inferred_alloc, + .set_eval_branch_quota, + .switch_block, + .switch_block_ref, + .switch_block_err_union, + .validate_deref, + .validate_destructure, + .union_init, + .field_type_ref, + .enum_from_int, + .int_from_enum, + .type_info, + .size_of, + .bit_size_of, + .int_from_ptr, + .align_of, + .int_from_bool, + .embed_file, + .error_name, + .set_runtime_safety, + .sqrt, + .sin, + .cos, + .tan, + .exp, + .exp2, + .log, + .log2, + .log10, + .abs, + .floor, + .ceil, + .trunc, + .round, + .tag_name, + .type_name, + .frame_type, + .frame_size, + .int_from_float, + .float_from_int, + .ptr_from_int, + .float_cast, + .int_cast, + .ptr_cast, + .truncate, + .has_field, + .clz, + .ctz, + .pop_count, + .byte_swap, + .bit_reverse, + .div_exact, + .div_floor, + .div_trunc, + .mod, + .rem, + .shl_exact, + .shr_exact, + .bit_offset_of, + .offset_of, + .splat, + .reduce, + .shuffle, + .atomic_load, + .atomic_rmw, + .atomic_store, + .mul_add, + .builtin_call, + .field_parent_ptr, + .max, + .memcpy, + .memset, + .min, + .c_import, + .@"resume", + .@"await", + .ret_err_value_code, + .extended, + .closure_get, + .closure_capture, + .ret_ptr, + .ret_type, + .@"try", + .try_ptr, + .@"defer", + .defer_err_code, + .save_err_ret_index, + .for_len, + .opt_eu_base_ptr_init, + .coerce_ptr_elem_ty, + .struct_init_empty, + .struct_init_empty_result, + .struct_init_empty_ref_result, + .struct_init_anon, + .struct_init, + 
.struct_init_ref, + .validate_struct_init_ty, + .validate_struct_init_result_ty, + .validate_ptr_struct_init, + .struct_init_field_type, + .struct_init_field_ptr, + .array_init_anon, + .array_init, + .array_init_ref, + .validate_array_init_ty, + .validate_array_init_result_ty, + .validate_array_init_ref_ty, + .validate_ptr_array_init, + .array_init_elem_type, + .array_init_elem_ptr, + .validate_ref_ty, + .restore_err_ret_index_unconditional, + .restore_err_ret_index_fn_entry, + => false, + + .@"break", + .break_inline, + .condbr, + .condbr_inline, + .compile_error, + .ret_node, + .ret_load, + .ret_implicit, + .ret_err_value, + .@"unreachable", + .repeat, + .repeat_inline, + .panic, + .trap, + .check_comptime_control_flow, + => true, + }; + } + + pub fn isParam(tag: Tag) bool { + return switch (tag) { + .param, + .param_comptime, + .param_anytype, + .param_anytype_comptime, + => true, + + else => false, + }; + } + + /// AstGen uses this to find out if `Ref.void_value` should be used in place + /// of the result of a given instruction. This allows Sema to forego adding + /// the instruction to the map after analysis. + pub fn isAlwaysVoid(tag: Tag, data: Data) bool { + return switch (tag) { + .dbg_stmt, + .dbg_var_ptr, + .dbg_var_val, + .ensure_result_used, + .ensure_result_non_error, + .ensure_err_union_payload_void, + .set_eval_branch_quota, + .atomic_store, + .store_node, + .store_to_inferred_ptr, + .resolve_inferred_alloc, + .validate_deref, + .validate_destructure, + .@"export", + .export_value, + .set_runtime_safety, + .memcpy, + .memset, + .check_comptime_control_flow, + .@"defer", + .defer_err_code, + .save_err_ret_index, + .restore_err_ret_index_unconditional, + .restore_err_ret_index_fn_entry, + .validate_struct_init_ty, + .validate_struct_init_result_ty, + .validate_ptr_struct_init, + .validate_array_init_ty, + .validate_array_init_result_ty, + .validate_ptr_array_init, + .validate_ref_ty, + => true, + + .param, + .param_comptime, + .param_anytype, + .param_anytype_comptime, + .add, + .addwrap, + .add_sat, + .add_unsafe, + .alloc, + .alloc_mut, + .alloc_comptime_mut, + .alloc_inferred, + .alloc_inferred_mut, + .alloc_inferred_comptime, + .alloc_inferred_comptime_mut, + .make_ptr_const, + .array_cat, + .array_mul, + .array_type, + .array_type_sentinel, + .vector_type, + .elem_type, + .indexable_ptr_elem_type, + .vector_elem_type, + .indexable_ptr_len, + .anyframe_type, + .as_node, + .as_shift_operand, + .bit_and, + .bitcast, + .bit_or, + .block, + .block_comptime, + .block_inline, + .declaration, + .suspend_block, + .loop, + .bool_br_and, + .bool_br_or, + .bool_not, + .call, + .field_call, + .cmp_lt, + .cmp_lte, + .cmp_eq, + .cmp_gte, + .cmp_gt, + .cmp_neq, + .error_set_decl, + .error_set_decl_anon, + .error_set_decl_func, + .decl_ref, + .decl_val, + .load, + .div, + .elem_ptr, + .elem_val, + .elem_ptr_node, + .elem_val_node, + .elem_val_imm, + .field_ptr, + .field_val, + .field_ptr_named, + .field_val_named, + .func, + .func_inferred, + .func_fancy, + .has_decl, + .int, + .int_big, + .float, + .float128, + .int_type, + .is_non_null, + .is_non_null_ptr, + .is_non_err, + .is_non_err_ptr, + .ret_is_non_err, + .mod_rem, + .mul, + .mulwrap, + .mul_sat, + .ref, + .shl, + .shl_sat, + .shr, + .str, + .sub, + .subwrap, + .sub_sat, + .negate, + .negate_wrap, + .typeof, + .typeof_builtin, + .xor, + .optional_type, + .optional_payload_safe, + .optional_payload_unsafe, + .optional_payload_safe_ptr, + .optional_payload_unsafe_ptr, + .err_union_payload_unsafe, + .err_union_payload_unsafe_ptr, + 
.err_union_code, + .err_union_code_ptr, + .ptr_type, + .enum_literal, + .merge_error_sets, + .error_union_type, + .bit_not, + .error_value, + .slice_start, + .slice_end, + .slice_sentinel, + .slice_length, + .import, + .typeof_log2_int_type, + .switch_block, + .switch_block_ref, + .switch_block_err_union, + .union_init, + .field_type_ref, + .enum_from_int, + .int_from_enum, + .type_info, + .size_of, + .bit_size_of, + .int_from_ptr, + .align_of, + .int_from_bool, + .embed_file, + .error_name, + .sqrt, + .sin, + .cos, + .tan, + .exp, + .exp2, + .log, + .log2, + .log10, + .abs, + .floor, + .ceil, + .trunc, + .round, + .tag_name, + .type_name, + .frame_type, + .frame_size, + .int_from_float, + .float_from_int, + .ptr_from_int, + .float_cast, + .int_cast, + .ptr_cast, + .truncate, + .has_field, + .clz, + .ctz, + .pop_count, + .byte_swap, + .bit_reverse, + .div_exact, + .div_floor, + .div_trunc, + .mod, + .rem, + .shl_exact, + .shr_exact, + .bit_offset_of, + .offset_of, + .splat, + .reduce, + .shuffle, + .atomic_load, + .atomic_rmw, + .mul_add, + .builtin_call, + .field_parent_ptr, + .max, + .min, + .c_import, + .@"resume", + .@"await", + .ret_err_value_code, + .closure_get, + .closure_capture, + .@"break", + .break_inline, + .condbr, + .condbr_inline, + .compile_error, + .ret_node, + .ret_load, + .ret_implicit, + .ret_err_value, + .ret_ptr, + .ret_type, + .@"unreachable", + .repeat, + .repeat_inline, + .panic, + .trap, + .for_len, + .@"try", + .try_ptr, + .opt_eu_base_ptr_init, + .coerce_ptr_elem_ty, + .struct_init_empty, + .struct_init_empty_result, + .struct_init_empty_ref_result, + .struct_init_anon, + .struct_init, + .struct_init_ref, + .struct_init_field_type, + .struct_init_field_ptr, + .array_init_anon, + .array_init, + .array_init_ref, + .validate_array_init_ref_ty, + .array_init_elem_type, + .array_init_elem_ptr, + => false, + + .extended => switch (data.extended.opcode) { + .fence, .set_cold, .breakpoint => true, + else => false, + }, + }; + } + + /// Used by debug safety-checking code. 
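Both predicates exist for consumers; here is a short sketch of the kind of caller they serve. The two functions below are hypothetical; `isNoReturn`, `isAlwaysVoid`, `Ref.void_value`, and `Index.toRef` (defined further down in this file) are the real pieces, and `Zir` is assumed to be importable as `std.zig.Zir`:

```zig
const std = @import("std");
const Zir = std.zig.Zir; // assumed import path

fn bodyIsExited(tags: []const Zir.Inst.Tag) bool {
    // A body needs nothing after its first noreturn instruction.
    for (tags) |tag| if (tag.isNoReturn()) return true;
    return false;
}

fn resultRef(tag: Zir.Inst.Tag, data: Zir.Inst.Data, inst: Zir.Inst.Index) Zir.Inst.Ref {
    // Per the doc comment on `isAlwaysVoid`, a void result can be
    // substituted instead of mapping the instruction's own result.
    return if (tag.isAlwaysVoid(data)) .void_value else inst.toRef();
}
```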
+ pub const data_tags = list: { + @setEvalBranchQuota(2000); + break :list std.enums.directEnumArray(Tag, Data.FieldEnum, 0, .{ + .add = .pl_node, + .addwrap = .pl_node, + .add_sat = .pl_node, + .add_unsafe = .pl_node, + .sub = .pl_node, + .subwrap = .pl_node, + .sub_sat = .pl_node, + .mul = .pl_node, + .mulwrap = .pl_node, + .mul_sat = .pl_node, + + .param = .pl_tok, + .param_comptime = .pl_tok, + .param_anytype = .str_tok, + .param_anytype_comptime = .str_tok, + .array_cat = .pl_node, + .array_mul = .pl_node, + .array_type = .pl_node, + .array_type_sentinel = .pl_node, + .vector_type = .pl_node, + .elem_type = .un_node, + .indexable_ptr_elem_type = .un_node, + .vector_elem_type = .un_node, + .indexable_ptr_len = .un_node, + .anyframe_type = .un_node, + .as_node = .pl_node, + .as_shift_operand = .pl_node, + .bit_and = .pl_node, + .bitcast = .pl_node, + .bit_not = .un_node, + .bit_or = .pl_node, + .block = .pl_node, + .block_comptime = .pl_node, + .block_inline = .pl_node, + .declaration = .pl_node, + .suspend_block = .pl_node, + .bool_not = .un_node, + .bool_br_and = .pl_node, + .bool_br_or = .pl_node, + .@"break" = .@"break", + .break_inline = .@"break", + .check_comptime_control_flow = .un_node, + .for_len = .pl_node, + .call = .pl_node, + .field_call = .pl_node, + .cmp_lt = .pl_node, + .cmp_lte = .pl_node, + .cmp_eq = .pl_node, + .cmp_gte = .pl_node, + .cmp_gt = .pl_node, + .cmp_neq = .pl_node, + .condbr = .pl_node, + .condbr_inline = .pl_node, + .@"try" = .pl_node, + .try_ptr = .pl_node, + .error_set_decl = .pl_node, + .error_set_decl_anon = .pl_node, + .error_set_decl_func = .pl_node, + .dbg_stmt = .dbg_stmt, + .dbg_var_ptr = .str_op, + .dbg_var_val = .str_op, + .decl_ref = .str_tok, + .decl_val = .str_tok, + .load = .un_node, + .div = .pl_node, + .elem_ptr = .pl_node, + .elem_ptr_node = .pl_node, + .elem_val = .pl_node, + .elem_val_node = .pl_node, + .elem_val_imm = .elem_val_imm, + .ensure_result_used = .un_node, + .ensure_result_non_error = .un_node, + .ensure_err_union_payload_void = .un_node, + .error_union_type = .pl_node, + .error_value = .str_tok, + .@"export" = .pl_node, + .export_value = .pl_node, + .field_ptr = .pl_node, + .field_val = .pl_node, + .field_ptr_named = .pl_node, + .field_val_named = .pl_node, + .func = .pl_node, + .func_inferred = .pl_node, + .func_fancy = .pl_node, + .import = .str_tok, + .int = .int, + .int_big = .str, + .float = .float, + .float128 = .pl_node, + .int_type = .int_type, + .is_non_null = .un_node, + .is_non_null_ptr = .un_node, + .is_non_err = .un_node, + .is_non_err_ptr = .un_node, + .ret_is_non_err = .un_node, + .loop = .pl_node, + .repeat = .node, + .repeat_inline = .node, + .merge_error_sets = .pl_node, + .mod_rem = .pl_node, + .ref = .un_tok, + .ret_node = .un_node, + .ret_load = .un_node, + .ret_implicit = .un_tok, + .ret_err_value = .str_tok, + .ret_err_value_code = .str_tok, + .ret_ptr = .node, + .ret_type = .node, + .ptr_type = .ptr_type, + .slice_start = .pl_node, + .slice_end = .pl_node, + .slice_sentinel = .pl_node, + .slice_length = .pl_node, + .store_node = .pl_node, + .store_to_inferred_ptr = .pl_node, + .str = .str, + .negate = .un_node, + .negate_wrap = .un_node, + .typeof = .un_node, + .typeof_log2_int_type = .un_node, + .@"unreachable" = .@"unreachable", + .xor = .pl_node, + .optional_type = .un_node, + .optional_payload_safe = .un_node, + .optional_payload_unsafe = .un_node, + .optional_payload_safe_ptr = .un_node, + .optional_payload_unsafe_ptr = .un_node, + .err_union_payload_unsafe = .un_node, + 
.err_union_payload_unsafe_ptr = .un_node, + .err_union_code = .un_node, + .err_union_code_ptr = .un_node, + .enum_literal = .str_tok, + .switch_block = .pl_node, + .switch_block_ref = .pl_node, + .switch_block_err_union = .pl_node, + .validate_deref = .un_node, + .validate_destructure = .pl_node, + .field_type_ref = .pl_node, + .union_init = .pl_node, + .type_info = .un_node, + .size_of = .un_node, + .bit_size_of = .un_node, + .opt_eu_base_ptr_init = .un_node, + .coerce_ptr_elem_ty = .pl_node, + .validate_ref_ty = .un_tok, + + .int_from_ptr = .un_node, + .compile_error = .un_node, + .set_eval_branch_quota = .un_node, + .int_from_enum = .un_node, + .align_of = .un_node, + .int_from_bool = .un_node, + .embed_file = .un_node, + .error_name = .un_node, + .panic = .un_node, + .trap = .node, + .set_runtime_safety = .un_node, + .sqrt = .un_node, + .sin = .un_node, + .cos = .un_node, + .tan = .un_node, + .exp = .un_node, + .exp2 = .un_node, + .log = .un_node, + .log2 = .un_node, + .log10 = .un_node, + .abs = .un_node, + .floor = .un_node, + .ceil = .un_node, + .trunc = .un_node, + .round = .un_node, + .tag_name = .un_node, + .type_name = .un_node, + .frame_type = .un_node, + .frame_size = .un_node, + + .int_from_float = .pl_node, + .float_from_int = .pl_node, + .ptr_from_int = .pl_node, + .enum_from_int = .pl_node, + .float_cast = .pl_node, + .int_cast = .pl_node, + .ptr_cast = .pl_node, + .truncate = .pl_node, + .typeof_builtin = .pl_node, + + .has_decl = .pl_node, + .has_field = .pl_node, + + .clz = .un_node, + .ctz = .un_node, + .pop_count = .un_node, + .byte_swap = .un_node, + .bit_reverse = .un_node, + + .div_exact = .pl_node, + .div_floor = .pl_node, + .div_trunc = .pl_node, + .mod = .pl_node, + .rem = .pl_node, + + .shl = .pl_node, + .shl_exact = .pl_node, + .shl_sat = .pl_node, + .shr = .pl_node, + .shr_exact = .pl_node, + + .bit_offset_of = .pl_node, + .offset_of = .pl_node, + .splat = .pl_node, + .reduce = .pl_node, + .shuffle = .pl_node, + .atomic_load = .pl_node, + .atomic_rmw = .pl_node, + .atomic_store = .pl_node, + .mul_add = .pl_node, + .builtin_call = .pl_node, + .field_parent_ptr = .pl_node, + .max = .pl_node, + .memcpy = .pl_node, + .memset = .pl_node, + .min = .pl_node, + .c_import = .pl_node, + + .alloc = .un_node, + .alloc_mut = .un_node, + .alloc_comptime_mut = .un_node, + .alloc_inferred = .node, + .alloc_inferred_mut = .node, + .alloc_inferred_comptime = .node, + .alloc_inferred_comptime_mut = .node, + .resolve_inferred_alloc = .un_node, + .make_ptr_const = .un_node, + + .@"resume" = .un_node, + .@"await" = .un_node, + + .closure_capture = .un_tok, + .closure_get = .inst_node, + + .@"defer" = .@"defer", + .defer_err_code = .defer_err_code, + + .save_err_ret_index = .save_err_ret_index, + .restore_err_ret_index_unconditional = .un_node, + .restore_err_ret_index_fn_entry = .un_node, + + .struct_init_empty = .un_node, + .struct_init_empty_result = .un_node, + .struct_init_empty_ref_result = .un_node, + .struct_init_anon = .pl_node, + .struct_init = .pl_node, + .struct_init_ref = .pl_node, + .validate_struct_init_ty = .un_node, + .validate_struct_init_result_ty = .un_node, + .validate_ptr_struct_init = .pl_node, + .struct_init_field_type = .pl_node, + .struct_init_field_ptr = .pl_node, + .array_init_anon = .pl_node, + .array_init = .pl_node, + .array_init_ref = .pl_node, + .validate_array_init_ty = .pl_node, + .validate_array_init_result_ty = .pl_node, + .validate_array_init_ref_ty = .pl_node, + .validate_ptr_array_init = .pl_node, + .array_init_elem_type = .bin, + 
.array_init_elem_ptr = .pl_node, + + .extended = .extended, + }); + }; + + // Uncomment to view how many tag slots are available. + //comptime { + // @compileLog("ZIR tags left: ", 256 - @typeInfo(Tag).Enum.fields.len); + //} + }; + + /// Rarer instructions are here; ones that do not fit in the 8-bit `Tag` enum. + /// `noreturn` instructions may not go here; they must be part of the main `Tag` enum. + pub const Extended = enum(u16) { + /// Declares a global variable. + /// `operand` is payload index to `ExtendedVar`. + /// `small` is `ExtendedVar.Small`. + variable, + /// A struct type definition. Contains references to ZIR instructions for + /// the field types, defaults, and alignments. + /// `operand` is payload index to `StructDecl`. + /// `small` is `StructDecl.Small`. + struct_decl, + /// An enum type definition. Contains references to ZIR instructions for + /// the field value expressions and optional type tag expression. + /// `operand` is payload index to `EnumDecl`. + /// `small` is `EnumDecl.Small`. + enum_decl, + /// A union type definition. Contains references to ZIR instructions for + /// the field types and optional type tag expression. + /// `operand` is payload index to `UnionDecl`. + /// `small` is `UnionDecl.Small`. + union_decl, + /// An opaque type definition. Contains references to decls and captures. + /// `operand` is payload index to `OpaqueDecl`. + /// `small` is `OpaqueDecl.Small`. + opaque_decl, + /// Implements the `@This` builtin. + /// `operand` is `src_node: i32`. + this, + /// Implements the `@returnAddress` builtin. + /// `operand` is `src_node: i32`. + ret_addr, + /// Implements the `@src` builtin. + /// `operand` is payload index to `LineColumn`. + builtin_src, + /// Implements the `@errorReturnTrace` builtin. + /// `operand` is `src_node: i32`. + error_return_trace, + /// Implements the `@frame` builtin. + /// `operand` is `src_node: i32`. + frame, + /// Implements the `@frameAddress` builtin. + /// `operand` is `src_node: i32`. + frame_address, + /// Same as `alloc` from `Tag` but may contain an alignment instruction. + /// `operand` is payload index to `AllocExtended`. + /// `small`: + /// * 0b000X - has type + /// * 0b00X0 - has alignment + /// * 0b0X00 - 1=const, 0=var + /// * 0bX000 - is comptime + alloc, + /// The `@extern` builtin. + /// `operand` is payload index to `BinNode`. + builtin_extern, + /// Inline assembly. + /// `small`: + /// * 0b00000000_000XXXXX - `outputs_len`. + /// * 0b000000XX_XXX00000 - `inputs_len`. + /// * 0b0XXXXX00_00000000 - `clobbers_len`. + /// * 0bX0000000_00000000 - is volatile + /// `operand` is payload index to `Asm`. + @"asm", + /// Same as `asm` except the assembly template is not a string literal but a comptime + /// expression. + /// The `asm_source` field of the Asm is not a null-terminated string + /// but instead a Ref. + asm_expr, + /// Log compile time variables and emit an error message. + /// `operand` is payload index to `NodeMultiOp`. + /// `small` is `operands_len`. + /// The AST node is the compile log builtin call. + compile_log, + /// The builtin `@TypeOf` which returns the type after Peer Type Resolution + /// of one or more params. + /// `operand` is payload index to `TypeOfPeer`. + /// `small` is `operands_len`. + /// The AST node is the builtin call. + typeof_peer, + /// Implements the `@min` builtin for more than 2 args. + /// `operand` is payload index to `NodeMultiOp`. + /// `small` is `operands_len`. + /// The AST node is the builtin call. 
+ min_multi, + /// Implements the `@max` builtin for more than 2 args. + /// `operand` is payload index to `NodeMultiOp`. + /// `small` is `operands_len`. + /// The AST node is the builtin call. + max_multi, + /// Implements the `@addWithOverflow` builtin. + /// `operand` is payload index to `BinNode`. + /// `small` is unused. + add_with_overflow, + /// Implements the `@subWithOverflow` builtin. + /// `operand` is payload index to `BinNode`. + /// `small` is unused. + sub_with_overflow, + /// Implements the `@mulWithOverflow` builtin. + /// `operand` is payload index to `BinNode`. + /// `small` is unused. + mul_with_overflow, + /// Implements the `@shlWithOverflow` builtin. + /// `operand` is payload index to `BinNode`. + /// `small` is unused. + shl_with_overflow, + /// `operand` is payload index to `UnNode`. + c_undef, + /// `operand` is payload index to `UnNode`. + c_include, + /// `operand` is payload index to `BinNode`. + c_define, + /// `operand` is payload index to `UnNode`. + wasm_memory_size, + /// `operand` is payload index to `BinNode`. + wasm_memory_grow, + /// The `@prefetch` builtin. + /// `operand` is payload index to `BinNode`. + prefetch, + /// Implements the `@fence` builtin. + /// `operand` is payload index to `UnNode`. + fence, + /// Implement builtin `@setFloatMode`. + /// `operand` is payload index to `UnNode`. + set_float_mode, + /// Implement builtin `@setAlignStack`. + /// `operand` is payload index to `UnNode`. + set_align_stack, + /// Implements `@setCold`. + /// `operand` is payload index to `UnNode`. + set_cold, + /// Implements the `@errorCast` builtin. + /// `operand` is payload index to `BinNode`. `lhs` is dest type, `rhs` is operand. + error_cast, + /// `operand` is payload index to `UnNode`. + await_nosuspend, + /// Implements `@breakpoint`. + /// `operand` is `src_node: i32`. + breakpoint, + /// Implements the `@select` builtin. + /// `operand` is payload index to `Select`. + select, + /// Implement builtin `@intFromError`. + /// `operand` is payload index to `UnNode`. + int_from_error, + /// Implement builtin `@errorFromInt`. + /// `operand` is payload index to `UnNode`. + error_from_int, + /// Implement builtin `@Type`. + /// `operand` is payload index to `UnNode`. + /// `small` contains `NameStrategy`. + reify, + /// Implements the `@asyncCall` builtin. + /// `operand` is payload index to `AsyncCall`. + builtin_async_call, + /// Implements the `@cmpxchgStrong` and `@cmpxchgWeak` builtins. + /// `small` 0=>weak 1=>strong + /// `operand` is payload index to `Cmpxchg`. + cmpxchg, + /// Implement builtin `@cVaArg`. + /// `operand` is payload index to `BinNode`. + c_va_arg, + /// Implement builtin `@cVaCopy`. + /// `operand` is payload index to `UnNode`. + c_va_copy, + /// Implement builtin `@cVaEnd`. + /// `operand` is payload index to `UnNode`. + c_va_end, + /// Implement builtin `@cVaStart`. + /// `operand` is `src_node: i32`. + c_va_start, + /// Implements the following builtins: + /// `@ptrCast`, `@alignCast`, `@addrSpaceCast`, `@constCast`, `@volatileCast`. + /// Represents an arbitrary nesting of the above builtins. Such a nesting is treated as a + /// single operation which can modify multiple components of a pointer type. + /// `operand` is payload index to `BinNode`. + /// `small` contains `FullPtrCastFlags`. + /// AST node is the root of the nested casts. + /// `lhs` is dest type, `rhs` is operand. + ptr_cast_full, + /// `operand` is payload index to `UnNode`. + /// `small` contains `FullPtrCastFlags`.
+ /// Guaranteed to only have flags where no explicit destination type is + /// required (const_cast and volatile_cast). + /// AST node is the root of the nested casts. + ptr_cast_no_dest, + /// Implements the `@workItemId` builtin. + /// `operand` is payload index to `UnNode`. + work_item_id, + /// Implements the `@workGroupSize` builtin. + /// `operand` is payload index to `UnNode`. + work_group_size, + /// Implements the `@workGroupId` builtin. + /// `operand` is payload index to `UnNode`. + work_group_id, + /// Implements the `@inComptime` builtin. + /// `operand` is `src_node: i32`. + in_comptime, + /// Restores the error return index to its last saved state in a given + /// block. If the block is `.none`, restores to the state from the point + /// of function entry. If the operand is not `.none`, the restore is + /// conditional on the operand value not being an error. + /// `operand` is payload index to `RestoreErrRetIndex`. + /// `small` is undefined. + restore_err_ret_index, + /// Used as a placeholder instruction which is just a dummy index for Sema to replace + /// with a specific value. For instance, this is used for the capture of an `errdefer`. + /// This should never appear in a body. + value_placeholder, + + pub const InstData = struct { + opcode: Extended, + small: u16, + operand: u32, + }; + }; + + /// The position of a ZIR instruction within the `Zir` instructions array. + pub const Index = enum(u32) { + /// ZIR is structured so that the outermost "main" struct of any file + /// is always at index 0. + main_struct_inst = 0, + ref_start_index = static_len, + _, + + pub const static_len = 84; + + pub fn toRef(i: Index) Inst.Ref { + return @enumFromInt(@intFromEnum(Index.ref_start_index) + @intFromEnum(i)); + } + + pub fn toOptional(i: Index) OptionalIndex { + return @enumFromInt(@intFromEnum(i)); + } + }; + + pub const OptionalIndex = enum(u32) { + /// ZIR is structured so that the outermost "main" struct of any file + /// is always at index 0. + main_struct_inst = 0, + ref_start_index = Index.static_len, + none = std.math.maxInt(u32), + _, + + pub fn unwrap(oi: OptionalIndex) ?Index { + return if (oi == .none) null else @enumFromInt(@intFromEnum(oi)); + } + }; + + /// A reference to ZIR instruction, or to an InternPool index, or neither. + /// + /// If the integer tag value is < InternPool.static_len, then it + /// corresponds to an InternPool index. Otherwise, this refers to a ZIR + /// instruction. + /// + /// The tag type is specified so that it is safe to bitcast between `[]u32` + /// and `[]Ref`. 
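To make the offset encoding concrete before the `Ref` enum below, a small illustrative test (again assuming `std.zig.Zir` as the import path):

```zig
const std = @import("std");
const Zir = std.zig.Zir; // assumed import path

test "Ref <-> Index round trip (illustrative)" {
    // Tag values below `ref_start_index` name constants; values at or
    // above it encode `instruction index + static_len`.
    const idx: Zir.Inst.Index = .main_struct_inst; // instruction 0
    try std.testing.expectEqual(idx, idx.toRef().toIndex().?);
    // `u8_type` sits below the boundary, so it maps to no instruction.
    try std.testing.expect(Zir.Inst.Ref.u8_type.toIndex() == null);
}
```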
+ pub const Ref = enum(u32) { + u0_type, + i0_type, + u1_type, + u8_type, + i8_type, + u16_type, + i16_type, + u29_type, + u32_type, + i32_type, + u64_type, + i64_type, + u80_type, + u128_type, + i128_type, + usize_type, + isize_type, + c_char_type, + c_short_type, + c_ushort_type, + c_int_type, + c_uint_type, + c_long_type, + c_ulong_type, + c_longlong_type, + c_ulonglong_type, + c_longdouble_type, + f16_type, + f32_type, + f64_type, + f80_type, + f128_type, + anyopaque_type, + bool_type, + void_type, + type_type, + anyerror_type, + comptime_int_type, + comptime_float_type, + noreturn_type, + anyframe_type, + null_type, + undefined_type, + enum_literal_type, + atomic_order_type, + atomic_rmw_op_type, + calling_convention_type, + address_space_type, + float_mode_type, + reduce_op_type, + call_modifier_type, + prefetch_options_type, + export_options_type, + extern_options_type, + type_info_type, + manyptr_u8_type, + manyptr_const_u8_type, + manyptr_const_u8_sentinel_0_type, + single_const_pointer_to_comptime_int_type, + slice_const_u8_type, + slice_const_u8_sentinel_0_type, + optional_noreturn_type, + anyerror_void_error_union_type, + adhoc_inferred_error_set_type, + generic_poison_type, + empty_struct_type, + undef, + zero, + zero_usize, + zero_u8, + one, + one_usize, + one_u8, + four_u8, + negative_one, + calling_convention_c, + calling_convention_inline, + void_value, + unreachable_value, + null_value, + bool_true, + bool_false, + empty_struct, + generic_poison, + + /// This tag is here to match Air and InternPool, however it is unused + /// for ZIR purposes. + var_args_param_type = std.math.maxInt(u32) - 1, + /// This Ref does not correspond to any ZIR instruction or constant + /// value and may instead be used as a sentinel to indicate null. + none = std.math.maxInt(u32), + + _, + + pub fn toIndex(inst: Ref) ?Index { + assert(inst != .none); + const ref_int = @intFromEnum(inst); + if (ref_int >= @intFromEnum(Index.ref_start_index)) { + return @enumFromInt(ref_int - @intFromEnum(Index.ref_start_index)); + } else { + return null; + } + } + + pub fn toIndexAllowNone(inst: Ref) ?Index { + if (inst == .none) return null; + return toIndex(inst); + } + }; + + /// All instructions have an 8-byte payload, which is contained within + /// this union. `Tag` determines which union field is active, as well as + /// how to interpret the data within. + pub const Data = union { + /// Used for `Tag.extended`. The extended opcode determines the meaning + /// of the `small` and `operand` fields. + extended: Extended.InstData, + /// Used for unary operators, with an AST node source location. + un_node: struct { + /// Offset from Decl AST node index. + src_node: i32, + /// The meaning of this operand depends on the corresponding `Tag`. + operand: Ref, + + pub fn src(self: @This()) LazySrcLoc { + return LazySrcLoc.nodeOffset(self.src_node); + } + }, + /// Used for unary operators, with a token source location. + un_tok: struct { + /// Offset from Decl AST token index. + src_tok: Ast.TokenIndex, + /// The meaning of this operand depends on the corresponding `Tag`. + operand: Ref, + + pub fn src(self: @This()) LazySrcLoc { + return .{ .token_offset = self.src_tok }; + } + }, + pl_node: struct { + /// Offset from Decl AST node index. + /// `Tag` determines which kind of AST node this points to. + src_node: i32, + /// index into extra. + /// `Tag` determines what lives there. 
+ payload_index: u32, + + pub fn src(self: @This()) LazySrcLoc { + return LazySrcLoc.nodeOffset(self.src_node); + } + }, + pl_tok: struct { + /// Offset from Decl AST token index. + src_tok: Ast.TokenIndex, + /// index into extra. + /// `Tag` determines what lives there. + payload_index: u32, + + pub fn src(self: @This()) LazySrcLoc { + return .{ .token_offset = self.src_tok }; + } + }, + bin: Bin, + /// For strings which may contain null bytes. + str: struct { + /// Offset into `string_bytes`. + start: NullTerminatedString, + /// Number of bytes in the string. + len: u32, + + pub fn get(self: @This(), code: Zir) []const u8 { + return code.string_bytes[@intFromEnum(self.start)..][0..self.len]; + } + }, + str_tok: struct { + /// Offset into `string_bytes`. Null-terminated. + start: NullTerminatedString, + /// Offset from Decl AST token index. + src_tok: u32, + + pub fn get(self: @This(), code: Zir) [:0]const u8 { + return code.nullTerminatedString(self.start); + } + + pub fn src(self: @This()) LazySrcLoc { + return .{ .token_offset = self.src_tok }; + } + }, + /// Offset from Decl AST token index. + tok: Ast.TokenIndex, + /// Offset from Decl AST node index. + node: i32, + int: u64, + float: f64, + ptr_type: struct { + flags: packed struct { + is_allowzero: bool, + is_mutable: bool, + is_volatile: bool, + has_sentinel: bool, + has_align: bool, + has_addrspace: bool, + has_bit_range: bool, + _: u1 = undefined, + }, + size: std.builtin.Type.Pointer.Size, + /// Index into extra. See `PtrType`. + payload_index: u32, + }, + int_type: struct { + /// Offset from Decl AST node index. + /// `Tag` determines which kind of AST node this points to. + src_node: i32, + signedness: std.builtin.Signedness, + bit_count: u16, + + pub fn src(self: @This()) LazySrcLoc { + return LazySrcLoc.nodeOffset(self.src_node); + } + }, + @"unreachable": struct { + /// Offset from Decl AST node index. + /// `Tag` determines which kind of AST node this points to. + src_node: i32, + + pub fn src(self: @This()) LazySrcLoc { + return LazySrcLoc.nodeOffset(self.src_node); + } + }, + @"break": struct { + operand: Ref, + payload_index: u32, + }, + dbg_stmt: LineColumn, + /// Used for unary operators which reference an inst, + /// with an AST node source location. + inst_node: struct { + /// Offset from Decl AST node index. + src_node: i32, + /// The meaning of this operand depends on the corresponding `Tag`. + inst: Index, + + pub fn src(self: @This()) LazySrcLoc { + return LazySrcLoc.nodeOffset(self.src_node); + } + }, + str_op: struct { + /// Offset into `string_bytes`. Null-terminated. + str: NullTerminatedString, + operand: Ref, + + pub fn getStr(self: @This(), zir: Zir) [:0]const u8 { + return zir.nullTerminatedString(self.str); + } + }, + @"defer": struct { + index: u32, + len: u32, + }, + defer_err_code: struct { + err_code: Ref, + payload_index: u32, + }, + save_err_ret_index: struct { + operand: Ref, // If error type (or .none), save new trace index + }, + elem_val_imm: struct { + /// The indexable value being accessed. + operand: Ref, + /// The index being accessed. + idx: u32, + }, + + // Make sure we don't accidentally add a field to make this union + // bigger than expected. Note that in Debug builds, Zig is allowed + // to insert a secret field for safety checks. + comptime { + if (builtin.mode != .Debug and builtin.mode != .ReleaseSafe) { + assert(@sizeOf(Data) == 8); + } + } + + /// TODO this has to be kept in sync with `Data` which we want to be an untagged + /// union. 
There is some kind of language awkwardness here and it has to do with + /// deserializing an untagged union (in this case `Data`) from a file, and trying + /// to preserve the hidden safety field. + pub const FieldEnum = enum { + extended, + un_node, + un_tok, + pl_node, + pl_tok, + bin, + str, + str_tok, + tok, + node, + int, + float, + ptr_type, + int_type, + @"unreachable", + @"break", + dbg_stmt, + inst_node, + str_op, + @"defer", + defer_err_code, + save_err_ret_index, + elem_val_imm, + }; + }; + + pub const Break = struct { + pub const no_src_node = std.math.maxInt(i32); + + operand_src_node: i32, + block_inst: Index, + }; + + /// Trailing: + /// 0. Output for every outputs_len + /// 1. Input for every inputs_len + /// 2. clobber: NullTerminatedString // index into string_bytes (null terminated) for every clobbers_len. + pub const Asm = struct { + src_node: i32, + // null-terminated string index + asm_source: NullTerminatedString, + /// 1 bit for each outputs_len: whether it uses `-> T` or not. + /// 0b0 - operand is a pointer to where to store the output. + /// 0b1 - operand is a type; asm expression has the output as the result. + /// 0b0X is the first output, 0bX0 is the second, etc. + output_type_bits: u32, + + pub const Output = struct { + /// index into string_bytes (null terminated) + name: NullTerminatedString, + /// index into string_bytes (null terminated) + constraint: NullTerminatedString, + /// How to interpret this is determined by `output_type_bits`. + operand: Ref, + }; + + pub const Input = struct { + /// index into string_bytes (null terminated) + name: NullTerminatedString, + /// index into string_bytes (null terminated) + constraint: NullTerminatedString, + operand: Ref, + }; + }; + + /// Trailing: + /// if (ret_body_len == 1) { + /// 0. return_type: Ref + /// } + /// if (ret_body_len > 1) { + /// 1. return_type: Index // for each ret_body_len + /// } + /// 2. body: Index // for each body_len + /// 3. src_locs: SrcLocs // if body_len != 0 + /// 4. proto_hash: std.zig.SrcHash // if body_len != 0; hash of function prototype + pub const Func = struct { + /// If this is 0 it means a void return type. + /// If this is 1 it means return_type is a simple Ref + ret_body_len: u32, + /// Points to the block that contains the param instructions for this function. + /// If this is a `declaration`, it refers to the declaration's value body. + param_block: Index, + body_len: u32, + + pub const SrcLocs = struct { + /// Line index in the source file relative to the parent decl. + lbrace_line: u32, + /// Line index in the source file relative to the parent decl. + rbrace_line: u32, + /// lbrace_column is least significant bits u16 + /// rbrace_column is most significant bits u16 + columns: u32, + }; + }; + + /// Trailing: + /// 0. lib_name: NullTerminatedString, // null terminated string index, if has_lib_name is set + /// if (has_align_ref and !has_align_body) { + /// 1. align: Ref, + /// } + /// if (has_align_body) { + /// 2. align_body_len: u32 + /// 3. align_body: u32 // for each align_body_len + /// } + /// if (has_addrspace_ref and !has_addrspace_body) { + /// 4. addrspace: Ref, + /// } + /// if (has_addrspace_body) { + /// 5. addrspace_body_len: u32 + /// 6. addrspace_body: u32 // for each addrspace_body_len + /// } + /// if (has_section_ref and !has_section_body) { + /// 7. section: Ref, + /// } + /// if (has_section_body) { + /// 8. section_body_len: u32 + /// 9. section_body: u32 // for each section_body_len + /// } + /// if (has_cc_ref and !has_cc_body) { + /// 10. 
cc: Ref, + /// } + /// if (has_cc_body) { + /// 11. cc_body_len: u32 + /// 12. cc_body: u32 // for each cc_body_len + /// } + /// if (has_ret_ty_ref and !has_ret_ty_body) { + /// 13. ret_ty: Ref, + /// } + /// if (has_ret_ty_body) { + /// 14. ret_ty_body_len: u32 + /// 15. ret_ty_body: u32 // for each ret_ty_body_len + /// } + /// 16. noalias_bits: u32 // if has_any_noalias + /// - each bit starting with LSB corresponds to parameter indexes + /// 17. body: Index // for each body_len + /// 18. src_locs: Func.SrcLocs // if body_len != 0 + /// 19. proto_hash: std.zig.SrcHash // if body_len != 0; hash of function prototype + pub const FuncFancy = struct { + /// Points to the block that contains the param instructions for this function. + /// If this is a `declaration`, it refers to the declaration's value body. + param_block: Index, + body_len: u32, + bits: Bits, + + /// If both has_cc_ref and has_cc_body are false, it means auto calling convention. + /// If both has_align_ref and has_align_body are false, it means default alignment. + /// If both has_ret_ty_ref and has_ret_ty_body are false, it means void return type. + /// If both has_section_ref and has_section_body are false, it means default section. + /// If both has_addrspace_ref and has_addrspace_body are false, it means default addrspace. + pub const Bits = packed struct { + is_var_args: bool, + is_inferred_error: bool, + is_test: bool, + is_extern: bool, + is_noinline: bool, + has_align_ref: bool, + has_align_body: bool, + has_addrspace_ref: bool, + has_addrspace_body: bool, + has_section_ref: bool, + has_section_body: bool, + has_cc_ref: bool, + has_cc_body: bool, + has_ret_ty_ref: bool, + has_ret_ty_body: bool, + has_lib_name: bool, + has_any_noalias: bool, + _: u15 = undefined, + }; + }; + + /// Trailing: + /// 0. lib_name: NullTerminatedString, // null terminated string index, if has_lib_name is set + /// 1. align: Ref, // if has_align is set + /// 2. init: Ref // if has_init is set + /// The source node is obtained from the containing `block_inline`. + pub const ExtendedVar = struct { + var_type: Ref, + + pub const Small = packed struct { + has_lib_name: bool, + has_align: bool, + has_init: bool, + is_extern: bool, + is_const: bool, + is_threadlocal: bool, + _: u10 = undefined, + }; + }; + + /// This data is stored inside extra, with trailing operands according to `operands_len`. + /// Each operand is a `Ref`. + pub const MultiOp = struct { + operands_len: u32, + }; + + /// Trailing: operand: Ref, // for each `operands_len` (stored in `small`). + pub const NodeMultiOp = struct { + src_node: i32, + }; + + /// This data is stored inside extra, with trailing operands according to `body_len`. + /// Each operand is an `Index`. + pub const Block = struct { + body_len: u32, + }; + + /// Trailing: + /// * inst: Index // for each `body_len` + pub const BoolBr = struct { + lhs: Ref, + body_len: u32, + }; + + /// Trailing: + /// 0. doc_comment: u32 // if `has_doc_comment`; null-terminated string index + /// 1. align_body_len: u32 // if `has_align_linksection_addrspace`; 0 means no `align` + /// 2. linksection_body_len: u32 // if `has_align_linksection_addrspace`; 0 means no `linksection` + /// 3. addrspace_body_len: u32 // if `has_align_linksection_addrspace`; 0 means no `addrspace` + /// 4. value_body_inst: Zir.Inst.Index + /// - for each `value_body_len` + /// - body to be exited via `break_inline` to this `declaration` instruction + /// 5. 
align_body_inst: Zir.Inst.Index + /// - for each `align_body_len` + /// - body to be exited via `break_inline` to this `declaration` instruction + /// 6. linksection_body_inst: Zir.Inst.Index + /// - for each `linksection_body_len` + /// - body to be exited via `break_inline` to this `declaration` instruction + /// 7. addrspace_body_inst: Zir.Inst.Index + /// - for each `addrspace_body_len` + /// - body to be exited via `break_inline` to this `declaration` instruction + pub const Declaration = struct { + // These fields should be concatenated and reinterpreted as a `std.zig.SrcHash`. + src_hash_0: u32, + src_hash_1: u32, + src_hash_2: u32, + src_hash_3: u32, + /// The name of this `Decl`. Also indicates whether it is a test, comptime block, etc. + name: Name, + /// This Decl's line number relative to that of its parent. + /// TODO: column must be encoded similarly to respect non-formatted code! + line_offset: u32, + flags: Flags, + + pub const Flags = packed struct(u32) { + value_body_len: u28, + is_pub: bool, + is_export: bool, + has_doc_comment: bool, + has_align_linksection_addrspace: bool, + }; + + pub const Name = enum(u32) { + @"comptime" = std.math.maxInt(u32), + @"usingnamespace" = std.math.maxInt(u32) - 1, + unnamed_test = std.math.maxInt(u32) - 2, + /// In this case, `has_doc_comment` will be true, and the doc + /// comment body is the identifier name. + decltest = std.math.maxInt(u32) - 3, + /// Other values are `NullTerminatedString` values, i.e. index into + /// `string_bytes`. If the byte referenced is 0, the decl is a named + /// test, and the actual name begins at the following byte. + _, + + pub fn isNamedTest(name: Name, zir: Zir) bool { + return switch (name) { + .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => false, + _ => zir.string_bytes[@intFromEnum(name)] == 0, + }; + } + pub fn toString(name: Name, zir: Zir) ?NullTerminatedString { + switch (name) { + .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => return null, + _ => {}, + } + const idx: u32 = @intFromEnum(name); + if (zir.string_bytes[idx] == 0) { + // Named test + return @enumFromInt(idx + 1); + } + return @enumFromInt(idx); + } + }; + + pub const Bodies = struct { + value_body: []const Index, + align_body: ?[]const Index, + linksection_body: ?[]const Index, + addrspace_body: ?[]const Index, + }; + + pub fn getBodies(declaration: Declaration, extra_end: u32, zir: Zir) Bodies { + var extra_index: u32 = extra_end; + extra_index += @intFromBool(declaration.flags.has_doc_comment); + const value_body_len = declaration.flags.value_body_len; + const align_body_len, const linksection_body_len, const addrspace_body_len = lens: { + if (!declaration.flags.has_align_linksection_addrspace) { + break :lens .{ 0, 0, 0 }; + } + const lens = zir.extra[extra_index..][0..3].*; + extra_index += 3; + break :lens lens; + }; + return .{ + .value_body = b: { + defer extra_index += value_body_len; + break :b zir.bodySlice(extra_index, value_body_len); + }, + .align_body = if (align_body_len == 0) null else b: { + defer extra_index += align_body_len; + break :b zir.bodySlice(extra_index, align_body_len); + }, + .linksection_body = if (linksection_body_len == 0) null else b: { + defer extra_index += linksection_body_len; + break :b zir.bodySlice(extra_index, linksection_body_len); + }, + .addrspace_body = if (addrspace_body_len == 0) null else b: { + defer extra_index += addrspace_body_len; + break :b zir.bodySlice(extra_index, addrspace_body_len); + }, + }; + } + }; + + /// Stored inside extra, with trailing 
arguments according to `args_len`. + /// Implicit 0. arg_0_start: u32, // always same as `args_len` + /// 1. arg_end: u32, // for each `args_len` + /// arg_N_start is the same as arg_N-1_end + pub const Call = struct { + // Note: Flags *must* come first so that unusedResultExpr + // can find it when it goes to modify them. + flags: Flags, + callee: Ref, + + pub const Flags = packed struct { + /// std.builtin.CallModifier in packed form + pub const PackedModifier = u3; + pub const PackedArgsLen = u27; + + packed_modifier: PackedModifier, + ensure_result_used: bool = false, + pop_error_return_trace: bool, + args_len: PackedArgsLen, + + comptime { + if (@sizeOf(Flags) != 4 or @bitSizeOf(Flags) != 32) + @compileError("Layout of Call.Flags needs to be updated!"); + if (@bitSizeOf(std.builtin.CallModifier) != @bitSizeOf(PackedModifier)) + @compileError("Call.Flags.PackedModifier needs to be updated!"); + } + }; + }; + + /// Stored inside extra, with trailing arguments according to `args_len`. + /// Implicit 0. arg_0_start: u32, // always same as `args_len` + /// 1. arg_end: u32, // for each `args_len` + /// arg_N_start is the same as arg_N-1_end + pub const FieldCall = struct { + // Note: Flags *must* come first so that unusedResultExpr + // can find it when it goes to modify them. + flags: Call.Flags, + obj_ptr: Ref, + /// Offset into `string_bytes`. + field_name_start: NullTerminatedString, + }; + + pub const TypeOfPeer = struct { + src_node: i32, + body_len: u32, + body_index: u32, + }; + + pub const BuiltinCall = struct { + // Note: Flags *must* come first so that unusedResultExpr + // can find it when it goes to modify them. + flags: Flags, + modifier: Ref, + callee: Ref, + args: Ref, + + pub const Flags = packed struct { + is_nosuspend: bool, + ensure_result_used: bool, + _: u30 = undefined, + + comptime { + if (@sizeOf(Flags) != 4 or @bitSizeOf(Flags) != 32) + @compileError("Layout of BuiltinCall.Flags needs to be updated!"); + } + }; + }; + + /// This data is stored inside extra, with two sets of trailing `Ref`: + /// * 0. the then body, according to `then_body_len`. + /// * 1. the else body, according to `else_body_len`. + pub const CondBr = struct { + condition: Ref, + then_body_len: u32, + else_body_len: u32, + }; + + /// This data is stored inside extra, trailed by: + /// * 0. body: Index // for each `body_len`. + pub const Try = struct { + /// The error union to unwrap. + operand: Ref, + body_len: u32, + }; + + /// Stored in extra. Depending on the flags in Data, there will be up to 5 + /// trailing Ref fields: + /// 0. sentinel: Ref // if `has_sentinel` flag is set + /// 1. align: Ref // if `has_align` flag is set + /// 2. address_space: Ref // if `has_addrspace` flag is set + /// 3. bit_start: Ref // if `has_bit_range` flag is set + /// 4. host_size: Ref // if `has_bit_range` flag is set + pub const PtrType = struct { + elem_type: Ref, + src_node: i32, + }; + + pub const ArrayTypeSentinel = struct { + len: Ref, + sentinel: Ref, + elem_type: Ref, + }; + + pub const SliceStart = struct { + lhs: Ref, + start: Ref, + }; + + pub const SliceEnd = struct { + lhs: Ref, + start: Ref, + end: Ref, + }; + + pub const SliceSentinel = struct { + lhs: Ref, + start: Ref, + end: Ref, + sentinel: Ref, + }; + + pub const SliceLength = struct { + lhs: Ref, + start: Ref, + len: Ref, + sentinel: Ref, + start_src_node_offset: i32, + }; + + /// The meaning of these operands depends on the corresponding `Tag`. 
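All of these extra structs are decoded the same way; as a worked example, a sketch that fetches the two trailing bodies of a `condbr`. The `extraData` and `bodySlice` helpers are assumed to behave as in the real `Zir` implementation, with `extraData` returning the decoded struct plus the offset where its trailing data begins:

```zig
const std = @import("std");
const Zir = std.zig.Zir; // assumed import path

// Illustrative only: fetch the two trailing bodies of a `condbr`.
fn condBrBodies(zir: Zir, inst: Zir.Inst.Index) struct {
    then_body: []const Zir.Inst.Index,
    else_body: []const Zir.Inst.Index,
} {
    const pl = zir.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = zir.extraData(Zir.Inst.CondBr, pl.payload_index);
    // Trailing data: the "then" body first, the "else" body after it.
    const then_body = zir.bodySlice(extra.end, extra.data.then_body_len);
    const else_body = zir.bodySlice(extra.end + then_body.len, extra.data.else_body_len);
    return .{ .then_body = then_body, .else_body = else_body };
}
```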
+ pub const Bin = struct { + lhs: Ref, + rhs: Ref, + }; + + pub const BinNode = struct { + node: i32, + lhs: Ref, + rhs: Ref, + }; + + pub const UnNode = struct { + node: i32, + operand: Ref, + }; + + pub const ElemPtrImm = struct { + ptr: Ref, + index: u32, + }; + + pub const SwitchBlockErrUnion = struct { + operand: Ref, + bits: Bits, + main_src_node_offset: i32, + + pub const Bits = packed struct(u32) { + /// If true, one or more prongs have multiple items. + has_multi_cases: bool, + /// If true, there is an else prong. This is mutually exclusive with `has_under`. + has_else: bool, + any_uses_err_capture: bool, + payload_is_ref: bool, + scalar_cases_len: ScalarCasesLen, + + pub const ScalarCasesLen = u28; + }; + + pub const MultiProng = struct { + items: []const Ref, + body: []const Index, + }; + }; + + /// 0. multi_cases_len: u32 // If has_multi_cases is set. + /// 1. tag_capture_inst: u32 // If any_has_tag_capture is set. Index of instruction prongs use to refer to the inline tag capture. + /// 2. else_body { // If has_else or has_under is set. + /// info: ProngInfo, + /// body member Index for every info.body_len + /// } + /// 3. scalar_cases: { // for every scalar_cases_len + /// item: Ref, + /// info: ProngInfo, + /// body member Index for every info.body_len + /// } + /// 4. multi_cases: { // for every multi_cases_len + /// items_len: u32, + /// ranges_len: u32, + /// info: ProngInfo, + /// item: Ref // for every items_len + /// ranges: { // for every ranges_len + /// item_first: Ref, + /// item_last: Ref, + /// } + /// body member Index for every info.body_len + /// } + /// + /// When analyzing a case body, the switch instruction itself refers to the + /// captured payload. Whether this is captured by reference or by value + /// depends on whether the `byref` bit is set for the corresponding body. + pub const SwitchBlock = struct { + /// The operand passed to the `switch` expression. If this is a + /// `switch_block`, this is the operand value; if `switch_block_ref` it + /// is a pointer to the operand. `switch_block_ref` is always used if + /// any prong has a byref capture. + operand: Ref, + bits: Bits, + + /// These are stored in trailing data in `extra` for each prong. + pub const ProngInfo = packed struct(u32) { + body_len: u28, + capture: Capture, + is_inline: bool, + has_tag_capture: bool, + + pub const Capture = enum(u2) { + none, + by_val, + by_ref, + }; + }; + + pub const Bits = packed struct(u32) { + /// If true, one or more prongs have multiple items. + has_multi_cases: bool, + /// If true, there is an else prong. This is mutually exclusive with `has_under`. + has_else: bool, + /// If true, there is an underscore prong. This is mutually exclusive with `has_else`. + has_under: bool, + /// If true, at least one prong has an inline tag capture. + any_has_tag_capture: bool, + scalar_cases_len: ScalarCasesLen, + + pub const ScalarCasesLen = u28; + + pub fn specialProng(bits: Bits) SpecialProng { + const has_else: u2 = @intFromBool(bits.has_else); + const has_under: u2 = @intFromBool(bits.has_under); + return switch ((has_else << 1) | has_under) { + 0b00 => .none, + 0b01 => .under, + 0b10 => .@"else", + 0b11 => unreachable, + }; + } + }; + + pub const MultiProng = struct { + items: []const Ref, + body: []const Index, + }; + }; + + pub const ArrayInitRefTy = struct { + ptr_ty: Ref, + elem_count: u32, + }; + + pub const Field = struct { + lhs: Ref, + /// Offset into `string_bytes`. 
+ field_name_start: NullTerminatedString, + }; + + pub const FieldNamed = struct { + lhs: Ref, + field_name: Ref, + }; + + pub const As = struct { + dest_type: Ref, + operand: Ref, + }; + + /// Trailing: + /// 0. fields_len: u32, // if has_fields_len + /// 1. decls_len: u32, // if has_decls_len + /// 2. backing_int_body_len: u32, // if has_backing_int + /// 3. backing_int_ref: Ref, // if has_backing_int and backing_int_body_len is 0 + /// 4. backing_int_body_inst: Inst, // if has_backing_int and backing_int_body_len is > 0 + /// 5. decl: Index, // for every decls_len; points to a `declaration` instruction + /// 6. flags: u32 // for every 8 fields + /// - sets of 4 bits: + /// 0b000X: whether corresponding field has an align expression + /// 0b00X0: whether corresponding field has a default expression + /// 0b0X00: whether corresponding field is comptime + /// 0bX000: whether corresponding field has a type expression + /// 7. fields: { // for every fields_len + /// field_name: u32, // if !is_tuple + /// doc_comment: NullTerminatedString, // .empty if no doc comment + /// field_type: Ref, // if corresponding bit is not set. none means anytype. + /// field_type_body_len: u32, // if corresponding bit is set + /// align_body_len: u32, // if corresponding bit is set + /// init_body_len: u32, // if corresponding bit is set + /// } + /// 8. bodies: { // for every fields_len + /// field_type_body_inst: Inst, // for each field_type_body_len + /// align_body_inst: Inst, // for each align_body_len + /// init_body_inst: Inst, // for each init_body_len + /// } + pub const StructDecl = struct { + // These fields should be concatenated and reinterpreted as a `std.zig.SrcHash`. + // This hash contains the source of all fields, and any specified attributes (`extern`, backing type, etc). + fields_hash_0: u32, + fields_hash_1: u32, + fields_hash_2: u32, + fields_hash_3: u32, + src_node: i32, + + pub fn src(self: StructDecl) LazySrcLoc { + return LazySrcLoc.nodeOffset(self.src_node); + } + + pub const Small = packed struct { + has_fields_len: bool, + has_decls_len: bool, + has_backing_int: bool, + known_non_opv: bool, + known_comptime_only: bool, + is_tuple: bool, + name_strategy: NameStrategy, + layout: std.builtin.Type.ContainerLayout, + any_default_inits: bool, + any_comptime_fields: bool, + any_aligned_fields: bool, + _: u3 = undefined, + }; + }; + + pub const NameStrategy = enum(u2) { + /// Use the same name as the parent declaration name. + /// e.g. `const Foo = struct {...};`. + parent, + /// Use the name of the currently executing comptime function call, + /// with the current parameters. e.g. `ArrayList(i32)`. + func, + /// Create an anonymous name for this declaration. + /// Like this: "ParentDeclName_struct_69" + anon, + /// Use the name specified in the next `dbg_var_{val,ptr}` instruction. + dbg_var, + }; + + pub const FullPtrCastFlags = packed struct(u5) { + ptr_cast: bool = false, + align_cast: bool = false, + addrspace_cast: bool = false, + const_cast: bool = false, + volatile_cast: bool = false, + + pub inline fn needResultTypeBuiltinName(flags: FullPtrCastFlags) []const u8 { + if (flags.ptr_cast) return "@ptrCast"; + if (flags.align_cast) return "@alignCast"; + if (flags.addrspace_cast) return "@addrSpaceCast"; + unreachable; + } + }; + + /// Trailing: + /// 0. tag_type: Ref, // if has_tag_type + /// 1. body_len: u32, // if has_body_len + /// 2. fields_len: u32, // if has_fields_len + /// 3. decls_len: u32, // if has_decls_len + /// 4. 
decl: Index, // for every decls_len; points to a `declaration` instruction
+    /// 5. inst: Index // for every body_len
+    /// 6. has_bits: u32 // for every 32 fields
+    ///    - the bit is whether corresponding field has a value expression
+    /// 7. fields: { // for every fields_len
+    ///        field_name: u32,
+    ///        doc_comment: u32, // .empty if no doc_comment
+    ///        value: Ref, // if corresponding bit is set
+    ///    }
+    pub const EnumDecl = struct {
+        // These fields should be concatenated and reinterpreted as a `std.zig.SrcHash`.
+        // This hash contains the source of all fields, and the backing type if specified.
+        fields_hash_0: u32,
+        fields_hash_1: u32,
+        fields_hash_2: u32,
+        fields_hash_3: u32,
+        src_node: i32,
+
+        pub fn src(self: EnumDecl) LazySrcLoc {
+            return LazySrcLoc.nodeOffset(self.src_node);
+        }
+
+        pub const Small = packed struct {
+            has_tag_type: bool,
+            has_body_len: bool,
+            has_fields_len: bool,
+            has_decls_len: bool,
+            name_strategy: NameStrategy,
+            nonexhaustive: bool,
+            _: u9 = undefined,
+        };
+    };
+
+    /// Trailing:
+    /// 0. tag_type: Ref, // if has_tag_type
+    /// 1. body_len: u32, // if has_body_len
+    /// 2. fields_len: u32, // if has_fields_len
+    /// 3. decls_len: u32, // if has_decls_len
+    /// 4. decl: Index, // for every decls_len; points to a `declaration` instruction
+    /// 5. inst: Index // for every body_len
+    /// 6. has_bits: u32 // for every 8 fields
+    ///    - sets of 4 bits:
+    ///      0b000X: whether corresponding field has a type expression
+    ///      0b00X0: whether corresponding field has an align expression
+    ///      0b0X00: whether corresponding field has a tag value expression
+    ///      0bX000: unused
+    /// 7. fields: { // for every fields_len
+    ///        field_name: NullTerminatedString, // null terminated string index
+    ///        doc_comment: NullTerminatedString, // .empty if no doc comment
+    ///        field_type: Ref, // if corresponding bit is set
+    ///        - if none, means `anytype`.
+    ///        align: Ref, // if corresponding bit is set
+    ///        tag_value: Ref, // if corresponding bit is set
+    ///    }
+    pub const UnionDecl = struct {
+        // These fields should be concatenated and reinterpreted as a `std.zig.SrcHash`.
+        // This hash contains the source of all fields, and any specified attributes (`extern` etc).
+        fields_hash_0: u32,
+        fields_hash_1: u32,
+        fields_hash_2: u32,
+        fields_hash_3: u32,
+        src_node: i32,
+
+        pub fn src(self: UnionDecl) LazySrcLoc {
+            return LazySrcLoc.nodeOffset(self.src_node);
+        }
+
+        pub const Small = packed struct {
+            has_tag_type: bool,
+            has_body_len: bool,
+            has_fields_len: bool,
+            has_decls_len: bool,
+            name_strategy: NameStrategy,
+            layout: std.builtin.Type.ContainerLayout,
+            /// has_tag_type | auto_enum_tag | result
+            /// -------------------------------------
+            /// false        | false         | union { }
+            /// false        | true          | union(enum) { }
+            /// true         | true          | union(enum(T)) { }
+            /// true         | false         | union(T) { }
+            auto_enum_tag: bool,
+            any_aligned_fields: bool,
+            _: u6 = undefined,
+        };
+    };
+
+    /// Trailing:
+    /// 0. decls_len: u32, // if has_decls_len
+    /// 1. 
decl: Index, // for every decls_len; points to a `declaration` instruction + pub const OpaqueDecl = struct { + src_node: i32, + + pub fn src(self: OpaqueDecl) LazySrcLoc { + return LazySrcLoc.nodeOffset(self.src_node); + } + + pub const Small = packed struct { + has_decls_len: bool, + name_strategy: NameStrategy, + _: u13 = undefined, + }; + }; + + /// Trailing: + /// { // for every fields_len + /// field_name: NullTerminatedString // null terminated string index + /// doc_comment: NullTerminatedString // null terminated string index + /// } + pub const ErrorSetDecl = struct { + fields_len: u32, + }; + + /// A f128 value, broken up into 4 u32 parts. + pub const Float128 = struct { + piece0: u32, + piece1: u32, + piece2: u32, + piece3: u32, + + pub fn get(self: Float128) f128 { + const int_bits = @as(u128, self.piece0) | + (@as(u128, self.piece1) << 32) | + (@as(u128, self.piece2) << 64) | + (@as(u128, self.piece3) << 96); + return @as(f128, @bitCast(int_bits)); + } + }; + + /// Trailing is an item per field. + pub const StructInit = struct { + fields_len: u32, + + pub const Item = struct { + /// The `struct_init_field_type` ZIR instruction for this field init. + field_type: Index, + /// The field init expression to be used as the field value. This value will be coerced + /// to the field type if not already. + init: Ref, + }; + }; + + /// Trailing is an Item per field. + /// TODO make this instead array of inits followed by array of names because + /// it will be simpler Sema code and better for CPU cache. + pub const StructInitAnon = struct { + fields_len: u32, + + pub const Item = struct { + /// Null-terminated string table index. + field_name: NullTerminatedString, + /// The field init expression to be used as the field value. + init: Ref, + }; + }; + + pub const FieldType = struct { + container_type: Ref, + /// Offset into `string_bytes`, null terminated. + name_start: NullTerminatedString, + }; + + pub const FieldTypeRef = struct { + container_type: Ref, + field_name: Ref, + }; + + pub const Cmpxchg = struct { + node: i32, + ptr: Ref, + expected_value: Ref, + new_value: Ref, + success_order: Ref, + failure_order: Ref, + }; + + pub const AtomicRmw = struct { + ptr: Ref, + operation: Ref, + operand: Ref, + ordering: Ref, + }; + + pub const UnionInit = struct { + union_type: Ref, + field_name: Ref, + init: Ref, + }; + + pub const AtomicStore = struct { + ptr: Ref, + operand: Ref, + ordering: Ref, + }; + + pub const AtomicLoad = struct { + elem_type: Ref, + ptr: Ref, + ordering: Ref, + }; + + pub const MulAdd = struct { + mulend1: Ref, + mulend2: Ref, + addend: Ref, + }; + + pub const FieldParentPtr = struct { + parent_type: Ref, + field_name: Ref, + field_ptr: Ref, + }; + + pub const Shuffle = struct { + elem_type: Ref, + a: Ref, + b: Ref, + mask: Ref, + }; + + pub const Select = struct { + node: i32, + elem_type: Ref, + pred: Ref, + a: Ref, + b: Ref, + }; + + pub const AsyncCall = struct { + node: i32, + frame_buffer: Ref, + result_ptr: Ref, + fn_ptr: Ref, + args: Ref, + }; + + /// Trailing: inst: Index // for every body_len + pub const Param = struct { + /// Null-terminated string index. + name: NullTerminatedString, + /// Null-terminated string index. + doc_comment: NullTerminatedString, + /// The body contains the type of the parameter. + body_len: u32, + }; + + /// Trailing: + /// 0. type_inst: Ref, // if small 0b000X is set + /// 1. 
align_inst: Ref, // if small 0b00X0 is set + pub const AllocExtended = struct { + src_node: i32, + + pub const Small = packed struct { + has_type: bool, + has_align: bool, + is_const: bool, + is_comptime: bool, + _: u12 = undefined, + }; + }; + + pub const Export = struct { + /// If present, this is referring to a Decl via field access, e.g. `a.b`. + /// If omitted, this is referring to a Decl via identifier, e.g. `a`. + namespace: Ref, + /// Null-terminated string index. + decl_name: NullTerminatedString, + options: Ref, + }; + + pub const ExportValue = struct { + /// The comptime value to export. + operand: Ref, + options: Ref, + }; + + /// Trailing: `CompileErrors.Item` for each `items_len`. + pub const CompileErrors = struct { + items_len: u32, + + /// Trailing: `note_payload_index: u32` for each `notes_len`. + /// It's a payload index of another `Item`. + pub const Item = struct { + /// null terminated string index + msg: NullTerminatedString, + node: Ast.Node.Index, + /// If node is 0 then this will be populated. + token: Ast.TokenIndex, + /// Can be used in combination with `token`. + byte_offset: u32, + /// 0 or a payload index of a `Block`, each is a payload + /// index of another `Item`. + notes: u32, + + pub fn notesLen(item: Item, zir: Zir) u32 { + if (item.notes == 0) return 0; + const block = zir.extraData(Block, item.notes); + return block.data.body_len; + } + }; + }; + + /// Trailing: for each `imports_len` there is an Item + pub const Imports = struct { + imports_len: u32, + + pub const Item = struct { + /// null terminated string index + name: NullTerminatedString, + /// points to the import name + token: Ast.TokenIndex, + }; + }; + + pub const LineColumn = struct { + line: u32, + column: u32, + }; + + pub const ArrayInit = struct { + ty: Ref, + init_count: u32, + }; + + pub const Src = struct { + node: i32, + line: u32, + column: u32, + }; + + pub const DeferErrCode = struct { + remapped_err_code: Index, + index: u32, + len: u32, + }; + + pub const ValidateDestructure = struct { + /// The value being destructured. + operand: Ref, + /// The `destructure_assign` node. + destructure_node: i32, + /// The expected field count. + expect_len: u32, + }; + + pub const ArrayMul = struct { + /// The result type of the array multiplication operation, or `.none` if none was available. + res_ty: Ref, + /// The LHS of the array multiplication. + lhs: Ref, + /// The RHS of the array multiplication. + rhs: Ref, + }; + + pub const RestoreErrRetIndex = struct { + src_node: i32, + /// If `.none`, restore the trace to its state upon function entry. + block: Ref, + /// If `.none`, restore unconditionally. + operand: Ref, + + pub fn src(self: RestoreErrRetIndex) LazySrcLoc { + return LazySrcLoc.nodeOffset(self.src_node); + } + }; +}; + +pub const SpecialProng = enum { none, @"else", under }; + +pub const DeclIterator = struct { + extra_index: u32, + decls_remaining: u32, + zir: Zir, + + pub fn next(it: *DeclIterator) ?Inst.Index { + if (it.decls_remaining == 0) return null; + const decl_inst: Zir.Inst.Index = @enumFromInt(it.zir.extra[it.extra_index]); + it.extra_index += 1; + it.decls_remaining -= 1; + assert(it.zir.instructions.items(.tag)[@intFromEnum(decl_inst)] == .declaration); + return decl_inst; + } +}; + +pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator { + const tags = zir.instructions.items(.tag); + const datas = zir.instructions.items(.data); + switch (tags[@intFromEnum(decl_inst)]) { + // Functions are allowed and yield no iterations. 
+ // There is one case matching this in the extended instruction set below. + .func, .func_inferred, .func_fancy => return .{ + .extra_index = undefined, + .decls_remaining = 0, + .zir = zir, + }, + + .extended => { + const extended = datas[@intFromEnum(decl_inst)].extended; + switch (extended.opcode) { + .struct_decl => { + const small: Inst.StructDecl.Small = @bitCast(extended.small); + var extra_index: u32 = @intCast(extended.operand + @typeInfo(Inst.StructDecl).Struct.fields.len); + extra_index += @intFromBool(small.has_fields_len); + const decls_len = if (small.has_decls_len) decls_len: { + const decls_len = zir.extra[extra_index]; + extra_index += 1; + break :decls_len decls_len; + } else 0; + + if (small.has_backing_int) { + const backing_int_body_len = zir.extra[extra_index]; + extra_index += 1; // backing_int_body_len + if (backing_int_body_len == 0) { + extra_index += 1; // backing_int_ref + } else { + extra_index += backing_int_body_len; // backing_int_body_inst + } + } + + return .{ + .extra_index = extra_index, + .decls_remaining = decls_len, + .zir = zir, + }; + }, + .enum_decl => { + const small: Inst.EnumDecl.Small = @bitCast(extended.small); + var extra_index: u32 = @intCast(extended.operand + @typeInfo(Inst.EnumDecl).Struct.fields.len); + extra_index += @intFromBool(small.has_tag_type); + extra_index += @intFromBool(small.has_body_len); + extra_index += @intFromBool(small.has_fields_len); + const decls_len = if (small.has_decls_len) decls_len: { + const decls_len = zir.extra[extra_index]; + extra_index += 1; + break :decls_len decls_len; + } else 0; + + return .{ + .extra_index = extra_index, + .decls_remaining = decls_len, + .zir = zir, + }; + }, + .union_decl => { + const small: Inst.UnionDecl.Small = @bitCast(extended.small); + var extra_index: u32 = @intCast(extended.operand + @typeInfo(Inst.UnionDecl).Struct.fields.len); + extra_index += @intFromBool(small.has_tag_type); + extra_index += @intFromBool(small.has_body_len); + extra_index += @intFromBool(small.has_fields_len); + const decls_len = if (small.has_decls_len) decls_len: { + const decls_len = zir.extra[extra_index]; + extra_index += 1; + break :decls_len decls_len; + } else 0; + + return .{ + .extra_index = extra_index, + .decls_remaining = decls_len, + .zir = zir, + }; + }, + .opaque_decl => { + const small: Inst.OpaqueDecl.Small = @bitCast(extended.small); + var extra_index: u32 = @intCast(extended.operand + @typeInfo(Inst.OpaqueDecl).Struct.fields.len); + const decls_len = if (small.has_decls_len) decls_len: { + const decls_len = zir.extra[extra_index]; + extra_index += 1; + break :decls_len decls_len; + } else 0; + + return .{ + .extra_index = extra_index, + .decls_remaining = decls_len, + .zir = zir, + }; + }, + else => unreachable, + } + }, + else => unreachable, + } +} + +/// The iterator would have to allocate memory anyway to iterate. So here we populate +/// an ArrayList as the result. 
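// [Aside: not part of this patch. The iterator construction above leans on one
// decoding idiom throughout: bump `extra_index` past optional trailing words
// with `@intFromBool`, and read a length only when its flag says the word is
// present. A standalone, runnable reduction of that idiom; the layout and the
// values here are invented.]
const std = @import("std");

test "skip optional trailing words, then read decls_len" {
    // Pretend layout: [tag_type? u32][fields_len? u32][decls_len? u32]
    const extra = [_]u32{ 99, 2 }; // tag_type present, decls_len = 2
    const has_tag_type = true;
    const has_fields_len = false;
    const has_decls_len = true;

    var extra_index: u32 = 0;
    extra_index += @intFromBool(has_tag_type); // skip tag_type
    extra_index += @intFromBool(has_fields_len); // nothing to skip
    const decls_len = if (has_decls_len) decls_len: {
        const n = extra[extra_index];
        extra_index += 1;
        break :decls_len n;
    } else 0;

    try std.testing.expectEqual(@as(u32, 2), decls_len);
    try std.testing.expectEqual(@as(u32, 2), extra_index);
}
// [End of aside.]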
+pub fn findDecls(zir: Zir, list: *std.ArrayList(Inst.Index), decl_inst: Zir.Inst.Index) !void {
+    list.clearRetainingCapacity();
+    const declaration, const extra_end = zir.getDeclaration(decl_inst);
+    const bodies = declaration.getBodies(extra_end, zir);
+
+    try zir.findDeclsBody(list, bodies.value_body);
+    if (bodies.align_body) |b| try zir.findDeclsBody(list, b);
+    if (bodies.linksection_body) |b| try zir.findDeclsBody(list, b);
+    if (bodies.addrspace_body) |b| try zir.findDeclsBody(list, b);
+}
+
+fn findDeclsInner(
+    zir: Zir,
+    list: *std.ArrayList(Inst.Index),
+    inst: Inst.Index,
+) Allocator.Error!void {
+    const tags = zir.instructions.items(.tag);
+    const datas = zir.instructions.items(.data);
+
+    switch (tags[@intFromEnum(inst)]) {
+        // Function instructions are interesting and have a body.
+        .func,
+        .func_inferred,
+        => {
+            try list.append(inst);
+
+            const inst_data = datas[@intFromEnum(inst)].pl_node;
+            const extra = zir.extraData(Inst.Func, inst_data.payload_index);
+            var extra_index: usize = extra.end;
+            switch (extra.data.ret_body_len) {
+                0 => {},
+                1 => extra_index += 1,
+                else => {
+                    const body = zir.bodySlice(extra_index, extra.data.ret_body_len);
+                    extra_index += body.len;
+                    try zir.findDeclsBody(list, body);
+                },
+            }
+            const body = zir.bodySlice(extra_index, extra.data.body_len);
+            return zir.findDeclsBody(list, body);
+        },
+        .func_fancy => {
+            try list.append(inst);
+
+            const inst_data = datas[@intFromEnum(inst)].pl_node;
+            const extra = zir.extraData(Inst.FuncFancy, inst_data.payload_index);
+            var extra_index: usize = extra.end;
+            extra_index += @intFromBool(extra.data.bits.has_lib_name);
+
+            if (extra.data.bits.has_align_body) {
+                const body_len = zir.extra[extra_index];
+                extra_index += 1;
+                const body = zir.bodySlice(extra_index, body_len);
+                try zir.findDeclsBody(list, body);
+                extra_index += body.len;
+            } else if (extra.data.bits.has_align_ref) {
+                extra_index += 1;
+            }
+
+            if (extra.data.bits.has_addrspace_body) {
+                const body_len = zir.extra[extra_index];
+                extra_index += 1;
+                const body = zir.bodySlice(extra_index, body_len);
+                try zir.findDeclsBody(list, body);
+                extra_index += body.len;
+            } else if (extra.data.bits.has_addrspace_ref) {
+                extra_index += 1;
+            }
+
+            if (extra.data.bits.has_section_body) {
+                const body_len = zir.extra[extra_index];
+                extra_index += 1;
+                const body = zir.bodySlice(extra_index, body_len);
+                try zir.findDeclsBody(list, body);
+                extra_index += body.len;
+            } else if (extra.data.bits.has_section_ref) {
+                extra_index += 1;
+            }
+
+            if (extra.data.bits.has_cc_body) {
+                const body_len = zir.extra[extra_index];
+                extra_index += 1;
+                const body = zir.bodySlice(extra_index, body_len);
+                try zir.findDeclsBody(list, body);
+                extra_index += body.len;
+            } else if (extra.data.bits.has_cc_ref) {
+                extra_index += 1;
+            }
+
+            if (extra.data.bits.has_ret_ty_body) {
+                const body_len = zir.extra[extra_index];
+                extra_index += 1;
+                const body = zir.bodySlice(extra_index, body_len);
+                try zir.findDeclsBody(list, body);
+                extra_index += body.len;
+            } else if (extra.data.bits.has_ret_ty_ref) {
+                extra_index += 1;
+            }
+
+            extra_index += @intFromBool(extra.data.bits.has_any_noalias);
+
+            const body = zir.bodySlice(extra_index, extra.data.body_len);
+            return zir.findDeclsBody(list, body);
+        },
+        .extended => {
+            const extended = datas[@intFromEnum(inst)].extended;
+            switch (extended.opcode) {
+
+                // Decl instructions are interesting but have no body.
+                // TODO yes they do have a body actually. recurse over them just like block instructions.
+ .struct_decl, + .union_decl, + .enum_decl, + .opaque_decl, + => return list.append(inst), + + else => return, + } + }, + + // Block instructions, recurse over the bodies. + + .block, .block_comptime, .block_inline => { + const inst_data = datas[@intFromEnum(inst)].pl_node; + const extra = zir.extraData(Inst.Block, inst_data.payload_index); + const body = zir.bodySlice(extra.end, extra.data.body_len); + return zir.findDeclsBody(list, body); + }, + .condbr, .condbr_inline => { + const inst_data = datas[@intFromEnum(inst)].pl_node; + const extra = zir.extraData(Inst.CondBr, inst_data.payload_index); + const then_body = zir.bodySlice(extra.end, extra.data.then_body_len); + const else_body = zir.bodySlice(extra.end + then_body.len, extra.data.else_body_len); + try zir.findDeclsBody(list, then_body); + try zir.findDeclsBody(list, else_body); + }, + .@"try", .try_ptr => { + const inst_data = datas[@intFromEnum(inst)].pl_node; + const extra = zir.extraData(Inst.Try, inst_data.payload_index); + const body = zir.bodySlice(extra.end, extra.data.body_len); + try zir.findDeclsBody(list, body); + }, + .switch_block => return findDeclsSwitch(zir, list, inst), + + .suspend_block => @panic("TODO iterate suspend block"), + + else => return, // Regular instruction, not interesting. + } +} + +fn findDeclsSwitch( + zir: Zir, + list: *std.ArrayList(Inst.Index), + inst: Inst.Index, +) Allocator.Error!void { + const inst_data = zir.instructions.items(.data)[@intFromEnum(inst)].pl_node; + const extra = zir.extraData(Inst.SwitchBlock, inst_data.payload_index); + + var extra_index: usize = extra.end; + + const multi_cases_len = if (extra.data.bits.has_multi_cases) blk: { + const multi_cases_len = zir.extra[extra_index]; + extra_index += 1; + break :blk multi_cases_len; + } else 0; + + const special_prong = extra.data.bits.specialProng(); + if (special_prong != .none) { + const body_len: u31 = @truncate(zir.extra[extra_index]); + extra_index += 1; + const body = zir.bodySlice(extra_index, body_len); + extra_index += body.len; + + try zir.findDeclsBody(list, body); + } + + { + const scalar_cases_len = extra.data.bits.scalar_cases_len; + for (0..scalar_cases_len) |_| { + extra_index += 1; + const body_len: u31 = @truncate(zir.extra[extra_index]); + extra_index += 1; + const body = zir.bodySlice(extra_index, body_len); + extra_index += body_len; + + try zir.findDeclsBody(list, body); + } + } + { + for (0..multi_cases_len) |_| { + const items_len = zir.extra[extra_index]; + extra_index += 1; + const ranges_len = zir.extra[extra_index]; + extra_index += 1; + const body_len: u31 = @truncate(zir.extra[extra_index]); + extra_index += 1; + const items = zir.refSlice(extra_index, items_len); + extra_index += items_len; + _ = items; + + var range_i: usize = 0; + while (range_i < ranges_len) : (range_i += 1) { + extra_index += 1; + extra_index += 1; + } + + const body = zir.bodySlice(extra_index, body_len); + extra_index += body_len; + + try zir.findDeclsBody(list, body); + } + } +} + +fn findDeclsBody( + zir: Zir, + list: *std.ArrayList(Inst.Index), + body: []const Inst.Index, +) Allocator.Error!void { + for (body) |member| { + try zir.findDeclsInner(list, member); + } +} + +pub const FnInfo = struct { + param_body: []const Inst.Index, + param_body_inst: Inst.Index, + ret_ty_body: []const Inst.Index, + body: []const Inst.Index, + ret_ty_ref: Zir.Inst.Ref, + total_params_len: u32, +}; + +pub fn getParamBody(zir: Zir, fn_inst: Inst.Index) []const Zir.Inst.Index { + const tags = zir.instructions.items(.tag); + const datas = 
zir.instructions.items(.data); + const inst_data = datas[@intFromEnum(fn_inst)].pl_node; + + const param_block_index = switch (tags[@intFromEnum(fn_inst)]) { + .func, .func_inferred => blk: { + const extra = zir.extraData(Inst.Func, inst_data.payload_index); + break :blk extra.data.param_block; + }, + .func_fancy => blk: { + const extra = zir.extraData(Inst.FuncFancy, inst_data.payload_index); + break :blk extra.data.param_block; + }, + else => unreachable, + }; + + switch (tags[@intFromEnum(param_block_index)]) { + .block, .block_comptime, .block_inline => { + const param_block = zir.extraData(Inst.Block, datas[@intFromEnum(param_block_index)].pl_node.payload_index); + return zir.bodySlice(param_block.end, param_block.data.body_len); + }, + .declaration => { + const decl, const extra_end = zir.getDeclaration(param_block_index); + return decl.getBodies(extra_end, zir).value_body; + }, + else => unreachable, + } +} + +pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo { + const tags = zir.instructions.items(.tag); + const datas = zir.instructions.items(.data); + const info: struct { + param_block: Inst.Index, + body: []const Inst.Index, + ret_ty_ref: Inst.Ref, + ret_ty_body: []const Inst.Index, + } = switch (tags[@intFromEnum(fn_inst)]) { + .func, .func_inferred => blk: { + const inst_data = datas[@intFromEnum(fn_inst)].pl_node; + const extra = zir.extraData(Inst.Func, inst_data.payload_index); + + var extra_index: usize = extra.end; + var ret_ty_ref: Inst.Ref = .none; + var ret_ty_body: []const Inst.Index = &.{}; + + switch (extra.data.ret_body_len) { + 0 => { + ret_ty_ref = .void_type; + }, + 1 => { + ret_ty_ref = @enumFromInt(zir.extra[extra_index]); + extra_index += 1; + }, + else => { + ret_ty_body = zir.bodySlice(extra_index, extra.data.ret_body_len); + extra_index += ret_ty_body.len; + }, + } + + const body = zir.bodySlice(extra_index, extra.data.body_len); + extra_index += body.len; + + break :blk .{ + .param_block = extra.data.param_block, + .ret_ty_ref = ret_ty_ref, + .ret_ty_body = ret_ty_body, + .body = body, + }; + }, + .func_fancy => blk: { + const inst_data = datas[@intFromEnum(fn_inst)].pl_node; + const extra = zir.extraData(Inst.FuncFancy, inst_data.payload_index); + + var extra_index: usize = extra.end; + var ret_ty_ref: Inst.Ref = .void_type; + var ret_ty_body: []const Inst.Index = &.{}; + + extra_index += @intFromBool(extra.data.bits.has_lib_name); + if (extra.data.bits.has_align_body) { + extra_index += zir.extra[extra_index] + 1; + } else if (extra.data.bits.has_align_ref) { + extra_index += 1; + } + if (extra.data.bits.has_addrspace_body) { + extra_index += zir.extra[extra_index] + 1; + } else if (extra.data.bits.has_addrspace_ref) { + extra_index += 1; + } + if (extra.data.bits.has_section_body) { + extra_index += zir.extra[extra_index] + 1; + } else if (extra.data.bits.has_section_ref) { + extra_index += 1; + } + if (extra.data.bits.has_cc_body) { + extra_index += zir.extra[extra_index] + 1; + } else if (extra.data.bits.has_cc_ref) { + extra_index += 1; + } + if (extra.data.bits.has_ret_ty_body) { + const body_len = zir.extra[extra_index]; + extra_index += 1; + ret_ty_body = zir.bodySlice(extra_index, body_len); + extra_index += ret_ty_body.len; + } else if (extra.data.bits.has_ret_ty_ref) { + ret_ty_ref = @enumFromInt(zir.extra[extra_index]); + extra_index += 1; + } + + extra_index += @intFromBool(extra.data.bits.has_any_noalias); + + const body = zir.bodySlice(extra_index, extra.data.body_len); + extra_index += body.len; + break :blk .{ + .param_block = 
extra.data.param_block, + .ret_ty_ref = ret_ty_ref, + .ret_ty_body = ret_ty_body, + .body = body, + }; + }, + else => unreachable, + }; + const param_body = switch (tags[@intFromEnum(info.param_block)]) { + .block, .block_comptime, .block_inline => param_body: { + const param_block = zir.extraData(Inst.Block, datas[@intFromEnum(info.param_block)].pl_node.payload_index); + break :param_body zir.bodySlice(param_block.end, param_block.data.body_len); + }, + .declaration => param_body: { + const decl, const extra_end = zir.getDeclaration(info.param_block); + break :param_body decl.getBodies(extra_end, zir).value_body; + }, + else => unreachable, + }; + var total_params_len: u32 = 0; + for (param_body) |inst| { + switch (tags[@intFromEnum(inst)]) { + .param, .param_comptime, .param_anytype, .param_anytype_comptime => { + total_params_len += 1; + }, + else => continue, + } + } + return .{ + .param_body = param_body, + .param_body_inst = info.param_block, + .ret_ty_body = info.ret_ty_body, + .ret_ty_ref = info.ret_ty_ref, + .body = info.body, + .total_params_len = total_params_len, + }; +} + +pub fn getDeclaration(zir: Zir, inst: Zir.Inst.Index) struct { Inst.Declaration, u32 } { + assert(zir.instructions.items(.tag)[@intFromEnum(inst)] == .declaration); + const pl_node = zir.instructions.items(.data)[@intFromEnum(inst)].pl_node; + const extra = zir.extraData(Inst.Declaration, pl_node.payload_index); + return .{ + extra.data, + @intCast(extra.end), + }; +} + +pub fn getAssociatedSrcHash(zir: Zir, inst: Zir.Inst.Index) ?std.zig.SrcHash { + const tag = zir.instructions.items(.tag); + const data = zir.instructions.items(.data); + switch (tag[@intFromEnum(inst)]) { + .declaration => { + const pl_node = data[@intFromEnum(inst)].pl_node; + const extra = zir.extraData(Inst.Declaration, pl_node.payload_index); + return @bitCast([4]u32{ + extra.data.src_hash_0, + extra.data.src_hash_1, + extra.data.src_hash_2, + extra.data.src_hash_3, + }); + }, + .func, .func_inferred => { + const pl_node = data[@intFromEnum(inst)].pl_node; + const extra = zir.extraData(Inst.Func, pl_node.payload_index); + if (extra.data.body_len == 0) { + // Function type or extern fn - no associated hash + return null; + } + const extra_index = extra.end + + 1 + + extra.data.body_len + + @typeInfo(Inst.Func.SrcLocs).Struct.fields.len; + return @bitCast([4]u32{ + zir.extra[extra_index + 0], + zir.extra[extra_index + 1], + zir.extra[extra_index + 2], + zir.extra[extra_index + 3], + }); + }, + .func_fancy => { + const pl_node = data[@intFromEnum(inst)].pl_node; + const extra = zir.extraData(Inst.FuncFancy, pl_node.payload_index); + if (extra.data.body_len == 0) { + // Function type or extern fn - no associated hash + return null; + } + const bits = extra.data.bits; + var extra_index = extra.end; + extra_index += @intFromBool(bits.has_lib_name); + if (bits.has_align_body) { + const body_len = zir.extra[extra_index]; + extra_index += 1 + body_len; + } else extra_index += @intFromBool(bits.has_align_ref); + if (bits.has_addrspace_body) { + const body_len = zir.extra[extra_index]; + extra_index += 1 + body_len; + } else extra_index += @intFromBool(bits.has_addrspace_ref); + if (bits.has_section_body) { + const body_len = zir.extra[extra_index]; + extra_index += 1 + body_len; + } else extra_index += @intFromBool(bits.has_section_ref); + if (bits.has_cc_body) { + const body_len = zir.extra[extra_index]; + extra_index += 1 + body_len; + } else extra_index += @intFromBool(bits.has_cc_ref); + if (bits.has_ret_ty_body) { + const body_len = 
zir.extra[extra_index]; + extra_index += 1 + body_len; + } else extra_index += @intFromBool(bits.has_ret_ty_ref); + extra_index += @intFromBool(bits.has_any_noalias); + extra_index += extra.data.body_len; + extra_index += @typeInfo(Zir.Inst.Func.SrcLocs).Struct.fields.len; + return @bitCast([4]u32{ + zir.extra[extra_index + 0], + zir.extra[extra_index + 1], + zir.extra[extra_index + 2], + zir.extra[extra_index + 3], + }); + }, + .extended => {}, + else => return null, + } + const extended = data[@intFromEnum(inst)].extended; + switch (extended.opcode) { + .struct_decl => { + const extra = zir.extraData(Inst.StructDecl, extended.operand).data; + return @bitCast([4]u32{ + extra.fields_hash_0, + extra.fields_hash_1, + extra.fields_hash_2, + extra.fields_hash_3, + }); + }, + .union_decl => { + const extra = zir.extraData(Inst.UnionDecl, extended.operand).data; + return @bitCast([4]u32{ + extra.fields_hash_0, + extra.fields_hash_1, + extra.fields_hash_2, + extra.fields_hash_3, + }); + }, + .enum_decl => { + const extra = zir.extraData(Inst.EnumDecl, extended.operand).data; + return @bitCast([4]u32{ + extra.fields_hash_0, + extra.fields_hash_1, + extra.fields_hash_2, + extra.fields_hash_3, + }); + }, + else => return null, + } +} diff --git a/src/AstGen.zig b/src/AstGen.zig index 375dafe880..20b1077420 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -12,7 +12,7 @@ const StringIndexContext = std.hash_map.StringIndexContext; const isPrimitive = std.zig.primitives.isPrimitive; -const Zir = @import("Zir.zig"); +const Zir = std.zig.Zir; const BuiltinFn = std.zig.BuiltinFn; const AstRlAnnotate = std.zig.AstRlAnnotate; diff --git a/src/Autodoc.zig b/src/Autodoc.zig index 6ede3637f8..f321fadbb7 100644 --- a/src/Autodoc.zig +++ b/src/Autodoc.zig @@ -9,7 +9,7 @@ const File = Zcu.File; const Module = @import("Package.zig").Module; const Tokenizer = std.zig.Tokenizer; const InternPool = @import("InternPool.zig"); -const Zir = @import("Zir.zig"); +const Zir = std.zig.Zir; const Ref = Zir.Inst.Ref; const log = std.log.scoped(.autodoc); const renderer = @import("autodoc/render_source.zig"); diff --git a/src/Compilation.zig b/src/Compilation.zig index 5fa93fa677..d4d2826880 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -35,7 +35,7 @@ const InternPool = @import("InternPool.zig"); const Cache = std.Build.Cache; const c_codegen = @import("codegen/c.zig"); const libtsan = @import("libtsan.zig"); -const Zir = @import("Zir.zig"); +const Zir = std.zig.Zir; const Autodoc = @import("Autodoc.zig"); const resinator = @import("resinator.zig"); const Builtin = @import("Builtin.zig"); diff --git a/src/InternPool.zig b/src/InternPool.zig index 70c332e9ac..5ff179cb74 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -338,7 +338,7 @@ const Hash = std.hash.Wyhash; const InternPool = @This(); const Module = @import("Module.zig"); const Zcu = Module; -const Zir = @import("Zir.zig"); +const Zir = std.zig.Zir; const KeyAdapter = struct { intern_pool: *const InternPool, diff --git a/src/Module.zig b/src/Module.zig index aef54be700..6316979f4c 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -26,7 +26,7 @@ const TypedValue = @import("TypedValue.zig"); const Package = @import("Package.zig"); const link = @import("link.zig"); const Air = @import("Air.zig"); -const Zir = @import("Zir.zig"); +const Zir = std.zig.Zir; const trace = @import("tracy.zig").trace; const AstGen = @import("AstGen.zig"); const Sema = @import("Sema.zig"); diff --git a/src/Sema.zig b/src/Sema.zig index 5c9d7cbb5c..8976848bef 100644 
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -148,7 +148,7 @@ const Value = @import("Value.zig");
 const Type = @import("type.zig").Type;
 const TypedValue = @import("TypedValue.zig");
 const Air = @import("Air.zig");
-const Zir = @import("Zir.zig");
+const Zir = std.zig.Zir;
 const Module = @import("Module.zig");
 const trace = @import("tracy.zig").trace;
 const Namespace = Module.Namespace;
diff --git a/src/Zir.zig b/src/Zir.zig
deleted file mode 100644
index 117212ccbb..0000000000
--- a/src/Zir.zig
+++ /dev/null
@@ -1,4090 +0,0 @@
-//! Zig Intermediate Representation. Astgen.zig converts AST nodes to these
-//! untyped IR instructions. Next, Sema.zig processes these into AIR.
-//! The minimum amount of information needed to represent a list of ZIR instructions.
-//! Once this structure is completed, it can be used to generate AIR, followed by
-//! machine code, without any memory access into the AST tree token list, node list,
-//! or source bytes. Exceptions include:
-//!  * Compile errors, which may need to reach into these data structures to
-//!    create a useful report.
-//!  * In the future, possibly inline assembly, which needs to get parsed and
-//!    handled by the codegen backend, and errors reported there. However for now,
-//!    inline assembly is not an exception.
-
-const std = @import("std");
-const builtin = @import("builtin");
-const mem = std.mem;
-const Allocator = std.mem.Allocator;
-const assert = std.debug.assert;
-const BigIntConst = std.math.big.int.Const;
-const BigIntMutable = std.math.big.int.Mutable;
-const Ast = std.zig.Ast;
-
-const Zir = @This();
-const LazySrcLoc = std.zig.LazySrcLoc;
-
-instructions: std.MultiArrayList(Inst).Slice,
-/// In order to store references to strings in fewer bytes, we copy all
-/// string bytes into here. String bytes can be null. It is up to whoever
-/// is referencing the data here whether they want to store both index and length,
-/// thus allowing null bytes, or store only index, and use null-termination. The
-/// `string_bytes` array is agnostic to either usage.
-/// Index 0 is reserved for special cases.
-string_bytes: []u8,
-/// The meaning of this data is determined by `Inst.Tag` value.
-/// The first few indexes are reserved. See `ExtraIndex` for the values.
-extra: []u32,
-
-/// The data stored at byte offset 0 when ZIR is stored in a file.
-pub const Header = extern struct {
-    instructions_len: u32,
-    string_bytes_len: u32,
-    extra_len: u32,
-    /// We could leave this as padding, however it triggers a Valgrind warning because
-    /// we read and write undefined bytes to the file system. This is harmless, but
-    /// it's essentially free to have a zero field here and makes the warning go away,
-    /// making it more likely that following Valgrind warnings will be taken seriously.
-    unused: u32 = 0,
-    stat_inode: std.fs.File.INode,
-    stat_size: u64,
-    stat_mtime: i128,
-};
-
-pub const ExtraIndex = enum(u32) {
-    /// If this is 0, no compile errors. Otherwise there is a `CompileErrors`
-    /// payload at this index.
-    compile_errors,
-    /// If this is 0, this file contains no imports. Otherwise there is an `Imports`
-    /// payload at this index.
-    imports,
-
-    _,
-};
-
-fn ExtraData(comptime T: type) type {
-    return struct { data: T, end: usize };
-}
-
-/// Returns the requested data, as well as the new index which is at the start of the
-/// trailers for the object.
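// [Aside: not part of this patch. `extraData` below is the workhorse for the
// "struct fields flattened into []u32" encoding: it walks T's fields with an
// `inline for` and re-types each u32 word. A standalone, runnable reduction
// that handles only u32 fields; `Pair` and the buffer contents are invented.]
const std = @import("std");

fn miniExtraData(comptime T: type, extra: []const u32, index: usize) struct { data: T, end: usize } {
    const fields = @typeInfo(T).Struct.fields;
    var i: usize = index;
    var result: T = undefined;
    inline for (fields) |field| {
        comptime std.debug.assert(field.type == u32); // the real one also handles Ref, packed flags, etc.
        @field(result, field.name) = extra[i];
        i += 1;
    }
    return .{ .data = result, .end = i };
}

test "decode a struct from the flat extra array" {
    const Pair = struct { lhs: u32, rhs: u32 };
    const extra = [_]u32{ 0xAA, 1, 2, 0xBB };
    const res = miniExtraData(Pair, &extra, 1);
    try std.testing.expectEqual(@as(u32, 1), res.data.lhs);
    try std.testing.expectEqual(@as(u32, 2), res.data.rhs);
    try std.testing.expectEqual(@as(usize, 3), res.end); // start of any trailing data
}
// [End of aside.]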
-pub fn extraData(code: Zir, comptime T: type, index: usize) ExtraData(T) { - const fields = @typeInfo(T).Struct.fields; - var i: usize = index; - var result: T = undefined; - inline for (fields) |field| { - @field(result, field.name) = switch (field.type) { - u32 => code.extra[i], - - Inst.Ref, - Inst.Index, - Inst.Declaration.Name, - NullTerminatedString, - => @enumFromInt(code.extra[i]), - - i32, - Inst.Call.Flags, - Inst.BuiltinCall.Flags, - Inst.SwitchBlock.Bits, - Inst.SwitchBlockErrUnion.Bits, - Inst.FuncFancy.Bits, - Inst.Declaration.Flags, - => @bitCast(code.extra[i]), - - else => @compileError("bad field type"), - }; - i += 1; - } - return .{ - .data = result, - .end = i, - }; -} - -pub const NullTerminatedString = enum(u32) { - empty = 0, - _, -}; - -/// Given an index into `string_bytes` returns the null-terminated string found there. -pub fn nullTerminatedString(code: Zir, index: NullTerminatedString) [:0]const u8 { - const start = @intFromEnum(index); - var end: u32 = start; - while (code.string_bytes[end] != 0) { - end += 1; - } - return code.string_bytes[start..end :0]; -} - -pub fn refSlice(code: Zir, start: usize, len: usize) []Inst.Ref { - return @ptrCast(code.extra[start..][0..len]); -} - -pub fn bodySlice(zir: Zir, start: usize, len: usize) []Inst.Index { - return @ptrCast(zir.extra[start..][0..len]); -} - -pub fn hasCompileErrors(code: Zir) bool { - return code.extra[@intFromEnum(ExtraIndex.compile_errors)] != 0; -} - -pub fn deinit(code: *Zir, gpa: Allocator) void { - code.instructions.deinit(gpa); - gpa.free(code.string_bytes); - gpa.free(code.extra); - code.* = undefined; -} - -/// These are untyped instructions generated from an Abstract Syntax Tree. -/// The data here is immutable because it is possible to have multiple -/// analyses on the same ZIR happening at the same time. -pub const Inst = struct { - tag: Tag, - data: Data, - - /// These names are used directly as the instruction names in the text format. - /// See `data_field_map` for a list of which `Data` fields are used by each `Tag`. - pub const Tag = enum(u8) { - /// Arithmetic addition, asserts no integer overflow. - /// Uses the `pl_node` union field. Payload is `Bin`. - add, - /// Twos complement wrapping integer addition. - /// Uses the `pl_node` union field. Payload is `Bin`. - addwrap, - /// Saturating addition. - /// Uses the `pl_node` union field. Payload is `Bin`. - add_sat, - /// The same as `add` except no safety check. - add_unsafe, - /// Arithmetic subtraction. Asserts no integer overflow. - /// Uses the `pl_node` union field. Payload is `Bin`. - sub, - /// Twos complement wrapping integer subtraction. - /// Uses the `pl_node` union field. Payload is `Bin`. - subwrap, - /// Saturating subtraction. - /// Uses the `pl_node` union field. Payload is `Bin`. - sub_sat, - /// Arithmetic multiplication. Asserts no integer overflow. - /// Uses the `pl_node` union field. Payload is `Bin`. - mul, - /// Twos complement wrapping integer multiplication. - /// Uses the `pl_node` union field. Payload is `Bin`. - mulwrap, - /// Saturating multiplication. - /// Uses the `pl_node` union field. Payload is `Bin`. - mul_sat, - /// Implements the `@divExact` builtin. - /// Uses the `pl_node` union field with payload `Bin`. - div_exact, - /// Implements the `@divFloor` builtin. - /// Uses the `pl_node` union field with payload `Bin`. - div_floor, - /// Implements the `@divTrunc` builtin. - /// Uses the `pl_node` union field with payload `Bin`. - div_trunc, - /// Implements the `@mod` builtin. 
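// [Aside: not part of this patch. Every tag above whose doc says "Payload is
// `Bin`" follows the same shape: the instruction carries only a payload index,
// and the two operands live as consecutive u32 words in `extra`. A standalone,
// runnable reduction; `MiniInst` and the operand values are invented.]
const std = @import("std");

const MiniInst = struct {
    tag: enum { add, sub },
    payload_index: u32,
};

test "a Bin-style payload lives in the extra array" {
    var extra = std.ArrayList(u32).init(std.testing.allocator);
    defer extra.deinit();

    // Encode: remember where the payload starts, then append lhs and rhs.
    const payload_index: u32 = @intCast(extra.items.len);
    try extra.appendSlice(&.{ 41, 42 });
    const inst: MiniInst = .{ .tag = .add, .payload_index = payload_index };

    // Decode: the consumer reads the operands back out of `extra`.
    try std.testing.expectEqual(@as(u32, 41), extra.items[inst.payload_index]); // lhs
    try std.testing.expectEqual(@as(u32, 42), extra.items[inst.payload_index + 1]); // rhs
}
// [End of aside.]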
- /// Uses the `pl_node` union field with payload `Bin`. - mod, - /// Implements the `@rem` builtin. - /// Uses the `pl_node` union field with payload `Bin`. - rem, - /// Ambiguously remainder division or modulus. If the computation would possibly have - /// a different value depending on whether the operation is remainder division or modulus, - /// a compile error is emitted. Otherwise the computation is performed. - /// Uses the `pl_node` union field. Payload is `Bin`. - mod_rem, - /// Integer shift-left. Zeroes are shifted in from the right hand side. - /// Uses the `pl_node` union field. Payload is `Bin`. - shl, - /// Implements the `@shlExact` builtin. - /// Uses the `pl_node` union field with payload `Bin`. - shl_exact, - /// Saturating shift-left. - /// Uses the `pl_node` union field. Payload is `Bin`. - shl_sat, - /// Integer shift-right. Arithmetic or logical depending on the signedness of - /// the integer type. - /// Uses the `pl_node` union field. Payload is `Bin`. - shr, - /// Implements the `@shrExact` builtin. - /// Uses the `pl_node` union field with payload `Bin`. - shr_exact, - - /// Declares a parameter of the current function. Used for: - /// * debug info - /// * checking shadowing against declarations in the current namespace - /// * parameter type expressions referencing other parameters - /// These occur in the block outside a function body (the same block as - /// contains the func instruction). - /// Uses the `pl_tok` field. Token is the parameter name, payload is a `Param`. - param, - /// Same as `param` except the parameter is marked comptime. - param_comptime, - /// Same as `param` except the parameter is marked anytype. - /// Uses the `str_tok` field. Token is the parameter name. String is the parameter name. - param_anytype, - /// Same as `param` except the parameter is marked both comptime and anytype. - /// Uses the `str_tok` field. Token is the parameter name. String is the parameter name. - param_anytype_comptime, - /// Array concatenation. `a ++ b` - /// Uses the `pl_node` union field. Payload is `Bin`. - array_cat, - /// Array multiplication `a ** b` - /// Uses the `pl_node` union field. Payload is `ArrayMul`. - array_mul, - /// `[N]T` syntax. No source location provided. - /// Uses the `pl_node` union field. Payload is `Bin`. lhs is length, rhs is element type. - array_type, - /// `[N:S]T` syntax. Source location is the array type expression node. - /// Uses the `pl_node` union field. Payload is `ArrayTypeSentinel`. - array_type_sentinel, - /// `@Vector` builtin. - /// Uses the `pl_node` union field with `Bin` payload. - /// lhs is length, rhs is element type. - vector_type, - /// Given a pointer type, returns its element type. Reaches through any optional or error - /// union types wrapping the pointer. Asserts that the underlying type is a pointer type. - /// Returns generic poison if the element type is `anyopaque`. - /// Uses the `un_node` field. - elem_type, - /// Given an indexable pointer (slice, many-ptr, single-ptr-to-array), returns its - /// element type. Emits a compile error if the type is not an indexable pointer. - /// Uses the `un_node` field. - indexable_ptr_elem_type, - /// Given a vector type, returns its element type. - /// Uses the `un_node` field. - vector_elem_type, - /// Given a pointer to an indexable object, returns the len property. This is - /// used by for loops. This instruction also emits a for-loop specific compile - /// error if the indexable object is not indexable. - /// Uses the `un_node` field. 
The AST node is the for loop node.
-        indexable_ptr_len,
-        /// Create an `anyframe->T` type.
-        /// Uses the `un_node` field.
-        anyframe_type,
-        /// Type coercion to the function's return type.
-        /// Uses the `pl_node` field. Payload is `As`. AST node could be many things.
-        as_node,
-        /// Same as `as_node` but ignores runtime to comptime int error.
-        as_shift_operand,
-        /// Bitwise AND. `&`
-        bit_and,
-        /// Reinterpret the memory representation of a value as a different type.
-        /// Uses the pl_node field with payload `Bin`.
-        bitcast,
-        /// Bitwise NOT. `~`
-        /// Uses `un_node`.
-        bit_not,
-        /// Bitwise OR. `|`
-        bit_or,
-        /// A labeled block of code, which can return a value.
-        /// Uses the `pl_node` union field. Payload is `Block`.
-        block,
-        /// Like `block`, but forces full evaluation of its contents at compile-time.
-        /// Uses the `pl_node` union field. Payload is `Block`.
-        block_comptime,
-        /// A list of instructions which are analyzed in the parent context, without
-        /// generating a runtime block. Must terminate with an "inline" variant of
-        /// a noreturn instruction.
-        /// Uses the `pl_node` union field. Payload is `Block`.
-        block_inline,
-        /// This instruction may only ever appear in the list of declarations for a
-        /// namespace type, e.g. within a `struct_decl` instruction. It represents a
-        /// single source declaration (`const`/`var`/`fn`), containing the name,
-        /// attributes, type, and value of the declaration.
-        /// Uses the `pl_node` union field. Payload is `Declaration`.
-        declaration,
-        /// Implements `suspend {...}`.
-        /// Uses the `pl_node` union field. Payload is `Block`.
-        suspend_block,
-        /// Boolean NOT. See also `bit_not`.
-        /// Uses the `un_node` field.
-        bool_not,
-        /// Short-circuiting boolean `and`. `lhs` is a boolean `Ref` and the other operand
-        /// is a block, which is evaluated if `lhs` is `true`.
-        /// Uses the `pl_node` union field. Payload is `BoolBr`.
-        bool_br_and,
-        /// Short-circuiting boolean `or`. `lhs` is a boolean `Ref` and the other operand
-        /// is a block, which is evaluated if `lhs` is `false`.
-        /// Uses the `pl_node` union field. Payload is `BoolBr`.
-        bool_br_or,
-        /// Return a value from a block.
-        /// Uses the `break` union field.
-        /// Uses the source information from previous instruction.
-        @"break",
-        /// Return a value from a block. This instruction is used as the terminator
-        /// of a `block_inline`. It allows using the return value from `Sema.analyzeBody`.
-        /// This instruction may also be used when it is known that there is only one
-        /// break instruction in a block, and the target block is the parent.
-        /// Uses the `break` union field.
-        break_inline,
-        /// Checks that comptime control flow does not happen inside a runtime block.
-        /// Uses the `un_node` union field.
-        check_comptime_control_flow,
-        /// Function call.
-        /// Uses the `pl_node` union field with payload `Call`.
-        /// AST node is the function call.
-        call,
-        /// Function call using `a.b()` syntax.
-        /// Uses the named field as the callee. If there is no such field, searches in the type for
-        /// a decl matching the field name. The decl is resolved and we ensure that it's a function
-        /// which can accept the object as the first parameter, with one pointer fixup. This
-        /// function is then used as the callee, with the object as an implicit first parameter.
-        /// Uses the `pl_node` union field with payload `FieldCall`.
-        /// AST node is the function call.
-        field_call,
-        /// Implements the `@call` builtin.
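// [Aside: not part of this patch. For orientation, the `block`/`break` pair
// above is what a labeled block expression lowers to, with `break` carrying
// the block's result value. A standalone, runnable illustration in source
// form.]
const std = @import("std");

test "a labeled block yields a value via break" {
    const x = blk: {
        const a: u32 = 40;
        break :blk a + 2; // lowers to a `break` targeting the `block`
    };
    try std.testing.expectEqual(@as(u32, 42), x);
}
// [End of aside.]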
- /// Uses the `pl_node` union field with payload `BuiltinCall`. - /// AST node is the builtin call. - builtin_call, - /// `<` - /// Uses the `pl_node` union field. Payload is `Bin`. - cmp_lt, - /// `<=` - /// Uses the `pl_node` union field. Payload is `Bin`. - cmp_lte, - /// `==` - /// Uses the `pl_node` union field. Payload is `Bin`. - cmp_eq, - /// `>=` - /// Uses the `pl_node` union field. Payload is `Bin`. - cmp_gte, - /// `>` - /// Uses the `pl_node` union field. Payload is `Bin`. - cmp_gt, - /// `!=` - /// Uses the `pl_node` union field. Payload is `Bin`. - cmp_neq, - /// Conditional branch. Splits control flow based on a boolean condition value. - /// Uses the `pl_node` union field. AST node is an if, while, for, etc. - /// Payload is `CondBr`. - condbr, - /// Same as `condbr`, except the condition is coerced to a comptime value, and - /// only the taken branch is analyzed. The then block and else block must - /// terminate with an "inline" variant of a noreturn instruction. - condbr_inline, - /// Given an operand which is an error union, splits control flow. In - /// case of error, control flow goes into the block that is part of this - /// instruction, which is guaranteed to end with a return instruction - /// and never breaks out of the block. - /// In the case of non-error, control flow proceeds to the next instruction - /// after the `try`, with the result of this instruction being the unwrapped - /// payload value, as if `err_union_payload_unsafe` was executed on the operand. - /// Uses the `pl_node` union field. Payload is `Try`. - @"try", - /// Same as `try` except the operand is a pointer and the result is a pointer. - try_ptr, - /// An error set type definition. Contains a list of field names. - /// Uses the `pl_node` union field. Payload is `ErrorSetDecl`. - error_set_decl, - error_set_decl_anon, - error_set_decl_func, - /// Declares the beginning of a statement. Used for debug info. - /// Uses the `dbg_stmt` union field. The line and column are offset - /// from the parent declaration. - dbg_stmt, - /// Marks a variable declaration. Used for debug info. - /// Uses the `str_op` union field. The string is the local variable name, - /// and the operand is the pointer to the variable's location. The local - /// may be a const or a var. - dbg_var_ptr, - /// Same as `dbg_var_ptr` but the local is always a const and the operand - /// is the local's value. - dbg_var_val, - /// Uses a name to identify a Decl and takes a pointer to it. - /// Uses the `str_tok` union field. - decl_ref, - /// Uses a name to identify a Decl and uses it as a value. - /// Uses the `str_tok` union field. - decl_val, - /// Load the value from a pointer. Assumes `x.*` syntax. - /// Uses `un_node` field. AST node is the `x.*` syntax. - load, - /// Arithmetic division. Asserts no integer overflow. - /// Uses the `pl_node` union field. Payload is `Bin`. - div, - /// Given a pointer to an array, slice, or pointer, returns a pointer to the element at - /// the provided index. - /// Uses the `pl_node` union field. AST node is a[b] syntax. Payload is `Bin`. - elem_ptr_node, - /// Same as `elem_ptr_node` but used only for for loop. - /// Uses the `pl_node` union field. AST node is the condition of a for loop. - /// Payload is `Bin`. - /// No OOB safety check is emitted. - elem_ptr, - /// Given an array, slice, or pointer, returns the element at the provided index. - /// Uses the `pl_node` union field. AST node is a[b] syntax. Payload is `Bin`. 
-        elem_val_node,
-        /// Same as `elem_val_node` but used only for for loop.
-        /// Uses the `pl_node` union field. AST node is the condition of a for loop.
-        /// Payload is `Bin`.
-        /// No OOB safety check is emitted.
-        elem_val,
-        /// Same as `elem_val` but takes the index as an immediate value.
-        /// No OOB safety check is emitted. A prior instruction must validate this operation.
-        /// Uses the `elem_val_imm` union field.
-        elem_val_imm,
-        /// Emits a compile error if the operand is not `void`.
-        /// Uses the `un_node` field.
-        ensure_result_used,
-        /// Emits a compile error if an error is ignored.
-        /// Uses the `un_node` field.
-        ensure_result_non_error,
-        /// Emits a compile error if the error union payload is not void.
-        ensure_err_union_payload_void,
-        /// Create an `E!T` type.
-        /// Uses the `pl_node` field with `Bin` payload.
-        error_union_type,
-        /// `error.Foo` syntax. Uses the `str_tok` field of the Data union.
-        error_value,
-        /// Implements the `@export` builtin function, based on either an identifier to a Decl,
-        /// or field access of a Decl. The thing being exported is the Decl.
-        /// Uses the `pl_node` union field. Payload is `Export`.
-        @"export",
-        /// Implements the `@export` builtin function, based on a comptime-known value.
-        /// The thing being exported is the comptime-known value which is the operand.
-        /// Uses the `pl_node` union field. Payload is `ExportValue`.
-        export_value,
-        /// Given a pointer to a struct or object that contains virtual fields, returns a pointer
-        /// to the named field. The field name is stored in string_bytes. Used by a.b syntax.
-        /// Uses `pl_node` field. The AST node is the a.b syntax. Payload is Field.
-        field_ptr,
-        /// Given a struct or object that contains virtual fields, returns the named field.
-        /// The field name is stored in string_bytes. Used by a.b syntax.
-        /// This instruction also accepts a pointer.
-        /// Uses `pl_node` field. The AST node is the a.b syntax. Payload is Field.
-        field_val,
-        /// Given a pointer to a struct or object that contains virtual fields, returns a pointer
-        /// to the named field. The field name is a comptime instruction. Used by @field.
-        /// Uses `pl_node` field. The AST node is the builtin call. Payload is FieldNamed.
-        field_ptr_named,
-        /// Given a struct or object that contains virtual fields, returns the named field.
-        /// The field name is a comptime instruction. Used by @field.
-        /// Uses `pl_node` field. The AST node is the builtin call. Payload is FieldNamed.
-        field_val_named,
-        /// Returns a function type, or a function instance, depending on whether
-        /// the body_len is 0. Calling convention is auto.
-        /// Uses the `pl_node` union field. `payload_index` points to a `Func`.
-        func,
-        /// Same as `func` but has an inferred error set.
-        func_inferred,
-        /// Represents a function declaration or function prototype, depending on
-        /// whether body_len is 0.
-        /// Uses the `pl_node` union field. `payload_index` points to a `FuncFancy`.
-        func_fancy,
-        /// Implements the `@import` builtin.
-        /// Uses the `str_tok` field.
-        import,
-        /// Integer literal that fits in a u64. Uses the `int` union field.
-        int,
-        /// Arbitrary sized integer literal. Uses the `str` union field.
-        int_big,
-        /// A float literal that fits in an f64. Uses the float union value.
-        float,
-        /// A float literal that fits in an f128. Uses the `pl_node` union value.
-        /// Payload is `Float128`.
-        float128,
-        /// Make an integer type out of signedness and bit count.
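// [Aside: not part of this patch. The `float128` tag above stores its value as
// the `Float128` payload (four u32 pieces, defined earlier in this file), and
// reassembly is shifts plus a bitcast. A standalone, runnable check of that
// round-trip; `pieceGet` mirrors `Float128.get`.]
const std = @import("std");

fn pieceGet(piece0: u32, piece1: u32, piece2: u32, piece3: u32) f128 {
    const int_bits = @as(u128, piece0) |
        (@as(u128, piece1) << 32) |
        (@as(u128, piece2) << 64) |
        (@as(u128, piece3) << 96);
    return @bitCast(int_bits);
}

test "f128 round-trips through four u32 pieces" {
    const x: f128 = 1.5;
    const bits: u128 = @bitCast(x);
    const y = pieceGet(
        @truncate(bits),
        @truncate(bits >> 32),
        @truncate(bits >> 64),
        @truncate(bits >> 96),
    );
    try std.testing.expectEqual(x, y);
}
// [End of aside.]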
- /// Payload is `int_type` - int_type, - /// Return a boolean false if an optional is null. `x != null` - /// Uses the `un_node` field. - is_non_null, - /// Return a boolean false if an optional is null. `x.* != null` - /// Uses the `un_node` field. - is_non_null_ptr, - /// Return a boolean false if the value is an error. - /// Uses the `un_node` field. - is_non_err, - /// Return a boolean false if the dereferenced pointer is an error. - /// Uses the `un_node` field. - is_non_err_ptr, - /// Same as `is_non_err` but doesn't validate that the type can be an error. - /// Uses the `un_node` field. - ret_is_non_err, - /// A labeled block of code that loops forever. The end of the body will have either - /// a `repeat` instruction or a `repeat_inline` instruction. - /// Uses the `pl_node` field. The AST node is either a for loop or while loop. - /// This ZIR instruction is needed because AIR does not (yet?) match ZIR, and Sema - /// needs to emit more than 1 AIR block for this instruction. - /// The payload is `Block`. - loop, - /// Sends runtime control flow back to the beginning of the current block. - /// Uses the `node` field. - repeat, - /// Sends comptime control flow back to the beginning of the current block. - /// Uses the `node` field. - repeat_inline, - /// Asserts that all the lengths provided match. Used to build a for loop. - /// Return value is the length as a usize. - /// Uses the `pl_node` field with payload `MultiOp`. - /// There is exactly one item corresponding to each AST node inside the for - /// loop condition. Any item may be `none`, indicating an unbounded range. - /// Illegal behaviors: - /// * If all lengths are unbounded ranges (always a compile error). - /// * If any two lengths do not match each other. - for_len, - /// Merge two error sets into one, `E1 || E2`. - /// Uses the `pl_node` field with payload `Bin`. - merge_error_sets, - /// Turns an R-Value into a const L-Value. In other words, it takes a value, - /// stores it in a memory location, and returns a const pointer to it. If the value - /// is `comptime`, the memory location is global static constant data. Otherwise, - /// the memory location is in the stack frame, local to the scope containing the - /// instruction. - /// Uses the `un_tok` union field. - ref, - /// Sends control flow back to the function's caller. - /// Includes an operand as the return value. - /// Includes an AST node source location. - /// Uses the `un_node` union field. - ret_node, - /// Sends control flow back to the function's caller. - /// The operand is a `ret_ptr` instruction, where the return value can be found. - /// Includes an AST node source location. - /// Uses the `un_node` union field. - ret_load, - /// Sends control flow back to the function's caller. - /// Includes an operand as the return value. - /// Includes a token source location. - /// Uses the `un_tok` union field. - ret_implicit, - /// Sends control flow back to the function's caller. - /// The return operand is `error.foo` where `foo` is given by the string. - /// If the current function has an inferred error set, the error given by the - /// name is added to it. - /// Uses the `str_tok` union field. - ret_err_value, - /// A string name is provided which is an anonymous error set value. - /// If the current function has an inferred error set, the error given by the - /// name is added to it. - /// Results in the error code. Note that control flow is not diverted with - /// this instruction; a following 'ret' instruction will do the diversion.
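// Illustrative sketch (ordinary Zig, not ZIR): the multi-object `for` that
// `for_len` above guards takes one length per operand, all of which must agree
// (a mismatch is an error); `0..` supplies the unbounded-range case:
fn zipSum(a: []const u32, b: []const u32) u32 {
    var total: u32 = 0;
    for (a, b, 0..) |x, y, i| {
        _ = i; // index comes from the unbounded range `0..`
        total += x + y; // for_len asserted a.len == b.len
    }
    return total;
}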
- /// Uses the `str_tok` union field. - ret_err_value_code, - /// Obtains a pointer to the return value. - /// Uses the `node` union field. - ret_ptr, - /// Obtains the return type of the in-scope function. - /// Uses the `node` union field. - ret_type, - /// Create a pointer type which can have a sentinel, alignment, address space, and/or bit range. - /// Uses the `ptr_type` union field. - ptr_type, - /// Slice operation `lhs[rhs..]`. No sentinel and no end offset. - /// Returns a pointer to the subslice. - /// Uses the `pl_node` field. AST node is the slice syntax. Payload is `SliceStart`. - slice_start, - /// Slice operation `array_ptr[start..end]`. No sentinel. - /// Returns a pointer to the subslice. - /// Uses the `pl_node` field. AST node is the slice syntax. Payload is `SliceEnd`. - slice_end, - /// Slice operation `array_ptr[start..end:sentinel]`. - /// Returns a pointer to the subslice. - /// Uses the `pl_node` field. AST node is the slice syntax. Payload is `SliceSentinel`. - slice_sentinel, - /// Slice operation `array_ptr[start..][0..len]`. Optional sentinel. - /// Returns a pointer to the subslice. - /// Uses the `pl_node` field. AST node is the slice syntax. Payload is `SliceLength`. - slice_length, - /// Same as `store` except provides a source location. - /// Uses the `pl_node` union field. Payload is `Bin`. - store_node, - /// Same as `store_node` but the type of the value being stored will be - /// used to infer the pointer type of an `alloc_inferred`. - /// Uses the `pl_node` union field. Payload is `Bin`. - store_to_inferred_ptr, - /// String Literal. Makes an anonymous Decl and then takes a pointer to it. - /// Uses the `str` union field. - str, - /// Arithmetic negation. Asserts no integer overflow. - /// Same as sub with a lhs of 0, split into a separate instruction to save memory. - /// Uses `un_node`. - negate, - /// Twos complement wrapping integer negation. - /// Same as subwrap with a lhs of 0, split into a separate instruction to save memory. - /// Uses `un_node`. - negate_wrap, - /// Returns the type of a value. - /// Uses the `un_node` field. - typeof, - /// Implements `@TypeOf` for one operand. - /// Uses the `pl_node` field. - typeof_builtin, - /// Given a value, look at the type of it, which must be an integer type. - /// Returns the integer type for the RHS of a shift operation. - /// Uses the `un_node` field. - typeof_log2_int_type, - /// Asserts control-flow will not reach this instruction (`unreachable`). - /// Uses the `@"unreachable"` union field. - @"unreachable", - /// Bitwise XOR. `^` - /// Uses the `pl_node` union field. Payload is `Bin`. - xor, - /// Create an optional type '?T' - /// Uses the `un_node` field. - optional_type, - /// ?T => T with safety. - /// Given an optional value, returns the payload value, with a safety check that - /// the value is non-null. Used for `orelse`, `if` and `while`. - /// Uses the `un_node` field. - optional_payload_safe, - /// ?T => T without safety. - /// Given an optional value, returns the payload value. No safety checks. - /// Uses the `un_node` field. - optional_payload_unsafe, - /// *?T => *T with safety. - /// Given a pointer to an optional value, returns a pointer to the payload value, - /// with a safety check that the value is non-null. Used for `orelse`, `if` and `while`. - /// Uses the `un_node` field. - optional_payload_safe_ptr, - /// *?T => *T without safety. - /// Given a pointer to an optional value, returns a pointer to the payload value. - /// No safety checks. 
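// Illustrative sketch (ordinary Zig, not ZIR) mapping slice syntax onto the
// four slice_* tags documented above:
fn sliceForms() void {
    const buf = [_]u8{ 1, 2, 3, 0, 5 };
    _ = buf[1..]; // slice_start: no end offset, no sentinel
    _ = buf[1..3]; // slice_end
    _ = buf[1..3 :0]; // slice_sentinel (buf[3] == 0 satisfies the check)
    _ = buf[1..][0..2]; // slice_length: start plus explicit length
}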
- /// Uses the `un_node` field. - optional_payload_unsafe_ptr, - /// E!T => T without safety. - /// Given an error union value, returns the payload value. No safety checks. - /// Uses the `un_node` field. - err_union_payload_unsafe, - /// *E!T => *T without safety. - /// Given a pointer to an error union value, returns a pointer to the payload value. - /// No safety checks. - /// Uses the `un_node` field. - err_union_payload_unsafe_ptr, - /// E!T => E without safety. - /// Given an error union value, returns the error code. No safety checks. - /// Uses the `un_node` field. - err_union_code, - /// *E!T => E without safety. - /// Given a pointer to an error union value, returns the error code. No safety checks. - /// Uses the `un_node` field. - err_union_code_ptr, - /// An enum literal. Uses the `str_tok` union field. - enum_literal, - /// A switch expression. Uses the `pl_node` union field. - /// AST node is the switch, payload is `SwitchBlock`. - switch_block, - /// A switch expression. Uses the `pl_node` union field. - /// AST node is the switch, payload is `SwitchBlock`. Operand is a pointer. - switch_block_ref, - /// A switch on an error union `a catch |err| switch (err) {...}`. - /// Uses the `pl_node` union field. AST node is the `catch`, payload is `SwitchBlockErrUnion`. - switch_block_err_union, - /// Check that the operand type supports the dereference operator (.*). - /// Uses the `un_node` field. - validate_deref, - /// Check that the operand's type is an array or tuple with the given number of elements. - /// Uses the `pl_node` field. Payload is `ValidateDestructure`. - validate_destructure, - /// Given a struct or union, and a field name as a Ref, - /// returns the field type. Uses the `pl_node` field. Payload is `FieldTypeRef`. - field_type_ref, - /// Given a pointer, initializes all error unions and optionals in the pointee to payloads, - /// returning the base payload pointer. For instance, converts *E!?T into a valid *T - /// (clobbering any existing error or null value). - /// Uses the `un_node` field. - opt_eu_base_ptr_init, - /// Coerce a given value such that when a reference is taken, the resulting pointer will be - /// coercible to the given type. For instance, given a value of type 'u32' and the pointer - /// type '*u64', coerces the value to a 'u64'. Asserts that the type is a pointer type. - /// Uses the `pl_node` field. Payload is `Bin`. - /// LHS is the pointer type, RHS is the value. - coerce_ptr_elem_ty, - /// Given a type, validate that it is a pointer type suitable for return from the address-of - /// operator. Emit a compile error if not. - /// Uses the `un_tok` union field. Token is the `&` operator. Operand is the type. - validate_ref_ty, - - // The following tags all relate to struct initialization expressions. - - /// A struct literal with a specified explicit type, with no fields. - /// Uses the `un_node` field. - struct_init_empty, - /// An anonymous struct literal with a known result type, with no fields. - /// Uses the `un_node` field. - struct_init_empty_result, - /// An anonymous struct literal with no fields, returned by reference, with a known result - /// type for the pointer. Asserts that the type is a pointer. - /// Uses the `un_node` field. - struct_init_empty_ref_result, - /// Struct initialization without a type. Creates a value of an anonymous struct type. - /// Uses the `pl_node` field. Payload is `StructInitAnon`.
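// Illustrative sketch (ordinary Zig, not ZIR) of the struct-init paths in this
// group: with no result type, a literal becomes an anonymous struct value; with
// a known result type it is validated first and then finalized by struct_init.
const S = struct { x: u32, y: u32 = 0 };
const anon = .{ .x = @as(u32, 1) }; // no result type: struct_init_anon path
const typed: S = .{ .x = 1 }; // known result type: validate_struct_init_ty + struct_init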
- struct_init_anon, - /// Finalizes a typed struct or union initialization, performs validation, and returns the - /// struct or union value. The given type must be validated prior to this instruction, using - /// `validate_struct_init_ty` or `validate_struct_init_result_ty`. If the given type is - /// generic poison, this is downgraded to an anonymous initialization. - /// Uses the `pl_node` field. Payload is `StructInit`. - struct_init, - /// Struct initialization syntax, making the result a pointer. Equivalent to `struct_init` - /// followed by `ref` - this ZIR tag exists as an optimization for a common pattern. - /// Uses the `pl_node` field. Payload is `StructInit`. - struct_init_ref, - /// Checks that the type supports struct init syntax. Always returns void. - /// Uses the `un_node` field. - validate_struct_init_ty, - /// Like `validate_struct_init_ty`, but additionally accepts types which structs coerce to. - /// Used on the known result type of a struct init expression. Always returns void. - /// Uses the `un_node` field. - validate_struct_init_result_ty, - /// Given a set of `struct_init_field_ptr` instructions, assumes they are all part of a - /// struct initialization expression, and emits compile errors for duplicate fields as well - /// as missing fields, if applicable. - /// This instruction asserts that there is at least one struct_init_field_ptr instruction, - /// because it must use one of them to find out the struct type. - /// Uses the `pl_node` field. Payload is `Block`. - validate_ptr_struct_init, - /// Given a type being used for a struct initialization expression, returns the type of the - /// field with the given name. - /// Uses the `pl_node` field. Payload is `FieldType`. - struct_init_field_type, - /// Given a pointer being used as the result pointer of a struct initialization expression, - /// return a pointer to the field of the given name. - /// Uses the `pl_node` field. The AST node is the field initializer. Payload is Field. - struct_init_field_ptr, - - // The following tags all relate to array initialization expressions. - - /// Array initialization without a type. Creates a value of a tuple type. - /// Uses the `pl_node` field. Payload is `MultiOp`. - array_init_anon, - /// Array initialization syntax with a known type. The given type must be validated prior to - /// this instruction, using some `validate_array_init_*_ty` instruction. - /// Uses the `pl_node` field. Payload is `MultiOp`, where the first operand is the type. - array_init, - /// Array initialization syntax, making the result a pointer. Equivalent to `array_init` - /// followed by `ref` - this ZIR tag exists as an optimization for a common pattern. - /// Uses the `pl_node` field. Payload is `MultiOp`, where the first operand is the type. - array_init_ref, - /// Checks that the type supports array init syntax. Always returns void. - /// Uses the `pl_node` field. Payload is `ArrayInit`. - validate_array_init_ty, - /// Like `validate_array_init_ty`, but additionally accepts types which arrays coerce to. - /// Used on the known result type of an array init expression. Always returns void. - /// Uses the `pl_node` field. Payload is `ArrayInit`. - validate_array_init_result_ty, - /// Given a pointer or slice type and an element count, return the expected type of an array - /// initializer such that a pointer to the initializer has the given pointer type, checking - /// that this type supports array init syntax and emitting a compile error if not.
Preserves - /// error union and optional wrappers on the array type, if any. - /// Asserts that the given type is a pointer or slice type. - /// Uses the `pl_node` field. Payload is `ArrayInitRefTy`. - validate_array_init_ref_ty, - /// Given a set of `array_init_elem_ptr` instructions, assumes they are all part of an array - /// initialization expression, and emits a compile error if the number of elements does not - /// match the array type. - /// This instruction asserts that there is at least one `array_init_elem_ptr` instruction, - /// because it must use one of them to find out the array type. - /// Uses the `pl_node` field. Payload is `Block`. - validate_ptr_array_init, - /// Given a type being used for an array initialization expression, returns the type of the - /// element at the given index. - /// Uses the `bin` union field. lhs is the indexable type, rhs is the index. - array_init_elem_type, - /// Given a pointer being used as the result pointer of an array initialization expression, - /// return a pointer to the element at the given index. - /// Uses the `pl_node` union field. AST node is an element inside array initialization - /// syntax. Payload is `ElemPtrImm`. - array_init_elem_ptr, - - /// Implements the `@unionInit` builtin. - /// Uses the `pl_node` field. Payload is `UnionInit`. - union_init, - /// Implements the `@typeInfo` builtin. Uses `un_node`. - type_info, - /// Implements the `@sizeOf` builtin. Uses `un_node`. - size_of, - /// Implements the `@bitSizeOf` builtin. Uses `un_node`. - bit_size_of, - - /// Implement builtin `@intFromPtr`. Uses `un_node`. - /// Convert a pointer to a `usize` integer. - int_from_ptr, - /// Emit an error message and fail compilation. - /// Uses the `un_node` field. - compile_error, - /// Changes the maximum number of backwards branches that compile-time - /// code execution can use before giving up and making a compile error. - /// Uses the `un_node` union field. - set_eval_branch_quota, - /// Converts an enum value into an integer. Resulting type will be the tag type - /// of the enum. Uses `un_node`. - int_from_enum, - /// Implement builtin `@alignOf`. Uses `un_node`. - align_of, - /// Implement builtin `@intFromBool`. Uses `un_node`. - int_from_bool, - /// Implement builtin `@embedFile`. Uses `un_node`. - embed_file, - /// Implement builtin `@errorName`. Uses `un_node`. - error_name, - /// Implement builtin `@panic`. Uses `un_node`. - panic, - /// Implements `@trap`. - /// Uses the `node` field. - trap, - /// Implement builtin `@setRuntimeSafety`. Uses `un_node`. - set_runtime_safety, - /// Implement builtin `@sqrt`. Uses `un_node`. - sqrt, - /// Implement builtin `@sin`. Uses `un_node`. - sin, - /// Implement builtin `@cos`. Uses `un_node`. - cos, - /// Implement builtin `@tan`. Uses `un_node`. - tan, - /// Implement builtin `@exp`. Uses `un_node`. - exp, - /// Implement builtin `@exp2`. Uses `un_node`. - exp2, - /// Implement builtin `@log`. Uses `un_node`. - log, - /// Implement builtin `@log2`. Uses `un_node`. - log2, - /// Implement builtin `@log10`. Uses `un_node`. - log10, - /// Implement builtin `@abs`. Uses `un_node`. - abs, - /// Implement builtin `@floor`. Uses `un_node`. - floor, - /// Implement builtin `@ceil`. Uses `un_node`. - ceil, - /// Implement builtin `@trunc`. Uses `un_node`. - trunc, - /// Implement builtin `@round`. Uses `un_node`. - round, - /// Implement builtin `@tagName`. Uses `un_node`. - tag_name, - /// Implement builtin `@typeName`. Uses `un_node`. - type_name, - /// Implement builtin `@Frame`. 
Uses `un_node`. - frame_type, - /// Implement builtin `@frameSize`. Uses `un_node`. - frame_size, - - /// Implements the `@intFromFloat` builtin. - /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand. - int_from_float, - /// Implements the `@floatFromInt` builtin. - /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand. - float_from_int, - /// Implements the `@ptrFromInt` builtin. - /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand. - ptr_from_int, - /// Converts an integer into an enum value. - /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand. - enum_from_int, - /// Convert a larger float type to any other float type, possibly causing - /// a loss of precision. - /// Uses the `pl_node` field. AST is the `@floatCast` syntax. - /// Payload is `Bin` with lhs as the dest type, rhs the operand. - float_cast, - /// Implements the `@intCast` builtin. - /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand. - /// Convert an integer value to another integer type, asserting that the destination type - /// can hold the same mathematical value. - int_cast, - /// Implements the `@ptrCast` builtin. - /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand. - /// Not every `@ptrCast` will correspond to this instruction - see also - /// `ptr_cast_full` in `Extended`. - ptr_cast, - /// Implements the `@truncate` builtin. - /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand. - truncate, - - /// Implements the `@hasDecl` builtin. - /// Uses the `pl_node` union field. Payload is `Bin`. - has_decl, - /// Implements the `@hasField` builtin. - /// Uses the `pl_node` union field. Payload is `Bin`. - has_field, - - /// Implements the `@clz` builtin. Uses the `un_node` union field. - clz, - /// Implements the `@ctz` builtin. Uses the `un_node` union field. - ctz, - /// Implements the `@popCount` builtin. Uses the `un_node` union field. - pop_count, - /// Implements the `@byteSwap` builtin. Uses the `un_node` union field. - byte_swap, - /// Implements the `@bitReverse` builtin. Uses the `un_node` union field. - bit_reverse, - - /// Implements the `@bitOffsetOf` builtin. - /// Uses the `pl_node` union field with payload `Bin`. - bit_offset_of, - /// Implements the `@offsetOf` builtin. - /// Uses the `pl_node` union field with payload `Bin`. - offset_of, - /// Implements the `@splat` builtin. - /// Uses the `pl_node` union field with payload `Bin`. - splat, - /// Implements the `@reduce` builtin. - /// Uses the `pl_node` union field with payload `Bin`. - reduce, - /// Implements the `@shuffle` builtin. - /// Uses the `pl_node` union field with payload `Shuffle`. - shuffle, - /// Implements the `@atomicLoad` builtin. - /// Uses the `pl_node` union field with payload `AtomicLoad`. - atomic_load, - /// Implements the `@atomicRmw` builtin. - /// Uses the `pl_node` union field with payload `AtomicRmw`. - atomic_rmw, - /// Implements the `@atomicStore` builtin. - /// Uses the `pl_node` union field with payload `AtomicStore`. - atomic_store, - /// Implements the `@mulAdd` builtin. - /// Uses the `pl_node` union field with payload `MulAdd`. - /// The addend communicates the type of the builtin. - /// The mulends need to be coerced to the same type. - mul_add, - /// Implements the `@fieldParentPtr` builtin. - /// Uses the `pl_node` union field with payload `FieldParentPtr`. - field_parent_ptr, - /// Implements the `@memcpy` builtin. 
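// Illustrative sketch (ordinary Zig, not ZIR) of the difference between the
// `int_cast` and `truncate` tags documented above: @intCast asserts the value
// fits in the destination type, while @truncate simply drops the high bits.
fn casts(x: u32) struct { u8, u8 } {
    const a: u8 = @intCast(x); // safety-checked: asserts x <= 255
    const b: u8 = @truncate(x); // keeps only the low 8 bits
    return .{ a, b };
}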
- /// Uses the `pl_node` union field with payload `Bin`. - memcpy, - /// Implements the `@memset` builtin. - /// Uses the `pl_node` union field with payload `Bin`. - memset, - /// Implements the `@min` builtin for 2 args. - /// Uses the `pl_node` union field with payload `Bin` - min, - /// Implements the `@max` builtin for 2 args. - /// Uses the `pl_node` union field with payload `Bin` - max, - /// Implements the `@cImport` builtin. - /// Uses the `pl_node` union field with payload `Block`. - c_import, - - /// Allocates stack local memory. - /// Uses the `un_node` union field. The operand is the type of the allocated object. - /// The node source location points to a var decl node. - /// A `make_ptr_const` instruction should be used once the value has - /// been stored to the allocation. To ensure comptime value detection - /// functions, there are some restrictions on how this pointer should be - /// used prior to the `make_ptr_const` instruction: no pointer derived - /// from this `alloc` may be returned from a block or stored to another - /// address. In other words, it must be trivial to determine whether any - /// given pointer derives from this one. - alloc, - /// Same as `alloc` except mutable. As such, `make_ptr_const` need not be used, - /// and there are no restrictions on the usage of the pointer. - alloc_mut, - /// Allocates comptime-mutable memory. - /// Uses the `un_node` union field. The operand is the type of the allocated object. - /// The node source location points to a var decl node. - alloc_comptime_mut, - /// Same as `alloc` except the type is inferred. - /// Uses the `node` union field. - alloc_inferred, - /// Same as `alloc_inferred` except mutable. - alloc_inferred_mut, - /// Allocates comptime const memory. - /// Uses the `node` union field. The type of the allocated object is inferred. - /// The node source location points to a var decl node. - alloc_inferred_comptime, - /// Same as `alloc_comptime_mut` except the type is inferred. - alloc_inferred_comptime_mut, - /// Each `store_to_inferred_ptr` puts the type of the stored value into a set, - /// and then `resolve_inferred_alloc` triggers peer type resolution on the set. - /// The operand is a `alloc_inferred` or `alloc_inferred_mut` instruction, which - /// is the allocation that needs to have its type inferred. - /// Uses the `un_node` field. The AST node is the var decl. - resolve_inferred_alloc, - /// Turns a pointer coming from an `alloc` or `Extended.alloc` into a constant - /// version of the same pointer. For inferred allocations this is instead implicitly - /// handled by the `resolve_inferred_alloc` instruction. - /// Uses the `un_node` union field. - make_ptr_const, - - /// Implements `resume` syntax. Uses `un_node` field. - @"resume", - @"await", - - /// When a type or function refers to a comptime value from an outer - /// scope, that forms a closure over comptime value. The outer scope - /// will record a capture of that value, which encodes its current state - /// and marks it to persist. Uses `un_tok` field. Operand is the - /// instruction value to capture. - closure_capture, - /// The inner scope of a closure uses closure_get to retrieve the value - /// stored by the outer scope. Uses `inst_node` field. Operand is the - /// closure_capture instruction ref. - closure_get, - - /// A defer statement. - /// Uses the `defer` union field. - @"defer", - /// An errdefer statement with a code. - /// Uses the `err_defer_code` union field. 
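// Illustrative sketch (ordinary Zig, not ZIR; `acquire`, `release`, and `use`
// are hypothetical helpers, and `std` is assumed in scope) of the two defer
// forms above: a plain `defer` uses the `defer` tag, while `errdefer |err|`
// carries an error capture, the defer_err_code case:
fn demo() !void {
    const r = try acquire();
    defer release(r); // `defer` tag
    errdefer |err| std.log.err("demo failed: {s}", .{@errorName(err)}); // defer_err_code
    try use(r);
}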
- defer_err_code, - - /// Requests that Sema update the saved error return trace index for the enclosing - /// block, if the operand is .none or of an error/error-union type. - /// Uses the `save_err_ret_index` field. - save_err_ret_index, - /// Specialized form of `Extended.restore_err_ret_index`. - /// Unconditionally restores the error return index to its last saved state - /// in the block referred to by `operand`. If `operand` is `none`, restores - /// to the point of function entry. - /// Uses the `un_node` field. - restore_err_ret_index_unconditional, - /// Specialized form of `Extended.restore_err_ret_index`. - /// Restores the error return index to its state at the entry of - /// the current function conditional on `operand` being a non-error. - /// If `operand` is `none`, restores unconditionally. - /// Uses the `un_node` field. - restore_err_ret_index_fn_entry, - - /// The ZIR instruction tag is one of the `Extended` ones. - /// Uses the `extended` union field. - extended, - - /// Returns whether the instruction is one of the control flow "noreturn" types. - /// Function calls do not count. - pub fn isNoReturn(tag: Tag) bool { - return switch (tag) { - .param, - .param_comptime, - .param_anytype, - .param_anytype_comptime, - .add, - .addwrap, - .add_sat, - .add_unsafe, - .alloc, - .alloc_mut, - .alloc_comptime_mut, - .alloc_inferred, - .alloc_inferred_mut, - .alloc_inferred_comptime, - .alloc_inferred_comptime_mut, - .make_ptr_const, - .array_cat, - .array_mul, - .array_type, - .array_type_sentinel, - .vector_type, - .elem_type, - .indexable_ptr_elem_type, - .vector_elem_type, - .indexable_ptr_len, - .anyframe_type, - .as_node, - .as_shift_operand, - .bit_and, - .bitcast, - .bit_or, - .block, - .block_comptime, - .block_inline, - .declaration, - .suspend_block, - .loop, - .bool_br_and, - .bool_br_or, - .bool_not, - .call, - .field_call, - .cmp_lt, - .cmp_lte, - .cmp_eq, - .cmp_gte, - .cmp_gt, - .cmp_neq, - .error_set_decl, - .error_set_decl_anon, - .error_set_decl_func, - .dbg_stmt, - .dbg_var_ptr, - .dbg_var_val, - .decl_ref, - .decl_val, - .load, - .div, - .elem_ptr, - .elem_val, - .elem_ptr_node, - .elem_val_node, - .elem_val_imm, - .ensure_result_used, - .ensure_result_non_error, - .ensure_err_union_payload_void, - .@"export", - .export_value, - .field_ptr, - .field_val, - .field_ptr_named, - .field_val_named, - .func, - .func_inferred, - .func_fancy, - .has_decl, - .int, - .int_big, - .float, - .float128, - .int_type, - .is_non_null, - .is_non_null_ptr, - .is_non_err, - .is_non_err_ptr, - .ret_is_non_err, - .mod_rem, - .mul, - .mulwrap, - .mul_sat, - .ref, - .shl, - .shl_sat, - .shr, - .store_node, - .store_to_inferred_ptr, - .str, - .sub, - .subwrap, - .sub_sat, - .negate, - .negate_wrap, - .typeof, - .typeof_builtin, - .xor, - .optional_type, - .optional_payload_safe, - .optional_payload_unsafe, - .optional_payload_safe_ptr, - .optional_payload_unsafe_ptr, - .err_union_payload_unsafe, - .err_union_payload_unsafe_ptr, - .err_union_code, - .err_union_code_ptr, - .ptr_type, - .enum_literal, - .merge_error_sets, - .error_union_type, - .bit_not, - .error_value, - .slice_start, - .slice_end, - .slice_sentinel, - .slice_length, - .import, - .typeof_log2_int_type, - .resolve_inferred_alloc, - .set_eval_branch_quota, - .switch_block, - .switch_block_ref, - .switch_block_err_union, - .validate_deref, - .validate_destructure, - .union_init, - .field_type_ref, - .enum_from_int, - .int_from_enum, - .type_info, - .size_of, - .bit_size_of, - .int_from_ptr, - .align_of, - .int_from_bool, 
- .embed_file, - .error_name, - .set_runtime_safety, - .sqrt, - .sin, - .cos, - .tan, - .exp, - .exp2, - .log, - .log2, - .log10, - .abs, - .floor, - .ceil, - .trunc, - .round, - .tag_name, - .type_name, - .frame_type, - .frame_size, - .int_from_float, - .float_from_int, - .ptr_from_int, - .float_cast, - .int_cast, - .ptr_cast, - .truncate, - .has_field, - .clz, - .ctz, - .pop_count, - .byte_swap, - .bit_reverse, - .div_exact, - .div_floor, - .div_trunc, - .mod, - .rem, - .shl_exact, - .shr_exact, - .bit_offset_of, - .offset_of, - .splat, - .reduce, - .shuffle, - .atomic_load, - .atomic_rmw, - .atomic_store, - .mul_add, - .builtin_call, - .field_parent_ptr, - .max, - .memcpy, - .memset, - .min, - .c_import, - .@"resume", - .@"await", - .ret_err_value_code, - .extended, - .closure_get, - .closure_capture, - .ret_ptr, - .ret_type, - .@"try", - .try_ptr, - .@"defer", - .defer_err_code, - .save_err_ret_index, - .for_len, - .opt_eu_base_ptr_init, - .coerce_ptr_elem_ty, - .struct_init_empty, - .struct_init_empty_result, - .struct_init_empty_ref_result, - .struct_init_anon, - .struct_init, - .struct_init_ref, - .validate_struct_init_ty, - .validate_struct_init_result_ty, - .validate_ptr_struct_init, - .struct_init_field_type, - .struct_init_field_ptr, - .array_init_anon, - .array_init, - .array_init_ref, - .validate_array_init_ty, - .validate_array_init_result_ty, - .validate_array_init_ref_ty, - .validate_ptr_array_init, - .array_init_elem_type, - .array_init_elem_ptr, - .validate_ref_ty, - .restore_err_ret_index_unconditional, - .restore_err_ret_index_fn_entry, - => false, - - .@"break", - .break_inline, - .condbr, - .condbr_inline, - .compile_error, - .ret_node, - .ret_load, - .ret_implicit, - .ret_err_value, - .@"unreachable", - .repeat, - .repeat_inline, - .panic, - .trap, - .check_comptime_control_flow, - => true, - }; - } - - pub fn isParam(tag: Tag) bool { - return switch (tag) { - .param, - .param_comptime, - .param_anytype, - .param_anytype_comptime, - => true, - - else => false, - }; - } - - /// AstGen uses this to find out if `Ref.void_value` should be used in place - /// of the result of a given instruction. This allows Sema to forego adding - /// the instruction to the map after analysis. 
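// Illustrative usage sketch for the queries above (`emit` is a hypothetical
// lowering step): once a noreturn tag is seen, nothing later in the block runs.
fn lowerBody(tags: []const Tag) void {
    for (tags) |tag| {
        emit(tag);
        if (tag.isNoReturn()) break; // e.g. `ret_node`, `condbr`, `panic`
    }
}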
- pub fn isAlwaysVoid(tag: Tag, data: Data) bool { - return switch (tag) { - .dbg_stmt, - .dbg_var_ptr, - .dbg_var_val, - .ensure_result_used, - .ensure_result_non_error, - .ensure_err_union_payload_void, - .set_eval_branch_quota, - .atomic_store, - .store_node, - .store_to_inferred_ptr, - .resolve_inferred_alloc, - .validate_deref, - .validate_destructure, - .@"export", - .export_value, - .set_runtime_safety, - .memcpy, - .memset, - .check_comptime_control_flow, - .@"defer", - .defer_err_code, - .save_err_ret_index, - .restore_err_ret_index_unconditional, - .restore_err_ret_index_fn_entry, - .validate_struct_init_ty, - .validate_struct_init_result_ty, - .validate_ptr_struct_init, - .validate_array_init_ty, - .validate_array_init_result_ty, - .validate_ptr_array_init, - .validate_ref_ty, - => true, - - .param, - .param_comptime, - .param_anytype, - .param_anytype_comptime, - .add, - .addwrap, - .add_sat, - .add_unsafe, - .alloc, - .alloc_mut, - .alloc_comptime_mut, - .alloc_inferred, - .alloc_inferred_mut, - .alloc_inferred_comptime, - .alloc_inferred_comptime_mut, - .make_ptr_const, - .array_cat, - .array_mul, - .array_type, - .array_type_sentinel, - .vector_type, - .elem_type, - .indexable_ptr_elem_type, - .vector_elem_type, - .indexable_ptr_len, - .anyframe_type, - .as_node, - .as_shift_operand, - .bit_and, - .bitcast, - .bit_or, - .block, - .block_comptime, - .block_inline, - .declaration, - .suspend_block, - .loop, - .bool_br_and, - .bool_br_or, - .bool_not, - .call, - .field_call, - .cmp_lt, - .cmp_lte, - .cmp_eq, - .cmp_gte, - .cmp_gt, - .cmp_neq, - .error_set_decl, - .error_set_decl_anon, - .error_set_decl_func, - .decl_ref, - .decl_val, - .load, - .div, - .elem_ptr, - .elem_val, - .elem_ptr_node, - .elem_val_node, - .elem_val_imm, - .field_ptr, - .field_val, - .field_ptr_named, - .field_val_named, - .func, - .func_inferred, - .func_fancy, - .has_decl, - .int, - .int_big, - .float, - .float128, - .int_type, - .is_non_null, - .is_non_null_ptr, - .is_non_err, - .is_non_err_ptr, - .ret_is_non_err, - .mod_rem, - .mul, - .mulwrap, - .mul_sat, - .ref, - .shl, - .shl_sat, - .shr, - .str, - .sub, - .subwrap, - .sub_sat, - .negate, - .negate_wrap, - .typeof, - .typeof_builtin, - .xor, - .optional_type, - .optional_payload_safe, - .optional_payload_unsafe, - .optional_payload_safe_ptr, - .optional_payload_unsafe_ptr, - .err_union_payload_unsafe, - .err_union_payload_unsafe_ptr, - .err_union_code, - .err_union_code_ptr, - .ptr_type, - .enum_literal, - .merge_error_sets, - .error_union_type, - .bit_not, - .error_value, - .slice_start, - .slice_end, - .slice_sentinel, - .slice_length, - .import, - .typeof_log2_int_type, - .switch_block, - .switch_block_ref, - .switch_block_err_union, - .union_init, - .field_type_ref, - .enum_from_int, - .int_from_enum, - .type_info, - .size_of, - .bit_size_of, - .int_from_ptr, - .align_of, - .int_from_bool, - .embed_file, - .error_name, - .sqrt, - .sin, - .cos, - .tan, - .exp, - .exp2, - .log, - .log2, - .log10, - .abs, - .floor, - .ceil, - .trunc, - .round, - .tag_name, - .type_name, - .frame_type, - .frame_size, - .int_from_float, - .float_from_int, - .ptr_from_int, - .float_cast, - .int_cast, - .ptr_cast, - .truncate, - .has_field, - .clz, - .ctz, - .pop_count, - .byte_swap, - .bit_reverse, - .div_exact, - .div_floor, - .div_trunc, - .mod, - .rem, - .shl_exact, - .shr_exact, - .bit_offset_of, - .offset_of, - .splat, - .reduce, - .shuffle, - .atomic_load, - .atomic_rmw, - .mul_add, - .builtin_call, - .field_parent_ptr, - .max, - .min, - .c_import, - 
.@"resume", - .@"await", - .ret_err_value_code, - .closure_get, - .closure_capture, - .@"break", - .break_inline, - .condbr, - .condbr_inline, - .compile_error, - .ret_node, - .ret_load, - .ret_implicit, - .ret_err_value, - .ret_ptr, - .ret_type, - .@"unreachable", - .repeat, - .repeat_inline, - .panic, - .trap, - .for_len, - .@"try", - .try_ptr, - .opt_eu_base_ptr_init, - .coerce_ptr_elem_ty, - .struct_init_empty, - .struct_init_empty_result, - .struct_init_empty_ref_result, - .struct_init_anon, - .struct_init, - .struct_init_ref, - .struct_init_field_type, - .struct_init_field_ptr, - .array_init_anon, - .array_init, - .array_init_ref, - .validate_array_init_ref_ty, - .array_init_elem_type, - .array_init_elem_ptr, - => false, - - .extended => switch (data.extended.opcode) { - .fence, .set_cold, .breakpoint => true, - else => false, - }, - }; - } - - /// Used by debug safety-checking code. - pub const data_tags = list: { - @setEvalBranchQuota(2000); - break :list std.enums.directEnumArray(Tag, Data.FieldEnum, 0, .{ - .add = .pl_node, - .addwrap = .pl_node, - .add_sat = .pl_node, - .add_unsafe = .pl_node, - .sub = .pl_node, - .subwrap = .pl_node, - .sub_sat = .pl_node, - .mul = .pl_node, - .mulwrap = .pl_node, - .mul_sat = .pl_node, - - .param = .pl_tok, - .param_comptime = .pl_tok, - .param_anytype = .str_tok, - .param_anytype_comptime = .str_tok, - .array_cat = .pl_node, - .array_mul = .pl_node, - .array_type = .pl_node, - .array_type_sentinel = .pl_node, - .vector_type = .pl_node, - .elem_type = .un_node, - .indexable_ptr_elem_type = .un_node, - .vector_elem_type = .un_node, - .indexable_ptr_len = .un_node, - .anyframe_type = .un_node, - .as_node = .pl_node, - .as_shift_operand = .pl_node, - .bit_and = .pl_node, - .bitcast = .pl_node, - .bit_not = .un_node, - .bit_or = .pl_node, - .block = .pl_node, - .block_comptime = .pl_node, - .block_inline = .pl_node, - .declaration = .pl_node, - .suspend_block = .pl_node, - .bool_not = .un_node, - .bool_br_and = .pl_node, - .bool_br_or = .pl_node, - .@"break" = .@"break", - .break_inline = .@"break", - .check_comptime_control_flow = .un_node, - .for_len = .pl_node, - .call = .pl_node, - .field_call = .pl_node, - .cmp_lt = .pl_node, - .cmp_lte = .pl_node, - .cmp_eq = .pl_node, - .cmp_gte = .pl_node, - .cmp_gt = .pl_node, - .cmp_neq = .pl_node, - .condbr = .pl_node, - .condbr_inline = .pl_node, - .@"try" = .pl_node, - .try_ptr = .pl_node, - .error_set_decl = .pl_node, - .error_set_decl_anon = .pl_node, - .error_set_decl_func = .pl_node, - .dbg_stmt = .dbg_stmt, - .dbg_var_ptr = .str_op, - .dbg_var_val = .str_op, - .decl_ref = .str_tok, - .decl_val = .str_tok, - .load = .un_node, - .div = .pl_node, - .elem_ptr = .pl_node, - .elem_ptr_node = .pl_node, - .elem_val = .pl_node, - .elem_val_node = .pl_node, - .elem_val_imm = .elem_val_imm, - .ensure_result_used = .un_node, - .ensure_result_non_error = .un_node, - .ensure_err_union_payload_void = .un_node, - .error_union_type = .pl_node, - .error_value = .str_tok, - .@"export" = .pl_node, - .export_value = .pl_node, - .field_ptr = .pl_node, - .field_val = .pl_node, - .field_ptr_named = .pl_node, - .field_val_named = .pl_node, - .func = .pl_node, - .func_inferred = .pl_node, - .func_fancy = .pl_node, - .import = .str_tok, - .int = .int, - .int_big = .str, - .float = .float, - .float128 = .pl_node, - .int_type = .int_type, - .is_non_null = .un_node, - .is_non_null_ptr = .un_node, - .is_non_err = .un_node, - .is_non_err_ptr = .un_node, - .ret_is_non_err = .un_node, - .loop = .pl_node, - .repeat = .node, - 
.repeat_inline = .node, - .merge_error_sets = .pl_node, - .mod_rem = .pl_node, - .ref = .un_tok, - .ret_node = .un_node, - .ret_load = .un_node, - .ret_implicit = .un_tok, - .ret_err_value = .str_tok, - .ret_err_value_code = .str_tok, - .ret_ptr = .node, - .ret_type = .node, - .ptr_type = .ptr_type, - .slice_start = .pl_node, - .slice_end = .pl_node, - .slice_sentinel = .pl_node, - .slice_length = .pl_node, - .store_node = .pl_node, - .store_to_inferred_ptr = .pl_node, - .str = .str, - .negate = .un_node, - .negate_wrap = .un_node, - .typeof = .un_node, - .typeof_log2_int_type = .un_node, - .@"unreachable" = .@"unreachable", - .xor = .pl_node, - .optional_type = .un_node, - .optional_payload_safe = .un_node, - .optional_payload_unsafe = .un_node, - .optional_payload_safe_ptr = .un_node, - .optional_payload_unsafe_ptr = .un_node, - .err_union_payload_unsafe = .un_node, - .err_union_payload_unsafe_ptr = .un_node, - .err_union_code = .un_node, - .err_union_code_ptr = .un_node, - .enum_literal = .str_tok, - .switch_block = .pl_node, - .switch_block_ref = .pl_node, - .switch_block_err_union = .pl_node, - .validate_deref = .un_node, - .validate_destructure = .pl_node, - .field_type_ref = .pl_node, - .union_init = .pl_node, - .type_info = .un_node, - .size_of = .un_node, - .bit_size_of = .un_node, - .opt_eu_base_ptr_init = .un_node, - .coerce_ptr_elem_ty = .pl_node, - .validate_ref_ty = .un_tok, - - .int_from_ptr = .un_node, - .compile_error = .un_node, - .set_eval_branch_quota = .un_node, - .int_from_enum = .un_node, - .align_of = .un_node, - .int_from_bool = .un_node, - .embed_file = .un_node, - .error_name = .un_node, - .panic = .un_node, - .trap = .node, - .set_runtime_safety = .un_node, - .sqrt = .un_node, - .sin = .un_node, - .cos = .un_node, - .tan = .un_node, - .exp = .un_node, - .exp2 = .un_node, - .log = .un_node, - .log2 = .un_node, - .log10 = .un_node, - .abs = .un_node, - .floor = .un_node, - .ceil = .un_node, - .trunc = .un_node, - .round = .un_node, - .tag_name = .un_node, - .type_name = .un_node, - .frame_type = .un_node, - .frame_size = .un_node, - - .int_from_float = .pl_node, - .float_from_int = .pl_node, - .ptr_from_int = .pl_node, - .enum_from_int = .pl_node, - .float_cast = .pl_node, - .int_cast = .pl_node, - .ptr_cast = .pl_node, - .truncate = .pl_node, - .typeof_builtin = .pl_node, - - .has_decl = .pl_node, - .has_field = .pl_node, - - .clz = .un_node, - .ctz = .un_node, - .pop_count = .un_node, - .byte_swap = .un_node, - .bit_reverse = .un_node, - - .div_exact = .pl_node, - .div_floor = .pl_node, - .div_trunc = .pl_node, - .mod = .pl_node, - .rem = .pl_node, - - .shl = .pl_node, - .shl_exact = .pl_node, - .shl_sat = .pl_node, - .shr = .pl_node, - .shr_exact = .pl_node, - - .bit_offset_of = .pl_node, - .offset_of = .pl_node, - .splat = .pl_node, - .reduce = .pl_node, - .shuffle = .pl_node, - .atomic_load = .pl_node, - .atomic_rmw = .pl_node, - .atomic_store = .pl_node, - .mul_add = .pl_node, - .builtin_call = .pl_node, - .field_parent_ptr = .pl_node, - .max = .pl_node, - .memcpy = .pl_node, - .memset = .pl_node, - .min = .pl_node, - .c_import = .pl_node, - - .alloc = .un_node, - .alloc_mut = .un_node, - .alloc_comptime_mut = .un_node, - .alloc_inferred = .node, - .alloc_inferred_mut = .node, - .alloc_inferred_comptime = .node, - .alloc_inferred_comptime_mut = .node, - .resolve_inferred_alloc = .un_node, - .make_ptr_const = .un_node, - - .@"resume" = .un_node, - .@"await" = .un_node, - - .closure_capture = .un_tok, - .closure_get = .inst_node, - - .@"defer" = .@"defer", - 
.defer_err_code = .defer_err_code, - - .save_err_ret_index = .save_err_ret_index, - .restore_err_ret_index_unconditional = .un_node, - .restore_err_ret_index_fn_entry = .un_node, - - .struct_init_empty = .un_node, - .struct_init_empty_result = .un_node, - .struct_init_empty_ref_result = .un_node, - .struct_init_anon = .pl_node, - .struct_init = .pl_node, - .struct_init_ref = .pl_node, - .validate_struct_init_ty = .un_node, - .validate_struct_init_result_ty = .un_node, - .validate_ptr_struct_init = .pl_node, - .struct_init_field_type = .pl_node, - .struct_init_field_ptr = .pl_node, - .array_init_anon = .pl_node, - .array_init = .pl_node, - .array_init_ref = .pl_node, - .validate_array_init_ty = .pl_node, - .validate_array_init_result_ty = .pl_node, - .validate_array_init_ref_ty = .pl_node, - .validate_ptr_array_init = .pl_node, - .array_init_elem_type = .bin, - .array_init_elem_ptr = .pl_node, - - .extended = .extended, - }); - }; - - // Uncomment to view how many tag slots are available. - //comptime { - // @compileLog("ZIR tags left: ", 256 - @typeInfo(Tag).Enum.fields.len); - //} - }; - - /// Rarer instructions are here; ones that do not fit in the 8-bit `Tag` enum. - /// `noreturn` instructions may not go here; they must be part of the main `Tag` enum. - pub const Extended = enum(u16) { - /// Declares a global variable. - /// `operand` is payload index to `ExtendedVar`. - /// `small` is `ExtendedVar.Small`. - variable, - /// A struct type definition. Contains references to ZIR instructions for - /// the field types, defaults, and alignments. - /// `operand` is payload index to `StructDecl`. - /// `small` is `StructDecl.Small`. - struct_decl, - /// An enum type definition. Contains references to ZIR instructions for - /// the field value expressions and optional type tag expression. - /// `operand` is payload index to `EnumDecl`. - /// `small` is `EnumDecl.Small`. - enum_decl, - /// A union type definition. Contains references to ZIR instructions for - /// the field types and optional type tag expression. - /// `operand` is payload index to `UnionDecl`. - /// `small` is `UnionDecl.Small`. - union_decl, - /// An opaque type definition. Contains references to decls and captures. - /// `operand` is payload index to `OpaqueDecl`. - /// `small` is `OpaqueDecl.Small`. - opaque_decl, - /// Implements the `@This` builtin. - /// `operand` is `src_node: i32`. - this, - /// Implements the `@returnAddress` builtin. - /// `operand` is `src_node: i32`. - ret_addr, - /// Implements the `@src` builtin. - /// `operand` is payload index to `LineColumn`. - builtin_src, - /// Implements the `@errorReturnTrace` builtin. - /// `operand` is `src_node: i32`. - error_return_trace, - /// Implements the `@frame` builtin. - /// `operand` is `src_node: i32`. - frame, - /// Implements the `@frameAddress` builtin. - /// `operand` is `src_node: i32`. - frame_address, - /// Same as `alloc` from `Tag` but may contain an alignment instruction. - /// `operand` is payload index to `AllocExtended`. - /// `small`: - /// * 0b000X - has type - /// * 0b00X0 - has alignment - /// * 0b0X00 - 1=const, 0=var - /// * 0bX000 - is comptime - alloc, - /// The `@extern` builtin. - /// `operand` is payload index to `BinNode`. - builtin_extern, - /// Inline assembly. - /// `small`: - /// * 0b00000000_000XXXXX - `outputs_len`. - /// * 0b000000XX_XXX00000 - `inputs_len`. - /// * 0b0XXXXX00_00000000 - `clobbers_len`. - /// * 0bX0000000_00000000 - is volatile - /// `operand` is payload index to `Asm`. 
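// Illustrative sketch (ordinary Zig, not part of the deleted code) decoding the
// `small` bit layout documented above for inline assembly: 5 bits each, low to
// high, for outputs/inputs/clobbers, with the top bit as the volatile flag.
fn unpackAsmSmall(small: u16) struct { outputs_len: u5, inputs_len: u5, clobbers_len: u5, is_volatile: bool } {
    return .{
        .outputs_len = @truncate(small),
        .inputs_len = @truncate(small >> 5),
        .clobbers_len = @truncate(small >> 10),
        .is_volatile = (small >> 15) != 0,
    };
}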
- @"asm", - /// Same as `asm` except the assembly template is not a string literal but a comptime - /// expression. - /// The `asm_source` field of the Asm is not a null-terminated string - /// but instead a Ref. - asm_expr, - /// Log compile time variables and emit an error message. - /// `operand` is payload index to `NodeMultiOp`. - /// `small` is `operands_len`. - /// The AST node is the compile log builtin call. - compile_log, - /// The builtin `@TypeOf` which returns the type after Peer Type Resolution - /// of one or more params. - /// `operand` is payload index to `TypeOfPeer`. - /// `small` is `operands_len`. - /// The AST node is the builtin call. - typeof_peer, - /// Implements the `@min` builtin for more than 2 args. - /// `operand` is payload index to `NodeMultiOp`. - /// `small` is `operands_len`. - /// The AST node is the builtin call. - min_multi, - /// Implements the `@max` builtin for more than 2 args. - /// `operand` is payload index to `NodeMultiOp`. - /// `small` is `operands_len`. - /// The AST node is the builtin call. - max_multi, - /// Implements the `@addWithOverflow` builtin. - /// `operand` is payload index to `BinNode`. - /// `small` is unused. - add_with_overflow, - /// Implements the `@subWithOverflow` builtin. - /// `operand` is payload index to `BinNode`. - /// `small` is unused. - sub_with_overflow, - /// Implements the `@mulWithOverflow` builtin. - /// `operand` is payload index to `BinNode`. - /// `small` is unused. - mul_with_overflow, - /// Implements the `@shlWithOverflow` builtin. - /// `operand` is payload index to `BinNode`. - /// `small` is unused. - shl_with_overflow, - /// `operand` is payload index to `UnNode`. - c_undef, - /// `operand` is payload index to `UnNode`. - c_include, - /// `operand` is payload index to `BinNode`. - c_define, - /// `operand` is payload index to `UnNode`. - wasm_memory_size, - /// `operand` is payload index to `BinNode`. - wasm_memory_grow, - /// The `@prefetch` builtin. - /// `operand` is payload index to `BinNode`. - prefetch, - /// Implements the `@fence` builtin. - /// `operand` is payload index to `UnNode`. - fence, - /// Implement builtin `@setFloatMode`. - /// `operand` is payload index to `UnNode`. - set_float_mode, - /// Implement builtin `@setAlignStack`. - /// `operand` is payload index to `UnNode`. - set_align_stack, - /// Implements `@setCold`. - /// `operand` is payload index to `UnNode`. - set_cold, - /// Implements the `@errorCast` builtin. - /// `operand` is payload index to `BinNode`. `lhs` is dest type, `rhs` is operand. - error_cast, - /// `operand` is payload index to `UnNode`. - await_nosuspend, - /// Implements `@breakpoint`. - /// `operand` is `src_node: i32`. - breakpoint, - /// Implements the `@select` builtin. - /// `operand` is payload index to `Select`. - select, - /// Implement builtin `@errToInt`. - /// `operand` is payload index to `UnNode`. - int_from_error, - /// Implement builtin `@errorFromInt`. - /// `operand` is payload index to `UnNode`. - error_from_int, - /// Implement builtin `@Type`. - /// `operand` is payload index to `UnNode`. - /// `small` contains `NameStrategy`. - reify, - /// Implements the `@asyncCall` builtin. - /// `operand` is payload index to `AsyncCall`. - builtin_async_call, - /// Implements the `@cmpxchgStrong` and `@cmpxchgWeak` builtins. - /// `small` 0=>weak 1=>strong - /// `operand` is payload index to `Cmpxchg`. - cmpxchg, - /// Implement builtin `@cVaArg`. - /// `operand` is payload index to `BinNode`. - c_va_arg, - /// Implement builtin `@cVaCopy`. 
- /// `operand` is payload index to `UnNode`. - c_va_copy, - /// Implement builtin `@cVaEnd`. - /// `operand` is payload index to `UnNode`. - c_va_end, - /// Implement builtin `@cVaStart`. - /// `operand` is `src_node: i32`. - c_va_start, - /// Implements the following builtins: - /// `@ptrCast`, `@alignCast`, `@addrSpaceCast`, `@constCast`, `@volatileCast`. - /// Represents an arbitrary nesting of the above builtins. Such a nesting is treated as a - /// single operation which can modify multiple components of a pointer type. - /// `operand` is payload index to `BinNode`. - /// `small` contains `FullPtrCastFlags`. - /// AST node is the root of the nested casts. - /// `lhs` is dest type, `rhs` is operand. - ptr_cast_full, - /// `operand` is payload index to `UnNode`. - /// `small` contains `FullPtrCastFlags`. - /// Guaranteed to only have flags where no explicit destination type is - /// required (const_cast and volatile_cast). - /// AST node is the root of the nested casts. - ptr_cast_no_dest, - /// Implements the `@workItemId` builtin. - /// `operand` is payload index to `UnNode`. - work_item_id, - /// Implements the `@workGroupSize` builtin. - /// `operand` is payload index to `UnNode`. - work_group_size, - /// Implements the `@workGroupId` builtin. - /// `operand` is payload index to `UnNode`. - work_group_id, - /// Implements the `@inComptime` builtin. - /// `operand` is `src_node: i32`. - in_comptime, - /// Restores the error return index to its last saved state in a given - /// block. If the block is `.none`, restores to the state from the point - /// of function entry. If the operand is not `.none`, the restore is - /// conditional on the operand value not being an error. - /// `operand` is payload index to `RestoreErrRetIndex`. - /// `small` is undefined. - restore_err_ret_index, - /// Used as a placeholder instruction which is just a dummy index for Sema to replace - /// with a specific value. For instance, this is used for the capture of an `errdefer`. - /// This should never appear in a body. - value_placeholder, - - pub const InstData = struct { - opcode: Extended, - small: u16, - operand: u32, - }; - }; - - /// The position of a ZIR instruction within the `Zir` instructions array. - pub const Index = enum(u32) { - /// ZIR is structured so that the outermost "main" struct of any file - /// is always at index 0. - main_struct_inst = 0, - ref_start_index = static_len, - _, - - pub const static_len = 84; - - pub fn toRef(i: Index) Inst.Ref { - return @enumFromInt(@intFromEnum(Index.ref_start_index) + @intFromEnum(i)); - } - - pub fn toOptional(i: Index) OptionalIndex { - return @enumFromInt(@intFromEnum(i)); - } - }; - - pub const OptionalIndex = enum(u32) { - /// ZIR is structured so that the outermost "main" struct of any file - /// is always at index 0. - main_struct_inst = 0, - ref_start_index = Index.static_len, - none = std.math.maxInt(u32), - _, - - pub fn unwrap(oi: OptionalIndex) ?Index { - return if (oi == .none) null else @enumFromInt(@intFromEnum(oi)); - } - }; - - /// A reference to ZIR instruction, or to an InternPool index, or neither. - /// - /// If the integer tag value is < InternPool.static_len, then it - /// corresponds to an InternPool index. Otherwise, this refers to a ZIR - /// instruction. - /// - /// The tag type is specified so that it is safe to bitcast between `[]u32` - /// and `[]Ref`. 
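// Illustrative sketch of the Index <-> Ref mapping defined above (assuming
// `std` is in scope): tag values below `Index.static_len` (84) name well-known
// constants, and everything at or above maps back to an instruction index.
fn refRoundTrip(inst: Index) void {
    const ref = inst.toRef(); // tag value becomes 84 + @intFromEnum(inst)
    std.debug.assert(ref.toIndex().? == inst); // toIndex undoes toRef
}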
- pub const Ref = enum(u32) { - u0_type, - i0_type, - u1_type, - u8_type, - i8_type, - u16_type, - i16_type, - u29_type, - u32_type, - i32_type, - u64_type, - i64_type, - u80_type, - u128_type, - i128_type, - usize_type, - isize_type, - c_char_type, - c_short_type, - c_ushort_type, - c_int_type, - c_uint_type, - c_long_type, - c_ulong_type, - c_longlong_type, - c_ulonglong_type, - c_longdouble_type, - f16_type, - f32_type, - f64_type, - f80_type, - f128_type, - anyopaque_type, - bool_type, - void_type, - type_type, - anyerror_type, - comptime_int_type, - comptime_float_type, - noreturn_type, - anyframe_type, - null_type, - undefined_type, - enum_literal_type, - atomic_order_type, - atomic_rmw_op_type, - calling_convention_type, - address_space_type, - float_mode_type, - reduce_op_type, - call_modifier_type, - prefetch_options_type, - export_options_type, - extern_options_type, - type_info_type, - manyptr_u8_type, - manyptr_const_u8_type, - manyptr_const_u8_sentinel_0_type, - single_const_pointer_to_comptime_int_type, - slice_const_u8_type, - slice_const_u8_sentinel_0_type, - optional_noreturn_type, - anyerror_void_error_union_type, - adhoc_inferred_error_set_type, - generic_poison_type, - empty_struct_type, - undef, - zero, - zero_usize, - zero_u8, - one, - one_usize, - one_u8, - four_u8, - negative_one, - calling_convention_c, - calling_convention_inline, - void_value, - unreachable_value, - null_value, - bool_true, - bool_false, - empty_struct, - generic_poison, - - /// This tag is here to match Air and InternPool, however it is unused - /// for ZIR purposes. - var_args_param_type = std.math.maxInt(u32) - 1, - /// This Ref does not correspond to any ZIR instruction or constant - /// value and may instead be used as a sentinel to indicate null. - none = std.math.maxInt(u32), - - _, - - pub fn toIndex(inst: Ref) ?Index { - assert(inst != .none); - const ref_int = @intFromEnum(inst); - if (ref_int >= @intFromEnum(Index.ref_start_index)) { - return @enumFromInt(ref_int - @intFromEnum(Index.ref_start_index)); - } else { - return null; - } - } - - pub fn toIndexAllowNone(inst: Ref) ?Index { - if (inst == .none) return null; - return toIndex(inst); - } - }; - - /// All instructions have an 8-byte payload, which is contained within - /// this union. `Tag` determines which union field is active, as well as - /// how to interpret the data within. - pub const Data = union { - /// Used for `Tag.extended`. The extended opcode determines the meaning - /// of the `small` and `operand` fields. - extended: Extended.InstData, - /// Used for unary operators, with an AST node source location. - un_node: struct { - /// Offset from Decl AST node index. - src_node: i32, - /// The meaning of this operand depends on the corresponding `Tag`. - operand: Ref, - - pub fn src(self: @This()) LazySrcLoc { - return LazySrcLoc.nodeOffset(self.src_node); - } - }, - /// Used for unary operators, with a token source location. - un_tok: struct { - /// Offset from Decl AST token index. - src_tok: Ast.TokenIndex, - /// The meaning of this operand depends on the corresponding `Tag`. - operand: Ref, - - pub fn src(self: @This()) LazySrcLoc { - return .{ .token_offset = self.src_tok }; - } - }, - pl_node: struct { - /// Offset from Decl AST node index. - /// `Tag` determines which kind of AST node this points to. - src_node: i32, - /// index into extra. - /// `Tag` determines what lives there. 
- payload_index: u32, - - pub fn src(self: @This()) LazySrcLoc { - return LazySrcLoc.nodeOffset(self.src_node); - } - }, - pl_tok: struct { - /// Offset from Decl AST token index. - src_tok: Ast.TokenIndex, - /// index into extra. - /// `Tag` determines what lives there. - payload_index: u32, - - pub fn src(self: @This()) LazySrcLoc { - return .{ .token_offset = self.src_tok }; - } - }, - bin: Bin, - /// For strings which may contain null bytes. - str: struct { - /// Offset into `string_bytes`. - start: NullTerminatedString, - /// Number of bytes in the string. - len: u32, - - pub fn get(self: @This(), code: Zir) []const u8 { - return code.string_bytes[@intFromEnum(self.start)..][0..self.len]; - } - }, - str_tok: struct { - /// Offset into `string_bytes`. Null-terminated. - start: NullTerminatedString, - /// Offset from Decl AST token index. - src_tok: u32, - - pub fn get(self: @This(), code: Zir) [:0]const u8 { - return code.nullTerminatedString(self.start); - } - - pub fn src(self: @This()) LazySrcLoc { - return .{ .token_offset = self.src_tok }; - } - }, - /// Offset from Decl AST token index. - tok: Ast.TokenIndex, - /// Offset from Decl AST node index. - node: i32, - int: u64, - float: f64, - ptr_type: struct { - flags: packed struct { - is_allowzero: bool, - is_mutable: bool, - is_volatile: bool, - has_sentinel: bool, - has_align: bool, - has_addrspace: bool, - has_bit_range: bool, - _: u1 = undefined, - }, - size: std.builtin.Type.Pointer.Size, - /// Index into extra. See `PtrType`. - payload_index: u32, - }, - int_type: struct { - /// Offset from Decl AST node index. - /// `Tag` determines which kind of AST node this points to. - src_node: i32, - signedness: std.builtin.Signedness, - bit_count: u16, - - pub fn src(self: @This()) LazySrcLoc { - return LazySrcLoc.nodeOffset(self.src_node); - } - }, - @"unreachable": struct { - /// Offset from Decl AST node index. - /// `Tag` determines which kind of AST node this points to. - src_node: i32, - - pub fn src(self: @This()) LazySrcLoc { - return LazySrcLoc.nodeOffset(self.src_node); - } - }, - @"break": struct { - operand: Ref, - payload_index: u32, - }, - dbg_stmt: LineColumn, - /// Used for unary operators which reference an inst, - /// with an AST node source location. - inst_node: struct { - /// Offset from Decl AST node index. - src_node: i32, - /// The meaning of this operand depends on the corresponding `Tag`. - inst: Index, - - pub fn src(self: @This()) LazySrcLoc { - return LazySrcLoc.nodeOffset(self.src_node); - } - }, - str_op: struct { - /// Offset into `string_bytes`. Null-terminated. - str: NullTerminatedString, - operand: Ref, - - pub fn getStr(self: @This(), zir: Zir) [:0]const u8 { - return zir.nullTerminatedString(self.str); - } - }, - @"defer": struct { - index: u32, - len: u32, - }, - defer_err_code: struct { - err_code: Ref, - payload_index: u32, - }, - save_err_ret_index: struct { - operand: Ref, // If error type (or .none), save new trace index - }, - elem_val_imm: struct { - /// The indexable value being accessed. - operand: Ref, - /// The index being accessed. - idx: u32, - }, - - // Make sure we don't accidentally add a field to make this union - // bigger than expected. Note that in Debug builds, Zig is allowed - // to insert a secret field for safety checks. - comptime { - if (builtin.mode != .Debug and builtin.mode != .ReleaseSafe) { - assert(@sizeOf(Data) == 8); - } - } - - /// TODO this has to be kept in sync with `Data` which we want to be an untagged - /// union. 
There is some kind of language awkwardness here and it has to do with - /// deserializing an untagged union (in this case `Data`) from a file, and trying - /// to preserve the hidden safety field. - pub const FieldEnum = enum { - extended, - un_node, - un_tok, - pl_node, - pl_tok, - bin, - str, - str_tok, - tok, - node, - int, - float, - ptr_type, - int_type, - @"unreachable", - @"break", - dbg_stmt, - inst_node, - str_op, - @"defer", - defer_err_code, - save_err_ret_index, - elem_val_imm, - }; - }; - - pub const Break = struct { - pub const no_src_node = std.math.maxInt(i32); - - operand_src_node: i32, - block_inst: Index, - }; - - /// Trailing: - /// 0. Output for every outputs_len - /// 1. Input for every inputs_len - /// 2. clobber: NullTerminatedString // index into string_bytes (null terminated) for every clobbers_len. - pub const Asm = struct { - src_node: i32, - // null-terminated string index - asm_source: NullTerminatedString, - /// 1 bit for each outputs_len: whether it uses `-> T` or not. - /// 0b0 - operand is a pointer to where to store the output. - /// 0b1 - operand is a type; asm expression has the output as the result. - /// 0b0X is the first output, 0bX0 is the second, etc. - output_type_bits: u32, - - pub const Output = struct { - /// index into string_bytes (null terminated) - name: NullTerminatedString, - /// index into string_bytes (null terminated) - constraint: NullTerminatedString, - /// How to interpret this is determined by `output_type_bits`. - operand: Ref, - }; - - pub const Input = struct { - /// index into string_bytes (null terminated) - name: NullTerminatedString, - /// index into string_bytes (null terminated) - constraint: NullTerminatedString, - operand: Ref, - }; - }; - - /// Trailing: - /// if (ret_body_len == 1) { - /// 0. return_type: Ref - /// } - /// if (ret_body_len > 1) { - /// 1. return_type: Index // for each ret_body_len - /// } - /// 2. body: Index // for each body_len - /// 3. src_locs: SrcLocs // if body_len != 0 - /// 4. proto_hash: std.zig.SrcHash // if body_len != 0; hash of function prototype - pub const Func = struct { - /// If this is 0 it means a void return type. - /// If this is 1 it means return_type is a simple Ref - ret_body_len: u32, - /// Points to the block that contains the param instructions for this function. - /// If this is a `declaration`, it refers to the declaration's value body. - param_block: Index, - body_len: u32, - - pub const SrcLocs = struct { - /// Line index in the source file relative to the parent decl. - lbrace_line: u32, - /// Line index in the source file relative to the parent decl. - rbrace_line: u32, - /// lbrace_column is least significant bits u16 - /// rbrace_column is most significant bits u16 - columns: u32, - }; - }; - - /// Trailing: - /// 0. lib_name: NullTerminatedString, // null terminated string index, if has_lib_name is set - /// if (has_align_ref and !has_align_body) { - /// 1. align: Ref, - /// } - /// if (has_align_body) { - /// 2. align_body_len: u32 - /// 3. align_body: u32 // for each align_body_len - /// } - /// if (has_addrspace_ref and !has_addrspace_body) { - /// 4. addrspace: Ref, - /// } - /// if (has_addrspace_body) { - /// 5. addrspace_body_len: u32 - /// 6. addrspace_body: u32 // for each addrspace_body_len - /// } - /// if (has_section_ref and !has_section_body) { - /// 7. section: Ref, - /// } - /// if (has_section_body) { - /// 8. section_body_len: u32 - /// 9. section_body: u32 // for each section_body_len - /// } - /// if (has_cc_ref and !has_cc_body) { - /// 10. 
cc: Ref, - /// } - /// if (has_cc_body) { - /// 11. cc_body_len: u32 - /// 12. cc_body: u32 // for each cc_body_len - /// } - /// if (has_ret_ty_ref and !has_ret_ty_body) { - /// 13. ret_ty: Ref, - /// } - /// if (has_ret_ty_body) { - /// 14. ret_ty_body_len: u32 - /// 15. ret_ty_body: u32 // for each ret_ty_body_len - /// } - /// 16. noalias_bits: u32 // if has_any_noalias - /// - each bit starting with LSB corresponds to parameter indexes - /// 17. body: Index // for each body_len - /// 18. src_locs: Func.SrcLocs // if body_len != 0 - /// 19. proto_hash: std.zig.SrcHash // if body_len != 0; hash of function prototype - pub const FuncFancy = struct { - /// Points to the block that contains the param instructions for this function. - /// If this is a `declaration`, it refers to the declaration's value body. - param_block: Index, - body_len: u32, - bits: Bits, - - /// If both has_cc_ref and has_cc_body are false, it means auto calling convention. - /// If both has_align_ref and has_align_body are false, it means default alignment. - /// If both has_ret_ty_ref and has_ret_ty_body are false, it means void return type. - /// If both has_section_ref and has_section_body are false, it means default section. - /// If both has_addrspace_ref and has_addrspace_body are false, it means default addrspace. - pub const Bits = packed struct { - is_var_args: bool, - is_inferred_error: bool, - is_test: bool, - is_extern: bool, - is_noinline: bool, - has_align_ref: bool, - has_align_body: bool, - has_addrspace_ref: bool, - has_addrspace_body: bool, - has_section_ref: bool, - has_section_body: bool, - has_cc_ref: bool, - has_cc_body: bool, - has_ret_ty_ref: bool, - has_ret_ty_body: bool, - has_lib_name: bool, - has_any_noalias: bool, - _: u15 = undefined, - }; - }; - - /// Trailing: - /// 0. lib_name: NullTerminatedString, // null terminated string index, if has_lib_name is set - /// 1. align: Ref, // if has_align is set - /// 2. init: Ref // if has_init is set - /// The source node is obtained from the containing `block_inline`. - pub const ExtendedVar = struct { - var_type: Ref, - - pub const Small = packed struct { - has_lib_name: bool, - has_align: bool, - has_init: bool, - is_extern: bool, - is_const: bool, - is_threadlocal: bool, - _: u10 = undefined, - }; - }; - - /// This data is stored inside extra, with trailing operands according to `operands_len`. - /// Each operand is a `Ref`. - pub const MultiOp = struct { - operands_len: u32, - }; - - /// Trailing: operand: Ref, // for each `operands_len` (stored in `small`). - pub const NodeMultiOp = struct { - src_node: i32, - }; - - /// This data is stored inside extra, with trailing operands according to `body_len`. - /// Each operand is an `Index`. - pub const Block = struct { - body_len: u32, - }; - - /// Trailing: - /// * inst: Index // for each `body_len` - pub const BoolBr = struct { - lhs: Ref, - body_len: u32, - }; - - /// Trailing: - /// 0. doc_comment: u32 // if `has_doc_comment`; null-terminated string index - /// 1. align_body_len: u32 // if `has_align_linksection_addrspace`; 0 means no `align` - /// 2. linksection_body_len: u32 // if `has_align_linksection_addrspace`; 0 means no `linksection` - /// 3. addrspace_body_len: u32 // if `has_align_linksection_addrspace`; 0 means no `addrspace` - /// 4. value_body_inst: Zir.Inst.Index - /// - for each `value_body_len` - /// - body to be exited via `break_inline` to this `declaration` instruction - /// 5. 
align_body_inst: Zir.Inst.Index - /// - for each `align_body_len` - /// - body to be exited via `break_inline` to this `declaration` instruction - /// 6. linksection_body_inst: Zir.Inst.Index - /// - for each `linksection_body_len` - /// - body to be exited via `break_inline` to this `declaration` instruction - /// 7. addrspace_body_inst: Zir.Inst.Index - /// - for each `addrspace_body_len` - /// - body to be exited via `break_inline` to this `declaration` instruction - pub const Declaration = struct { - // These fields should be concatenated and reinterpreted as a `std.zig.SrcHash`. - src_hash_0: u32, - src_hash_1: u32, - src_hash_2: u32, - src_hash_3: u32, - /// The name of this `Decl`. Also indicates whether it is a test, comptime block, etc. - name: Name, - /// This Decl's line number relative to that of its parent. - /// TODO: column must be encoded similarly to respect non-formatted code! - line_offset: u32, - flags: Flags, - - pub const Flags = packed struct(u32) { - value_body_len: u28, - is_pub: bool, - is_export: bool, - has_doc_comment: bool, - has_align_linksection_addrspace: bool, - }; - - pub const Name = enum(u32) { - @"comptime" = std.math.maxInt(u32), - @"usingnamespace" = std.math.maxInt(u32) - 1, - unnamed_test = std.math.maxInt(u32) - 2, - /// In this case, `has_doc_comment` will be true, and the doc - /// comment body is the identifier name. - decltest = std.math.maxInt(u32) - 3, - /// Other values are `NullTerminatedString` values, i.e. index into - /// `string_bytes`. If the byte referenced is 0, the decl is a named - /// test, and the actual name begins at the following byte. - _, - - pub fn isNamedTest(name: Name, zir: Zir) bool { - return switch (name) { - .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => false, - _ => zir.string_bytes[@intFromEnum(name)] == 0, - }; - } - pub fn toString(name: Name, zir: Zir) ?NullTerminatedString { - switch (name) { - .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => return null, - _ => {}, - } - const idx: u32 = @intFromEnum(name); - if (zir.string_bytes[idx] == 0) { - // Named test - return @enumFromInt(idx + 1); - } - return @enumFromInt(idx); - } - }; - - pub const Bodies = struct { - value_body: []const Index, - align_body: ?[]const Index, - linksection_body: ?[]const Index, - addrspace_body: ?[]const Index, - }; - - pub fn getBodies(declaration: Declaration, extra_end: u32, zir: Zir) Bodies { - var extra_index: u32 = extra_end; - extra_index += @intFromBool(declaration.flags.has_doc_comment); - const value_body_len = declaration.flags.value_body_len; - const align_body_len, const linksection_body_len, const addrspace_body_len = lens: { - if (!declaration.flags.has_align_linksection_addrspace) { - break :lens .{ 0, 0, 0 }; - } - const lens = zir.extra[extra_index..][0..3].*; - extra_index += 3; - break :lens lens; - }; - return .{ - .value_body = b: { - defer extra_index += value_body_len; - break :b zir.bodySlice(extra_index, value_body_len); - }, - .align_body = if (align_body_len == 0) null else b: { - defer extra_index += align_body_len; - break :b zir.bodySlice(extra_index, align_body_len); - }, - .linksection_body = if (linksection_body_len == 0) null else b: { - defer extra_index += linksection_body_len; - break :b zir.bodySlice(extra_index, linksection_body_len); - }, - .addrspace_body = if (addrspace_body_len == 0) null else b: { - defer extra_index += addrspace_body_len; - break :b zir.bodySlice(extra_index, addrspace_body_len); - }, - }; - } - }; - - /// Stored inside extra, with trailing 
arguments according to `args_len`. - /// Implicit 0. arg_0_start: u32, // always same as `args_len` - /// 1. arg_end: u32, // for each `args_len` - /// arg_N_start is the same as arg_N-1_end - pub const Call = struct { - // Note: Flags *must* come first so that unusedResultExpr - // can find it when it goes to modify them. - flags: Flags, - callee: Ref, - - pub const Flags = packed struct { - /// std.builtin.CallModifier in packed form - pub const PackedModifier = u3; - pub const PackedArgsLen = u27; - - packed_modifier: PackedModifier, - ensure_result_used: bool = false, - pop_error_return_trace: bool, - args_len: PackedArgsLen, - - comptime { - if (@sizeOf(Flags) != 4 or @bitSizeOf(Flags) != 32) - @compileError("Layout of Call.Flags needs to be updated!"); - if (@bitSizeOf(std.builtin.CallModifier) != @bitSizeOf(PackedModifier)) - @compileError("Call.Flags.PackedModifier needs to be updated!"); - } - }; - }; - - /// Stored inside extra, with trailing arguments according to `args_len`. - /// Implicit 0. arg_0_start: u32, // always same as `args_len` - /// 1. arg_end: u32, // for each `args_len` - /// arg_N_start is the same as arg_N-1_end - pub const FieldCall = struct { - // Note: Flags *must* come first so that unusedResultExpr - // can find it when it goes to modify them. - flags: Call.Flags, - obj_ptr: Ref, - /// Offset into `string_bytes`. - field_name_start: NullTerminatedString, - }; - - pub const TypeOfPeer = struct { - src_node: i32, - body_len: u32, - body_index: u32, - }; - - pub const BuiltinCall = struct { - // Note: Flags *must* come first so that unusedResultExpr - // can find it when it goes to modify them. - flags: Flags, - modifier: Ref, - callee: Ref, - args: Ref, - - pub const Flags = packed struct { - is_nosuspend: bool, - ensure_result_used: bool, - _: u30 = undefined, - - comptime { - if (@sizeOf(Flags) != 4 or @bitSizeOf(Flags) != 32) - @compileError("Layout of BuiltinCall.Flags needs to be updated!"); - } - }; - }; - - /// This data is stored inside extra, with two sets of trailing `Ref`: - /// * 0. the then body, according to `then_body_len`. - /// * 1. the else body, according to `else_body_len`. - pub const CondBr = struct { - condition: Ref, - then_body_len: u32, - else_body_len: u32, - }; - - /// This data is stored inside extra, trailed by: - /// * 0. body: Index // for each `body_len`. - pub const Try = struct { - /// The error union to unwrap. - operand: Ref, - body_len: u32, - }; - - /// Stored in extra. Depending on the flags in Data, there will be up to 5 - /// trailing Ref fields: - /// 0. sentinel: Ref // if `has_sentinel` flag is set - /// 1. align: Ref // if `has_align` flag is set - /// 2. address_space: Ref // if `has_addrspace` flag is set - /// 3. bit_start: Ref // if `has_bit_range` flag is set - /// 4. host_size: Ref // if `has_bit_range` flag is set - pub const PtrType = struct { - elem_type: Ref, - src_node: i32, - }; - - pub const ArrayTypeSentinel = struct { - len: Ref, - sentinel: Ref, - elem_type: Ref, - }; - - pub const SliceStart = struct { - lhs: Ref, - start: Ref, - }; - - pub const SliceEnd = struct { - lhs: Ref, - start: Ref, - end: Ref, - }; - - pub const SliceSentinel = struct { - lhs: Ref, - start: Ref, - end: Ref, - sentinel: Ref, - }; - - pub const SliceLength = struct { - lhs: Ref, - start: Ref, - len: Ref, - sentinel: Ref, - start_src_node_offset: i32, - }; - - /// The meaning of these operands depends on the corresponding `Tag`. 
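// --- Illustrative aside (editor's note, not part of the patch) ---
// A sketch of decoding `Call`'s trailing argument encoding above: the trailing
// u32s are cumulative end offsets relative to the start of the trailing data,
// with argument 0 implicitly starting at `args_len`. The helper name and
// signature are hypothetical, for illustration only:
//
//     fn callArgBody(zir: Zir, extra_end: u32, args_len: u32, n: u32) []const Inst.Index {
//         const arg_start = if (n == 0) args_len else zir.extra[extra_end + n - 1];
//         const arg_end = zir.extra[extra_end + n];
//         return zir.bodySlice(extra_end + arg_start, arg_end - arg_start);
//     }
//
// --- end aside ---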
- pub const Bin = struct { - lhs: Ref, - rhs: Ref, - }; - - pub const BinNode = struct { - node: i32, - lhs: Ref, - rhs: Ref, - }; - - pub const UnNode = struct { - node: i32, - operand: Ref, - }; - - pub const ElemPtrImm = struct { - ptr: Ref, - index: u32, - }; - - pub const SwitchBlockErrUnion = struct { - operand: Ref, - bits: Bits, - main_src_node_offset: i32, - - pub const Bits = packed struct(u32) { - /// If true, one or more prongs have multiple items. - has_multi_cases: bool, - /// If true, there is an else prong. This is mutually exclusive with `has_under`. - has_else: bool, - any_uses_err_capture: bool, - payload_is_ref: bool, - scalar_cases_len: ScalarCasesLen, - - pub const ScalarCasesLen = u28; - }; - - pub const MultiProng = struct { - items: []const Ref, - body: []const Index, - }; - }; - - /// 0. multi_cases_len: u32 // If has_multi_cases is set. - /// 1. tag_capture_inst: u32 // If any_has_tag_capture is set. Index of instruction prongs use to refer to the inline tag capture. - /// 2. else_body { // If has_else or has_under is set. - /// info: ProngInfo, - /// body member Index for every info.body_len - /// } - /// 3. scalar_cases: { // for every scalar_cases_len - /// item: Ref, - /// info: ProngInfo, - /// body member Index for every info.body_len - /// } - /// 4. multi_cases: { // for every multi_cases_len - /// items_len: u32, - /// ranges_len: u32, - /// info: ProngInfo, - /// item: Ref // for every items_len - /// ranges: { // for every ranges_len - /// item_first: Ref, - /// item_last: Ref, - /// } - /// body member Index for every info.body_len - /// } - /// - /// When analyzing a case body, the switch instruction itself refers to the - /// captured payload. Whether this is captured by reference or by value - /// depends on whether the `byref` bit is set for the corresponding body. - pub const SwitchBlock = struct { - /// The operand passed to the `switch` expression. If this is a - /// `switch_block`, this is the operand value; if `switch_block_ref` it - /// is a pointer to the operand. `switch_block_ref` is always used if - /// any prong has a byref capture. - operand: Ref, - bits: Bits, - - /// These are stored in trailing data in `extra` for each prong. - pub const ProngInfo = packed struct(u32) { - body_len: u28, - capture: Capture, - is_inline: bool, - has_tag_capture: bool, - - pub const Capture = enum(u2) { - none, - by_val, - by_ref, - }; - }; - - pub const Bits = packed struct(u32) { - /// If true, one or more prongs have multiple items. - has_multi_cases: bool, - /// If true, there is an else prong. This is mutually exclusive with `has_under`. - has_else: bool, - /// If true, there is an underscore prong. This is mutually exclusive with `has_else`. - has_under: bool, - /// If true, at least one prong has an inline tag capture. - any_has_tag_capture: bool, - scalar_cases_len: ScalarCasesLen, - - pub const ScalarCasesLen = u28; - - pub fn specialProng(bits: Bits) SpecialProng { - const has_else: u2 = @intFromBool(bits.has_else); - const has_under: u2 = @intFromBool(bits.has_under); - return switch ((has_else << 1) | has_under) { - 0b00 => .none, - 0b01 => .under, - 0b10 => .@"else", - 0b11 => unreachable, - }; - } - }; - - pub const MultiProng = struct { - items: []const Ref, - body: []const Index, - }; - }; - - pub const ArrayInitRefTy = struct { - ptr_ty: Ref, - elem_count: u32, - }; - - pub const Field = struct { - lhs: Ref, - /// Offset into `string_bytes`. 
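// --- Illustrative aside (editor's note, not part of the patch) ---
// A sketch of decoding a switch prong's packed `ProngInfo` word above: one
// u32 in `extra` describes the prong, and its body follows immediately.
// `extra_index` is assumed to point at the prong's info word:
//
//     const info: SwitchBlock.ProngInfo = @bitCast(zir.extra[extra_index]);
//     const body = zir.bodySlice(extra_index + 1, info.body_len);
//     // `info.capture` then says whether the prong captures by value or by reference.
//
// --- end aside ---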
- field_name_start: NullTerminatedString, - }; - - pub const FieldNamed = struct { - lhs: Ref, - field_name: Ref, - }; - - pub const As = struct { - dest_type: Ref, - operand: Ref, - }; - - /// Trailing: - /// 0. fields_len: u32, // if has_fields_len - /// 1. decls_len: u32, // if has_decls_len - /// 2. backing_int_body_len: u32, // if has_backing_int - /// 3. backing_int_ref: Ref, // if has_backing_int and backing_int_body_len is 0 - /// 4. backing_int_body_inst: Inst, // if has_backing_int and backing_int_body_len is > 0 - /// 5. decl: Index, // for every decls_len; points to a `declaration` instruction - /// 6. flags: u32 // for every 8 fields - /// - sets of 4 bits: - /// 0b000X: whether corresponding field has an align expression - /// 0b00X0: whether corresponding field has a default expression - /// 0b0X00: whether corresponding field is comptime - /// 0bX000: whether corresponding field has a type expression - /// 7. fields: { // for every fields_len - /// field_name: u32, // if !is_tuple - /// doc_comment: NullTerminatedString, // .empty if no doc comment - /// field_type: Ref, // if corresponding bit is not set. none means anytype. - /// field_type_body_len: u32, // if corresponding bit is set - /// align_body_len: u32, // if corresponding bit is set - /// init_body_len: u32, // if corresponding bit is set - /// } - /// 8. bodies: { // for every fields_len - /// field_type_body_inst: Inst, // for each field_type_body_len - /// align_body_inst: Inst, // for each align_body_len - /// init_body_inst: Inst, // for each init_body_len - /// } - pub const StructDecl = struct { - // These fields should be concatenated and reinterpreted as a `std.zig.SrcHash`. - // This hash contains the source of all fields, and any specified attributes (`extern`, backing type, etc). - fields_hash_0: u32, - fields_hash_1: u32, - fields_hash_2: u32, - fields_hash_3: u32, - src_node: i32, - - pub fn src(self: StructDecl) LazySrcLoc { - return LazySrcLoc.nodeOffset(self.src_node); - } - - pub const Small = packed struct { - has_fields_len: bool, - has_decls_len: bool, - has_backing_int: bool, - known_non_opv: bool, - known_comptime_only: bool, - is_tuple: bool, - name_strategy: NameStrategy, - layout: std.builtin.Type.ContainerLayout, - any_default_inits: bool, - any_comptime_fields: bool, - any_aligned_fields: bool, - _: u3 = undefined, - }; - }; - - pub const NameStrategy = enum(u2) { - /// Use the same name as the parent declaration name. - /// e.g. `const Foo = struct {...};`. - parent, - /// Use the name of the currently executing comptime function call, - /// with the current parameters. e.g. `ArrayList(i32)`. - func, - /// Create an anonymous name for this declaration. - /// Like this: "ParentDeclName_struct_69" - anon, - /// Use the name specified in the next `dbg_var_{val,ptr}` instruction. - dbg_var, - }; - - pub const FullPtrCastFlags = packed struct(u5) { - ptr_cast: bool = false, - align_cast: bool = false, - addrspace_cast: bool = false, - const_cast: bool = false, - volatile_cast: bool = false, - - pub inline fn needResultTypeBuiltinName(flags: FullPtrCastFlags) []const u8 { - if (flags.ptr_cast) return "@ptrCast"; - if (flags.align_cast) return "@alignCast"; - if (flags.addrspace_cast) return "@addrSpaceCast"; - unreachable; - } - }; - - /// Trailing: - /// 0. tag_type: Ref, // if has_tag_type - /// 1. body_len: u32, // if has_body_len - /// 2. fields_len: u32, // if has_fields_len - /// 3. decls_len: u32, // if has_decls_len - /// 4. 
decl: Index, // for every decls_len; points to a `declaration` instruction - /// 5. inst: Index // for every body_len - /// 6. has_bits: u32 // for every 32 fields - /// - the bit is whether corresponding field has a value expression - /// 7. fields: { // for every fields_len - /// field_name: u32, - /// doc_comment: u32, // .empty if no doc_comment - /// value: Ref, // if corresponding bit is set - /// } - pub const EnumDecl = struct { - // These fields should be concatenated and reinterpreted as a `std.zig.SrcHash`. - // This hash contains the source of all fields, and the backing type if specified. - fields_hash_0: u32, - fields_hash_1: u32, - fields_hash_2: u32, - fields_hash_3: u32, - src_node: i32, - - pub fn src(self: EnumDecl) LazySrcLoc { - return LazySrcLoc.nodeOffset(self.src_node); - } - - pub const Small = packed struct { - has_tag_type: bool, - has_body_len: bool, - has_fields_len: bool, - has_decls_len: bool, - name_strategy: NameStrategy, - nonexhaustive: bool, - _: u9 = undefined, - }; - }; - - /// Trailing: - /// 0. tag_type: Ref, // if has_tag_type - /// 1. body_len: u32, // if has_body_len - /// 2. fields_len: u32, // if has_fields_len - /// 3. decls_len: u32, // if has_decls_len - /// 4. decl: Index, // for every decls_len; points to a `declaration` instruction - /// 5. inst: Index // for every body_len - /// 6. has_bits: u32 // for every 8 fields - /// - sets of 4 bits: - /// 0b000X: whether corresponding field has a type expression - /// 0b00X0: whether corresponding field has an align expression - /// 0b0X00: whether corresponding field has a tag value expression - /// 0bX000: unused - /// 7. fields: { // for every fields_len - /// field_name: NullTerminatedString, // null terminated string index - /// doc_comment: NullTerminatedString, // .empty if no doc comment - /// field_type: Ref, // if corresponding bit is set - /// - if none, means `anytype`. - /// align: Ref, // if corresponding bit is set - /// tag_value: Ref, // if corresponding bit is set - /// } - pub const UnionDecl = struct { - // These fields should be concatenated and reinterpreted as a `std.zig.SrcHash`. - // This hash contains the source of all fields, and any specified attributes (`extern` etc). - fields_hash_0: u32, - fields_hash_1: u32, - fields_hash_2: u32, - fields_hash_3: u32, - src_node: i32, - - pub fn src(self: UnionDecl) LazySrcLoc { - return LazySrcLoc.nodeOffset(self.src_node); - } - - pub const Small = packed struct { - has_tag_type: bool, - has_body_len: bool, - has_fields_len: bool, - has_decls_len: bool, - name_strategy: NameStrategy, - layout: std.builtin.Type.ContainerLayout, - /// has_tag_type | auto_enum_tag | result - /// ------------------------------------- - /// false | false | union { } - /// false | true | union(enum) { } - /// true | true | union(enum(T)) { } - /// true | false | union(T) { } - auto_enum_tag: bool, - any_aligned_fields: bool, - _: u6 = undefined, - }; - }; - - /// Trailing: - /// 0. decls_len: u32, // if has_decls_len - /// 1. 
decl: Index, // for every decls_len; points to a `declaration` instruction - pub const OpaqueDecl = struct { - src_node: i32, - - pub fn src(self: OpaqueDecl) LazySrcLoc { - return LazySrcLoc.nodeOffset(self.src_node); - } - - pub const Small = packed struct { - has_decls_len: bool, - name_strategy: NameStrategy, - _: u13 = undefined, - }; - }; - - /// Trailing: - /// { // for every fields_len - /// field_name: NullTerminatedString // null terminated string index - /// doc_comment: NullTerminatedString // null terminated string index - /// } - pub const ErrorSetDecl = struct { - fields_len: u32, - }; - - /// A f128 value, broken up into 4 u32 parts. - pub const Float128 = struct { - piece0: u32, - piece1: u32, - piece2: u32, - piece3: u32, - - pub fn get(self: Float128) f128 { - const int_bits = @as(u128, self.piece0) | - (@as(u128, self.piece1) << 32) | - (@as(u128, self.piece2) << 64) | - (@as(u128, self.piece3) << 96); - return @as(f128, @bitCast(int_bits)); - } - }; - - /// Trailing is an item per field. - pub const StructInit = struct { - fields_len: u32, - - pub const Item = struct { - /// The `struct_init_field_type` ZIR instruction for this field init. - field_type: Index, - /// The field init expression to be used as the field value. This value will be coerced - /// to the field type if not already. - init: Ref, - }; - }; - - /// Trailing is an Item per field. - /// TODO make this instead array of inits followed by array of names because - /// it will be simpler Sema code and better for CPU cache. - pub const StructInitAnon = struct { - fields_len: u32, - - pub const Item = struct { - /// Null-terminated string table index. - field_name: NullTerminatedString, - /// The field init expression to be used as the field value. - init: Ref, - }; - }; - - pub const FieldType = struct { - container_type: Ref, - /// Offset into `string_bytes`, null terminated. - name_start: NullTerminatedString, - }; - - pub const FieldTypeRef = struct { - container_type: Ref, - field_name: Ref, - }; - - pub const Cmpxchg = struct { - node: i32, - ptr: Ref, - expected_value: Ref, - new_value: Ref, - success_order: Ref, - failure_order: Ref, - }; - - pub const AtomicRmw = struct { - ptr: Ref, - operation: Ref, - operand: Ref, - ordering: Ref, - }; - - pub const UnionInit = struct { - union_type: Ref, - field_name: Ref, - init: Ref, - }; - - pub const AtomicStore = struct { - ptr: Ref, - operand: Ref, - ordering: Ref, - }; - - pub const AtomicLoad = struct { - elem_type: Ref, - ptr: Ref, - ordering: Ref, - }; - - pub const MulAdd = struct { - mulend1: Ref, - mulend2: Ref, - addend: Ref, - }; - - pub const FieldParentPtr = struct { - parent_type: Ref, - field_name: Ref, - field_ptr: Ref, - }; - - pub const Shuffle = struct { - elem_type: Ref, - a: Ref, - b: Ref, - mask: Ref, - }; - - pub const Select = struct { - node: i32, - elem_type: Ref, - pred: Ref, - a: Ref, - b: Ref, - }; - - pub const AsyncCall = struct { - node: i32, - frame_buffer: Ref, - result_ptr: Ref, - fn_ptr: Ref, - args: Ref, - }; - - /// Trailing: inst: Index // for every body_len - pub const Param = struct { - /// Null-terminated string index. - name: NullTerminatedString, - /// Null-terminated string index. - doc_comment: NullTerminatedString, - /// The body contains the type of the parameter. - body_len: u32, - }; - - /// Trailing: - /// 0. type_inst: Ref, // if small 0b000X is set - /// 1. 
align_inst: Ref, // if small 0b00X0 is set - pub const AllocExtended = struct { - src_node: i32, - - pub const Small = packed struct { - has_type: bool, - has_align: bool, - is_const: bool, - is_comptime: bool, - _: u12 = undefined, - }; - }; - - pub const Export = struct { - /// If present, this is referring to a Decl via field access, e.g. `a.b`. - /// If omitted, this is referring to a Decl via identifier, e.g. `a`. - namespace: Ref, - /// Null-terminated string index. - decl_name: NullTerminatedString, - options: Ref, - }; - - pub const ExportValue = struct { - /// The comptime value to export. - operand: Ref, - options: Ref, - }; - - /// Trailing: `CompileErrors.Item` for each `items_len`. - pub const CompileErrors = struct { - items_len: u32, - - /// Trailing: `note_payload_index: u32` for each `notes_len`. - /// It's a payload index of another `Item`. - pub const Item = struct { - /// null terminated string index - msg: NullTerminatedString, - node: Ast.Node.Index, - /// If node is 0 then this will be populated. - token: Ast.TokenIndex, - /// Can be used in combination with `token`. - byte_offset: u32, - /// 0 or a payload index of a `Block`, each is a payload - /// index of another `Item`. - notes: u32, - - pub fn notesLen(item: Item, zir: Zir) u32 { - if (item.notes == 0) return 0; - const block = zir.extraData(Block, item.notes); - return block.data.body_len; - } - }; - }; - - /// Trailing: for each `imports_len` there is an Item - pub const Imports = struct { - imports_len: u32, - - pub const Item = struct { - /// null terminated string index - name: NullTerminatedString, - /// points to the import name - token: Ast.TokenIndex, - }; - }; - - pub const LineColumn = struct { - line: u32, - column: u32, - }; - - pub const ArrayInit = struct { - ty: Ref, - init_count: u32, - }; - - pub const Src = struct { - node: i32, - line: u32, - column: u32, - }; - - pub const DeferErrCode = struct { - remapped_err_code: Index, - index: u32, - len: u32, - }; - - pub const ValidateDestructure = struct { - /// The value being destructured. - operand: Ref, - /// The `destructure_assign` node. - destructure_node: i32, - /// The expected field count. - expect_len: u32, - }; - - pub const ArrayMul = struct { - /// The result type of the array multiplication operation, or `.none` if none was available. - res_ty: Ref, - /// The LHS of the array multiplication. - lhs: Ref, - /// The RHS of the array multiplication. - rhs: Ref, - }; - - pub const RestoreErrRetIndex = struct { - src_node: i32, - /// If `.none`, restore the trace to its state upon function entry. - block: Ref, - /// If `.none`, restore unconditionally. - operand: Ref, - - pub fn src(self: RestoreErrRetIndex) LazySrcLoc { - return LazySrcLoc.nodeOffset(self.src_node); - } - }; -}; - -pub const SpecialProng = enum { none, @"else", under }; - -pub const DeclIterator = struct { - extra_index: u32, - decls_remaining: u32, - zir: Zir, - - pub fn next(it: *DeclIterator) ?Inst.Index { - if (it.decls_remaining == 0) return null; - const decl_inst: Zir.Inst.Index = @enumFromInt(it.zir.extra[it.extra_index]); - it.extra_index += 1; - it.decls_remaining -= 1; - assert(it.zir.instructions.items(.tag)[@intFromEnum(decl_inst)] == .declaration); - return decl_inst; - } -}; - -pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator { - const tags = zir.instructions.items(.tag); - const datas = zir.instructions.items(.data); - switch (tags[@intFromEnum(decl_inst)]) { - // Functions are allowed and yield no iterations. 
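// --- Illustrative aside (editor's note, not part of the patch) ---
// A sketch of the intended usage of `DeclIterator` above, assuming a `zir`
// value and a container declaration instruction are already in hand:
//
//     var it = zir.declIterator(container_inst);
//     while (it.next()) |decl_inst| {
//         // Every yielded instruction is asserted to be a `declaration`.
//     }
//
// --- end aside ---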
- // There is one case matching this in the extended instruction set below. - .func, .func_inferred, .func_fancy => return .{ - .extra_index = undefined, - .decls_remaining = 0, - .zir = zir, - }, - - .extended => { - const extended = datas[@intFromEnum(decl_inst)].extended; - switch (extended.opcode) { - .struct_decl => { - const small: Inst.StructDecl.Small = @bitCast(extended.small); - var extra_index: u32 = @intCast(extended.operand + @typeInfo(Inst.StructDecl).Struct.fields.len); - extra_index += @intFromBool(small.has_fields_len); - const decls_len = if (small.has_decls_len) decls_len: { - const decls_len = zir.extra[extra_index]; - extra_index += 1; - break :decls_len decls_len; - } else 0; - - if (small.has_backing_int) { - const backing_int_body_len = zir.extra[extra_index]; - extra_index += 1; // backing_int_body_len - if (backing_int_body_len == 0) { - extra_index += 1; // backing_int_ref - } else { - extra_index += backing_int_body_len; // backing_int_body_inst - } - } - - return .{ - .extra_index = extra_index, - .decls_remaining = decls_len, - .zir = zir, - }; - }, - .enum_decl => { - const small: Inst.EnumDecl.Small = @bitCast(extended.small); - var extra_index: u32 = @intCast(extended.operand + @typeInfo(Inst.EnumDecl).Struct.fields.len); - extra_index += @intFromBool(small.has_tag_type); - extra_index += @intFromBool(small.has_body_len); - extra_index += @intFromBool(small.has_fields_len); - const decls_len = if (small.has_decls_len) decls_len: { - const decls_len = zir.extra[extra_index]; - extra_index += 1; - break :decls_len decls_len; - } else 0; - - return .{ - .extra_index = extra_index, - .decls_remaining = decls_len, - .zir = zir, - }; - }, - .union_decl => { - const small: Inst.UnionDecl.Small = @bitCast(extended.small); - var extra_index: u32 = @intCast(extended.operand + @typeInfo(Inst.UnionDecl).Struct.fields.len); - extra_index += @intFromBool(small.has_tag_type); - extra_index += @intFromBool(small.has_body_len); - extra_index += @intFromBool(small.has_fields_len); - const decls_len = if (small.has_decls_len) decls_len: { - const decls_len = zir.extra[extra_index]; - extra_index += 1; - break :decls_len decls_len; - } else 0; - - return .{ - .extra_index = extra_index, - .decls_remaining = decls_len, - .zir = zir, - }; - }, - .opaque_decl => { - const small: Inst.OpaqueDecl.Small = @bitCast(extended.small); - var extra_index: u32 = @intCast(extended.operand + @typeInfo(Inst.OpaqueDecl).Struct.fields.len); - const decls_len = if (small.has_decls_len) decls_len: { - const decls_len = zir.extra[extra_index]; - extra_index += 1; - break :decls_len decls_len; - } else 0; - - return .{ - .extra_index = extra_index, - .decls_remaining = decls_len, - .zir = zir, - }; - }, - else => unreachable, - } - }, - else => unreachable, - } -} - -/// The iterator would have to allocate memory anyway to iterate. So here we populate -/// an ArrayList as the result. 
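// --- Illustrative aside (editor's note, not part of the patch) ---
// A sketch of driving `findDecls` below with a caller-owned list; `gpa` and
// `decl_inst` are placeholders for illustration:
//
//     var list = std.ArrayList(Zir.Inst.Index).init(gpa);
//     defer list.deinit();
//     try zir.findDecls(&list, decl_inst);
//     for (list.items) |inner| { ... } // decls found in the declaration's bodies
//
// (The function clears the list itself, so one list can be reused across calls.)
// --- end aside ---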
-pub fn findDecls(zir: Zir, list: *std.ArrayList(Inst.Index), decl_inst: Zir.Inst.Index) !void { - list.clearRetainingCapacity(); - const declaration, const extra_end = zir.getDeclaration(decl_inst); - const bodies = declaration.getBodies(extra_end, zir); - - try zir.findDeclsBody(list, bodies.value_body); - if (bodies.align_body) |b| try zir.findDeclsBody(list, b); - if (bodies.linksection_body) |b| try zir.findDeclsBody(list, b); - if (bodies.addrspace_body) |b| try zir.findDeclsBody(list, b); -} - -fn findDeclsInner( - zir: Zir, - list: *std.ArrayList(Inst.Index), - inst: Inst.Index, -) Allocator.Error!void { - const tags = zir.instructions.items(.tag); - const datas = zir.instructions.items(.data); - - switch (tags[@intFromEnum(inst)]) { - // Function instructions are interesting and have a body. - .func, - .func_inferred, - => { - try list.append(inst); - - const inst_data = datas[@intFromEnum(inst)].pl_node; - const extra = zir.extraData(Inst.Func, inst_data.payload_index); - var extra_index: usize = extra.end; - switch (extra.data.ret_body_len) { - 0 => {}, - 1 => extra_index += 1, - else => { - const body = zir.bodySlice(extra_index, extra.data.ret_body_len); - extra_index += body.len; - try zir.findDeclsBody(list, body); - }, - } - const body = zir.bodySlice(extra_index, extra.data.body_len); - return zir.findDeclsBody(list, body); - }, - .func_fancy => { - try list.append(inst); - - const inst_data = datas[@intFromEnum(inst)].pl_node; - const extra = zir.extraData(Inst.FuncFancy, inst_data.payload_index); - var extra_index: usize = extra.end; - extra_index += @intFromBool(extra.data.bits.has_lib_name); - - if (extra.data.bits.has_align_body) { - const body_len = zir.extra[extra_index]; - extra_index += 1; - const body = zir.bodySlice(extra_index, body_len); - try zir.findDeclsBody(list, body); - extra_index += body.len; - } else if (extra.data.bits.has_align_ref) { - extra_index += 1; - } - - if (extra.data.bits.has_addrspace_body) { - const body_len = zir.extra[extra_index]; - extra_index += 1; - const body = zir.bodySlice(extra_index, body_len); - try zir.findDeclsBody(list, body); - extra_index += body.len; - } else if (extra.data.bits.has_addrspace_ref) { - extra_index += 1; - } - - if (extra.data.bits.has_section_body) { - const body_len = zir.extra[extra_index]; - extra_index += 1; - const body = zir.bodySlice(extra_index, body_len); - try zir.findDeclsBody(list, body); - extra_index += body.len; - } else if (extra.data.bits.has_section_ref) { - extra_index += 1; - } - - if (extra.data.bits.has_cc_body) { - const body_len = zir.extra[extra_index]; - extra_index += 1; - const body = zir.bodySlice(extra_index, body_len); - try zir.findDeclsBody(list, body); - extra_index += body.len; - } else if (extra.data.bits.has_cc_ref) { - extra_index += 1; - } - - if (extra.data.bits.has_ret_ty_body) { - const body_len = zir.extra[extra_index]; - extra_index += 1; - const body = zir.bodySlice(extra_index, body_len); - try zir.findDeclsBody(list, body); - extra_index += body.len; - } else if (extra.data.bits.has_ret_ty_ref) { - extra_index += 1; - } - - extra_index += @intFromBool(extra.data.bits.has_any_noalias); - - const body = zir.bodySlice(extra_index, extra.data.body_len); - return zir.findDeclsBody(list, body); - }, - .extended => { - const extended = datas[@intFromEnum(inst)].extended; - switch (extended.opcode) { - - // Decl instructions are interesting but have no body. - // TODO yes they do have a body actually. recurse over them just like block instructions. 
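// --- Illustrative aside (editor's note, not part of the patch) ---
// The repeated blocks above all follow one generic walk over `FuncFancy`
// trailing data: for each of align/addrspace/section/cc/ret_ty, either a
// length-prefixed body or a single `Ref` may be present, and the cursor
// advances accordingly (`has_x_*` stands in for the concrete flag names):
//
//     if (bits.has_x_body) {
//         const body_len = zir.extra[extra_index];
//         extra_index += 1;
//         const body = zir.bodySlice(extra_index, body_len);
//         try zir.findDeclsBody(list, body);
//         extra_index += body.len;
//     } else if (bits.has_x_ref) {
//         extra_index += 1;
//     }
//
// --- end aside ---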
- .struct_decl, - .union_decl, - .enum_decl, - .opaque_decl, - => return list.append(inst), - - else => return, - } - }, - - // Block instructions, recurse over the bodies. - - .block, .block_comptime, .block_inline => { - const inst_data = datas[@intFromEnum(inst)].pl_node; - const extra = zir.extraData(Inst.Block, inst_data.payload_index); - const body = zir.bodySlice(extra.end, extra.data.body_len); - return zir.findDeclsBody(list, body); - }, - .condbr, .condbr_inline => { - const inst_data = datas[@intFromEnum(inst)].pl_node; - const extra = zir.extraData(Inst.CondBr, inst_data.payload_index); - const then_body = zir.bodySlice(extra.end, extra.data.then_body_len); - const else_body = zir.bodySlice(extra.end + then_body.len, extra.data.else_body_len); - try zir.findDeclsBody(list, then_body); - try zir.findDeclsBody(list, else_body); - }, - .@"try", .try_ptr => { - const inst_data = datas[@intFromEnum(inst)].pl_node; - const extra = zir.extraData(Inst.Try, inst_data.payload_index); - const body = zir.bodySlice(extra.end, extra.data.body_len); - try zir.findDeclsBody(list, body); - }, - .switch_block => return findDeclsSwitch(zir, list, inst), - - .suspend_block => @panic("TODO iterate suspend block"), - - else => return, // Regular instruction, not interesting. - } -} - -fn findDeclsSwitch( - zir: Zir, - list: *std.ArrayList(Inst.Index), - inst: Inst.Index, -) Allocator.Error!void { - const inst_data = zir.instructions.items(.data)[@intFromEnum(inst)].pl_node; - const extra = zir.extraData(Inst.SwitchBlock, inst_data.payload_index); - - var extra_index: usize = extra.end; - - const multi_cases_len = if (extra.data.bits.has_multi_cases) blk: { - const multi_cases_len = zir.extra[extra_index]; - extra_index += 1; - break :blk multi_cases_len; - } else 0; - - const special_prong = extra.data.bits.specialProng(); - if (special_prong != .none) { - const body_len: u31 = @truncate(zir.extra[extra_index]); - extra_index += 1; - const body = zir.bodySlice(extra_index, body_len); - extra_index += body.len; - - try zir.findDeclsBody(list, body); - } - - { - const scalar_cases_len = extra.data.bits.scalar_cases_len; - for (0..scalar_cases_len) |_| { - extra_index += 1; - const body_len: u31 = @truncate(zir.extra[extra_index]); - extra_index += 1; - const body = zir.bodySlice(extra_index, body_len); - extra_index += body_len; - - try zir.findDeclsBody(list, body); - } - } - { - for (0..multi_cases_len) |_| { - const items_len = zir.extra[extra_index]; - extra_index += 1; - const ranges_len = zir.extra[extra_index]; - extra_index += 1; - const body_len: u31 = @truncate(zir.extra[extra_index]); - extra_index += 1; - const items = zir.refSlice(extra_index, items_len); - extra_index += items_len; - _ = items; - - var range_i: usize = 0; - while (range_i < ranges_len) : (range_i += 1) { - extra_index += 1; - extra_index += 1; - } - - const body = zir.bodySlice(extra_index, body_len); - extra_index += body_len; - - try zir.findDeclsBody(list, body); - } - } -} - -fn findDeclsBody( - zir: Zir, - list: *std.ArrayList(Inst.Index), - body: []const Inst.Index, -) Allocator.Error!void { - for (body) |member| { - try zir.findDeclsInner(list, member); - } -} - -pub const FnInfo = struct { - param_body: []const Inst.Index, - param_body_inst: Inst.Index, - ret_ty_body: []const Inst.Index, - body: []const Inst.Index, - ret_ty_ref: Zir.Inst.Ref, - total_params_len: u32, -}; - -pub fn getParamBody(zir: Zir, fn_inst: Inst.Index) []const Zir.Inst.Index { - const tags = zir.instructions.items(.tag); - const datas = 
zir.instructions.items(.data); - const inst_data = datas[@intFromEnum(fn_inst)].pl_node; - - const param_block_index = switch (tags[@intFromEnum(fn_inst)]) { - .func, .func_inferred => blk: { - const extra = zir.extraData(Inst.Func, inst_data.payload_index); - break :blk extra.data.param_block; - }, - .func_fancy => blk: { - const extra = zir.extraData(Inst.FuncFancy, inst_data.payload_index); - break :blk extra.data.param_block; - }, - else => unreachable, - }; - - switch (tags[@intFromEnum(param_block_index)]) { - .block, .block_comptime, .block_inline => { - const param_block = zir.extraData(Inst.Block, datas[@intFromEnum(param_block_index)].pl_node.payload_index); - return zir.bodySlice(param_block.end, param_block.data.body_len); - }, - .declaration => { - const decl, const extra_end = zir.getDeclaration(param_block_index); - return decl.getBodies(extra_end, zir).value_body; - }, - else => unreachable, - } -} - -pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo { - const tags = zir.instructions.items(.tag); - const datas = zir.instructions.items(.data); - const info: struct { - param_block: Inst.Index, - body: []const Inst.Index, - ret_ty_ref: Inst.Ref, - ret_ty_body: []const Inst.Index, - } = switch (tags[@intFromEnum(fn_inst)]) { - .func, .func_inferred => blk: { - const inst_data = datas[@intFromEnum(fn_inst)].pl_node; - const extra = zir.extraData(Inst.Func, inst_data.payload_index); - - var extra_index: usize = extra.end; - var ret_ty_ref: Inst.Ref = .none; - var ret_ty_body: []const Inst.Index = &.{}; - - switch (extra.data.ret_body_len) { - 0 => { - ret_ty_ref = .void_type; - }, - 1 => { - ret_ty_ref = @enumFromInt(zir.extra[extra_index]); - extra_index += 1; - }, - else => { - ret_ty_body = zir.bodySlice(extra_index, extra.data.ret_body_len); - extra_index += ret_ty_body.len; - }, - } - - const body = zir.bodySlice(extra_index, extra.data.body_len); - extra_index += body.len; - - break :blk .{ - .param_block = extra.data.param_block, - .ret_ty_ref = ret_ty_ref, - .ret_ty_body = ret_ty_body, - .body = body, - }; - }, - .func_fancy => blk: { - const inst_data = datas[@intFromEnum(fn_inst)].pl_node; - const extra = zir.extraData(Inst.FuncFancy, inst_data.payload_index); - - var extra_index: usize = extra.end; - var ret_ty_ref: Inst.Ref = .void_type; - var ret_ty_body: []const Inst.Index = &.{}; - - extra_index += @intFromBool(extra.data.bits.has_lib_name); - if (extra.data.bits.has_align_body) { - extra_index += zir.extra[extra_index] + 1; - } else if (extra.data.bits.has_align_ref) { - extra_index += 1; - } - if (extra.data.bits.has_addrspace_body) { - extra_index += zir.extra[extra_index] + 1; - } else if (extra.data.bits.has_addrspace_ref) { - extra_index += 1; - } - if (extra.data.bits.has_section_body) { - extra_index += zir.extra[extra_index] + 1; - } else if (extra.data.bits.has_section_ref) { - extra_index += 1; - } - if (extra.data.bits.has_cc_body) { - extra_index += zir.extra[extra_index] + 1; - } else if (extra.data.bits.has_cc_ref) { - extra_index += 1; - } - if (extra.data.bits.has_ret_ty_body) { - const body_len = zir.extra[extra_index]; - extra_index += 1; - ret_ty_body = zir.bodySlice(extra_index, body_len); - extra_index += ret_ty_body.len; - } else if (extra.data.bits.has_ret_ty_ref) { - ret_ty_ref = @enumFromInt(zir.extra[extra_index]); - extra_index += 1; - } - - extra_index += @intFromBool(extra.data.bits.has_any_noalias); - - const body = zir.bodySlice(extra_index, extra.data.body_len); - extra_index += body.len; - break :blk .{ - .param_block = 
extra.data.param_block, - .ret_ty_ref = ret_ty_ref, - .ret_ty_body = ret_ty_body, - .body = body, - }; - }, - else => unreachable, - }; - const param_body = switch (tags[@intFromEnum(info.param_block)]) { - .block, .block_comptime, .block_inline => param_body: { - const param_block = zir.extraData(Inst.Block, datas[@intFromEnum(info.param_block)].pl_node.payload_index); - break :param_body zir.bodySlice(param_block.end, param_block.data.body_len); - }, - .declaration => param_body: { - const decl, const extra_end = zir.getDeclaration(info.param_block); - break :param_body decl.getBodies(extra_end, zir).value_body; - }, - else => unreachable, - }; - var total_params_len: u32 = 0; - for (param_body) |inst| { - switch (tags[@intFromEnum(inst)]) { - .param, .param_comptime, .param_anytype, .param_anytype_comptime => { - total_params_len += 1; - }, - else => continue, - } - } - return .{ - .param_body = param_body, - .param_body_inst = info.param_block, - .ret_ty_body = info.ret_ty_body, - .ret_ty_ref = info.ret_ty_ref, - .body = info.body, - .total_params_len = total_params_len, - }; -} - -pub fn getDeclaration(zir: Zir, inst: Zir.Inst.Index) struct { Inst.Declaration, u32 } { - assert(zir.instructions.items(.tag)[@intFromEnum(inst)] == .declaration); - const pl_node = zir.instructions.items(.data)[@intFromEnum(inst)].pl_node; - const extra = zir.extraData(Inst.Declaration, pl_node.payload_index); - return .{ - extra.data, - @intCast(extra.end), - }; -} - -pub fn getAssociatedSrcHash(zir: Zir, inst: Zir.Inst.Index) ?std.zig.SrcHash { - const tag = zir.instructions.items(.tag); - const data = zir.instructions.items(.data); - switch (tag[@intFromEnum(inst)]) { - .declaration => { - const pl_node = data[@intFromEnum(inst)].pl_node; - const extra = zir.extraData(Inst.Declaration, pl_node.payload_index); - return @bitCast([4]u32{ - extra.data.src_hash_0, - extra.data.src_hash_1, - extra.data.src_hash_2, - extra.data.src_hash_3, - }); - }, - .func, .func_inferred => { - const pl_node = data[@intFromEnum(inst)].pl_node; - const extra = zir.extraData(Inst.Func, pl_node.payload_index); - if (extra.data.body_len == 0) { - // Function type or extern fn - no associated hash - return null; - } - const extra_index = extra.end + - 1 + - extra.data.body_len + - @typeInfo(Inst.Func.SrcLocs).Struct.fields.len; - return @bitCast([4]u32{ - zir.extra[extra_index + 0], - zir.extra[extra_index + 1], - zir.extra[extra_index + 2], - zir.extra[extra_index + 3], - }); - }, - .func_fancy => { - const pl_node = data[@intFromEnum(inst)].pl_node; - const extra = zir.extraData(Inst.FuncFancy, pl_node.payload_index); - if (extra.data.body_len == 0) { - // Function type or extern fn - no associated hash - return null; - } - const bits = extra.data.bits; - var extra_index = extra.end; - extra_index += @intFromBool(bits.has_lib_name); - if (bits.has_align_body) { - const body_len = zir.extra[extra_index]; - extra_index += 1 + body_len; - } else extra_index += @intFromBool(bits.has_align_ref); - if (bits.has_addrspace_body) { - const body_len = zir.extra[extra_index]; - extra_index += 1 + body_len; - } else extra_index += @intFromBool(bits.has_addrspace_ref); - if (bits.has_section_body) { - const body_len = zir.extra[extra_index]; - extra_index += 1 + body_len; - } else extra_index += @intFromBool(bits.has_section_ref); - if (bits.has_cc_body) { - const body_len = zir.extra[extra_index]; - extra_index += 1 + body_len; - } else extra_index += @intFromBool(bits.has_cc_ref); - if (bits.has_ret_ty_body) { - const body_len = 
zir.extra[extra_index]; - extra_index += 1 + body_len; - } else extra_index += @intFromBool(bits.has_ret_ty_ref); - extra_index += @intFromBool(bits.has_any_noalias); - extra_index += extra.data.body_len; - extra_index += @typeInfo(Zir.Inst.Func.SrcLocs).Struct.fields.len; - return @bitCast([4]u32{ - zir.extra[extra_index + 0], - zir.extra[extra_index + 1], - zir.extra[extra_index + 2], - zir.extra[extra_index + 3], - }); - }, - .extended => {}, - else => return null, - } - const extended = data[@intFromEnum(inst)].extended; - switch (extended.opcode) { - .struct_decl => { - const extra = zir.extraData(Inst.StructDecl, extended.operand).data; - return @bitCast([4]u32{ - extra.fields_hash_0, - extra.fields_hash_1, - extra.fields_hash_2, - extra.fields_hash_3, - }); - }, - .union_decl => { - const extra = zir.extraData(Inst.UnionDecl, extended.operand).data; - return @bitCast([4]u32{ - extra.fields_hash_0, - extra.fields_hash_1, - extra.fields_hash_2, - extra.fields_hash_3, - }); - }, - .enum_decl => { - const extra = zir.extraData(Inst.EnumDecl, extended.operand).data; - return @bitCast([4]u32{ - extra.fields_hash_0, - extra.fields_hash_1, - extra.fields_hash_2, - extra.fields_hash_3, - }); - }, - else => return null, - } -} diff --git a/src/codegen.zig b/src/codegen.zig index 7bcba80065..4856eff7d8 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -21,7 +21,7 @@ const Target = std.Target; const Type = @import("type.zig").Type; const TypedValue = @import("TypedValue.zig"); const Value = @import("Value.zig"); -const Zir = @import("Zir.zig"); +const Zir = std.zig.Zir; const Alignment = InternPool.Alignment; pub const Result = union(enum) { diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 7641da4a94..fb8d1e1e8e 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -10,7 +10,6 @@ const Type = @import("../type.zig").Type; const Value = @import("../Value.zig"); const LazySrcLoc = std.zig.LazySrcLoc; const Air = @import("../Air.zig"); -const Zir = @import("../Zir.zig"); const Liveness = @import("../Liveness.zig"); const InternPool = @import("../InternPool.zig"); diff --git a/src/crash_report.zig b/src/crash_report.zig index cedce84d94..0c2354af4e 100644 --- a/src/crash_report.zig +++ b/src/crash_report.zig @@ -8,7 +8,7 @@ const native_os = builtin.os.tag; const Module = @import("Module.zig"); const Sema = @import("Sema.zig"); -const Zir = @import("Zir.zig"); +const Zir = std.zig.Zir; const Decl = Module.Decl; pub const is_enabled = builtin.mode == .Debug; diff --git a/src/main.zig b/src/main.zig index bb8b25c60e..9522734dea 100644 --- a/src/main.zig +++ b/src/main.zig @@ -6655,7 +6655,7 @@ fn cmdAstCheck( arena: Allocator, args: []const []const u8, ) !void { - const Zir = @import("Zir.zig"); + const Zir = std.zig.Zir; var color: Color = .auto; var want_output_text = false; @@ -6817,7 +6817,7 @@ fn cmdDumpZir( args: []const []const u8, ) !void { _ = arena; - const Zir = @import("Zir.zig"); + const Zir = std.zig.Zir; const cache_file = args[0]; @@ -6877,7 +6877,7 @@ fn cmdChangelist( args: []const []const u8, ) !void { const color: Color = .auto; - const Zir = @import("Zir.zig"); + const Zir = std.zig.Zir; const old_source_file = args[0]; const new_source_file = args[1]; diff --git a/src/print_zir.zig b/src/print_zir.zig index b4c3a7ce24..42c2576ac5 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -5,7 +5,7 @@ const assert = std.debug.assert; const Ast = std.zig.Ast; const InternPool = @import("InternPool.zig"); -const Zir = @import("Zir.zig"); +const Zir = 
std.zig.Zir; const Module = @import("Module.zig"); const LazySrcLoc = std.zig.LazySrcLoc; diff --git a/src/reduce.zig b/src/reduce.zig index 027557bea5..35456fe7c3 100644 --- a/src/reduce.zig +++ b/src/reduce.zig @@ -6,7 +6,7 @@ const fatal = @import("./main.zig").fatal; const Ast = std.zig.Ast; const Walk = @import("reduce/Walk.zig"); const AstGen = @import("AstGen.zig"); -const Zir = @import("Zir.zig"); +const Zir = std.zig.Zir; const usage = \\zig reduce [options] ./checker root_source_file.zig [-- [argv]] -- cgit v1.2.3 From b116063e02bf2bb1975f5ae862fcd25f8fbeda09 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 26 Feb 2024 21:51:19 -0700 Subject: move AstGen to std.zig.AstGen Part of an effort to ship more of the compiler in source form. --- CMakeLists.txt | 2 +- lib/std/zig.zig | 1 + lib/std/zig/AstGen.zig | 13661 +++++++++++++++++++++++++++++++++++++++++++++++ src/AstGen.zig | 13661 ----------------------------------------------- src/Builtin.zig | 2 +- src/Module.zig | 2 +- src/main.zig | 2 +- src/reduce.zig | 2 +- 8 files changed, 13667 insertions(+), 13666 deletions(-) create mode 100644 lib/std/zig/AstGen.zig delete mode 100644 src/AstGen.zig (limited to 'src/Module.zig') diff --git a/CMakeLists.txt b/CMakeLists.txt index 7a62cbe9c0..7b7c120ab2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -505,6 +505,7 @@ set(ZIG_STAGE2_SOURCES "${CMAKE_SOURCE_DIR}/lib/std/unicode.zig" "${CMAKE_SOURCE_DIR}/lib/std/zig.zig" "${CMAKE_SOURCE_DIR}/lib/std/zig/Ast.zig" + "${CMAKE_SOURCE_DIR}/lib/std/zig/AstGen.zig" "${CMAKE_SOURCE_DIR}/lib/std/zig/AstRlAnnotate.zig" "${CMAKE_SOURCE_DIR}/lib/std/zig/c_builtins.zig" "${CMAKE_SOURCE_DIR}/lib/std/zig/Parse.zig" @@ -517,7 +518,6 @@ set(ZIG_STAGE2_SOURCES "${CMAKE_SOURCE_DIR}/lib/std/zig/tokenizer.zig" "${CMAKE_SOURCE_DIR}/lib/std/zig/Zir.zig" "${CMAKE_SOURCE_DIR}/src/Air.zig" - "${CMAKE_SOURCE_DIR}/src/AstGen.zig" "${CMAKE_SOURCE_DIR}/src/Compilation.zig" "${CMAKE_SOURCE_DIR}/src/Compilation/Config.zig" "${CMAKE_SOURCE_DIR}/src/Liveness.zig" diff --git a/lib/std/zig.zig b/lib/std/zig.zig index c5da47d5d8..9085b23de1 100644 --- a/lib/std/zig.zig +++ b/lib/std/zig.zig @@ -10,6 +10,7 @@ pub const string_literal = @import("zig/string_literal.zig"); pub const number_literal = @import("zig/number_literal.zig"); pub const primitives = @import("zig/primitives.zig"); pub const Ast = @import("zig/Ast.zig"); +pub const AstGen = @import("zig/AstGen.zig"); pub const Zir = @import("zig/Zir.zig"); pub const system = @import("zig/system.zig"); /// Deprecated: use `std.Target.Query`. diff --git a/lib/std/zig/AstGen.zig b/lib/std/zig/AstGen.zig new file mode 100644 index 0000000000..20b1077420 --- /dev/null +++ b/lib/std/zig/AstGen.zig @@ -0,0 +1,13661 @@ +//! Ingests an AST and produces ZIR code. +const AstGen = @This(); + +const std = @import("std"); +const Ast = std.zig.Ast; +const mem = std.mem; +const Allocator = std.mem.Allocator; +const assert = std.debug.assert; +const ArrayListUnmanaged = std.ArrayListUnmanaged; +const StringIndexAdapter = std.hash_map.StringIndexAdapter; +const StringIndexContext = std.hash_map.StringIndexContext; + +const isPrimitive = std.zig.primitives.isPrimitive; + +const Zir = std.zig.Zir; +const BuiltinFn = std.zig.BuiltinFn; +const AstRlAnnotate = std.zig.AstRlAnnotate; + +gpa: Allocator, +tree: *const Ast, +/// The set of nodes which, given the choice, must expose a result pointer to +/// sub-expressions. See `AstRlAnnotate` for details. 
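// --- Illustrative aside (editor's note, not part of the patch) ---
// With AstGen in the standard library, source can be lowered to ZIR without
// the compiler sources. A minimal sketch, assuming `gpa` and a null-terminated
// `source` buffer (both placeholders for illustration):
//
//     var tree = try std.zig.Ast.parse(gpa, source, .zig);
//     defer tree.deinit(gpa);
//     var zir = try std.zig.AstGen.generate(gpa, tree);
//     defer zir.deinit(gpa);
//
// --- end aside ---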
+nodes_need_rl: *const AstRlAnnotate.RlNeededSet, +instructions: std.MultiArrayList(Zir.Inst) = .{}, +extra: ArrayListUnmanaged(u32) = .{}, +string_bytes: ArrayListUnmanaged(u8) = .{}, +/// Tracks the current byte offset within the source file. +/// Used to populate line deltas in the ZIR. AstGen maintains +/// this "cursor" throughout the entire AST lowering process in order +/// to avoid starting over the line/column scan for every declaration, which +/// would be O(N^2). +source_offset: u32 = 0, +/// Tracks the corresponding line of `source_offset`. +/// This value is absolute. +source_line: u32 = 0, +/// Tracks the corresponding column of `source_offset`. +/// This value is absolute. +source_column: u32 = 0, +/// Used for temporary allocations; freed after AstGen is complete. +/// The resulting ZIR code has no references to anything in this arena. +arena: Allocator, +string_table: std.HashMapUnmanaged(u32, void, StringIndexContext, std.hash_map.default_max_load_percentage) = .{}, +compile_errors: ArrayListUnmanaged(Zir.Inst.CompileErrors.Item) = .{}, +/// The topmost block of the current function. +fn_block: ?*GenZir = null, +fn_var_args: bool = false, +/// The return type of the current function. This may be a trivial `Ref`, or +/// otherwise it refers to a `ret_type` instruction. +fn_ret_ty: Zir.Inst.Ref = .none, +/// Maps string table indexes to the first `@import` ZIR instruction +/// that uses this string as the operand. +imports: std.AutoArrayHashMapUnmanaged(Zir.NullTerminatedString, Ast.TokenIndex) = .{}, +/// Used for temporary storage when building payloads. +scratch: std.ArrayListUnmanaged(u32) = .{}, +/// Whenever a `ref` instruction is needed, it is created and saved in this +/// table instead of being immediately appended to the current block body. +/// Then, when the instruction is being added to the parent block (typically from +/// setBlockBody), if it has a ref_table entry, then the ref instruction is added +/// there. This makes sure two properties are upheld: +/// 1. All pointers to the same locals return the same address. This is required +/// to be compliant with the language specification. +/// 2. `ref` instructions will dominate their uses. This is a required property +/// of ZIR. +/// The key is the ref operand; the value is the ref instruction. 
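// --- Illustrative aside (editor's note, not part of the patch) ---
// Property 1 above in concrete terms, with `foo` as a placeholder: in source
// like
//
//     const x = foo();
//     const a = &x;
//     const b = &x;
//
// both `&x` expressions resolve through the same `ref_table` entry, so a
// single `ref` instruction is emitted and `a` and `b` observe the same
// address, as the language specification requires.
// --- end aside ---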
+ref_table: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{},
+
+const InnerError = error{ OutOfMemory, AnalysisFail };
+
+fn addExtra(astgen: *AstGen, extra: anytype) Allocator.Error!u32 {
+    const fields = std.meta.fields(@TypeOf(extra));
+    try astgen.extra.ensureUnusedCapacity(astgen.gpa, fields.len);
+    return addExtraAssumeCapacity(astgen, extra);
+}
+
+fn addExtraAssumeCapacity(astgen: *AstGen, extra: anytype) u32 {
+    const fields = std.meta.fields(@TypeOf(extra));
+    const extra_index: u32 = @intCast(astgen.extra.items.len);
+    astgen.extra.items.len += fields.len;
+    setExtra(astgen, extra_index, extra);
+    return extra_index;
+}
+
+fn setExtra(astgen: *AstGen, index: usize, extra: anytype) void {
+    const fields = std.meta.fields(@TypeOf(extra));
+    var i = index;
+    inline for (fields) |field| {
+        astgen.extra.items[i] = switch (field.type) {
+            u32 => @field(extra, field.name),
+
+            Zir.Inst.Ref,
+            Zir.Inst.Index,
+            Zir.Inst.Declaration.Name,
+            Zir.NullTerminatedString,
+            => @intFromEnum(@field(extra, field.name)),
+
+            i32,
+            Zir.Inst.Call.Flags,
+            Zir.Inst.BuiltinCall.Flags,
+            Zir.Inst.SwitchBlock.Bits,
+            Zir.Inst.SwitchBlockErrUnion.Bits,
+            Zir.Inst.FuncFancy.Bits,
+            Zir.Inst.Declaration.Flags,
+            => @bitCast(@field(extra, field.name)),
+
+            else => @compileError("bad field type"),
+        };
+        i += 1;
+    }
+}
+
+fn reserveExtra(astgen: *AstGen, size: usize) Allocator.Error!u32 {
+    const extra_index: u32 = @intCast(astgen.extra.items.len);
+    try astgen.extra.resize(astgen.gpa, extra_index + size);
+    return extra_index;
+}
+
+fn appendRefs(astgen: *AstGen, refs: []const Zir.Inst.Ref) !void {
+    return astgen.extra.appendSlice(astgen.gpa, @ptrCast(refs));
+}
+
+fn appendRefsAssumeCapacity(astgen: *AstGen, refs: []const Zir.Inst.Ref) void {
+    astgen.extra.appendSliceAssumeCapacity(@ptrCast(refs));
+}
+
+pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir {
+    var arena = std.heap.ArenaAllocator.init(gpa);
+    defer arena.deinit();
+
+    var nodes_need_rl = try AstRlAnnotate.annotate(gpa, arena.allocator(), tree);
+    defer nodes_need_rl.deinit(gpa);
+
+    var astgen: AstGen = .{
+        .gpa = gpa,
+        .arena = arena.allocator(),
+        .tree = &tree,
+        .nodes_need_rl = &nodes_need_rl,
+    };
+    defer astgen.deinit(gpa);
+
+    // String table index 0 is reserved for `NullTerminatedString.empty`.
+    try astgen.string_bytes.append(gpa, 0);
+
+    // We expect at least as many ZIR instructions and extra data items
+    // as AST nodes.
+    try astgen.instructions.ensureTotalCapacity(gpa, tree.nodes.len);
+
+    // First few indexes of extra are reserved and set at the end.
+    const reserved_count = @typeInfo(Zir.ExtraIndex).Enum.fields.len;
+    try astgen.extra.ensureTotalCapacity(gpa, tree.nodes.len + reserved_count);
+    astgen.extra.items.len += reserved_count;
+
+    var top_scope: Scope.Top = .{};
+
+    var gz_instructions: std.ArrayListUnmanaged(Zir.Inst.Index) = .{};
+    var gen_scope: GenZir = .{
+        .is_comptime = true,
+        .parent = &top_scope.base,
+        .anon_name_strategy = .parent,
+        .decl_node_index = 0,
+        .decl_line = 0,
+        .astgen = &astgen,
+        .instructions = &gz_instructions,
+        .instructions_top = 0,
+    };
+    defer gz_instructions.deinit(gpa);
+
+    // The AST -> ZIR lowering process assumes an AST that does not have any
+    // parse errors.
+    if (tree.errors.len == 0) {
+        if (AstGen.structDeclInner(
+            &gen_scope,
+            &gen_scope.base,
+            0,
+            tree.containerDeclRoot(),
+            .Auto,
+            0,
+        )) |struct_decl_ref| {
+            assert(struct_decl_ref.toIndex().?
== .main_struct_inst); + } else |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail => {}, // Handled via compile_errors below. + } + } else { + try lowerAstErrors(&astgen); + } + + const err_index = @intFromEnum(Zir.ExtraIndex.compile_errors); + if (astgen.compile_errors.items.len == 0) { + astgen.extra.items[err_index] = 0; + } else { + try astgen.extra.ensureUnusedCapacity(gpa, 1 + astgen.compile_errors.items.len * + @typeInfo(Zir.Inst.CompileErrors.Item).Struct.fields.len); + + astgen.extra.items[err_index] = astgen.addExtraAssumeCapacity(Zir.Inst.CompileErrors{ + .items_len = @intCast(astgen.compile_errors.items.len), + }); + + for (astgen.compile_errors.items) |item| { + _ = astgen.addExtraAssumeCapacity(item); + } + } + + const imports_index = @intFromEnum(Zir.ExtraIndex.imports); + if (astgen.imports.count() == 0) { + astgen.extra.items[imports_index] = 0; + } else { + try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.Imports).Struct.fields.len + + astgen.imports.count() * @typeInfo(Zir.Inst.Imports.Item).Struct.fields.len); + + astgen.extra.items[imports_index] = astgen.addExtraAssumeCapacity(Zir.Inst.Imports{ + .imports_len = @intCast(astgen.imports.count()), + }); + + var it = astgen.imports.iterator(); + while (it.next()) |entry| { + _ = astgen.addExtraAssumeCapacity(Zir.Inst.Imports.Item{ + .name = entry.key_ptr.*, + .token = entry.value_ptr.*, + }); + } + } + + return Zir{ + .instructions = astgen.instructions.toOwnedSlice(), + .string_bytes = try astgen.string_bytes.toOwnedSlice(gpa), + .extra = try astgen.extra.toOwnedSlice(gpa), + }; +} + +fn deinit(astgen: *AstGen, gpa: Allocator) void { + astgen.instructions.deinit(gpa); + astgen.extra.deinit(gpa); + astgen.string_table.deinit(gpa); + astgen.string_bytes.deinit(gpa); + astgen.compile_errors.deinit(gpa); + astgen.imports.deinit(gpa); + astgen.scratch.deinit(gpa); + astgen.ref_table.deinit(gpa); +} + +const ResultInfo = struct { + /// The semantics requested for the result location + rl: Loc, + + /// The "operator" consuming the result location + ctx: Context = .none, + + /// Turns a `coerced_ty` back into a `ty`. Should be called at branch points + /// such as if and switch expressions. + fn br(ri: ResultInfo) ResultInfo { + return switch (ri.rl) { + .coerced_ty => |ty| .{ + .rl = .{ .ty = ty }, + .ctx = ri.ctx, + }, + else => ri, + }; + } + + fn zirTag(ri: ResultInfo) Zir.Inst.Tag { + switch (ri.rl) { + .ty => return switch (ri.ctx) { + .shift_op => .as_shift_operand, + else => .as_node, + }, + else => unreachable, + } + } + + const Loc = union(enum) { + /// The expression is the right-hand side of assignment to `_`. Only the side-effects of the + /// expression should be generated. The result instruction from the expression must + /// be ignored. + discard, + /// The expression has an inferred type, and it will be evaluated as an rvalue. + none, + /// The expression will be coerced into this type, but it will be evaluated as an rvalue. + ty: Zir.Inst.Ref, + /// Same as `ty` but it is guaranteed that Sema will additionally perform the coercion, + /// so no `as` instruction needs to be emitted. + coerced_ty: Zir.Inst.Ref, + /// The expression must generate a pointer rather than a value. For example, the left hand side + /// of an assignment uses this kind of result location. 
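+        /// For example, in `x.y = 5;` the left-hand side is lowered with
+        /// `.ref` so that a field pointer is produced for the subsequent store.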
+ ref, + /// The expression must generate a pointer rather than a value, and the pointer will be coerced + /// by other code to this type, which is guaranteed by earlier instructions to be a pointer type. + ref_coerced_ty: Zir.Inst.Ref, + /// The expression must store its result into this typed pointer. The result instruction + /// from the expression must be ignored. + ptr: PtrResultLoc, + /// The expression must store its result into this allocation, which has an inferred type. + /// The result instruction from the expression must be ignored. + /// Always an instruction with tag `alloc_inferred`. + inferred_ptr: Zir.Inst.Ref, + /// The expression has a sequence of pointers to store its results into due to a destructure + /// operation. Each of these pointers may or may not have an inferred type. + destructure: struct { + /// The AST node of the destructure operation itself. + src_node: Ast.Node.Index, + /// The pointers to store results into. + components: []const DestructureComponent, + }, + + const DestructureComponent = union(enum) { + typed_ptr: PtrResultLoc, + inferred_ptr: Zir.Inst.Ref, + discard, + }; + + const PtrResultLoc = struct { + inst: Zir.Inst.Ref, + src_node: ?Ast.Node.Index = null, + }; + + /// Find the result type for a cast builtin given the result location. + /// If the location does not have a known result type, emits an error on + /// the given node. + fn resultType(rl: Loc, gz: *GenZir, node: Ast.Node.Index) !?Zir.Inst.Ref { + return switch (rl) { + .discard, .none, .ref, .inferred_ptr, .destructure => null, + .ty, .coerced_ty => |ty_ref| ty_ref, + .ref_coerced_ty => |ptr_ty| try gz.addUnNode(.elem_type, ptr_ty, node), + .ptr => |ptr| { + const ptr_ty = try gz.addUnNode(.typeof, ptr.inst, node); + return try gz.addUnNode(.elem_type, ptr_ty, node); + }, + }; + } + + fn resultTypeForCast(rl: Loc, gz: *GenZir, node: Ast.Node.Index, builtin_name: []const u8) !Zir.Inst.Ref { + const astgen = gz.astgen; + if (try rl.resultType(gz, node)) |ty| return ty; + switch (rl) { + .destructure => |destructure| return astgen.failNodeNotes(node, "{s} must have a known result type", .{builtin_name}, &.{ + try astgen.errNoteNode(destructure.src_node, "destructure expressions do not provide a single result type", .{}), + try astgen.errNoteNode(node, "use @as to provide explicit result type", .{}), + }), + else => return astgen.failNodeNotes(node, "{s} must have a known result type", .{builtin_name}, &.{ + try astgen.errNoteNode(node, "use @as to provide explicit result type", .{}), + }), + } + } + }; + + const Context = enum { + /// The expression is the operand to a return expression. + @"return", + /// The expression is the input to an error-handling operator (if-else, try, or catch). + error_handling_expr, + /// The expression is the right-hand side of a shift operation. + shift_op, + /// The expression is an argument in a function call. + fn_arg, + /// The expression is the right-hand side of an initializer for a `const` variable + const_init, + /// The expression is the right-hand side of an assignment expression. + assignment, + /// No specific operator in particular. 
+ none, + }; +}; + +const coerced_align_ri: ResultInfo = .{ .rl = .{ .coerced_ty = .u29_type } }; +const coerced_addrspace_ri: ResultInfo = .{ .rl = .{ .coerced_ty = .address_space_type } }; +const coerced_linksection_ri: ResultInfo = .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }; +const coerced_type_ri: ResultInfo = .{ .rl = .{ .coerced_ty = .type_type } }; +const coerced_bool_ri: ResultInfo = .{ .rl = .{ .coerced_ty = .bool_type } }; + +fn typeExpr(gz: *GenZir, scope: *Scope, type_node: Ast.Node.Index) InnerError!Zir.Inst.Ref { + return comptimeExpr(gz, scope, coerced_type_ri, type_node); +} + +fn reachableTypeExpr( + gz: *GenZir, + scope: *Scope, + type_node: Ast.Node.Index, + reachable_node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + return reachableExprComptime(gz, scope, coerced_type_ri, type_node, reachable_node, true); +} + +/// Same as `expr` but fails with a compile error if the result type is `noreturn`. +fn reachableExpr( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + reachable_node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + return reachableExprComptime(gz, scope, ri, node, reachable_node, false); +} + +fn reachableExprComptime( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + reachable_node: Ast.Node.Index, + force_comptime: bool, +) InnerError!Zir.Inst.Ref { + const result_inst = if (force_comptime) + try comptimeExpr(gz, scope, ri, node) + else + try expr(gz, scope, ri, node); + + if (gz.refIsNoReturn(result_inst)) { + try gz.astgen.appendErrorNodeNotes(reachable_node, "unreachable code", .{}, &[_]u32{ + try gz.astgen.errNoteNode(node, "control flow is diverted here", .{}), + }); + } + return result_inst; +} + +fn lvalExpr(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const node_tags = tree.nodes.items(.tag); + const main_tokens = tree.nodes.items(.main_token); + switch (node_tags[node]) { + .root => unreachable, + .@"usingnamespace" => unreachable, + .test_decl => unreachable, + .global_var_decl => unreachable, + .local_var_decl => unreachable, + .simple_var_decl => unreachable, + .aligned_var_decl => unreachable, + .switch_case => unreachable, + .switch_case_inline => unreachable, + .switch_case_one => unreachable, + .switch_case_inline_one => unreachable, + .container_field_init => unreachable, + .container_field_align => unreachable, + .container_field => unreachable, + .asm_output => unreachable, + .asm_input => unreachable, + + .assign, + .assign_destructure, + .assign_bit_and, + .assign_bit_or, + .assign_shl, + .assign_shl_sat, + .assign_shr, + .assign_bit_xor, + .assign_div, + .assign_sub, + .assign_sub_wrap, + .assign_sub_sat, + .assign_mod, + .assign_add, + .assign_add_wrap, + .assign_add_sat, + .assign_mul, + .assign_mul_wrap, + .assign_mul_sat, + .add, + .add_wrap, + .add_sat, + .sub, + .sub_wrap, + .sub_sat, + .mul, + .mul_wrap, + .mul_sat, + .div, + .mod, + .bit_and, + .bit_or, + .shl, + .shl_sat, + .shr, + .bit_xor, + .bang_equal, + .equal_equal, + .greater_than, + .greater_or_equal, + .less_than, + .less_or_equal, + .array_cat, + .array_mult, + .bool_and, + .bool_or, + .@"asm", + .asm_simple, + .string_literal, + .number_literal, + .call, + .call_comma, + .async_call, + .async_call_comma, + .call_one, + .call_one_comma, + .async_call_one, + .async_call_one_comma, + .unreachable_literal, + .@"return", + .@"if", + .if_simple, + .@"while", + .while_simple, + .while_cont, + .bool_not, + .address_of, + 
.optional_type,
+        .block,
+        .block_semicolon,
+        .block_two,
+        .block_two_semicolon,
+        .@"break",
+        .ptr_type_aligned,
+        .ptr_type_sentinel,
+        .ptr_type,
+        .ptr_type_bit_range,
+        .array_type,
+        .array_type_sentinel,
+        .enum_literal,
+        .multiline_string_literal,
+        .char_literal,
+        .@"defer",
+        .@"errdefer",
+        .@"catch",
+        .error_union,
+        .merge_error_sets,
+        .switch_range,
+        .for_range,
+        .@"await",
+        .bit_not,
+        .negation,
+        .negation_wrap,
+        .@"resume",
+        .@"try",
+        .slice,
+        .slice_open,
+        .slice_sentinel,
+        .array_init_one,
+        .array_init_one_comma,
+        .array_init_dot_two,
+        .array_init_dot_two_comma,
+        .array_init_dot,
+        .array_init_dot_comma,
+        .array_init,
+        .array_init_comma,
+        .struct_init_one,
+        .struct_init_one_comma,
+        .struct_init_dot_two,
+        .struct_init_dot_two_comma,
+        .struct_init_dot,
+        .struct_init_dot_comma,
+        .struct_init,
+        .struct_init_comma,
+        .@"switch",
+        .switch_comma,
+        .@"for",
+        .for_simple,
+        .@"suspend",
+        .@"continue",
+        .fn_proto_simple,
+        .fn_proto_multi,
+        .fn_proto_one,
+        .fn_proto,
+        .fn_decl,
+        .anyframe_type,
+        .anyframe_literal,
+        .error_set_decl,
+        .container_decl,
+        .container_decl_trailing,
+        .container_decl_two,
+        .container_decl_two_trailing,
+        .container_decl_arg,
+        .container_decl_arg_trailing,
+        .tagged_union,
+        .tagged_union_trailing,
+        .tagged_union_two,
+        .tagged_union_two_trailing,
+        .tagged_union_enum_tag,
+        .tagged_union_enum_tag_trailing,
+        .@"comptime",
+        .@"nosuspend",
+        .error_value,
+        => return astgen.failNode(node, "invalid left-hand side to assignment", .{}),
+
+        .builtin_call,
+        .builtin_call_comma,
+        .builtin_call_two,
+        .builtin_call_two_comma,
+        => {
+            const builtin_token = main_tokens[node];
+            const builtin_name = tree.tokenSlice(builtin_token);
+            // If the builtin is an invalid name, we don't cause an error here; instead
+            // let it pass, and the error will be "invalid builtin function" later.
+            if (BuiltinFn.list.get(builtin_name)) |info| {
+                if (!info.allows_lvalue) {
+                    return astgen.failNode(node, "invalid left-hand side to assignment", .{});
+                }
+            }
+        },
+
+        // These can be assigned to.
+        .unwrap_optional,
+        .deref,
+        .field_access,
+        .array_access,
+        .identifier,
+        .grouped_expression,
+        .@"orelse",
+        => {},
+    }
+    return expr(gz, scope, .{ .rl = .ref }, node);
+}
+
+/// Turn Zig AST into untyped ZIR instructions.
+/// When `rl` is discard, ptr, or inferred_ptr, the result instruction may
+/// only be used to check `isNoReturn()`; it must not otherwise be used.
+fn expr(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) InnerError!Zir.Inst.Ref {
+    const astgen = gz.astgen;
+    const tree = astgen.tree;
+    const main_tokens = tree.nodes.items(.main_token);
+    const token_tags = tree.tokens.items(.tag);
+    const node_datas = tree.nodes.items(.data);
+    const node_tags = tree.nodes.items(.tag);
+
+    const prev_anon_name_strategy = gz.anon_name_strategy;
+    defer gz.anon_name_strategy = prev_anon_name_strategy;
+    if (!nodeUsesAnonNameStrategy(tree, node)) {
+        gz.anon_name_strategy = .anon;
+    }
+
+    switch (node_tags[node]) {
+        .root => unreachable, // Top-level declaration.
+        .@"usingnamespace" => unreachable, // Top-level declaration.
+        .test_decl => unreachable, // Top-level declaration.
+        .container_field_init => unreachable, // Top-level declaration.
+        .container_field_align => unreachable, // Top-level declaration.
+        .container_field => unreachable, // Top-level declaration.
+        .fn_decl => unreachable, // Top-level declaration.
+ + .global_var_decl => unreachable, // Handled in `blockExpr`. + .local_var_decl => unreachable, // Handled in `blockExpr`. + .simple_var_decl => unreachable, // Handled in `blockExpr`. + .aligned_var_decl => unreachable, // Handled in `blockExpr`. + .@"defer" => unreachable, // Handled in `blockExpr`. + .@"errdefer" => unreachable, // Handled in `blockExpr`. + + .switch_case => unreachable, // Handled in `switchExpr`. + .switch_case_inline => unreachable, // Handled in `switchExpr`. + .switch_case_one => unreachable, // Handled in `switchExpr`. + .switch_case_inline_one => unreachable, // Handled in `switchExpr`. + .switch_range => unreachable, // Handled in `switchExpr`. + + .asm_output => unreachable, // Handled in `asmExpr`. + .asm_input => unreachable, // Handled in `asmExpr`. + + .for_range => unreachable, // Handled in `forExpr`. + + .assign => { + try assign(gz, scope, node); + return rvalue(gz, ri, .void_value, node); + }, + + .assign_destructure => { + // Note that this variant does not declare any new var/const: that + // variant is handled by `blockExprStmts`. + try assignDestructure(gz, scope, node); + return rvalue(gz, ri, .void_value, node); + }, + + .assign_shl => { + try assignShift(gz, scope, node, .shl); + return rvalue(gz, ri, .void_value, node); + }, + .assign_shl_sat => { + try assignShiftSat(gz, scope, node); + return rvalue(gz, ri, .void_value, node); + }, + .assign_shr => { + try assignShift(gz, scope, node, .shr); + return rvalue(gz, ri, .void_value, node); + }, + + .assign_bit_and => { + try assignOp(gz, scope, node, .bit_and); + return rvalue(gz, ri, .void_value, node); + }, + .assign_bit_or => { + try assignOp(gz, scope, node, .bit_or); + return rvalue(gz, ri, .void_value, node); + }, + .assign_bit_xor => { + try assignOp(gz, scope, node, .xor); + return rvalue(gz, ri, .void_value, node); + }, + .assign_div => { + try assignOp(gz, scope, node, .div); + return rvalue(gz, ri, .void_value, node); + }, + .assign_sub => { + try assignOp(gz, scope, node, .sub); + return rvalue(gz, ri, .void_value, node); + }, + .assign_sub_wrap => { + try assignOp(gz, scope, node, .subwrap); + return rvalue(gz, ri, .void_value, node); + }, + .assign_sub_sat => { + try assignOp(gz, scope, node, .sub_sat); + return rvalue(gz, ri, .void_value, node); + }, + .assign_mod => { + try assignOp(gz, scope, node, .mod_rem); + return rvalue(gz, ri, .void_value, node); + }, + .assign_add => { + try assignOp(gz, scope, node, .add); + return rvalue(gz, ri, .void_value, node); + }, + .assign_add_wrap => { + try assignOp(gz, scope, node, .addwrap); + return rvalue(gz, ri, .void_value, node); + }, + .assign_add_sat => { + try assignOp(gz, scope, node, .add_sat); + return rvalue(gz, ri, .void_value, node); + }, + .assign_mul => { + try assignOp(gz, scope, node, .mul); + return rvalue(gz, ri, .void_value, node); + }, + .assign_mul_wrap => { + try assignOp(gz, scope, node, .mulwrap); + return rvalue(gz, ri, .void_value, node); + }, + .assign_mul_sat => { + try assignOp(gz, scope, node, .mul_sat); + return rvalue(gz, ri, .void_value, node); + }, + + // zig fmt: off + .shl => return shiftOp(gz, scope, ri, node, node_datas[node].lhs, node_datas[node].rhs, .shl), + .shr => return shiftOp(gz, scope, ri, node, node_datas[node].lhs, node_datas[node].rhs, .shr), + + .add => return simpleBinOp(gz, scope, ri, node, .add), + .add_wrap => return simpleBinOp(gz, scope, ri, node, .addwrap), + .add_sat => return simpleBinOp(gz, scope, ri, node, .add_sat), + .sub => return simpleBinOp(gz, scope, ri, node, .sub), + 
.sub_wrap => return simpleBinOp(gz, scope, ri, node, .subwrap), + .sub_sat => return simpleBinOp(gz, scope, ri, node, .sub_sat), + .mul => return simpleBinOp(gz, scope, ri, node, .mul), + .mul_wrap => return simpleBinOp(gz, scope, ri, node, .mulwrap), + .mul_sat => return simpleBinOp(gz, scope, ri, node, .mul_sat), + .div => return simpleBinOp(gz, scope, ri, node, .div), + .mod => return simpleBinOp(gz, scope, ri, node, .mod_rem), + .shl_sat => return simpleBinOp(gz, scope, ri, node, .shl_sat), + + .bit_and => return simpleBinOp(gz, scope, ri, node, .bit_and), + .bit_or => return simpleBinOp(gz, scope, ri, node, .bit_or), + .bit_xor => return simpleBinOp(gz, scope, ri, node, .xor), + .bang_equal => return simpleBinOp(gz, scope, ri, node, .cmp_neq), + .equal_equal => return simpleBinOp(gz, scope, ri, node, .cmp_eq), + .greater_than => return simpleBinOp(gz, scope, ri, node, .cmp_gt), + .greater_or_equal => return simpleBinOp(gz, scope, ri, node, .cmp_gte), + .less_than => return simpleBinOp(gz, scope, ri, node, .cmp_lt), + .less_or_equal => return simpleBinOp(gz, scope, ri, node, .cmp_lte), + .array_cat => return simpleBinOp(gz, scope, ri, node, .array_cat), + + .array_mult => { + // This syntax form does not currently use the result type in the language specification. + // However, the result type can be used to emit more optimal code for large multiplications by + // having Sema perform a coercion before the multiplication operation. + const result = try gz.addPlNode(.array_mul, node, Zir.Inst.ArrayMul{ + .res_ty = if (try ri.rl.resultType(gz, node)) |t| t else .none, + .lhs = try expr(gz, scope, .{ .rl = .none }, node_datas[node].lhs), + .rhs = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, node_datas[node].rhs), + }); + return rvalue(gz, ri, result, node); + }, + + .error_union => return simpleBinOp(gz, scope, ri, node, .error_union_type), + .merge_error_sets => return simpleBinOp(gz, scope, ri, node, .merge_error_sets), + + .bool_and => return boolBinOp(gz, scope, ri, node, .bool_br_and), + .bool_or => return boolBinOp(gz, scope, ri, node, .bool_br_or), + + .bool_not => return simpleUnOp(gz, scope, ri, node, coerced_bool_ri, node_datas[node].lhs, .bool_not), + .bit_not => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, node_datas[node].lhs, .bit_not), + + .negation => return negation(gz, scope, ri, node), + .negation_wrap => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, node_datas[node].lhs, .negate_wrap), + + .identifier => return identifier(gz, scope, ri, node), + + .asm_simple, + .@"asm", + => return asmExpr(gz, scope, ri, node, tree.fullAsm(node).?), + + .string_literal => return stringLiteral(gz, ri, node), + .multiline_string_literal => return multilineStringLiteral(gz, ri, node), + + .number_literal => return numberLiteral(gz, ri, node, node, .positive), + // zig fmt: on + + .builtin_call_two, .builtin_call_two_comma => { + if (node_datas[node].lhs == 0) { + const params = [_]Ast.Node.Index{}; + return builtinCall(gz, scope, ri, node, ¶ms); + } else if (node_datas[node].rhs == 0) { + const params = [_]Ast.Node.Index{node_datas[node].lhs}; + return builtinCall(gz, scope, ri, node, ¶ms); + } else { + const params = [_]Ast.Node.Index{ node_datas[node].lhs, node_datas[node].rhs }; + return builtinCall(gz, scope, ri, node, ¶ms); + } + }, + .builtin_call, .builtin_call_comma => { + const params = tree.extra_data[node_datas[node].lhs..node_datas[node].rhs]; + return builtinCall(gz, scope, ri, node, params); + }, + + .call_one, + 
.call_one_comma, + .async_call_one, + .async_call_one_comma, + .call, + .call_comma, + .async_call, + .async_call_comma, + => { + var buf: [1]Ast.Node.Index = undefined; + return callExpr(gz, scope, ri, node, tree.fullCall(&buf, node).?); + }, + + .unreachable_literal => { + try emitDbgNode(gz, node); + _ = try gz.addAsIndex(.{ + .tag = .@"unreachable", + .data = .{ .@"unreachable" = .{ + .src_node = gz.nodeIndexToRelative(node), + } }, + }); + return Zir.Inst.Ref.unreachable_value; + }, + .@"return" => return ret(gz, scope, node), + .field_access => return fieldAccess(gz, scope, ri, node), + + .if_simple, + .@"if", + => { + const if_full = tree.fullIf(node).?; + no_switch_on_err: { + const error_token = if_full.error_token orelse break :no_switch_on_err; + switch (node_tags[if_full.ast.else_expr]) { + .@"switch", .switch_comma => {}, + else => break :no_switch_on_err, + } + const switch_operand = node_datas[if_full.ast.else_expr].lhs; + if (node_tags[switch_operand] != .identifier) break :no_switch_on_err; + if (!mem.eql(u8, tree.tokenSlice(error_token), tree.tokenSlice(main_tokens[switch_operand]))) break :no_switch_on_err; + return switchExprErrUnion(gz, scope, ri.br(), node, .@"if"); + } + return ifExpr(gz, scope, ri.br(), node, if_full); + }, + + .while_simple, + .while_cont, + .@"while", + => return whileExpr(gz, scope, ri.br(), node, tree.fullWhile(node).?, false), + + .for_simple, .@"for" => return forExpr(gz, scope, ri.br(), node, tree.fullFor(node).?, false), + + .slice_open => { + const lhs = try expr(gz, scope, .{ .rl = .ref }, node_datas[node].lhs); + + const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); + const start = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, node_datas[node].rhs); + try emitDbgStmt(gz, cursor); + const result = try gz.addPlNode(.slice_start, node, Zir.Inst.SliceStart{ + .lhs = lhs, + .start = start, + }); + return rvalue(gz, ri, result, node); + }, + .slice => { + const extra = tree.extraData(node_datas[node].rhs, Ast.Node.Slice); + const lhs_node = node_datas[node].lhs; + const lhs_tag = node_tags[lhs_node]; + const lhs_is_slice_sentinel = lhs_tag == .slice_sentinel; + const lhs_is_open_slice = lhs_tag == .slice_open or + (lhs_is_slice_sentinel and tree.extraData(node_datas[lhs_node].rhs, Ast.Node.SliceSentinel).end == 0); + if (lhs_is_open_slice and nodeIsTriviallyZero(tree, extra.start)) { + const lhs = try expr(gz, scope, .{ .rl = .ref }, node_datas[lhs_node].lhs); + + const start = if (lhs_is_slice_sentinel) start: { + const lhs_extra = tree.extraData(node_datas[lhs_node].rhs, Ast.Node.SliceSentinel); + break :start try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, lhs_extra.start); + } else try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, node_datas[lhs_node].rhs); + + const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); + const len = if (extra.end != 0) try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, extra.end) else .none; + try emitDbgStmt(gz, cursor); + const result = try gz.addPlNode(.slice_length, node, Zir.Inst.SliceLength{ + .lhs = lhs, + .start = start, + .len = len, + .start_src_node_offset = gz.nodeIndexToRelative(lhs_node), + .sentinel = .none, + }); + return rvalue(gz, ri, result, node); + } + const lhs = try expr(gz, scope, .{ .rl = .ref }, node_datas[node].lhs); + + const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); + const start = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, extra.start); + const end = try expr(gz, scope, .{ 
.rl = .{ .coerced_ty = .usize_type } }, extra.end); + try emitDbgStmt(gz, cursor); + const result = try gz.addPlNode(.slice_end, node, Zir.Inst.SliceEnd{ + .lhs = lhs, + .start = start, + .end = end, + }); + return rvalue(gz, ri, result, node); + }, + .slice_sentinel => { + const extra = tree.extraData(node_datas[node].rhs, Ast.Node.SliceSentinel); + const lhs_node = node_datas[node].lhs; + const lhs_tag = node_tags[lhs_node]; + const lhs_is_slice_sentinel = lhs_tag == .slice_sentinel; + const lhs_is_open_slice = lhs_tag == .slice_open or + (lhs_is_slice_sentinel and tree.extraData(node_datas[lhs_node].rhs, Ast.Node.SliceSentinel).end == 0); + if (lhs_is_open_slice and nodeIsTriviallyZero(tree, extra.start)) { + const lhs = try expr(gz, scope, .{ .rl = .ref }, node_datas[lhs_node].lhs); + + const start = if (lhs_is_slice_sentinel) start: { + const lhs_extra = tree.extraData(node_datas[lhs_node].rhs, Ast.Node.SliceSentinel); + break :start try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, lhs_extra.start); + } else try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, node_datas[lhs_node].rhs); + + const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); + const len = if (extra.end != 0) try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, extra.end) else .none; + const sentinel = try expr(gz, scope, .{ .rl = .none }, extra.sentinel); + try emitDbgStmt(gz, cursor); + const result = try gz.addPlNode(.slice_length, node, Zir.Inst.SliceLength{ + .lhs = lhs, + .start = start, + .len = len, + .start_src_node_offset = gz.nodeIndexToRelative(lhs_node), + .sentinel = sentinel, + }); + return rvalue(gz, ri, result, node); + } + const lhs = try expr(gz, scope, .{ .rl = .ref }, node_datas[node].lhs); + + const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); + const start = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, extra.start); + const end = if (extra.end != 0) try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, extra.end) else .none; + const sentinel = try expr(gz, scope, .{ .rl = .none }, extra.sentinel); + try emitDbgStmt(gz, cursor); + const result = try gz.addPlNode(.slice_sentinel, node, Zir.Inst.SliceSentinel{ + .lhs = lhs, + .start = start, + .end = end, + .sentinel = sentinel, + }); + return rvalue(gz, ri, result, node); + }, + + .deref => { + const lhs = try expr(gz, scope, .{ .rl = .none }, node_datas[node].lhs); + _ = try gz.addUnNode(.validate_deref, lhs, node); + switch (ri.rl) { + .ref, .ref_coerced_ty => return lhs, + else => { + const result = try gz.addUnNode(.load, lhs, node); + return rvalue(gz, ri, result, node); + }, + } + }, + .address_of => { + const operand_rl: ResultInfo.Loc = if (try ri.rl.resultType(gz, node)) |res_ty_inst| rl: { + _ = try gz.addUnTok(.validate_ref_ty, res_ty_inst, tree.firstToken(node)); + break :rl .{ .ref_coerced_ty = res_ty_inst }; + } else .ref; + const result = try expr(gz, scope, .{ .rl = operand_rl }, node_datas[node].lhs); + return rvalue(gz, ri, result, node); + }, + .optional_type => { + const operand = try typeExpr(gz, scope, node_datas[node].lhs); + const result = try gz.addUnNode(.optional_type, operand, node); + return rvalue(gz, ri, result, node); + }, + .unwrap_optional => switch (ri.rl) { + .ref, .ref_coerced_ty => { + const lhs = try expr(gz, scope, .{ .rl = .ref }, node_datas[node].lhs); + + const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); + try emitDbgStmt(gz, cursor); + + return gz.addUnNode(.optional_payload_safe_ptr, lhs, node); + }, + else => { 
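+                // Value context: `a.?` becomes `optional_payload_safe`, which
+                // null-checks the operand before extracting the payload.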
+ const lhs = try expr(gz, scope, .{ .rl = .none }, node_datas[node].lhs); + + const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); + try emitDbgStmt(gz, cursor); + + return rvalue(gz, ri, try gz.addUnNode(.optional_payload_safe, lhs, node), node); + }, + }, + .block_two, .block_two_semicolon => { + const statements = [2]Ast.Node.Index{ node_datas[node].lhs, node_datas[node].rhs }; + if (node_datas[node].lhs == 0) { + return blockExpr(gz, scope, ri, node, statements[0..0]); + } else if (node_datas[node].rhs == 0) { + return blockExpr(gz, scope, ri, node, statements[0..1]); + } else { + return blockExpr(gz, scope, ri, node, statements[0..2]); + } + }, + .block, .block_semicolon => { + const statements = tree.extra_data[node_datas[node].lhs..node_datas[node].rhs]; + return blockExpr(gz, scope, ri, node, statements); + }, + .enum_literal => return simpleStrTok(gz, ri, main_tokens[node], node, .enum_literal), + .error_value => return simpleStrTok(gz, ri, node_datas[node].rhs, node, .error_value), + // TODO restore this when implementing https://github.com/ziglang/zig/issues/6025 + // .anyframe_literal => return rvalue(gz, ri, .anyframe_type, node), + .anyframe_literal => { + const result = try gz.addUnNode(.anyframe_type, .void_type, node); + return rvalue(gz, ri, result, node); + }, + .anyframe_type => { + const return_type = try typeExpr(gz, scope, node_datas[node].rhs); + const result = try gz.addUnNode(.anyframe_type, return_type, node); + return rvalue(gz, ri, result, node); + }, + .@"catch" => { + const catch_token = main_tokens[node]; + const payload_token: ?Ast.TokenIndex = if (token_tags[catch_token + 1] == .pipe) + catch_token + 2 + else + null; + no_switch_on_err: { + const capture_token = payload_token orelse break :no_switch_on_err; + switch (node_tags[node_datas[node].rhs]) { + .@"switch", .switch_comma => {}, + else => break :no_switch_on_err, + } + const switch_operand = node_datas[node_datas[node].rhs].lhs; + if (node_tags[switch_operand] != .identifier) break :no_switch_on_err; + if (!mem.eql(u8, tree.tokenSlice(capture_token), tree.tokenSlice(main_tokens[switch_operand]))) break :no_switch_on_err; + return switchExprErrUnion(gz, scope, ri.br(), node, .@"catch"); + } + switch (ri.rl) { + .ref, .ref_coerced_ty => return orelseCatchExpr( + gz, + scope, + ri, + node, + node_datas[node].lhs, + .is_non_err_ptr, + .err_union_payload_unsafe_ptr, + .err_union_code_ptr, + node_datas[node].rhs, + payload_token, + ), + else => return orelseCatchExpr( + gz, + scope, + ri, + node, + node_datas[node].lhs, + .is_non_err, + .err_union_payload_unsafe, + .err_union_code, + node_datas[node].rhs, + payload_token, + ), + } + }, + .@"orelse" => switch (ri.rl) { + .ref, .ref_coerced_ty => return orelseCatchExpr( + gz, + scope, + ri, + node, + node_datas[node].lhs, + .is_non_null_ptr, + .optional_payload_unsafe_ptr, + undefined, + node_datas[node].rhs, + null, + ), + else => return orelseCatchExpr( + gz, + scope, + ri, + node, + node_datas[node].lhs, + .is_non_null, + .optional_payload_unsafe, + undefined, + node_datas[node].rhs, + null, + ), + }, + + .ptr_type_aligned, + .ptr_type_sentinel, + .ptr_type, + .ptr_type_bit_range, + => return ptrType(gz, scope, ri, node, tree.fullPtrType(node).?), + + .container_decl, + .container_decl_trailing, + .container_decl_arg, + .container_decl_arg_trailing, + .container_decl_two, + .container_decl_two_trailing, + .tagged_union, + .tagged_union_trailing, + .tagged_union_enum_tag, + .tagged_union_enum_tag_trailing, + .tagged_union_two, + 
.tagged_union_two_trailing, + => { + var buf: [2]Ast.Node.Index = undefined; + return containerDecl(gz, scope, ri, node, tree.fullContainerDecl(&buf, node).?); + }, + + .@"break" => return breakExpr(gz, scope, node), + .@"continue" => return continueExpr(gz, scope, node), + .grouped_expression => return expr(gz, scope, ri, node_datas[node].lhs), + .array_type => return arrayType(gz, scope, ri, node), + .array_type_sentinel => return arrayTypeSentinel(gz, scope, ri, node), + .char_literal => return charLiteral(gz, ri, node), + .error_set_decl => return errorSetDecl(gz, ri, node), + .array_access => return arrayAccess(gz, scope, ri, node), + .@"comptime" => return comptimeExprAst(gz, scope, ri, node), + .@"switch", .switch_comma => return switchExpr(gz, scope, ri.br(), node), + + .@"nosuspend" => return nosuspendExpr(gz, scope, ri, node), + .@"suspend" => return suspendExpr(gz, scope, node), + .@"await" => return awaitExpr(gz, scope, ri, node), + .@"resume" => return resumeExpr(gz, scope, ri, node), + + .@"try" => return tryExpr(gz, scope, ri, node, node_datas[node].lhs), + + .array_init_one, + .array_init_one_comma, + .array_init_dot_two, + .array_init_dot_two_comma, + .array_init_dot, + .array_init_dot_comma, + .array_init, + .array_init_comma, + => { + var buf: [2]Ast.Node.Index = undefined; + return arrayInitExpr(gz, scope, ri, node, tree.fullArrayInit(&buf, node).?); + }, + + .struct_init_one, + .struct_init_one_comma, + .struct_init_dot_two, + .struct_init_dot_two_comma, + .struct_init_dot, + .struct_init_dot_comma, + .struct_init, + .struct_init_comma, + => { + var buf: [2]Ast.Node.Index = undefined; + return structInitExpr(gz, scope, ri, node, tree.fullStructInit(&buf, node).?); + }, + + .fn_proto_simple, + .fn_proto_multi, + .fn_proto_one, + .fn_proto, + => { + var buf: [1]Ast.Node.Index = undefined; + return fnProtoExpr(gz, scope, ri, node, tree.fullFnProto(&buf, node).?); + }, + } +} + +fn nosuspendExpr( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + const body_node = node_datas[node].lhs; + assert(body_node != 0); + if (gz.nosuspend_node != 0) { + try astgen.appendErrorNodeNotes(node, "redundant nosuspend block", .{}, &[_]u32{ + try astgen.errNoteNode(gz.nosuspend_node, "other nosuspend block here", .{}), + }); + } + gz.nosuspend_node = node; + defer gz.nosuspend_node = 0; + return expr(gz, scope, ri, body_node); +} + +fn suspendExpr( + gz: *GenZir, + scope: *Scope, + node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const gpa = astgen.gpa; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + const body_node = node_datas[node].lhs; + + if (gz.nosuspend_node != 0) { + return astgen.failNodeNotes(node, "suspend inside nosuspend block", .{}, &[_]u32{ + try astgen.errNoteNode(gz.nosuspend_node, "nosuspend block here", .{}), + }); + } + if (gz.suspend_node != 0) { + return astgen.failNodeNotes(node, "cannot suspend inside suspend block", .{}, &[_]u32{ + try astgen.errNoteNode(gz.suspend_node, "other suspend block here", .{}), + }); + } + assert(body_node != 0); + + const suspend_inst = try gz.makeBlockInst(.suspend_block, node); + try gz.instructions.append(gpa, suspend_inst); + + var suspend_scope = gz.makeSubBlock(scope); + suspend_scope.suspend_node = node; + defer suspend_scope.unstack(); + + const body_result = try expr(&suspend_scope, &suspend_scope.base, .{ .rl = 
.none }, body_node); + if (!gz.refIsNoReturn(body_result)) { + _ = try suspend_scope.addBreak(.break_inline, suspend_inst, .void_value); + } + try suspend_scope.setBlockBody(suspend_inst); + + return suspend_inst.toRef(); +} + +fn awaitExpr( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + const rhs_node = node_datas[node].lhs; + + if (gz.suspend_node != 0) { + return astgen.failNodeNotes(node, "cannot await inside suspend block", .{}, &[_]u32{ + try astgen.errNoteNode(gz.suspend_node, "suspend block here", .{}), + }); + } + const operand = try expr(gz, scope, .{ .rl = .ref }, rhs_node); + const result = if (gz.nosuspend_node != 0) + try gz.addExtendedPayload(.await_nosuspend, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = operand, + }) + else + try gz.addUnNode(.@"await", operand, node); + + return rvalue(gz, ri, result, node); +} + +fn resumeExpr( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + const rhs_node = node_datas[node].lhs; + const operand = try expr(gz, scope, .{ .rl = .ref }, rhs_node); + const result = try gz.addUnNode(.@"resume", operand, node); + return rvalue(gz, ri, result, node); +} + +fn fnProtoExpr( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + fn_proto: Ast.full.FnProto, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const token_tags = tree.tokens.items(.tag); + + if (fn_proto.name_token) |some| { + return astgen.failTok(some, "function type cannot have a name", .{}); + } + + const is_extern = blk: { + const maybe_extern_token = fn_proto.extern_export_inline_token orelse break :blk false; + break :blk token_tags[maybe_extern_token] == .keyword_extern; + }; + assert(!is_extern); + + var block_scope = gz.makeSubBlock(scope); + defer block_scope.unstack(); + + const block_inst = try gz.makeBlockInst(.block_inline, node); + + var noalias_bits: u32 = 0; + const is_var_args = is_var_args: { + var param_type_i: usize = 0; + var it = fn_proto.iterate(tree); + while (it.next()) |param| : (param_type_i += 1) { + const is_comptime = if (param.comptime_noalias) |token| switch (token_tags[token]) { + .keyword_noalias => is_comptime: { + noalias_bits |= @as(u32, 1) << (std.math.cast(u5, param_type_i) orelse + return astgen.failTok(token, "this compiler implementation only supports 'noalias' on the first 32 parameters", .{})); + break :is_comptime false; + }, + .keyword_comptime => true, + else => false, + } else false; + + const is_anytype = if (param.anytype_ellipsis3) |token| blk: { + switch (token_tags[token]) { + .keyword_anytype => break :blk true, + .ellipsis3 => break :is_var_args true, + else => unreachable, + } + } else false; + + const param_name = if (param.name_token) |name_token| blk: { + if (mem.eql(u8, "_", tree.tokenSlice(name_token))) + break :blk .empty; + + break :blk try astgen.identAsString(name_token); + } else .empty; + + if (is_anytype) { + const name_token = param.name_token orelse param.anytype_ellipsis3.?; + + const tag: Zir.Inst.Tag = if (is_comptime) + .param_anytype_comptime + else + .param_anytype; + _ = try block_scope.addStrTok(tag, param_name, name_token); + } else { + const param_type_node = param.type_expr; + assert(param_type_node != 0); + var 
param_gz = block_scope.makeSubBlock(scope); + defer param_gz.unstack(); + const param_type = try expr(¶m_gz, scope, coerced_type_ri, param_type_node); + const param_inst_expected: Zir.Inst.Index = @enumFromInt(astgen.instructions.len + 1); + _ = try param_gz.addBreakWithSrcNode(.break_inline, param_inst_expected, param_type, param_type_node); + const main_tokens = tree.nodes.items(.main_token); + const name_token = param.name_token orelse main_tokens[param_type_node]; + const tag: Zir.Inst.Tag = if (is_comptime) .param_comptime else .param; + const param_inst = try block_scope.addParam(¶m_gz, tag, name_token, param_name, param.first_doc_comment); + assert(param_inst_expected == param_inst); + } + } + break :is_var_args false; + }; + + const align_ref: Zir.Inst.Ref = if (fn_proto.ast.align_expr == 0) .none else inst: { + break :inst try expr(&block_scope, scope, coerced_align_ri, fn_proto.ast.align_expr); + }; + + if (fn_proto.ast.addrspace_expr != 0) { + return astgen.failNode(fn_proto.ast.addrspace_expr, "addrspace not allowed on function prototypes", .{}); + } + + if (fn_proto.ast.section_expr != 0) { + return astgen.failNode(fn_proto.ast.section_expr, "linksection not allowed on function prototypes", .{}); + } + + const cc: Zir.Inst.Ref = if (fn_proto.ast.callconv_expr != 0) + try expr( + &block_scope, + scope, + .{ .rl = .{ .coerced_ty = .calling_convention_type } }, + fn_proto.ast.callconv_expr, + ) + else + Zir.Inst.Ref.none; + + const maybe_bang = tree.firstToken(fn_proto.ast.return_type) - 1; + const is_inferred_error = token_tags[maybe_bang] == .bang; + if (is_inferred_error) { + return astgen.failTok(maybe_bang, "function prototype may not have inferred error set", .{}); + } + const ret_ty = try expr(&block_scope, scope, coerced_type_ri, fn_proto.ast.return_type); + + const result = try block_scope.addFunc(.{ + .src_node = fn_proto.ast.proto_node, + + .cc_ref = cc, + .cc_gz = null, + .align_ref = align_ref, + .align_gz = null, + .ret_ref = ret_ty, + .ret_gz = null, + .section_ref = .none, + .section_gz = null, + .addrspace_ref = .none, + .addrspace_gz = null, + + .param_block = block_inst, + .body_gz = null, + .lib_name = .empty, + .is_var_args = is_var_args, + .is_inferred_error = false, + .is_test = false, + .is_extern = false, + .is_noinline = false, + .noalias_bits = noalias_bits, + }); + + _ = try block_scope.addBreak(.break_inline, block_inst, result); + try block_scope.setBlockBody(block_inst); + try gz.instructions.append(astgen.gpa, block_inst); + + return rvalue(gz, ri, block_inst.toRef(), fn_proto.ast.proto_node); +} + +fn arrayInitExpr( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + array_init: Ast.full.ArrayInit, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const node_tags = tree.nodes.items(.tag); + const main_tokens = tree.nodes.items(.main_token); + + assert(array_init.ast.elements.len != 0); // Otherwise it would be struct init. + + const array_ty: Zir.Inst.Ref, const elem_ty: Zir.Inst.Ref = inst: { + if (array_init.ast.type_expr == 0) break :inst .{ .none, .none }; + + infer: { + const array_type: Ast.full.ArrayType = tree.fullArrayType(array_init.ast.type_expr) orelse break :infer; + // This intentionally does not support `@"_"` syntax. 
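+            // For example, `[_]u8{ 1, 2, 3 }` takes this branch: the `_`
+            // length is resolved to 3 from the element count below.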
+ if (node_tags[array_type.ast.elem_count] == .identifier and + mem.eql(u8, tree.tokenSlice(main_tokens[array_type.ast.elem_count]), "_")) + { + const len_inst = try gz.addInt(array_init.ast.elements.len); + const elem_type = try typeExpr(gz, scope, array_type.ast.elem_type); + if (array_type.ast.sentinel == 0) { + const array_type_inst = try gz.addPlNode(.array_type, array_init.ast.type_expr, Zir.Inst.Bin{ + .lhs = len_inst, + .rhs = elem_type, + }); + break :inst .{ array_type_inst, elem_type }; + } else { + const sentinel = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = elem_type } }, array_type.ast.sentinel); + const array_type_inst = try gz.addPlNode( + .array_type_sentinel, + array_init.ast.type_expr, + Zir.Inst.ArrayTypeSentinel{ + .len = len_inst, + .elem_type = elem_type, + .sentinel = sentinel, + }, + ); + break :inst .{ array_type_inst, elem_type }; + } + } + } + const array_type_inst = try typeExpr(gz, scope, array_init.ast.type_expr); + _ = try gz.addPlNode(.validate_array_init_ty, node, Zir.Inst.ArrayInit{ + .ty = array_type_inst, + .init_count = @intCast(array_init.ast.elements.len), + }); + break :inst .{ array_type_inst, .none }; + }; + + if (array_ty != .none) { + // Typed inits do not use RLS for language simplicity. + switch (ri.rl) { + .discard => { + if (elem_ty != .none) { + const elem_ri: ResultInfo = .{ .rl = .{ .ty = elem_ty } }; + for (array_init.ast.elements) |elem_init| { + _ = try expr(gz, scope, elem_ri, elem_init); + } + } else { + for (array_init.ast.elements, 0..) |elem_init, i| { + const this_elem_ty = try gz.add(.{ + .tag = .array_init_elem_type, + .data = .{ .bin = .{ + .lhs = array_ty, + .rhs = @enumFromInt(i), + } }, + }); + _ = try expr(gz, scope, .{ .rl = .{ .ty = this_elem_ty } }, elem_init); + } + } + return .void_value; + }, + .ref => return arrayInitExprTyped(gz, scope, node, array_init.ast.elements, array_ty, elem_ty, true), + else => { + const array_inst = try arrayInitExprTyped(gz, scope, node, array_init.ast.elements, array_ty, elem_ty, false); + return rvalue(gz, ri, array_inst, node); + }, + } + } + + switch (ri.rl) { + .none => return arrayInitExprAnon(gz, scope, node, array_init.ast.elements), + .discard => { + for (array_init.ast.elements) |elem_init| { + _ = try expr(gz, scope, .{ .rl = .discard }, elem_init); + } + return Zir.Inst.Ref.void_value; + }, + .ref => { + const result = try arrayInitExprAnon(gz, scope, node, array_init.ast.elements); + return gz.addUnTok(.ref, result, tree.firstToken(node)); + }, + .ref_coerced_ty => |ptr_ty_inst| { + const dest_arr_ty_inst = try gz.addPlNode(.validate_array_init_ref_ty, node, Zir.Inst.ArrayInitRefTy{ + .ptr_ty = ptr_ty_inst, + .elem_count = @intCast(array_init.ast.elements.len), + }); + return arrayInitExprTyped(gz, scope, node, array_init.ast.elements, dest_arr_ty_inst, .none, true); + }, + .ty, .coerced_ty => |result_ty_inst| { + _ = try gz.addPlNode(.validate_array_init_result_ty, node, Zir.Inst.ArrayInit{ + .ty = result_ty_inst, + .init_count = @intCast(array_init.ast.elements.len), + }); + return arrayInitExprTyped(gz, scope, node, array_init.ast.elements, result_ty_inst, .none, false); + }, + .ptr => |ptr| { + try arrayInitExprPtr(gz, scope, node, array_init.ast.elements, ptr.inst); + return .void_value; + }, + .inferred_ptr => { + // We can't get elem pointers of an untyped inferred alloc, so must perform a + // standard anonymous initialization followed by an rvalue store. + // See corresponding logic in structInitExpr. 
+ const result = try arrayInitExprAnon(gz, scope, node, array_init.ast.elements); + return rvalue(gz, ri, result, node); + }, + .destructure => |destructure| { + // Untyped init - destructure directly into result pointers + if (array_init.ast.elements.len != destructure.components.len) { + return astgen.failNodeNotes(node, "expected {} elements for destructure, found {}", .{ + destructure.components.len, + array_init.ast.elements.len, + }, &.{ + try astgen.errNoteNode(destructure.src_node, "result destructured here", .{}), + }); + } + for (array_init.ast.elements, destructure.components) |elem_init, ds_comp| { + const elem_ri: ResultInfo = .{ .rl = switch (ds_comp) { + .typed_ptr => |ptr_rl| .{ .ptr = ptr_rl }, + .inferred_ptr => |ptr_inst| .{ .inferred_ptr = ptr_inst }, + .discard => .discard, + } }; + _ = try expr(gz, scope, elem_ri, elem_init); + } + return .void_value; + }, + } +} + +/// An array initialization expression using an `array_init_anon` instruction. +fn arrayInitExprAnon( + gz: *GenZir, + scope: *Scope, + node: Ast.Node.Index, + elements: []const Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + + const payload_index = try addExtra(astgen, Zir.Inst.MultiOp{ + .operands_len = @intCast(elements.len), + }); + var extra_index = try reserveExtra(astgen, elements.len); + + for (elements) |elem_init| { + const elem_ref = try expr(gz, scope, .{ .rl = .none }, elem_init); + astgen.extra.items[extra_index] = @intFromEnum(elem_ref); + extra_index += 1; + } + return try gz.addPlNodePayloadIndex(.array_init_anon, node, payload_index); +} + +/// An array initialization expression using an `array_init` or `array_init_ref` instruction. +fn arrayInitExprTyped( + gz: *GenZir, + scope: *Scope, + node: Ast.Node.Index, + elements: []const Ast.Node.Index, + ty_inst: Zir.Inst.Ref, + maybe_elem_ty_inst: Zir.Inst.Ref, + is_ref: bool, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + + const len = elements.len + 1; // +1 for type + const payload_index = try addExtra(astgen, Zir.Inst.MultiOp{ + .operands_len = @intCast(len), + }); + var extra_index = try reserveExtra(astgen, len); + astgen.extra.items[extra_index] = @intFromEnum(ty_inst); + extra_index += 1; + + if (maybe_elem_ty_inst != .none) { + const elem_ri: ResultInfo = .{ .rl = .{ .coerced_ty = maybe_elem_ty_inst } }; + for (elements) |elem_init| { + const elem_inst = try expr(gz, scope, elem_ri, elem_init); + astgen.extra.items[extra_index] = @intFromEnum(elem_inst); + extra_index += 1; + } + } else { + for (elements, 0..) |elem_init, i| { + const ri: ResultInfo = .{ .rl = .{ .coerced_ty = try gz.add(.{ + .tag = .array_init_elem_type, + .data = .{ .bin = .{ + .lhs = ty_inst, + .rhs = @enumFromInt(i), + } }, + }) } }; + + const elem_inst = try expr(gz, scope, ri, elem_init); + astgen.extra.items[extra_index] = @intFromEnum(elem_inst); + extra_index += 1; + } + } + + const tag: Zir.Inst.Tag = if (is_ref) .array_init_ref else .array_init; + return try gz.addPlNodePayloadIndex(tag, node, payload_index); +} + +/// An array initialization expression using element pointers. 
+fn arrayInitExprPtr( + gz: *GenZir, + scope: *Scope, + node: Ast.Node.Index, + elements: []const Ast.Node.Index, + ptr_inst: Zir.Inst.Ref, +) InnerError!void { + const astgen = gz.astgen; + + const array_ptr_inst = try gz.addUnNode(.opt_eu_base_ptr_init, ptr_inst, node); + + const payload_index = try addExtra(astgen, Zir.Inst.Block{ + .body_len = @intCast(elements.len), + }); + var extra_index = try reserveExtra(astgen, elements.len); + + for (elements, 0..) |elem_init, i| { + const elem_ptr_inst = try gz.addPlNode(.array_init_elem_ptr, elem_init, Zir.Inst.ElemPtrImm{ + .ptr = array_ptr_inst, + .index = @intCast(i), + }); + astgen.extra.items[extra_index] = @intFromEnum(elem_ptr_inst.toIndex().?); + extra_index += 1; + _ = try expr(gz, scope, .{ .rl = .{ .ptr = .{ .inst = elem_ptr_inst } } }, elem_init); + } + + _ = try gz.addPlNodePayloadIndex(.validate_ptr_array_init, node, payload_index); +} + +fn structInitExpr( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + struct_init: Ast.full.StructInit, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + + if (struct_init.ast.type_expr == 0) { + if (struct_init.ast.fields.len == 0) { + // Anonymous init with no fields. + switch (ri.rl) { + .discard => return .void_value, + .ref_coerced_ty => |ptr_ty_inst| return gz.addUnNode(.struct_init_empty_ref_result, ptr_ty_inst, node), + .ty, .coerced_ty => |ty_inst| return gz.addUnNode(.struct_init_empty_result, ty_inst, node), + .ptr => { + // TODO: should we modify this to use RLS for the field stores here? + const ty_inst = (try ri.rl.resultType(gz, node)).?; + const val = try gz.addUnNode(.struct_init_empty_result, ty_inst, node); + return rvalue(gz, ri, val, node); + }, + .none, .ref, .inferred_ptr => { + return rvalue(gz, ri, .empty_struct, node); + }, + .destructure => |destructure| { + return astgen.failNodeNotes(node, "empty initializer cannot be destructured", .{}, &.{ + try astgen.errNoteNode(destructure.src_node, "result destructured here", .{}), + }); + }, + } + } + } else array: { + const node_tags = tree.nodes.items(.tag); + const main_tokens = tree.nodes.items(.main_token); + const array_type: Ast.full.ArrayType = tree.fullArrayType(struct_init.ast.type_expr) orelse { + if (struct_init.ast.fields.len == 0) { + const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr); + const result = try gz.addUnNode(.struct_init_empty, ty_inst, node); + return rvalue(gz, ri, result, node); + } + break :array; + }; + const is_inferred_array_len = node_tags[array_type.ast.elem_count] == .identifier and + // This intentionally does not support `@"_"` syntax. 
+ mem.eql(u8, tree.tokenSlice(main_tokens[array_type.ast.elem_count]), "_"); + if (struct_init.ast.fields.len == 0) { + if (is_inferred_array_len) { + const elem_type = try typeExpr(gz, scope, array_type.ast.elem_type); + const array_type_inst = if (array_type.ast.sentinel == 0) blk: { + break :blk try gz.addPlNode(.array_type, struct_init.ast.type_expr, Zir.Inst.Bin{ + .lhs = .zero_usize, + .rhs = elem_type, + }); + } else blk: { + const sentinel = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = elem_type } }, array_type.ast.sentinel); + break :blk try gz.addPlNode( + .array_type_sentinel, + struct_init.ast.type_expr, + Zir.Inst.ArrayTypeSentinel{ + .len = .zero_usize, + .elem_type = elem_type, + .sentinel = sentinel, + }, + ); + }; + const result = try gz.addUnNode(.struct_init_empty, array_type_inst, node); + return rvalue(gz, ri, result, node); + } + const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr); + const result = try gz.addUnNode(.struct_init_empty, ty_inst, node); + return rvalue(gz, ri, result, node); + } else { + return astgen.failNode( + struct_init.ast.type_expr, + "initializing array with struct syntax", + .{}, + ); + } + } + + { + var sfba = std.heap.stackFallback(256, astgen.arena); + const sfba_allocator = sfba.get(); + + var duplicate_names = std.AutoArrayHashMap(Zir.NullTerminatedString, ArrayListUnmanaged(Ast.TokenIndex)).init(sfba_allocator); + try duplicate_names.ensureTotalCapacity(@intCast(struct_init.ast.fields.len)); + + // When there aren't errors, use this to avoid a second iteration. + var any_duplicate = false; + + for (struct_init.ast.fields) |field| { + const name_token = tree.firstToken(field) - 2; + const name_index = try astgen.identAsString(name_token); + + const gop = try duplicate_names.getOrPut(name_index); + + if (gop.found_existing) { + try gop.value_ptr.append(sfba_allocator, name_token); + any_duplicate = true; + } else { + gop.value_ptr.* = .{}; + try gop.value_ptr.append(sfba_allocator, name_token); + } + } + + if (any_duplicate) { + var it = duplicate_names.iterator(); + + while (it.next()) |entry| { + const record = entry.value_ptr.*; + if (record.items.len > 1) { + var error_notes = std.ArrayList(u32).init(astgen.arena); + + for (record.items[1..]) |duplicate| { + try error_notes.append(try astgen.errNoteTok(duplicate, "duplicate name here", .{})); + } + + try error_notes.append(try astgen.errNoteNode(node, "struct declared here", .{})); + + try astgen.appendErrorTokNotes( + record.items[0], + "duplicate struct field name", + .{}, + error_notes.items, + ); + } + } + + return error.AnalysisFail; + } + } + + if (struct_init.ast.type_expr != 0) { + // Typed inits do not use RLS for language simplicity. + const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr); + _ = try gz.addUnNode(.validate_struct_init_ty, ty_inst, node); + switch (ri.rl) { + .ref => return structInitExprTyped(gz, scope, node, struct_init, ty_inst, true), + else => { + const struct_inst = try structInitExprTyped(gz, scope, node, struct_init, ty_inst, false); + return rvalue(gz, ri, struct_inst, node); + }, + } + } + + switch (ri.rl) { + .none => return structInitExprAnon(gz, scope, node, struct_init), + .discard => { + // Even if discarding we must perform side-effects. 
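+            // For example, `_ = .{ .x = foo() };` must still call `foo`, so
+            // each field initializer is lowered with a discard result location.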
+ for (struct_init.ast.fields) |field_init| { + _ = try expr(gz, scope, .{ .rl = .discard }, field_init); + } + return .void_value; + }, + .ref => { + const result = try structInitExprAnon(gz, scope, node, struct_init); + return gz.addUnTok(.ref, result, tree.firstToken(node)); + }, + .ref_coerced_ty => |ptr_ty_inst| { + const result_ty_inst = try gz.addUnNode(.elem_type, ptr_ty_inst, node); + _ = try gz.addUnNode(.validate_struct_init_result_ty, result_ty_inst, node); + return structInitExprTyped(gz, scope, node, struct_init, result_ty_inst, true); + }, + .ty, .coerced_ty => |result_ty_inst| { + _ = try gz.addUnNode(.validate_struct_init_result_ty, result_ty_inst, node); + return structInitExprTyped(gz, scope, node, struct_init, result_ty_inst, false); + }, + .ptr => |ptr| { + try structInitExprPtr(gz, scope, node, struct_init, ptr.inst); + return .void_value; + }, + .inferred_ptr => { + // We can't get field pointers of an untyped inferred alloc, so must perform a + // standard anonymous initialization followed by an rvalue store. + // See corresponding logic in arrayInitExpr. + const struct_inst = try structInitExprAnon(gz, scope, node, struct_init); + return rvalue(gz, ri, struct_inst, node); + }, + .destructure => |destructure| { + // This is an untyped init, so is an actual struct, which does + // not support destructuring. + return astgen.failNodeNotes(node, "struct value cannot be destructured", .{}, &.{ + try astgen.errNoteNode(destructure.src_node, "result destructured here", .{}), + }); + }, + } +} + +/// A struct initialization expression using a `struct_init_anon` instruction. +fn structInitExprAnon( + gz: *GenZir, + scope: *Scope, + node: Ast.Node.Index, + struct_init: Ast.full.StructInit, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + + const payload_index = try addExtra(astgen, Zir.Inst.StructInitAnon{ + .fields_len = @intCast(struct_init.ast.fields.len), + }); + const field_size = @typeInfo(Zir.Inst.StructInitAnon.Item).Struct.fields.len; + var extra_index: usize = try reserveExtra(astgen, struct_init.ast.fields.len * field_size); + + for (struct_init.ast.fields) |field_init| { + const name_token = tree.firstToken(field_init) - 2; + const str_index = try astgen.identAsString(name_token); + setExtra(astgen, extra_index, Zir.Inst.StructInitAnon.Item{ + .field_name = str_index, + .init = try expr(gz, scope, .{ .rl = .none }, field_init), + }); + extra_index += field_size; + } + + return gz.addPlNodePayloadIndex(.struct_init_anon, node, payload_index); +} + +/// A struct initialization expression using a `struct_init` or `struct_init_ref` instruction. 
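+/// As an illustrative sketch (assumed user code, not from this change), a typed init
+/// such as `const p: Point = .{ .x = 1, .y = 2 };` is lowered here: each field init is
+/// evaluated with a result type obtained from a `struct_init_field_type` instruction,
+/// and the final value is produced by `struct_init` (or `struct_init_ref` when `is_ref`
+/// is set).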
+fn structInitExprTyped( + gz: *GenZir, + scope: *Scope, + node: Ast.Node.Index, + struct_init: Ast.full.StructInit, + ty_inst: Zir.Inst.Ref, + is_ref: bool, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + + const payload_index = try addExtra(astgen, Zir.Inst.StructInit{ + .fields_len = @intCast(struct_init.ast.fields.len), + }); + const field_size = @typeInfo(Zir.Inst.StructInit.Item).Struct.fields.len; + var extra_index: usize = try reserveExtra(astgen, struct_init.ast.fields.len * field_size); + + for (struct_init.ast.fields) |field_init| { + const name_token = tree.firstToken(field_init) - 2; + const str_index = try astgen.identAsString(name_token); + const field_ty_inst = try gz.addPlNode(.struct_init_field_type, field_init, Zir.Inst.FieldType{ + .container_type = ty_inst, + .name_start = str_index, + }); + setExtra(astgen, extra_index, Zir.Inst.StructInit.Item{ + .field_type = field_ty_inst.toIndex().?, + .init = try expr(gz, scope, .{ .rl = .{ .coerced_ty = field_ty_inst } }, field_init), + }); + extra_index += field_size; + } + + const tag: Zir.Inst.Tag = if (is_ref) .struct_init_ref else .struct_init; + return gz.addPlNodePayloadIndex(tag, node, payload_index); +} + +/// A struct initialization expression using field pointers. +fn structInitExprPtr( + gz: *GenZir, + scope: *Scope, + node: Ast.Node.Index, + struct_init: Ast.full.StructInit, + ptr_inst: Zir.Inst.Ref, +) InnerError!void { + const astgen = gz.astgen; + const tree = astgen.tree; + + const struct_ptr_inst = try gz.addUnNode(.opt_eu_base_ptr_init, ptr_inst, node); + + const payload_index = try addExtra(astgen, Zir.Inst.Block{ + .body_len = @intCast(struct_init.ast.fields.len), + }); + var extra_index = try reserveExtra(astgen, struct_init.ast.fields.len); + + for (struct_init.ast.fields) |field_init| { + const name_token = tree.firstToken(field_init) - 2; + const str_index = try astgen.identAsString(name_token); + const field_ptr = try gz.addPlNode(.struct_init_field_ptr, field_init, Zir.Inst.Field{ + .lhs = struct_ptr_inst, + .field_name_start = str_index, + }); + astgen.extra.items[extra_index] = @intFromEnum(field_ptr.toIndex().?); + extra_index += 1; + _ = try expr(gz, scope, .{ .rl = .{ .ptr = .{ .inst = field_ptr } } }, field_init); + } + + _ = try gz.addPlNodePayloadIndex(.validate_ptr_struct_init, node, payload_index); +} + +/// This explicitly calls expr in a comptime scope by wrapping it in a `block_comptime` if +/// necessary. It should be used whenever we need to force compile-time evaluation of something, +/// such as a type. +/// The function corresponding to `comptime` expression syntax is `comptimeExprAst`. +fn comptimeExpr( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + if (gz.is_comptime) { + // No need to change anything! + return expr(gz, scope, ri, node); + } + + // There's an optimization here: if the body will be evaluated at comptime regardless, there's + // no need to wrap it in a block. This is hard to determine in general, but we can identify a + // common subset of trivially comptime expressions to take down the size of the ZIR a bit. + const tree = gz.astgen.tree; + const main_tokens = tree.nodes.items(.main_token); + const node_tags = tree.nodes.items(.tag); + switch (node_tags[node]) { + // Any identifier in `primitive_instrs` is trivially comptime. In particular, this includes + // some common types, so we can elide `block_comptime` for a few common type annotations. 
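+        // Illustrative sketch (assumed user code): in `const len: usize = 0;`, the
+        // `usize` annotation is an identifier found in `primitive_instrs`, so it is
+        // lowered directly to a ZIR constant and no `block_comptime` is emitted.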
+ .identifier => { + const ident_token = main_tokens[node]; + const ident_name_raw = tree.tokenSlice(ident_token); + if (primitive_instrs.get(ident_name_raw)) |zir_const_ref| { + // No need to worry about result location here, we're not creating a comptime block! + return rvalue(gz, ri, zir_const_ref, node); + } + }, + + // We can also avoid the block for a few trivial AST tags which are always comptime-known. + .number_literal, .string_literal, .multiline_string_literal, .enum_literal, .error_value => { + // No need to worry about result location here, we're not creating a comptime block! + return expr(gz, scope, ri, node); + }, + + // Lastly, for labelled blocks, avoid emitting a labelled block directly inside this + // comptime block, because that would be silly! Note that we don't bother doing this for + // unlabelled blocks, since they don't generate blocks at comptime anyway (see `blockExpr`). + .block_two, .block_two_semicolon, .block, .block_semicolon => { + const token_tags = tree.tokens.items(.tag); + const lbrace = main_tokens[node]; + // Careful! We can't pass in the real result location here, since it may + // refer to runtime memory. A runtime-to-comptime boundary has to remove + // result location information, compute the result, and copy it to the true + // result location at runtime. We do this below as well. + const ty_only_ri: ResultInfo = .{ + .ctx = ri.ctx, + .rl = if (try ri.rl.resultType(gz, node)) |res_ty| + .{ .coerced_ty = res_ty } + else + .none, + }; + if (token_tags[lbrace - 1] == .colon and + token_tags[lbrace - 2] == .identifier) + { + const node_datas = tree.nodes.items(.data); + switch (node_tags[node]) { + .block_two, .block_two_semicolon => { + const stmts: [2]Ast.Node.Index = .{ node_datas[node].lhs, node_datas[node].rhs }; + const stmt_slice = if (stmts[0] == 0) + stmts[0..0] + else if (stmts[1] == 0) + stmts[0..1] + else + stmts[0..2]; + + const block_ref = try labeledBlockExpr(gz, scope, ty_only_ri, node, stmt_slice, true); + return rvalue(gz, ri, block_ref, node); + }, + .block, .block_semicolon => { + const stmts = tree.extra_data[node_datas[node].lhs..node_datas[node].rhs]; + // Replace result location and copy back later - see above. + const block_ref = try labeledBlockExpr(gz, scope, ty_only_ri, node, stmts, true); + return rvalue(gz, ri, block_ref, node); + }, + else => unreachable, + } + } + }, + + // In other cases, we don't optimize anything - we need a wrapper comptime block. + else => {}, + } + + var block_scope = gz.makeSubBlock(scope); + block_scope.is_comptime = true; + defer block_scope.unstack(); + + const block_inst = try gz.makeBlockInst(.block_comptime, node); + // Replace result location and copy back later - see above. + const ty_only_ri: ResultInfo = .{ + .ctx = ri.ctx, + .rl = if (try ri.rl.resultType(gz, node)) |res_ty| + .{ .coerced_ty = res_ty } + else + .none, + }; + const block_result = try expr(&block_scope, scope, ty_only_ri, node); + if (!gz.refIsNoReturn(block_result)) { + _ = try block_scope.addBreak(.@"break", block_inst, block_result); + } + try block_scope.setBlockBody(block_inst); + try gz.instructions.append(gz.astgen.gpa, block_inst); + + return rvalue(gz, ri, block_inst.toRef(), node); +} + +/// This one is for an actual `comptime` syntax, and will emit a compile error if +/// the scope is already known to be comptime-evaluated. +/// See `comptimeExpr` for the helper function for calling expr in a comptime scope. 
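+/// Illustrative sketch (assumed user code): `const x = comptime fib(10);` routes its
+/// operand through here, whereas a second `comptime` inside an already-comptime scope
+/// hits the "redundant comptime keyword" error below.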
+fn comptimeExprAst( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + if (gz.is_comptime) { + return astgen.failNode(node, "redundant comptime keyword in already comptime scope", .{}); + } + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + const body_node = node_datas[node].lhs; + return comptimeExpr(gz, scope, ri, body_node); +} + +/// Restore the error return trace index. Performs the restore only if the result is a non-error or +/// if the result location is a non-error-handling expression. +fn restoreErrRetIndex( + gz: *GenZir, + bt: GenZir.BranchTarget, + ri: ResultInfo, + node: Ast.Node.Index, + result: Zir.Inst.Ref, +) !void { + const op = switch (nodeMayEvalToError(gz.astgen.tree, node)) { + .always => return, // never restore/pop + .never => .none, // always restore/pop + .maybe => switch (ri.ctx) { + .error_handling_expr, .@"return", .fn_arg, .const_init => switch (ri.rl) { + .ptr => |ptr_res| try gz.addUnNode(.load, ptr_res.inst, node), + .inferred_ptr => blk: { + // This is a terrible workaround for Sema's inability to load from a .alloc_inferred ptr + // before its type has been resolved. There is no valid operand to use here, so error + // traces will be popped prematurely. + // TODO: Update this to do a proper load from the rl_ptr, once Sema can support it. + break :blk .none; + }, + .destructure => return, // value must be a tuple or array, so never restore/pop + else => result, + }, + else => .none, // always restore/pop + }, + }; + _ = try gz.addRestoreErrRetIndex(bt, .{ .if_non_error = op }, node); +} + +fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { + const astgen = parent_gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + const break_label = node_datas[node].lhs; + const rhs = node_datas[node].rhs; + + // Look for the label in the scope. + var scope = parent_scope; + while (true) { + switch (scope.tag) { + .gen_zir => { + const block_gz = scope.cast(GenZir).?; + + if (block_gz.cur_defer_node != 0) { + // We are breaking out of a `defer` block. 
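+                    // Illustrative sketch (assumed user code) of the rejected pattern:
+                    //     while (true) {
+                    //         defer if (cond) break;
+                    //     }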
+ return astgen.failNodeNotes(node, "cannot break out of defer expression", .{}, &.{ + try astgen.errNoteNode( + block_gz.cur_defer_node, + "defer expression here", + .{}, + ), + }); + } + + const block_inst = blk: { + if (break_label != 0) { + if (block_gz.label) |*label| { + if (try astgen.tokenIdentEql(label.token, break_label)) { + label.used = true; + break :blk label.block_inst; + } + } + } else if (block_gz.break_block.unwrap()) |i| { + break :blk i; + } + // If not the target, start over with the parent + scope = block_gz.parent; + continue; + }; + // If we made it here, this block is the target of the break expr + + const break_tag: Zir.Inst.Tag = if (block_gz.is_inline) + .break_inline + else + .@"break"; + + if (rhs == 0) { + _ = try rvalue(parent_gz, block_gz.break_result_info, .void_value, node); + + try genDefers(parent_gz, scope, parent_scope, .normal_only); + + // As our last action before the break, "pop" the error trace if needed + if (!block_gz.is_comptime) + _ = try parent_gz.addRestoreErrRetIndex(.{ .block = block_inst }, .always, node); + + _ = try parent_gz.addBreak(break_tag, block_inst, .void_value); + return Zir.Inst.Ref.unreachable_value; + } + + const operand = try reachableExpr(parent_gz, parent_scope, block_gz.break_result_info, rhs, node); + + try genDefers(parent_gz, scope, parent_scope, .normal_only); + + // As our last action before the break, "pop" the error trace if needed + if (!block_gz.is_comptime) + try restoreErrRetIndex(parent_gz, .{ .block = block_inst }, block_gz.break_result_info, rhs, operand); + + switch (block_gz.break_result_info.rl) { + .ptr => { + // In this case we don't have any mechanism to intercept it; + // we assume the result location is written, and we break with void. + _ = try parent_gz.addBreak(break_tag, block_inst, .void_value); + }, + .discard => { + _ = try parent_gz.addBreak(break_tag, block_inst, .void_value); + }, + else => { + _ = try parent_gz.addBreakWithSrcNode(break_tag, block_inst, operand, rhs); + }, + } + return Zir.Inst.Ref.unreachable_value; + }, + .local_val => scope = scope.cast(Scope.LocalVal).?.parent, + .local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent, + .namespace, .enum_namespace => break, + .defer_normal, .defer_error => scope = scope.cast(Scope.Defer).?.parent, + .top => unreachable, + } + } + if (break_label != 0) { + const label_name = try astgen.identifierTokenString(break_label); + return astgen.failTok(break_label, "label not found: '{s}'", .{label_name}); + } else { + return astgen.failNode(node, "break expression outside loop", .{}); + } +} + +fn continueExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { + const astgen = parent_gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + const break_label = node_datas[node].lhs; + + // Look for the label in the scope. 
+ var scope = parent_scope; + while (true) { + switch (scope.tag) { + .gen_zir => { + const gen_zir = scope.cast(GenZir).?; + + if (gen_zir.cur_defer_node != 0) { + return astgen.failNodeNotes(node, "cannot continue out of defer expression", .{}, &.{ + try astgen.errNoteNode( + gen_zir.cur_defer_node, + "defer expression here", + .{}, + ), + }); + } + const continue_block = gen_zir.continue_block.unwrap() orelse { + scope = gen_zir.parent; + continue; + }; + if (break_label != 0) blk: { + if (gen_zir.label) |*label| { + if (try astgen.tokenIdentEql(label.token, break_label)) { + label.used = true; + break :blk; + } + } + // found continue but either it has a different label, or no label + scope = gen_zir.parent; + continue; + } + + const break_tag: Zir.Inst.Tag = if (gen_zir.is_inline) + .break_inline + else + .@"break"; + if (break_tag == .break_inline) { + _ = try parent_gz.addUnNode(.check_comptime_control_flow, continue_block.toRef(), node); + } + + // As our last action before the continue, "pop" the error trace if needed + if (!gen_zir.is_comptime) + _ = try parent_gz.addRestoreErrRetIndex(.{ .block = continue_block }, .always, node); + + _ = try parent_gz.addBreak(break_tag, continue_block, .void_value); + return Zir.Inst.Ref.unreachable_value; + }, + .local_val => scope = scope.cast(Scope.LocalVal).?.parent, + .local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent, + .defer_normal => { + const defer_scope = scope.cast(Scope.Defer).?; + scope = defer_scope.parent; + try parent_gz.addDefer(defer_scope.index, defer_scope.len); + }, + .defer_error => scope = scope.cast(Scope.Defer).?.parent, + .namespace, .enum_namespace => break, + .top => unreachable, + } + } + if (break_label != 0) { + const label_name = try astgen.identifierTokenString(break_label); + return astgen.failTok(break_label, "label not found: '{s}'", .{label_name}); + } else { + return astgen.failNode(node, "continue expression outside loop", .{}); + } +} + +fn blockExpr( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + block_node: Ast.Node.Index, + statements: []const Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const main_tokens = tree.nodes.items(.main_token); + const token_tags = tree.tokens.items(.tag); + + const lbrace = main_tokens[block_node]; + if (token_tags[lbrace - 1] == .colon and + token_tags[lbrace - 2] == .identifier) + { + return labeledBlockExpr(gz, scope, ri, block_node, statements, false); + } + + if (!gz.is_comptime) { + // Since this block is unlabeled, its control flow is effectively linear and we + // can *almost* get away with inlining the block here. However, we actually need + // to preserve the .block for Sema, to properly pop the error return trace. 
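+        // A sketch of the motivation (assumed user code): in `{ _ = try mayFail(); }`,
+        // the `restore_err_ret_index` emitted below pops error-trace entries
+        // accumulated inside the block once it exits normally.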
+ + const block_tag: Zir.Inst.Tag = .block; + const block_inst = try gz.makeBlockInst(block_tag, block_node); + try gz.instructions.append(astgen.gpa, block_inst); + + var block_scope = gz.makeSubBlock(scope); + defer block_scope.unstack(); + + try blockExprStmts(&block_scope, &block_scope.base, statements); + + if (!block_scope.endsWithNoReturn()) { + // As our last action before the break, "pop" the error trace if needed + _ = try gz.addRestoreErrRetIndex(.{ .block = block_inst }, .always, block_node); + _ = try block_scope.addBreak(.@"break", block_inst, .void_value); + } + + try block_scope.setBlockBody(block_inst); + } else { + var sub_gz = gz.makeSubBlock(scope); + try blockExprStmts(&sub_gz, &sub_gz.base, statements); + } + + return rvalue(gz, ri, .void_value, block_node); +} + +fn checkLabelRedefinition(astgen: *AstGen, parent_scope: *Scope, label: Ast.TokenIndex) !void { + // Look for the label in the scope. + var scope = parent_scope; + while (true) { + switch (scope.tag) { + .gen_zir => { + const gen_zir = scope.cast(GenZir).?; + if (gen_zir.label) |prev_label| { + if (try astgen.tokenIdentEql(label, prev_label.token)) { + const label_name = try astgen.identifierTokenString(label); + return astgen.failTokNotes(label, "redefinition of label '{s}'", .{ + label_name, + }, &[_]u32{ + try astgen.errNoteTok( + prev_label.token, + "previous definition here", + .{}, + ), + }); + } + } + scope = gen_zir.parent; + }, + .local_val => scope = scope.cast(Scope.LocalVal).?.parent, + .local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent, + .defer_normal, .defer_error => scope = scope.cast(Scope.Defer).?.parent, + .namespace, .enum_namespace => break, + .top => unreachable, + } + } +} + +fn labeledBlockExpr( + gz: *GenZir, + parent_scope: *Scope, + ri: ResultInfo, + block_node: Ast.Node.Index, + statements: []const Ast.Node.Index, + force_comptime: bool, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const main_tokens = tree.nodes.items(.main_token); + const token_tags = tree.tokens.items(.tag); + + const lbrace = main_tokens[block_node]; + const label_token = lbrace - 2; + assert(token_tags[label_token] == .identifier); + + try astgen.checkLabelRedefinition(parent_scope, label_token); + + const need_rl = astgen.nodes_need_rl.contains(block_node); + const block_ri: ResultInfo = if (need_rl) ri else .{ + .rl = switch (ri.rl) { + .ptr => .{ .ty = (try ri.rl.resultType(gz, block_node)).? }, + .inferred_ptr => .none, + else => ri.rl, + }, + .ctx = ri.ctx, + }; + // We need to call `rvalue` to write through to the pointer only if we had a + // result pointer and aren't forwarding it. + const LocTag = @typeInfo(ResultInfo.Loc).Union.tag_type.?; + const need_result_rvalue = @as(LocTag, block_ri.rl) != @as(LocTag, ri.rl); + + // Reserve the Block ZIR instruction index so that we can put it into the GenZir struct + // so that break statements can reference it. 
+ const block_tag: Zir.Inst.Tag = if (force_comptime) .block_comptime else .block; + const block_inst = try gz.makeBlockInst(block_tag, block_node); + try gz.instructions.append(astgen.gpa, block_inst); + var block_scope = gz.makeSubBlock(parent_scope); + block_scope.label = GenZir.Label{ + .token = label_token, + .block_inst = block_inst, + }; + block_scope.setBreakResultInfo(block_ri); + if (force_comptime) block_scope.is_comptime = true; + defer block_scope.unstack(); + + try blockExprStmts(&block_scope, &block_scope.base, statements); + if (!block_scope.endsWithNoReturn()) { + // As our last action before the return, "pop" the error trace if needed + _ = try gz.addRestoreErrRetIndex(.{ .block = block_inst }, .always, block_node); + _ = try block_scope.addBreak(.@"break", block_inst, .void_value); + } + + if (!block_scope.label.?.used) { + try astgen.appendErrorTok(label_token, "unused block label", .{}); + } + + try block_scope.setBlockBody(block_inst); + if (need_result_rvalue) { + return rvalue(gz, ri, block_inst.toRef(), block_node); + } else { + return block_inst.toRef(); + } +} + +fn blockExprStmts(gz: *GenZir, parent_scope: *Scope, statements: []const Ast.Node.Index) !void { + const astgen = gz.astgen; + const tree = astgen.tree; + const node_tags = tree.nodes.items(.tag); + const node_data = tree.nodes.items(.data); + + if (statements.len == 0) return; + + var block_arena = std.heap.ArenaAllocator.init(gz.astgen.gpa); + defer block_arena.deinit(); + const block_arena_allocator = block_arena.allocator(); + + var noreturn_src_node: Ast.Node.Index = 0; + var scope = parent_scope; + for (statements) |statement| { + if (noreturn_src_node != 0) { + try astgen.appendErrorNodeNotes( + statement, + "unreachable code", + .{}, + &[_]u32{ + try astgen.errNoteNode( + noreturn_src_node, + "control flow is diverted here", + .{}, + ), + }, + ); + } + var inner_node = statement; + while (true) { + switch (node_tags[inner_node]) { + // zig fmt: off + .global_var_decl, + .local_var_decl, + .simple_var_decl, + .aligned_var_decl, => scope = try varDecl(gz, scope, statement, block_arena_allocator, tree.fullVarDecl(statement).?), + + .assign_destructure => scope = try assignDestructureMaybeDecls(gz, scope, statement, block_arena_allocator), + + .@"defer" => scope = try deferStmt(gz, scope, statement, block_arena_allocator, .defer_normal), + .@"errdefer" => scope = try deferStmt(gz, scope, statement, block_arena_allocator, .defer_error), + + .assign => try assign(gz, scope, statement), + + .assign_shl => try assignShift(gz, scope, statement, .shl), + .assign_shr => try assignShift(gz, scope, statement, .shr), + + .assign_bit_and => try assignOp(gz, scope, statement, .bit_and), + .assign_bit_or => try assignOp(gz, scope, statement, .bit_or), + .assign_bit_xor => try assignOp(gz, scope, statement, .xor), + .assign_div => try assignOp(gz, scope, statement, .div), + .assign_sub => try assignOp(gz, scope, statement, .sub), + .assign_sub_wrap => try assignOp(gz, scope, statement, .subwrap), + .assign_mod => try assignOp(gz, scope, statement, .mod_rem), + .assign_add => try assignOp(gz, scope, statement, .add), + .assign_add_wrap => try assignOp(gz, scope, statement, .addwrap), + .assign_mul => try assignOp(gz, scope, statement, .mul), + .assign_mul_wrap => try assignOp(gz, scope, statement, .mulwrap), + + .grouped_expression => { + inner_node = node_data[statement].lhs; + continue; + }, + + .while_simple, + .while_cont, + .@"while", => _ = try whileExpr(gz, scope, .{ .rl = .none }, inner_node, 
tree.fullWhile(inner_node).?, true),
+
+                .for_simple,
+                .@"for", => _ = try forExpr(gz, scope, .{ .rl = .none }, inner_node, tree.fullFor(inner_node).?, true),
+
+                else => noreturn_src_node = try unusedResultExpr(gz, scope, inner_node),
+                // zig fmt: on
+            }
+            break;
+        }
+    }
+
+    try genDefers(gz, parent_scope, scope, .normal_only);
+    try checkUsed(gz, parent_scope, scope);
+}
+
+/// Returns the AST source node of the thing that is noreturn if the statement is
+/// definitely `noreturn`. Otherwise returns 0.
+fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) InnerError!Ast.Node.Index {
+    try emitDbgNode(gz, statement);
+    // We need to emit an error if the result is not `noreturn` or `void`, but
+    // we want to avoid adding the ZIR instruction if possible for performance.
+    const maybe_unused_result = try expr(gz, scope, .{ .rl = .none }, statement);
+    return addEnsureResult(gz, maybe_unused_result, statement);
+}
+
+fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: Ast.Node.Index) InnerError!Ast.Node.Index {
+    var noreturn_src_node: Ast.Node.Index = 0;
+    const elide_check = if (maybe_unused_result.toIndex()) |inst| b: {
+        // Note that this array becomes invalid as soon as more instructions are
+        // appended to `gz.astgen.instructions`.
+        const zir_tags = gz.astgen.instructions.items(.tag);
+        switch (zir_tags[@intFromEnum(inst)]) {
+            // For some instructions, modify the ZIR data
+            // so we can avoid a separate ensure_result_used instruction.
+            .call, .field_call => {
+                const break_extra = gz.astgen.instructions.items(.data)[@intFromEnum(inst)].pl_node.payload_index;
+                comptime assert(std.meta.fieldIndex(Zir.Inst.Call, "flags") ==
+                    std.meta.fieldIndex(Zir.Inst.FieldCall, "flags"));
+                const flags: *Zir.Inst.Call.Flags = @ptrCast(&gz.astgen.extra.items[
+                    break_extra + std.meta.fieldIndex(Zir.Inst.Call, "flags").?
+                ]);
+                flags.ensure_result_used = true;
+                break :b true;
+            },
+            .builtin_call => {
+                const break_extra = gz.astgen.instructions.items(.data)[@intFromEnum(inst)].pl_node.payload_index;
+                const flags: *Zir.Inst.BuiltinCall.Flags = @ptrCast(&gz.astgen.extra.items[
+                    break_extra + std.meta.fieldIndex(Zir.Inst.BuiltinCall, "flags").?
+                ]);
+                flags.ensure_result_used = true;
+                break :b true;
+            },
+
+            // ZIR instructions that might be a type other than `noreturn` or `void`.
+ .add, + .addwrap, + .add_sat, + .add_unsafe, + .param, + .param_comptime, + .param_anytype, + .param_anytype_comptime, + .alloc, + .alloc_mut, + .alloc_comptime_mut, + .alloc_inferred, + .alloc_inferred_mut, + .alloc_inferred_comptime, + .alloc_inferred_comptime_mut, + .make_ptr_const, + .array_cat, + .array_mul, + .array_type, + .array_type_sentinel, + .elem_type, + .indexable_ptr_elem_type, + .vector_elem_type, + .vector_type, + .indexable_ptr_len, + .anyframe_type, + .as_node, + .as_shift_operand, + .bit_and, + .bitcast, + .bit_or, + .block, + .block_comptime, + .block_inline, + .declaration, + .suspend_block, + .loop, + .bool_br_and, + .bool_br_or, + .bool_not, + .cmp_lt, + .cmp_lte, + .cmp_eq, + .cmp_gte, + .cmp_gt, + .cmp_neq, + .decl_ref, + .decl_val, + .load, + .div, + .elem_ptr, + .elem_val, + .elem_ptr_node, + .elem_val_node, + .elem_val_imm, + .field_ptr, + .field_val, + .field_ptr_named, + .field_val_named, + .func, + .func_inferred, + .func_fancy, + .int, + .int_big, + .float, + .float128, + .int_type, + .is_non_null, + .is_non_null_ptr, + .is_non_err, + .is_non_err_ptr, + .ret_is_non_err, + .mod_rem, + .mul, + .mulwrap, + .mul_sat, + .ref, + .shl, + .shl_sat, + .shr, + .str, + .sub, + .subwrap, + .sub_sat, + .negate, + .negate_wrap, + .typeof, + .typeof_builtin, + .xor, + .optional_type, + .optional_payload_safe, + .optional_payload_unsafe, + .optional_payload_safe_ptr, + .optional_payload_unsafe_ptr, + .err_union_payload_unsafe, + .err_union_payload_unsafe_ptr, + .err_union_code, + .err_union_code_ptr, + .ptr_type, + .enum_literal, + .merge_error_sets, + .error_union_type, + .bit_not, + .error_value, + .slice_start, + .slice_end, + .slice_sentinel, + .slice_length, + .import, + .switch_block, + .switch_block_ref, + .switch_block_err_union, + .union_init, + .field_type_ref, + .error_set_decl, + .error_set_decl_anon, + .error_set_decl_func, + .enum_from_int, + .int_from_enum, + .type_info, + .size_of, + .bit_size_of, + .typeof_log2_int_type, + .int_from_ptr, + .align_of, + .int_from_bool, + .embed_file, + .error_name, + .sqrt, + .sin, + .cos, + .tan, + .exp, + .exp2, + .log, + .log2, + .log10, + .abs, + .floor, + .ceil, + .trunc, + .round, + .tag_name, + .type_name, + .frame_type, + .frame_size, + .int_from_float, + .float_from_int, + .ptr_from_int, + .float_cast, + .int_cast, + .ptr_cast, + .truncate, + .has_decl, + .has_field, + .clz, + .ctz, + .pop_count, + .byte_swap, + .bit_reverse, + .div_exact, + .div_floor, + .div_trunc, + .mod, + .rem, + .shl_exact, + .shr_exact, + .bit_offset_of, + .offset_of, + .splat, + .reduce, + .shuffle, + .atomic_load, + .atomic_rmw, + .mul_add, + .field_parent_ptr, + .max, + .min, + .c_import, + .@"resume", + .@"await", + .ret_err_value_code, + .closure_get, + .ret_ptr, + .ret_type, + .for_len, + .@"try", + .try_ptr, + .opt_eu_base_ptr_init, + .coerce_ptr_elem_ty, + .struct_init_empty, + .struct_init_empty_result, + .struct_init_empty_ref_result, + .struct_init_anon, + .struct_init, + .struct_init_ref, + .struct_init_field_type, + .struct_init_field_ptr, + .array_init_anon, + .array_init, + .array_init_ref, + .validate_array_init_ref_ty, + .array_init_elem_type, + .array_init_elem_ptr, + => break :b false, + + .extended => switch (gz.astgen.instructions.items(.data)[@intFromEnum(inst)].extended.opcode) { + .breakpoint, + .fence, + .set_float_mode, + .set_align_stack, + .set_cold, + => break :b true, + else => break :b false, + }, + + // ZIR instructions that are always `noreturn`. 
+ .@"break", + .break_inline, + .condbr, + .condbr_inline, + .compile_error, + .ret_node, + .ret_load, + .ret_implicit, + .ret_err_value, + .@"unreachable", + .repeat, + .repeat_inline, + .panic, + .trap, + .check_comptime_control_flow, + => { + noreturn_src_node = statement; + break :b true; + }, + + // ZIR instructions that are always `void`. + .dbg_stmt, + .dbg_var_ptr, + .dbg_var_val, + .ensure_result_used, + .ensure_result_non_error, + .ensure_err_union_payload_void, + .@"export", + .export_value, + .set_eval_branch_quota, + .atomic_store, + .store_node, + .store_to_inferred_ptr, + .resolve_inferred_alloc, + .set_runtime_safety, + .closure_capture, + .memcpy, + .memset, + .validate_deref, + .validate_destructure, + .save_err_ret_index, + .restore_err_ret_index_unconditional, + .restore_err_ret_index_fn_entry, + .validate_struct_init_ty, + .validate_struct_init_result_ty, + .validate_ptr_struct_init, + .validate_array_init_ty, + .validate_array_init_result_ty, + .validate_ptr_array_init, + .validate_ref_ty, + => break :b true, + + .@"defer" => unreachable, + .defer_err_code => unreachable, + } + } else switch (maybe_unused_result) { + .none => unreachable, + + .unreachable_value => b: { + noreturn_src_node = statement; + break :b true; + }, + + .void_value => true, + + else => false, + }; + if (!elide_check) { + _ = try gz.addUnNode(.ensure_result_used, maybe_unused_result, statement); + } + return noreturn_src_node; +} + +fn countDefers(outer_scope: *Scope, inner_scope: *Scope) struct { + have_any: bool, + have_normal: bool, + have_err: bool, + need_err_code: bool, +} { + var have_normal = false; + var have_err = false; + var need_err_code = false; + var scope = inner_scope; + while (scope != outer_scope) { + switch (scope.tag) { + .gen_zir => scope = scope.cast(GenZir).?.parent, + .local_val => scope = scope.cast(Scope.LocalVal).?.parent, + .local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent, + .defer_normal => { + const defer_scope = scope.cast(Scope.Defer).?; + scope = defer_scope.parent; + + have_normal = true; + }, + .defer_error => { + const defer_scope = scope.cast(Scope.Defer).?; + scope = defer_scope.parent; + + have_err = true; + + const have_err_payload = defer_scope.remapped_err_code != .none; + need_err_code = need_err_code or have_err_payload; + }, + .namespace, .enum_namespace => unreachable, + .top => unreachable, + } + } + return .{ + .have_any = have_normal or have_err, + .have_normal = have_normal, + .have_err = have_err, + .need_err_code = need_err_code, + }; +} + +const DefersToEmit = union(enum) { + both: Zir.Inst.Ref, // err code + both_sans_err, + normal_only, +}; + +fn genDefers( + gz: *GenZir, + outer_scope: *Scope, + inner_scope: *Scope, + which_ones: DefersToEmit, +) InnerError!void { + const gpa = gz.astgen.gpa; + + var scope = inner_scope; + while (scope != outer_scope) { + switch (scope.tag) { + .gen_zir => scope = scope.cast(GenZir).?.parent, + .local_val => scope = scope.cast(Scope.LocalVal).?.parent, + .local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent, + .defer_normal => { + const defer_scope = scope.cast(Scope.Defer).?; + scope = defer_scope.parent; + try gz.addDefer(defer_scope.index, defer_scope.len); + }, + .defer_error => { + const defer_scope = scope.cast(Scope.Defer).?; + scope = defer_scope.parent; + switch (which_ones) { + .both_sans_err => { + try gz.addDefer(defer_scope.index, defer_scope.len); + }, + .both => |err_code| { + if (defer_scope.remapped_err_code.unwrap()) |remapped_err_code| { + try 
gz.instructions.ensureUnusedCapacity(gpa, 1); + try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); + + const payload_index = try gz.astgen.addExtra(Zir.Inst.DeferErrCode{ + .remapped_err_code = remapped_err_code, + .index = defer_scope.index, + .len = defer_scope.len, + }); + const new_index: Zir.Inst.Index = @enumFromInt(gz.astgen.instructions.len); + gz.astgen.instructions.appendAssumeCapacity(.{ + .tag = .defer_err_code, + .data = .{ .defer_err_code = .{ + .err_code = err_code, + .payload_index = payload_index, + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + } else { + try gz.addDefer(defer_scope.index, defer_scope.len); + } + }, + .normal_only => continue, + } + }, + .namespace, .enum_namespace => unreachable, + .top => unreachable, + } + } +} + +fn checkUsed(gz: *GenZir, outer_scope: *Scope, inner_scope: *Scope) InnerError!void { + const astgen = gz.astgen; + + var scope = inner_scope; + while (scope != outer_scope) { + switch (scope.tag) { + .gen_zir => scope = scope.cast(GenZir).?.parent, + .local_val => { + const s = scope.cast(Scope.LocalVal).?; + if (s.used == 0 and s.discarded == 0) { + try astgen.appendErrorTok(s.token_src, "unused {s}", .{@tagName(s.id_cat)}); + } else if (s.used != 0 and s.discarded != 0) { + try astgen.appendErrorTokNotes(s.discarded, "pointless discard of {s}", .{@tagName(s.id_cat)}, &[_]u32{ + try gz.astgen.errNoteTok(s.used, "used here", .{}), + }); + } + scope = s.parent; + }, + .local_ptr => { + const s = scope.cast(Scope.LocalPtr).?; + if (s.used == 0 and s.discarded == 0) { + try astgen.appendErrorTok(s.token_src, "unused {s}", .{@tagName(s.id_cat)}); + } else { + if (s.used != 0 and s.discarded != 0) { + try astgen.appendErrorTokNotes(s.discarded, "pointless discard of {s}", .{@tagName(s.id_cat)}, &[_]u32{ + try astgen.errNoteTok(s.used, "used here", .{}), + }); + } + if (s.id_cat == .@"local variable" and !s.used_as_lvalue) { + try astgen.appendErrorTokNotes(s.token_src, "local variable is never mutated", .{}, &.{ + try astgen.errNoteTok(s.token_src, "consider using 'const'", .{}), + }); + } + } + + scope = s.parent; + }, + .defer_normal, .defer_error => scope = scope.cast(Scope.Defer).?.parent, + .namespace, .enum_namespace => unreachable, + .top => unreachable, + } + } +} + +fn deferStmt( + gz: *GenZir, + scope: *Scope, + node: Ast.Node.Index, + block_arena: Allocator, + scope_tag: Scope.Tag, +) InnerError!*Scope { + var defer_gen = gz.makeSubBlock(scope); + defer_gen.cur_defer_node = node; + defer_gen.any_defer_node = node; + defer defer_gen.unstack(); + + const tree = gz.astgen.tree; + const node_datas = tree.nodes.items(.data); + const expr_node = node_datas[node].rhs; + + const payload_token = node_datas[node].lhs; + var local_val_scope: Scope.LocalVal = undefined; + var opt_remapped_err_code: Zir.Inst.OptionalIndex = .none; + const have_err_code = scope_tag == .defer_error and payload_token != 0; + const sub_scope = if (!have_err_code) &defer_gen.base else blk: { + const ident_name = try gz.astgen.identAsString(payload_token); + const remapped_err_code: Zir.Inst.Index = @enumFromInt(gz.astgen.instructions.len); + opt_remapped_err_code = remapped_err_code.toOptional(); + try gz.astgen.instructions.append(gz.astgen.gpa, .{ + .tag = .extended, + .data = .{ .extended = .{ + .opcode = .value_placeholder, + .small = undefined, + .operand = undefined, + } }, + }); + const remapped_err_code_ref = remapped_err_code.toRef(); + local_val_scope = .{ + .parent = &defer_gen.base, + .gen_zir = gz, + .name = ident_name, + .inst = 
remapped_err_code_ref, + .token_src = payload_token, + .id_cat = .capture, + }; + try gz.addDbgVar(.dbg_var_val, ident_name, remapped_err_code_ref); + break :blk &local_val_scope.base; + }; + _ = try unusedResultExpr(&defer_gen, sub_scope, expr_node); + try checkUsed(gz, scope, sub_scope); + _ = try defer_gen.addBreak(.break_inline, @enumFromInt(0), .void_value); + + // We must handle ref_table for remapped_err_code manually. + const body = defer_gen.instructionsSlice(); + const body_len = blk: { + var refs: u32 = 0; + if (opt_remapped_err_code.unwrap()) |remapped_err_code| { + var cur_inst = remapped_err_code; + while (gz.astgen.ref_table.get(cur_inst)) |ref_inst| { + refs += 1; + cur_inst = ref_inst; + } + } + break :blk gz.astgen.countBodyLenAfterFixups(body) + refs; + }; + + const index: u32 = @intCast(gz.astgen.extra.items.len); + try gz.astgen.extra.ensureUnusedCapacity(gz.astgen.gpa, body_len); + if (opt_remapped_err_code.unwrap()) |remapped_err_code| { + if (gz.astgen.ref_table.fetchRemove(remapped_err_code)) |kv| { + gz.astgen.appendPossiblyRefdBodyInst(&gz.astgen.extra, kv.value); + } + } + gz.astgen.appendBodyWithFixups(body); + + const defer_scope = try block_arena.create(Scope.Defer); + + defer_scope.* = .{ + .base = .{ .tag = scope_tag }, + .parent = scope, + .index = index, + .len = body_len, + .remapped_err_code = opt_remapped_err_code, + }; + return &defer_scope.base; +} + +fn varDecl( + gz: *GenZir, + scope: *Scope, + node: Ast.Node.Index, + block_arena: Allocator, + var_decl: Ast.full.VarDecl, +) InnerError!*Scope { + try emitDbgNode(gz, node); + const astgen = gz.astgen; + const tree = astgen.tree; + const token_tags = tree.tokens.items(.tag); + const main_tokens = tree.nodes.items(.main_token); + + const name_token = var_decl.ast.mut_token + 1; + const ident_name_raw = tree.tokenSlice(name_token); + if (mem.eql(u8, ident_name_raw, "_")) { + return astgen.failTok(name_token, "'_' used as an identifier without @\"_\" syntax", .{}); + } + const ident_name = try astgen.identAsString(name_token); + + try astgen.detectLocalShadowing( + scope, + ident_name, + name_token, + ident_name_raw, + if (token_tags[var_decl.ast.mut_token] == .keyword_const) .@"local constant" else .@"local variable", + ); + + if (var_decl.ast.init_node == 0) { + return astgen.failNode(node, "variables must be initialized", .{}); + } + + if (var_decl.ast.addrspace_node != 0) { + return astgen.failTok(main_tokens[var_decl.ast.addrspace_node], "cannot set address space of local variable '{s}'", .{ident_name_raw}); + } + + if (var_decl.ast.section_node != 0) { + return astgen.failTok(main_tokens[var_decl.ast.section_node], "cannot set section of local variable '{s}'", .{ident_name_raw}); + } + + const align_inst: Zir.Inst.Ref = if (var_decl.ast.align_node != 0) + try expr(gz, scope, coerced_align_ri, var_decl.ast.align_node) + else + .none; + + switch (token_tags[var_decl.ast.mut_token]) { + .keyword_const => { + if (var_decl.comptime_token) |comptime_token| { + try astgen.appendErrorTok(comptime_token, "'comptime const' is redundant; instead wrap the initialization expression with 'comptime'", .{}); + } + + // Depending on the type of AST the initialization expression is, we may need an lvalue + // or an rvalue as a result location. If it is an rvalue, we can use the instruction as + // the variable, no memory location needed. 
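+            // Illustrative sketch (assumed user code): `const x = a + b;` can bind the
+            // `add` instruction directly as the constant, whereas an init whose address
+            // is observed (tracked in `nodes_need_rl`) takes the alloc path below.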
+ const type_node = var_decl.ast.type_node; + if (align_inst == .none and + !astgen.nodes_need_rl.contains(node)) + { + const result_info: ResultInfo = if (type_node != 0) .{ + .rl = .{ .ty = try typeExpr(gz, scope, type_node) }, + .ctx = .const_init, + } else .{ .rl = .none, .ctx = .const_init }; + const prev_anon_name_strategy = gz.anon_name_strategy; + gz.anon_name_strategy = .dbg_var; + const init_inst = try reachableExpr(gz, scope, result_info, var_decl.ast.init_node, node); + gz.anon_name_strategy = prev_anon_name_strategy; + + try gz.addDbgVar(.dbg_var_val, ident_name, init_inst); + + // The const init expression may have modified the error return trace, so signal + // to Sema that it should save the new index for restoring later. + if (nodeMayAppendToErrorTrace(tree, var_decl.ast.init_node)) + _ = try gz.addSaveErrRetIndex(.{ .if_of_error_type = init_inst }); + + const sub_scope = try block_arena.create(Scope.LocalVal); + sub_scope.* = .{ + .parent = scope, + .gen_zir = gz, + .name = ident_name, + .inst = init_inst, + .token_src = name_token, + .id_cat = .@"local constant", + }; + return &sub_scope.base; + } + + const is_comptime = gz.is_comptime or + tree.nodes.items(.tag)[var_decl.ast.init_node] == .@"comptime"; + + var resolve_inferred_alloc: Zir.Inst.Ref = .none; + var opt_type_inst: Zir.Inst.Ref = .none; + const init_rl: ResultInfo.Loc = if (type_node != 0) init_rl: { + const type_inst = try typeExpr(gz, scope, type_node); + opt_type_inst = type_inst; + if (align_inst == .none) { + break :init_rl .{ .ptr = .{ .inst = try gz.addUnNode(.alloc, type_inst, node) } }; + } else { + break :init_rl .{ .ptr = .{ .inst = try gz.addAllocExtended(.{ + .node = node, + .type_inst = type_inst, + .align_inst = align_inst, + .is_const = true, + .is_comptime = is_comptime, + }) } }; + } + } else init_rl: { + const alloc_inst = if (align_inst == .none) ptr: { + const tag: Zir.Inst.Tag = if (is_comptime) + .alloc_inferred_comptime + else + .alloc_inferred; + break :ptr try gz.addNode(tag, node); + } else ptr: { + break :ptr try gz.addAllocExtended(.{ + .node = node, + .type_inst = .none, + .align_inst = align_inst, + .is_const = true, + .is_comptime = is_comptime, + }); + }; + resolve_inferred_alloc = alloc_inst; + break :init_rl .{ .inferred_ptr = alloc_inst }; + }; + const var_ptr = switch (init_rl) { + .ptr => |ptr| ptr.inst, + .inferred_ptr => |inst| inst, + else => unreachable, + }; + const init_result_info: ResultInfo = .{ .rl = init_rl, .ctx = .const_init }; + + const prev_anon_name_strategy = gz.anon_name_strategy; + gz.anon_name_strategy = .dbg_var; + defer gz.anon_name_strategy = prev_anon_name_strategy; + const init_inst = try reachableExpr(gz, scope, init_result_info, var_decl.ast.init_node, node); + + // The const init expression may have modified the error return trace, so signal + // to Sema that it should save the new index for restoring later. 
+ if (nodeMayAppendToErrorTrace(tree, var_decl.ast.init_node)) + _ = try gz.addSaveErrRetIndex(.{ .if_of_error_type = init_inst }); + + const const_ptr = if (resolve_inferred_alloc != .none) p: { + _ = try gz.addUnNode(.resolve_inferred_alloc, resolve_inferred_alloc, node); + break :p var_ptr; + } else try gz.addUnNode(.make_ptr_const, var_ptr, node); + + try gz.addDbgVar(.dbg_var_ptr, ident_name, const_ptr); + + const sub_scope = try block_arena.create(Scope.LocalPtr); + sub_scope.* = .{ + .parent = scope, + .gen_zir = gz, + .name = ident_name, + .ptr = const_ptr, + .token_src = name_token, + .maybe_comptime = true, + .id_cat = .@"local constant", + }; + return &sub_scope.base; + }, + .keyword_var => { + if (var_decl.comptime_token != null and gz.is_comptime) + return astgen.failTok(var_decl.comptime_token.?, "'comptime var' is redundant in comptime scope", .{}); + const is_comptime = var_decl.comptime_token != null or gz.is_comptime; + var resolve_inferred_alloc: Zir.Inst.Ref = .none; + const alloc: Zir.Inst.Ref, const result_info: ResultInfo = if (var_decl.ast.type_node != 0) a: { + const type_inst = try typeExpr(gz, scope, var_decl.ast.type_node); + const alloc = alloc: { + if (align_inst == .none) { + const tag: Zir.Inst.Tag = if (is_comptime) + .alloc_comptime_mut + else + .alloc_mut; + break :alloc try gz.addUnNode(tag, type_inst, node); + } else { + break :alloc try gz.addAllocExtended(.{ + .node = node, + .type_inst = type_inst, + .align_inst = align_inst, + .is_const = false, + .is_comptime = is_comptime, + }); + } + }; + break :a .{ alloc, .{ .rl = .{ .ptr = .{ .inst = alloc } } } }; + } else a: { + const alloc = alloc: { + if (align_inst == .none) { + const tag: Zir.Inst.Tag = if (is_comptime) + .alloc_inferred_comptime_mut + else + .alloc_inferred_mut; + break :alloc try gz.addNode(tag, node); + } else { + break :alloc try gz.addAllocExtended(.{ + .node = node, + .type_inst = .none, + .align_inst = align_inst, + .is_const = false, + .is_comptime = is_comptime, + }); + } + }; + resolve_inferred_alloc = alloc; + break :a .{ alloc, .{ .rl = .{ .inferred_ptr = alloc } } }; + }; + const prev_anon_name_strategy = gz.anon_name_strategy; + gz.anon_name_strategy = .dbg_var; + _ = try reachableExprComptime(gz, scope, result_info, var_decl.ast.init_node, node, is_comptime); + gz.anon_name_strategy = prev_anon_name_strategy; + if (resolve_inferred_alloc != .none) { + _ = try gz.addUnNode(.resolve_inferred_alloc, resolve_inferred_alloc, node); + } + + try gz.addDbgVar(.dbg_var_ptr, ident_name, alloc); + + const sub_scope = try block_arena.create(Scope.LocalPtr); + sub_scope.* = .{ + .parent = scope, + .gen_zir = gz, + .name = ident_name, + .ptr = alloc, + .token_src = name_token, + .maybe_comptime = is_comptime, + .id_cat = .@"local variable", + }; + return &sub_scope.base; + }, + else => unreachable, + } +} + +fn emitDbgNode(gz: *GenZir, node: Ast.Node.Index) !void { + // The instruction emitted here is for debugging runtime code. + // If the current block will be evaluated only during semantic analysis + // then no dbg_stmt ZIR instruction is needed. 
+ if (gz.is_comptime) return; + const astgen = gz.astgen; + astgen.advanceSourceCursorToNode(node); + const line = astgen.source_line - gz.decl_line; + const column = astgen.source_column; + try emitDbgStmt(gz, .{ line, column }); +} + +fn assign(gz: *GenZir, scope: *Scope, infix_node: Ast.Node.Index) InnerError!void { + try emitDbgNode(gz, infix_node); + const astgen = gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); + const node_tags = tree.nodes.items(.tag); + + const lhs = node_datas[infix_node].lhs; + const rhs = node_datas[infix_node].rhs; + if (node_tags[lhs] == .identifier) { + // This intentionally does not support `@"_"` syntax. + const ident_name = tree.tokenSlice(main_tokens[lhs]); + if (mem.eql(u8, ident_name, "_")) { + _ = try expr(gz, scope, .{ .rl = .discard, .ctx = .assignment }, rhs); + return; + } + } + const lvalue = try lvalExpr(gz, scope, lhs); + _ = try expr(gz, scope, .{ .rl = .{ .ptr = .{ + .inst = lvalue, + .src_node = infix_node, + } } }, rhs); +} + +/// Handles destructure assignments where no LHS is a `const` or `var` decl. +fn assignDestructure(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!void { + try emitDbgNode(gz, node); + const astgen = gz.astgen; + const tree = astgen.tree; + const token_tags = tree.tokens.items(.tag); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); + const node_tags = tree.nodes.items(.tag); + + const extra_index = node_datas[node].lhs; + const lhs_count = tree.extra_data[extra_index]; + const lhs_nodes: []const Ast.Node.Index = @ptrCast(tree.extra_data[extra_index + 1 ..][0..lhs_count]); + const rhs = node_datas[node].rhs; + + const maybe_comptime_token = tree.firstToken(node) - 1; + const declared_comptime = token_tags[maybe_comptime_token] == .keyword_comptime; + + if (declared_comptime and gz.is_comptime) { + return astgen.failNode(node, "redundant comptime keyword in already comptime scope", .{}); + } + + // If this expression is marked comptime, we must wrap the whole thing in a comptime block. + var gz_buf: GenZir = undefined; + const inner_gz = if (declared_comptime) bs: { + gz_buf = gz.makeSubBlock(scope); + gz_buf.is_comptime = true; + break :bs &gz_buf; + } else gz; + defer if (declared_comptime) inner_gz.unstack(); + + const rl_components = try astgen.arena.alloc(ResultInfo.Loc.DestructureComponent, lhs_nodes.len); + for (rl_components, lhs_nodes) |*lhs_rl, lhs_node| { + if (node_tags[lhs_node] == .identifier) { + // This intentionally does not support `@"_"` syntax. + const ident_name = tree.tokenSlice(main_tokens[lhs_node]); + if (mem.eql(u8, ident_name, "_")) { + lhs_rl.* = .discard; + continue; + } + } + lhs_rl.* = .{ .typed_ptr = .{ + .inst = try lvalExpr(inner_gz, scope, lhs_node), + .src_node = lhs_node, + } }; + } + + const ri: ResultInfo = .{ .rl = .{ .destructure = .{ + .src_node = node, + .components = rl_components, + } } }; + + _ = try expr(inner_gz, scope, ri, rhs); + + if (declared_comptime) { + const comptime_block_inst = try gz.makeBlockInst(.block_comptime, node); + _ = try inner_gz.addBreak(.@"break", comptime_block_inst, .void_value); + try inner_gz.setBlockBody(comptime_block_inst); + try gz.instructions.append(gz.astgen.gpa, comptime_block_inst); + } +} + +/// Handles destructure assignments where the LHS may contain `const` or `var` decls. 
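+/// Illustrative sketch (assumed user code): `x, const y = pair;` mixes an existing
+/// lvalue with a new `const` decl, so it is handled here rather than by
+/// `assignDestructure`.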
+fn assignDestructureMaybeDecls( + gz: *GenZir, + scope: *Scope, + node: Ast.Node.Index, + block_arena: Allocator, +) InnerError!*Scope { + try emitDbgNode(gz, node); + const astgen = gz.astgen; + const tree = astgen.tree; + const token_tags = tree.tokens.items(.tag); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); + const node_tags = tree.nodes.items(.tag); + + const extra_index = node_datas[node].lhs; + const lhs_count = tree.extra_data[extra_index]; + const lhs_nodes: []const Ast.Node.Index = @ptrCast(tree.extra_data[extra_index + 1 ..][0..lhs_count]); + const rhs = node_datas[node].rhs; + + const maybe_comptime_token = tree.firstToken(node) - 1; + const declared_comptime = token_tags[maybe_comptime_token] == .keyword_comptime; + if (declared_comptime and gz.is_comptime) { + return astgen.failNode(node, "redundant comptime keyword in already comptime scope", .{}); + } + + const is_comptime = declared_comptime or gz.is_comptime; + const rhs_is_comptime = tree.nodes.items(.tag)[rhs] == .@"comptime"; + + // When declaring consts via a destructure, we always use a result pointer. + // This avoids the need to create tuple types, and is also likely easier to + // optimize, since it's a bit tricky for the optimizer to "split up" the + // value into individual pointer writes down the line. + + // We know this rl information won't live past the evaluation of this + // expression, so it may as well go in the block arena. + const rl_components = try block_arena.alloc(ResultInfo.Loc.DestructureComponent, lhs_nodes.len); + var any_non_const_lhs = false; + var any_lvalue_expr = false; + for (rl_components, lhs_nodes) |*lhs_rl, lhs_node| { + switch (node_tags[lhs_node]) { + .identifier => { + // This intentionally does not support `@"_"` syntax. + const ident_name = tree.tokenSlice(main_tokens[lhs_node]); + if (mem.eql(u8, ident_name, "_")) { + any_non_const_lhs = true; + lhs_rl.* = .discard; + continue; + } + }, + .global_var_decl, .local_var_decl, .simple_var_decl, .aligned_var_decl => { + const full = tree.fullVarDecl(lhs_node).?; + + const name_token = full.ast.mut_token + 1; + const ident_name_raw = tree.tokenSlice(name_token); + if (mem.eql(u8, ident_name_raw, "_")) { + return astgen.failTok(name_token, "'_' used as an identifier without @\"_\" syntax", .{}); + } + + // We detect shadowing in the second pass over these, while we're creating scopes. + + if (full.ast.addrspace_node != 0) { + return astgen.failTok(main_tokens[full.ast.addrspace_node], "cannot set address space of local variable '{s}'", .{ident_name_raw}); + } + if (full.ast.section_node != 0) { + return astgen.failTok(main_tokens[full.ast.section_node], "cannot set section of local variable '{s}'", .{ident_name_raw}); + } + + const is_const = switch (token_tags[full.ast.mut_token]) { + .keyword_var => false, + .keyword_const => true, + else => unreachable, + }; + if (!is_const) any_non_const_lhs = true; + + // We also mark `const`s as comptime if the RHS is definitely comptime-known. 
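+                // Illustrative sketch (assumed user code): in
+                //     const a, const b = comptime computePair();
+                // `rhs_is_comptime` holds, so each `const` is treated as comptime-known.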
+ const this_lhs_comptime = is_comptime or (is_const and rhs_is_comptime); + + const align_inst: Zir.Inst.Ref = if (full.ast.align_node != 0) + try expr(gz, scope, coerced_align_ri, full.ast.align_node) + else + .none; + + if (full.ast.type_node != 0) { + // Typed alloc + const type_inst = try typeExpr(gz, scope, full.ast.type_node); + const ptr = if (align_inst == .none) ptr: { + const tag: Zir.Inst.Tag = if (is_const) + .alloc + else if (this_lhs_comptime) + .alloc_comptime_mut + else + .alloc_mut; + break :ptr try gz.addUnNode(tag, type_inst, node); + } else try gz.addAllocExtended(.{ + .node = node, + .type_inst = type_inst, + .align_inst = align_inst, + .is_const = is_const, + .is_comptime = this_lhs_comptime, + }); + lhs_rl.* = .{ .typed_ptr = .{ .inst = ptr } }; + } else { + // Inferred alloc + const ptr = if (align_inst == .none) ptr: { + const tag: Zir.Inst.Tag = if (is_const) tag: { + break :tag if (this_lhs_comptime) .alloc_inferred_comptime else .alloc_inferred; + } else tag: { + break :tag if (this_lhs_comptime) .alloc_inferred_comptime_mut else .alloc_inferred_mut; + }; + break :ptr try gz.addNode(tag, node); + } else try gz.addAllocExtended(.{ + .node = node, + .type_inst = .none, + .align_inst = align_inst, + .is_const = is_const, + .is_comptime = this_lhs_comptime, + }); + lhs_rl.* = .{ .inferred_ptr = ptr }; + } + + continue; + }, + else => {}, + } + // This LHS is just an lvalue expression. + // We will fill in its result pointer later, inside a comptime block. + any_non_const_lhs = true; + any_lvalue_expr = true; + lhs_rl.* = .{ .typed_ptr = .{ + .inst = undefined, + .src_node = lhs_node, + } }; + } + + if (declared_comptime and !any_non_const_lhs) { + try astgen.appendErrorTok(maybe_comptime_token, "'comptime const' is redundant; instead wrap the initialization expression with 'comptime'", .{}); + } + + // If this expression is marked comptime, we must wrap it in a comptime block. + var gz_buf: GenZir = undefined; + const inner_gz = if (declared_comptime) bs: { + gz_buf = gz.makeSubBlock(scope); + gz_buf.is_comptime = true; + break :bs &gz_buf; + } else gz; + defer if (declared_comptime) inner_gz.unstack(); + + if (any_lvalue_expr) { + // At least one LHS was an lvalue expr. Iterate again in order to + // evaluate the lvalues from within the possible block_comptime. + for (rl_components, lhs_nodes) |*lhs_rl, lhs_node| { + if (lhs_rl.* != .typed_ptr) continue; + switch (node_tags[lhs_node]) { + .global_var_decl, .local_var_decl, .simple_var_decl, .aligned_var_decl => continue, + else => {}, + } + lhs_rl.typed_ptr.inst = try lvalExpr(inner_gz, scope, lhs_node); + } + } + + // We can't give a reasonable anon name strategy for destructured inits, so + // leave it at its default of `.anon`. + _ = try reachableExpr(inner_gz, scope, .{ .rl = .{ .destructure = .{ + .src_node = node, + .components = rl_components, + } } }, rhs, node); + + if (declared_comptime) { + // Finish the block_comptime. Inferred alloc resolution etc will occur + // in the parent block. + const comptime_block_inst = try gz.makeBlockInst(.block_comptime, node); + _ = try inner_gz.addBreak(.@"break", comptime_block_inst, .void_value); + try inner_gz.setBlockBody(comptime_block_inst); + try gz.instructions.append(gz.astgen.gpa, comptime_block_inst); + } + + // Now, iterate over the LHS exprs to construct any new scopes. + // If there were any inferred allocations, resolve them. + // If there were any `const` decls, make the pointer constant. 
+ var cur_scope = scope; + for (rl_components, lhs_nodes) |lhs_rl, lhs_node| { + switch (node_tags[lhs_node]) { + .local_var_decl, .simple_var_decl, .aligned_var_decl => {}, + else => continue, // We were mutating an existing lvalue - nothing to do + } + const full = tree.fullVarDecl(lhs_node).?; + const raw_ptr = switch (lhs_rl) { + .discard => unreachable, + .typed_ptr => |typed_ptr| typed_ptr.inst, + .inferred_ptr => |ptr_inst| ptr_inst, + }; + // If the alloc was inferred, resolve it. + if (full.ast.type_node == 0) { + _ = try gz.addUnNode(.resolve_inferred_alloc, raw_ptr, lhs_node); + } + const is_const = switch (token_tags[full.ast.mut_token]) { + .keyword_var => false, + .keyword_const => true, + else => unreachable, + }; + // If the alloc was const, make it const. + const var_ptr = if (is_const and full.ast.type_node != 0) make_const: { + // Note that we don't do this if type_node == 0 since `resolve_inferred_alloc` + // handles it for us. + break :make_const try gz.addUnNode(.make_ptr_const, raw_ptr, node); + } else raw_ptr; + const name_token = full.ast.mut_token + 1; + const ident_name_raw = tree.tokenSlice(name_token); + const ident_name = try astgen.identAsString(name_token); + try astgen.detectLocalShadowing( + cur_scope, + ident_name, + name_token, + ident_name_raw, + if (is_const) .@"local constant" else .@"local variable", + ); + try gz.addDbgVar(.dbg_var_ptr, ident_name, var_ptr); + // Finally, create the scope. + const sub_scope = try block_arena.create(Scope.LocalPtr); + sub_scope.* = .{ + .parent = cur_scope, + .gen_zir = gz, + .name = ident_name, + .ptr = var_ptr, + .token_src = name_token, + .maybe_comptime = is_const or is_comptime, + .id_cat = if (is_const) .@"local constant" else .@"local variable", + }; + cur_scope = &sub_scope.base; + } + + return cur_scope; +} + +fn assignOp( + gz: *GenZir, + scope: *Scope, + infix_node: Ast.Node.Index, + op_inst_tag: Zir.Inst.Tag, +) InnerError!void { + try emitDbgNode(gz, infix_node); + const astgen = gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + + const lhs_ptr = try lvalExpr(gz, scope, node_datas[infix_node].lhs); + + const cursor = switch (op_inst_tag) { + .add, .sub, .mul, .div, .mod_rem => maybeAdvanceSourceCursorToMainToken(gz, infix_node), + else => undefined, + }; + const lhs = try gz.addUnNode(.load, lhs_ptr, infix_node); + const lhs_type = try gz.addUnNode(.typeof, lhs, infix_node); + const rhs = try expr(gz, scope, .{ .rl = .{ .coerced_ty = lhs_type } }, node_datas[infix_node].rhs); + + switch (op_inst_tag) { + .add, .sub, .mul, .div, .mod_rem => { + try emitDbgStmt(gz, cursor); + }, + else => {}, + } + const result = try gz.addPlNode(op_inst_tag, infix_node, Zir.Inst.Bin{ + .lhs = lhs, + .rhs = rhs, + }); + _ = try gz.addPlNode(.store_node, infix_node, Zir.Inst.Bin{ + .lhs = lhs_ptr, + .rhs = result, + }); +} + +fn assignShift( + gz: *GenZir, + scope: *Scope, + infix_node: Ast.Node.Index, + op_inst_tag: Zir.Inst.Tag, +) InnerError!void { + try emitDbgNode(gz, infix_node); + const astgen = gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + + const lhs_ptr = try lvalExpr(gz, scope, node_datas[infix_node].lhs); + const lhs = try gz.addUnNode(.load, lhs_ptr, infix_node); + const rhs_type = try gz.addUnNode(.typeof_log2_int_type, lhs, infix_node); + const rhs = try expr(gz, scope, .{ .rl = .{ .ty = rhs_type } }, node_datas[infix_node].rhs); + + const result = try gz.addPlNode(op_inst_tag, infix_node, Zir.Inst.Bin{ + .lhs = lhs, + .rhs = rhs, + 
}); + _ = try gz.addPlNode(.store_node, infix_node, Zir.Inst.Bin{ + .lhs = lhs_ptr, + .rhs = result, + }); +} + +fn assignShiftSat(gz: *GenZir, scope: *Scope, infix_node: Ast.Node.Index) InnerError!void { + try emitDbgNode(gz, infix_node); + const astgen = gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + + const lhs_ptr = try lvalExpr(gz, scope, node_datas[infix_node].lhs); + const lhs = try gz.addUnNode(.load, lhs_ptr, infix_node); + // Saturating shift-left allows any integer type for both the LHS and RHS. + const rhs = try expr(gz, scope, .{ .rl = .none }, node_datas[infix_node].rhs); + + const result = try gz.addPlNode(.shl_sat, infix_node, Zir.Inst.Bin{ + .lhs = lhs, + .rhs = rhs, + }); + _ = try gz.addPlNode(.store_node, infix_node, Zir.Inst.Bin{ + .lhs = lhs_ptr, + .rhs = result, + }); +} + +fn ptrType( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + ptr_info: Ast.full.PtrType, +) InnerError!Zir.Inst.Ref { + if (ptr_info.size == .C and ptr_info.allowzero_token != null) { + return gz.astgen.failTok(ptr_info.allowzero_token.?, "C pointers always allow address zero", .{}); + } + + const source_offset = gz.astgen.source_offset; + const source_line = gz.astgen.source_line; + const source_column = gz.astgen.source_column; + const elem_type = try typeExpr(gz, scope, ptr_info.ast.child_type); + + var sentinel_ref: Zir.Inst.Ref = .none; + var align_ref: Zir.Inst.Ref = .none; + var addrspace_ref: Zir.Inst.Ref = .none; + var bit_start_ref: Zir.Inst.Ref = .none; + var bit_end_ref: Zir.Inst.Ref = .none; + var trailing_count: u32 = 0; + + if (ptr_info.ast.sentinel != 0) { + // These attributes can appear in any order and they all come before the + // element type so we need to reset the source cursor before generating them. 
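+ // Illustrative example (assumed from context, not part of this change): in a
+ // type like
+ //     [*:0]align(4) u8
+ // the sentinel and align expressions precede `u8` in the source, but
+ // `elem_type` was generated first and advanced the cursor past them, so each
+ // branch below rewinds to the saved offset/line/column before emitting ZIR
+ // for its attribute, keeping debug line info accurate.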
+ gz.astgen.source_offset = source_offset; + gz.astgen.source_line = source_line; + gz.astgen.source_column = source_column; + + sentinel_ref = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = elem_type } }, ptr_info.ast.sentinel); + trailing_count += 1; + } + if (ptr_info.ast.addrspace_node != 0) { + gz.astgen.source_offset = source_offset; + gz.astgen.source_line = source_line; + gz.astgen.source_column = source_column; + + addrspace_ref = try expr(gz, scope, coerced_addrspace_ri, ptr_info.ast.addrspace_node); + trailing_count += 1; + } + if (ptr_info.ast.align_node != 0) { + gz.astgen.source_offset = source_offset; + gz.astgen.source_line = source_line; + gz.astgen.source_column = source_column; + + align_ref = try expr(gz, scope, coerced_align_ri, ptr_info.ast.align_node); + trailing_count += 1; + } + if (ptr_info.ast.bit_range_start != 0) { + assert(ptr_info.ast.bit_range_end != 0); + bit_start_ref = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .u16_type } }, ptr_info.ast.bit_range_start); + bit_end_ref = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .u16_type } }, ptr_info.ast.bit_range_end); + trailing_count += 2; + } + + const gpa = gz.astgen.gpa; + try gz.instructions.ensureUnusedCapacity(gpa, 1); + try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); + try gz.astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.PtrType).Struct.fields.len + + trailing_count); + + const payload_index = gz.astgen.addExtraAssumeCapacity(Zir.Inst.PtrType{ + .elem_type = elem_type, + .src_node = gz.nodeIndexToRelative(node), + }); + if (sentinel_ref != .none) { + gz.astgen.extra.appendAssumeCapacity(@intFromEnum(sentinel_ref)); + } + if (align_ref != .none) { + gz.astgen.extra.appendAssumeCapacity(@intFromEnum(align_ref)); + } + if (addrspace_ref != .none) { + gz.astgen.extra.appendAssumeCapacity(@intFromEnum(addrspace_ref)); + } + if (bit_start_ref != .none) { + gz.astgen.extra.appendAssumeCapacity(@intFromEnum(bit_start_ref)); + gz.astgen.extra.appendAssumeCapacity(@intFromEnum(bit_end_ref)); + } + + const new_index: Zir.Inst.Index = @enumFromInt(gz.astgen.instructions.len); + const result = new_index.toRef(); + gz.astgen.instructions.appendAssumeCapacity(.{ .tag = .ptr_type, .data = .{ + .ptr_type = .{ + .flags = .{ + .is_allowzero = ptr_info.allowzero_token != null, + .is_mutable = ptr_info.const_token == null, + .is_volatile = ptr_info.volatile_token != null, + .has_sentinel = sentinel_ref != .none, + .has_align = align_ref != .none, + .has_addrspace = addrspace_ref != .none, + .has_bit_range = bit_start_ref != .none, + }, + .size = ptr_info.size, + .payload_index = payload_index, + }, + } }); + gz.instructions.appendAssumeCapacity(new_index); + + return rvalue(gz, ri, result, node); +} + +fn arrayType(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) !Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + const node_tags = tree.nodes.items(.tag); + const main_tokens = tree.nodes.items(.main_token); + + const len_node = node_datas[node].lhs; + if (node_tags[len_node] == .identifier and + mem.eql(u8, tree.tokenSlice(main_tokens[len_node]), "_")) + { + return astgen.failNode(len_node, "unable to infer array size", .{}); + } + const len = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, len_node); + const elem_type = try typeExpr(gz, scope, node_datas[node].rhs); + + const result = try gz.addPlNode(.array_type, node, Zir.Inst.Bin{ + .lhs = len, + .rhs = elem_type, + }); + return rvalue(gz, ri, result, 
node); +} + +fn arrayTypeSentinel(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) !Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + const node_tags = tree.nodes.items(.tag); + const main_tokens = tree.nodes.items(.main_token); + const extra = tree.extraData(node_datas[node].rhs, Ast.Node.ArrayTypeSentinel); + + const len_node = node_datas[node].lhs; + if (node_tags[len_node] == .identifier and + mem.eql(u8, tree.tokenSlice(main_tokens[len_node]), "_")) + { + return astgen.failNode(len_node, "unable to infer array size", .{}); + } + const len = try reachableExpr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, len_node, node); + const elem_type = try typeExpr(gz, scope, extra.elem_type); + const sentinel = try reachableExprComptime(gz, scope, .{ .rl = .{ .coerced_ty = elem_type } }, extra.sentinel, node, true); + + const result = try gz.addPlNode(.array_type_sentinel, node, Zir.Inst.ArrayTypeSentinel{ + .len = len, + .elem_type = elem_type, + .sentinel = sentinel, + }); + return rvalue(gz, ri, result, node); +} + +const WipMembers = struct { + payload: *ArrayListUnmanaged(u32), + payload_top: usize, + field_bits_start: u32, + fields_start: u32, + fields_end: u32, + decl_index: u32 = 0, + field_index: u32 = 0, + + const Self = @This(); + + fn init(gpa: Allocator, payload: *ArrayListUnmanaged(u32), decl_count: u32, field_count: u32, comptime bits_per_field: u32, comptime max_field_size: u32) Allocator.Error!Self { + const payload_top: u32 = @intCast(payload.items.len); + const field_bits_start = payload_top + decl_count; + const fields_start = field_bits_start + if (bits_per_field > 0) blk: { + const fields_per_u32 = 32 / bits_per_field; + break :blk (field_count + fields_per_u32 - 1) / fields_per_u32; + } else 0; + const payload_end = fields_start + field_count * max_field_size; + try payload.resize(gpa, payload_end); + return .{ + .payload = payload, + .payload_top = payload_top, + .field_bits_start = field_bits_start, + .fields_start = fields_start, + .fields_end = fields_start, + }; + } + + fn nextDecl(self: *Self, decl_inst: Zir.Inst.Index) void { + self.payload.items[self.payload_top + self.decl_index] = @intFromEnum(decl_inst); + self.decl_index += 1; + } + + fn nextField(self: *Self, comptime bits_per_field: u32, bits: [bits_per_field]bool) void { + const fields_per_u32 = 32 / bits_per_field; + const index = self.field_bits_start + self.field_index / fields_per_u32; + assert(index < self.fields_start); + var bit_bag: u32 = if (self.field_index % fields_per_u32 == 0) 0 else self.payload.items[index]; + bit_bag >>= bits_per_field; + comptime var i = 0; + inline while (i < bits_per_field) : (i += 1) { + bit_bag |= @as(u32, @intFromBool(bits[i])) << (32 - bits_per_field + i); + } + self.payload.items[index] = bit_bag; + self.field_index += 1; + } + + fn appendToField(self: *Self, data: u32) void { + assert(self.fields_end < self.payload.items.len); + self.payload.items[self.fields_end] = data; + self.fields_end += 1; + } + + fn finishBits(self: *Self, comptime bits_per_field: u32) void { + if (bits_per_field > 0) { + const fields_per_u32 = 32 / bits_per_field; + const empty_field_slots = fields_per_u32 - (self.field_index % fields_per_u32); + if (self.field_index > 0 and empty_field_slots < fields_per_u32) { + const index = self.field_bits_start + self.field_index / fields_per_u32; + self.payload.items[index] >>= @intCast(empty_field_slots * bits_per_field); + } + } + } + + fn declsSlice(self: 
*Self) []u32 { + return self.payload.items[self.payload_top..][0..self.decl_index]; + } + + fn fieldsSlice(self: *Self) []u32 { + return self.payload.items[self.field_bits_start..self.fields_end]; + } + + fn deinit(self: *Self) void { + self.payload.items.len = self.payload_top; + } +}; + +fn fnDecl( + astgen: *AstGen, + gz: *GenZir, + scope: *Scope, + wip_members: *WipMembers, + decl_node: Ast.Node.Index, + body_node: Ast.Node.Index, + fn_proto: Ast.full.FnProto, +) InnerError!void { + const tree = astgen.tree; + const token_tags = tree.tokens.items(.tag); + + // missing function name already happened in scanDecls() + const fn_name_token = fn_proto.name_token orelse return error.AnalysisFail; + + // We insert this at the beginning so that its instruction index marks the + // start of the top level declaration. + const decl_inst = try gz.makeBlockInst(.declaration, fn_proto.ast.proto_node); + astgen.advanceSourceCursorToNode(decl_node); + + var decl_gz: GenZir = .{ + .is_comptime = true, + .decl_node_index = fn_proto.ast.proto_node, + .decl_line = astgen.source_line, + .parent = scope, + .astgen = astgen, + .instructions = gz.instructions, + .instructions_top = gz.instructions.items.len, + }; + defer decl_gz.unstack(); + + var fn_gz: GenZir = .{ + .is_comptime = false, + .decl_node_index = fn_proto.ast.proto_node, + .decl_line = decl_gz.decl_line, + .parent = &decl_gz.base, + .astgen = astgen, + .instructions = gz.instructions, + .instructions_top = GenZir.unstacked_top, + }; + defer fn_gz.unstack(); + + const is_pub = fn_proto.visib_token != null; + const is_export = blk: { + const maybe_export_token = fn_proto.extern_export_inline_token orelse break :blk false; + break :blk token_tags[maybe_export_token] == .keyword_export; + }; + const is_extern = blk: { + const maybe_extern_token = fn_proto.extern_export_inline_token orelse break :blk false; + break :blk token_tags[maybe_extern_token] == .keyword_extern; + }; + const has_inline_keyword = blk: { + const maybe_inline_token = fn_proto.extern_export_inline_token orelse break :blk false; + break :blk token_tags[maybe_inline_token] == .keyword_inline; + }; + const is_noinline = blk: { + const maybe_noinline_token = fn_proto.extern_export_inline_token orelse break :blk false; + break :blk token_tags[maybe_noinline_token] == .keyword_noinline; + }; + + const doc_comment_index = try astgen.docCommentAsString(fn_proto.firstToken()); + + wip_members.nextDecl(decl_inst); + + var noalias_bits: u32 = 0; + var params_scope = &fn_gz.base; + const is_var_args = is_var_args: { + var param_type_i: usize = 0; + var it = fn_proto.iterate(tree); + while (it.next()) |param| : (param_type_i += 1) { + const is_comptime = if (param.comptime_noalias) |token| switch (token_tags[token]) { + .keyword_noalias => is_comptime: { + noalias_bits |= @as(u32, 1) << (std.math.cast(u5, param_type_i) orelse + return astgen.failTok(token, "this compiler implementation only supports 'noalias' on the first 32 parameters", .{})); + break :is_comptime false; + }, + .keyword_comptime => true, + else => false, + } else false; + + const is_anytype = if (param.anytype_ellipsis3) |token| blk: { + switch (token_tags[token]) { + .keyword_anytype => break :blk true, + .ellipsis3 => break :is_var_args true, + else => unreachable, + } + } else false; + + const param_name: Zir.NullTerminatedString = if (param.name_token) |name_token| blk: { + const name_bytes = tree.tokenSlice(name_token); + if (mem.eql(u8, "_", name_bytes)) + break :blk .empty; + + const param_name = try 
astgen.identAsString(name_token);
+ if (!is_extern) {
+ try astgen.detectLocalShadowing(params_scope, param_name, name_token, name_bytes, .@"function parameter");
+ }
+ break :blk param_name;
+ } else if (!is_extern) {
+ if (param.anytype_ellipsis3) |tok| {
+ return astgen.failTok(tok, "missing parameter name", .{});
+ } else {
+ ambiguous: {
+ if (tree.nodes.items(.tag)[param.type_expr] != .identifier) break :ambiguous;
+ const main_token = tree.nodes.items(.main_token)[param.type_expr];
+ const identifier_str = tree.tokenSlice(main_token);
+ if (isPrimitive(identifier_str)) break :ambiguous;
+ return astgen.failNodeNotes(
+ param.type_expr,
+ "missing parameter name or type",
+ .{},
+ &[_]u32{
+ try astgen.errNoteNode(
+ param.type_expr,
+ "if this is a name, annotate its type '{s}: T'",
+ .{identifier_str},
+ ),
+ try astgen.errNoteNode(
+ param.type_expr,
+ "if this is a type, give it a name ': {s}'",
+ .{identifier_str},
+ ),
+ },
+ );
+ }
+ return astgen.failNode(param.type_expr, "missing parameter name", .{});
+ }
+ } else .empty;
+
+ const param_inst = if (is_anytype) param: {
+ const name_token = param.name_token orelse param.anytype_ellipsis3.?;
+ const tag: Zir.Inst.Tag = if (is_comptime)
+ .param_anytype_comptime
+ else
+ .param_anytype;
+ break :param try decl_gz.addStrTok(tag, param_name, name_token);
+ } else param: {
+ const param_type_node = param.type_expr;
+ assert(param_type_node != 0);
+ var param_gz = decl_gz.makeSubBlock(scope);
+ defer param_gz.unstack();
+ const param_type = try expr(&param_gz, params_scope, coerced_type_ri, param_type_node);
+ const param_inst_expected: Zir.Inst.Index = @enumFromInt(astgen.instructions.len + 1);
+ _ = try param_gz.addBreakWithSrcNode(.break_inline, param_inst_expected, param_type, param_type_node);
+
+ const main_tokens = tree.nodes.items(.main_token);
+ const name_token = param.name_token orelse main_tokens[param_type_node];
+ const tag: Zir.Inst.Tag = if (is_comptime) .param_comptime else .param;
+ const param_inst = try decl_gz.addParam(&param_gz, tag, name_token, param_name, param.first_doc_comment);
+ assert(param_inst_expected == param_inst);
+ break :param param_inst.toRef();
+ };
+
+ if (param_name == .empty or is_extern) continue;
+
+ const sub_scope = try astgen.arena.create(Scope.LocalVal);
+ sub_scope.* = .{
+ .parent = params_scope,
+ .gen_zir = &decl_gz,
+ .name = param_name,
+ .inst = param_inst,
+ .token_src = param.name_token.?,
+ .id_cat = .@"function parameter",
+ };
+ params_scope = &sub_scope.base;
+ }
+ break :is_var_args false;
+ };
+
+ const lib_name = if (fn_proto.lib_name) |lib_name_token| blk: {
+ const lib_name_str = try astgen.strLitAsString(lib_name_token);
+ const lib_name_slice = astgen.string_bytes.items[@intFromEnum(lib_name_str.index)..][0..lib_name_str.len];
+ if (mem.indexOfScalar(u8, lib_name_slice, 0) != null) {
+ return astgen.failTok(lib_name_token, "library name cannot contain null bytes", .{});
+ } else if (lib_name_str.len == 0) {
+ return astgen.failTok(lib_name_token, "library name cannot be empty", .{});
+ }
+ break :blk lib_name_str.index;
+ } else .empty;
+
+ const maybe_bang = tree.firstToken(fn_proto.ast.return_type) - 1;
+ const is_inferred_error = token_tags[maybe_bang] == .bang;
+
+ // After creating the function ZIR instruction, it will need to update the break
+ // instructions inside the expression blocks for align, addrspace, cc, and ret_ty
+ // to use the function instruction as the "block" to break from.
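+ // Each of the four sub-blocks below follows the same pattern (sketch of the
+ // surrounding code, using `align_gz` as the example): evaluate the attribute
+ // expression, and if any instructions were needed, terminate the block with
+ //     _ = try align_gz.addBreak(.break_inline, @enumFromInt(0), inst);
+ // where `@enumFromInt(0)` is a placeholder instruction index that `addFunc`
+ // retargets to the function instruction once it exists.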
+ + var align_gz = decl_gz.makeSubBlock(params_scope); + defer align_gz.unstack(); + const align_ref: Zir.Inst.Ref = if (fn_proto.ast.align_expr == 0) .none else inst: { + const inst = try expr(&decl_gz, params_scope, coerced_align_ri, fn_proto.ast.align_expr); + if (align_gz.instructionsSlice().len == 0) { + // In this case we will send a len=0 body which can be encoded more efficiently. + break :inst inst; + } + _ = try align_gz.addBreak(.break_inline, @enumFromInt(0), inst); + break :inst inst; + }; + + var addrspace_gz = decl_gz.makeSubBlock(params_scope); + defer addrspace_gz.unstack(); + const addrspace_ref: Zir.Inst.Ref = if (fn_proto.ast.addrspace_expr == 0) .none else inst: { + const inst = try expr(&decl_gz, params_scope, coerced_addrspace_ri, fn_proto.ast.addrspace_expr); + if (addrspace_gz.instructionsSlice().len == 0) { + // In this case we will send a len=0 body which can be encoded more efficiently. + break :inst inst; + } + _ = try addrspace_gz.addBreak(.break_inline, @enumFromInt(0), inst); + break :inst inst; + }; + + var section_gz = decl_gz.makeSubBlock(params_scope); + defer section_gz.unstack(); + const section_ref: Zir.Inst.Ref = if (fn_proto.ast.section_expr == 0) .none else inst: { + const inst = try expr(&decl_gz, params_scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, fn_proto.ast.section_expr); + if (section_gz.instructionsSlice().len == 0) { + // In this case we will send a len=0 body which can be encoded more efficiently. + break :inst inst; + } + _ = try section_gz.addBreak(.break_inline, @enumFromInt(0), inst); + break :inst inst; + }; + + var cc_gz = decl_gz.makeSubBlock(params_scope); + defer cc_gz.unstack(); + const cc_ref: Zir.Inst.Ref = blk: { + if (fn_proto.ast.callconv_expr != 0) { + if (has_inline_keyword) { + return astgen.failNode( + fn_proto.ast.callconv_expr, + "explicit callconv incompatible with inline keyword", + .{}, + ); + } + const inst = try expr( + &decl_gz, + params_scope, + .{ .rl = .{ .coerced_ty = .calling_convention_type } }, + fn_proto.ast.callconv_expr, + ); + if (cc_gz.instructionsSlice().len == 0) { + // In this case we will send a len=0 body which can be encoded more efficiently. + break :blk inst; + } + _ = try cc_gz.addBreak(.break_inline, @enumFromInt(0), inst); + break :blk inst; + } else if (is_extern) { + // note: https://github.com/ziglang/zig/issues/5269 + break :blk .calling_convention_c; + } else if (has_inline_keyword) { + break :blk .calling_convention_inline; + } else { + break :blk .none; + } + }; + + var ret_gz = decl_gz.makeSubBlock(params_scope); + defer ret_gz.unstack(); + const ret_ref: Zir.Inst.Ref = inst: { + const inst = try expr(&ret_gz, params_scope, coerced_type_ri, fn_proto.ast.return_type); + if (ret_gz.instructionsSlice().len == 0) { + // In this case we will send a len=0 body which can be encoded more efficiently. 
+ break :inst inst;
+ }
+ _ = try ret_gz.addBreak(.break_inline, @enumFromInt(0), inst);
+ break :inst inst;
+ };
+
+ const func_inst: Zir.Inst.Ref = if (body_node == 0) func: {
+ if (!is_extern) {
+ return astgen.failTok(fn_proto.ast.fn_token, "non-extern function has no body", .{});
+ }
+ if (is_inferred_error) {
+ return astgen.failTok(maybe_bang, "function prototype may not have inferred error set", .{});
+ }
+ break :func try decl_gz.addFunc(.{
+ .src_node = decl_node,
+ .cc_ref = cc_ref,
+ .cc_gz = &cc_gz,
+ .align_ref = align_ref,
+ .align_gz = &align_gz,
+ .ret_ref = ret_ref,
+ .ret_gz = &ret_gz,
+ .section_ref = section_ref,
+ .section_gz = &section_gz,
+ .addrspace_ref = addrspace_ref,
+ .addrspace_gz = &addrspace_gz,
+ .param_block = decl_inst,
+ .body_gz = null,
+ .lib_name = lib_name,
+ .is_var_args = is_var_args,
+ .is_inferred_error = false,
+ .is_test = false,
+ .is_extern = true,
+ .is_noinline = is_noinline,
+ .noalias_bits = noalias_bits,
+ });
+ } else func: {
+ // as a scope, fn_gz encloses ret_gz, but for instruction list, fn_gz stacks on ret_gz
+ fn_gz.instructions_top = ret_gz.instructions.items.len;
+
+ const prev_fn_block = astgen.fn_block;
+ const prev_fn_ret_ty = astgen.fn_ret_ty;
+ astgen.fn_block = &fn_gz;
+ astgen.fn_ret_ty = if (is_inferred_error or ret_ref.toIndex() != null) r: {
+ // We're essentially guaranteed to need the return type at some point,
+ // since the return type is likely not `void` or `noreturn` so there
+ // will probably be an explicit return requiring RLS. Fetch this
+ // return type now so the rest of the function can use it.
+ break :r try fn_gz.addNode(.ret_type, decl_node);
+ } else ret_ref;
+ defer {
+ astgen.fn_block = prev_fn_block;
+ astgen.fn_ret_ty = prev_fn_ret_ty;
+ }
+
+ const prev_var_args = astgen.fn_var_args;
+ astgen.fn_var_args = is_var_args;
+ defer astgen.fn_var_args = prev_var_args;
+
+ astgen.advanceSourceCursorToNode(body_node);
+ const lbrace_line = astgen.source_line - decl_gz.decl_line;
+ const lbrace_column = astgen.source_column;
+
+ _ = try expr(&fn_gz, params_scope, .{ .rl = .none }, body_node);
+ try checkUsed(gz, &fn_gz.base, params_scope);
+
+ if (!fn_gz.endsWithNoReturn()) {
+ // As our last action before the return, "pop" the error trace if needed
+ _ = try fn_gz.addRestoreErrRetIndex(.ret, .always, decl_node);
+
+ // Add implicit return at end of function.
+ _ = try fn_gz.addUnTok(.ret_implicit, .void_value, tree.lastToken(body_node));
+ }
+
+ break :func try decl_gz.addFunc(.{
+ .src_node = decl_node,
+ .cc_ref = cc_ref,
+ .cc_gz = &cc_gz,
+ .align_ref = align_ref,
+ .align_gz = &align_gz,
+ .ret_ref = ret_ref,
+ .ret_gz = &ret_gz,
+ .section_ref = section_ref,
+ .section_gz = &section_gz,
+ .addrspace_ref = addrspace_ref,
+ .addrspace_gz = &addrspace_gz,
+ .lbrace_line = lbrace_line,
+ .lbrace_column = lbrace_column,
+ .param_block = decl_inst,
+ .body_gz = &fn_gz,
+ .lib_name = lib_name,
+ .is_var_args = is_var_args,
+ .is_inferred_error = is_inferred_error,
+ .is_test = false,
+ .is_extern = false,
+ .is_noinline = is_noinline,
+ .noalias_bits = noalias_bits,
+ });
+ };
+
+ // We add this at the end so that its instruction index marks the end range
+ // of the top level declaration. addFunc already unstacked fn_gz and ret_gz.
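+ // Taken together, a function declaration lowers roughly to (illustrative
+ // sketch, not literal ZIR output):
+ //     %decl = declaration(...)        // start of the decl's range
+ //     ...                             // prototype and body instructions
+ //     break_inline(%decl, %func)      // end of the decl's range
+ // so the instruction indices of `%decl` and the trailing break bracket
+ // everything belonging to this top-level declaration.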
+ _ = try decl_gz.addBreak(.break_inline, decl_inst, func_inst); + + try setDeclaration( + decl_inst, + std.zig.hashSrc(tree.getNodeSource(decl_node)), + .{ .named = fn_name_token }, + decl_gz.decl_line - gz.decl_line, + is_pub, + is_export, + doc_comment_index, + &decl_gz, + // align, linksection, and addrspace are passed in the func instruction in this case. + // TODO: move them from the function instruction to the declaration instruction? + null, + ); +} + +fn globalVarDecl( + astgen: *AstGen, + gz: *GenZir, + scope: *Scope, + wip_members: *WipMembers, + node: Ast.Node.Index, + var_decl: Ast.full.VarDecl, +) InnerError!void { + const tree = astgen.tree; + const token_tags = tree.tokens.items(.tag); + + const is_mutable = token_tags[var_decl.ast.mut_token] == .keyword_var; + // We do this at the beginning so that the instruction index marks the range start + // of the top level declaration. + const decl_inst = try gz.makeBlockInst(.declaration, node); + + const name_token = var_decl.ast.mut_token + 1; + astgen.advanceSourceCursorToNode(node); + + var block_scope: GenZir = .{ + .parent = scope, + .decl_node_index = node, + .decl_line = astgen.source_line, + .astgen = astgen, + .is_comptime = true, + .anon_name_strategy = .parent, + .instructions = gz.instructions, + .instructions_top = gz.instructions.items.len, + }; + defer block_scope.unstack(); + + const is_pub = var_decl.visib_token != null; + const is_export = blk: { + const maybe_export_token = var_decl.extern_export_token orelse break :blk false; + break :blk token_tags[maybe_export_token] == .keyword_export; + }; + const is_extern = blk: { + const maybe_extern_token = var_decl.extern_export_token orelse break :blk false; + break :blk token_tags[maybe_extern_token] == .keyword_extern; + }; + wip_members.nextDecl(decl_inst); + + const is_threadlocal = if (var_decl.threadlocal_token) |tok| blk: { + if (!is_mutable) { + return astgen.failTok(tok, "threadlocal variable cannot be constant", .{}); + } + break :blk true; + } else false; + + const lib_name = if (var_decl.lib_name) |lib_name_token| blk: { + const lib_name_str = try astgen.strLitAsString(lib_name_token); + const lib_name_slice = astgen.string_bytes.items[@intFromEnum(lib_name_str.index)..][0..lib_name_str.len]; + if (mem.indexOfScalar(u8, lib_name_slice, 0) != null) { + return astgen.failTok(lib_name_token, "library name cannot contain null bytes", .{}); + } else if (lib_name_str.len == 0) { + return astgen.failTok(lib_name_token, "library name cannot be empty", .{}); + } + break :blk lib_name_str.index; + } else .empty; + + const doc_comment_index = try astgen.docCommentAsString(var_decl.firstToken()); + + assert(var_decl.comptime_token == null); // handled by parser + + const var_inst: Zir.Inst.Ref = if (var_decl.ast.init_node != 0) vi: { + if (is_extern) { + return astgen.failNode( + var_decl.ast.init_node, + "extern variables have no initializers", + .{}, + ); + } + + const type_inst: Zir.Inst.Ref = if (var_decl.ast.type_node != 0) + try expr( + &block_scope, + &block_scope.base, + coerced_type_ri, + var_decl.ast.type_node, + ) + else + .none; + + const init_inst = try expr( + &block_scope, + &block_scope.base, + if (type_inst != .none) .{ .rl = .{ .ty = type_inst } } else .{ .rl = .none }, + var_decl.ast.init_node, + ); + + if (is_mutable) { + const var_inst = try block_scope.addVar(.{ + .var_type = type_inst, + .lib_name = .empty, + .align_inst = .none, // passed via the decls data + .init = init_inst, + .is_extern = false, + .is_const = !is_mutable, + .is_threadlocal 
= is_threadlocal, + }); + break :vi var_inst; + } else { + break :vi init_inst; + } + } else if (!is_extern) { + return astgen.failNode(node, "variables must be initialized", .{}); + } else if (var_decl.ast.type_node != 0) vi: { + // Extern variable which has an explicit type. + const type_inst = try typeExpr(&block_scope, &block_scope.base, var_decl.ast.type_node); + + const var_inst = try block_scope.addVar(.{ + .var_type = type_inst, + .lib_name = lib_name, + .align_inst = .none, // passed via the decls data + .init = .none, + .is_extern = true, + .is_const = !is_mutable, + .is_threadlocal = is_threadlocal, + }); + break :vi var_inst; + } else { + return astgen.failNode(node, "unable to infer variable type", .{}); + }; + + // We do this at the end so that the instruction index marks the end + // range of a top level declaration. + _ = try block_scope.addBreakWithSrcNode(.break_inline, decl_inst, var_inst, node); + + var align_gz = block_scope.makeSubBlock(scope); + if (var_decl.ast.align_node != 0) { + const align_inst = try expr(&align_gz, &align_gz.base, coerced_align_ri, var_decl.ast.align_node); + _ = try align_gz.addBreakWithSrcNode(.break_inline, decl_inst, align_inst, node); + } + + var linksection_gz = align_gz.makeSubBlock(scope); + if (var_decl.ast.section_node != 0) { + const linksection_inst = try expr(&linksection_gz, &linksection_gz.base, coerced_linksection_ri, var_decl.ast.section_node); + _ = try linksection_gz.addBreakWithSrcNode(.break_inline, decl_inst, linksection_inst, node); + } + + var addrspace_gz = linksection_gz.makeSubBlock(scope); + if (var_decl.ast.addrspace_node != 0) { + const addrspace_inst = try expr(&addrspace_gz, &addrspace_gz.base, coerced_addrspace_ri, var_decl.ast.addrspace_node); + _ = try addrspace_gz.addBreakWithSrcNode(.break_inline, decl_inst, addrspace_inst, node); + } + + try setDeclaration( + decl_inst, + std.zig.hashSrc(tree.getNodeSource(node)), + .{ .named = name_token }, + block_scope.decl_line - gz.decl_line, + is_pub, + is_export, + doc_comment_index, + &block_scope, + .{ + .align_gz = &align_gz, + .linksection_gz = &linksection_gz, + .addrspace_gz = &addrspace_gz, + }, + ); +} + +fn comptimeDecl( + astgen: *AstGen, + gz: *GenZir, + scope: *Scope, + wip_members: *WipMembers, + node: Ast.Node.Index, +) InnerError!void { + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + const body_node = node_datas[node].lhs; + + // Up top so the ZIR instruction index marks the start range of this + // top-level declaration. 
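+ // Illustrative input (assumed, not from this change):
+ //     comptime {
+ //         @compileLog("evaluated at compile time");
+ //     }
+ // The block body is lowered into `decl_block` below and, if it does not end
+ // in a noreturn instruction, closed with a `break_inline` yielding void.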
+ const decl_inst = try gz.makeBlockInst(.declaration, node); + wip_members.nextDecl(decl_inst); + astgen.advanceSourceCursorToNode(node); + + var decl_block: GenZir = .{ + .is_comptime = true, + .decl_node_index = node, + .decl_line = astgen.source_line, + .parent = scope, + .astgen = astgen, + .instructions = gz.instructions, + .instructions_top = gz.instructions.items.len, + }; + defer decl_block.unstack(); + + const block_result = try expr(&decl_block, &decl_block.base, .{ .rl = .none }, body_node); + if (decl_block.isEmpty() or !decl_block.refIsNoReturn(block_result)) { + _ = try decl_block.addBreak(.break_inline, decl_inst, .void_value); + } + + try setDeclaration( + decl_inst, + std.zig.hashSrc(tree.getNodeSource(node)), + .@"comptime", + decl_block.decl_line - gz.decl_line, + false, + false, + .empty, + &decl_block, + null, + ); +} + +fn usingnamespaceDecl( + astgen: *AstGen, + gz: *GenZir, + scope: *Scope, + wip_members: *WipMembers, + node: Ast.Node.Index, +) InnerError!void { + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + + const type_expr = node_datas[node].lhs; + const is_pub = blk: { + const main_tokens = tree.nodes.items(.main_token); + const token_tags = tree.tokens.items(.tag); + const main_token = main_tokens[node]; + break :blk (main_token > 0 and token_tags[main_token - 1] == .keyword_pub); + }; + // Up top so the ZIR instruction index marks the start range of this + // top-level declaration. + const decl_inst = try gz.makeBlockInst(.declaration, node); + wip_members.nextDecl(decl_inst); + astgen.advanceSourceCursorToNode(node); + + var decl_block: GenZir = .{ + .is_comptime = true, + .decl_node_index = node, + .decl_line = astgen.source_line, + .parent = scope, + .astgen = astgen, + .instructions = gz.instructions, + .instructions_top = gz.instructions.items.len, + }; + defer decl_block.unstack(); + + const namespace_inst = try typeExpr(&decl_block, &decl_block.base, type_expr); + _ = try decl_block.addBreak(.break_inline, decl_inst, namespace_inst); + + try setDeclaration( + decl_inst, + std.zig.hashSrc(tree.getNodeSource(node)), + .@"usingnamespace", + decl_block.decl_line - gz.decl_line, + is_pub, + false, + .empty, + &decl_block, + null, + ); +} + +fn testDecl( + astgen: *AstGen, + gz: *GenZir, + scope: *Scope, + wip_members: *WipMembers, + node: Ast.Node.Index, +) InnerError!void { + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + const body_node = node_datas[node].rhs; + + // Up top so the ZIR instruction index marks the start range of this + // top-level declaration. 
+ const decl_inst = try gz.makeBlockInst(.declaration, node); + + wip_members.nextDecl(decl_inst); + astgen.advanceSourceCursorToNode(node); + + var decl_block: GenZir = .{ + .is_comptime = true, + .decl_node_index = node, + .decl_line = astgen.source_line, + .parent = scope, + .astgen = astgen, + .instructions = gz.instructions, + .instructions_top = gz.instructions.items.len, + }; + defer decl_block.unstack(); + + const main_tokens = tree.nodes.items(.main_token); + const token_tags = tree.tokens.items(.tag); + const test_token = main_tokens[node]; + const test_name_token = test_token + 1; + const test_name: DeclarationName = switch (token_tags[test_name_token]) { + else => .unnamed_test, + .string_literal => .{ .named_test = test_name_token }, + .identifier => blk: { + const ident_name_raw = tree.tokenSlice(test_name_token); + + if (mem.eql(u8, ident_name_raw, "_")) return astgen.failTok(test_name_token, "'_' used as an identifier without @\"_\" syntax", .{}); + + // if not @"" syntax, just use raw token slice + if (ident_name_raw[0] != '@') { + if (isPrimitive(ident_name_raw)) return astgen.failTok(test_name_token, "cannot test a primitive", .{}); + } + + // Local variables, including function parameters. + const name_str_index = try astgen.identAsString(test_name_token); + var s = scope; + var found_already: ?Ast.Node.Index = null; // we have found a decl with the same name already + var num_namespaces_out: u32 = 0; + var capturing_namespace: ?*Scope.Namespace = null; + while (true) switch (s.tag) { + .local_val => { + const local_val = s.cast(Scope.LocalVal).?; + if (local_val.name == name_str_index) { + local_val.used = test_name_token; + return astgen.failTokNotes(test_name_token, "cannot test a {s}", .{ + @tagName(local_val.id_cat), + }, &[_]u32{ + try astgen.errNoteTok(local_val.token_src, "{s} declared here", .{ + @tagName(local_val.id_cat), + }), + }); + } + s = local_val.parent; + }, + .local_ptr => { + const local_ptr = s.cast(Scope.LocalPtr).?; + if (local_ptr.name == name_str_index) { + local_ptr.used = test_name_token; + return astgen.failTokNotes(test_name_token, "cannot test a {s}", .{ + @tagName(local_ptr.id_cat), + }, &[_]u32{ + try astgen.errNoteTok(local_ptr.token_src, "{s} declared here", .{ + @tagName(local_ptr.id_cat), + }), + }); + } + s = local_ptr.parent; + }, + .gen_zir => s = s.cast(GenZir).?.parent, + .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent, + .namespace, .enum_namespace => { + const ns = s.cast(Scope.Namespace).?; + if (ns.decls.get(name_str_index)) |i| { + if (found_already) |f| { + return astgen.failTokNotes(test_name_token, "ambiguous reference", .{}, &.{ + try astgen.errNoteNode(f, "declared here", .{}), + try astgen.errNoteNode(i, "also declared here", .{}), + }); + } + // We found a match but must continue looking for ambiguous references to decls. 
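+ // Illustrative case for this ambiguity check (assumed, not from this
+ // change): a decltest name declared in more than one enclosing namespace,
+ // e.g.
+ //     const foo = struct {};
+ //     const S = struct {
+ //         const foo = struct {};
+ //         test foo {}
+ //     };
+ // The walk must keep going after the first hit so that both declarations
+ // can be reported in the "ambiguous reference" error notes.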
+ found_already = i; + } + num_namespaces_out += 1; + capturing_namespace = ns; + s = ns.parent; + }, + .top => break, + }; + if (found_already == null) { + const ident_name = try astgen.identifierTokenString(test_name_token); + return astgen.failTok(test_name_token, "use of undeclared identifier '{s}'", .{ident_name}); + } + + break :blk .{ .decltest = name_str_index }; + }, + }; + + var fn_block: GenZir = .{ + .is_comptime = false, + .decl_node_index = node, + .decl_line = decl_block.decl_line, + .parent = &decl_block.base, + .astgen = astgen, + .instructions = decl_block.instructions, + .instructions_top = decl_block.instructions.items.len, + }; + defer fn_block.unstack(); + + const prev_fn_block = astgen.fn_block; + const prev_fn_ret_ty = astgen.fn_ret_ty; + astgen.fn_block = &fn_block; + astgen.fn_ret_ty = .anyerror_void_error_union_type; + defer { + astgen.fn_block = prev_fn_block; + astgen.fn_ret_ty = prev_fn_ret_ty; + } + + astgen.advanceSourceCursorToNode(body_node); + const lbrace_line = astgen.source_line - decl_block.decl_line; + const lbrace_column = astgen.source_column; + + const block_result = try expr(&fn_block, &fn_block.base, .{ .rl = .none }, body_node); + if (fn_block.isEmpty() or !fn_block.refIsNoReturn(block_result)) { + + // As our last action before the return, "pop" the error trace if needed + _ = try fn_block.addRestoreErrRetIndex(.ret, .always, node); + + // Add implicit return at end of function. + _ = try fn_block.addUnTok(.ret_implicit, .void_value, tree.lastToken(body_node)); + } + + const func_inst = try decl_block.addFunc(.{ + .src_node = node, + + .cc_ref = .none, + .cc_gz = null, + .align_ref = .none, + .align_gz = null, + .ret_ref = .anyerror_void_error_union_type, + .ret_gz = null, + .section_ref = .none, + .section_gz = null, + .addrspace_ref = .none, + .addrspace_gz = null, + + .lbrace_line = lbrace_line, + .lbrace_column = lbrace_column, + .param_block = decl_inst, + .body_gz = &fn_block, + .lib_name = .empty, + .is_var_args = false, + .is_inferred_error = false, + .is_test = true, + .is_extern = false, + .is_noinline = false, + .noalias_bits = 0, + }); + + _ = try decl_block.addBreak(.break_inline, decl_inst, func_inst); + + try setDeclaration( + decl_inst, + std.zig.hashSrc(tree.getNodeSource(node)), + test_name, + decl_block.decl_line - gz.decl_line, + false, + false, + .empty, + &decl_block, + null, + ); +} + +fn structDeclInner( + gz: *GenZir, + scope: *Scope, + node: Ast.Node.Index, + container_decl: Ast.full.ContainerDecl, + layout: std.builtin.Type.ContainerLayout, + backing_int_node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const decl_inst = try gz.reserveInstructionIndex(); + + if (container_decl.ast.members.len == 0 and backing_int_node == 0) { + try gz.setStruct(decl_inst, .{ + .src_node = node, + .layout = layout, + .fields_len = 0, + .decls_len = 0, + .backing_int_ref = .none, + .backing_int_body_len = 0, + .known_non_opv = false, + .known_comptime_only = false, + .is_tuple = false, + .any_comptime_fields = false, + .any_default_inits = false, + .any_aligned_fields = false, + .fields_hash = std.zig.hashSrc(@tagName(layout)), + }); + return decl_inst.toRef(); + } + + const astgen = gz.astgen; + const gpa = astgen.gpa; + const tree = astgen.tree; + + var namespace: Scope.Namespace = .{ + .parent = scope, + .node = node, + .inst = decl_inst, + .declaring_gz = gz, + }; + defer namespace.deinit(gpa); + + // The struct_decl instruction introduces a scope in which the decls of the struct + // are in scope, so that field types, 
alignments, and default value expressions + // can refer to decls within the struct itself. + astgen.advanceSourceCursorToNode(node); + var block_scope: GenZir = .{ + .parent = &namespace.base, + .decl_node_index = node, + .decl_line = gz.decl_line, + .astgen = astgen, + .is_comptime = true, + .instructions = gz.instructions, + .instructions_top = gz.instructions.items.len, + }; + defer block_scope.unstack(); + + const scratch_top = astgen.scratch.items.len; + defer astgen.scratch.items.len = scratch_top; + + var backing_int_body_len: usize = 0; + const backing_int_ref: Zir.Inst.Ref = blk: { + if (backing_int_node != 0) { + if (layout != .Packed) { + return astgen.failNode(backing_int_node, "non-packed struct does not support backing integer type", .{}); + } else { + const backing_int_ref = try typeExpr(&block_scope, &namespace.base, backing_int_node); + if (!block_scope.isEmpty()) { + if (!block_scope.endsWithNoReturn()) { + _ = try block_scope.addBreak(.break_inline, decl_inst, backing_int_ref); + } + + const body = block_scope.instructionsSlice(); + const old_scratch_len = astgen.scratch.items.len; + try astgen.scratch.ensureUnusedCapacity(gpa, countBodyLenAfterFixups(astgen, body)); + appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body); + backing_int_body_len = astgen.scratch.items.len - old_scratch_len; + block_scope.instructions.items.len = block_scope.instructions_top; + } + break :blk backing_int_ref; + } + } else { + break :blk .none; + } + }; + + const decl_count = try astgen.scanDecls(&namespace, container_decl.ast.members); + const field_count: u32 = @intCast(container_decl.ast.members.len - decl_count); + + const bits_per_field = 4; + const max_field_size = 5; + var wip_members = try WipMembers.init(gpa, &astgen.scratch, decl_count, field_count, bits_per_field, max_field_size); + defer wip_members.deinit(); + + // We will use the scratch buffer, starting here, for the bodies: + // bodies: { // for every fields_len + // field_type_body_inst: Inst, // for each field_type_body_len + // align_body_inst: Inst, // for each align_body_len + // init_body_inst: Inst, // for each init_body_len + // } + // Note that the scratch buffer is simultaneously being used by WipMembers, however + // it will not access any elements beyond this point in the ArrayList. It also + // accesses via the ArrayList items field so it can handle the scratch buffer being + // reallocated. + // No defer needed here because it is handled by `wip_members.deinit()` above. 
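+ // Worked example of the bit-bag encoding (derived from the WipMembers code
+ // above): with `bits_per_field = 4`, each u32 holds flags for 32 / 4 = 8
+ // fields. `nextField` shifts the current bag right by 4 and ORs the new
+ // field's four flags into the top bits; `finishBits` then shifts the last,
+ // partially filled bag down so the first field's flags land in the lowest
+ // bits.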
+ const bodies_start = astgen.scratch.items.len; + + const node_tags = tree.nodes.items(.tag); + const is_tuple = for (container_decl.ast.members) |member_node| { + const container_field = tree.fullContainerField(member_node) orelse continue; + if (container_field.ast.tuple_like) break true; + } else false; + + if (is_tuple) switch (layout) { + .Auto => {}, + .Extern => return astgen.failNode(node, "extern tuples are not supported", .{}), + .Packed => return astgen.failNode(node, "packed tuples are not supported", .{}), + }; + + if (is_tuple) for (container_decl.ast.members) |member_node| { + switch (node_tags[member_node]) { + .container_field_init, + .container_field_align, + .container_field, + .@"comptime", + .test_decl, + => continue, + else => { + const tuple_member = for (container_decl.ast.members) |maybe_tuple| switch (node_tags[maybe_tuple]) { + .container_field_init, + .container_field_align, + .container_field, + => break maybe_tuple, + else => {}, + } else unreachable; + return astgen.failNodeNotes( + member_node, + "tuple declarations cannot contain declarations", + .{}, + &[_]u32{ + try astgen.errNoteNode(tuple_member, "tuple field here", .{}), + }, + ); + }, + } + }; + + var fields_hasher = std.zig.SrcHasher.init(.{}); + fields_hasher.update(@tagName(layout)); + if (backing_int_node != 0) { + fields_hasher.update(tree.getNodeSource(backing_int_node)); + } + + var sfba = std.heap.stackFallback(256, astgen.arena); + const sfba_allocator = sfba.get(); + + var duplicate_names = std.AutoArrayHashMap(Zir.NullTerminatedString, std.ArrayListUnmanaged(Ast.TokenIndex)).init(sfba_allocator); + try duplicate_names.ensureTotalCapacity(field_count); + + // When there aren't errors, use this to avoid a second iteration. + var any_duplicate = false; + + var known_non_opv = false; + var known_comptime_only = false; + var any_comptime_fields = false; + var any_aligned_fields = false; + var any_default_inits = false; + for (container_decl.ast.members) |member_node| { + var member = switch (try containerMember(&block_scope, &namespace.base, &wip_members, member_node)) { + .decl => continue, + .field => |field| field, + }; + + fields_hasher.update(tree.getNodeSource(member_node)); + + if (!is_tuple) { + const field_name = try astgen.identAsString(member.ast.main_token); + + member.convertToNonTupleLike(astgen.tree.nodes); + assert(!member.ast.tuple_like); + + wip_members.appendToField(@intFromEnum(field_name)); + + const gop = try duplicate_names.getOrPut(field_name); + + if (gop.found_existing) { + try gop.value_ptr.append(sfba_allocator, member.ast.main_token); + any_duplicate = true; + } else { + gop.value_ptr.* = .{}; + try gop.value_ptr.append(sfba_allocator, member.ast.main_token); + } + } else if (!member.ast.tuple_like) { + return astgen.failTok(member.ast.main_token, "tuple field has a name", .{}); + } + + const doc_comment_index = try astgen.docCommentAsString(member.firstToken()); + wip_members.appendToField(@intFromEnum(doc_comment_index)); + + if (member.ast.type_expr == 0) { + return astgen.failTok(member.ast.main_token, "struct field missing type", .{}); + } + + const field_type = try typeExpr(&block_scope, &namespace.base, member.ast.type_expr); + const have_type_body = !block_scope.isEmpty(); + const have_align = member.ast.align_expr != 0; + const have_value = member.ast.value_expr != 0; + const is_comptime = member.comptime_token != null; + + if (is_comptime) { + switch (layout) { + .Packed => return astgen.failTok(member.comptime_token.?, "packed struct fields cannot be 
marked comptime", .{}),
+ .Extern => return astgen.failTok(member.comptime_token.?, "extern struct fields cannot be marked comptime", .{}),
+ .Auto => any_comptime_fields = true,
+ }
+ } else {
+ known_non_opv = known_non_opv or
+ nodeImpliesMoreThanOnePossibleValue(tree, member.ast.type_expr);
+ known_comptime_only = known_comptime_only or
+ nodeImpliesComptimeOnly(tree, member.ast.type_expr);
+ }
+ wip_members.nextField(bits_per_field, .{ have_align, have_value, is_comptime, have_type_body });
+
+ if (have_type_body) {
+ if (!block_scope.endsWithNoReturn()) {
+ _ = try block_scope.addBreak(.break_inline, decl_inst, field_type);
+ }
+ const body = block_scope.instructionsSlice();
+ const old_scratch_len = astgen.scratch.items.len;
+ try astgen.scratch.ensureUnusedCapacity(gpa, countBodyLenAfterFixups(astgen, body));
+ appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body);
+ wip_members.appendToField(@intCast(astgen.scratch.items.len - old_scratch_len));
+ block_scope.instructions.items.len = block_scope.instructions_top;
+ } else {
+ wip_members.appendToField(@intFromEnum(field_type));
+ }
+
+ if (have_align) {
+ if (layout == .Packed) {
+ try astgen.appendErrorNode(member.ast.align_expr, "unable to override alignment of packed struct fields", .{});
+ }
+ any_aligned_fields = true;
+ const align_ref = try expr(&block_scope, &namespace.base, coerced_align_ri, member.ast.align_expr);
+ if (!block_scope.endsWithNoReturn()) {
+ _ = try block_scope.addBreak(.break_inline, decl_inst, align_ref);
+ }
+ const body = block_scope.instructionsSlice();
+ const old_scratch_len = astgen.scratch.items.len;
+ try astgen.scratch.ensureUnusedCapacity(gpa, countBodyLenAfterFixups(astgen, body));
+ appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body);
+ wip_members.appendToField(@intCast(astgen.scratch.items.len - old_scratch_len));
+ block_scope.instructions.items.len = block_scope.instructions_top;
+ }
+
+ if (have_value) {
+ any_default_inits = true;
+
+ // The decl_inst is used here so that we can easily reconstruct a mapping
+ // between it and the field type when the field inits are analyzed.
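+ // For example (illustrative, not from this change), in
+ //     const S = struct { x: u32 = 123 };
+ // the init `123` is lowered with `decl_inst` standing in as its result
+ // type, so Sema can later pair the init body with field `x` and coerce it
+ // once the field types are resolved.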
+ const ri: ResultInfo = .{ .rl = if (field_type == .none) .none else .{ .coerced_ty = decl_inst.toRef() } }; + + const default_inst = try expr(&block_scope, &namespace.base, ri, member.ast.value_expr); + if (!block_scope.endsWithNoReturn()) { + _ = try block_scope.addBreak(.break_inline, decl_inst, default_inst); + } + const body = block_scope.instructionsSlice(); + const old_scratch_len = astgen.scratch.items.len; + try astgen.scratch.ensureUnusedCapacity(gpa, countBodyLenAfterFixups(astgen, body)); + appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body); + wip_members.appendToField(@intCast(astgen.scratch.items.len - old_scratch_len)); + block_scope.instructions.items.len = block_scope.instructions_top; + } else if (member.comptime_token) |comptime_token| { + return astgen.failTok(comptime_token, "comptime field without default initialization value", .{}); + } + } + + if (any_duplicate) { + var it = duplicate_names.iterator(); + + while (it.next()) |entry| { + const record = entry.value_ptr.*; + if (record.items.len > 1) { + var error_notes = std.ArrayList(u32).init(astgen.arena); + + for (record.items[1..]) |duplicate| { + try error_notes.append(try astgen.errNoteTok(duplicate, "duplicate field here", .{})); + } + + try error_notes.append(try astgen.errNoteNode(node, "struct declared here", .{})); + + try astgen.appendErrorTokNotes( + record.items[0], + "duplicate struct field name", + .{}, + error_notes.items, + ); + } + } + + return error.AnalysisFail; + } + + var fields_hash: std.zig.SrcHash = undefined; + fields_hasher.final(&fields_hash); + + try gz.setStruct(decl_inst, .{ + .src_node = node, + .layout = layout, + .fields_len = field_count, + .decls_len = decl_count, + .backing_int_ref = backing_int_ref, + .backing_int_body_len = @intCast(backing_int_body_len), + .known_non_opv = known_non_opv, + .known_comptime_only = known_comptime_only, + .is_tuple = is_tuple, + .any_comptime_fields = any_comptime_fields, + .any_default_inits = any_default_inits, + .any_aligned_fields = any_aligned_fields, + .fields_hash = fields_hash, + }); + + wip_members.finishBits(bits_per_field); + const decls_slice = wip_members.declsSlice(); + const fields_slice = wip_members.fieldsSlice(); + const bodies_slice = astgen.scratch.items[bodies_start..]; + try astgen.extra.ensureUnusedCapacity(gpa, backing_int_body_len + + decls_slice.len + fields_slice.len + bodies_slice.len); + astgen.extra.appendSliceAssumeCapacity(astgen.scratch.items[scratch_top..][0..backing_int_body_len]); + astgen.extra.appendSliceAssumeCapacity(decls_slice); + astgen.extra.appendSliceAssumeCapacity(fields_slice); + astgen.extra.appendSliceAssumeCapacity(bodies_slice); + + block_scope.unstack(); + try gz.addNamespaceCaptures(&namespace); + return decl_inst.toRef(); +} + +fn unionDeclInner( + gz: *GenZir, + scope: *Scope, + node: Ast.Node.Index, + members: []const Ast.Node.Index, + layout: std.builtin.Type.ContainerLayout, + arg_node: Ast.Node.Index, + auto_enum_tok: ?Ast.TokenIndex, +) InnerError!Zir.Inst.Ref { + const decl_inst = try gz.reserveInstructionIndex(); + + const astgen = gz.astgen; + const gpa = astgen.gpa; + + var namespace: Scope.Namespace = .{ + .parent = scope, + .node = node, + .inst = decl_inst, + .declaring_gz = gz, + }; + defer namespace.deinit(gpa); + + // The union_decl instruction introduces a scope in which the decls of the union + // are in scope, so that field types, alignments, and default value expressions + // can refer to decls within the union itself. 
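+ // Illustrative input (assumed, not from this change):
+ //     const U = union(enum) {
+ //         const Payload = u32;
+ //         a: Payload,
+ //         b: void,
+ //     };
+ // `Payload` resolves while the field types below are lowered because the
+ // union's own decls are in scope (via `scanDecls` below).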
+ astgen.advanceSourceCursorToNode(node); + var block_scope: GenZir = .{ + .parent = &namespace.base, + .decl_node_index = node, + .decl_line = gz.decl_line, + .astgen = astgen, + .is_comptime = true, + .instructions = gz.instructions, + .instructions_top = gz.instructions.items.len, + }; + defer block_scope.unstack(); + + const decl_count = try astgen.scanDecls(&namespace, members); + const field_count: u32 = @intCast(members.len - decl_count); + + if (layout != .Auto and (auto_enum_tok != null or arg_node != 0)) { + const layout_str = if (layout == .Extern) "extern" else "packed"; + if (arg_node != 0) { + return astgen.failNode(arg_node, "{s} union does not support enum tag type", .{layout_str}); + } else { + return astgen.failTok(auto_enum_tok.?, "{s} union does not support enum tag type", .{layout_str}); + } + } + + const arg_inst: Zir.Inst.Ref = if (arg_node != 0) + try typeExpr(&block_scope, &namespace.base, arg_node) + else + .none; + + const bits_per_field = 4; + const max_field_size = 5; + var any_aligned_fields = false; + var wip_members = try WipMembers.init(gpa, &astgen.scratch, decl_count, field_count, bits_per_field, max_field_size); + defer wip_members.deinit(); + + var fields_hasher = std.zig.SrcHasher.init(.{}); + fields_hasher.update(@tagName(layout)); + fields_hasher.update(&.{@intFromBool(auto_enum_tok != null)}); + if (arg_node != 0) { + fields_hasher.update(astgen.tree.getNodeSource(arg_node)); + } + + var sfba = std.heap.stackFallback(256, astgen.arena); + const sfba_allocator = sfba.get(); + + var duplicate_names = std.AutoArrayHashMap(Zir.NullTerminatedString, std.ArrayListUnmanaged(Ast.TokenIndex)).init(sfba_allocator); + try duplicate_names.ensureTotalCapacity(field_count); + + // When there aren't errors, use this to avoid a second iteration. 
+ var any_duplicate = false; + + for (members) |member_node| { + var member = switch (try containerMember(&block_scope, &namespace.base, &wip_members, member_node)) { + .decl => continue, + .field => |field| field, + }; + fields_hasher.update(astgen.tree.getNodeSource(member_node)); + member.convertToNonTupleLike(astgen.tree.nodes); + if (member.ast.tuple_like) { + return astgen.failTok(member.ast.main_token, "union field missing name", .{}); + } + if (member.comptime_token) |comptime_token| { + return astgen.failTok(comptime_token, "union fields cannot be marked comptime", .{}); + } + + const field_name = try astgen.identAsString(member.ast.main_token); + wip_members.appendToField(@intFromEnum(field_name)); + + const gop = try duplicate_names.getOrPut(field_name); + + if (gop.found_existing) { + try gop.value_ptr.append(sfba_allocator, member.ast.main_token); + any_duplicate = true; + } else { + gop.value_ptr.* = .{}; + try gop.value_ptr.append(sfba_allocator, member.ast.main_token); + } + + const doc_comment_index = try astgen.docCommentAsString(member.firstToken()); + wip_members.appendToField(@intFromEnum(doc_comment_index)); + + const have_type = member.ast.type_expr != 0; + const have_align = member.ast.align_expr != 0; + const have_value = member.ast.value_expr != 0; + const unused = false; + wip_members.nextField(bits_per_field, .{ have_type, have_align, have_value, unused }); + + if (have_type) { + const field_type = try typeExpr(&block_scope, &namespace.base, member.ast.type_expr); + wip_members.appendToField(@intFromEnum(field_type)); + } else if (arg_inst == .none and auto_enum_tok == null) { + return astgen.failNode(member_node, "union field missing type", .{}); + } + if (have_align) { + const align_inst = try expr(&block_scope, &block_scope.base, coerced_align_ri, member.ast.align_expr); + wip_members.appendToField(@intFromEnum(align_inst)); + any_aligned_fields = true; + } + if (have_value) { + if (arg_inst == .none) { + return astgen.failNodeNotes( + node, + "explicitly valued tagged union missing integer tag type", + .{}, + &[_]u32{ + try astgen.errNoteNode( + member.ast.value_expr, + "tag value specified here", + .{}, + ), + }, + ); + } + if (auto_enum_tok == null) { + return astgen.failNodeNotes( + node, + "explicitly valued tagged union requires inferred enum tag type", + .{}, + &[_]u32{ + try astgen.errNoteNode( + member.ast.value_expr, + "tag value specified here", + .{}, + ), + }, + ); + } + const tag_value = try expr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = arg_inst } }, member.ast.value_expr); + wip_members.appendToField(@intFromEnum(tag_value)); + } + } + + if (any_duplicate) { + var it = duplicate_names.iterator(); + + while (it.next()) |entry| { + const record = entry.value_ptr.*; + if (record.items.len > 1) { + var error_notes = std.ArrayList(u32).init(astgen.arena); + + for (record.items[1..]) |duplicate| { + try error_notes.append(try astgen.errNoteTok(duplicate, "duplicate field here", .{})); + } + + try error_notes.append(try astgen.errNoteNode(node, "union declared here", .{})); + + try astgen.appendErrorTokNotes( + record.items[0], + "duplicate union field name", + .{}, + error_notes.items, + ); + } + } + + return error.AnalysisFail; + } + + var fields_hash: std.zig.SrcHash = undefined; + fields_hasher.final(&fields_hash); + + if (!block_scope.isEmpty()) { + _ = try block_scope.addBreak(.break_inline, decl_inst, .void_value); + } + + const body = block_scope.instructionsSlice(); + const body_len = astgen.countBodyLenAfterFixups(body); + + try 
gz.setUnion(decl_inst, .{ + .src_node = node, + .layout = layout, + .tag_type = arg_inst, + .body_len = body_len, + .fields_len = field_count, + .decls_len = decl_count, + .auto_enum_tag = auto_enum_tok != null, + .any_aligned_fields = any_aligned_fields, + .fields_hash = fields_hash, + }); + + wip_members.finishBits(bits_per_field); + const decls_slice = wip_members.declsSlice(); + const fields_slice = wip_members.fieldsSlice(); + try astgen.extra.ensureUnusedCapacity(gpa, decls_slice.len + body_len + fields_slice.len); + astgen.extra.appendSliceAssumeCapacity(decls_slice); + astgen.appendBodyWithFixups(body); + astgen.extra.appendSliceAssumeCapacity(fields_slice); + + block_scope.unstack(); + try gz.addNamespaceCaptures(&namespace); + return decl_inst.toRef(); +} + +fn containerDecl( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + container_decl: Ast.full.ContainerDecl, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const gpa = astgen.gpa; + const tree = astgen.tree; + const token_tags = tree.tokens.items(.tag); + + const prev_fn_block = astgen.fn_block; + astgen.fn_block = null; + defer astgen.fn_block = prev_fn_block; + + // We must not create any types until Sema. Here the goal is only to generate + // ZIR for all the field types, alignments, and default value expressions. + + switch (token_tags[container_decl.ast.main_token]) { + .keyword_struct => { + const layout = if (container_decl.layout_token) |t| switch (token_tags[t]) { + .keyword_packed => std.builtin.Type.ContainerLayout.Packed, + .keyword_extern => std.builtin.Type.ContainerLayout.Extern, + else => unreachable, + } else std.builtin.Type.ContainerLayout.Auto; + + const result = try structDeclInner(gz, scope, node, container_decl, layout, container_decl.ast.arg); + return rvalue(gz, ri, result, node); + }, + .keyword_union => { + const layout = if (container_decl.layout_token) |t| switch (token_tags[t]) { + .keyword_packed => std.builtin.Type.ContainerLayout.Packed, + .keyword_extern => std.builtin.Type.ContainerLayout.Extern, + else => unreachable, + } else std.builtin.Type.ContainerLayout.Auto; + + const result = try unionDeclInner(gz, scope, node, container_decl.ast.members, layout, container_decl.ast.arg, container_decl.ast.enum_token); + return rvalue(gz, ri, result, node); + }, + .keyword_enum => { + if (container_decl.layout_token) |t| { + return astgen.failTok(t, "enums do not support 'packed' or 'extern'; instead provide an explicit integer tag type", .{}); + } + // Count total fields as well as how many have explicitly provided tag values. 
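+ // Illustrative input (assumed, not from this change):
+ //     const E = enum(u8) { a, b = 4, c, _ };
+ // Here total_fields = 3 and values = 1 (only `b` is explicit); the
+ // trailing `_` marks the enum non-exhaustive instead of declaring a field.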
+ const counts = blk: { + var values: usize = 0; + var total_fields: usize = 0; + var decls: usize = 0; + var nonexhaustive_node: Ast.Node.Index = 0; + var nonfinal_nonexhaustive = false; + for (container_decl.ast.members) |member_node| { + var member = tree.fullContainerField(member_node) orelse { + decls += 1; + continue; + }; + member.convertToNonTupleLike(astgen.tree.nodes); + if (member.ast.tuple_like) { + return astgen.failTok(member.ast.main_token, "enum field missing name", .{}); + } + if (member.comptime_token) |comptime_token| { + return astgen.failTok(comptime_token, "enum fields cannot be marked comptime", .{}); + } + if (member.ast.type_expr != 0) { + return astgen.failNodeNotes( + member.ast.type_expr, + "enum fields do not have types", + .{}, + &[_]u32{ + try astgen.errNoteNode( + node, + "consider 'union(enum)' here to make it a tagged union", + .{}, + ), + }, + ); + } + if (member.ast.align_expr != 0) { + return astgen.failNode(member.ast.align_expr, "enum fields cannot be aligned", .{}); + } + + const name_token = member.ast.main_token; + if (mem.eql(u8, tree.tokenSlice(name_token), "_")) { + if (nonexhaustive_node != 0) { + return astgen.failNodeNotes( + member_node, + "redundant non-exhaustive enum mark", + .{}, + &[_]u32{ + try astgen.errNoteNode( + nonexhaustive_node, + "other mark here", + .{}, + ), + }, + ); + } + nonexhaustive_node = member_node; + if (member.ast.value_expr != 0) { + return astgen.failNode(member.ast.value_expr, "'_' is used to mark an enum as non-exhaustive and cannot be assigned a value", .{}); + } + continue; + } else if (nonexhaustive_node != 0) { + nonfinal_nonexhaustive = true; + } + total_fields += 1; + if (member.ast.value_expr != 0) { + if (container_decl.ast.arg == 0) { + return astgen.failNode(member.ast.value_expr, "value assigned to enum tag with inferred tag type", .{}); + } + values += 1; + } + } + if (nonfinal_nonexhaustive) { + return astgen.failNode(nonexhaustive_node, "'_' field of non-exhaustive enum must be last", .{}); + } + break :blk .{ + .total_fields = total_fields, + .values = values, + .decls = decls, + .nonexhaustive_node = nonexhaustive_node, + }; + }; + if (counts.nonexhaustive_node != 0 and container_decl.ast.arg == 0) { + try astgen.appendErrorNodeNotes( + node, + "non-exhaustive enum missing integer tag type", + .{}, + &[_]u32{ + try astgen.errNoteNode( + counts.nonexhaustive_node, + "marked non-exhaustive here", + .{}, + ), + }, + ); + } + // In this case we must generate ZIR code for the tag values, similar to + // how structs are handled above. + const nonexhaustive = counts.nonexhaustive_node != 0; + + const decl_inst = try gz.reserveInstructionIndex(); + + var namespace: Scope.Namespace = .{ + .parent = scope, + .node = node, + .inst = decl_inst, + .declaring_gz = gz, + }; + defer namespace.deinit(gpa); + + // The enum_decl instruction introduces a scope in which the decls of the enum + // are in scope, so that tag values can refer to decls within the enum itself. 
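+ // Illustrative input (assumed, not from this change):
+ //     const E = enum(u8) {
+ //         const base = 10;
+ //         a = base,
+ //         b = base + 1,
+ //     };
+ // `base` resolves while the tag value expressions are lowered because the
+ // enum's own decls are in scope.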
+ astgen.advanceSourceCursorToNode(node); + var block_scope: GenZir = .{ + .parent = &namespace.base, + .decl_node_index = node, + .decl_line = gz.decl_line, + .astgen = astgen, + .is_comptime = true, + .instructions = gz.instructions, + .instructions_top = gz.instructions.items.len, + }; + defer block_scope.unstack(); + + _ = try astgen.scanDecls(&namespace, container_decl.ast.members); + namespace.base.tag = .enum_namespace; + + const arg_inst: Zir.Inst.Ref = if (container_decl.ast.arg != 0) + try comptimeExpr(&block_scope, &namespace.base, coerced_type_ri, container_decl.ast.arg) + else + .none; + + const bits_per_field = 1; + const max_field_size = 3; + var wip_members = try WipMembers.init(gpa, &astgen.scratch, @intCast(counts.decls), @intCast(counts.total_fields), bits_per_field, max_field_size); + defer wip_members.deinit(); + + var fields_hasher = std.zig.SrcHasher.init(.{}); + if (container_decl.ast.arg != 0) { + fields_hasher.update(tree.getNodeSource(container_decl.ast.arg)); + } + fields_hasher.update(&.{@intFromBool(nonexhaustive)}); + + var sfba = std.heap.stackFallback(256, astgen.arena); + const sfba_allocator = sfba.get(); + + var duplicate_names = std.AutoArrayHashMap(Zir.NullTerminatedString, std.ArrayListUnmanaged(Ast.TokenIndex)).init(sfba_allocator); + try duplicate_names.ensureTotalCapacity(counts.total_fields); + + // When there aren't errors, use this to avoid a second iteration. + var any_duplicate = false; + + for (container_decl.ast.members) |member_node| { + if (member_node == counts.nonexhaustive_node) + continue; + fields_hasher.update(tree.getNodeSource(member_node)); + namespace.base.tag = .namespace; + var member = switch (try containerMember(&block_scope, &namespace.base, &wip_members, member_node)) { + .decl => continue, + .field => |field| field, + }; + member.convertToNonTupleLike(astgen.tree.nodes); + assert(member.comptime_token == null); + assert(member.ast.type_expr == 0); + assert(member.ast.align_expr == 0); + + const field_name = try astgen.identAsString(member.ast.main_token); + wip_members.appendToField(@intFromEnum(field_name)); + + const gop = try duplicate_names.getOrPut(field_name); + + if (gop.found_existing) { + try gop.value_ptr.append(sfba_allocator, member.ast.main_token); + any_duplicate = true; + } else { + gop.value_ptr.* = .{}; + try gop.value_ptr.append(sfba_allocator, member.ast.main_token); + } + + const doc_comment_index = try astgen.docCommentAsString(member.firstToken()); + wip_members.appendToField(@intFromEnum(doc_comment_index)); + + const have_value = member.ast.value_expr != 0; + wip_members.nextField(bits_per_field, .{have_value}); + + if (have_value) { + if (arg_inst == .none) { + return astgen.failNodeNotes( + node, + "explicitly valued enum missing integer tag type", + .{}, + &[_]u32{ + try astgen.errNoteNode( + member.ast.value_expr, + "tag value specified here", + .{}, + ), + }, + ); + } + namespace.base.tag = .enum_namespace; + const tag_value_inst = try expr(&block_scope, &namespace.base, .{ .rl = .{ .ty = arg_inst } }, member.ast.value_expr); + wip_members.appendToField(@intFromEnum(tag_value_inst)); + } + } + + if (any_duplicate) { + var it = duplicate_names.iterator(); + + while (it.next()) |entry| { + const record = entry.value_ptr.*; + if (record.items.len > 1) { + var error_notes = std.ArrayList(u32).init(astgen.arena); + + for (record.items[1..]) |duplicate| { + try error_notes.append(try astgen.errNoteTok(duplicate, "duplicate field here", .{})); + } + + try error_notes.append(try 
astgen.errNoteNode(node, "enum declared here", .{})); + + try astgen.appendErrorTokNotes( + record.items[0], + "duplicate enum field name", + .{}, + error_notes.items, + ); + } + } + + return error.AnalysisFail; + } + + if (!block_scope.isEmpty()) { + _ = try block_scope.addBreak(.break_inline, decl_inst, .void_value); + } + + var fields_hash: std.zig.SrcHash = undefined; + fields_hasher.final(&fields_hash); + + const body = block_scope.instructionsSlice(); + const body_len = astgen.countBodyLenAfterFixups(body); + + try gz.setEnum(decl_inst, .{ + .src_node = node, + .nonexhaustive = nonexhaustive, + .tag_type = arg_inst, + .body_len = body_len, + .fields_len = @intCast(counts.total_fields), + .decls_len = @intCast(counts.decls), + .fields_hash = fields_hash, + }); + + wip_members.finishBits(bits_per_field); + const decls_slice = wip_members.declsSlice(); + const fields_slice = wip_members.fieldsSlice(); + try astgen.extra.ensureUnusedCapacity(gpa, decls_slice.len + body_len + fields_slice.len); + astgen.extra.appendSliceAssumeCapacity(decls_slice); + astgen.appendBodyWithFixups(body); + astgen.extra.appendSliceAssumeCapacity(fields_slice); + + block_scope.unstack(); + try gz.addNamespaceCaptures(&namespace); + return rvalue(gz, ri, decl_inst.toRef(), node); + }, + .keyword_opaque => { + assert(container_decl.ast.arg == 0); + + const decl_inst = try gz.reserveInstructionIndex(); + + var namespace: Scope.Namespace = .{ + .parent = scope, + .node = node, + .inst = decl_inst, + .declaring_gz = gz, + }; + defer namespace.deinit(gpa); + + astgen.advanceSourceCursorToNode(node); + var block_scope: GenZir = .{ + .parent = &namespace.base, + .decl_node_index = node, + .decl_line = gz.decl_line, + .astgen = astgen, + .is_comptime = true, + .instructions = gz.instructions, + .instructions_top = gz.instructions.items.len, + }; + defer block_scope.unstack(); + + const decl_count = try astgen.scanDecls(&namespace, container_decl.ast.members); + + var wip_members = try WipMembers.init(gpa, &astgen.scratch, decl_count, 0, 0, 0); + defer wip_members.deinit(); + + for (container_decl.ast.members) |member_node| { + const res = try containerMember(&block_scope, &namespace.base, &wip_members, member_node); + if (res == .field) { + return astgen.failNode(member_node, "opaque types cannot have fields", .{}); + } + } + + try gz.setOpaque(decl_inst, .{ + .src_node = node, + .decls_len = decl_count, + }); + + wip_members.finishBits(0); + const decls_slice = wip_members.declsSlice(); + try astgen.extra.ensureUnusedCapacity(gpa, decls_slice.len); + astgen.extra.appendSliceAssumeCapacity(decls_slice); + + block_scope.unstack(); + try gz.addNamespaceCaptures(&namespace); + return rvalue(gz, ri, decl_inst.toRef(), node); + }, + else => unreachable, + } +} + +const ContainerMemberResult = union(enum) { decl, field: Ast.full.ContainerField }; + +fn containerMember( + gz: *GenZir, + scope: *Scope, + wip_members: *WipMembers, + member_node: Ast.Node.Index, +) InnerError!ContainerMemberResult { + const astgen = gz.astgen; + const tree = astgen.tree; + const node_tags = tree.nodes.items(.tag); + const node_datas = tree.nodes.items(.data); + switch (node_tags[member_node]) { + .container_field_init, + .container_field_align, + .container_field, + => return ContainerMemberResult{ .field = tree.fullContainerField(member_node).? 
}, + + .fn_proto, + .fn_proto_multi, + .fn_proto_one, + .fn_proto_simple, + .fn_decl, + => { + var buf: [1]Ast.Node.Index = undefined; + const full = tree.fullFnProto(&buf, member_node).?; + const body = if (node_tags[member_node] == .fn_decl) node_datas[member_node].rhs else 0; + + astgen.fnDecl(gz, scope, wip_members, member_node, body, full) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail => {}, + }; + }, + + .global_var_decl, + .local_var_decl, + .simple_var_decl, + .aligned_var_decl, + => { + astgen.globalVarDecl(gz, scope, wip_members, member_node, tree.fullVarDecl(member_node).?) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail => {}, + }; + }, + + .@"comptime" => { + astgen.comptimeDecl(gz, scope, wip_members, member_node) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail => {}, + }; + }, + .@"usingnamespace" => { + astgen.usingnamespaceDecl(gz, scope, wip_members, member_node) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail => {}, + }; + }, + .test_decl => { + astgen.testDecl(gz, scope, wip_members, member_node) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail => {}, + }; + }, + else => unreachable, + } + return .decl; +} + +fn errorSetDecl(gz: *GenZir, ri: ResultInfo, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const gpa = astgen.gpa; + const tree = astgen.tree; + const main_tokens = tree.nodes.items(.main_token); + const token_tags = tree.tokens.items(.tag); + + const payload_index = try reserveExtra(astgen, @typeInfo(Zir.Inst.ErrorSetDecl).Struct.fields.len); + var fields_len: usize = 0; + { + var idents: std.AutoHashMapUnmanaged(Zir.NullTerminatedString, Ast.TokenIndex) = .{}; + defer idents.deinit(gpa); + + const error_token = main_tokens[node]; + var tok_i = error_token + 2; + while (true) : (tok_i += 1) { + switch (token_tags[tok_i]) { + .doc_comment, .comma => {}, + .identifier => { + const str_index = try astgen.identAsString(tok_i); + const gop = try idents.getOrPut(gpa, str_index); + if (gop.found_existing) { + const name = try gpa.dupe(u8, mem.span(astgen.nullTerminatedString(str_index))); + defer gpa.free(name); + return astgen.failTokNotes( + tok_i, + "duplicate error set field '{s}'", + .{name}, + &[_]u32{ + try astgen.errNoteTok( + gop.value_ptr.*, + "previous declaration here", + .{}, + ), + }, + ); + } + gop.value_ptr.* = tok_i; + + try astgen.extra.ensureUnusedCapacity(gpa, 2); + astgen.extra.appendAssumeCapacity(@intFromEnum(str_index)); + const doc_comment_index = try astgen.docCommentAsString(tok_i); + astgen.extra.appendAssumeCapacity(@intFromEnum(doc_comment_index)); + fields_len += 1; + }, + .r_brace => break, + else => unreachable, + } + } + } + + setExtra(astgen, payload_index, Zir.Inst.ErrorSetDecl{ + .fields_len = @intCast(fields_len), + }); + const result = try gz.addPlNodePayloadIndex(.error_set_decl, node, payload_index); + return rvalue(gz, ri, result, node); +} + +fn tryExpr( + parent_gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + operand_node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = parent_gz.astgen; + + const fn_block = astgen.fn_block orelse { + return astgen.failNode(node, "'try' outside function scope", .{}); + }; + + if (parent_gz.any_defer_node != 0) { + return astgen.failNodeNotes(node, "'try' not allowed inside defer 
expression", .{}, &.{ + try astgen.errNoteNode( + parent_gz.any_defer_node, + "defer expression here", + .{}, + ), + }); + } + + // Ensure debug line/column information is emitted for this try expression. + // Then we will save the line/column so that we can emit another one that goes + // "backwards" because we want to evaluate the operand, but then put the debug + // info back at the try keyword for error return tracing. + if (!parent_gz.is_comptime) { + try emitDbgNode(parent_gz, node); + } + const try_lc = LineColumn{ astgen.source_line - parent_gz.decl_line, astgen.source_column }; + + const operand_ri: ResultInfo = switch (ri.rl) { + .ref, .ref_coerced_ty => .{ .rl = .ref, .ctx = .error_handling_expr }, + else => .{ .rl = .none, .ctx = .error_handling_expr }, + }; + // This could be a pointer or value depending on the `ri` parameter. + const operand = try reachableExpr(parent_gz, scope, operand_ri, operand_node, node); + const block_tag: Zir.Inst.Tag = if (operand_ri.rl == .ref) .try_ptr else .@"try"; + const try_inst = try parent_gz.makeBlockInst(block_tag, node); + try parent_gz.instructions.append(astgen.gpa, try_inst); + + var else_scope = parent_gz.makeSubBlock(scope); + defer else_scope.unstack(); + + const err_tag = switch (ri.rl) { + .ref, .ref_coerced_ty => Zir.Inst.Tag.err_union_code_ptr, + else => Zir.Inst.Tag.err_union_code, + }; + const err_code = try else_scope.addUnNode(err_tag, operand, node); + try genDefers(&else_scope, &fn_block.base, scope, .{ .both = err_code }); + try emitDbgStmt(&else_scope, try_lc); + _ = try else_scope.addUnNode(.ret_node, err_code, node); + + try else_scope.setTryBody(try_inst, operand); + const result = try_inst.toRef(); + switch (ri.rl) { + .ref, .ref_coerced_ty => return result, + else => return rvalue(parent_gz, ri, result, node), + } +} + +fn orelseCatchExpr( + parent_gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + lhs: Ast.Node.Index, + cond_op: Zir.Inst.Tag, + unwrap_op: Zir.Inst.Tag, + unwrap_code_op: Zir.Inst.Tag, + rhs: Ast.Node.Index, + payload_token: ?Ast.TokenIndex, +) InnerError!Zir.Inst.Ref { + const astgen = parent_gz.astgen; + const tree = astgen.tree; + + const need_rl = astgen.nodes_need_rl.contains(node); + const block_ri: ResultInfo = if (need_rl) ri else .{ + .rl = switch (ri.rl) { + .ptr => .{ .ty = (try ri.rl.resultType(parent_gz, node)).? }, + .inferred_ptr => .none, + else => ri.rl, + }, + .ctx = ri.ctx, + }; + // We need to call `rvalue` to write through to the pointer only if we had a + // result pointer and aren't forwarding it. + const LocTag = @typeInfo(ResultInfo.Loc).Union.tag_type.?; + const need_result_rvalue = @as(LocTag, block_ri.rl) != @as(LocTag, ri.rl); + + const do_err_trace = astgen.fn_block != null and (cond_op == .is_non_err or cond_op == .is_non_err_ptr); + + var block_scope = parent_gz.makeSubBlock(scope); + block_scope.setBreakResultInfo(block_ri); + defer block_scope.unstack(); + + const operand_ri: ResultInfo = switch (block_scope.break_result_info.rl) { + .ref, .ref_coerced_ty => .{ .rl = .ref, .ctx = if (do_err_trace) .error_handling_expr else .none }, + else => .{ .rl = .none, .ctx = if (do_err_trace) .error_handling_expr else .none }, + }; + // This could be a pointer or value depending on the `operand_ri` parameter. + // We cannot use `block_scope.break_result_info` because that has the bare + // type, whereas this expression has the optional type. Later we make + // up for this fact by calling rvalue on the else branch. 
+    const operand = try reachableExpr(&block_scope, &block_scope.base, operand_ri, lhs, rhs);
+    const cond = try block_scope.addUnNode(cond_op, operand, node);
+    const condbr = try block_scope.addCondBr(.condbr, node);
+
+    const block = try parent_gz.makeBlockInst(.block, node);
+    try block_scope.setBlockBody(block);
+    // block_scope unstacked now, can add new instructions to parent_gz
+    try parent_gz.instructions.append(astgen.gpa, block);
+
+    var then_scope = block_scope.makeSubBlock(scope);
+    defer then_scope.unstack();
+
+    // This could be a pointer or value depending on `unwrap_op`.
+    const unwrapped_payload = try then_scope.addUnNode(unwrap_op, operand, node);
+    const then_result = switch (ri.rl) {
+        .ref, .ref_coerced_ty => unwrapped_payload,
+        else => try rvalue(&then_scope, block_scope.break_result_info, unwrapped_payload, node),
+    };
+    _ = try then_scope.addBreakWithSrcNode(.@"break", block, then_result, node);
+
+    var else_scope = block_scope.makeSubBlock(scope);
+    defer else_scope.unstack();
+
+    // We know that the operand (almost certainly) modified the error return trace,
+    // so signal to Sema that it should save the new index for restoring later.
+    if (do_err_trace and nodeMayAppendToErrorTrace(tree, lhs))
+        _ = try else_scope.addSaveErrRetIndex(.always);
+
+    var err_val_scope: Scope.LocalVal = undefined;
+    const else_sub_scope = blk: {
+        const payload = payload_token orelse break :blk &else_scope.base;
+        const err_str = tree.tokenSlice(payload);
+        if (mem.eql(u8, err_str, "_")) {
+            return astgen.failTok(payload, "discard of error capture; omit it instead", .{});
+        }
+        const err_name = try astgen.identAsString(payload);
+
+        try astgen.detectLocalShadowing(scope, err_name, payload, err_str, .capture);
+
+        err_val_scope = .{
+            .parent = &else_scope.base,
+            .gen_zir = &else_scope,
+            .name = err_name,
+            .inst = try else_scope.addUnNode(unwrap_code_op, operand, node),
+            .token_src = payload,
+            .id_cat = .capture,
+        };
+        break :blk &err_val_scope.base;
+    };
+
+    const else_result = try expr(&else_scope, else_sub_scope, block_scope.break_result_info, rhs);
+    if (!else_scope.endsWithNoReturn()) {
+        // As our last action before the break, "pop" the error trace if needed
+        if (do_err_trace)
+            try restoreErrRetIndex(&else_scope, .{ .block = block }, block_scope.break_result_info, rhs, else_result);
+
+        _ = try else_scope.addBreakWithSrcNode(.@"break", block, else_result, rhs);
+    }
+    try checkUsed(parent_gz, &else_scope.base, else_sub_scope);
+
+    try setCondBrPayload(condbr, cond, &then_scope, &else_scope);
+
+    if (need_result_rvalue) {
+        return rvalue(parent_gz, ri, block.toRef(), node);
+    } else {
+        return block.toRef();
+    }
+}
+
+/// Return whether the identifier names of two tokens are equal, resolving
+/// @"" tokens. In theory this could be done without allocating; this
+/// implementation allocates when the @"" form is used.
+fn tokenIdentEql(astgen: *AstGen, token1: Ast.TokenIndex, token2: Ast.TokenIndex) !bool { + const ident_name_1 = try astgen.identifierTokenString(token1); + const ident_name_2 = try astgen.identifierTokenString(token2); + return mem.eql(u8, ident_name_1, ident_name_2); +} + +fn fieldAccess( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + switch (ri.rl) { + .ref, .ref_coerced_ty => return addFieldAccess(.field_ptr, gz, scope, .{ .rl = .ref }, node), + else => { + const access = try addFieldAccess(.field_val, gz, scope, .{ .rl = .none }, node); + return rvalue(gz, ri, access, node); + }, + } +} + +fn addFieldAccess( + tag: Zir.Inst.Tag, + gz: *GenZir, + scope: *Scope, + lhs_ri: ResultInfo, + node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const main_tokens = tree.nodes.items(.main_token); + const node_datas = tree.nodes.items(.data); + + const object_node = node_datas[node].lhs; + const dot_token = main_tokens[node]; + const field_ident = dot_token + 1; + const str_index = try astgen.identAsString(field_ident); + const lhs = try expr(gz, scope, lhs_ri, object_node); + + const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); + try emitDbgStmt(gz, cursor); + + return gz.addPlNode(tag, node, Zir.Inst.Field{ + .lhs = lhs, + .field_name_start = str_index, + }); +} + +fn arrayAccess( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const tree = gz.astgen.tree; + const node_datas = tree.nodes.items(.data); + switch (ri.rl) { + .ref, .ref_coerced_ty => { + const lhs = try expr(gz, scope, .{ .rl = .ref }, node_datas[node].lhs); + + const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); + + const rhs = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, node_datas[node].rhs); + try emitDbgStmt(gz, cursor); + + return gz.addPlNode(.elem_ptr_node, node, Zir.Inst.Bin{ .lhs = lhs, .rhs = rhs }); + }, + else => { + const lhs = try expr(gz, scope, .{ .rl = .none }, node_datas[node].lhs); + + const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); + + const rhs = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, node_datas[node].rhs); + try emitDbgStmt(gz, cursor); + + return rvalue(gz, ri, try gz.addPlNode(.elem_val_node, node, Zir.Inst.Bin{ .lhs = lhs, .rhs = rhs }), node); + }, + } +} + +fn simpleBinOp( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + op_inst_tag: Zir.Inst.Tag, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + + if (op_inst_tag == .cmp_neq or op_inst_tag == .cmp_eq) { + const node_tags = tree.nodes.items(.tag); + const str = if (op_inst_tag == .cmp_eq) "==" else "!="; + if (node_tags[node_datas[node].lhs] == .string_literal or + node_tags[node_datas[node].rhs] == .string_literal) + return astgen.failNode(node, "cannot compare strings with {s}", .{str}); + } + + const lhs = try reachableExpr(gz, scope, .{ .rl = .none }, node_datas[node].lhs, node); + const cursor = switch (op_inst_tag) { + .add, .sub, .mul, .div, .mod_rem => maybeAdvanceSourceCursorToMainToken(gz, node), + else => undefined, + }; + const rhs = try reachableExpr(gz, scope, .{ .rl = .none }, node_datas[node].rhs, node); + + switch (op_inst_tag) { + .add, .sub, .mul, .div, .mod_rem => { + try emitDbgStmt(gz, cursor); + }, + else => {}, + } + const result = try gz.addPlNode(op_inst_tag, node, 
Zir.Inst.Bin{ .lhs = lhs, .rhs = rhs }); + return rvalue(gz, ri, result, node); +} + +fn simpleStrTok( + gz: *GenZir, + ri: ResultInfo, + ident_token: Ast.TokenIndex, + node: Ast.Node.Index, + op_inst_tag: Zir.Inst.Tag, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const str_index = try astgen.identAsString(ident_token); + const result = try gz.addStrTok(op_inst_tag, str_index, ident_token); + return rvalue(gz, ri, result, node); +} + +fn boolBinOp( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + zir_tag: Zir.Inst.Tag, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + + const lhs = try expr(gz, scope, coerced_bool_ri, node_datas[node].lhs); + const bool_br = (try gz.addPlNodePayloadIndex(zir_tag, node, undefined)).toIndex().?; + + var rhs_scope = gz.makeSubBlock(scope); + defer rhs_scope.unstack(); + const rhs = try expr(&rhs_scope, &rhs_scope.base, coerced_bool_ri, node_datas[node].rhs); + if (!gz.refIsNoReturn(rhs)) { + _ = try rhs_scope.addBreakWithSrcNode(.break_inline, bool_br, rhs, node_datas[node].rhs); + } + try rhs_scope.setBoolBrBody(bool_br, lhs); + + const block_ref = bool_br.toRef(); + return rvalue(gz, ri, block_ref, node); +} + +fn ifExpr( + parent_gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + if_full: Ast.full.If, +) InnerError!Zir.Inst.Ref { + const astgen = parent_gz.astgen; + const tree = astgen.tree; + const token_tags = tree.tokens.items(.tag); + + const do_err_trace = astgen.fn_block != null and if_full.error_token != null; + + const need_rl = astgen.nodes_need_rl.contains(node); + const block_ri: ResultInfo = if (need_rl) ri else .{ + .rl = switch (ri.rl) { + .ptr => .{ .ty = (try ri.rl.resultType(parent_gz, node)).? }, + .inferred_ptr => .none, + else => ri.rl, + }, + .ctx = ri.ctx, + }; + // We need to call `rvalue` to write through to the pointer only if we had a + // result pointer and aren't forwarding it. 
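+    // In other words, `need_result_rvalue` below is true only when the block
+    // result location differs from the caller's: e.g. `ri` carried a result
+    // pointer but the block only received the result type, so the value must
+    // still be stored through the pointer afterwards.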
+ const LocTag = @typeInfo(ResultInfo.Loc).Union.tag_type.?; + const need_result_rvalue = @as(LocTag, block_ri.rl) != @as(LocTag, ri.rl); + + var block_scope = parent_gz.makeSubBlock(scope); + block_scope.setBreakResultInfo(block_ri); + defer block_scope.unstack(); + + const payload_is_ref = if (if_full.payload_token) |payload_token| + token_tags[payload_token] == .asterisk + else + false; + + try emitDbgNode(parent_gz, if_full.ast.cond_expr); + const cond: struct { + inst: Zir.Inst.Ref, + bool_bit: Zir.Inst.Ref, + } = c: { + if (if_full.error_token) |_| { + const cond_ri: ResultInfo = .{ .rl = if (payload_is_ref) .ref else .none, .ctx = .error_handling_expr }; + const err_union = try expr(&block_scope, &block_scope.base, cond_ri, if_full.ast.cond_expr); + const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_err_ptr else .is_non_err; + break :c .{ + .inst = err_union, + .bool_bit = try block_scope.addUnNode(tag, err_union, if_full.ast.cond_expr), + }; + } else if (if_full.payload_token) |_| { + const cond_ri: ResultInfo = .{ .rl = if (payload_is_ref) .ref else .none }; + const optional = try expr(&block_scope, &block_scope.base, cond_ri, if_full.ast.cond_expr); + const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_null_ptr else .is_non_null; + break :c .{ + .inst = optional, + .bool_bit = try block_scope.addUnNode(tag, optional, if_full.ast.cond_expr), + }; + } else { + const cond = try expr(&block_scope, &block_scope.base, coerced_bool_ri, if_full.ast.cond_expr); + break :c .{ + .inst = cond, + .bool_bit = cond, + }; + } + }; + + const condbr = try block_scope.addCondBr(.condbr, node); + + const block = try parent_gz.makeBlockInst(.block, node); + try block_scope.setBlockBody(block); + // block_scope unstacked now, can add new instructions to parent_gz + try parent_gz.instructions.append(astgen.gpa, block); + + var then_scope = parent_gz.makeSubBlock(scope); + defer then_scope.unstack(); + + var payload_val_scope: Scope.LocalVal = undefined; + + const then_node = if_full.ast.then_expr; + const then_sub_scope = s: { + if (if_full.error_token != null) { + if (if_full.payload_token) |payload_token| { + const tag: Zir.Inst.Tag = if (payload_is_ref) + .err_union_payload_unsafe_ptr + else + .err_union_payload_unsafe; + const payload_inst = try then_scope.addUnNode(tag, cond.inst, then_node); + const token_name_index = payload_token + @intFromBool(payload_is_ref); + const ident_name = try astgen.identAsString(token_name_index); + const token_name_str = tree.tokenSlice(token_name_index); + if (mem.eql(u8, "_", token_name_str)) + break :s &then_scope.base; + try astgen.detectLocalShadowing(&then_scope.base, ident_name, token_name_index, token_name_str, .capture); + payload_val_scope = .{ + .parent = &then_scope.base, + .gen_zir = &then_scope, + .name = ident_name, + .inst = payload_inst, + .token_src = token_name_index, + .id_cat = .capture, + }; + try then_scope.addDbgVar(.dbg_var_val, ident_name, payload_inst); + break :s &payload_val_scope.base; + } else { + _ = try then_scope.addUnNode(.ensure_err_union_payload_void, cond.inst, node); + break :s &then_scope.base; + } + } else if (if_full.payload_token) |payload_token| { + const ident_token = if (payload_is_ref) payload_token + 1 else payload_token; + const tag: Zir.Inst.Tag = if (payload_is_ref) + .optional_payload_unsafe_ptr + else + .optional_payload_unsafe; + const ident_bytes = tree.tokenSlice(ident_token); + if (mem.eql(u8, "_", ident_bytes)) + break :s &then_scope.base; + const payload_inst = try then_scope.addUnNode(tag, cond.inst, 
then_node); + const ident_name = try astgen.identAsString(ident_token); + try astgen.detectLocalShadowing(&then_scope.base, ident_name, ident_token, ident_bytes, .capture); + payload_val_scope = .{ + .parent = &then_scope.base, + .gen_zir = &then_scope, + .name = ident_name, + .inst = payload_inst, + .token_src = ident_token, + .id_cat = .capture, + }; + try then_scope.addDbgVar(.dbg_var_val, ident_name, payload_inst); + break :s &payload_val_scope.base; + } else { + break :s &then_scope.base; + } + }; + + const then_result = try expr(&then_scope, then_sub_scope, block_scope.break_result_info, then_node); + try checkUsed(parent_gz, &then_scope.base, then_sub_scope); + if (!then_scope.endsWithNoReturn()) { + _ = try then_scope.addBreakWithSrcNode(.@"break", block, then_result, then_node); + } + + var else_scope = parent_gz.makeSubBlock(scope); + defer else_scope.unstack(); + + // We know that the operand (almost certainly) modified the error return trace, + // so signal to Sema that it should save the new index for restoring later. + if (do_err_trace and nodeMayAppendToErrorTrace(tree, if_full.ast.cond_expr)) + _ = try else_scope.addSaveErrRetIndex(.always); + + const else_node = if_full.ast.else_expr; + if (else_node != 0) { + const sub_scope = s: { + if (if_full.error_token) |error_token| { + const tag: Zir.Inst.Tag = if (payload_is_ref) + .err_union_code_ptr + else + .err_union_code; + const payload_inst = try else_scope.addUnNode(tag, cond.inst, if_full.ast.cond_expr); + const ident_name = try astgen.identAsString(error_token); + const error_token_str = tree.tokenSlice(error_token); + if (mem.eql(u8, "_", error_token_str)) + break :s &else_scope.base; + try astgen.detectLocalShadowing(&else_scope.base, ident_name, error_token, error_token_str, .capture); + payload_val_scope = .{ + .parent = &else_scope.base, + .gen_zir = &else_scope, + .name = ident_name, + .inst = payload_inst, + .token_src = error_token, + .id_cat = .capture, + }; + try else_scope.addDbgVar(.dbg_var_val, ident_name, payload_inst); + break :s &payload_val_scope.base; + } else { + break :s &else_scope.base; + } + }; + const else_result = try expr(&else_scope, sub_scope, block_scope.break_result_info, else_node); + if (!else_scope.endsWithNoReturn()) { + // As our last action before the break, "pop" the error trace if needed + if (do_err_trace) + try restoreErrRetIndex(&else_scope, .{ .block = block }, block_scope.break_result_info, else_node, else_result); + _ = try else_scope.addBreakWithSrcNode(.@"break", block, else_result, else_node); + } + try checkUsed(parent_gz, &else_scope.base, sub_scope); + } else { + const result = try rvalue(&else_scope, ri, .void_value, node); + _ = try else_scope.addBreak(.@"break", block, result); + } + + try setCondBrPayload(condbr, cond.bool_bit, &then_scope, &else_scope); + + if (need_result_rvalue) { + return rvalue(parent_gz, ri, block.toRef(), node); + } else { + return block.toRef(); + } +} + +/// Supports `else_scope` stacked on `then_scope`. Unstacks `else_scope` then `then_scope`. 
+fn setCondBrPayload( + condbr: Zir.Inst.Index, + cond: Zir.Inst.Ref, + then_scope: *GenZir, + else_scope: *GenZir, +) !void { + defer then_scope.unstack(); + defer else_scope.unstack(); + const astgen = then_scope.astgen; + const then_body = then_scope.instructionsSliceUpto(else_scope); + const else_body = else_scope.instructionsSlice(); + const then_body_len = astgen.countBodyLenAfterFixups(then_body); + const else_body_len = astgen.countBodyLenAfterFixups(else_body); + try astgen.extra.ensureUnusedCapacity( + astgen.gpa, + @typeInfo(Zir.Inst.CondBr).Struct.fields.len + then_body_len + else_body_len, + ); + + const zir_datas = astgen.instructions.items(.data); + zir_datas[@intFromEnum(condbr)].pl_node.payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.CondBr{ + .condition = cond, + .then_body_len = then_body_len, + .else_body_len = else_body_len, + }); + astgen.appendBodyWithFixups(then_body); + astgen.appendBodyWithFixups(else_body); +} + +fn whileExpr( + parent_gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + while_full: Ast.full.While, + is_statement: bool, +) InnerError!Zir.Inst.Ref { + const astgen = parent_gz.astgen; + const tree = astgen.tree; + const token_tags = tree.tokens.items(.tag); + + const need_rl = astgen.nodes_need_rl.contains(node); + const block_ri: ResultInfo = if (need_rl) ri else .{ + .rl = switch (ri.rl) { + .ptr => .{ .ty = (try ri.rl.resultType(parent_gz, node)).? }, + .inferred_ptr => .none, + else => ri.rl, + }, + .ctx = ri.ctx, + }; + // We need to call `rvalue` to write through to the pointer only if we had a + // result pointer and aren't forwarding it. + const LocTag = @typeInfo(ResultInfo.Loc).Union.tag_type.?; + const need_result_rvalue = @as(LocTag, block_ri.rl) != @as(LocTag, ri.rl); + + if (while_full.label_token) |label_token| { + try astgen.checkLabelRedefinition(scope, label_token); + } + + const is_inline = while_full.inline_token != null; + if (parent_gz.is_comptime and is_inline) { + return astgen.failTok(while_full.inline_token.?, "redundant inline keyword in comptime scope", .{}); + } + const loop_tag: Zir.Inst.Tag = if (is_inline) .block_inline else .loop; + const loop_block = try parent_gz.makeBlockInst(loop_tag, node); + try parent_gz.instructions.append(astgen.gpa, loop_block); + + var loop_scope = parent_gz.makeSubBlock(scope); + loop_scope.is_inline = is_inline; + loop_scope.setBreakResultInfo(block_ri); + defer loop_scope.unstack(); + + var cond_scope = parent_gz.makeSubBlock(&loop_scope.base); + defer cond_scope.unstack(); + + const payload_is_ref = if (while_full.payload_token) |payload_token| + token_tags[payload_token] == .asterisk + else + false; + + try emitDbgNode(parent_gz, while_full.ast.cond_expr); + const cond: struct { + inst: Zir.Inst.Ref, + bool_bit: Zir.Inst.Ref, + } = c: { + if (while_full.error_token) |_| { + const cond_ri: ResultInfo = .{ .rl = if (payload_is_ref) .ref else .none }; + const err_union = try expr(&cond_scope, &cond_scope.base, cond_ri, while_full.ast.cond_expr); + const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_err_ptr else .is_non_err; + break :c .{ + .inst = err_union, + .bool_bit = try cond_scope.addUnNode(tag, err_union, while_full.ast.cond_expr), + }; + } else if (while_full.payload_token) |_| { + const cond_ri: ResultInfo = .{ .rl = if (payload_is_ref) .ref else .none }; + const optional = try expr(&cond_scope, &cond_scope.base, cond_ri, while_full.ast.cond_expr); + const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_null_ptr else .is_non_null; + break :c .{ + 
.inst = optional, + .bool_bit = try cond_scope.addUnNode(tag, optional, while_full.ast.cond_expr), + }; + } else { + const cond = try expr(&cond_scope, &cond_scope.base, coerced_bool_ri, while_full.ast.cond_expr); + break :c .{ + .inst = cond, + .bool_bit = cond, + }; + } + }; + + const condbr_tag: Zir.Inst.Tag = if (is_inline) .condbr_inline else .condbr; + const condbr = try cond_scope.addCondBr(condbr_tag, node); + const block_tag: Zir.Inst.Tag = if (is_inline) .block_inline else .block; + const cond_block = try loop_scope.makeBlockInst(block_tag, node); + try cond_scope.setBlockBody(cond_block); + // cond_scope unstacked now, can add new instructions to loop_scope + try loop_scope.instructions.append(astgen.gpa, cond_block); + + // make scope now but don't stack on parent_gz until loop_scope + // gets unstacked after cont_expr is emitted and added below + var then_scope = parent_gz.makeSubBlock(&cond_scope.base); + then_scope.instructions_top = GenZir.unstacked_top; + defer then_scope.unstack(); + + var dbg_var_name: Zir.NullTerminatedString = .empty; + var dbg_var_inst: Zir.Inst.Ref = undefined; + var opt_payload_inst: Zir.Inst.OptionalIndex = .none; + var payload_val_scope: Scope.LocalVal = undefined; + const then_sub_scope = s: { + if (while_full.error_token != null) { + if (while_full.payload_token) |payload_token| { + const tag: Zir.Inst.Tag = if (payload_is_ref) + .err_union_payload_unsafe_ptr + else + .err_union_payload_unsafe; + // will add this instruction to then_scope.instructions below + const payload_inst = try then_scope.makeUnNode(tag, cond.inst, while_full.ast.cond_expr); + opt_payload_inst = payload_inst.toOptional(); + const ident_token = payload_token + @intFromBool(payload_is_ref); + const ident_bytes = tree.tokenSlice(ident_token); + if (mem.eql(u8, "_", ident_bytes)) + break :s &then_scope.base; + const ident_name = try astgen.identAsString(ident_token); + try astgen.detectLocalShadowing(&then_scope.base, ident_name, ident_token, ident_bytes, .capture); + payload_val_scope = .{ + .parent = &then_scope.base, + .gen_zir = &then_scope, + .name = ident_name, + .inst = payload_inst.toRef(), + .token_src = ident_token, + .id_cat = .capture, + }; + dbg_var_name = ident_name; + dbg_var_inst = payload_inst.toRef(); + break :s &payload_val_scope.base; + } else { + _ = try then_scope.addUnNode(.ensure_err_union_payload_void, cond.inst, node); + break :s &then_scope.base; + } + } else if (while_full.payload_token) |payload_token| { + const ident_token = if (payload_is_ref) payload_token + 1 else payload_token; + const tag: Zir.Inst.Tag = if (payload_is_ref) + .optional_payload_unsafe_ptr + else + .optional_payload_unsafe; + // will add this instruction to then_scope.instructions below + const payload_inst = try then_scope.makeUnNode(tag, cond.inst, while_full.ast.cond_expr); + opt_payload_inst = payload_inst.toOptional(); + const ident_name = try astgen.identAsString(ident_token); + const ident_bytes = tree.tokenSlice(ident_token); + if (mem.eql(u8, "_", ident_bytes)) + break :s &then_scope.base; + try astgen.detectLocalShadowing(&then_scope.base, ident_name, ident_token, ident_bytes, .capture); + payload_val_scope = .{ + .parent = &then_scope.base, + .gen_zir = &then_scope, + .name = ident_name, + .inst = payload_inst.toRef(), + .token_src = ident_token, + .id_cat = .capture, + }; + dbg_var_name = ident_name; + dbg_var_inst = payload_inst.toRef(); + break :s &payload_val_scope.base; + } else { + break :s &then_scope.base; + } + }; + + var continue_scope = 
parent_gz.makeSubBlock(then_sub_scope); + continue_scope.instructions_top = GenZir.unstacked_top; + defer continue_scope.unstack(); + const continue_block = try then_scope.makeBlockInst(block_tag, node); + + const repeat_tag: Zir.Inst.Tag = if (is_inline) .repeat_inline else .repeat; + _ = try loop_scope.addNode(repeat_tag, node); + + try loop_scope.setBlockBody(loop_block); + loop_scope.break_block = loop_block.toOptional(); + loop_scope.continue_block = continue_block.toOptional(); + if (while_full.label_token) |label_token| { + loop_scope.label = .{ + .token = label_token, + .block_inst = loop_block, + }; + } + + // done adding instructions to loop_scope, can now stack then_scope + then_scope.instructions_top = then_scope.instructions.items.len; + + const then_node = while_full.ast.then_expr; + if (opt_payload_inst.unwrap()) |payload_inst| { + try then_scope.instructions.append(astgen.gpa, payload_inst); + } + if (dbg_var_name != .empty) try then_scope.addDbgVar(.dbg_var_val, dbg_var_name, dbg_var_inst); + try then_scope.instructions.append(astgen.gpa, continue_block); + // This code could be improved to avoid emitting the continue expr when there + // are no jumps to it. This happens when the last statement of a while body is noreturn + // and there are no `continue` statements. + // Tracking issue: https://github.com/ziglang/zig/issues/9185 + if (while_full.ast.cont_expr != 0) { + _ = try unusedResultExpr(&then_scope, then_sub_scope, while_full.ast.cont_expr); + } + + continue_scope.instructions_top = continue_scope.instructions.items.len; + _ = try unusedResultExpr(&continue_scope, &continue_scope.base, then_node); + try checkUsed(parent_gz, &then_scope.base, then_sub_scope); + const break_tag: Zir.Inst.Tag = if (is_inline) .break_inline else .@"break"; + if (!continue_scope.endsWithNoReturn()) { + _ = try continue_scope.addBreak(break_tag, continue_block, .void_value); + } + try continue_scope.setBlockBody(continue_block); + _ = try then_scope.addBreak(break_tag, cond_block, .void_value); + + var else_scope = parent_gz.makeSubBlock(&cond_scope.base); + defer else_scope.unstack(); + + const else_node = while_full.ast.else_expr; + if (else_node != 0) { + const sub_scope = s: { + if (while_full.error_token) |error_token| { + const tag: Zir.Inst.Tag = if (payload_is_ref) + .err_union_code_ptr + else + .err_union_code; + const else_payload_inst = try else_scope.addUnNode(tag, cond.inst, while_full.ast.cond_expr); + const ident_name = try astgen.identAsString(error_token); + const ident_bytes = tree.tokenSlice(error_token); + if (mem.eql(u8, ident_bytes, "_")) + break :s &else_scope.base; + try astgen.detectLocalShadowing(&else_scope.base, ident_name, error_token, ident_bytes, .capture); + payload_val_scope = .{ + .parent = &else_scope.base, + .gen_zir = &else_scope, + .name = ident_name, + .inst = else_payload_inst, + .token_src = error_token, + .id_cat = .capture, + }; + try else_scope.addDbgVar(.dbg_var_val, ident_name, else_payload_inst); + break :s &payload_val_scope.base; + } else { + break :s &else_scope.base; + } + }; + // Remove the continue block and break block so that `continue` and `break` + // control flow apply to outer loops; not this one. 
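+        // For example, in
+        //
+        //     while (outer()) {
+        //         while (inner()) {} else continue;
+        //     }
+        //
+        // the `continue` in the inner loop's else branch targets the outer loop.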
+ loop_scope.continue_block = .none; + loop_scope.break_block = .none; + const else_result = try expr(&else_scope, sub_scope, loop_scope.break_result_info, else_node); + if (is_statement) { + _ = try addEnsureResult(&else_scope, else_result, else_node); + } + + try checkUsed(parent_gz, &else_scope.base, sub_scope); + if (!else_scope.endsWithNoReturn()) { + _ = try else_scope.addBreakWithSrcNode(break_tag, loop_block, else_result, else_node); + } + } else { + const result = try rvalue(&else_scope, ri, .void_value, node); + _ = try else_scope.addBreak(break_tag, loop_block, result); + } + + if (loop_scope.label) |some| { + if (!some.used) { + try astgen.appendErrorTok(some.token, "unused while loop label", .{}); + } + } + + try setCondBrPayload(condbr, cond.bool_bit, &then_scope, &else_scope); + + const result = if (need_result_rvalue) + try rvalue(parent_gz, ri, loop_block.toRef(), node) + else + loop_block.toRef(); + + if (is_statement) { + _ = try parent_gz.addUnNode(.ensure_result_used, result, node); + } + + return result; +} + +fn forExpr( + parent_gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + for_full: Ast.full.For, + is_statement: bool, +) InnerError!Zir.Inst.Ref { + const astgen = parent_gz.astgen; + + if (for_full.label_token) |label_token| { + try astgen.checkLabelRedefinition(scope, label_token); + } + + const need_rl = astgen.nodes_need_rl.contains(node); + const block_ri: ResultInfo = if (need_rl) ri else .{ + .rl = switch (ri.rl) { + .ptr => .{ .ty = (try ri.rl.resultType(parent_gz, node)).? }, + .inferred_ptr => .none, + else => ri.rl, + }, + .ctx = ri.ctx, + }; + // We need to call `rvalue` to write through to the pointer only if we had a + // result pointer and aren't forwarding it. + const LocTag = @typeInfo(ResultInfo.Loc).Union.tag_type.?; + const need_result_rvalue = @as(LocTag, block_ri.rl) != @as(LocTag, ri.rl); + + const is_inline = for_full.inline_token != null; + if (parent_gz.is_comptime and is_inline) { + return astgen.failTok(for_full.inline_token.?, "redundant inline keyword in comptime scope", .{}); + } + const tree = astgen.tree; + const token_tags = tree.tokens.items(.tag); + const node_tags = tree.nodes.items(.tag); + const node_data = tree.nodes.items(.data); + const gpa = astgen.gpa; + + // For counters, this is the start value; for indexables, this is the base + // pointer that can be used with elem_ptr and similar instructions. + // Special value `none` means that this is a counter and its start value is + // zero, indicating that the main index counter can be used directly. + const indexables = try gpa.alloc(Zir.Inst.Ref, for_full.ast.inputs.len); + defer gpa.free(indexables); + // elements of this array can be `none`, indicating no length check. + const lens = try gpa.alloc(Zir.Inst.Ref, for_full.ast.inputs.len); + defer gpa.free(lens); + + // We will use a single zero-based counter no matter how many indexables there are. 
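+    // For example, `for (a, b, 10..) |x, y, n| { ... }` shares one counter i:
+    // `x` is loaded as `a[i]`, `y` as `b[i]`, and `n` is computed as `10 + i`.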
+ const index_ptr = blk: { + const alloc_tag: Zir.Inst.Tag = if (is_inline) .alloc_comptime_mut else .alloc; + const index_ptr = try parent_gz.addUnNode(alloc_tag, .usize_type, node); + // initialize to zero + _ = try parent_gz.addPlNode(.store_node, node, Zir.Inst.Bin{ + .lhs = index_ptr, + .rhs = .zero_usize, + }); + break :blk index_ptr; + }; + + var any_len_checks = false; + + { + var capture_token = for_full.payload_token; + for (for_full.ast.inputs, indexables, lens) |input, *indexable_ref, *len_ref| { + const capture_is_ref = token_tags[capture_token] == .asterisk; + const ident_tok = capture_token + @intFromBool(capture_is_ref); + const is_discard = mem.eql(u8, tree.tokenSlice(ident_tok), "_"); + + if (is_discard and capture_is_ref) { + return astgen.failTok(capture_token, "pointer modifier invalid on discard", .{}); + } + // Skip over the comma, and on to the next capture (or the ending pipe character). + capture_token = ident_tok + 2; + + try emitDbgNode(parent_gz, input); + if (node_tags[input] == .for_range) { + if (capture_is_ref) { + return astgen.failTok(ident_tok, "cannot capture reference to range", .{}); + } + const start_node = node_data[input].lhs; + const start_val = try expr(parent_gz, scope, .{ .rl = .{ .ty = .usize_type } }, start_node); + + const end_node = node_data[input].rhs; + const end_val = if (end_node != 0) + try expr(parent_gz, scope, .{ .rl = .{ .ty = .usize_type } }, node_data[input].rhs) + else + .none; + + if (end_val == .none and is_discard) { + return astgen.failTok(ident_tok, "discard of unbounded counter", .{}); + } + + const start_is_zero = nodeIsTriviallyZero(tree, start_node); + const range_len = if (end_val == .none or start_is_zero) + end_val + else + try parent_gz.addPlNode(.sub, input, Zir.Inst.Bin{ + .lhs = end_val, + .rhs = start_val, + }); + + any_len_checks = any_len_checks or range_len != .none; + indexable_ref.* = if (start_is_zero) .none else start_val; + len_ref.* = range_len; + } else { + const indexable = try expr(parent_gz, scope, .{ .rl = .none }, input); + + any_len_checks = true; + indexable_ref.* = indexable; + len_ref.* = indexable; + } + } + } + + if (!any_len_checks) { + return astgen.failNode(node, "unbounded for loop", .{}); + } + + // We use a dedicated ZIR instruction to assert the lengths to assist with + // nicer error reporting as well as fewer ZIR bytes emitted. + const len: Zir.Inst.Ref = len: { + const lens_len: u32 = @intCast(lens.len); + try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.MultiOp).Struct.fields.len + lens_len); + const len = try parent_gz.addPlNode(.for_len, node, Zir.Inst.MultiOp{ + .operands_len = lens_len, + }); + appendRefsAssumeCapacity(astgen, lens); + break :len len; + }; + + const loop_tag: Zir.Inst.Tag = if (is_inline) .block_inline else .loop; + const loop_block = try parent_gz.makeBlockInst(loop_tag, node); + try parent_gz.instructions.append(gpa, loop_block); + + var loop_scope = parent_gz.makeSubBlock(scope); + loop_scope.is_inline = is_inline; + loop_scope.setBreakResultInfo(block_ri); + defer loop_scope.unstack(); + + // We need to finish loop_scope later once we have the deferred refs from then_scope. However, the + // load must be removed from instructions in the meantime or it appears to be part of parent_gz. + const index = try loop_scope.addUnNode(.load, index_ptr, node); + _ = loop_scope.instructions.pop(); + + var cond_scope = parent_gz.makeSubBlock(&loop_scope.base); + defer cond_scope.unstack(); + + // Check the condition. 
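+    // A single `index < len` comparison covers every input because `for_len`
+    // above asserted that all bounded inputs have the same length.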
+ const cond = try cond_scope.addPlNode(.cmp_lt, node, Zir.Inst.Bin{ + .lhs = index, + .rhs = len, + }); + + const condbr_tag: Zir.Inst.Tag = if (is_inline) .condbr_inline else .condbr; + const condbr = try cond_scope.addCondBr(condbr_tag, node); + const block_tag: Zir.Inst.Tag = if (is_inline) .block_inline else .block; + const cond_block = try loop_scope.makeBlockInst(block_tag, node); + try cond_scope.setBlockBody(cond_block); + + loop_scope.break_block = loop_block.toOptional(); + loop_scope.continue_block = cond_block.toOptional(); + if (for_full.label_token) |label_token| { + loop_scope.label = .{ + .token = label_token, + .block_inst = loop_block, + }; + } + + const then_node = for_full.ast.then_expr; + var then_scope = parent_gz.makeSubBlock(&cond_scope.base); + defer then_scope.unstack(); + + const capture_scopes = try gpa.alloc(Scope.LocalVal, for_full.ast.inputs.len); + defer gpa.free(capture_scopes); + + const then_sub_scope = blk: { + var capture_token = for_full.payload_token; + var capture_sub_scope: *Scope = &then_scope.base; + for (for_full.ast.inputs, indexables, capture_scopes) |input, indexable_ref, *capture_scope| { + const capture_is_ref = token_tags[capture_token] == .asterisk; + const ident_tok = capture_token + @intFromBool(capture_is_ref); + const capture_name = tree.tokenSlice(ident_tok); + // Skip over the comma, and on to the next capture (or the ending pipe character). + capture_token = ident_tok + 2; + + if (mem.eql(u8, capture_name, "_")) continue; + + const name_str_index = try astgen.identAsString(ident_tok); + try astgen.detectLocalShadowing(capture_sub_scope, name_str_index, ident_tok, capture_name, .capture); + + const capture_inst = inst: { + const is_counter = node_tags[input] == .for_range; + + if (indexable_ref == .none) { + // Special case: the main index can be used directly. + assert(is_counter); + assert(!capture_is_ref); + break :inst index; + } + + // For counters, we add the index variable to the start value; for + // indexables, we use it as an element index. This is so similar + // that they can share the same code paths, branching only on the + // ZIR tag. 
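+                // For example, with `for (items, 10..) |*p, n|`: `p` is
+                // computed as elem_ptr(items, i) and `n` as add(10, i), both
+                // from the same index i.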
+ const switch_cond = (@as(u2, @intFromBool(capture_is_ref)) << 1) | @intFromBool(is_counter); + const tag: Zir.Inst.Tag = switch (switch_cond) { + 0b00 => .elem_val, + 0b01 => .add, + 0b10 => .elem_ptr, + 0b11 => unreachable, // compile error emitted already + }; + break :inst try then_scope.addPlNode(tag, input, Zir.Inst.Bin{ + .lhs = indexable_ref, + .rhs = index, + }); + }; + + capture_scope.* = .{ + .parent = capture_sub_scope, + .gen_zir = &then_scope, + .name = name_str_index, + .inst = capture_inst, + .token_src = ident_tok, + .id_cat = .capture, + }; + + try then_scope.addDbgVar(.dbg_var_val, name_str_index, capture_inst); + capture_sub_scope = &capture_scope.base; + } + + break :blk capture_sub_scope; + }; + + const then_result = try expr(&then_scope, then_sub_scope, .{ .rl = .none }, then_node); + _ = try addEnsureResult(&then_scope, then_result, then_node); + + try checkUsed(parent_gz, &then_scope.base, then_sub_scope); + + const break_tag: Zir.Inst.Tag = if (is_inline) .break_inline else .@"break"; + + _ = try then_scope.addBreak(break_tag, cond_block, .void_value); + + var else_scope = parent_gz.makeSubBlock(&cond_scope.base); + defer else_scope.unstack(); + + const else_node = for_full.ast.else_expr; + if (else_node != 0) { + const sub_scope = &else_scope.base; + // Remove the continue block and break block so that `continue` and `break` + // control flow apply to outer loops; not this one. + loop_scope.continue_block = .none; + loop_scope.break_block = .none; + const else_result = try expr(&else_scope, sub_scope, loop_scope.break_result_info, else_node); + if (is_statement) { + _ = try addEnsureResult(&else_scope, else_result, else_node); + } + if (!else_scope.endsWithNoReturn()) { + _ = try else_scope.addBreakWithSrcNode(break_tag, loop_block, else_result, else_node); + } + } else { + const result = try rvalue(&else_scope, ri, .void_value, node); + _ = try else_scope.addBreak(break_tag, loop_block, result); + } + + if (loop_scope.label) |some| { + if (!some.used) { + try astgen.appendErrorTok(some.token, "unused for loop label", .{}); + } + } + + try setCondBrPayload(condbr, cond, &then_scope, &else_scope); + + // then_block and else_block unstacked now, can resurrect loop_scope to finally finish it + { + loop_scope.instructions_top = loop_scope.instructions.items.len; + try loop_scope.instructions.appendSlice(gpa, &.{ index.toIndex().?, cond_block }); + + // Increment the index variable. 
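+        // add_unsafe is fine here: the `index < len` check above guarantees
+        // the increment cannot overflow.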
+ const index_plus_one = try loop_scope.addPlNode(.add_unsafe, node, Zir.Inst.Bin{ + .lhs = index, + .rhs = .one_usize, + }); + _ = try loop_scope.addPlNode(.store_node, node, Zir.Inst.Bin{ + .lhs = index_ptr, + .rhs = index_plus_one, + }); + const repeat_tag: Zir.Inst.Tag = if (is_inline) .repeat_inline else .repeat; + _ = try loop_scope.addNode(repeat_tag, node); + + try loop_scope.setBlockBody(loop_block); + } + + const result = if (need_result_rvalue) + try rvalue(parent_gz, ri, loop_block.toRef(), node) + else + loop_block.toRef(); + + if (is_statement) { + _ = try parent_gz.addUnNode(.ensure_result_used, result, node); + } + return result; +} + +fn switchExprErrUnion( + parent_gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + catch_or_if_node: Ast.Node.Index, + node_ty: enum { @"catch", @"if" }, +) InnerError!Zir.Inst.Ref { + const astgen = parent_gz.astgen; + const gpa = astgen.gpa; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + const node_tags = tree.nodes.items(.tag); + const main_tokens = tree.nodes.items(.main_token); + const token_tags = tree.tokens.items(.tag); + + const if_full = switch (node_ty) { + .@"catch" => undefined, + .@"if" => tree.fullIf(catch_or_if_node).?, + }; + + const switch_node, const operand_node, const error_payload = switch (node_ty) { + .@"catch" => .{ + node_datas[catch_or_if_node].rhs, + node_datas[catch_or_if_node].lhs, + main_tokens[catch_or_if_node] + 2, + }, + .@"if" => .{ + if_full.ast.else_expr, + if_full.ast.cond_expr, + if_full.error_token.?, + }, + }; + assert(node_tags[switch_node] == .@"switch" or node_tags[switch_node] == .switch_comma); + + const do_err_trace = astgen.fn_block != null; + + const extra = tree.extraData(node_datas[switch_node].rhs, Ast.Node.SubRange); + const case_nodes = tree.extra_data[extra.start..extra.end]; + + const need_rl = astgen.nodes_need_rl.contains(catch_or_if_node); + const block_ri: ResultInfo = if (need_rl) ri else .{ + .rl = switch (ri.rl) { + .ptr => .{ .ty = (try ri.rl.resultType(parent_gz, catch_or_if_node)).? }, + .inferred_ptr => .none, + else => ri.rl, + }, + .ctx = ri.ctx, + }; + + const payload_is_ref = node_ty == .@"if" and + if_full.payload_token != null and token_tags[if_full.payload_token.?] == .asterisk; + + // We need to call `rvalue` to write through to the pointer only if we had a + // result pointer and aren't forwarding it. 
+ const LocTag = @typeInfo(ResultInfo.Loc).Union.tag_type.?; + const need_result_rvalue = @as(LocTag, block_ri.rl) != @as(LocTag, ri.rl); + var scalar_cases_len: u32 = 0; + var multi_cases_len: u32 = 0; + var inline_cases_len: u32 = 0; + var has_else = false; + var else_node: Ast.Node.Index = 0; + var else_src: ?Ast.TokenIndex = null; + for (case_nodes) |case_node| { + const case = tree.fullSwitchCase(case_node).?; + + if (case.ast.values.len == 0) { + const case_src = case.ast.arrow_token - 1; + if (else_src) |src| { + return astgen.failTokNotes( + case_src, + "multiple else prongs in switch expression", + .{}, + &[_]u32{ + try astgen.errNoteTok( + src, + "previous else prong here", + .{}, + ), + }, + ); + } + has_else = true; + else_node = case_node; + else_src = case_src; + continue; + } else if (case.ast.values.len == 1 and + node_tags[case.ast.values[0]] == .identifier and + mem.eql(u8, tree.tokenSlice(main_tokens[case.ast.values[0]]), "_")) + { + const case_src = case.ast.arrow_token - 1; + return astgen.failTokNotes( + case_src, + "'_' prong is not allowed when switching on errors", + .{}, + &[_]u32{ + try astgen.errNoteTok( + case_src, + "consider using 'else'", + .{}, + ), + }, + ); + } + + for (case.ast.values) |val| { + if (node_tags[val] == .string_literal) + return astgen.failNode(val, "cannot switch on strings", .{}); + } + + if (case.ast.values.len == 1 and node_tags[case.ast.values[0]] != .switch_range) { + scalar_cases_len += 1; + } else { + multi_cases_len += 1; + } + if (case.inline_token != null) { + inline_cases_len += 1; + } + } + + const operand_ri: ResultInfo = .{ + .rl = if (payload_is_ref) .ref else .none, + .ctx = .error_handling_expr, + }; + + astgen.advanceSourceCursorToNode(operand_node); + const operand_lc = LineColumn{ astgen.source_line - parent_gz.decl_line, astgen.source_column }; + + const raw_operand = try reachableExpr(parent_gz, scope, operand_ri, operand_node, switch_node); + const item_ri: ResultInfo = .{ .rl = .none }; + + // This contains the data that goes into the `extra` array for the SwitchBlockErrUnion, except + // the first cases_nodes.len slots are a table that indexes payloads later in the array, + // with the non-error and else case indices coming first, then scalar_cases_len indexes, then + // multi_cases_len indexes + const payloads = &astgen.scratch; + const scratch_top = astgen.scratch.items.len; + const case_table_start = scratch_top; + const scalar_case_table = case_table_start + 1 + @intFromBool(has_else); + const multi_case_table = scalar_case_table + scalar_cases_len; + const case_table_end = multi_case_table + multi_cases_len; + + try astgen.scratch.resize(gpa, case_table_end); + defer astgen.scratch.items.len = scratch_top; + + var block_scope = parent_gz.makeSubBlock(scope); + // block_scope not used for collecting instructions + block_scope.instructions_top = GenZir.unstacked_top; + block_scope.setBreakResultInfo(block_ri); + + // Sema expects a dbg_stmt immediately before switch_block_err_union + try emitDbgStmtForceCurrentIndex(parent_gz, operand_lc); + // This gets added to the parent block later, after the item expressions. + const switch_block = try parent_gz.makeBlockInst(.switch_block_err_union, switch_node); + + // We re-use this same scope for all cases, including the special prong, if any. 
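+    // For example, in `mightFail() catch |err| switch (err) { ... }` the
+    // non-error prong (the unwrapped payload) and every error prong all emit
+    // their bodies through this one case_scope, restacked per prong.
+    // (`mightFail` is a stand-in for any error-union expression.)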
+ var case_scope = parent_gz.makeSubBlock(&block_scope.base); + case_scope.instructions_top = GenZir.unstacked_top; + + { + const body_len_index: u32 = @intCast(payloads.items.len); + payloads.items[case_table_start] = body_len_index; + try payloads.resize(gpa, body_len_index + 1); // body_len + + case_scope.instructions_top = parent_gz.instructions.items.len; + defer case_scope.unstack(); + + const unwrap_payload_tag: Zir.Inst.Tag = if (payload_is_ref) + .err_union_payload_unsafe_ptr + else + .err_union_payload_unsafe; + + const unwrapped_payload = try case_scope.addUnNode( + unwrap_payload_tag, + raw_operand, + catch_or_if_node, + ); + + switch (node_ty) { + .@"catch" => { + const case_result = switch (ri.rl) { + .ref, .ref_coerced_ty => unwrapped_payload, + else => try rvalue( + &case_scope, + block_scope.break_result_info, + unwrapped_payload, + catch_or_if_node, + ), + }; + _ = try case_scope.addBreakWithSrcNode( + .@"break", + switch_block, + case_result, + catch_or_if_node, + ); + }, + .@"if" => { + var payload_val_scope: Scope.LocalVal = undefined; + + const then_node = if_full.ast.then_expr; + const then_sub_scope = s: { + assert(if_full.error_token != null); + if (if_full.payload_token) |payload_token| { + const token_name_index = payload_token + @intFromBool(payload_is_ref); + const ident_name = try astgen.identAsString(token_name_index); + const token_name_str = tree.tokenSlice(token_name_index); + if (mem.eql(u8, "_", token_name_str)) + break :s &case_scope.base; + try astgen.detectLocalShadowing( + &case_scope.base, + ident_name, + token_name_index, + token_name_str, + .capture, + ); + payload_val_scope = .{ + .parent = &case_scope.base, + .gen_zir = &case_scope, + .name = ident_name, + .inst = unwrapped_payload, + .token_src = token_name_index, + .id_cat = .capture, + }; + try case_scope.addDbgVar(.dbg_var_val, ident_name, unwrapped_payload); + break :s &payload_val_scope.base; + } else { + _ = try case_scope.addUnNode( + .ensure_err_union_payload_void, + raw_operand, + catch_or_if_node, + ); + break :s &case_scope.base; + } + }; + const then_result = try expr( + &case_scope, + then_sub_scope, + block_scope.break_result_info, + then_node, + ); + try checkUsed(parent_gz, &case_scope.base, then_sub_scope); + if (!case_scope.endsWithNoReturn()) { + _ = try case_scope.addBreakWithSrcNode( + .@"break", + switch_block, + then_result, + then_node, + ); + } + }, + } + + const case_slice = case_scope.instructionsSlice(); + // Since we use the switch_block_err_union instruction itself to refer + // to the capture, which will not be added to the child block, we need + // to handle ref_table manually. 
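+        // Count the chain of ref instructions hanging off the switch_block so
+        // that space for their bodies can be reserved alongside the prong body.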
+ const refs_len = refs: { + var n: usize = 0; + var check_inst = switch_block; + while (astgen.ref_table.get(check_inst)) |ref_inst| { + n += 1; + check_inst = ref_inst; + } + break :refs n; + }; + const body_len = refs_len + astgen.countBodyLenAfterFixups(case_slice); + try payloads.ensureUnusedCapacity(gpa, body_len); + const capture: Zir.Inst.SwitchBlock.ProngInfo.Capture = switch (node_ty) { + .@"catch" => .none, + .@"if" => if (if_full.payload_token == null) + .none + else if (payload_is_ref) + .by_ref + else + .by_val, + }; + payloads.items[body_len_index] = @bitCast(Zir.Inst.SwitchBlock.ProngInfo{ + .body_len = @intCast(body_len), + .capture = capture, + .is_inline = false, + .has_tag_capture = false, + }); + if (astgen.ref_table.fetchRemove(switch_block)) |kv| { + appendPossiblyRefdBodyInst(astgen, payloads, kv.value); + } + appendBodyWithFixupsArrayList(astgen, payloads, case_slice); + } + + const err_name = blk: { + const err_str = tree.tokenSlice(error_payload); + if (mem.eql(u8, err_str, "_")) { + return astgen.failTok(error_payload, "discard of error capture; omit it instead", .{}); + } + const err_name = try astgen.identAsString(error_payload); + try astgen.detectLocalShadowing(scope, err_name, error_payload, err_str, .capture); + + break :blk err_name; + }; + + // allocate a shared dummy instruction for the error capture + const err_inst = err_inst: { + const inst: Zir.Inst.Index = @enumFromInt(astgen.instructions.len); + try astgen.instructions.append(astgen.gpa, .{ + .tag = .extended, + .data = .{ .extended = .{ + .opcode = .value_placeholder, + .small = undefined, + .operand = undefined, + } }, + }); + break :err_inst inst; + }; + + // In this pass we generate all the item and prong expressions for error cases. + var multi_case_index: u32 = 0; + var scalar_case_index: u32 = 0; + var any_uses_err_capture = false; + for (case_nodes) |case_node| { + const case = tree.fullSwitchCase(case_node).?; + + const is_multi_case = case.ast.values.len > 1 or + (case.ast.values.len == 1 and node_tags[case.ast.values[0]] == .switch_range); + + var dbg_var_name: Zir.NullTerminatedString = .empty; + var dbg_var_inst: Zir.Inst.Ref = undefined; + var err_scope: Scope.LocalVal = undefined; + var capture_scope: Scope.LocalVal = undefined; + + const sub_scope = blk: { + err_scope = .{ + .parent = &case_scope.base, + .gen_zir = &case_scope, + .name = err_name, + .inst = err_inst.toRef(), + .token_src = error_payload, + .id_cat = .capture, + }; + + const capture_token = case.payload_token orelse break :blk &err_scope.base; + if (token_tags[capture_token] != .identifier) { + return astgen.failTok(capture_token + 1, "error set cannot be captured by reference", .{}); + } + + const capture_slice = tree.tokenSlice(capture_token); + if (mem.eql(u8, capture_slice, "_")) { + return astgen.failTok(capture_token, "discard of error capture; omit it instead", .{}); + } + const tag_name = try astgen.identAsString(capture_token); + try astgen.detectLocalShadowing(&case_scope.base, tag_name, capture_token, capture_slice, .capture); + + capture_scope = .{ + .parent = &case_scope.base, + .gen_zir = &case_scope, + .name = tag_name, + .inst = switch_block.toRef(), + .token_src = capture_token, + .id_cat = .capture, + }; + dbg_var_name = tag_name; + dbg_var_inst = switch_block.toRef(); + + err_scope.parent = &capture_scope.base; + + break :blk &err_scope.base; + }; + + const header_index: u32 = @intCast(payloads.items.len); + const body_len_index = if (is_multi_case) blk: { + payloads.items[multi_case_table + 
multi_case_index] = header_index; + multi_case_index += 1; + try payloads.resize(gpa, header_index + 3); // items_len, ranges_len, body_len + + // items + var items_len: u32 = 0; + for (case.ast.values) |item_node| { + if (node_tags[item_node] == .switch_range) continue; + items_len += 1; + + const item_inst = try comptimeExpr(parent_gz, scope, item_ri, item_node); + try payloads.append(gpa, @intFromEnum(item_inst)); + } + + // ranges + var ranges_len: u32 = 0; + for (case.ast.values) |range| { + if (node_tags[range] != .switch_range) continue; + ranges_len += 1; + + const first = try comptimeExpr(parent_gz, scope, item_ri, node_datas[range].lhs); + const last = try comptimeExpr(parent_gz, scope, item_ri, node_datas[range].rhs); + try payloads.appendSlice(gpa, &[_]u32{ + @intFromEnum(first), @intFromEnum(last), + }); + } + + payloads.items[header_index] = items_len; + payloads.items[header_index + 1] = ranges_len; + break :blk header_index + 2; + } else if (case_node == else_node) blk: { + payloads.items[case_table_start + 1] = header_index; + try payloads.resize(gpa, header_index + 1); // body_len + break :blk header_index; + } else blk: { + payloads.items[scalar_case_table + scalar_case_index] = header_index; + scalar_case_index += 1; + try payloads.resize(gpa, header_index + 2); // item, body_len + const item_node = case.ast.values[0]; + const item_inst = try comptimeExpr(parent_gz, scope, item_ri, item_node); + payloads.items[header_index] = @intFromEnum(item_inst); + break :blk header_index + 1; + }; + + { + // temporarily stack case_scope on parent_gz + case_scope.instructions_top = parent_gz.instructions.items.len; + defer case_scope.unstack(); + + if (do_err_trace and nodeMayAppendToErrorTrace(tree, operand_node)) + _ = try case_scope.addSaveErrRetIndex(.always); + + if (dbg_var_name != .empty) { + try case_scope.addDbgVar(.dbg_var_val, dbg_var_name, dbg_var_inst); + } + + const target_expr_node = case.ast.target_expr; + const case_result = try expr(&case_scope, sub_scope, block_scope.break_result_info, target_expr_node); + // check capture_scope, not err_scope to avoid false positive unused error capture + try checkUsed(parent_gz, &case_scope.base, err_scope.parent); + const uses_err = err_scope.used != 0 or err_scope.discarded != 0; + if (uses_err) { + try case_scope.addDbgVar(.dbg_var_val, err_name, err_inst.toRef()); + any_uses_err_capture = true; + } + + if (!parent_gz.refIsNoReturn(case_result)) { + if (do_err_trace) + try restoreErrRetIndex( + &case_scope, + .{ .block = switch_block }, + block_scope.break_result_info, + target_expr_node, + case_result, + ); + + _ = try case_scope.addBreakWithSrcNode(.@"break", switch_block, case_result, target_expr_node); + } + + const case_slice = case_scope.instructionsSlice(); + // Since we use the switch_block_err_union instruction itself to refer + // to the capture, which will not be added to the child block, we need + // to handle ref_table manually. 
+ const refs_len = refs: { + var n: usize = 0; + var check_inst = switch_block; + while (astgen.ref_table.get(check_inst)) |ref_inst| { + n += 1; + check_inst = ref_inst; + } + if (uses_err) { + check_inst = err_inst; + while (astgen.ref_table.get(check_inst)) |ref_inst| { + n += 1; + check_inst = ref_inst; + } + } + break :refs n; + }; + const body_len = refs_len + astgen.countBodyLenAfterFixups(case_slice); + try payloads.ensureUnusedCapacity(gpa, body_len); + payloads.items[body_len_index] = @bitCast(Zir.Inst.SwitchBlock.ProngInfo{ + .body_len = @intCast(body_len), + .capture = if (case.payload_token != null) .by_val else .none, + .is_inline = case.inline_token != null, + .has_tag_capture = false, + }); + if (astgen.ref_table.fetchRemove(switch_block)) |kv| { + appendPossiblyRefdBodyInst(astgen, payloads, kv.value); + } + if (uses_err) { + if (astgen.ref_table.fetchRemove(err_inst)) |kv| { + appendPossiblyRefdBodyInst(astgen, payloads, kv.value); + } + } + appendBodyWithFixupsArrayList(astgen, payloads, case_slice); + } + } + // Now that the item expressions are generated we can add this. + try parent_gz.instructions.append(gpa, switch_block); + + try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.SwitchBlockErrUnion).Struct.fields.len + + @intFromBool(multi_cases_len != 0) + + payloads.items.len - case_table_end + + (case_table_end - case_table_start) * @typeInfo(Zir.Inst.As).Struct.fields.len); + + const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.SwitchBlockErrUnion{ + .operand = raw_operand, + .bits = Zir.Inst.SwitchBlockErrUnion.Bits{ + .has_multi_cases = multi_cases_len != 0, + .has_else = has_else, + .scalar_cases_len = @intCast(scalar_cases_len), + .any_uses_err_capture = any_uses_err_capture, + .payload_is_ref = payload_is_ref, + }, + .main_src_node_offset = parent_gz.nodeIndexToRelative(catch_or_if_node), + }); + + if (multi_cases_len != 0) { + astgen.extra.appendAssumeCapacity(multi_cases_len); + } + + if (any_uses_err_capture) { + astgen.extra.appendAssumeCapacity(@intFromEnum(err_inst)); + } + + const zir_datas = astgen.instructions.items(.data); + zir_datas[@intFromEnum(switch_block)].pl_node.payload_index = payload_index; + + for (payloads.items[case_table_start..case_table_end], 0..) 
|start_index, i| { + var body_len_index = start_index; + var end_index = start_index; + const table_index = case_table_start + i; + if (table_index < scalar_case_table) { + end_index += 1; + } else if (table_index < multi_case_table) { + body_len_index += 1; + end_index += 2; + } else { + body_len_index += 2; + const items_len = payloads.items[start_index]; + const ranges_len = payloads.items[start_index + 1]; + end_index += 3 + items_len + 2 * ranges_len; + } + const prong_info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(payloads.items[body_len_index]); + end_index += prong_info.body_len; + astgen.extra.appendSliceAssumeCapacity(payloads.items[start_index..end_index]); + } + + if (need_result_rvalue) { + return rvalue(parent_gz, ri, switch_block.toRef(), switch_node); + } else { + return switch_block.toRef(); + } +} + +fn switchExpr( + parent_gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + switch_node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = parent_gz.astgen; + const gpa = astgen.gpa; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + const node_tags = tree.nodes.items(.tag); + const main_tokens = tree.nodes.items(.main_token); + const token_tags = tree.tokens.items(.tag); + const operand_node = node_datas[switch_node].lhs; + const extra = tree.extraData(node_datas[switch_node].rhs, Ast.Node.SubRange); + const case_nodes = tree.extra_data[extra.start..extra.end]; + + const need_rl = astgen.nodes_need_rl.contains(switch_node); + const block_ri: ResultInfo = if (need_rl) ri else .{ + .rl = switch (ri.rl) { + .ptr => .{ .ty = (try ri.rl.resultType(parent_gz, switch_node)).? }, + .inferred_ptr => .none, + else => ri.rl, + }, + .ctx = ri.ctx, + }; + // We need to call `rvalue` to write through to the pointer only if we had a + // result pointer and aren't forwarding it. + const LocTag = @typeInfo(ResultInfo.Loc).Union.tag_type.?; + const need_result_rvalue = @as(LocTag, block_ri.rl) != @as(LocTag, ri.rl); + + // We perform two passes over the AST. This first pass is to collect information + // for the following variables, make note of the special prong AST node index, + // and bail out with a compile error if there are multiple special prongs present. + var any_payload_is_ref = false; + var any_has_tag_capture = false; + var scalar_cases_len: u32 = 0; + var multi_cases_len: u32 = 0; + var inline_cases_len: u32 = 0; + var special_prong: Zir.SpecialProng = .none; + var special_node: Ast.Node.Index = 0; + var else_src: ?Ast.TokenIndex = null; + var underscore_src: ?Ast.TokenIndex = null; + for (case_nodes) |case_node| { + const case = tree.fullSwitchCase(case_node).?; + if (case.payload_token) |payload_token| { + const ident = if (token_tags[payload_token] == .asterisk) blk: { + any_payload_is_ref = true; + break :blk payload_token + 1; + } else payload_token; + if (token_tags[ident + 1] == .comma) { + any_has_tag_capture = true; + } + } + // Check for else/`_` prong. 
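+        // An else prong appears in source as `else => ...`, an underscore
+        // prong as `_ => ...`; at most one of each may appear, and not both
+        // together.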
+ if (case.ast.values.len == 0) { + const case_src = case.ast.arrow_token - 1; + if (else_src) |src| { + return astgen.failTokNotes( + case_src, + "multiple else prongs in switch expression", + .{}, + &[_]u32{ + try astgen.errNoteTok( + src, + "previous else prong here", + .{}, + ), + }, + ); + } else if (underscore_src) |some_underscore| { + return astgen.failNodeNotes( + switch_node, + "else and '_' prong in switch expression", + .{}, + &[_]u32{ + try astgen.errNoteTok( + case_src, + "else prong here", + .{}, + ), + try astgen.errNoteTok( + some_underscore, + "'_' prong here", + .{}, + ), + }, + ); + } + special_node = case_node; + special_prong = .@"else"; + else_src = case_src; + continue; + } else if (case.ast.values.len == 1 and + node_tags[case.ast.values[0]] == .identifier and + mem.eql(u8, tree.tokenSlice(main_tokens[case.ast.values[0]]), "_")) + { + const case_src = case.ast.arrow_token - 1; + if (underscore_src) |src| { + return astgen.failTokNotes( + case_src, + "multiple '_' prongs in switch expression", + .{}, + &[_]u32{ + try astgen.errNoteTok( + src, + "previous '_' prong here", + .{}, + ), + }, + ); + } else if (else_src) |some_else| { + return astgen.failNodeNotes( + switch_node, + "else and '_' prong in switch expression", + .{}, + &[_]u32{ + try astgen.errNoteTok( + some_else, + "else prong here", + .{}, + ), + try astgen.errNoteTok( + case_src, + "'_' prong here", + .{}, + ), + }, + ); + } + if (case.inline_token != null) { + return astgen.failTok(case_src, "cannot inline '_' prong", .{}); + } + special_node = case_node; + special_prong = .under; + underscore_src = case_src; + continue; + } + + for (case.ast.values) |val| { + if (node_tags[val] == .string_literal) + return astgen.failNode(val, "cannot switch on strings", .{}); + } + + if (case.ast.values.len == 1 and node_tags[case.ast.values[0]] != .switch_range) { + scalar_cases_len += 1; + } else { + multi_cases_len += 1; + } + if (case.inline_token != null) { + inline_cases_len += 1; + } + } + + const operand_ri: ResultInfo = .{ .rl = if (any_payload_is_ref) .ref else .none }; + + astgen.advanceSourceCursorToNode(operand_node); + const operand_lc = LineColumn{ astgen.source_line - parent_gz.decl_line, astgen.source_column }; + + const raw_operand = try expr(parent_gz, scope, operand_ri, operand_node); + const item_ri: ResultInfo = .{ .rl = .none }; + + // This contains the data that goes into the `extra` array for the SwitchBlock/SwitchBlockMulti, + // except the first cases_nodes.len slots are a table that indexes payloads later in the array, with + // the special case index coming first, then scalar_case_len indexes, then multi_cases_len indexes + const payloads = &astgen.scratch; + const scratch_top = astgen.scratch.items.len; + const case_table_start = scratch_top; + const scalar_case_table = case_table_start + @intFromBool(special_prong != .none); + const multi_case_table = scalar_case_table + scalar_cases_len; + const case_table_end = multi_case_table + multi_cases_len; + try astgen.scratch.resize(gpa, case_table_end); + defer astgen.scratch.items.len = scratch_top; + + var block_scope = parent_gz.makeSubBlock(scope); + // block_scope not used for collecting instructions + block_scope.instructions_top = GenZir.unstacked_top; + block_scope.setBreakResultInfo(block_ri); + + // Sema expects a dbg_stmt immediately before switch_block(_ref) + try emitDbgStmtForceCurrentIndex(parent_gz, operand_lc); + // This gets added to the parent block later, after the item expressions. 
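+    // If any prong captures its payload by reference, the operand above was
+    // evaluated as a ref, and the ref variant of the instruction is used.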
+ const switch_tag: Zir.Inst.Tag = if (any_payload_is_ref) .switch_block_ref else .switch_block; + const switch_block = try parent_gz.makeBlockInst(switch_tag, switch_node); + + // We re-use this same scope for all cases, including the special prong, if any. + var case_scope = parent_gz.makeSubBlock(&block_scope.base); + case_scope.instructions_top = GenZir.unstacked_top; + + // If any prong has an inline tag capture, allocate a shared dummy instruction for it + const tag_inst = if (any_has_tag_capture) tag_inst: { + const inst: Zir.Inst.Index = @enumFromInt(astgen.instructions.len); + try astgen.instructions.append(astgen.gpa, .{ + .tag = .extended, + .data = .{ .extended = .{ + .opcode = .value_placeholder, + .small = undefined, + .operand = undefined, + } }, + }); + break :tag_inst inst; + } else undefined; + + // In this pass we generate all the item and prong expressions. + var multi_case_index: u32 = 0; + var scalar_case_index: u32 = 0; + for (case_nodes) |case_node| { + const case = tree.fullSwitchCase(case_node).?; + + const is_multi_case = case.ast.values.len > 1 or + (case.ast.values.len == 1 and node_tags[case.ast.values[0]] == .switch_range); + + var dbg_var_name: Zir.NullTerminatedString = .empty; + var dbg_var_inst: Zir.Inst.Ref = undefined; + var dbg_var_tag_name: Zir.NullTerminatedString = .empty; + var dbg_var_tag_inst: Zir.Inst.Ref = undefined; + var has_tag_capture = false; + var capture_val_scope: Scope.LocalVal = undefined; + var tag_scope: Scope.LocalVal = undefined; + + var capture: Zir.Inst.SwitchBlock.ProngInfo.Capture = .none; + + const sub_scope = blk: { + const payload_token = case.payload_token orelse break :blk &case_scope.base; + const ident = if (token_tags[payload_token] == .asterisk) + payload_token + 1 + else + payload_token; + + const is_ptr = ident != payload_token; + capture = if (is_ptr) .by_ref else .by_val; + + const ident_slice = tree.tokenSlice(ident); + var payload_sub_scope: *Scope = undefined; + if (mem.eql(u8, ident_slice, "_")) { + if (is_ptr) { + return astgen.failTok(payload_token, "pointer modifier invalid on discard", .{}); + } + payload_sub_scope = &case_scope.base; + } else { + const capture_name = try astgen.identAsString(ident); + try astgen.detectLocalShadowing(&case_scope.base, capture_name, ident, ident_slice, .capture); + capture_val_scope = .{ + .parent = &case_scope.base, + .gen_zir = &case_scope, + .name = capture_name, + .inst = switch_block.toRef(), + .token_src = ident, + .id_cat = .capture, + }; + dbg_var_name = capture_name; + dbg_var_inst = switch_block.toRef(); + payload_sub_scope = &capture_val_scope.base; + } + + const tag_token = if (token_tags[ident + 1] == .comma) + ident + 2 + else + break :blk payload_sub_scope; + const tag_slice = tree.tokenSlice(tag_token); + if (mem.eql(u8, tag_slice, "_")) { + return astgen.failTok(tag_token, "discard of tag capture; omit it instead", .{}); + } else if (case.inline_token == null) { + return astgen.failTok(tag_token, "tag capture on non-inline prong", .{}); + } + const tag_name = try astgen.identAsString(tag_token); + try astgen.detectLocalShadowing(payload_sub_scope, tag_name, tag_token, tag_slice, .@"switch tag capture"); + + assert(any_has_tag_capture); + has_tag_capture = true; + + tag_scope = .{ + .parent = payload_sub_scope, + .gen_zir = &case_scope, + .name = tag_name, + .inst = tag_inst.toRef(), + .token_src = tag_token, + .id_cat = .@"switch tag capture", + }; + dbg_var_tag_name = tag_name; + dbg_var_tag_inst = tag_inst.toRef(); + break :blk &tag_scope.base; + }; + + 
const header_index: u32 = @intCast(payloads.items.len); + const body_len_index = if (is_multi_case) blk: { + payloads.items[multi_case_table + multi_case_index] = header_index; + multi_case_index += 1; + try payloads.resize(gpa, header_index + 3); // items_len, ranges_len, body_len + + // items + var items_len: u32 = 0; + for (case.ast.values) |item_node| { + if (node_tags[item_node] == .switch_range) continue; + items_len += 1; + + const item_inst = try comptimeExpr(parent_gz, scope, item_ri, item_node); + try payloads.append(gpa, @intFromEnum(item_inst)); + } + + // ranges + var ranges_len: u32 = 0; + for (case.ast.values) |range| { + if (node_tags[range] != .switch_range) continue; + ranges_len += 1; + + const first = try comptimeExpr(parent_gz, scope, item_ri, node_datas[range].lhs); + const last = try comptimeExpr(parent_gz, scope, item_ri, node_datas[range].rhs); + try payloads.appendSlice(gpa, &[_]u32{ + @intFromEnum(first), @intFromEnum(last), + }); + } + + payloads.items[header_index] = items_len; + payloads.items[header_index + 1] = ranges_len; + break :blk header_index + 2; + } else if (case_node == special_node) blk: { + payloads.items[case_table_start] = header_index; + try payloads.resize(gpa, header_index + 1); // body_len + break :blk header_index; + } else blk: { + payloads.items[scalar_case_table + scalar_case_index] = header_index; + scalar_case_index += 1; + try payloads.resize(gpa, header_index + 2); // item, body_len + const item_node = case.ast.values[0]; + const item_inst = try comptimeExpr(parent_gz, scope, item_ri, item_node); + payloads.items[header_index] = @intFromEnum(item_inst); + break :blk header_index + 1; + }; + + { + // temporarily stack case_scope on parent_gz + case_scope.instructions_top = parent_gz.instructions.items.len; + defer case_scope.unstack(); + + if (dbg_var_name != .empty) { + try case_scope.addDbgVar(.dbg_var_val, dbg_var_name, dbg_var_inst); + } + if (dbg_var_tag_name != .empty) { + try case_scope.addDbgVar(.dbg_var_val, dbg_var_tag_name, dbg_var_tag_inst); + } + const target_expr_node = case.ast.target_expr; + const case_result = try expr(&case_scope, sub_scope, block_scope.break_result_info, target_expr_node); + try checkUsed(parent_gz, &case_scope.base, sub_scope); + if (!parent_gz.refIsNoReturn(case_result)) { + _ = try case_scope.addBreakWithSrcNode(.@"break", switch_block, case_result, target_expr_node); + } + + const case_slice = case_scope.instructionsSlice(); + // Since we use the switch_block instruction itself to refer to the + // capture, which will not be added to the child block, we need to + // handle ref_table manually, and the same for the inline tag + // capture instruction. 
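+            // Count the pending `ref` instructions chained off the switch
+            // block, and off the shared tag capture placeholder when this
+            // prong uses one, so that body_len accounts for them.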
+ const refs_len = refs: { + var n: usize = 0; + var check_inst = switch_block; + while (astgen.ref_table.get(check_inst)) |ref_inst| { + n += 1; + check_inst = ref_inst; + } + if (has_tag_capture) { + check_inst = tag_inst; + while (astgen.ref_table.get(check_inst)) |ref_inst| { + n += 1; + check_inst = ref_inst; + } + } + break :refs n; + }; + const body_len = refs_len + astgen.countBodyLenAfterFixups(case_slice); + try payloads.ensureUnusedCapacity(gpa, body_len); + payloads.items[body_len_index] = @bitCast(Zir.Inst.SwitchBlock.ProngInfo{ + .body_len = @intCast(body_len), + .capture = capture, + .is_inline = case.inline_token != null, + .has_tag_capture = has_tag_capture, + }); + if (astgen.ref_table.fetchRemove(switch_block)) |kv| { + appendPossiblyRefdBodyInst(astgen, payloads, kv.value); + } + if (has_tag_capture) { + if (astgen.ref_table.fetchRemove(tag_inst)) |kv| { + appendPossiblyRefdBodyInst(astgen, payloads, kv.value); + } + } + appendBodyWithFixupsArrayList(astgen, payloads, case_slice); + } + } + // Now that the item expressions are generated we can add this. + try parent_gz.instructions.append(gpa, switch_block); + + try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.SwitchBlock).Struct.fields.len + + @intFromBool(multi_cases_len != 0) + + @intFromBool(any_has_tag_capture) + + payloads.items.len - case_table_end + + (case_table_end - case_table_start) * @typeInfo(Zir.Inst.As).Struct.fields.len); + + const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.SwitchBlock{ + .operand = raw_operand, + .bits = Zir.Inst.SwitchBlock.Bits{ + .has_multi_cases = multi_cases_len != 0, + .has_else = special_prong == .@"else", + .has_under = special_prong == .under, + .any_has_tag_capture = any_has_tag_capture, + .scalar_cases_len = @intCast(scalar_cases_len), + }, + }); + + if (multi_cases_len != 0) { + astgen.extra.appendAssumeCapacity(multi_cases_len); + } + + if (any_has_tag_capture) { + astgen.extra.appendAssumeCapacity(@intFromEnum(tag_inst)); + } + + const zir_datas = astgen.instructions.items(.data); + zir_datas[@intFromEnum(switch_block)].pl_node.payload_index = payload_index; + + for (payloads.items[case_table_start..case_table_end], 0..) 
|start_index, i| { + var body_len_index = start_index; + var end_index = start_index; + const table_index = case_table_start + i; + if (table_index < scalar_case_table) { + end_index += 1; + } else if (table_index < multi_case_table) { + body_len_index += 1; + end_index += 2; + } else { + body_len_index += 2; + const items_len = payloads.items[start_index]; + const ranges_len = payloads.items[start_index + 1]; + end_index += 3 + items_len + 2 * ranges_len; + } + const prong_info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(payloads.items[body_len_index]); + end_index += prong_info.body_len; + astgen.extra.appendSliceAssumeCapacity(payloads.items[start_index..end_index]); + } + + if (need_result_rvalue) { + return rvalue(parent_gz, ri, switch_block.toRef(), switch_node); + } else { + return switch_block.toRef(); + } +} + +fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + const node_tags = tree.nodes.items(.tag); + + if (astgen.fn_block == null) { + return astgen.failNode(node, "'return' outside function scope", .{}); + } + + if (gz.any_defer_node != 0) { + return astgen.failNodeNotes(node, "cannot return from defer expression", .{}, &.{ + try astgen.errNoteNode( + gz.any_defer_node, + "defer expression here", + .{}, + ), + }); + } + + // Ensure debug line/column information is emitted for this return expression. + // Then we will save the line/column so that we can emit another one that goes + // "backwards" because we want to evaluate the operand, but then put the debug + // info back at the return keyword for error return tracing. + if (!gz.is_comptime) { + try emitDbgNode(gz, node); + } + const ret_lc = LineColumn{ astgen.source_line - gz.decl_line, astgen.source_column }; + + const defer_outer = &astgen.fn_block.?.base; + + const operand_node = node_datas[node].lhs; + if (operand_node == 0) { + // Returning a void value; skip error defers. + try genDefers(gz, defer_outer, scope, .normal_only); + + // As our last action before the return, "pop" the error trace if needed + _ = try gz.addRestoreErrRetIndex(.ret, .always, node); + + _ = try gz.addUnNode(.ret_node, .void_value, node); + return Zir.Inst.Ref.unreachable_value; + } + + if (node_tags[operand_node] == .error_value) { + // Hot path for `return error.Foo`. This bypasses result location logic as well as logic + // for detecting whether to add something to the function's inferred error set. 
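+        // When no errdefer needs the error code, this lowers to a single
+        // ret_err_value instruction; otherwise ret_err_value_code materializes
+        // the code for the defers first.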
+ const ident_token = node_datas[operand_node].rhs; + const err_name_str_index = try astgen.identAsString(ident_token); + const defer_counts = countDefers(defer_outer, scope); + if (!defer_counts.need_err_code) { + try genDefers(gz, defer_outer, scope, .both_sans_err); + try emitDbgStmt(gz, ret_lc); + _ = try gz.addStrTok(.ret_err_value, err_name_str_index, ident_token); + return Zir.Inst.Ref.unreachable_value; + } + const err_code = try gz.addStrTok(.ret_err_value_code, err_name_str_index, ident_token); + try genDefers(gz, defer_outer, scope, .{ .both = err_code }); + try emitDbgStmt(gz, ret_lc); + _ = try gz.addUnNode(.ret_node, err_code, node); + return Zir.Inst.Ref.unreachable_value; + } + + const ri: ResultInfo = if (astgen.nodes_need_rl.contains(node)) .{ + .rl = .{ .ptr = .{ .inst = try gz.addNode(.ret_ptr, node) } }, + .ctx = .@"return", + } else .{ + .rl = .{ .coerced_ty = astgen.fn_ret_ty }, + .ctx = .@"return", + }; + const prev_anon_name_strategy = gz.anon_name_strategy; + gz.anon_name_strategy = .func; + const operand = try reachableExpr(gz, scope, ri, operand_node, node); + gz.anon_name_strategy = prev_anon_name_strategy; + + switch (nodeMayEvalToError(tree, operand_node)) { + .never => { + // Returning a value that cannot be an error; skip error defers. + try genDefers(gz, defer_outer, scope, .normal_only); + + // As our last action before the return, "pop" the error trace if needed + _ = try gz.addRestoreErrRetIndex(.ret, .always, node); + + try emitDbgStmt(gz, ret_lc); + try gz.addRet(ri, operand, node); + return Zir.Inst.Ref.unreachable_value; + }, + .always => { + // Value is always an error. Emit both error defers and regular defers. + const err_code = if (ri.rl == .ptr) try gz.addUnNode(.load, ri.rl.ptr.inst, node) else operand; + try genDefers(gz, defer_outer, scope, .{ .both = err_code }); + try emitDbgStmt(gz, ret_lc); + try gz.addRet(ri, operand, node); + return Zir.Inst.Ref.unreachable_value; + }, + .maybe => { + const defer_counts = countDefers(defer_outer, scope); + if (!defer_counts.have_err) { + // Only regular defers; no branch needed. + try genDefers(gz, defer_outer, scope, .normal_only); + try emitDbgStmt(gz, ret_lc); + + // As our last action before the return, "pop" the error trace if needed + const result = if (ri.rl == .ptr) try gz.addUnNode(.load, ri.rl.ptr.inst, node) else operand; + _ = try gz.addRestoreErrRetIndex(.ret, .{ .if_non_error = result }, node); + + try gz.addRet(ri, operand, node); + return Zir.Inst.Ref.unreachable_value; + } + + // Emit conditional branch for generating errdefers. 
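+            // ret_is_non_err selects the path: the then branch runs only
+            // regular defers, while the else branch also runs errdefers,
+            // loading the error code for them when one of them needs it.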
+ const result = if (ri.rl == .ptr) try gz.addUnNode(.load, ri.rl.ptr.inst, node) else operand; + const is_non_err = try gz.addUnNode(.ret_is_non_err, result, node); + const condbr = try gz.addCondBr(.condbr, node); + + var then_scope = gz.makeSubBlock(scope); + defer then_scope.unstack(); + + try genDefers(&then_scope, defer_outer, scope, .normal_only); + + // As our last action before the return, "pop" the error trace if needed + _ = try then_scope.addRestoreErrRetIndex(.ret, .always, node); + + try emitDbgStmt(&then_scope, ret_lc); + try then_scope.addRet(ri, operand, node); + + var else_scope = gz.makeSubBlock(scope); + defer else_scope.unstack(); + + const which_ones: DefersToEmit = if (!defer_counts.need_err_code) .both_sans_err else .{ + .both = try else_scope.addUnNode(.err_union_code, result, node), + }; + try genDefers(&else_scope, defer_outer, scope, which_ones); + try emitDbgStmt(&else_scope, ret_lc); + try else_scope.addRet(ri, operand, node); + + try setCondBrPayload(condbr, is_non_err, &then_scope, &else_scope); + + return Zir.Inst.Ref.unreachable_value; + }, + } +} + +/// Parses the string `buf` as a base 10 integer of type `u16`. +/// +/// Unlike std.fmt.parseInt, does not allow the '_' character in `buf`. +fn parseBitCount(buf: []const u8) std.fmt.ParseIntError!u16 { + if (buf.len == 0) return error.InvalidCharacter; + + var x: u16 = 0; + + for (buf) |c| { + const digit = switch (c) { + '0'...'9' => c - '0', + else => return error.InvalidCharacter, + }; + + if (x != 0) x = try std.math.mul(u16, x, 10); + x = try std.math.add(u16, x, digit); + } + + return x; +} + +fn identifier( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + ident: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const main_tokens = tree.nodes.items(.main_token); + + const ident_token = main_tokens[ident]; + const ident_name_raw = tree.tokenSlice(ident_token); + if (mem.eql(u8, ident_name_raw, "_")) { + return astgen.failNode(ident, "'_' used as an identifier without @\"_\" syntax", .{}); + } + + // if not @"" syntax, just use raw token slice + if (ident_name_raw[0] != '@') { + if (primitive_instrs.get(ident_name_raw)) |zir_const_ref| { + return rvalue(gz, ri, zir_const_ref, ident); + } + + if (ident_name_raw.len >= 2) integer: { + const first_c = ident_name_raw[0]; + if (first_c == 'i' or first_c == 'u') { + const signedness: std.builtin.Signedness = switch (first_c == 'i') { + true => .signed, + false => .unsigned, + }; + if (ident_name_raw.len >= 3 and ident_name_raw[1] == '0') { + return astgen.failNode( + ident, + "primitive integer type '{s}' has leading zero", + .{ident_name_raw}, + ); + } + const bit_count = parseBitCount(ident_name_raw[1..]) catch |err| switch (err) { + error.Overflow => return astgen.failNode( + ident, + "primitive integer type '{s}' exceeds maximum bit width of 65535", + .{ident_name_raw}, + ), + error.InvalidCharacter => break :integer, + }; + const result = try gz.add(.{ + .tag = .int_type, + .data = .{ .int_type = .{ + .src_node = gz.nodeIndexToRelative(ident), + .signedness = signedness, + .bit_count = bit_count, + } }, + }); + return rvalue(gz, ri, result, ident); + } + } + } + + // Local variables, including function parameters. 
+ return localVarRef(gz, scope, ri, ident, ident_token); +} + +fn localVarRef( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + ident: Ast.Node.Index, + ident_token: Ast.TokenIndex, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const gpa = astgen.gpa; + const name_str_index = try astgen.identAsString(ident_token); + var s = scope; + var found_already: ?Ast.Node.Index = null; // we have found a decl with the same name already + var num_namespaces_out: u32 = 0; + var capturing_namespace: ?*Scope.Namespace = null; + while (true) switch (s.tag) { + .local_val => { + const local_val = s.cast(Scope.LocalVal).?; + + if (local_val.name == name_str_index) { + // Locals cannot shadow anything, so we do not need to look for ambiguous + // references in this case. + if (ri.rl == .discard and ri.ctx == .assignment) { + local_val.discarded = ident_token; + } else { + local_val.used = ident_token; + } + + const value_inst = try tunnelThroughClosure( + gz, + ident, + num_namespaces_out, + capturing_namespace, + local_val.inst, + local_val.token_src, + gpa, + ); + + return rvalueNoCoercePreRef(gz, ri, value_inst, ident); + } + s = local_val.parent; + }, + .local_ptr => { + const local_ptr = s.cast(Scope.LocalPtr).?; + if (local_ptr.name == name_str_index) { + if (ri.rl == .discard and ri.ctx == .assignment) { + local_ptr.discarded = ident_token; + } else { + local_ptr.used = ident_token; + } + + // Can't close over a runtime variable + if (num_namespaces_out != 0 and !local_ptr.maybe_comptime and !gz.is_typeof) { + const ident_name = try astgen.identifierTokenString(ident_token); + return astgen.failNodeNotes(ident, "mutable '{s}' not accessible from here", .{ident_name}, &.{ + try astgen.errNoteTok(local_ptr.token_src, "declared mutable here", .{}), + try astgen.errNoteNode(capturing_namespace.?.node, "crosses namespace boundary here", .{}), + }); + } + + const ptr_inst = try tunnelThroughClosure( + gz, + ident, + num_namespaces_out, + capturing_namespace, + local_ptr.ptr, + local_ptr.token_src, + gpa, + ); + + switch (ri.rl) { + .ref, .ref_coerced_ty => { + local_ptr.used_as_lvalue = true; + return ptr_inst; + }, + else => { + const loaded = try gz.addUnNode(.load, ptr_inst, ident); + return rvalueNoCoercePreRef(gz, ri, loaded, ident); + }, + } + } + s = local_ptr.parent; + }, + .gen_zir => s = s.cast(GenZir).?.parent, + .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent, + .namespace, .enum_namespace => { + const ns = s.cast(Scope.Namespace).?; + if (ns.decls.get(name_str_index)) |i| { + if (found_already) |f| { + return astgen.failNodeNotes(ident, "ambiguous reference", .{}, &.{ + try astgen.errNoteNode(f, "declared here", .{}), + try astgen.errNoteNode(i, "also declared here", .{}), + }); + } + // We found a match but must continue looking for ambiguous references to decls. + found_already = i; + } + if (s.tag == .namespace) num_namespaces_out += 1; + capturing_namespace = ns; + s = ns.parent; + }, + .top => break, + }; + if (found_already == null) { + const ident_name = try astgen.identifierTokenString(ident_token); + return astgen.failNode(ident, "use of undeclared identifier '{s}'", .{ident_name}); + } + + // Decl references happen by name rather than ZIR index so that when unrelated + // decls are modified, ZIR code containing references to them can be unmodified. 
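+    // A ref result location yields a pointer to the decl via decl_ref;
+    // otherwise decl_val yields its value.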
+ switch (ri.rl) { + .ref, .ref_coerced_ty => return gz.addStrTok(.decl_ref, name_str_index, ident_token), + else => { + const result = try gz.addStrTok(.decl_val, name_str_index, ident_token); + return rvalueNoCoercePreRef(gz, ri, result, ident); + }, + } +} + +/// Adds a capture to a namespace, if needed. +/// Returns the index of the closure_capture instruction. +fn tunnelThroughClosure( + gz: *GenZir, + inner_ref_node: Ast.Node.Index, + num_tunnels: u32, + ns: ?*Scope.Namespace, + value: Zir.Inst.Ref, + token: Ast.TokenIndex, + gpa: Allocator, +) !Zir.Inst.Ref { + // For trivial values, we don't need a tunnel. + // Just return the ref. + if (num_tunnels == 0 or value.toIndex() == null) { + return value; + } + + // Otherwise we need a tunnel. Check if this namespace + // already has one for this value. + const gop = try ns.?.captures.getOrPut(gpa, value.toIndex().?); + if (!gop.found_existing) { + // Make a new capture for this value but don't add it to the declaring_gz yet + try gz.astgen.instructions.append(gz.astgen.gpa, .{ + .tag = .closure_capture, + .data = .{ .un_tok = .{ + .operand = value, + .src_tok = ns.?.declaring_gz.?.tokenIndexToRelative(token), + } }, + }); + gop.value_ptr.* = @enumFromInt(gz.astgen.instructions.len - 1); + } + + // Add an instruction to get the value from the closure into + // our current context + return try gz.addInstNode(.closure_get, gop.value_ptr.*, inner_ref_node); +} + +fn stringLiteral( + gz: *GenZir, + ri: ResultInfo, + node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const main_tokens = tree.nodes.items(.main_token); + const str_lit_token = main_tokens[node]; + const str = try astgen.strLitAsString(str_lit_token); + const result = try gz.add(.{ + .tag = .str, + .data = .{ .str = .{ + .start = str.index, + .len = str.len, + } }, + }); + return rvalue(gz, ri, result, node); +} + +fn multilineStringLiteral( + gz: *GenZir, + ri: ResultInfo, + node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const str = try astgen.strLitNodeAsString(node); + const result = try gz.add(.{ + .tag = .str, + .data = .{ .str = .{ + .start = str.index, + .len = str.len, + } }, + }); + return rvalue(gz, ri, result, node); +} + +fn charLiteral(gz: *GenZir, ri: ResultInfo, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const main_tokens = tree.nodes.items(.main_token); + const main_token = main_tokens[node]; + const slice = tree.tokenSlice(main_token); + + switch (std.zig.parseCharLiteral(slice)) { + .success => |codepoint| { + const result = try gz.addInt(codepoint); + return rvalue(gz, ri, result, node); + }, + .failure => |err| return astgen.failWithStrLitError(err, main_token, slice, 0), + } +} + +const Sign = enum { negative, positive }; + +fn numberLiteral(gz: *GenZir, ri: ResultInfo, node: Ast.Node.Index, source_node: Ast.Node.Index, sign: Sign) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const main_tokens = tree.nodes.items(.main_token); + const num_token = main_tokens[node]; + const bytes = tree.tokenSlice(num_token); + + const result: Zir.Inst.Ref = switch (std.zig.parseNumberLiteral(bytes)) { + .int => |num| switch (num) { + 0 => if (sign == .positive) .zero else return astgen.failTokNotes( + num_token, + "integer literal '-0' is ambiguous", + .{}, + &.{ + try astgen.errNoteTok(num_token, "use '0' for an integer zero", .{}), + try astgen.errNoteTok(num_token, "use '-0.0' for a 
floating-point signed zero", .{}),
+                },
+            ),
+            1 => .one,
+            else => try gz.addInt(num),
+        },
+        .big_int => |base| big: {
+            const gpa = astgen.gpa;
+            var big_int = try std.math.big.int.Managed.init(gpa);
+            defer big_int.deinit();
+            const prefix_offset: usize = if (base == .decimal) 0 else 2;
+            big_int.setString(@intFromEnum(base), bytes[prefix_offset..]) catch |err| switch (err) {
+                error.InvalidCharacter => unreachable, // caught in `parseNumberLiteral`
+                error.InvalidBase => unreachable, // we only pass 10, 16, 8, 2, see above
+                error.OutOfMemory => return error.OutOfMemory,
+            };
+
+            const limbs = big_int.limbs[0..big_int.len()];
+            assert(big_int.isPositive());
+            break :big try gz.addIntBig(limbs);
+        },
+        .float => {
+            const unsigned_float_number = std.fmt.parseFloat(f128, bytes) catch |err| switch (err) {
+                error.InvalidCharacter => unreachable, // validated by tokenizer
+            };
+            const float_number = switch (sign) {
+                .negative => -unsigned_float_number,
+                .positive => unsigned_float_number,
+            };
+            // If the value fits into an f64 without losing any precision, store it that way.
+            @setFloatMode(.Strict);
+            const smaller_float: f64 = @floatCast(float_number);
+            const bigger_again: f128 = smaller_float;
+            if (bigger_again == float_number) {
+                const result = try gz.addFloat(smaller_float);
+                return rvalue(gz, ri, result, source_node);
+            }
+            // We need to use 128 bits. Break the float into 4 u32 values so we can
+            // put it into the `extra` array.
+            const int_bits: u128 = @bitCast(float_number);
+            const result = try gz.addPlNode(.float128, node, Zir.Inst.Float128{
+                .piece0 = @truncate(int_bits),
+                .piece1 = @truncate(int_bits >> 32),
+                .piece2 = @truncate(int_bits >> 64),
+                .piece3 = @truncate(int_bits >> 96),
+            });
+            return rvalue(gz, ri, result, source_node);
+        },
+        .failure => |err| return astgen.failWithNumberError(err, num_token, bytes),
+    };
+
+    if (sign == .positive) {
+        return rvalue(gz, ri, result, source_node);
+    } else {
+        const negated = try gz.addUnNode(.negate, result, source_node);
+        return rvalue(gz, ri, negated, source_node);
+    }
+}
+
+fn failWithNumberError(astgen: *AstGen, err: std.zig.number_literal.Error, token: Ast.TokenIndex, bytes: []const u8) InnerError {
+    const is_float = std.mem.indexOfScalar(u8, bytes, '.') != null;
+    switch (err) {
+        .leading_zero => if (is_float) {
+            return astgen.failTok(token, "number '{s}' has leading zero", .{bytes});
+        } else {
+            return astgen.failTokNotes(token, "number '{s}' has leading zero", .{bytes}, &.{
+                try astgen.errNoteTok(token, "use '0o' prefix for octal literals", .{}),
+            });
+        },
+        .digit_after_base => return astgen.failTok(token, "expected a digit after base prefix", .{}),
+        .upper_case_base => |i| return astgen.failOff(token, @intCast(i), "base prefix must be lowercase", .{}),
+        .invalid_float_base => |i| return astgen.failOff(token, @intCast(i), "invalid base for float literal", .{}),
+        .repeated_underscore => |i| return astgen.failOff(token, @intCast(i), "repeated digit separator", .{}),
+        .invalid_underscore_after_special => |i| return astgen.failOff(token, @intCast(i), "expected digit before digit separator", .{}),
+        .invalid_digit => |info| return astgen.failOff(token, @intCast(info.i), "invalid digit '{c}' for {s} base", .{ bytes[info.i], @tagName(info.base) }),
+        .invalid_digit_exponent => |i| return astgen.failOff(token, @intCast(i), "invalid digit '{c}' in exponent", .{bytes[i]}),
+        .duplicate_exponent => |i| return astgen.failOff(token, @intCast(i), "duplicate exponent", .{}),
+        .exponent_after_underscore => |i| return 
astgen.failOff(token, @intCast(i), "expected digit before exponent", .{}),
+        .special_after_underscore => |i| return astgen.failOff(token, @intCast(i), "expected digit before '{c}'", .{bytes[i]}),
+        .trailing_special => |i| return astgen.failOff(token, @intCast(i), "expected digit after '{c}'", .{bytes[i - 1]}),
+        .trailing_underscore => |i| return astgen.failOff(token, @intCast(i), "trailing digit separator", .{}),
+        .duplicate_period => unreachable, // Validated by tokenizer
+        .invalid_character => unreachable, // Validated by tokenizer
+        .invalid_exponent_sign => |i| {
+            assert(bytes.len >= 2 and bytes[0] == '0' and bytes[1] == 'x'); // Validated by tokenizer
+            return astgen.failOff(token, @intCast(i), "sign '{c}' cannot follow digit '{c}' in hex base", .{ bytes[i], bytes[i - 1] });
+        },
+    }
+}
+
+fn asmExpr(
+    gz: *GenZir,
+    scope: *Scope,
+    ri: ResultInfo,
+    node: Ast.Node.Index,
+    full: Ast.full.Asm,
+) InnerError!Zir.Inst.Ref {
+    const astgen = gz.astgen;
+    const tree = astgen.tree;
+    const main_tokens = tree.nodes.items(.main_token);
+    const node_datas = tree.nodes.items(.data);
+    const node_tags = tree.nodes.items(.tag);
+    const token_tags = tree.tokens.items(.tag);
+
+    const TagAndTmpl = struct { tag: Zir.Inst.Extended, tmpl: Zir.NullTerminatedString };
+    const tag_and_tmpl: TagAndTmpl = switch (node_tags[full.ast.template]) {
+        .string_literal => .{
+            .tag = .@"asm",
+            .tmpl = (try astgen.strLitAsString(main_tokens[full.ast.template])).index,
+        },
+        .multiline_string_literal => .{
+            .tag = .@"asm",
+            .tmpl = (try astgen.strLitNodeAsString(full.ast.template)).index,
+        },
+        else => .{
+            .tag = .asm_expr,
+            .tmpl = @enumFromInt(@intFromEnum(try comptimeExpr(gz, scope, .{ .rl = .none }, full.ast.template))),
+        },
+    };
+
+    // See https://github.com/ziglang/zig/issues/215 and related issues discussing
+    // possible inline assembly improvements. Until then, this is the status quo
+    // AstGen lowering for assembly syntax. It's used by the std lib crypto aesni.zig.
+    const is_container_asm = astgen.fn_block == null;
+    if (is_container_asm) {
+        if (full.volatile_token) |t|
+            return astgen.failTok(t, "volatile is meaningless on global assembly", .{});
+        if (full.outputs.len != 0 or full.inputs.len != 0 or full.first_clobber != null)
+            return astgen.failNode(node, "global assembly cannot have inputs, outputs, or clobbers", .{});
+    } else {
+        if (full.outputs.len == 0 and full.volatile_token == null) {
+            return astgen.failNode(node, "assembly expression with no output must be marked volatile", .{});
+        }
+    }
+    if (full.outputs.len > 32) {
+        return astgen.failNode(full.outputs[32], "too many asm outputs", .{});
+    }
+    var outputs_buffer: [32]Zir.Inst.Asm.Output = undefined;
+    const outputs = outputs_buffer[0..full.outputs.len];
+
+    var output_type_bits: u32 = 0;
+
+    for (full.outputs, 0..) 
|output_node, i| { + const symbolic_name = main_tokens[output_node]; + const name = try astgen.identAsString(symbolic_name); + const constraint_token = symbolic_name + 2; + const constraint = (try astgen.strLitAsString(constraint_token)).index; + const has_arrow = token_tags[symbolic_name + 4] == .arrow; + if (has_arrow) { + if (output_type_bits != 0) { + return astgen.failNode(output_node, "inline assembly allows up to one output value", .{}); + } + output_type_bits |= @as(u32, 1) << @intCast(i); + const out_type_node = node_datas[output_node].lhs; + const out_type_inst = try typeExpr(gz, scope, out_type_node); + outputs[i] = .{ + .name = name, + .constraint = constraint, + .operand = out_type_inst, + }; + } else { + const ident_token = symbolic_name + 4; + // TODO have a look at #215 and related issues and decide how to + // handle outputs. Do we want this to be identifiers? + // Or maybe we want to force this to be expressions with a pointer type. + outputs[i] = .{ + .name = name, + .constraint = constraint, + .operand = try localVarRef(gz, scope, .{ .rl = .ref }, node, ident_token), + }; + } + } + + if (full.inputs.len > 32) { + return astgen.failNode(full.inputs[32], "too many asm inputs", .{}); + } + var inputs_buffer: [32]Zir.Inst.Asm.Input = undefined; + const inputs = inputs_buffer[0..full.inputs.len]; + + for (full.inputs, 0..) |input_node, i| { + const symbolic_name = main_tokens[input_node]; + const name = try astgen.identAsString(symbolic_name); + const constraint_token = symbolic_name + 2; + const constraint = (try astgen.strLitAsString(constraint_token)).index; + const operand = try expr(gz, scope, .{ .rl = .none }, node_datas[input_node].lhs); + inputs[i] = .{ + .name = name, + .constraint = constraint, + .operand = operand, + }; + } + + var clobbers_buffer: [32]u32 = undefined; + var clobber_i: usize = 0; + if (full.first_clobber) |first_clobber| clobbers: { + // asm ("foo" ::: "a", "b") + // asm ("foo" ::: "a", "b",) + var tok_i = first_clobber; + while (true) : (tok_i += 1) { + if (clobber_i >= clobbers_buffer.len) { + return astgen.failTok(tok_i, "too many asm clobbers", .{}); + } + clobbers_buffer[clobber_i] = @intFromEnum((try astgen.strLitAsString(tok_i)).index); + clobber_i += 1; + tok_i += 1; + switch (token_tags[tok_i]) { + .r_paren => break :clobbers, + .comma => { + if (token_tags[tok_i + 1] == .r_paren) { + break :clobbers; + } else { + continue; + } + }, + else => unreachable, + } + } + } + + const result = try gz.addAsm(.{ + .tag = tag_and_tmpl.tag, + .node = node, + .asm_source = tag_and_tmpl.tmpl, + .is_volatile = full.volatile_token != null, + .output_type_bits = output_type_bits, + .outputs = outputs, + .inputs = inputs, + .clobbers = clobbers_buffer[0..clobber_i], + }); + return rvalue(gz, ri, result, node); +} + +fn as( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + lhs: Ast.Node.Index, + rhs: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const dest_type = try typeExpr(gz, scope, lhs); + const result = try reachableExpr(gz, scope, .{ .rl = .{ .ty = dest_type } }, rhs, node); + return rvalue(gz, ri, result, node); +} + +fn unionInit( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + params: []const Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const union_type = try typeExpr(gz, scope, params[0]); + const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[1]); + const field_type = try gz.addPlNode(.field_type_ref, node, Zir.Inst.FieldTypeRef{ + 
.container_type = union_type,
+        .field_name = field_name,
+    });
+    const init = try reachableExpr(gz, scope, .{ .rl = .{ .ty = field_type } }, params[2], node);
+    const result = try gz.addPlNode(.union_init, node, Zir.Inst.UnionInit{
+        .union_type = union_type,
+        .init = init,
+        .field_name = field_name,
+    });
+    return rvalue(gz, ri, result, node);
+}
+
+fn bitCast(
+    gz: *GenZir,
+    scope: *Scope,
+    ri: ResultInfo,
+    node: Ast.Node.Index,
+    operand_node: Ast.Node.Index,
+) InnerError!Zir.Inst.Ref {
+    const dest_type = try ri.rl.resultTypeForCast(gz, node, "@bitCast");
+    const operand = try reachableExpr(gz, scope, .{ .rl = .none }, operand_node, node);
+    const result = try gz.addPlNode(.bitcast, node, Zir.Inst.Bin{
+        .lhs = dest_type,
+        .rhs = operand,
+    });
+    return rvalue(gz, ri, result, node);
+}
+
+/// Handle one or more nested pointer cast builtins:
+/// * @ptrCast
+/// * @alignCast
+/// * @addrSpaceCast
+/// * @constCast
+/// * @volatileCast
+/// Any sequence of such builtins is treated as a single operation. This allows
+/// sequences like `@ptrCast(@alignCast(ptr))` to work correctly despite the
+/// intermediate result type being unknown.
+fn ptrCast(
+    gz: *GenZir,
+    scope: *Scope,
+    ri: ResultInfo,
+    root_node: Ast.Node.Index,
+) InnerError!Zir.Inst.Ref {
+    const astgen = gz.astgen;
+    const tree = astgen.tree;
+    const main_tokens = tree.nodes.items(.main_token);
+    const node_datas = tree.nodes.items(.data);
+    const node_tags = tree.nodes.items(.tag);
+
+    var flags: Zir.Inst.FullPtrCastFlags = .{};
+
+    // Note that all pointer cast builtins have one parameter, so we only need
+    // to handle `builtin_call_two`.
+    var node = root_node;
+    while (true) {
+        switch (node_tags[node]) {
+            .builtin_call_two, .builtin_call_two_comma => {},
+            .grouped_expression => {
+                // Handle the chaining even with redundant parentheses
+                node = node_datas[node].lhs;
+                continue;
+            },
+            else => break,
+        }
+
+        if (node_datas[node].lhs == 0) break; // 0 args
+        if (node_datas[node].rhs != 0) break; // 2 args
+
+        const builtin_token = main_tokens[node];
+        const builtin_name = tree.tokenSlice(builtin_token);
+        const info = BuiltinFn.list.get(builtin_name) orelse break;
+        if (info.param_count != 1) break;
+
+        switch (info.tag) {
+            else => break,
+            inline .ptr_cast,
+            .align_cast,
+            .addrspace_cast,
+            .const_cast,
+            .volatile_cast,
+            => |tag| {
+                if (@field(flags, @tagName(tag))) {
+                    return astgen.failNode(node, "redundant {s}", .{builtin_name});
+                }
+                @field(flags, @tagName(tag)) = true;
+            },
+        }
+
+        node = node_datas[node].lhs;
+    }
+
+    const flags_i: u5 = @bitCast(flags);
+    assert(flags_i != 0);
+
+    const ptr_only: Zir.Inst.FullPtrCastFlags = .{ .ptr_cast = true };
+    if (flags_i == @as(u5, @bitCast(ptr_only))) {
+        // Special case: simpler representation
+        return typeCast(gz, scope, ri, root_node, node, .ptr_cast, "@ptrCast");
+    }
+
+    const no_result_ty_flags: Zir.Inst.FullPtrCastFlags = .{
+        .const_cast = true,
+        .volatile_cast = true,
+    };
+    if ((flags_i & ~@as(u5, @bitCast(no_result_ty_flags))) == 0) {
+        // Result type not needed
+        const cursor = maybeAdvanceSourceCursorToMainToken(gz, root_node);
+        const operand = try expr(gz, scope, .{ .rl = .none }, node);
+        try emitDbgStmt(gz, cursor);
+        const result = try gz.addExtendedPayloadSmall(.ptr_cast_no_dest, flags_i, Zir.Inst.UnNode{
+            .node = gz.nodeIndexToRelative(root_node),
+            .operand = operand,
+        });
+        return rvalue(gz, ri, result, root_node);
+    }
+
+    // Full cast including result type
+
+    const cursor = maybeAdvanceSourceCursorToMainToken(gz, 
root_node); + const result_type = try ri.rl.resultTypeForCast(gz, root_node, flags.needResultTypeBuiltinName()); + const operand = try expr(gz, scope, .{ .rl = .none }, node); + try emitDbgStmt(gz, cursor); + const result = try gz.addExtendedPayloadSmall(.ptr_cast_full, flags_i, Zir.Inst.BinNode{ + .node = gz.nodeIndexToRelative(root_node), + .lhs = result_type, + .rhs = operand, + }); + return rvalue(gz, ri, result, root_node); +} + +fn typeOf( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + args: []const Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + if (args.len < 1) { + return astgen.failNode(node, "expected at least 1 argument, found 0", .{}); + } + const gpa = astgen.gpa; + if (args.len == 1) { + const typeof_inst = try gz.makeBlockInst(.typeof_builtin, node); + + var typeof_scope = gz.makeSubBlock(scope); + typeof_scope.is_comptime = false; + typeof_scope.is_typeof = true; + typeof_scope.c_import = false; + defer typeof_scope.unstack(); + + const ty_expr = try reachableExpr(&typeof_scope, &typeof_scope.base, .{ .rl = .none }, args[0], node); + if (!gz.refIsNoReturn(ty_expr)) { + _ = try typeof_scope.addBreak(.break_inline, typeof_inst, ty_expr); + } + try typeof_scope.setBlockBody(typeof_inst); + + // typeof_scope unstacked now, can add new instructions to gz + try gz.instructions.append(gpa, typeof_inst); + return rvalue(gz, ri, typeof_inst.toRef(), node); + } + const payload_size: u32 = std.meta.fields(Zir.Inst.TypeOfPeer).len; + const payload_index = try reserveExtra(astgen, payload_size + args.len); + const args_index = payload_index + payload_size; + + const typeof_inst = try gz.addExtendedMultiOpPayloadIndex(.typeof_peer, payload_index, args.len); + + var typeof_scope = gz.makeSubBlock(scope); + typeof_scope.is_comptime = false; + + for (args, 0..) 
|arg, i| {
+        const param_ref = try reachableExpr(&typeof_scope, &typeof_scope.base, .{ .rl = .none }, arg, node);
+        astgen.extra.items[args_index + i] = @intFromEnum(param_ref);
+    }
+    _ = try typeof_scope.addBreak(.break_inline, typeof_inst.toIndex().?, .void_value);
+
+    const body = typeof_scope.instructionsSlice();
+    const body_len = astgen.countBodyLenAfterFixups(body);
+    astgen.setExtra(payload_index, Zir.Inst.TypeOfPeer{
+        .body_len = @intCast(body_len),
+        .body_index = @intCast(astgen.extra.items.len),
+        .src_node = gz.nodeIndexToRelative(node),
+    });
+    try astgen.extra.ensureUnusedCapacity(gpa, body_len);
+    astgen.appendBodyWithFixups(body);
+    typeof_scope.unstack();
+
+    return rvalue(gz, ri, typeof_inst, node);
+}
+
+fn minMax(
+    gz: *GenZir,
+    scope: *Scope,
+    ri: ResultInfo,
+    node: Ast.Node.Index,
+    args: []const Ast.Node.Index,
+    comptime op: enum { min, max },
+) InnerError!Zir.Inst.Ref {
+    const astgen = gz.astgen;
+    if (args.len < 2) {
+        return astgen.failNode(node, "expected at least 2 arguments, found {d}", .{args.len});
+    }
+    if (args.len == 2) {
+        const tag: Zir.Inst.Tag = switch (op) {
+            .min => .min,
+            .max => .max,
+        };
+        const a = try expr(gz, scope, .{ .rl = .none }, args[0]);
+        const b = try expr(gz, scope, .{ .rl = .none }, args[1]);
+        const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{
+            .lhs = a,
+            .rhs = b,
+        });
+        return rvalue(gz, ri, result, node);
+    }
+    const payload_index = try addExtra(astgen, Zir.Inst.NodeMultiOp{
+        .src_node = gz.nodeIndexToRelative(node),
+    });
+    var extra_index = try reserveExtra(gz.astgen, args.len);
+    for (args) |arg| {
+        const arg_ref = try expr(gz, scope, .{ .rl = .none }, arg);
+        astgen.extra.items[extra_index] = @intFromEnum(arg_ref);
+        extra_index += 1;
+    }
+    const tag: Zir.Inst.Extended = switch (op) {
+        .min => .min_multi,
+        .max => .max_multi,
+    };
+    const result = try gz.addExtendedMultiOpPayloadIndex(tag, payload_index, args.len);
+    return rvalue(gz, ri, result, node);
+}
+
+fn builtinCall(
+    gz: *GenZir,
+    scope: *Scope,
+    ri: ResultInfo,
+    node: Ast.Node.Index,
+    params: []const Ast.Node.Index,
+) InnerError!Zir.Inst.Ref {
+    const astgen = gz.astgen;
+    const tree = astgen.tree;
+    const main_tokens = tree.nodes.items(.main_token);
+
+    const builtin_token = main_tokens[node];
+    const builtin_name = tree.tokenSlice(builtin_token);
+
+    // We handle the different builtins manually because they have different semantics depending
+    // on the function. For example, `@as` and others participate in result location semantics,
+    // and `@cImport` creates a special scope that collects a .c source code text buffer.
+    // Also, some builtins have a variable number of parameters.
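+    // Builtins with a variable number of parameters have a null param_count,
+    // in which case the argument count is validated by the handler below.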
+ + const info = BuiltinFn.list.get(builtin_name) orelse { + return astgen.failNode(node, "invalid builtin function: '{s}'", .{ + builtin_name, + }); + }; + if (info.param_count) |expected| { + if (expected != params.len) { + const s = if (expected == 1) "" else "s"; + return astgen.failNode(node, "expected {d} argument{s}, found {d}", .{ + expected, s, params.len, + }); + } + } + + // Check function scope-only builtins + + if (astgen.fn_block == null and info.illegal_outside_function) + return astgen.failNode(node, "'{s}' outside function scope", .{builtin_name}); + + switch (info.tag) { + .import => { + const node_tags = tree.nodes.items(.tag); + const operand_node = params[0]; + + if (node_tags[operand_node] != .string_literal) { + // Spec reference: https://github.com/ziglang/zig/issues/2206 + return astgen.failNode(operand_node, "@import operand must be a string literal", .{}); + } + const str_lit_token = main_tokens[operand_node]; + const str = try astgen.strLitAsString(str_lit_token); + const str_slice = astgen.string_bytes.items[@intFromEnum(str.index)..][0..str.len]; + if (mem.indexOfScalar(u8, str_slice, 0) != null) { + return astgen.failTok(str_lit_token, "import path cannot contain null bytes", .{}); + } else if (str.len == 0) { + return astgen.failTok(str_lit_token, "import path cannot be empty", .{}); + } + const result = try gz.addStrTok(.import, str.index, str_lit_token); + const gop = try astgen.imports.getOrPut(astgen.gpa, str.index); + if (!gop.found_existing) { + gop.value_ptr.* = str_lit_token; + } + return rvalue(gz, ri, result, node); + }, + .compile_log => { + const payload_index = try addExtra(gz.astgen, Zir.Inst.NodeMultiOp{ + .src_node = gz.nodeIndexToRelative(node), + }); + var extra_index = try reserveExtra(gz.astgen, params.len); + for (params) |param| { + const param_ref = try expr(gz, scope, .{ .rl = .none }, param); + astgen.extra.items[extra_index] = @intFromEnum(param_ref); + extra_index += 1; + } + const result = try gz.addExtendedMultiOpPayloadIndex(.compile_log, payload_index, params.len); + return rvalue(gz, ri, result, node); + }, + .field => { + if (ri.rl == .ref or ri.rl == .ref_coerced_ty) { + return gz.addPlNode(.field_ptr_named, node, Zir.Inst.FieldNamed{ + .lhs = try expr(gz, scope, .{ .rl = .ref }, params[0]), + .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[1]), + }); + } + const result = try gz.addPlNode(.field_val_named, node, Zir.Inst.FieldNamed{ + .lhs = try expr(gz, scope, .{ .rl = .none }, params[0]), + .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[1]), + }); + return rvalue(gz, ri, result, node); + }, + + // zig fmt: off + .as => return as( gz, scope, ri, node, params[0], params[1]), + .bit_cast => return bitCast( gz, scope, ri, node, params[0]), + .TypeOf => return typeOf( gz, scope, ri, node, params), + .union_init => return unionInit(gz, scope, ri, node, params), + .c_import => return cImport( gz, scope, node, params[0]), + .min => return minMax( gz, scope, ri, node, params, .min), + .max => return minMax( gz, scope, ri, node, params, .max), + // zig fmt: on + + .@"export" => { + const node_tags = tree.nodes.items(.tag); + const node_datas = tree.nodes.items(.data); + // This function causes a Decl to be exported. The first parameter is not an expression, + // but an identifier of the Decl to be exported. 
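+            // For illustration, both accepted spellings of the first argument
+            // (`Impl` is a hypothetical container):
+            //     @export(entry, .{ .name = "entry" });      // identifier
+            //     @export(Impl.entry, .{ .name = "entry" }); // field access
+            // Any other expression is rejected below.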
+ var namespace: Zir.Inst.Ref = .none; + var decl_name: Zir.NullTerminatedString = .empty; + switch (node_tags[params[0]]) { + .identifier => { + const ident_token = main_tokens[params[0]]; + if (isPrimitive(tree.tokenSlice(ident_token))) { + return astgen.failTok(ident_token, "unable to export primitive value", .{}); + } + decl_name = try astgen.identAsString(ident_token); + + var s = scope; + var found_already: ?Ast.Node.Index = null; // we have found a decl with the same name already + while (true) switch (s.tag) { + .local_val => { + const local_val = s.cast(Scope.LocalVal).?; + if (local_val.name == decl_name) { + local_val.used = ident_token; + _ = try gz.addPlNode(.export_value, node, Zir.Inst.ExportValue{ + .operand = local_val.inst, + .options = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .export_options_type } }, params[1]), + }); + return rvalue(gz, ri, .void_value, node); + } + s = local_val.parent; + }, + .local_ptr => { + const local_ptr = s.cast(Scope.LocalPtr).?; + if (local_ptr.name == decl_name) { + if (!local_ptr.maybe_comptime) + return astgen.failNode(params[0], "unable to export runtime-known value", .{}); + local_ptr.used = ident_token; + const loaded = try gz.addUnNode(.load, local_ptr.ptr, node); + _ = try gz.addPlNode(.export_value, node, Zir.Inst.ExportValue{ + .operand = loaded, + .options = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .export_options_type } }, params[1]), + }); + return rvalue(gz, ri, .void_value, node); + } + s = local_ptr.parent; + }, + .gen_zir => s = s.cast(GenZir).?.parent, + .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent, + .namespace, .enum_namespace => { + const ns = s.cast(Scope.Namespace).?; + if (ns.decls.get(decl_name)) |i| { + if (found_already) |f| { + return astgen.failNodeNotes(node, "ambiguous reference", .{}, &.{ + try astgen.errNoteNode(f, "declared here", .{}), + try astgen.errNoteNode(i, "also declared here", .{}), + }); + } + // We found a match but must continue looking for ambiguous references to decls. 
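+                            // For example (hypothetical input), a name declared
+                            // in two enclosing namespaces triggers this error:
+                            //     const foo = 1;
+                            //     const S = struct {
+                            //         const foo = 2;
+                            //         comptime { @export(foo, .{ .name = "foo" }); }
+                            //     };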
+ found_already = i; + } + s = ns.parent; + }, + .top => break, + }; + if (found_already == null) { + const ident_name = try astgen.identifierTokenString(ident_token); + return astgen.failNode(params[0], "use of undeclared identifier '{s}'", .{ident_name}); + } + }, + .field_access => { + const namespace_node = node_datas[params[0]].lhs; + namespace = try typeExpr(gz, scope, namespace_node); + const dot_token = main_tokens[params[0]]; + const field_ident = dot_token + 1; + decl_name = try astgen.identAsString(field_ident); + }, + else => return astgen.failNode(params[0], "symbol to export must identify a declaration", .{}), + } + const options = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .export_options_type } }, params[1]); + _ = try gz.addPlNode(.@"export", node, Zir.Inst.Export{ + .namespace = namespace, + .decl_name = decl_name, + .options = options, + }); + return rvalue(gz, ri, .void_value, node); + }, + .@"extern" => { + const type_inst = try typeExpr(gz, scope, params[0]); + const options = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .extern_options_type } }, params[1]); + const result = try gz.addExtendedPayload(.builtin_extern, Zir.Inst.BinNode{ + .node = gz.nodeIndexToRelative(node), + .lhs = type_inst, + .rhs = options, + }); + return rvalue(gz, ri, result, node); + }, + .fence => { + const order = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[0]); + _ = try gz.addExtendedPayload(.fence, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = order, + }); + return rvalue(gz, ri, .void_value, node); + }, + .set_float_mode => { + const order = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .float_mode_type } }, params[0]); + _ = try gz.addExtendedPayload(.set_float_mode, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = order, + }); + return rvalue(gz, ri, .void_value, node); + }, + .set_align_stack => { + const order = try expr(gz, scope, coerced_align_ri, params[0]); + _ = try gz.addExtendedPayload(.set_align_stack, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = order, + }); + return rvalue(gz, ri, .void_value, node); + }, + .set_cold => { + const order = try expr(gz, scope, ri, params[0]); + _ = try gz.addExtendedPayload(.set_cold, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = order, + }); + return rvalue(gz, ri, .void_value, node); + }, + + .src => { + const token_starts = tree.tokens.items(.start); + const node_start = token_starts[tree.firstToken(node)]; + astgen.advanceSourceCursor(node_start); + const result = try gz.addExtendedPayload(.builtin_src, Zir.Inst.Src{ + .node = gz.nodeIndexToRelative(node), + .line = astgen.source_line, + .column = astgen.source_column, + }); + return rvalue(gz, ri, result, node); + }, + + // zig fmt: off + .This => return rvalue(gz, ri, try gz.addNodeExtended(.this, node), node), + .return_address => return rvalue(gz, ri, try gz.addNodeExtended(.ret_addr, node), node), + .error_return_trace => return rvalue(gz, ri, try gz.addNodeExtended(.error_return_trace, node), node), + .frame => return rvalue(gz, ri, try gz.addNodeExtended(.frame, node), node), + .frame_address => return rvalue(gz, ri, try gz.addNodeExtended(.frame_address, node), node), + .breakpoint => return rvalue(gz, ri, try gz.addNodeExtended(.breakpoint, node), node), + .in_comptime => return rvalue(gz, ri, try gz.addNodeExtended(.in_comptime, node), node), + + .type_info => return simpleUnOpType(gz, scope, ri, node, params[0], .type_info), + .size_of 
=> return simpleUnOpType(gz, scope, ri, node, params[0], .size_of), + .bit_size_of => return simpleUnOpType(gz, scope, ri, node, params[0], .bit_size_of), + .align_of => return simpleUnOpType(gz, scope, ri, node, params[0], .align_of), + + .int_from_ptr => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .int_from_ptr), + .compile_error => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[0], .compile_error), + .set_eval_branch_quota => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0], .set_eval_branch_quota), + .int_from_enum => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .int_from_enum), + .int_from_bool => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .int_from_bool), + .embed_file => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[0], .embed_file), + .error_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .anyerror_type } }, params[0], .error_name), + .set_runtime_safety => return simpleUnOp(gz, scope, ri, node, coerced_bool_ri, params[0], .set_runtime_safety), + .sqrt => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .sqrt), + .sin => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .sin), + .cos => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .cos), + .tan => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .tan), + .exp => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .exp), + .exp2 => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .exp2), + .log => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .log), + .log2 => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .log2), + .log10 => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .log10), + .abs => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .abs), + .floor => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .floor), + .ceil => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .ceil), + .trunc => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .trunc), + .round => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .round), + .tag_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .tag_name), + .type_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .type_name), + .Frame => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .frame_type), + .frame_size => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .frame_size), + + .int_from_float => return typeCast(gz, scope, ri, node, params[0], .int_from_float, builtin_name), + .float_from_int => return typeCast(gz, scope, ri, node, params[0], .float_from_int, builtin_name), + .ptr_from_int => return typeCast(gz, scope, ri, node, params[0], .ptr_from_int, builtin_name), + .enum_from_int => return typeCast(gz, scope, ri, node, params[0], .enum_from_int, builtin_name), + .float_cast => return typeCast(gz, scope, ri, node, params[0], .float_cast, builtin_name), + .int_cast => return typeCast(gz, scope, ri, node, params[0], .int_cast, builtin_name), + .truncate => return typeCast(gz, scope, ri, node, params[0], .truncate, builtin_name), + // zig fmt: on + + .Type => { + const operand = try expr(gz, scope, .{ .rl 
= .{ .coerced_ty = .type_info_type } }, params[0]); + + const gpa = gz.astgen.gpa; + + try gz.instructions.ensureUnusedCapacity(gpa, 1); + try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); + + const payload_index = try gz.astgen.addExtra(Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = operand, + }); + const new_index: Zir.Inst.Index = @enumFromInt(gz.astgen.instructions.len); + gz.astgen.instructions.appendAssumeCapacity(.{ + .tag = .extended, + .data = .{ .extended = .{ + .opcode = .reify, + .small = @intFromEnum(gz.anon_name_strategy), + .operand = payload_index, + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + const result = new_index.toRef(); + return rvalue(gz, ri, result, node); + }, + .panic => { + try emitDbgNode(gz, node); + return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[0], .panic); + }, + .trap => { + try emitDbgNode(gz, node); + _ = try gz.addNode(.trap, node); + return rvalue(gz, ri, .unreachable_value, node); + }, + .int_from_error => { + const operand = try expr(gz, scope, .{ .rl = .none }, params[0]); + const result = try gz.addExtendedPayload(.int_from_error, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = operand, + }); + return rvalue(gz, ri, result, node); + }, + .error_from_int => { + const operand = try expr(gz, scope, .{ .rl = .none }, params[0]); + const result = try gz.addExtendedPayload(.error_from_int, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = operand, + }); + return rvalue(gz, ri, result, node); + }, + .error_cast => { + try emitDbgNode(gz, node); + + const result = try gz.addExtendedPayload(.error_cast, Zir.Inst.BinNode{ + .lhs = try ri.rl.resultTypeForCast(gz, node, "@errorCast"), + .rhs = try expr(gz, scope, .{ .rl = .none }, params[0]), + .node = gz.nodeIndexToRelative(node), + }); + return rvalue(gz, ri, result, node); + }, + .ptr_cast, + .align_cast, + .addrspace_cast, + .const_cast, + .volatile_cast, + => return ptrCast(gz, scope, ri, node), + + // zig fmt: off + .has_decl => return hasDeclOrField(gz, scope, ri, node, params[0], params[1], .has_decl), + .has_field => return hasDeclOrField(gz, scope, ri, node, params[0], params[1], .has_field), + + .clz => return bitBuiltin(gz, scope, ri, node, params[0], .clz), + .ctz => return bitBuiltin(gz, scope, ri, node, params[0], .ctz), + .pop_count => return bitBuiltin(gz, scope, ri, node, params[0], .pop_count), + .byte_swap => return bitBuiltin(gz, scope, ri, node, params[0], .byte_swap), + .bit_reverse => return bitBuiltin(gz, scope, ri, node, params[0], .bit_reverse), + + .div_exact => return divBuiltin(gz, scope, ri, node, params[0], params[1], .div_exact), + .div_floor => return divBuiltin(gz, scope, ri, node, params[0], params[1], .div_floor), + .div_trunc => return divBuiltin(gz, scope, ri, node, params[0], params[1], .div_trunc), + .mod => return divBuiltin(gz, scope, ri, node, params[0], params[1], .mod), + .rem => return divBuiltin(gz, scope, ri, node, params[0], params[1], .rem), + + .shl_exact => return shiftOp(gz, scope, ri, node, params[0], params[1], .shl_exact), + .shr_exact => return shiftOp(gz, scope, ri, node, params[0], params[1], .shr_exact), + + .bit_offset_of => return offsetOf(gz, scope, ri, node, params[0], params[1], .bit_offset_of), + .offset_of => return offsetOf(gz, scope, ri, node, params[0], params[1], .offset_of), + + .c_undef => return simpleCBuiltin(gz, scope, ri, node, params[0], .c_undef), + .c_include => return simpleCBuiltin(gz, 
scope, ri, node, params[0], .c_include), + + .cmpxchg_strong => return cmpxchg(gz, scope, ri, node, params, 1), + .cmpxchg_weak => return cmpxchg(gz, scope, ri, node, params, 0), + // zig fmt: on + + .wasm_memory_size => { + const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0]); + const result = try gz.addExtendedPayload(.wasm_memory_size, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = operand, + }); + return rvalue(gz, ri, result, node); + }, + .wasm_memory_grow => { + const index_arg = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0]); + const delta_arg = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .u32_type } }, params[1]); + const result = try gz.addExtendedPayload(.wasm_memory_grow, Zir.Inst.BinNode{ + .node = gz.nodeIndexToRelative(node), + .lhs = index_arg, + .rhs = delta_arg, + }); + return rvalue(gz, ri, result, node); + }, + .c_define => { + if (!gz.c_import) return gz.astgen.failNode(node, "C define valid only inside C import block", .{}); + const name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[0]); + const value = try comptimeExpr(gz, scope, .{ .rl = .none }, params[1]); + const result = try gz.addExtendedPayload(.c_define, Zir.Inst.BinNode{ + .node = gz.nodeIndexToRelative(node), + .lhs = name, + .rhs = value, + }); + return rvalue(gz, ri, result, node); + }, + + .splat => { + const result_type = try ri.rl.resultTypeForCast(gz, node, "@splat"); + const elem_type = try gz.addUnNode(.vector_elem_type, result_type, node); + const scalar = try expr(gz, scope, .{ .rl = .{ .ty = elem_type } }, params[0]); + const result = try gz.addPlNode(.splat, node, Zir.Inst.Bin{ + .lhs = result_type, + .rhs = scalar, + }); + return rvalue(gz, ri, result, node); + }, + .reduce => { + const op = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .reduce_op_type } }, params[0]); + const scalar = try expr(gz, scope, .{ .rl = .none }, params[1]); + const result = try gz.addPlNode(.reduce, node, Zir.Inst.Bin{ + .lhs = op, + .rhs = scalar, + }); + return rvalue(gz, ri, result, node); + }, + + .add_with_overflow => return overflowArithmetic(gz, scope, ri, node, params, .add_with_overflow), + .sub_with_overflow => return overflowArithmetic(gz, scope, ri, node, params, .sub_with_overflow), + .mul_with_overflow => return overflowArithmetic(gz, scope, ri, node, params, .mul_with_overflow), + .shl_with_overflow => return overflowArithmetic(gz, scope, ri, node, params, .shl_with_overflow), + + .atomic_load => { + const result = try gz.addPlNode(.atomic_load, node, Zir.Inst.AtomicLoad{ + // zig fmt: off + .elem_type = try typeExpr(gz, scope, params[0]), + .ptr = try expr (gz, scope, .{ .rl = .none }, params[1]), + .ordering = try expr (gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[2]), + // zig fmt: on + }); + return rvalue(gz, ri, result, node); + }, + .atomic_rmw => { + const int_type = try typeExpr(gz, scope, params[0]); + const result = try gz.addPlNode(.atomic_rmw, node, Zir.Inst.AtomicRmw{ + // zig fmt: off + .ptr = try expr(gz, scope, .{ .rl = .none }, params[1]), + .operation = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_rmw_op_type } }, params[2]), + .operand = try expr(gz, scope, .{ .rl = .{ .ty = int_type } }, params[3]), + .ordering = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[4]), + // zig fmt: on + }); + return rvalue(gz, ri, result, node); + }, + .atomic_store => { + const int_type = try 
typeExpr(gz, scope, params[0]); + _ = try gz.addPlNode(.atomic_store, node, Zir.Inst.AtomicStore{ + // zig fmt: off + .ptr = try expr(gz, scope, .{ .rl = .none }, params[1]), + .operand = try expr(gz, scope, .{ .rl = .{ .ty = int_type } }, params[2]), + .ordering = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[3]), + // zig fmt: on + }); + return rvalue(gz, ri, .void_value, node); + }, + .mul_add => { + const float_type = try typeExpr(gz, scope, params[0]); + const mulend1 = try expr(gz, scope, .{ .rl = .{ .coerced_ty = float_type } }, params[1]); + const mulend2 = try expr(gz, scope, .{ .rl = .{ .coerced_ty = float_type } }, params[2]); + const addend = try expr(gz, scope, .{ .rl = .{ .ty = float_type } }, params[3]); + const result = try gz.addPlNode(.mul_add, node, Zir.Inst.MulAdd{ + .mulend1 = mulend1, + .mulend2 = mulend2, + .addend = addend, + }); + return rvalue(gz, ri, result, node); + }, + .call => { + const modifier = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .call_modifier_type } }, params[0]); + const callee = try expr(gz, scope, .{ .rl = .none }, params[1]); + const args = try expr(gz, scope, .{ .rl = .none }, params[2]); + const result = try gz.addPlNode(.builtin_call, node, Zir.Inst.BuiltinCall{ + .modifier = modifier, + .callee = callee, + .args = args, + .flags = .{ + .is_nosuspend = gz.nosuspend_node != 0, + .ensure_result_used = false, + }, + }); + return rvalue(gz, ri, result, node); + }, + .field_parent_ptr => { + const parent_type = try typeExpr(gz, scope, params[0]); + const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[1]); + const result = try gz.addPlNode(.field_parent_ptr, node, Zir.Inst.FieldParentPtr{ + .parent_type = parent_type, + .field_name = field_name, + .field_ptr = try expr(gz, scope, .{ .rl = .none }, params[2]), + }); + return rvalue(gz, ri, result, node); + }, + .memcpy => { + _ = try gz.addPlNode(.memcpy, node, Zir.Inst.Bin{ + .lhs = try expr(gz, scope, .{ .rl = .none }, params[0]), + .rhs = try expr(gz, scope, .{ .rl = .none }, params[1]), + }); + return rvalue(gz, ri, .void_value, node); + }, + .memset => { + const lhs = try expr(gz, scope, .{ .rl = .none }, params[0]); + const lhs_ty = try gz.addUnNode(.typeof, lhs, params[0]); + const elem_ty = try gz.addUnNode(.indexable_ptr_elem_type, lhs_ty, params[0]); + _ = try gz.addPlNode(.memset, node, Zir.Inst.Bin{ + .lhs = lhs, + .rhs = try expr(gz, scope, .{ .rl = .{ .coerced_ty = elem_ty } }, params[1]), + }); + return rvalue(gz, ri, .void_value, node); + }, + .shuffle => { + const result = try gz.addPlNode(.shuffle, node, Zir.Inst.Shuffle{ + .elem_type = try typeExpr(gz, scope, params[0]), + .a = try expr(gz, scope, .{ .rl = .none }, params[1]), + .b = try expr(gz, scope, .{ .rl = .none }, params[2]), + .mask = try comptimeExpr(gz, scope, .{ .rl = .none }, params[3]), + }); + return rvalue(gz, ri, result, node); + }, + .select => { + const result = try gz.addExtendedPayload(.select, Zir.Inst.Select{ + .node = gz.nodeIndexToRelative(node), + .elem_type = try typeExpr(gz, scope, params[0]), + .pred = try expr(gz, scope, .{ .rl = .none }, params[1]), + .a = try expr(gz, scope, .{ .rl = .none }, params[2]), + .b = try expr(gz, scope, .{ .rl = .none }, params[3]), + }); + return rvalue(gz, ri, result, node); + }, + .async_call => { + const result = try gz.addExtendedPayload(.builtin_async_call, Zir.Inst.AsyncCall{ + .node = gz.nodeIndexToRelative(node), + .frame_buffer = try expr(gz, scope, .{ .rl = .none }, 
params[0]), + .result_ptr = try expr(gz, scope, .{ .rl = .none }, params[1]), + .fn_ptr = try expr(gz, scope, .{ .rl = .none }, params[2]), + .args = try expr(gz, scope, .{ .rl = .none }, params[3]), + }); + return rvalue(gz, ri, result, node); + }, + .Vector => { + const result = try gz.addPlNode(.vector_type, node, Zir.Inst.Bin{ + .lhs = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0]), + .rhs = try typeExpr(gz, scope, params[1]), + }); + return rvalue(gz, ri, result, node); + }, + .prefetch => { + const ptr = try expr(gz, scope, .{ .rl = .none }, params[0]); + const options = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .prefetch_options_type } }, params[1]); + _ = try gz.addExtendedPayload(.prefetch, Zir.Inst.BinNode{ + .node = gz.nodeIndexToRelative(node), + .lhs = ptr, + .rhs = options, + }); + return rvalue(gz, ri, .void_value, node); + }, + .c_va_arg => { + const result = try gz.addExtendedPayload(.c_va_arg, Zir.Inst.BinNode{ + .node = gz.nodeIndexToRelative(node), + .lhs = try expr(gz, scope, .{ .rl = .none }, params[0]), + .rhs = try typeExpr(gz, scope, params[1]), + }); + return rvalue(gz, ri, result, node); + }, + .c_va_copy => { + const result = try gz.addExtendedPayload(.c_va_copy, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = try expr(gz, scope, .{ .rl = .none }, params[0]), + }); + return rvalue(gz, ri, result, node); + }, + .c_va_end => { + const result = try gz.addExtendedPayload(.c_va_end, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = try expr(gz, scope, .{ .rl = .none }, params[0]), + }); + return rvalue(gz, ri, result, node); + }, + .c_va_start => { + if (!astgen.fn_var_args) { + return astgen.failNode(node, "'@cVaStart' in a non-variadic function", .{}); + } + return rvalue(gz, ri, try gz.addNodeExtended(.c_va_start, node), node); + }, + + .work_item_id => { + const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0]); + const result = try gz.addExtendedPayload(.work_item_id, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = operand, + }); + return rvalue(gz, ri, result, node); + }, + .work_group_size => { + const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0]); + const result = try gz.addExtendedPayload(.work_group_size, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = operand, + }); + return rvalue(gz, ri, result, node); + }, + .work_group_id => { + const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0]); + const result = try gz.addExtendedPayload(.work_group_id, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = operand, + }); + return rvalue(gz, ri, result, node); + }, + } +} + +fn hasDeclOrField( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + lhs_node: Ast.Node.Index, + rhs_node: Ast.Node.Index, + tag: Zir.Inst.Tag, +) InnerError!Zir.Inst.Ref { + const container_type = try typeExpr(gz, scope, lhs_node); + const name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, rhs_node); + const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ + .lhs = container_type, + .rhs = name, + }); + return rvalue(gz, ri, result, node); +} + +fn typeCast( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + operand_node: Ast.Node.Index, + tag: Zir.Inst.Tag, + builtin_name: []const u8, +) InnerError!Zir.Inst.Ref { + const cursor = 
maybeAdvanceSourceCursorToMainToken(gz, node); + const result_type = try ri.rl.resultTypeForCast(gz, node, builtin_name); + const operand = try expr(gz, scope, .{ .rl = .none }, operand_node); + + try emitDbgStmt(gz, cursor); + const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ + .lhs = result_type, + .rhs = operand, + }); + return rvalue(gz, ri, result, node); +} + +fn simpleUnOpType( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + operand_node: Ast.Node.Index, + tag: Zir.Inst.Tag, +) InnerError!Zir.Inst.Ref { + const operand = try typeExpr(gz, scope, operand_node); + const result = try gz.addUnNode(tag, operand, node); + return rvalue(gz, ri, result, node); +} + +fn simpleUnOp( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + operand_ri: ResultInfo, + operand_node: Ast.Node.Index, + tag: Zir.Inst.Tag, +) InnerError!Zir.Inst.Ref { + const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); + const operand = if (tag == .compile_error) + try comptimeExpr(gz, scope, operand_ri, operand_node) + else + try expr(gz, scope, operand_ri, operand_node); + switch (tag) { + .tag_name, .error_name, .int_from_ptr => try emitDbgStmt(gz, cursor), + else => {}, + } + const result = try gz.addUnNode(tag, operand, node); + return rvalue(gz, ri, result, node); +} + +fn negation( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const node_tags = tree.nodes.items(.tag); + const node_datas = tree.nodes.items(.data); + + // Check for float literal as the sub-expression because we want to preserve + // its negativity rather than having it go through comptime subtraction. + const operand_node = node_datas[node].lhs; + if (node_tags[operand_node] == .number_literal) { + return numberLiteral(gz, ri, operand_node, node, .negative); + } + + const operand = try expr(gz, scope, .{ .rl = .none }, operand_node); + const result = try gz.addUnNode(.negate, operand, node); + return rvalue(gz, ri, result, node); +} + +fn cmpxchg( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + params: []const Ast.Node.Index, + small: u16, +) InnerError!Zir.Inst.Ref { + const int_type = try typeExpr(gz, scope, params[0]); + const result = try gz.addExtendedPayloadSmall(.cmpxchg, small, Zir.Inst.Cmpxchg{ + // zig fmt: off + .node = gz.nodeIndexToRelative(node), + .ptr = try expr(gz, scope, .{ .rl = .none }, params[1]), + .expected_value = try expr(gz, scope, .{ .rl = .{ .ty = int_type } }, params[2]), + .new_value = try expr(gz, scope, .{ .rl = .{ .coerced_ty = int_type } }, params[3]), + .success_order = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[4]), + .failure_order = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[5]), + // zig fmt: on + }); + return rvalue(gz, ri, result, node); +} + +fn bitBuiltin( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + operand_node: Ast.Node.Index, + tag: Zir.Inst.Tag, +) InnerError!Zir.Inst.Ref { + const operand = try expr(gz, scope, .{ .rl = .none }, operand_node); + const result = try gz.addUnNode(tag, operand, node); + return rvalue(gz, ri, result, node); +} + +fn divBuiltin( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + lhs_node: Ast.Node.Index, + rhs_node: Ast.Node.Index, + tag: Zir.Inst.Tag, +) InnerError!Zir.Inst.Ref { + const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); + 
const lhs = try expr(gz, scope, .{ .rl = .none }, lhs_node); + const rhs = try expr(gz, scope, .{ .rl = .none }, rhs_node); + + try emitDbgStmt(gz, cursor); + const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ .lhs = lhs, .rhs = rhs }); + return rvalue(gz, ri, result, node); +} + +fn simpleCBuiltin( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + operand_node: Ast.Node.Index, + tag: Zir.Inst.Extended, +) InnerError!Zir.Inst.Ref { + const name: []const u8 = if (tag == .c_undef) "C undef" else "C include"; + if (!gz.c_import) return gz.astgen.failNode(node, "{s} valid only inside C import block", .{name}); + const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, operand_node); + _ = try gz.addExtendedPayload(tag, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = operand, + }); + return rvalue(gz, ri, .void_value, node); +} + +fn offsetOf( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + lhs_node: Ast.Node.Index, + rhs_node: Ast.Node.Index, + tag: Zir.Inst.Tag, +) InnerError!Zir.Inst.Ref { + const type_inst = try typeExpr(gz, scope, lhs_node); + const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, rhs_node); + const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ + .lhs = type_inst, + .rhs = field_name, + }); + return rvalue(gz, ri, result, node); +} + +fn shiftOp( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + lhs_node: Ast.Node.Index, + rhs_node: Ast.Node.Index, + tag: Zir.Inst.Tag, +) InnerError!Zir.Inst.Ref { + const lhs = try expr(gz, scope, .{ .rl = .none }, lhs_node); + + const cursor = switch (gz.astgen.tree.nodes.items(.tag)[node]) { + .shl, .shr => maybeAdvanceSourceCursorToMainToken(gz, node), + else => undefined, + }; + + const log2_int_type = try gz.addUnNode(.typeof_log2_int_type, lhs, lhs_node); + const rhs = try expr(gz, scope, .{ .rl = .{ .ty = log2_int_type }, .ctx = .shift_op }, rhs_node); + + switch (gz.astgen.tree.nodes.items(.tag)[node]) { + .shl, .shr => try emitDbgStmt(gz, cursor), + else => undefined, + } + + const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ + .lhs = lhs, + .rhs = rhs, + }); + return rvalue(gz, ri, result, node); +} + +fn cImport( + gz: *GenZir, + scope: *Scope, + node: Ast.Node.Index, + body_node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const gpa = astgen.gpa; + + if (gz.c_import) return gz.astgen.failNode(node, "cannot nest @cImport", .{}); + + var block_scope = gz.makeSubBlock(scope); + block_scope.is_comptime = true; + block_scope.c_import = true; + defer block_scope.unstack(); + + const block_inst = try gz.makeBlockInst(.c_import, node); + const block_result = try expr(&block_scope, &block_scope.base, .{ .rl = .none }, body_node); + _ = try gz.addUnNode(.ensure_result_used, block_result, node); + if (!gz.refIsNoReturn(block_result)) { + _ = try block_scope.addBreak(.break_inline, block_inst, .void_value); + } + try block_scope.setBlockBody(block_inst); + // block_scope unstacked now, can add new instructions to gz + try gz.instructions.append(gpa, block_inst); + + return block_inst.toRef(); +} + +fn overflowArithmetic( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + params: []const Ast.Node.Index, + tag: Zir.Inst.Extended, +) InnerError!Zir.Inst.Ref { + const lhs = try expr(gz, scope, .{ .rl = .none }, params[0]); + const rhs = try expr(gz, scope, .{ .rl = .none }, params[1]); + 
const result = try gz.addExtendedPayload(tag, Zir.Inst.BinNode{ + .node = gz.nodeIndexToRelative(node), + .lhs = lhs, + .rhs = rhs, + }); + return rvalue(gz, ri, result, node); +} + +fn callExpr( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + call: Ast.full.Call, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + + const callee = try calleeExpr(gz, scope, call.ast.fn_expr); + const modifier: std.builtin.CallModifier = blk: { + if (gz.is_comptime) { + break :blk .compile_time; + } + if (call.async_token != null) { + break :blk .async_kw; + } + if (gz.nosuspend_node != 0) { + break :blk .no_async; + } + break :blk .auto; + }; + + { + astgen.advanceSourceCursor(astgen.tree.tokens.items(.start)[call.ast.lparen]); + const line = astgen.source_line - gz.decl_line; + const column = astgen.source_column; + // Sema expects a dbg_stmt immediately before call, + try emitDbgStmtForceCurrentIndex(gz, .{ line, column }); + } + + switch (callee) { + .direct => |obj| assert(obj != .none), + .field => |field| assert(field.obj_ptr != .none), + } + assert(node != 0); + + const call_index: Zir.Inst.Index = @enumFromInt(astgen.instructions.len); + const call_inst = call_index.toRef(); + try gz.astgen.instructions.append(astgen.gpa, undefined); + try gz.instructions.append(astgen.gpa, call_index); + + const scratch_top = astgen.scratch.items.len; + defer astgen.scratch.items.len = scratch_top; + + var scratch_index = scratch_top; + try astgen.scratch.resize(astgen.gpa, scratch_top + call.ast.params.len); + + for (call.ast.params) |param_node| { + var arg_block = gz.makeSubBlock(scope); + defer arg_block.unstack(); + + // `call_inst` is reused to provide the param type. + const arg_ref = try expr(&arg_block, &arg_block.base, .{ .rl = .{ .coerced_ty = call_inst }, .ctx = .fn_arg }, param_node); + _ = try arg_block.addBreakWithSrcNode(.break_inline, call_index, arg_ref, param_node); + + const body = arg_block.instructionsSlice(); + try astgen.scratch.ensureUnusedCapacity(astgen.gpa, countBodyLenAfterFixups(astgen, body)); + appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body); + + astgen.scratch.items[scratch_index] = @intCast(astgen.scratch.items.len - scratch_top); + scratch_index += 1; + } + + // If our result location is a try/catch/error-union-if/return, a function argument, + // or an initializer for a `const` variable, the error trace propagates. + // Otherwise, it should always be popped (handled in Sema). 
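+    // For example (hypothetical call sites):
+    //     const a = try mayFail(); // error-handling expr: trace is kept
+    //     return mayFail();        // return: trace is kept
+    //     var e = mayFail();       // plain `var` init: the call pops the trace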
+ const propagate_error_trace = switch (ri.ctx) { + .error_handling_expr, .@"return", .fn_arg, .const_init => true, + else => false, + }; + + switch (callee) { + .direct => |callee_obj| { + const payload_index = try addExtra(astgen, Zir.Inst.Call{ + .callee = callee_obj, + .flags = .{ + .pop_error_return_trace = !propagate_error_trace, + .packed_modifier = @intCast(@intFromEnum(modifier)), + .args_len = @intCast(call.ast.params.len), + }, + }); + if (call.ast.params.len != 0) { + try astgen.extra.appendSlice(astgen.gpa, astgen.scratch.items[scratch_top..]); + } + gz.astgen.instructions.set(@intFromEnum(call_index), .{ + .tag = .call, + .data = .{ .pl_node = .{ + .src_node = gz.nodeIndexToRelative(node), + .payload_index = payload_index, + } }, + }); + }, + .field => |callee_field| { + const payload_index = try addExtra(astgen, Zir.Inst.FieldCall{ + .obj_ptr = callee_field.obj_ptr, + .field_name_start = callee_field.field_name_start, + .flags = .{ + .pop_error_return_trace = !propagate_error_trace, + .packed_modifier = @intCast(@intFromEnum(modifier)), + .args_len = @intCast(call.ast.params.len), + }, + }); + if (call.ast.params.len != 0) { + try astgen.extra.appendSlice(astgen.gpa, astgen.scratch.items[scratch_top..]); + } + gz.astgen.instructions.set(@intFromEnum(call_index), .{ + .tag = .field_call, + .data = .{ .pl_node = .{ + .src_node = gz.nodeIndexToRelative(node), + .payload_index = payload_index, + } }, + }); + }, + } + return rvalue(gz, ri, call_inst, node); // TODO function call with result location +} + +const Callee = union(enum) { + field: struct { + /// A *pointer* to the object the field is fetched on, so that we can + /// promote the lvalue to an address if the first parameter requires it. + obj_ptr: Zir.Inst.Ref, + /// Offset into `string_bytes`. + field_name_start: Zir.NullTerminatedString, + }, + direct: Zir.Inst.Ref, +}; + +/// calleeExpr generates the function part of a call expression (f in f(x)), but +/// *not* the callee argument to the @call() builtin. Its purpose is to +/// distinguish between standard calls and method call syntax `a.b()`. Thus, if +/// the lhs is a field access, we return using the `field` union field; +/// otherwise, we use the `direct` union field. +fn calleeExpr( + gz: *GenZir, + scope: *Scope, + node: Ast.Node.Index, +) InnerError!Callee { + const astgen = gz.astgen; + const tree = astgen.tree; + + const tag = tree.nodes.items(.tag)[node]; + switch (tag) { + .field_access => { + const main_tokens = tree.nodes.items(.main_token); + const node_datas = tree.nodes.items(.data); + const object_node = node_datas[node].lhs; + const dot_token = main_tokens[node]; + const field_ident = dot_token + 1; + const str_index = try astgen.identAsString(field_ident); + // Capture the object by reference so we can promote it to an + // address in Sema if needed. 
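+            // E.g. given `var c: Counter = .{}` and `fn bump(self: *Counter)`
+            // (hypothetical types), the call `c.bump()` captures `&c` here so
+            // Sema can satisfy the pointer parameter without copying.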
+ const lhs = try expr(gz, scope, .{ .rl = .ref }, object_node); + + const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); + try emitDbgStmt(gz, cursor); + + return .{ .field = .{ + .obj_ptr = lhs, + .field_name_start = str_index, + } }; + }, + else => return .{ .direct = try expr(gz, scope, .{ .rl = .none }, node) }, + } +} + +const primitive_instrs = std.ComptimeStringMap(Zir.Inst.Ref, .{ + .{ "anyerror", .anyerror_type }, + .{ "anyframe", .anyframe_type }, + .{ "anyopaque", .anyopaque_type }, + .{ "bool", .bool_type }, + .{ "c_int", .c_int_type }, + .{ "c_long", .c_long_type }, + .{ "c_longdouble", .c_longdouble_type }, + .{ "c_longlong", .c_longlong_type }, + .{ "c_char", .c_char_type }, + .{ "c_short", .c_short_type }, + .{ "c_uint", .c_uint_type }, + .{ "c_ulong", .c_ulong_type }, + .{ "c_ulonglong", .c_ulonglong_type }, + .{ "c_ushort", .c_ushort_type }, + .{ "comptime_float", .comptime_float_type }, + .{ "comptime_int", .comptime_int_type }, + .{ "f128", .f128_type }, + .{ "f16", .f16_type }, + .{ "f32", .f32_type }, + .{ "f64", .f64_type }, + .{ "f80", .f80_type }, + .{ "false", .bool_false }, + .{ "i16", .i16_type }, + .{ "i32", .i32_type }, + .{ "i64", .i64_type }, + .{ "i128", .i128_type }, + .{ "i8", .i8_type }, + .{ "isize", .isize_type }, + .{ "noreturn", .noreturn_type }, + .{ "null", .null_value }, + .{ "true", .bool_true }, + .{ "type", .type_type }, + .{ "u16", .u16_type }, + .{ "u29", .u29_type }, + .{ "u32", .u32_type }, + .{ "u64", .u64_type }, + .{ "u128", .u128_type }, + .{ "u1", .u1_type }, + .{ "u8", .u8_type }, + .{ "undefined", .undef }, + .{ "usize", .usize_type }, + .{ "void", .void_type }, +}); + +comptime { + // These checks ensure that std.zig.primitives stays in sync with the primitive->Zir map. + const primitives = std.zig.primitives; + for (primitive_instrs.kvs) |kv| { + if (!primitives.isPrimitive(kv.key)) { + @compileError("std.zig.isPrimitive() is not aware of Zir instr '" ++ @tagName(kv.value) ++ "'"); + } + } + for (primitives.names.kvs) |kv| { + if (primitive_instrs.get(kv.key) == null) { + @compileError("std.zig.primitives entry '" ++ kv.key ++ "' does not have a corresponding Zir instr"); + } + } +} + +fn nodeIsTriviallyZero(tree: *const Ast, node: Ast.Node.Index) bool { + const node_tags = tree.nodes.items(.tag); + const main_tokens = tree.nodes.items(.main_token); + + switch (node_tags[node]) { + .number_literal => { + const ident = main_tokens[node]; + return switch (std.zig.parseNumberLiteral(tree.tokenSlice(ident))) { + .int => |number| switch (number) { + 0 => true, + else => false, + }, + else => false, + }; + }, + else => return false, + } +} + +fn nodeMayAppendToErrorTrace(tree: *const Ast, start_node: Ast.Node.Index) bool { + const node_tags = tree.nodes.items(.tag); + const node_datas = tree.nodes.items(.data); + + var node = start_node; + while (true) { + switch (node_tags[node]) { + // These don't have the opportunity to call any runtime functions. + .error_value, + .identifier, + .@"comptime", + => return false, + + // Forward the question to the LHS sub-expression. + .grouped_expression, + .@"try", + .@"nosuspend", + .unwrap_optional, + => node = node_datas[node].lhs, + + // Anything that does not eval to an error is guaranteed to pop any + // additions to the error trace, so it effectively does not append. 
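+            // E.g. `x + y` cannot evaluate to an error, so it falls under this
+            // rule and is reported as never appending.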
+ else => return nodeMayEvalToError(tree, start_node) != .never, + } + } +} + +fn nodeMayEvalToError(tree: *const Ast, start_node: Ast.Node.Index) BuiltinFn.EvalToError { + const node_tags = tree.nodes.items(.tag); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); + const token_tags = tree.tokens.items(.tag); + + var node = start_node; + while (true) { + switch (node_tags[node]) { + .root, + .@"usingnamespace", + .test_decl, + .switch_case, + .switch_case_inline, + .switch_case_one, + .switch_case_inline_one, + .container_field_init, + .container_field_align, + .container_field, + .asm_output, + .asm_input, + => unreachable, + + .error_value => return .always, + + .@"asm", + .asm_simple, + .identifier, + .field_access, + .deref, + .array_access, + .while_simple, + .while_cont, + .for_simple, + .if_simple, + .@"while", + .@"if", + .@"for", + .@"switch", + .switch_comma, + .call_one, + .call_one_comma, + .async_call_one, + .async_call_one_comma, + .call, + .call_comma, + .async_call, + .async_call_comma, + => return .maybe, + + .@"return", + .@"break", + .@"continue", + .bit_not, + .bool_not, + .global_var_decl, + .local_var_decl, + .simple_var_decl, + .aligned_var_decl, + .@"defer", + .@"errdefer", + .address_of, + .optional_type, + .negation, + .negation_wrap, + .@"resume", + .array_type, + .array_type_sentinel, + .ptr_type_aligned, + .ptr_type_sentinel, + .ptr_type, + .ptr_type_bit_range, + .@"suspend", + .fn_proto_simple, + .fn_proto_multi, + .fn_proto_one, + .fn_proto, + .fn_decl, + .anyframe_type, + .anyframe_literal, + .number_literal, + .enum_literal, + .string_literal, + .multiline_string_literal, + .char_literal, + .unreachable_literal, + .error_set_decl, + .container_decl, + .container_decl_trailing, + .container_decl_two, + .container_decl_two_trailing, + .container_decl_arg, + .container_decl_arg_trailing, + .tagged_union, + .tagged_union_trailing, + .tagged_union_two, + .tagged_union_two_trailing, + .tagged_union_enum_tag, + .tagged_union_enum_tag_trailing, + .add, + .add_wrap, + .add_sat, + .array_cat, + .array_mult, + .assign, + .assign_destructure, + .assign_bit_and, + .assign_bit_or, + .assign_shl, + .assign_shl_sat, + .assign_shr, + .assign_bit_xor, + .assign_div, + .assign_sub, + .assign_sub_wrap, + .assign_sub_sat, + .assign_mod, + .assign_add, + .assign_add_wrap, + .assign_add_sat, + .assign_mul, + .assign_mul_wrap, + .assign_mul_sat, + .bang_equal, + .bit_and, + .bit_or, + .shl, + .shl_sat, + .shr, + .bit_xor, + .bool_and, + .bool_or, + .div, + .equal_equal, + .error_union, + .greater_or_equal, + .greater_than, + .less_or_equal, + .less_than, + .merge_error_sets, + .mod, + .mul, + .mul_wrap, + .mul_sat, + .switch_range, + .for_range, + .sub, + .sub_wrap, + .sub_sat, + .slice, + .slice_open, + .slice_sentinel, + .array_init_one, + .array_init_one_comma, + .array_init_dot_two, + .array_init_dot_two_comma, + .array_init_dot, + .array_init_dot_comma, + .array_init, + .array_init_comma, + .struct_init_one, + .struct_init_one_comma, + .struct_init_dot_two, + .struct_init_dot_two_comma, + .struct_init_dot, + .struct_init_dot_comma, + .struct_init, + .struct_init_comma, + => return .never, + + // Forward the question to the LHS sub-expression. 
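+            // E.g. the answer for `(f())`, `try f()`, or `f().?` is the answer
+            // for `f()` itself.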
+ .grouped_expression, + .@"try", + .@"await", + .@"comptime", + .@"nosuspend", + .unwrap_optional, + => node = node_datas[node].lhs, + + // LHS sub-expression may still be an error under the outer optional or error union + .@"catch", + .@"orelse", + => return .maybe, + + .block_two, + .block_two_semicolon, + .block, + .block_semicolon, + => { + const lbrace = main_tokens[node]; + if (token_tags[lbrace - 1] == .colon) { + // Labeled blocks may need a memory location to forward + // to their break statements. + return .maybe; + } else { + return .never; + } + }, + + .builtin_call, + .builtin_call_comma, + .builtin_call_two, + .builtin_call_two_comma, + => { + const builtin_token = main_tokens[node]; + const builtin_name = tree.tokenSlice(builtin_token); + // If the builtin is an invalid name, we don't cause an error here; instead + // let it pass, and the error will be "invalid builtin function" later. + const builtin_info = BuiltinFn.list.get(builtin_name) orelse return .maybe; + return builtin_info.eval_to_error; + }, + } + } +} + +/// Returns `true` if it is known the type expression has more than one possible value; +/// `false` otherwise. +fn nodeImpliesMoreThanOnePossibleValue(tree: *const Ast, start_node: Ast.Node.Index) bool { + const node_tags = tree.nodes.items(.tag); + const node_datas = tree.nodes.items(.data); + + var node = start_node; + while (true) { + switch (node_tags[node]) { + .root, + .@"usingnamespace", + .test_decl, + .switch_case, + .switch_case_inline, + .switch_case_one, + .switch_case_inline_one, + .container_field_init, + .container_field_align, + .container_field, + .asm_output, + .asm_input, + .global_var_decl, + .local_var_decl, + .simple_var_decl, + .aligned_var_decl, + => unreachable, + + .@"return", + .@"break", + .@"continue", + .bit_not, + .bool_not, + .@"defer", + .@"errdefer", + .address_of, + .negation, + .negation_wrap, + .@"resume", + .array_type, + .@"suspend", + .fn_decl, + .anyframe_literal, + .number_literal, + .enum_literal, + .string_literal, + .multiline_string_literal, + .char_literal, + .unreachable_literal, + .error_set_decl, + .container_decl, + .container_decl_trailing, + .container_decl_two, + .container_decl_two_trailing, + .container_decl_arg, + .container_decl_arg_trailing, + .tagged_union, + .tagged_union_trailing, + .tagged_union_two, + .tagged_union_two_trailing, + .tagged_union_enum_tag, + .tagged_union_enum_tag_trailing, + .@"asm", + .asm_simple, + .add, + .add_wrap, + .add_sat, + .array_cat, + .array_mult, + .assign, + .assign_destructure, + .assign_bit_and, + .assign_bit_or, + .assign_shl, + .assign_shl_sat, + .assign_shr, + .assign_bit_xor, + .assign_div, + .assign_sub, + .assign_sub_wrap, + .assign_sub_sat, + .assign_mod, + .assign_add, + .assign_add_wrap, + .assign_add_sat, + .assign_mul, + .assign_mul_wrap, + .assign_mul_sat, + .bang_equal, + .bit_and, + .bit_or, + .shl, + .shl_sat, + .shr, + .bit_xor, + .bool_and, + .bool_or, + .div, + .equal_equal, + .error_union, + .greater_or_equal, + .greater_than, + .less_or_equal, + .less_than, + .merge_error_sets, + .mod, + .mul, + .mul_wrap, + .mul_sat, + .switch_range, + .for_range, + .field_access, + .sub, + .sub_wrap, + .sub_sat, + .slice, + .slice_open, + .slice_sentinel, + .deref, + .array_access, + .error_value, + .while_simple, + .while_cont, + .for_simple, + .if_simple, + .@"catch", + .@"orelse", + .array_init_one, + .array_init_one_comma, + .array_init_dot_two, + .array_init_dot_two_comma, + .array_init_dot, + .array_init_dot_comma, + .array_init, + .array_init_comma, + 
.struct_init_one, + .struct_init_one_comma, + .struct_init_dot_two, + .struct_init_dot_two_comma, + .struct_init_dot, + .struct_init_dot_comma, + .struct_init, + .struct_init_comma, + .@"while", + .@"if", + .@"for", + .@"switch", + .switch_comma, + .call_one, + .call_one_comma, + .async_call_one, + .async_call_one_comma, + .call, + .call_comma, + .async_call, + .async_call_comma, + .block_two, + .block_two_semicolon, + .block, + .block_semicolon, + .builtin_call, + .builtin_call_comma, + .builtin_call_two, + .builtin_call_two_comma, + // these are function bodies, not pointers + .fn_proto_simple, + .fn_proto_multi, + .fn_proto_one, + .fn_proto, + => return false, + + // Forward the question to the LHS sub-expression. + .grouped_expression, + .@"try", + .@"await", + .@"comptime", + .@"nosuspend", + .unwrap_optional, + => node = node_datas[node].lhs, + + .ptr_type_aligned, + .ptr_type_sentinel, + .ptr_type, + .ptr_type_bit_range, + .optional_type, + .anyframe_type, + .array_type_sentinel, + => return true, + + .identifier => { + const main_tokens = tree.nodes.items(.main_token); + const ident_bytes = tree.tokenSlice(main_tokens[node]); + if (primitive_instrs.get(ident_bytes)) |primitive| switch (primitive) { + .anyerror_type, + .anyframe_type, + .anyopaque_type, + .bool_type, + .c_int_type, + .c_long_type, + .c_longdouble_type, + .c_longlong_type, + .c_char_type, + .c_short_type, + .c_uint_type, + .c_ulong_type, + .c_ulonglong_type, + .c_ushort_type, + .comptime_float_type, + .comptime_int_type, + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .i16_type, + .i32_type, + .i64_type, + .i128_type, + .i8_type, + .isize_type, + .type_type, + .u16_type, + .u29_type, + .u32_type, + .u64_type, + .u128_type, + .u1_type, + .u8_type, + .usize_type, + => return true, + + .void_type, + .bool_false, + .bool_true, + .null_value, + .undef, + .noreturn_type, + => return false, + + else => unreachable, // that's all the values from `primitives`. + } else { + return false; + } + }, + } + } +} + +/// Returns `true` if it is known the expression is a type that cannot be used at runtime; +/// `false` otherwise. 
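+/// For example, `comptime_int`, `type`, and function types are comptime-only,
+/// while `u8` and pointer types are usable at runtime.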
+fn nodeImpliesComptimeOnly(tree: *const Ast, start_node: Ast.Node.Index) bool { + const node_tags = tree.nodes.items(.tag); + const node_datas = tree.nodes.items(.data); + + var node = start_node; + while (true) { + switch (node_tags[node]) { + .root, + .@"usingnamespace", + .test_decl, + .switch_case, + .switch_case_inline, + .switch_case_one, + .switch_case_inline_one, + .container_field_init, + .container_field_align, + .container_field, + .asm_output, + .asm_input, + .global_var_decl, + .local_var_decl, + .simple_var_decl, + .aligned_var_decl, + => unreachable, + + .@"return", + .@"break", + .@"continue", + .bit_not, + .bool_not, + .@"defer", + .@"errdefer", + .address_of, + .negation, + .negation_wrap, + .@"resume", + .array_type, + .@"suspend", + .fn_decl, + .anyframe_literal, + .number_literal, + .enum_literal, + .string_literal, + .multiline_string_literal, + .char_literal, + .unreachable_literal, + .error_set_decl, + .container_decl, + .container_decl_trailing, + .container_decl_two, + .container_decl_two_trailing, + .container_decl_arg, + .container_decl_arg_trailing, + .tagged_union, + .tagged_union_trailing, + .tagged_union_two, + .tagged_union_two_trailing, + .tagged_union_enum_tag, + .tagged_union_enum_tag_trailing, + .@"asm", + .asm_simple, + .add, + .add_wrap, + .add_sat, + .array_cat, + .array_mult, + .assign, + .assign_destructure, + .assign_bit_and, + .assign_bit_or, + .assign_shl, + .assign_shl_sat, + .assign_shr, + .assign_bit_xor, + .assign_div, + .assign_sub, + .assign_sub_wrap, + .assign_sub_sat, + .assign_mod, + .assign_add, + .assign_add_wrap, + .assign_add_sat, + .assign_mul, + .assign_mul_wrap, + .assign_mul_sat, + .bang_equal, + .bit_and, + .bit_or, + .shl, + .shl_sat, + .shr, + .bit_xor, + .bool_and, + .bool_or, + .div, + .equal_equal, + .error_union, + .greater_or_equal, + .greater_than, + .less_or_equal, + .less_than, + .merge_error_sets, + .mod, + .mul, + .mul_wrap, + .mul_sat, + .switch_range, + .for_range, + .field_access, + .sub, + .sub_wrap, + .sub_sat, + .slice, + .slice_open, + .slice_sentinel, + .deref, + .array_access, + .error_value, + .while_simple, + .while_cont, + .for_simple, + .if_simple, + .@"catch", + .@"orelse", + .array_init_one, + .array_init_one_comma, + .array_init_dot_two, + .array_init_dot_two_comma, + .array_init_dot, + .array_init_dot_comma, + .array_init, + .array_init_comma, + .struct_init_one, + .struct_init_one_comma, + .struct_init_dot_two, + .struct_init_dot_two_comma, + .struct_init_dot, + .struct_init_dot_comma, + .struct_init, + .struct_init_comma, + .@"while", + .@"if", + .@"for", + .@"switch", + .switch_comma, + .call_one, + .call_one_comma, + .async_call_one, + .async_call_one_comma, + .call, + .call_comma, + .async_call, + .async_call_comma, + .block_two, + .block_two_semicolon, + .block, + .block_semicolon, + .builtin_call, + .builtin_call_comma, + .builtin_call_two, + .builtin_call_two_comma, + .ptr_type_aligned, + .ptr_type_sentinel, + .ptr_type, + .ptr_type_bit_range, + .optional_type, + .anyframe_type, + .array_type_sentinel, + => return false, + + // these are function bodies, not pointers + .fn_proto_simple, + .fn_proto_multi, + .fn_proto_one, + .fn_proto, + => return true, + + // Forward the question to the LHS sub-expression. 
+ .grouped_expression, + .@"try", + .@"await", + .@"comptime", + .@"nosuspend", + .unwrap_optional, + => node = node_datas[node].lhs, + + .identifier => { + const main_tokens = tree.nodes.items(.main_token); + const ident_bytes = tree.tokenSlice(main_tokens[node]); + if (primitive_instrs.get(ident_bytes)) |primitive| switch (primitive) { + .anyerror_type, + .anyframe_type, + .anyopaque_type, + .bool_type, + .c_int_type, + .c_long_type, + .c_longdouble_type, + .c_longlong_type, + .c_char_type, + .c_short_type, + .c_uint_type, + .c_ulong_type, + .c_ulonglong_type, + .c_ushort_type, + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .i16_type, + .i32_type, + .i64_type, + .i128_type, + .i8_type, + .isize_type, + .u16_type, + .u29_type, + .u32_type, + .u64_type, + .u128_type, + .u1_type, + .u8_type, + .usize_type, + .void_type, + .bool_false, + .bool_true, + .null_value, + .undef, + .noreturn_type, + => return false, + + .comptime_float_type, + .comptime_int_type, + .type_type, + => return true, + + else => unreachable, // that's all the values from `primitives`. + } else { + return false; + } + }, + } + } +} + +/// Returns `true` if the node uses `gz.anon_name_strategy`. +fn nodeUsesAnonNameStrategy(tree: *const Ast, node: Ast.Node.Index) bool { + const node_tags = tree.nodes.items(.tag); + switch (node_tags[node]) { + .container_decl, + .container_decl_trailing, + .container_decl_two, + .container_decl_two_trailing, + .container_decl_arg, + .container_decl_arg_trailing, + .tagged_union, + .tagged_union_trailing, + .tagged_union_two, + .tagged_union_two_trailing, + .tagged_union_enum_tag, + .tagged_union_enum_tag_trailing, + => return true, + .builtin_call_two, .builtin_call_two_comma, .builtin_call, .builtin_call_comma => { + const builtin_token = tree.nodes.items(.main_token)[node]; + const builtin_name = tree.tokenSlice(builtin_token); + return std.mem.eql(u8, builtin_name, "@Type"); + }, + else => return false, + } +} + +/// Applies `rl` semantics to `result`. Expressions which do not do their own handling of +/// result locations must call this function on their result. +/// As an example, if `ri.rl` is `.ptr`, it will write the result to the pointer. +/// If `ri.rl` is `.ty`, it will coerce the result to the type. +/// Assumes nothing stacked on `gz`. +fn rvalue( + gz: *GenZir, + ri: ResultInfo, + raw_result: Zir.Inst.Ref, + src_node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + return rvalueInner(gz, ri, raw_result, src_node, true); +} + +/// Like `rvalue`, but refuses to perform coercions before taking references for +/// the `ref_coerced_ty` result type. This is used for local variables which do +/// not have `alloc`s, because we want variables to have consistent addresses, +/// i.e. we want them to act like lvalues. 
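+/// For example, given `const x: u8 = 1;`, every `&x` must yield the same
+/// address, so no coercion may be interposed before the ref is taken.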
+fn rvalueNoCoercePreRef( + gz: *GenZir, + ri: ResultInfo, + raw_result: Zir.Inst.Ref, + src_node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + return rvalueInner(gz, ri, raw_result, src_node, false); +} + +fn rvalueInner( + gz: *GenZir, + ri: ResultInfo, + raw_result: Zir.Inst.Ref, + src_node: Ast.Node.Index, + allow_coerce_pre_ref: bool, +) InnerError!Zir.Inst.Ref { + const result = r: { + if (raw_result.toIndex()) |result_index| { + const zir_tags = gz.astgen.instructions.items(.tag); + const data = gz.astgen.instructions.items(.data)[@intFromEnum(result_index)]; + if (zir_tags[@intFromEnum(result_index)].isAlwaysVoid(data)) { + break :r Zir.Inst.Ref.void_value; + } + } + break :r raw_result; + }; + if (gz.endsWithNoReturn()) return result; + switch (ri.rl) { + .none, .coerced_ty => return result, + .discard => { + // Emit a compile error for discarding error values. + _ = try gz.addUnNode(.ensure_result_non_error, result, src_node); + return .void_value; + }, + .ref, .ref_coerced_ty => { + const coerced_result = if (allow_coerce_pre_ref and ri.rl == .ref_coerced_ty) res: { + const ptr_ty = ri.rl.ref_coerced_ty; + break :res try gz.addPlNode(.coerce_ptr_elem_ty, src_node, Zir.Inst.Bin{ + .lhs = ptr_ty, + .rhs = result, + }); + } else result; + // We need a pointer but we have a value. + // Unfortunately it's not quite as simple as directly emitting a ref + // instruction here because we need subsequent address-of operator on + // const locals to return the same address. + const astgen = gz.astgen; + const tree = astgen.tree; + const src_token = tree.firstToken(src_node); + const result_index = coerced_result.toIndex() orelse + return gz.addUnTok(.ref, coerced_result, src_token); + const zir_tags = gz.astgen.instructions.items(.tag); + if (zir_tags[@intFromEnum(result_index)].isParam() or astgen.isInferred(coerced_result)) + return gz.addUnTok(.ref, coerced_result, src_token); + const gop = try astgen.ref_table.getOrPut(astgen.gpa, result_index); + if (!gop.found_existing) { + gop.value_ptr.* = try gz.makeUnTok(.ref, coerced_result, src_token); + } + return gop.value_ptr.*.toRef(); + }, + .ty => |ty_inst| { + // Quickly eliminate some common, unnecessary type coercion. 
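+            // Each `Zir.Inst.Ref` fits in 32 bits, so an (expected type,
+            // result value) pair packs into one u64: the type goes in the
+            // high half and the value in the low half. The common no-op
+            // coercions below then reduce to a single switch over exact pairs.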
+ const as_ty = @as(u64, @intFromEnum(Zir.Inst.Ref.type_type)) << 32; + const as_comptime_int = @as(u64, @intFromEnum(Zir.Inst.Ref.comptime_int_type)) << 32; + const as_bool = @as(u64, @intFromEnum(Zir.Inst.Ref.bool_type)) << 32; + const as_usize = @as(u64, @intFromEnum(Zir.Inst.Ref.usize_type)) << 32; + const as_void = @as(u64, @intFromEnum(Zir.Inst.Ref.void_type)) << 32; + switch ((@as(u64, @intFromEnum(ty_inst)) << 32) | @as(u64, @intFromEnum(result))) { + as_ty | @intFromEnum(Zir.Inst.Ref.u1_type), + as_ty | @intFromEnum(Zir.Inst.Ref.u8_type), + as_ty | @intFromEnum(Zir.Inst.Ref.i8_type), + as_ty | @intFromEnum(Zir.Inst.Ref.u16_type), + as_ty | @intFromEnum(Zir.Inst.Ref.u29_type), + as_ty | @intFromEnum(Zir.Inst.Ref.i16_type), + as_ty | @intFromEnum(Zir.Inst.Ref.u32_type), + as_ty | @intFromEnum(Zir.Inst.Ref.i32_type), + as_ty | @intFromEnum(Zir.Inst.Ref.u64_type), + as_ty | @intFromEnum(Zir.Inst.Ref.i64_type), + as_ty | @intFromEnum(Zir.Inst.Ref.u128_type), + as_ty | @intFromEnum(Zir.Inst.Ref.i128_type), + as_ty | @intFromEnum(Zir.Inst.Ref.usize_type), + as_ty | @intFromEnum(Zir.Inst.Ref.isize_type), + as_ty | @intFromEnum(Zir.Inst.Ref.c_char_type), + as_ty | @intFromEnum(Zir.Inst.Ref.c_short_type), + as_ty | @intFromEnum(Zir.Inst.Ref.c_ushort_type), + as_ty | @intFromEnum(Zir.Inst.Ref.c_int_type), + as_ty | @intFromEnum(Zir.Inst.Ref.c_uint_type), + as_ty | @intFromEnum(Zir.Inst.Ref.c_long_type), + as_ty | @intFromEnum(Zir.Inst.Ref.c_ulong_type), + as_ty | @intFromEnum(Zir.Inst.Ref.c_longlong_type), + as_ty | @intFromEnum(Zir.Inst.Ref.c_ulonglong_type), + as_ty | @intFromEnum(Zir.Inst.Ref.c_longdouble_type), + as_ty | @intFromEnum(Zir.Inst.Ref.f16_type), + as_ty | @intFromEnum(Zir.Inst.Ref.f32_type), + as_ty | @intFromEnum(Zir.Inst.Ref.f64_type), + as_ty | @intFromEnum(Zir.Inst.Ref.f80_type), + as_ty | @intFromEnum(Zir.Inst.Ref.f128_type), + as_ty | @intFromEnum(Zir.Inst.Ref.anyopaque_type), + as_ty | @intFromEnum(Zir.Inst.Ref.bool_type), + as_ty | @intFromEnum(Zir.Inst.Ref.void_type), + as_ty | @intFromEnum(Zir.Inst.Ref.type_type), + as_ty | @intFromEnum(Zir.Inst.Ref.anyerror_type), + as_ty | @intFromEnum(Zir.Inst.Ref.comptime_int_type), + as_ty | @intFromEnum(Zir.Inst.Ref.comptime_float_type), + as_ty | @intFromEnum(Zir.Inst.Ref.noreturn_type), + as_ty | @intFromEnum(Zir.Inst.Ref.anyframe_type), + as_ty | @intFromEnum(Zir.Inst.Ref.null_type), + as_ty | @intFromEnum(Zir.Inst.Ref.undefined_type), + as_ty | @intFromEnum(Zir.Inst.Ref.enum_literal_type), + as_ty | @intFromEnum(Zir.Inst.Ref.atomic_order_type), + as_ty | @intFromEnum(Zir.Inst.Ref.atomic_rmw_op_type), + as_ty | @intFromEnum(Zir.Inst.Ref.calling_convention_type), + as_ty | @intFromEnum(Zir.Inst.Ref.address_space_type), + as_ty | @intFromEnum(Zir.Inst.Ref.float_mode_type), + as_ty | @intFromEnum(Zir.Inst.Ref.reduce_op_type), + as_ty | @intFromEnum(Zir.Inst.Ref.call_modifier_type), + as_ty | @intFromEnum(Zir.Inst.Ref.prefetch_options_type), + as_ty | @intFromEnum(Zir.Inst.Ref.export_options_type), + as_ty | @intFromEnum(Zir.Inst.Ref.extern_options_type), + as_ty | @intFromEnum(Zir.Inst.Ref.type_info_type), + as_ty | @intFromEnum(Zir.Inst.Ref.manyptr_u8_type), + as_ty | @intFromEnum(Zir.Inst.Ref.manyptr_const_u8_type), + as_ty | @intFromEnum(Zir.Inst.Ref.manyptr_const_u8_sentinel_0_type), + as_ty | @intFromEnum(Zir.Inst.Ref.single_const_pointer_to_comptime_int_type), + as_ty | @intFromEnum(Zir.Inst.Ref.slice_const_u8_type), + as_ty | @intFromEnum(Zir.Inst.Ref.slice_const_u8_sentinel_0_type), + as_ty | 
@intFromEnum(Zir.Inst.Ref.anyerror_void_error_union_type), + as_ty | @intFromEnum(Zir.Inst.Ref.generic_poison_type), + as_ty | @intFromEnum(Zir.Inst.Ref.empty_struct_type), + as_comptime_int | @intFromEnum(Zir.Inst.Ref.zero), + as_comptime_int | @intFromEnum(Zir.Inst.Ref.one), + as_bool | @intFromEnum(Zir.Inst.Ref.bool_true), + as_bool | @intFromEnum(Zir.Inst.Ref.bool_false), + as_usize | @intFromEnum(Zir.Inst.Ref.zero_usize), + as_usize | @intFromEnum(Zir.Inst.Ref.one_usize), + as_void | @intFromEnum(Zir.Inst.Ref.void_value), + => return result, // type of result is already correct + + // Need an explicit type coercion instruction. + else => return gz.addPlNode(ri.zirTag(), src_node, Zir.Inst.As{ + .dest_type = ty_inst, + .operand = result, + }), + } + }, + .ptr => |ptr_res| { + _ = try gz.addPlNode(.store_node, ptr_res.src_node orelse src_node, Zir.Inst.Bin{ + .lhs = ptr_res.inst, + .rhs = result, + }); + return .void_value; + }, + .inferred_ptr => |alloc| { + _ = try gz.addPlNode(.store_to_inferred_ptr, src_node, Zir.Inst.Bin{ + .lhs = alloc, + .rhs = result, + }); + return .void_value; + }, + .destructure => |destructure| { + const components = destructure.components; + _ = try gz.addPlNode(.validate_destructure, src_node, Zir.Inst.ValidateDestructure{ + .operand = result, + .destructure_node = gz.nodeIndexToRelative(destructure.src_node), + .expect_len = @intCast(components.len), + }); + for (components, 0..) |component, i| { + if (component == .discard) continue; + const elem_val = try gz.add(.{ + .tag = .elem_val_imm, + .data = .{ .elem_val_imm = .{ + .operand = result, + .idx = @intCast(i), + } }, + }); + switch (component) { + .typed_ptr => |ptr_res| { + _ = try gz.addPlNode(.store_node, ptr_res.src_node orelse src_node, Zir.Inst.Bin{ + .lhs = ptr_res.inst, + .rhs = elem_val, + }); + }, + .inferred_ptr => |ptr_inst| { + _ = try gz.addPlNode(.store_to_inferred_ptr, src_node, Zir.Inst.Bin{ + .lhs = ptr_inst, + .rhs = elem_val, + }); + }, + .discard => unreachable, + } + } + return .void_value; + }, + } +} + +/// Given an identifier token, obtain the string for it. +/// If the token uses @"" syntax, parses as a string, reports errors if applicable, +/// and allocates the result within `astgen.arena`. +/// Otherwise, returns a reference to the source code bytes directly. +/// See also `appendIdentStr` and `parseStrLit`. +fn identifierTokenString(astgen: *AstGen, token: Ast.TokenIndex) InnerError![]const u8 { + const tree = astgen.tree; + const token_tags = tree.tokens.items(.tag); + assert(token_tags[token] == .identifier); + const ident_name = tree.tokenSlice(token); + if (!mem.startsWith(u8, ident_name, "@")) { + return ident_name; + } + var buf: ArrayListUnmanaged(u8) = .{}; + defer buf.deinit(astgen.gpa); + try astgen.parseStrLit(token, &buf, ident_name, 1); + if (mem.indexOfScalar(u8, buf.items, 0) != null) { + return astgen.failTok(token, "identifier cannot contain null bytes", .{}); + } else if (buf.items.len == 0) { + return astgen.failTok(token, "identifier cannot be empty", .{}); + } + const duped = try astgen.arena.dupe(u8, buf.items); + return duped; +} + +/// Given an identifier token, obtain the string for it (possibly parsing as a string +/// literal if it is @"" syntax), and append the string to `buf`. +/// See also `identifierTokenString` and `parseStrLit`. 
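+/// For example (illustrative), the identifier token `@"while"` appends the
+/// bytes `while`, whereas a plain `foo` token is appended unchanged.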
+fn appendIdentStr(
+    astgen: *AstGen,
+    token: Ast.TokenIndex,
+    buf: *ArrayListUnmanaged(u8),
+) InnerError!void {
+    const tree = astgen.tree;
+    const token_tags = tree.tokens.items(.tag);
+    assert(token_tags[token] == .identifier);
+    const ident_name = tree.tokenSlice(token);
+    if (!mem.startsWith(u8, ident_name, "@")) {
+        return buf.appendSlice(astgen.gpa, ident_name);
+    } else {
+        const start = buf.items.len;
+        try astgen.parseStrLit(token, buf, ident_name, 1);
+        const slice = buf.items[start..];
+        if (mem.indexOfScalar(u8, slice, 0) != null) {
+            return astgen.failTok(token, "identifier cannot contain null bytes", .{});
+        } else if (slice.len == 0) {
+            return astgen.failTok(token, "identifier cannot be empty", .{});
+        }
+    }
+}
+
+/// Appends the result to `buf`.
+fn parseStrLit(
+    astgen: *AstGen,
+    token: Ast.TokenIndex,
+    buf: *ArrayListUnmanaged(u8),
+    bytes: []const u8,
+    offset: u32,
+) InnerError!void {
+    const raw_string = bytes[offset..];
+    var buf_managed = buf.toManaged(astgen.gpa);
+    const result = std.zig.string_literal.parseWrite(buf_managed.writer(), raw_string);
+    buf.* = buf_managed.moveToUnmanaged();
+    switch (try result) {
+        .success => return,
+        .failure => |err| return astgen.failWithStrLitError(err, token, bytes, offset),
+    }
+}
+
+fn failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token: Ast.TokenIndex, bytes: []const u8, offset: u32) InnerError {
+    const raw_string = bytes[offset..];
+    switch (err) {
+        .invalid_escape_character => |bad_index| {
+            return astgen.failOff(
+                token,
+                offset + @as(u32, @intCast(bad_index)),
+                "invalid escape character: '{c}'",
+                .{raw_string[bad_index]},
+            );
+        },
+        .expected_hex_digit => |bad_index| {
+            return astgen.failOff(
+                token,
+                offset + @as(u32, @intCast(bad_index)),
+                "expected hex digit, found '{c}'",
+                .{raw_string[bad_index]},
+            );
+        },
+        .empty_unicode_escape_sequence => |bad_index| {
+            return astgen.failOff(
+                token,
+                offset + @as(u32, @intCast(bad_index)),
+                "empty unicode escape sequence",
+                .{},
+            );
+        },
+        .expected_hex_digit_or_rbrace => |bad_index| {
+            return astgen.failOff(
+                token,
+                offset + @as(u32, @intCast(bad_index)),
+                "expected hex digit or '}}', found '{c}'",
+                .{raw_string[bad_index]},
+            );
+        },
+        .invalid_unicode_codepoint => |bad_index| {
+            return astgen.failOff(
+                token,
+                offset + @as(u32, @intCast(bad_index)),
+                "unicode escape does not correspond to a valid codepoint",
+                .{},
+            );
+        },
+        .expected_lbrace => |bad_index| {
+            return astgen.failOff(
+                token,
+                offset + @as(u32, @intCast(bad_index)),
+                "expected '{{', found '{c}'",
+                .{raw_string[bad_index]},
+            );
+        },
+        .expected_rbrace => |bad_index| {
+            return astgen.failOff(
+                token,
+                offset + @as(u32, @intCast(bad_index)),
+                "expected '}}', found '{c}'",
+                .{raw_string[bad_index]},
+            );
+        },
+        .expected_single_quote => |bad_index| {
+            return astgen.failOff(
+                token,
+                offset + @as(u32, @intCast(bad_index)),
+                "expected single quote ('), found '{c}'",
+                .{raw_string[bad_index]},
+            );
+        },
+        .invalid_character => |bad_index| {
+            return astgen.failOff(
+                token,
+                offset + @as(u32, @intCast(bad_index)),
+                "invalid byte in string or character literal: '{c}'",
+                .{raw_string[bad_index]},
+            );
+        },
+    }
+}
+
+fn failNode(
+    astgen: *AstGen,
+    node: Ast.Node.Index,
+    comptime format: []const u8,
+    args: anytype,
+) InnerError {
+    return astgen.failNodeNotes(node, format, args, &[0]u32{});
+}
+
+fn appendErrorNode(
+    astgen: *AstGen,
+    node: Ast.Node.Index,
+    comptime format: []const u8,
+    args: anytype,
+) Allocator.Error!void {
+    try astgen.appendErrorNodeNotes(node, format, args, &[0]u32{});
+}
+
+fn appendErrorNodeNotes(
+    astgen: *AstGen,
+    node: Ast.Node.Index,
+    comptime format: []const u8,
+    args: anytype,
+    notes: []const u32,
+) Allocator.Error!void {
+    @setCold(true);
+    const string_bytes = &astgen.string_bytes;
+    const msg: Zir.NullTerminatedString = @enumFromInt(string_bytes.items.len);
+    try string_bytes.writer(astgen.gpa).print(format ++ "\x00", args);
+    const notes_index: u32 = if (notes.len != 0) blk: {
+        const notes_start = astgen.extra.items.len;
+        try astgen.extra.ensureTotalCapacity(astgen.gpa, notes_start + 1 + notes.len);
+        astgen.extra.appendAssumeCapacity(@intCast(notes.len));
+        astgen.extra.appendSliceAssumeCapacity(notes);
+        break :blk @intCast(notes_start);
+    } else 0;
+    try astgen.compile_errors.append(astgen.gpa, .{
+        .msg = msg,
+        .node = node,
+        .token = 0,
+        .byte_offset = 0,
+        .notes = notes_index,
+    });
+}
+
+fn failNodeNotes(
+    astgen: *AstGen,
+    node: Ast.Node.Index,
+    comptime format: []const u8,
+    args: anytype,
+    notes: []const u32,
+) InnerError {
+    try appendErrorNodeNotes(astgen, node, format, args, notes);
+    return error.AnalysisFail;
+}
+
+fn failTok(
+    astgen: *AstGen,
+    token: Ast.TokenIndex,
+    comptime format: []const u8,
+    args: anytype,
+) InnerError {
+    return astgen.failTokNotes(token, format, args, &[0]u32{});
+}
+
+fn appendErrorTok(
+    astgen: *AstGen,
+    token: Ast.TokenIndex,
+    comptime format: []const u8,
+    args: anytype,
+) !void {
+    try astgen.appendErrorTokNotesOff(token, 0, format, args, &[0]u32{});
+}
+
+fn failTokNotes(
+    astgen: *AstGen,
+    token: Ast.TokenIndex,
+    comptime format: []const u8,
+    args: anytype,
+    notes: []const u32,
+) InnerError {
+    try appendErrorTokNotesOff(astgen, token, 0, format, args, notes);
+    return error.AnalysisFail;
+}
+
+fn appendErrorTokNotes(
+    astgen: *AstGen,
+    token: Ast.TokenIndex,
+    comptime format: []const u8,
+    args: anytype,
+    notes: []const u32,
+) !void {
+    return appendErrorTokNotesOff(astgen, token, 0, format, args, notes);
+}
+
+/// Same as `failTok`, except given a token plus an offset from its starting
+/// byte offset.
+fn failOff(
+    astgen: *AstGen,
+    token: Ast.TokenIndex,
+    byte_offset: u32,
+    comptime format: []const u8,
+    args: anytype,
+) InnerError {
+    try appendErrorTokNotesOff(astgen, token, byte_offset, format, args, &.{});
+    return error.AnalysisFail;
+}
+
+fn appendErrorTokNotesOff(
+    astgen: *AstGen,
+    token: Ast.TokenIndex,
+    byte_offset: u32,
+    comptime format: []const u8,
+    args: anytype,
+    notes: []const u32,
+) !void {
+    @setCold(true);
+    const gpa = astgen.gpa;
+    const string_bytes = &astgen.string_bytes;
+    const msg: Zir.NullTerminatedString = @enumFromInt(string_bytes.items.len);
+    try string_bytes.writer(gpa).print(format ++ "\x00", args);
+    const notes_index: u32 = if (notes.len != 0) blk: {
+        const notes_start = astgen.extra.items.len;
+        try astgen.extra.ensureTotalCapacity(gpa, notes_start + 1 + notes.len);
+        astgen.extra.appendAssumeCapacity(@intCast(notes.len));
+        astgen.extra.appendSliceAssumeCapacity(notes);
+        break :blk @intCast(notes_start);
+    } else 0;
+    try astgen.compile_errors.append(gpa, .{
+        .msg = msg,
+        .node = 0,
+        .token = token,
+        .byte_offset = byte_offset,
+        .notes = notes_index,
+    });
+}
+
+fn errNoteTok(
+    astgen: *AstGen,
+    token: Ast.TokenIndex,
+    comptime format: []const u8,
+    args: anytype,
+) Allocator.Error!u32 {
+    return errNoteTokOff(astgen, token, 0, format, args);
+}
+
+fn errNoteTokOff(
+    astgen: *AstGen,
+    token: Ast.TokenIndex,
+    byte_offset: u32,
+    comptime format: []const u8,
+    args: anytype,
+) Allocator.Error!u32 {
+    @setCold(true);
+    const string_bytes = &astgen.string_bytes;
+    const msg: Zir.NullTerminatedString = @enumFromInt(string_bytes.items.len);
+    try string_bytes.writer(astgen.gpa).print(format ++ "\x00", args);
+    return astgen.addExtra(Zir.Inst.CompileErrors.Item{
+        .msg = msg,
+        .node = 0,
+        .token = token,
+        .byte_offset = byte_offset,
+        .notes = 0,
+    });
+}
+
+fn errNoteNode(
+    astgen: *AstGen,
+    node: Ast.Node.Index,
+    comptime format: []const u8,
+    args: anytype,
+) Allocator.Error!u32 {
+    @setCold(true);
+    const string_bytes = &astgen.string_bytes;
+    const msg: Zir.NullTerminatedString = @enumFromInt(string_bytes.items.len);
+    try string_bytes.writer(astgen.gpa).print(format ++ "\x00", args);
+    return astgen.addExtra(Zir.Inst.CompileErrors.Item{
+        .msg = msg,
+        .node = node,
+        .token = 0,
+        .byte_offset = 0,
+        .notes = 0,
+    });
+}
+
+fn identAsString(astgen: *AstGen, ident_token: Ast.TokenIndex) !Zir.NullTerminatedString {
+    const gpa = astgen.gpa;
+    const string_bytes = &astgen.string_bytes;
+    const str_index: u32 = @intCast(string_bytes.items.len);
+    try astgen.appendIdentStr(ident_token, string_bytes);
+    const key: []const u8 = string_bytes.items[str_index..];
+    const gop = try astgen.string_table.getOrPutContextAdapted(gpa, key, StringIndexAdapter{
+        .bytes = string_bytes,
+    }, StringIndexContext{
+        .bytes = string_bytes,
+    });
+    if (gop.found_existing) {
+        string_bytes.shrinkRetainingCapacity(str_index);
+        return @enumFromInt(gop.key_ptr.*);
+    } else {
+        gop.key_ptr.* = str_index;
+        try string_bytes.append(gpa, 0);
+        return @enumFromInt(str_index);
+    }
+}
+
+/// Adds a doc comment block to `string_bytes` by walking backwards from `end_token`.
+/// `end_token` must point at the first token after the last doc comment line.
+/// Returns `.empty` if no doc comment is present.
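+/// For example (an illustrative case), given a file beginning with:
+///     ///x
+///     ///y
+///     fn f() void {}
+/// passing the index of the `fn` token returns the interned bytes "x\ny".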
+fn docCommentAsString(astgen: *AstGen, end_token: Ast.TokenIndex) !Zir.NullTerminatedString { + if (end_token == 0) return .empty; + + const token_tags = astgen.tree.tokens.items(.tag); + + var tok = end_token - 1; + while (token_tags[tok] == .doc_comment) { + if (tok == 0) break; + tok -= 1; + } else { + tok += 1; + } + + return docCommentAsStringFromFirst(astgen, end_token, tok); +} + +/// end_token must be > the index of the last doc comment. +fn docCommentAsStringFromFirst( + astgen: *AstGen, + end_token: Ast.TokenIndex, + start_token: Ast.TokenIndex, +) !Zir.NullTerminatedString { + if (start_token == end_token) return .empty; + + const gpa = astgen.gpa; + const string_bytes = &astgen.string_bytes; + const str_index: u32 = @intCast(string_bytes.items.len); + const token_starts = astgen.tree.tokens.items(.start); + const token_tags = astgen.tree.tokens.items(.tag); + + const total_bytes = token_starts[end_token] - token_starts[start_token]; + try string_bytes.ensureUnusedCapacity(gpa, total_bytes); + + var current_token = start_token; + while (current_token < end_token) : (current_token += 1) { + switch (token_tags[current_token]) { + .doc_comment => { + const tok_bytes = astgen.tree.tokenSlice(current_token)[3..]; + string_bytes.appendSliceAssumeCapacity(tok_bytes); + if (current_token != end_token - 1) { + string_bytes.appendAssumeCapacity('\n'); + } + }, + else => break, + } + } + + const key: []const u8 = string_bytes.items[str_index..]; + const gop = try astgen.string_table.getOrPutContextAdapted(gpa, key, StringIndexAdapter{ + .bytes = string_bytes, + }, StringIndexContext{ + .bytes = string_bytes, + }); + + if (gop.found_existing) { + string_bytes.shrinkRetainingCapacity(str_index); + return @enumFromInt(gop.key_ptr.*); + } else { + gop.key_ptr.* = str_index; + try string_bytes.append(gpa, 0); + return @enumFromInt(str_index); + } +} + +const IndexSlice = struct { index: Zir.NullTerminatedString, len: u32 }; + +fn strLitAsString(astgen: *AstGen, str_lit_token: Ast.TokenIndex) !IndexSlice { + const gpa = astgen.gpa; + const string_bytes = &astgen.string_bytes; + const str_index: u32 = @intCast(string_bytes.items.len); + const token_bytes = astgen.tree.tokenSlice(str_lit_token); + try astgen.parseStrLit(str_lit_token, string_bytes, token_bytes, 0); + const key: []const u8 = string_bytes.items[str_index..]; + if (std.mem.indexOfScalar(u8, key, 0)) |_| return .{ + .index = @enumFromInt(str_index), + .len = @intCast(key.len), + }; + const gop = try astgen.string_table.getOrPutContextAdapted(gpa, key, StringIndexAdapter{ + .bytes = string_bytes, + }, StringIndexContext{ + .bytes = string_bytes, + }); + if (gop.found_existing) { + string_bytes.shrinkRetainingCapacity(str_index); + return .{ + .index = @enumFromInt(gop.key_ptr.*), + .len = @intCast(key.len), + }; + } else { + gop.key_ptr.* = str_index; + // Still need a null byte because we are using the same table + // to lookup null terminated strings, so if we get a match, it has to + // be null terminated for that to work. + try string_bytes.append(gpa, 0); + return .{ + .index = @enumFromInt(str_index), + .len = @intCast(key.len), + }; + } +} + +fn strLitNodeAsString(astgen: *AstGen, node: Ast.Node.Index) !IndexSlice { + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + + const start = node_datas[node].lhs; + const end = node_datas[node].rhs; + + const gpa = astgen.gpa; + const string_bytes = &astgen.string_bytes; + const str_index = string_bytes.items.len; + + // First line: do not append a newline. 
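+    // (Illustratively: the two-line literal `\\hello` `\\world` becomes the
+    // bytes "hello\nworld". Each token's leading `\\` and trailing newline
+    // are stripped, and every line after the first is prefixed with '\n'.)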
+    var tok_i = start;
+    {
+        const slice = tree.tokenSlice(tok_i);
+        const carriage_return_ending: usize = if (slice[slice.len - 2] == '\r') 2 else 1;
+        const line_bytes = slice[2 .. slice.len - carriage_return_ending];
+        try string_bytes.appendSlice(gpa, line_bytes);
+        tok_i += 1;
+    }
+    // Following lines: each line prepends a newline.
+    while (tok_i <= end) : (tok_i += 1) {
+        const slice = tree.tokenSlice(tok_i);
+        const carriage_return_ending: usize = if (slice[slice.len - 2] == '\r') 2 else 1;
+        const line_bytes = slice[2 .. slice.len - carriage_return_ending];
+        try string_bytes.ensureUnusedCapacity(gpa, line_bytes.len + 1);
+        string_bytes.appendAssumeCapacity('\n');
+        string_bytes.appendSliceAssumeCapacity(line_bytes);
+    }
+    const len = string_bytes.items.len - str_index;
+    try string_bytes.append(gpa, 0);
+    return IndexSlice{
+        .index = @enumFromInt(str_index),
+        .len = @intCast(len),
+    };
+}
+
+fn testNameString(astgen: *AstGen, str_lit_token: Ast.TokenIndex) !Zir.NullTerminatedString {
+    const gpa = astgen.gpa;
+    const string_bytes = &astgen.string_bytes;
+    const str_index: u32 = @intCast(string_bytes.items.len);
+    const token_bytes = astgen.tree.tokenSlice(str_lit_token);
+    try string_bytes.append(gpa, 0); // Indicates this is a test.
+    try astgen.parseStrLit(str_lit_token, string_bytes, token_bytes, 0);
+    const slice = string_bytes.items[str_index + 1 ..];
+    if (mem.indexOfScalar(u8, slice, 0) != null) {
+        return astgen.failTok(str_lit_token, "test name cannot contain null bytes", .{});
+    } else if (slice.len == 0) {
+        return astgen.failTok(str_lit_token, "empty test name must be omitted", .{});
+    }
+    try string_bytes.append(gpa, 0);
+    return @enumFromInt(str_index);
+}
+
+const Scope = struct {
+    tag: Tag,
+
+    fn cast(base: *Scope, comptime T: type) ?*T {
+        if (T == Defer) {
+            switch (base.tag) {
+                .defer_normal, .defer_error => return @fieldParentPtr(T, "base", base),
+                else => return null,
+            }
+        }
+        if (T == Namespace) {
+            switch (base.tag) {
+                .namespace, .enum_namespace => return @fieldParentPtr(T, "base", base),
+                else => return null,
+            }
+        }
+        if (base.tag != T.base_tag)
+            return null;
+
+        return @fieldParentPtr(T, "base", base);
+    }
+
+    fn parent(base: *Scope) ?*Scope {
+        return switch (base.tag) {
+            .gen_zir => base.cast(GenZir).?.parent,
+            .local_val => base.cast(LocalVal).?.parent,
+            .local_ptr => base.cast(LocalPtr).?.parent,
+            .defer_normal, .defer_error => base.cast(Defer).?.parent,
+            .namespace, .enum_namespace => base.cast(Namespace).?.parent,
+            .top => null,
+        };
+    }
+
+    const Tag = enum {
+        gen_zir,
+        local_val,
+        local_ptr,
+        defer_normal,
+        defer_error,
+        namespace,
+        enum_namespace,
+        top,
+    };
+
+    /// The category of identifier. These tag names are user-visible in compile errors.
+    const IdCat = enum {
+        @"function parameter",
+        @"local constant",
+        @"local variable",
+        @"switch tag capture",
+        capture,
+    };
+
+    /// This is always a `const` local and importantly the `inst` is a value type, not a pointer.
+    /// This structure lives as long as the AST generation of the Block
+    /// node that contains the variable.
+    const LocalVal = struct {
+        const base_tag: Tag = .local_val;
+        base: Scope = Scope{ .tag = base_tag },
+        /// Parents can be: `LocalVal`, `LocalPtr`, `GenZir`, `Defer`, `Namespace`.
+        parent: *Scope,
+        gen_zir: *GenZir,
+        inst: Zir.Inst.Ref,
+        /// Source location of the corresponding variable declaration.
+        token_src: Ast.TokenIndex,
+        /// Track the first identifier where it is referenced.
+        /// 0 means never referenced.
+        used: Ast.TokenIndex = 0,
+        /// Track the identifier where it is discarded, e.g. `_ = foo;`.
+        /// 0 means never discarded.
+        discarded: Ast.TokenIndex = 0,
+        /// String table index.
+        name: Zir.NullTerminatedString,
+        id_cat: IdCat,
+    };
+
+    /// This could be a `const` or `var` local. It has a pointer instead of a value.
+    /// This structure lives as long as the AST generation of the Block
+    /// node that contains the variable.
+    const LocalPtr = struct {
+        const base_tag: Tag = .local_ptr;
+        base: Scope = Scope{ .tag = base_tag },
+        /// Parents can be: `LocalVal`, `LocalPtr`, `GenZir`, `Defer`, `Namespace`.
+        parent: *Scope,
+        gen_zir: *GenZir,
+        ptr: Zir.Inst.Ref,
+        /// Source location of the corresponding variable declaration.
+        token_src: Ast.TokenIndex,
+        /// Track the first identifier where it is referenced.
+        /// 0 means never referenced.
+        used: Ast.TokenIndex = 0,
+        /// Track the identifier where it is discarded, e.g. `_ = foo;`.
+        /// 0 means never discarded.
+        discarded: Ast.TokenIndex = 0,
+        /// Whether this value is used as an lvalue after initialization.
+        /// If not, we know it can be `const`, so will emit a compile error if it is `var`.
+        used_as_lvalue: bool = false,
+        /// String table index.
+        name: Zir.NullTerminatedString,
+        id_cat: IdCat,
+        /// true means we find out during Sema whether the value is comptime.
+        /// false means it is already known during AstGen that the value is runtime-known.
+        maybe_comptime: bool,
+    };
+
+    const Defer = struct {
+        base: Scope,
+        /// Parents can be: `LocalVal`, `LocalPtr`, `GenZir`, `Defer`, `Namespace`.
+        parent: *Scope,
+        index: u32,
+        len: u32,
+        remapped_err_code: Zir.Inst.OptionalIndex = .none,
+    };
+
+    /// Represents a global scope that has any number of declarations in it.
+    /// Each declaration has this as the parent scope.
+    const Namespace = struct {
+        const base_tag: Tag = .namespace;
+        base: Scope = Scope{ .tag = base_tag },
+
+        /// Parents can be: `LocalVal`, `LocalPtr`, `GenZir`, `Defer`, `Namespace`.
+        parent: *Scope,
+        /// Maps string table index to the source location of declaration,
+        /// for the purposes of reporting name shadowing compile errors.
+        decls: std.AutoHashMapUnmanaged(Zir.NullTerminatedString, Ast.Node.Index) = .{},
+        node: Ast.Node.Index,
+        inst: Zir.Inst.Index,
+
+        /// The astgen scope containing this namespace.
+        /// Only valid during astgen.
+        declaring_gz: ?*GenZir,
+
+        /// Map from the raw captured value to the instruction
+        /// ref of the capture for decls in this namespace.
+        captures: std.AutoArrayHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{},
+
+        fn deinit(self: *Namespace, gpa: Allocator) void {
+            self.decls.deinit(gpa);
+            self.captures.deinit(gpa);
+            self.* = undefined;
+        }
+    };
+
+    const Top = struct {
+        const base_tag: Scope.Tag = .top;
+        base: Scope = Scope{ .tag = base_tag },
+    };
+};
+
+/// This is a temporary structure; references to it are valid only
+/// while constructing a `Zir`.
+const GenZir = struct {
+    const base_tag: Scope.Tag = .gen_zir;
+    base: Scope = Scope{ .tag = base_tag },
+    /// Whether we're already in a scope known to be comptime. This is set
+    /// whenever we know Sema will analyze the current block with `is_comptime`,
+    /// for instance when we're within a `struct_decl` or a `block_comptime`.
+    is_comptime: bool,
+    /// Whether we're in an expression within a `@TypeOf` operand. In this case, closure of runtime
+    /// variables is permitted where it is usually not.
+    is_typeof: bool = false,
+    /// This is set to true for inline loops; false otherwise.
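+    /// (For example, the bodies of `inline while` and `inline for` loops.)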
+ is_inline: bool = false, + c_import: bool = false, + /// How decls created in this scope should be named. + anon_name_strategy: Zir.Inst.NameStrategy = .anon, + /// The containing decl AST node. + decl_node_index: Ast.Node.Index, + /// The containing decl line index, absolute. + decl_line: u32, + /// Parents can be: `LocalVal`, `LocalPtr`, `GenZir`, `Defer`, `Namespace`. + parent: *Scope, + /// All `GenZir` scopes for the same ZIR share this. + astgen: *AstGen, + /// Keeps track of the list of instructions in this scope. Possibly shared. + /// Indexes to instructions in `astgen`. + instructions: *ArrayListUnmanaged(Zir.Inst.Index), + /// A sub-block may share its instructions ArrayList with containing GenZir, + /// if use is strictly nested. This saves prior size of list for unstacking. + instructions_top: usize, + label: ?Label = null, + break_block: Zir.Inst.OptionalIndex = .none, + continue_block: Zir.Inst.OptionalIndex = .none, + /// Only valid when setBreakResultInfo is called. + break_result_info: AstGen.ResultInfo = undefined, + + suspend_node: Ast.Node.Index = 0, + nosuspend_node: Ast.Node.Index = 0, + /// Set if this GenZir is a defer. + cur_defer_node: Ast.Node.Index = 0, + // Set if this GenZir is a defer or it is inside a defer. + any_defer_node: Ast.Node.Index = 0, + + /// Namespace members are lazy. When executing a decl within a namespace, + /// any references to external instructions need to be treated specially. + /// This list tracks those references. See also .closure_capture and .closure_get. + /// Keys are the raw instruction index, values are the closure_capture instruction. + captures: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{}, + + const unstacked_top = std.math.maxInt(usize); + /// Call unstack before adding any new instructions to containing GenZir. + fn unstack(self: *GenZir) void { + if (self.instructions_top != unstacked_top) { + self.instructions.items.len = self.instructions_top; + self.instructions_top = unstacked_top; + } + } + + fn isEmpty(self: *const GenZir) bool { + return (self.instructions_top == unstacked_top) or + (self.instructions.items.len == self.instructions_top); + } + + fn instructionsSlice(self: *const GenZir) []Zir.Inst.Index { + return if (self.instructions_top == unstacked_top) + &[0]Zir.Inst.Index{} + else + self.instructions.items[self.instructions_top..]; + } + + fn instructionsSliceUpto(self: *const GenZir, stacked_gz: *GenZir) []Zir.Inst.Index { + return if (self.instructions_top == unstacked_top) + &[0]Zir.Inst.Index{} + else if (self.instructions == stacked_gz.instructions and stacked_gz.instructions_top != unstacked_top) + self.instructions.items[self.instructions_top..stacked_gz.instructions_top] + else + self.instructions.items[self.instructions_top..]; + } + + fn makeSubBlock(gz: *GenZir, scope: *Scope) GenZir { + return .{ + .is_comptime = gz.is_comptime, + .is_typeof = gz.is_typeof, + .c_import = gz.c_import, + .decl_node_index = gz.decl_node_index, + .decl_line = gz.decl_line, + .parent = scope, + .astgen = gz.astgen, + .suspend_node = gz.suspend_node, + .nosuspend_node = gz.nosuspend_node, + .any_defer_node = gz.any_defer_node, + .instructions = gz.instructions, + .instructions_top = gz.instructions.items.len, + }; + } + + const Label = struct { + token: Ast.TokenIndex, + block_inst: Zir.Inst.Index, + used: bool = false, + }; + + /// Assumes nothing stacked on `gz`. 
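+    /// ("Stacked" here means a sub-`GenZir` created via `makeSubBlock` is
+    /// still appending to this scope's shared `instructions` list and has not
+    /// yet been unstacked; see `instructions_top` and `unstack`.)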
+ fn endsWithNoReturn(gz: GenZir) bool { + if (gz.isEmpty()) return false; + const tags = gz.astgen.instructions.items(.tag); + const last_inst = gz.instructions.items[gz.instructions.items.len - 1]; + return tags[@intFromEnum(last_inst)].isNoReturn(); + } + + /// TODO all uses of this should be replaced with uses of `endsWithNoReturn`. + fn refIsNoReturn(gz: GenZir, inst_ref: Zir.Inst.Ref) bool { + if (inst_ref == .unreachable_value) return true; + if (inst_ref.toIndex()) |inst_index| { + return gz.astgen.instructions.items(.tag)[@intFromEnum(inst_index)].isNoReturn(); + } + return false; + } + + fn nodeIndexToRelative(gz: GenZir, node_index: Ast.Node.Index) i32 { + return @as(i32, @bitCast(node_index)) - @as(i32, @bitCast(gz.decl_node_index)); + } + + fn tokenIndexToRelative(gz: GenZir, token: Ast.TokenIndex) u32 { + return token - gz.srcToken(); + } + + fn srcToken(gz: GenZir) Ast.TokenIndex { + return gz.astgen.tree.firstToken(gz.decl_node_index); + } + + fn setBreakResultInfo(gz: *GenZir, parent_ri: AstGen.ResultInfo) void { + // Depending on whether the result location is a pointer or value, different + // ZIR needs to be generated. In the former case we rely on storing to the + // pointer to communicate the result, and use breakvoid; in the latter case + // the block break instructions will have the result values. + switch (parent_ri.rl) { + .coerced_ty => |ty_inst| { + // Type coercion needs to happen before breaks. + gz.break_result_info = .{ .rl = .{ .ty = ty_inst }, .ctx = parent_ri.ctx }; + }, + .discard => { + // We don't forward the result context here. This prevents + // "unnecessary discard" errors from being caused by expressions + // far from the actual discard, such as a `break` from a + // discarded block. + gz.break_result_info = .{ .rl = .discard }; + }, + else => { + gz.break_result_info = parent_ri; + }, + } + } + + /// Assumes nothing stacked on `gz`. Unstacks `gz`. + fn setBoolBrBody(gz: *GenZir, bool_br: Zir.Inst.Index, bool_br_lhs: Zir.Inst.Ref) !void { + const astgen = gz.astgen; + const gpa = astgen.gpa; + const body = gz.instructionsSlice(); + const body_len = astgen.countBodyLenAfterFixups(body); + try astgen.extra.ensureUnusedCapacity( + gpa, + @typeInfo(Zir.Inst.BoolBr).Struct.fields.len + body_len, + ); + const zir_datas = astgen.instructions.items(.data); + zir_datas[@intFromEnum(bool_br)].pl_node.payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.BoolBr{ + .lhs = bool_br_lhs, + .body_len = body_len, + }); + astgen.appendBodyWithFixups(body); + gz.unstack(); + } + + /// Assumes nothing stacked on `gz`. Unstacks `gz`. + fn setBlockBody(gz: *GenZir, inst: Zir.Inst.Index) !void { + const astgen = gz.astgen; + const gpa = astgen.gpa; + const body = gz.instructionsSlice(); + const body_len = astgen.countBodyLenAfterFixups(body); + try astgen.extra.ensureUnusedCapacity( + gpa, + @typeInfo(Zir.Inst.Block).Struct.fields.len + body_len, + ); + const zir_datas = astgen.instructions.items(.data); + zir_datas[@intFromEnum(inst)].pl_node.payload_index = astgen.addExtraAssumeCapacity( + Zir.Inst.Block{ .body_len = body_len }, + ); + astgen.appendBodyWithFixups(body); + gz.unstack(); + } + + /// Assumes nothing stacked on `gz`. Unstacks `gz`. 
+ fn setTryBody(gz: *GenZir, inst: Zir.Inst.Index, operand: Zir.Inst.Ref) !void { + const astgen = gz.astgen; + const gpa = astgen.gpa; + const body = gz.instructionsSlice(); + const body_len = astgen.countBodyLenAfterFixups(body); + try astgen.extra.ensureUnusedCapacity( + gpa, + @typeInfo(Zir.Inst.Try).Struct.fields.len + body_len, + ); + const zir_datas = astgen.instructions.items(.data); + zir_datas[@intFromEnum(inst)].pl_node.payload_index = astgen.addExtraAssumeCapacity( + Zir.Inst.Try{ + .operand = operand, + .body_len = body_len, + }, + ); + astgen.appendBodyWithFixups(body); + gz.unstack(); + } + + /// Must be called with the following stack set up: + /// * gz (bottom) + /// * align_gz + /// * addrspace_gz + /// * section_gz + /// * cc_gz + /// * ret_gz + /// * body_gz (top) + /// Unstacks all of those except for `gz`. + fn addFunc(gz: *GenZir, args: struct { + src_node: Ast.Node.Index, + lbrace_line: u32 = 0, + lbrace_column: u32 = 0, + param_block: Zir.Inst.Index, + + align_gz: ?*GenZir, + addrspace_gz: ?*GenZir, + section_gz: ?*GenZir, + cc_gz: ?*GenZir, + ret_gz: ?*GenZir, + body_gz: ?*GenZir, + + align_ref: Zir.Inst.Ref, + addrspace_ref: Zir.Inst.Ref, + section_ref: Zir.Inst.Ref, + cc_ref: Zir.Inst.Ref, + ret_ref: Zir.Inst.Ref, + + lib_name: Zir.NullTerminatedString, + noalias_bits: u32, + is_var_args: bool, + is_inferred_error: bool, + is_test: bool, + is_extern: bool, + is_noinline: bool, + }) !Zir.Inst.Ref { + assert(args.src_node != 0); + const astgen = gz.astgen; + const gpa = astgen.gpa; + const ret_ref = if (args.ret_ref == .void_type) .none else args.ret_ref; + const new_index: Zir.Inst.Index = @enumFromInt(astgen.instructions.len); + + try astgen.instructions.ensureUnusedCapacity(gpa, 1); + + var body: []Zir.Inst.Index = &[0]Zir.Inst.Index{}; + var ret_body: []Zir.Inst.Index = &[0]Zir.Inst.Index{}; + var src_locs_and_hash_buffer: [7]u32 = undefined; + var src_locs_and_hash: []u32 = src_locs_and_hash_buffer[0..0]; + if (args.body_gz) |body_gz| { + const tree = astgen.tree; + const node_tags = tree.nodes.items(.tag); + const node_datas = tree.nodes.items(.data); + const token_starts = tree.tokens.items(.start); + const fn_decl = args.src_node; + assert(node_tags[fn_decl] == .fn_decl or node_tags[fn_decl] == .test_decl); + const block = node_datas[fn_decl].rhs; + const rbrace_start = token_starts[tree.lastToken(block)]; + astgen.advanceSourceCursor(rbrace_start); + const rbrace_line: u32 = @intCast(astgen.source_line - gz.decl_line); + const rbrace_column: u32 = @intCast(astgen.source_column); + + const columns = args.lbrace_column | (rbrace_column << 16); + + const proto_hash: std.zig.SrcHash = switch (node_tags[fn_decl]) { + .fn_decl => sig_hash: { + const proto_node = node_datas[fn_decl].lhs; + break :sig_hash std.zig.hashSrc(tree.getNodeSource(proto_node)); + }, + .test_decl => std.zig.hashSrc(""), // tests don't have a prototype + else => unreachable, + }; + const proto_hash_arr: [4]u32 = @bitCast(proto_hash); + + src_locs_and_hash_buffer = .{ + args.lbrace_line, + rbrace_line, + columns, + proto_hash_arr[0], + proto_hash_arr[1], + proto_hash_arr[2], + proto_hash_arr[3], + }; + src_locs_and_hash = &src_locs_and_hash_buffer; + + body = body_gz.instructionsSlice(); + if (args.ret_gz) |ret_gz| + ret_body = ret_gz.instructionsSliceUpto(body_gz); + } else { + if (args.ret_gz) |ret_gz| + ret_body = ret_gz.instructionsSlice(); + } + const body_len = astgen.countBodyLenAfterFixups(body); + + if (args.cc_ref != .none or args.lib_name != .empty or args.is_var_args or 
args.is_test or + args.is_extern or args.align_ref != .none or args.section_ref != .none or + args.addrspace_ref != .none or args.noalias_bits != 0 or args.is_noinline) + { + var align_body: []Zir.Inst.Index = &.{}; + var addrspace_body: []Zir.Inst.Index = &.{}; + var section_body: []Zir.Inst.Index = &.{}; + var cc_body: []Zir.Inst.Index = &.{}; + if (args.ret_gz != null) { + align_body = args.align_gz.?.instructionsSliceUpto(args.addrspace_gz.?); + addrspace_body = args.addrspace_gz.?.instructionsSliceUpto(args.section_gz.?); + section_body = args.section_gz.?.instructionsSliceUpto(args.cc_gz.?); + cc_body = args.cc_gz.?.instructionsSliceUpto(args.ret_gz.?); + } + + try astgen.extra.ensureUnusedCapacity( + gpa, + @typeInfo(Zir.Inst.FuncFancy).Struct.fields.len + + fancyFnExprExtraLen(astgen, align_body, args.align_ref) + + fancyFnExprExtraLen(astgen, addrspace_body, args.addrspace_ref) + + fancyFnExprExtraLen(astgen, section_body, args.section_ref) + + fancyFnExprExtraLen(astgen, cc_body, args.cc_ref) + + fancyFnExprExtraLen(astgen, ret_body, ret_ref) + + body_len + src_locs_and_hash.len + + @intFromBool(args.lib_name != .empty) + + @intFromBool(args.noalias_bits != 0), + ); + const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.FuncFancy{ + .param_block = args.param_block, + .body_len = body_len, + .bits = .{ + .is_var_args = args.is_var_args, + .is_inferred_error = args.is_inferred_error, + .is_test = args.is_test, + .is_extern = args.is_extern, + .is_noinline = args.is_noinline, + .has_lib_name = args.lib_name != .empty, + .has_any_noalias = args.noalias_bits != 0, + + .has_align_ref = args.align_ref != .none, + .has_addrspace_ref = args.addrspace_ref != .none, + .has_section_ref = args.section_ref != .none, + .has_cc_ref = args.cc_ref != .none, + .has_ret_ty_ref = ret_ref != .none, + + .has_align_body = align_body.len != 0, + .has_addrspace_body = addrspace_body.len != 0, + .has_section_body = section_body.len != 0, + .has_cc_body = cc_body.len != 0, + .has_ret_ty_body = ret_body.len != 0, + }, + }); + if (args.lib_name != .empty) { + astgen.extra.appendAssumeCapacity(@intFromEnum(args.lib_name)); + } + + const zir_datas = astgen.instructions.items(.data); + if (align_body.len != 0) { + astgen.extra.appendAssumeCapacity(countBodyLenAfterFixups(astgen, align_body)); + astgen.appendBodyWithFixups(align_body); + const break_extra = zir_datas[@intFromEnum(align_body[align_body.len - 1])].@"break".payload_index; + astgen.extra.items[break_extra + std.meta.fieldIndex(Zir.Inst.Break, "block_inst").?] = + @intFromEnum(new_index); + } else if (args.align_ref != .none) { + astgen.extra.appendAssumeCapacity(@intFromEnum(args.align_ref)); + } + if (addrspace_body.len != 0) { + astgen.extra.appendAssumeCapacity(countBodyLenAfterFixups(astgen, addrspace_body)); + astgen.appendBodyWithFixups(addrspace_body); + const break_extra = + zir_datas[@intFromEnum(addrspace_body[addrspace_body.len - 1])].@"break".payload_index; + astgen.extra.items[break_extra + std.meta.fieldIndex(Zir.Inst.Break, "block_inst").?] 
= + @intFromEnum(new_index); + } else if (args.addrspace_ref != .none) { + astgen.extra.appendAssumeCapacity(@intFromEnum(args.addrspace_ref)); + } + if (section_body.len != 0) { + astgen.extra.appendAssumeCapacity(countBodyLenAfterFixups(astgen, section_body)); + astgen.appendBodyWithFixups(section_body); + const break_extra = + zir_datas[@intFromEnum(section_body[section_body.len - 1])].@"break".payload_index; + astgen.extra.items[break_extra + std.meta.fieldIndex(Zir.Inst.Break, "block_inst").?] = + @intFromEnum(new_index); + } else if (args.section_ref != .none) { + astgen.extra.appendAssumeCapacity(@intFromEnum(args.section_ref)); + } + if (cc_body.len != 0) { + astgen.extra.appendAssumeCapacity(countBodyLenAfterFixups(astgen, cc_body)); + astgen.appendBodyWithFixups(cc_body); + const break_extra = zir_datas[@intFromEnum(cc_body[cc_body.len - 1])].@"break".payload_index; + astgen.extra.items[break_extra + std.meta.fieldIndex(Zir.Inst.Break, "block_inst").?] = + @intFromEnum(new_index); + } else if (args.cc_ref != .none) { + astgen.extra.appendAssumeCapacity(@intFromEnum(args.cc_ref)); + } + if (ret_body.len != 0) { + astgen.extra.appendAssumeCapacity(countBodyLenAfterFixups(astgen, ret_body)); + astgen.appendBodyWithFixups(ret_body); + const break_extra = zir_datas[@intFromEnum(ret_body[ret_body.len - 1])].@"break".payload_index; + astgen.extra.items[break_extra + std.meta.fieldIndex(Zir.Inst.Break, "block_inst").?] = + @intFromEnum(new_index); + } else if (ret_ref != .none) { + astgen.extra.appendAssumeCapacity(@intFromEnum(ret_ref)); + } + + if (args.noalias_bits != 0) { + astgen.extra.appendAssumeCapacity(args.noalias_bits); + } + + astgen.appendBodyWithFixups(body); + astgen.extra.appendSliceAssumeCapacity(src_locs_and_hash); + + // Order is important when unstacking. + if (args.body_gz) |body_gz| body_gz.unstack(); + if (args.ret_gz != null) { + args.ret_gz.?.unstack(); + args.cc_gz.?.unstack(); + args.section_gz.?.unstack(); + args.addrspace_gz.?.unstack(); + args.align_gz.?.unstack(); + } + + try gz.instructions.ensureUnusedCapacity(gpa, 1); + + astgen.instructions.appendAssumeCapacity(.{ + .tag = .func_fancy, + .data = .{ .pl_node = .{ + .src_node = gz.nodeIndexToRelative(args.src_node), + .payload_index = payload_index, + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + return new_index.toRef(); + } else { + try astgen.extra.ensureUnusedCapacity( + gpa, + @typeInfo(Zir.Inst.Func).Struct.fields.len + 1 + + fancyFnExprExtraLen(astgen, ret_body, ret_ref) + + body_len + src_locs_and_hash.len, + ); + + const ret_body_len = if (ret_body.len != 0) + countBodyLenAfterFixups(astgen, ret_body) + else + @intFromBool(ret_ref != .none); + + const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.Func{ + .param_block = args.param_block, + .ret_body_len = ret_body_len, + .body_len = body_len, + }); + const zir_datas = astgen.instructions.items(.data); + if (ret_body.len != 0) { + astgen.appendBodyWithFixups(ret_body); + + const break_extra = zir_datas[@intFromEnum(ret_body[ret_body.len - 1])].@"break".payload_index; + astgen.extra.items[break_extra + std.meta.fieldIndex(Zir.Inst.Break, "block_inst").?] = + @intFromEnum(new_index); + } else if (ret_ref != .none) { + astgen.extra.appendAssumeCapacity(@intFromEnum(ret_ref)); + } + astgen.appendBodyWithFixups(body); + astgen.extra.appendSliceAssumeCapacity(src_locs_and_hash); + + // Order is important when unstacking. 
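+            // The sub-scopes were stacked bottom-up (align, addrspace, section,
+            // cc, ret, body), so they unstack top-down: each `unstack` truncates
+            // the shared `instructions` list back to that scope's
+            // `instructions_top`.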
+ if (args.body_gz) |body_gz| body_gz.unstack(); + if (args.ret_gz) |ret_gz| ret_gz.unstack(); + if (args.cc_gz) |cc_gz| cc_gz.unstack(); + if (args.section_gz) |section_gz| section_gz.unstack(); + if (args.addrspace_gz) |addrspace_gz| addrspace_gz.unstack(); + if (args.align_gz) |align_gz| align_gz.unstack(); + + try gz.instructions.ensureUnusedCapacity(gpa, 1); + + const tag: Zir.Inst.Tag = if (args.is_inferred_error) .func_inferred else .func; + astgen.instructions.appendAssumeCapacity(.{ + .tag = tag, + .data = .{ .pl_node = .{ + .src_node = gz.nodeIndexToRelative(args.src_node), + .payload_index = payload_index, + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + return new_index.toRef(); + } + } + + fn fancyFnExprExtraLen(astgen: *AstGen, body: []Zir.Inst.Index, ref: Zir.Inst.Ref) u32 { + // In the case of non-empty body, there is one for the body length, + // and then one for each instruction. + return countBodyLenAfterFixups(astgen, body) + @intFromBool(ref != .none); + } + + fn addVar(gz: *GenZir, args: struct { + align_inst: Zir.Inst.Ref, + lib_name: Zir.NullTerminatedString, + var_type: Zir.Inst.Ref, + init: Zir.Inst.Ref, + is_extern: bool, + is_const: bool, + is_threadlocal: bool, + }) !Zir.Inst.Ref { + const astgen = gz.astgen; + const gpa = astgen.gpa; + + try gz.instructions.ensureUnusedCapacity(gpa, 1); + try astgen.instructions.ensureUnusedCapacity(gpa, 1); + + try astgen.extra.ensureUnusedCapacity( + gpa, + @typeInfo(Zir.Inst.ExtendedVar).Struct.fields.len + + @intFromBool(args.lib_name != .empty) + + @intFromBool(args.align_inst != .none) + + @intFromBool(args.init != .none), + ); + const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.ExtendedVar{ + .var_type = args.var_type, + }); + if (args.lib_name != .empty) { + astgen.extra.appendAssumeCapacity(@intFromEnum(args.lib_name)); + } + if (args.align_inst != .none) { + astgen.extra.appendAssumeCapacity(@intFromEnum(args.align_inst)); + } + if (args.init != .none) { + astgen.extra.appendAssumeCapacity(@intFromEnum(args.init)); + } + + const new_index: Zir.Inst.Index = @enumFromInt(astgen.instructions.len); + astgen.instructions.appendAssumeCapacity(.{ + .tag = .extended, + .data = .{ .extended = .{ + .opcode = .variable, + .small = @bitCast(Zir.Inst.ExtendedVar.Small{ + .has_lib_name = args.lib_name != .empty, + .has_align = args.align_inst != .none, + .has_init = args.init != .none, + .is_extern = args.is_extern, + .is_const = args.is_const, + .is_threadlocal = args.is_threadlocal, + }), + .operand = payload_index, + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + return new_index.toRef(); + } + + fn addInt(gz: *GenZir, integer: u64) !Zir.Inst.Ref { + return gz.add(.{ + .tag = .int, + .data = .{ .int = integer }, + }); + } + + fn addIntBig(gz: *GenZir, limbs: []const std.math.big.Limb) !Zir.Inst.Ref { + const astgen = gz.astgen; + const gpa = astgen.gpa; + try gz.instructions.ensureUnusedCapacity(gpa, 1); + try astgen.instructions.ensureUnusedCapacity(gpa, 1); + try astgen.string_bytes.ensureUnusedCapacity(gpa, @sizeOf(std.math.big.Limb) * limbs.len); + + const new_index: Zir.Inst.Index = @enumFromInt(astgen.instructions.len); + astgen.instructions.appendAssumeCapacity(.{ + .tag = .int_big, + .data = .{ .str = .{ + .start = @enumFromInt(astgen.string_bytes.items.len), + .len = @intCast(limbs.len), + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + astgen.string_bytes.appendSliceAssumeCapacity(mem.sliceAsBytes(limbs)); + return new_index.toRef(); + } + + fn addFloat(gz: 
*GenZir, number: f64) !Zir.Inst.Ref { + return gz.add(.{ + .tag = .float, + .data = .{ .float = number }, + }); + } + + fn addUnNode( + gz: *GenZir, + tag: Zir.Inst.Tag, + operand: Zir.Inst.Ref, + /// Absolute node index. This function does the conversion to offset from Decl. + src_node: Ast.Node.Index, + ) !Zir.Inst.Ref { + assert(operand != .none); + return gz.add(.{ + .tag = tag, + .data = .{ .un_node = .{ + .operand = operand, + .src_node = gz.nodeIndexToRelative(src_node), + } }, + }); + } + + fn makeUnNode( + gz: *GenZir, + tag: Zir.Inst.Tag, + operand: Zir.Inst.Ref, + /// Absolute node index. This function does the conversion to offset from Decl. + src_node: Ast.Node.Index, + ) !Zir.Inst.Index { + assert(operand != .none); + const new_index: Zir.Inst.Index = @enumFromInt(gz.astgen.instructions.len); + try gz.astgen.instructions.append(gz.astgen.gpa, .{ + .tag = tag, + .data = .{ .un_node = .{ + .operand = operand, + .src_node = gz.nodeIndexToRelative(src_node), + } }, + }); + return new_index; + } + + fn addPlNode( + gz: *GenZir, + tag: Zir.Inst.Tag, + /// Absolute node index. This function does the conversion to offset from Decl. + src_node: Ast.Node.Index, + extra: anytype, + ) !Zir.Inst.Ref { + const gpa = gz.astgen.gpa; + try gz.instructions.ensureUnusedCapacity(gpa, 1); + try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); + + const payload_index = try gz.astgen.addExtra(extra); + const new_index: Zir.Inst.Index = @enumFromInt(gz.astgen.instructions.len); + gz.astgen.instructions.appendAssumeCapacity(.{ + .tag = tag, + .data = .{ .pl_node = .{ + .src_node = gz.nodeIndexToRelative(src_node), + .payload_index = payload_index, + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + return new_index.toRef(); + } + + fn addPlNodePayloadIndex( + gz: *GenZir, + tag: Zir.Inst.Tag, + /// Absolute node index. This function does the conversion to offset from Decl. + src_node: Ast.Node.Index, + payload_index: u32, + ) !Zir.Inst.Ref { + return try gz.add(.{ + .tag = tag, + .data = .{ .pl_node = .{ + .src_node = gz.nodeIndexToRelative(src_node), + .payload_index = payload_index, + } }, + }); + } + + /// Supports `param_gz` stacked on `gz`. Assumes nothing stacked on `param_gz`. Unstacks `param_gz`. + fn addParam( + gz: *GenZir, + param_gz: *GenZir, + tag: Zir.Inst.Tag, + /// Absolute token index. This function does the conversion to Decl offset. 
+ abs_tok_index: Ast.TokenIndex, + name: Zir.NullTerminatedString, + first_doc_comment: ?Ast.TokenIndex, + ) !Zir.Inst.Index { + const gpa = gz.astgen.gpa; + const param_body = param_gz.instructionsSlice(); + const body_len = gz.astgen.countBodyLenAfterFixups(param_body); + try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); + try gz.astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.Param).Struct.fields.len + body_len); + + const doc_comment_index = if (first_doc_comment) |first| + try gz.astgen.docCommentAsStringFromFirst(abs_tok_index, first) + else + .empty; + + const payload_index = gz.astgen.addExtraAssumeCapacity(Zir.Inst.Param{ + .name = name, + .doc_comment = doc_comment_index, + .body_len = @intCast(body_len), + }); + gz.astgen.appendBodyWithFixups(param_body); + param_gz.unstack(); + + const new_index: Zir.Inst.Index = @enumFromInt(gz.astgen.instructions.len); + gz.astgen.instructions.appendAssumeCapacity(.{ + .tag = tag, + .data = .{ .pl_tok = .{ + .src_tok = gz.tokenIndexToRelative(abs_tok_index), + .payload_index = payload_index, + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + return new_index; + } + + fn addExtendedPayload(gz: *GenZir, opcode: Zir.Inst.Extended, extra: anytype) !Zir.Inst.Ref { + return addExtendedPayloadSmall(gz, opcode, undefined, extra); + } + + fn addExtendedPayloadSmall( + gz: *GenZir, + opcode: Zir.Inst.Extended, + small: u16, + extra: anytype, + ) !Zir.Inst.Ref { + const gpa = gz.astgen.gpa; + + try gz.instructions.ensureUnusedCapacity(gpa, 1); + try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); + + const payload_index = try gz.astgen.addExtra(extra); + const new_index: Zir.Inst.Index = @enumFromInt(gz.astgen.instructions.len); + gz.astgen.instructions.appendAssumeCapacity(.{ + .tag = .extended, + .data = .{ .extended = .{ + .opcode = opcode, + .small = small, + .operand = payload_index, + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + return new_index.toRef(); + } + + fn addExtendedMultiOp( + gz: *GenZir, + opcode: Zir.Inst.Extended, + node: Ast.Node.Index, + operands: []const Zir.Inst.Ref, + ) !Zir.Inst.Ref { + const astgen = gz.astgen; + const gpa = astgen.gpa; + + try gz.instructions.ensureUnusedCapacity(gpa, 1); + try astgen.instructions.ensureUnusedCapacity(gpa, 1); + try astgen.extra.ensureUnusedCapacity( + gpa, + @typeInfo(Zir.Inst.NodeMultiOp).Struct.fields.len + operands.len, + ); + + const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.NodeMultiOp{ + .src_node = gz.nodeIndexToRelative(node), + }); + const new_index: Zir.Inst.Index = @enumFromInt(astgen.instructions.len); + astgen.instructions.appendAssumeCapacity(.{ + .tag = .extended, + .data = .{ .extended = .{ + .opcode = opcode, + .small = @intCast(operands.len), + .operand = payload_index, + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + astgen.appendRefsAssumeCapacity(operands); + return new_index.toRef(); + } + + fn addExtendedMultiOpPayloadIndex( + gz: *GenZir, + opcode: Zir.Inst.Extended, + payload_index: u32, + trailing_len: usize, + ) !Zir.Inst.Ref { + const astgen = gz.astgen; + const gpa = astgen.gpa; + + try gz.instructions.ensureUnusedCapacity(gpa, 1); + try astgen.instructions.ensureUnusedCapacity(gpa, 1); + const new_index: Zir.Inst.Index = @enumFromInt(astgen.instructions.len); + astgen.instructions.appendAssumeCapacity(.{ + .tag = .extended, + .data = .{ .extended = .{ + .opcode = opcode, + .small = @intCast(trailing_len), + .operand = payload_index, + } }, + }); + 
gz.instructions.appendAssumeCapacity(new_index); + return new_index.toRef(); + } + + fn addUnTok( + gz: *GenZir, + tag: Zir.Inst.Tag, + operand: Zir.Inst.Ref, + /// Absolute token index. This function does the conversion to Decl offset. + abs_tok_index: Ast.TokenIndex, + ) !Zir.Inst.Ref { + assert(operand != .none); + return gz.add(.{ + .tag = tag, + .data = .{ .un_tok = .{ + .operand = operand, + .src_tok = gz.tokenIndexToRelative(abs_tok_index), + } }, + }); + } + + fn makeUnTok( + gz: *GenZir, + tag: Zir.Inst.Tag, + operand: Zir.Inst.Ref, + /// Absolute token index. This function does the conversion to Decl offset. + abs_tok_index: Ast.TokenIndex, + ) !Zir.Inst.Index { + const astgen = gz.astgen; + const new_index: Zir.Inst.Index = @enumFromInt(astgen.instructions.len); + assert(operand != .none); + try astgen.instructions.append(astgen.gpa, .{ + .tag = tag, + .data = .{ .un_tok = .{ + .operand = operand, + .src_tok = gz.tokenIndexToRelative(abs_tok_index), + } }, + }); + return new_index; + } + + fn addStrTok( + gz: *GenZir, + tag: Zir.Inst.Tag, + str_index: Zir.NullTerminatedString, + /// Absolute token index. This function does the conversion to Decl offset. + abs_tok_index: Ast.TokenIndex, + ) !Zir.Inst.Ref { + return gz.add(.{ + .tag = tag, + .data = .{ .str_tok = .{ + .start = str_index, + .src_tok = gz.tokenIndexToRelative(abs_tok_index), + } }, + }); + } + + fn addSaveErrRetIndex( + gz: *GenZir, + cond: union(enum) { + always: void, + if_of_error_type: Zir.Inst.Ref, + }, + ) !Zir.Inst.Index { + return gz.addAsIndex(.{ + .tag = .save_err_ret_index, + .data = .{ .save_err_ret_index = .{ + .operand = switch (cond) { + .if_of_error_type => |x| x, + else => .none, + }, + } }, + }); + } + + const BranchTarget = union(enum) { + ret, + block: Zir.Inst.Index, + }; + + fn addRestoreErrRetIndex( + gz: *GenZir, + bt: BranchTarget, + cond: union(enum) { + always: void, + if_non_error: Zir.Inst.Ref, + }, + src_node: Ast.Node.Index, + ) !Zir.Inst.Index { + switch (cond) { + .always => return gz.addAsIndex(.{ + .tag = .restore_err_ret_index_unconditional, + .data = .{ .un_node = .{ + .operand = switch (bt) { + .ret => .none, + .block => |b| b.toRef(), + }, + .src_node = gz.nodeIndexToRelative(src_node), + } }, + }), + .if_non_error => |operand| switch (bt) { + .ret => return gz.addAsIndex(.{ + .tag = .restore_err_ret_index_fn_entry, + .data = .{ .un_node = .{ + .operand = operand, + .src_node = gz.nodeIndexToRelative(src_node), + } }, + }), + .block => |block| return (try gz.addExtendedPayload( + .restore_err_ret_index, + Zir.Inst.RestoreErrRetIndex{ + .src_node = gz.nodeIndexToRelative(src_node), + .block = block.toRef(), + .operand = operand, + }, + )).toIndex().?, + }, + } + } + + fn addBreak( + gz: *GenZir, + tag: Zir.Inst.Tag, + block_inst: Zir.Inst.Index, + operand: Zir.Inst.Ref, + ) !Zir.Inst.Index { + const gpa = gz.astgen.gpa; + try gz.instructions.ensureUnusedCapacity(gpa, 1); + + const new_index = try gz.makeBreak(tag, block_inst, operand); + gz.instructions.appendAssumeCapacity(new_index); + return new_index; + } + + fn makeBreak( + gz: *GenZir, + tag: Zir.Inst.Tag, + block_inst: Zir.Inst.Index, + operand: Zir.Inst.Ref, + ) !Zir.Inst.Index { + return gz.makeBreakCommon(tag, block_inst, operand, null); + } + + fn addBreakWithSrcNode( + gz: *GenZir, + tag: Zir.Inst.Tag, + block_inst: Zir.Inst.Index, + operand: Zir.Inst.Ref, + operand_src_node: Ast.Node.Index, + ) !Zir.Inst.Index { + const gpa = gz.astgen.gpa; + try gz.instructions.ensureUnusedCapacity(gpa, 1); + + const new_index = 
try gz.makeBreakWithSrcNode(tag, block_inst, operand, operand_src_node); + gz.instructions.appendAssumeCapacity(new_index); + return new_index; + } + + fn makeBreakWithSrcNode( + gz: *GenZir, + tag: Zir.Inst.Tag, + block_inst: Zir.Inst.Index, + operand: Zir.Inst.Ref, + operand_src_node: Ast.Node.Index, + ) !Zir.Inst.Index { + return gz.makeBreakCommon(tag, block_inst, operand, operand_src_node); + } + + fn makeBreakCommon( + gz: *GenZir, + tag: Zir.Inst.Tag, + block_inst: Zir.Inst.Index, + operand: Zir.Inst.Ref, + operand_src_node: ?Ast.Node.Index, + ) !Zir.Inst.Index { + const gpa = gz.astgen.gpa; + try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); + try gz.astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.Break).Struct.fields.len); + + const new_index: Zir.Inst.Index = @enumFromInt(gz.astgen.instructions.len); + gz.astgen.instructions.appendAssumeCapacity(.{ + .tag = tag, + .data = .{ .@"break" = .{ + .operand = operand, + .payload_index = gz.astgen.addExtraAssumeCapacity(Zir.Inst.Break{ + .operand_src_node = if (operand_src_node) |src_node| + gz.nodeIndexToRelative(src_node) + else + Zir.Inst.Break.no_src_node, + .block_inst = block_inst, + }), + } }, + }); + return new_index; + } + + fn addBin( + gz: *GenZir, + tag: Zir.Inst.Tag, + lhs: Zir.Inst.Ref, + rhs: Zir.Inst.Ref, + ) !Zir.Inst.Ref { + assert(lhs != .none); + assert(rhs != .none); + return gz.add(.{ + .tag = tag, + .data = .{ .bin = .{ + .lhs = lhs, + .rhs = rhs, + } }, + }); + } + + fn addDefer(gz: *GenZir, index: u32, len: u32) !void { + _ = try gz.add(.{ + .tag = .@"defer", + .data = .{ .@"defer" = .{ + .index = index, + .len = len, + } }, + }); + } + + fn addDecl( + gz: *GenZir, + tag: Zir.Inst.Tag, + decl_index: u32, + src_node: Ast.Node.Index, + ) !Zir.Inst.Ref { + return gz.add(.{ + .tag = tag, + .data = .{ .pl_node = .{ + .src_node = gz.nodeIndexToRelative(src_node), + .payload_index = decl_index, + } }, + }); + } + + fn addNode( + gz: *GenZir, + tag: Zir.Inst.Tag, + /// Absolute node index. This function does the conversion to offset from Decl. + src_node: Ast.Node.Index, + ) !Zir.Inst.Ref { + return gz.add(.{ + .tag = tag, + .data = .{ .node = gz.nodeIndexToRelative(src_node) }, + }); + } + + fn addInstNode( + gz: *GenZir, + tag: Zir.Inst.Tag, + inst: Zir.Inst.Index, + /// Absolute node index. This function does the conversion to offset from Decl. + src_node: Ast.Node.Index, + ) !Zir.Inst.Ref { + return gz.add(.{ + .tag = tag, + .data = .{ .inst_node = .{ + .inst = inst, + .src_node = gz.nodeIndexToRelative(src_node), + } }, + }); + } + + fn addNodeExtended( + gz: *GenZir, + opcode: Zir.Inst.Extended, + /// Absolute node index. This function does the conversion to offset from Decl. + src_node: Ast.Node.Index, + ) !Zir.Inst.Ref { + return gz.add(.{ + .tag = .extended, + .data = .{ .extended = .{ + .opcode = opcode, + .small = undefined, + .operand = @bitCast(gz.nodeIndexToRelative(src_node)), + } }, + }); + } + + fn addAllocExtended( + gz: *GenZir, + args: struct { + /// Absolute node index. This function does the conversion to offset from Decl. 
+ node: Ast.Node.Index, + type_inst: Zir.Inst.Ref, + align_inst: Zir.Inst.Ref, + is_const: bool, + is_comptime: bool, + }, + ) !Zir.Inst.Ref { + const astgen = gz.astgen; + const gpa = astgen.gpa; + + try gz.instructions.ensureUnusedCapacity(gpa, 1); + try astgen.instructions.ensureUnusedCapacity(gpa, 1); + try astgen.extra.ensureUnusedCapacity( + gpa, + @typeInfo(Zir.Inst.AllocExtended).Struct.fields.len + + @intFromBool(args.type_inst != .none) + + @intFromBool(args.align_inst != .none), + ); + const payload_index = gz.astgen.addExtraAssumeCapacity(Zir.Inst.AllocExtended{ + .src_node = gz.nodeIndexToRelative(args.node), + }); + if (args.type_inst != .none) { + astgen.extra.appendAssumeCapacity(@intFromEnum(args.type_inst)); + } + if (args.align_inst != .none) { + astgen.extra.appendAssumeCapacity(@intFromEnum(args.align_inst)); + } + + const has_type: u4 = @intFromBool(args.type_inst != .none); + const has_align: u4 = @intFromBool(args.align_inst != .none); + const is_const: u4 = @intFromBool(args.is_const); + const is_comptime: u4 = @intFromBool(args.is_comptime); + const small: u16 = has_type | (has_align << 1) | (is_const << 2) | (is_comptime << 3); + + const new_index: Zir.Inst.Index = @enumFromInt(astgen.instructions.len); + astgen.instructions.appendAssumeCapacity(.{ + .tag = .extended, + .data = .{ .extended = .{ + .opcode = .alloc, + .small = small, + .operand = payload_index, + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + return new_index.toRef(); + } + + fn addAsm( + gz: *GenZir, + args: struct { + tag: Zir.Inst.Extended, + /// Absolute node index. This function does the conversion to offset from Decl. + node: Ast.Node.Index, + asm_source: Zir.NullTerminatedString, + output_type_bits: u32, + is_volatile: bool, + outputs: []const Zir.Inst.Asm.Output, + inputs: []const Zir.Inst.Asm.Input, + clobbers: []const u32, + }, + ) !Zir.Inst.Ref { + const astgen = gz.astgen; + const gpa = astgen.gpa; + + try gz.instructions.ensureUnusedCapacity(gpa, 1); + try astgen.instructions.ensureUnusedCapacity(gpa, 1); + try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.Asm).Struct.fields.len + + args.outputs.len * @typeInfo(Zir.Inst.Asm.Output).Struct.fields.len + + args.inputs.len * @typeInfo(Zir.Inst.Asm.Input).Struct.fields.len + + args.clobbers.len); + + const payload_index = gz.astgen.addExtraAssumeCapacity(Zir.Inst.Asm{ + .src_node = gz.nodeIndexToRelative(args.node), + .asm_source = args.asm_source, + .output_type_bits = args.output_type_bits, + }); + for (args.outputs) |output| { + _ = gz.astgen.addExtraAssumeCapacity(output); + } + for (args.inputs) |input| { + _ = gz.astgen.addExtraAssumeCapacity(input); + } + gz.astgen.extra.appendSliceAssumeCapacity(args.clobbers); + + // * 0b00000000_000XXXXX - `outputs_len`. + // * 0b000000XX_XXX00000 - `inputs_len`. + // * 0b0XXXXX00_00000000 - `clobbers_len`. + // * 0bX0000000_00000000 - is volatile + const small: u16 = @as(u16, @intCast(args.outputs.len)) | + @as(u16, @intCast(args.inputs.len << 5)) | + @as(u16, @intCast(args.clobbers.len << 10)) | + (@as(u16, @intFromBool(args.is_volatile)) << 15); + + const new_index: Zir.Inst.Index = @enumFromInt(astgen.instructions.len); + astgen.instructions.appendAssumeCapacity(.{ + .tag = .extended, + .data = .{ .extended = .{ + .opcode = args.tag, + .small = small, + .operand = payload_index, + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + return new_index.toRef(); + } + + /// Note that this returns a `Zir.Inst.Index` not a ref. 
+ /// Does *not* append the block instruction to the scope. + /// Leaves the `payload_index` field undefined. + fn makeBlockInst(gz: *GenZir, tag: Zir.Inst.Tag, node: Ast.Node.Index) !Zir.Inst.Index { + const new_index: Zir.Inst.Index = @enumFromInt(gz.astgen.instructions.len); + const gpa = gz.astgen.gpa; + try gz.astgen.instructions.append(gpa, .{ + .tag = tag, + .data = .{ .pl_node = .{ + .src_node = gz.nodeIndexToRelative(node), + .payload_index = undefined, + } }, + }); + return new_index; + } + + /// Note that this returns a `Zir.Inst.Index` not a ref. + /// Leaves the `payload_index` field undefined. + fn addCondBr(gz: *GenZir, tag: Zir.Inst.Tag, node: Ast.Node.Index) !Zir.Inst.Index { + const gpa = gz.astgen.gpa; + try gz.instructions.ensureUnusedCapacity(gpa, 1); + const new_index: Zir.Inst.Index = @enumFromInt(gz.astgen.instructions.len); + try gz.astgen.instructions.append(gpa, .{ + .tag = tag, + .data = .{ .pl_node = .{ + .src_node = gz.nodeIndexToRelative(node), + .payload_index = undefined, + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + return new_index; + } + + fn setStruct(gz: *GenZir, inst: Zir.Inst.Index, args: struct { + src_node: Ast.Node.Index, + fields_len: u32, + decls_len: u32, + backing_int_ref: Zir.Inst.Ref, + backing_int_body_len: u32, + layout: std.builtin.Type.ContainerLayout, + known_non_opv: bool, + known_comptime_only: bool, + is_tuple: bool, + any_comptime_fields: bool, + any_default_inits: bool, + any_aligned_fields: bool, + fields_hash: std.zig.SrcHash, + }) !void { + const astgen = gz.astgen; + const gpa = astgen.gpa; + + // Node 0 is valid for the root `struct_decl` of a file! + assert(args.src_node != 0 or gz.parent.tag == .top); + + const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash); + + try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.StructDecl).Struct.fields.len + 4); + const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.StructDecl{ + .fields_hash_0 = fields_hash_arr[0], + .fields_hash_1 = fields_hash_arr[1], + .fields_hash_2 = fields_hash_arr[2], + .fields_hash_3 = fields_hash_arr[3], + .src_node = gz.nodeIndexToRelative(args.src_node), + }); + + if (args.fields_len != 0) { + astgen.extra.appendAssumeCapacity(args.fields_len); + } + if (args.decls_len != 0) { + astgen.extra.appendAssumeCapacity(args.decls_len); + } + if (args.backing_int_ref != .none) { + astgen.extra.appendAssumeCapacity(args.backing_int_body_len); + if (args.backing_int_body_len == 0) { + astgen.extra.appendAssumeCapacity(@intFromEnum(args.backing_int_ref)); + } + } + astgen.instructions.set(@intFromEnum(inst), .{ + .tag = .extended, + .data = .{ .extended = .{ + .opcode = .struct_decl, + .small = @bitCast(Zir.Inst.StructDecl.Small{ + .has_fields_len = args.fields_len != 0, + .has_decls_len = args.decls_len != 0, + .has_backing_int = args.backing_int_ref != .none, + .known_non_opv = args.known_non_opv, + .known_comptime_only = args.known_comptime_only, + .is_tuple = args.is_tuple, + .name_strategy = gz.anon_name_strategy, + .layout = args.layout, + .any_comptime_fields = args.any_comptime_fields, + .any_default_inits = args.any_default_inits, + .any_aligned_fields = args.any_aligned_fields, + }), + .operand = payload_index, + } }, + }); + } + + fn setUnion(gz: *GenZir, inst: Zir.Inst.Index, args: struct { + src_node: Ast.Node.Index, + tag_type: Zir.Inst.Ref, + body_len: u32, + fields_len: u32, + decls_len: u32, + layout: std.builtin.Type.ContainerLayout, + auto_enum_tag: bool, + any_aligned_fields: bool, + fields_hash: 
std.zig.SrcHash, + }) !void { + const astgen = gz.astgen; + const gpa = astgen.gpa; + + assert(args.src_node != 0); + + const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash); + + try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.UnionDecl).Struct.fields.len + 4); + const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.UnionDecl{ + .fields_hash_0 = fields_hash_arr[0], + .fields_hash_1 = fields_hash_arr[1], + .fields_hash_2 = fields_hash_arr[2], + .fields_hash_3 = fields_hash_arr[3], + .src_node = gz.nodeIndexToRelative(args.src_node), + }); + + if (args.tag_type != .none) { + astgen.extra.appendAssumeCapacity(@intFromEnum(args.tag_type)); + } + if (args.body_len != 0) { + astgen.extra.appendAssumeCapacity(args.body_len); + } + if (args.fields_len != 0) { + astgen.extra.appendAssumeCapacity(args.fields_len); + } + if (args.decls_len != 0) { + astgen.extra.appendAssumeCapacity(args.decls_len); + } + astgen.instructions.set(@intFromEnum(inst), .{ + .tag = .extended, + .data = .{ .extended = .{ + .opcode = .union_decl, + .small = @bitCast(Zir.Inst.UnionDecl.Small{ + .has_tag_type = args.tag_type != .none, + .has_body_len = args.body_len != 0, + .has_fields_len = args.fields_len != 0, + .has_decls_len = args.decls_len != 0, + .name_strategy = gz.anon_name_strategy, + .layout = args.layout, + .auto_enum_tag = args.auto_enum_tag, + .any_aligned_fields = args.any_aligned_fields, + }), + .operand = payload_index, + } }, + }); + } + + fn setEnum(gz: *GenZir, inst: Zir.Inst.Index, args: struct { + src_node: Ast.Node.Index, + tag_type: Zir.Inst.Ref, + body_len: u32, + fields_len: u32, + decls_len: u32, + nonexhaustive: bool, + fields_hash: std.zig.SrcHash, + }) !void { + const astgen = gz.astgen; + const gpa = astgen.gpa; + + assert(args.src_node != 0); + + const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash); + + try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.EnumDecl).Struct.fields.len + 4); + const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.EnumDecl{ + .fields_hash_0 = fields_hash_arr[0], + .fields_hash_1 = fields_hash_arr[1], + .fields_hash_2 = fields_hash_arr[2], + .fields_hash_3 = fields_hash_arr[3], + .src_node = gz.nodeIndexToRelative(args.src_node), + }); + + if (args.tag_type != .none) { + astgen.extra.appendAssumeCapacity(@intFromEnum(args.tag_type)); + } + if (args.body_len != 0) { + astgen.extra.appendAssumeCapacity(args.body_len); + } + if (args.fields_len != 0) { + astgen.extra.appendAssumeCapacity(args.fields_len); + } + if (args.decls_len != 0) { + astgen.extra.appendAssumeCapacity(args.decls_len); + } + astgen.instructions.set(@intFromEnum(inst), .{ + .tag = .extended, + .data = .{ .extended = .{ + .opcode = .enum_decl, + .small = @bitCast(Zir.Inst.EnumDecl.Small{ + .has_tag_type = args.tag_type != .none, + .has_body_len = args.body_len != 0, + .has_fields_len = args.fields_len != 0, + .has_decls_len = args.decls_len != 0, + .name_strategy = gz.anon_name_strategy, + .nonexhaustive = args.nonexhaustive, + }), + .operand = payload_index, + } }, + }); + } + + fn setOpaque(gz: *GenZir, inst: Zir.Inst.Index, args: struct { + src_node: Ast.Node.Index, + decls_len: u32, + }) !void { + const astgen = gz.astgen; + const gpa = astgen.gpa; + + assert(args.src_node != 0); + + try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.OpaqueDecl).Struct.fields.len + 1); + const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.OpaqueDecl{ + .src_node = gz.nodeIndexToRelative(args.src_node), + }); + + if (args.decls_len != 0) { + 
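// As in the other `set*` helpers above, optional trailing data is appended
+ // only when present; the `Small` flags below record what was written.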
astgen.extra.appendAssumeCapacity(args.decls_len);
+ }
+ astgen.instructions.set(@intFromEnum(inst), .{
+ .tag = .extended,
+ .data = .{ .extended = .{
+ .opcode = .opaque_decl,
+ .small = @bitCast(Zir.Inst.OpaqueDecl.Small{
+ .has_decls_len = args.decls_len != 0,
+ .name_strategy = gz.anon_name_strategy,
+ }),
+ .operand = payload_index,
+ } },
+ });
+ }
+
+ fn add(gz: *GenZir, inst: Zir.Inst) !Zir.Inst.Ref {
+ return (try gz.addAsIndex(inst)).toRef();
+ }
+
+ fn addAsIndex(gz: *GenZir, inst: Zir.Inst) !Zir.Inst.Index {
+ const gpa = gz.astgen.gpa;
+ try gz.instructions.ensureUnusedCapacity(gpa, 1);
+ try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1);
+
+ const new_index: Zir.Inst.Index = @enumFromInt(gz.astgen.instructions.len);
+ gz.astgen.instructions.appendAssumeCapacity(inst);
+ gz.instructions.appendAssumeCapacity(new_index);
+ return new_index;
+ }
+
+ fn reserveInstructionIndex(gz: *GenZir) !Zir.Inst.Index {
+ const gpa = gz.astgen.gpa;
+ try gz.instructions.ensureUnusedCapacity(gpa, 1);
+ try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1);
+
+ const new_index: Zir.Inst.Index = @enumFromInt(gz.astgen.instructions.len);
+ gz.astgen.instructions.len += 1;
+ gz.instructions.appendAssumeCapacity(new_index);
+ return new_index;
+ }
+
+ fn addRet(gz: *GenZir, ri: ResultInfo, operand: Zir.Inst.Ref, node: Ast.Node.Index) !void {
+ switch (ri.rl) {
+ .ptr => |ptr_res| _ = try gz.addUnNode(.ret_load, ptr_res.inst, node),
+ .coerced_ty => _ = try gz.addUnNode(.ret_node, operand, node),
+ else => unreachable,
+ }
+ }
+
+ fn addNamespaceCaptures(gz: *GenZir, namespace: *Scope.Namespace) !void {
+ if (namespace.captures.count() > 0) {
+ try gz.instructions.ensureUnusedCapacity(gz.astgen.gpa, namespace.captures.count());
+ for (namespace.captures.values()) |capture| {
+ gz.instructions.appendAssumeCapacity(capture);
+ }
+ }
+ }
+
+ fn addDbgVar(gz: *GenZir, tag: Zir.Inst.Tag, name: Zir.NullTerminatedString, inst: Zir.Inst.Ref) !void {
+ if (gz.is_comptime) return;
+
+ _ = try gz.add(.{ .tag = tag, .data = .{
+ .str_op = .{
+ .str = name,
+ .operand = inst,
+ },
+ } });
+ }
+};
+
+/// This can only be used for short-lived references; the memory becomes
+/// invalidated when another string is added.
+fn nullTerminatedString(astgen: AstGen, index: Zir.NullTerminatedString) [*:0]const u8 {
+ return @ptrCast(astgen.string_bytes.items[@intFromEnum(index)..]);
+}
+
+/// Detects shadowing of local variables, including function parameters.
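+/// Emits a compile error and returns `error.AnalysisFail` when shadowing is found.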
+fn detectLocalShadowing( + astgen: *AstGen, + scope: *Scope, + ident_name: Zir.NullTerminatedString, + name_token: Ast.TokenIndex, + token_bytes: []const u8, + id_cat: Scope.IdCat, +) !void { + const gpa = astgen.gpa; + if (token_bytes[0] != '@' and isPrimitive(token_bytes)) { + return astgen.failTokNotes(name_token, "name shadows primitive '{s}'", .{ + token_bytes, + }, &[_]u32{ + try astgen.errNoteTok(name_token, "consider using @\"{s}\" to disambiguate", .{ + token_bytes, + }), + }); + } + + var s = scope; + var outer_scope = false; + while (true) switch (s.tag) { + .local_val => { + const local_val = s.cast(Scope.LocalVal).?; + if (local_val.name == ident_name) { + const name_slice = mem.span(astgen.nullTerminatedString(ident_name)); + const name = try gpa.dupe(u8, name_slice); + defer gpa.free(name); + if (outer_scope) { + return astgen.failTokNotes(name_token, "{s} '{s}' shadows {s} from outer scope", .{ + @tagName(id_cat), name, @tagName(local_val.id_cat), + }, &[_]u32{ + try astgen.errNoteTok( + local_val.token_src, + "previous declaration here", + .{}, + ), + }); + } + return astgen.failTokNotes(name_token, "redeclaration of {s} '{s}'", .{ + @tagName(local_val.id_cat), name, + }, &[_]u32{ + try astgen.errNoteTok( + local_val.token_src, + "previous declaration here", + .{}, + ), + }); + } + s = local_val.parent; + }, + .local_ptr => { + const local_ptr = s.cast(Scope.LocalPtr).?; + if (local_ptr.name == ident_name) { + const name_slice = mem.span(astgen.nullTerminatedString(ident_name)); + const name = try gpa.dupe(u8, name_slice); + defer gpa.free(name); + if (outer_scope) { + return astgen.failTokNotes(name_token, "{s} '{s}' shadows {s} from outer scope", .{ + @tagName(id_cat), name, @tagName(local_ptr.id_cat), + }, &[_]u32{ + try astgen.errNoteTok( + local_ptr.token_src, + "previous declaration here", + .{}, + ), + }); + } + return astgen.failTokNotes(name_token, "redeclaration of {s} '{s}'", .{ + @tagName(local_ptr.id_cat), name, + }, &[_]u32{ + try astgen.errNoteTok( + local_ptr.token_src, + "previous declaration here", + .{}, + ), + }); + } + s = local_ptr.parent; + }, + .namespace, .enum_namespace => { + outer_scope = true; + const ns = s.cast(Scope.Namespace).?; + const decl_node = ns.decls.get(ident_name) orelse { + s = ns.parent; + continue; + }; + const name_slice = mem.span(astgen.nullTerminatedString(ident_name)); + const name = try gpa.dupe(u8, name_slice); + defer gpa.free(name); + return astgen.failTokNotes(name_token, "{s} shadows declaration of '{s}'", .{ + @tagName(id_cat), name, + }, &[_]u32{ + try astgen.errNoteNode(decl_node, "declared here", .{}), + }); + }, + .gen_zir => { + s = s.cast(GenZir).?.parent; + outer_scope = true; + }, + .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent, + .top => break, + }; +} + +const LineColumn = struct { u32, u32 }; + +/// Advances the source cursor to the main token of `node` if not in comptime scope. +/// Usually paired with `emitDbgStmt`. +fn maybeAdvanceSourceCursorToMainToken(gz: *GenZir, node: Ast.Node.Index) LineColumn { + if (gz.is_comptime) return .{ gz.astgen.source_line - gz.decl_line, gz.astgen.source_column }; + + const tree = gz.astgen.tree; + const token_starts = tree.tokens.items(.start); + const main_tokens = tree.nodes.items(.main_token); + const node_start = token_starts[main_tokens[node]]; + gz.astgen.advanceSourceCursor(node_start); + + return .{ gz.astgen.source_line - gz.decl_line, gz.astgen.source_column }; +} + +/// Advances the source cursor to the beginning of `node`. 
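+/// That is, to the byte offset of the node's first token.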
+fn advanceSourceCursorToNode(astgen: *AstGen, node: Ast.Node.Index) void { + const tree = astgen.tree; + const token_starts = tree.tokens.items(.start); + const node_start = token_starts[tree.firstToken(node)]; + astgen.advanceSourceCursor(node_start); +} + +/// Advances the source cursor to an absolute byte offset `end` in the file. +fn advanceSourceCursor(astgen: *AstGen, end: usize) void { + const source = astgen.tree.source; + var i = astgen.source_offset; + var line = astgen.source_line; + var column = astgen.source_column; + assert(i <= end); + while (i < end) : (i += 1) { + if (source[i] == '\n') { + line += 1; + column = 0; + } else { + column += 1; + } + } + astgen.source_offset = i; + astgen.source_line = line; + astgen.source_column = column; +} + +fn scanDecls(astgen: *AstGen, namespace: *Scope.Namespace, members: []const Ast.Node.Index) !u32 { + const gpa = astgen.gpa; + const tree = astgen.tree; + const node_tags = tree.nodes.items(.tag); + const main_tokens = tree.nodes.items(.main_token); + const token_tags = tree.tokens.items(.tag); + var decl_count: u32 = 0; + for (members) |member_node| { + const name_token = switch (node_tags[member_node]) { + .global_var_decl, + .local_var_decl, + .simple_var_decl, + .aligned_var_decl, + => blk: { + decl_count += 1; + break :blk main_tokens[member_node] + 1; + }, + + .fn_proto_simple, + .fn_proto_multi, + .fn_proto_one, + .fn_proto, + .fn_decl, + => blk: { + decl_count += 1; + const ident = main_tokens[member_node] + 1; + if (token_tags[ident] != .identifier) { + switch (astgen.failNode(member_node, "missing function name", .{})) { + error.AnalysisFail => continue, + error.OutOfMemory => return error.OutOfMemory, + } + } + break :blk ident; + }, + + .@"comptime", .@"usingnamespace", .test_decl => { + decl_count += 1; + continue; + }, + + else => continue, + }; + + const token_bytes = astgen.tree.tokenSlice(name_token); + if (token_bytes[0] != '@' and isPrimitive(token_bytes)) { + switch (astgen.failTokNotes(name_token, "name shadows primitive '{s}'", .{ + token_bytes, + }, &[_]u32{ + try astgen.errNoteTok(name_token, "consider using @\"{s}\" to disambiguate", .{ + token_bytes, + }), + })) { + error.AnalysisFail => continue, + error.OutOfMemory => return error.OutOfMemory, + } + } + + const name_str_index = try astgen.identAsString(name_token); + const gop = try namespace.decls.getOrPut(gpa, name_str_index); + if (gop.found_existing) { + const name = try gpa.dupe(u8, mem.span(astgen.nullTerminatedString(name_str_index))); + defer gpa.free(name); + switch (astgen.failNodeNotes(member_node, "redeclaration of '{s}'", .{ + name, + }, &[_]u32{ + try astgen.errNoteNode(gop.value_ptr.*, "other declaration here", .{}), + })) { + error.AnalysisFail => continue, + error.OutOfMemory => return error.OutOfMemory, + } + } + + var s = namespace.parent; + while (true) switch (s.tag) { + .local_val => { + const local_val = s.cast(Scope.LocalVal).?; + if (local_val.name == name_str_index) { + return astgen.failTokNotes(name_token, "declaration '{s}' shadows {s} from outer scope", .{ + token_bytes, @tagName(local_val.id_cat), + }, &[_]u32{ + try astgen.errNoteTok( + local_val.token_src, + "previous declaration here", + .{}, + ), + }); + } + s = local_val.parent; + }, + .local_ptr => { + const local_ptr = s.cast(Scope.LocalPtr).?; + if (local_ptr.name == name_str_index) { + return astgen.failTokNotes(name_token, "declaration '{s}' shadows {s} from outer scope", .{ + token_bytes, @tagName(local_ptr.id_cat), + }, &[_]u32{ + try astgen.errNoteTok( + 
local_ptr.token_src, + "previous declaration here", + .{}, + ), + }); + } + s = local_ptr.parent; + }, + .namespace, .enum_namespace => s = s.cast(Scope.Namespace).?.parent, + .gen_zir => s = s.cast(GenZir).?.parent, + .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent, + .top => break, + }; + gop.value_ptr.* = member_node; + } + return decl_count; +} + +fn isInferred(astgen: *AstGen, ref: Zir.Inst.Ref) bool { + const inst = ref.toIndex() orelse return false; + const zir_tags = astgen.instructions.items(.tag); + return switch (zir_tags[@intFromEnum(inst)]) { + .alloc_inferred, + .alloc_inferred_mut, + .alloc_inferred_comptime, + .alloc_inferred_comptime_mut, + => true, + + .extended => { + const zir_data = astgen.instructions.items(.data); + if (zir_data[@intFromEnum(inst)].extended.opcode != .alloc) return false; + const small: Zir.Inst.AllocExtended.Small = @bitCast(zir_data[@intFromEnum(inst)].extended.small); + return !small.has_type; + }, + + else => false, + }; +} + +/// Assumes capacity for body has already been added. Needed capacity taking into +/// account fixups can be found with `countBodyLenAfterFixups`. +fn appendBodyWithFixups(astgen: *AstGen, body: []const Zir.Inst.Index) void { + return appendBodyWithFixupsArrayList(astgen, &astgen.extra, body); +} + +fn appendBodyWithFixupsArrayList( + astgen: *AstGen, + list: *std.ArrayListUnmanaged(u32), + body: []const Zir.Inst.Index, +) void { + for (body) |body_inst| { + appendPossiblyRefdBodyInst(astgen, list, body_inst); + } +} + +fn appendPossiblyRefdBodyInst( + astgen: *AstGen, + list: *std.ArrayListUnmanaged(u32), + body_inst: Zir.Inst.Index, +) void { + list.appendAssumeCapacity(@intFromEnum(body_inst)); + const kv = astgen.ref_table.fetchRemove(body_inst) orelse return; + const ref_inst = kv.value; + return appendPossiblyRefdBodyInst(astgen, list, ref_inst); +} + +fn countBodyLenAfterFixups(astgen: *AstGen, body: []const Zir.Inst.Index) u32 { + var count = body.len; + for (body) |body_inst| { + var check_inst = body_inst; + while (astgen.ref_table.get(check_inst)) |ref_inst| { + count += 1; + check_inst = ref_inst; + } + } + return @intCast(count); +} + +fn emitDbgStmt(gz: *GenZir, lc: LineColumn) !void { + if (gz.is_comptime) return; + if (gz.instructions.items.len > 0) { + const astgen = gz.astgen; + const last = gz.instructions.items[gz.instructions.items.len - 1]; + if (astgen.instructions.items(.tag)[@intFromEnum(last)] == .dbg_stmt) { + astgen.instructions.items(.data)[@intFromEnum(last)].dbg_stmt = .{ + .line = lc[0], + .column = lc[1], + }; + return; + } + } + + _ = try gz.add(.{ .tag = .dbg_stmt, .data = .{ + .dbg_stmt = .{ + .line = lc[0], + .column = lc[1], + }, + } }); +} + +/// In some cases, Sema expects us to generate a `dbg_stmt` at the instruction +/// *index* directly preceding the next instruction (e.g. if a call is %10, it +/// expects a dbg_stmt at %9). TODO: this logic may allow redundant dbg_stmt +/// instructions; fix up Sema so we don't need it! 
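+/// Like `emitDbgStmt`, but only reuses the previous `dbg_stmt` when it is the
+/// most recently added instruction overall.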
+fn emitDbgStmtForceCurrentIndex(gz: *GenZir, lc: LineColumn) !void { + const astgen = gz.astgen; + if (gz.instructions.items.len > 0 and + @intFromEnum(gz.instructions.items[gz.instructions.items.len - 1]) == astgen.instructions.len - 1) + { + const last = astgen.instructions.len - 1; + if (astgen.instructions.items(.tag)[last] == .dbg_stmt) { + astgen.instructions.items(.data)[last].dbg_stmt = .{ + .line = lc[0], + .column = lc[1], + }; + return; + } + } + + _ = try gz.add(.{ .tag = .dbg_stmt, .data = .{ + .dbg_stmt = .{ + .line = lc[0], + .column = lc[1], + }, + } }); +} + +fn lowerAstErrors(astgen: *AstGen) !void { + const tree = astgen.tree; + assert(tree.errors.len > 0); + + const gpa = astgen.gpa; + const parse_err = tree.errors[0]; + + var msg: std.ArrayListUnmanaged(u8) = .{}; + defer msg.deinit(gpa); + + const token_starts = tree.tokens.items(.start); + const token_tags = tree.tokens.items(.tag); + + var notes: std.ArrayListUnmanaged(u32) = .{}; + defer notes.deinit(gpa); + + if (token_tags[parse_err.token + @intFromBool(parse_err.token_is_prev)] == .invalid) { + const tok = parse_err.token + @intFromBool(parse_err.token_is_prev); + const bad_off: u32 = @intCast(tree.tokenSlice(parse_err.token + @intFromBool(parse_err.token_is_prev)).len); + const byte_abs = token_starts[parse_err.token + @intFromBool(parse_err.token_is_prev)] + bad_off; + try notes.append(gpa, try astgen.errNoteTokOff(tok, bad_off, "invalid byte: '{'}'", .{ + std.zig.fmtEscapes(tree.source[byte_abs..][0..1]), + })); + } + + for (tree.errors[1..]) |note| { + if (!note.is_note) break; + + msg.clearRetainingCapacity(); + try tree.renderError(note, msg.writer(gpa)); + try notes.append(gpa, try astgen.errNoteTok(note.token, "{s}", .{msg.items})); + } + + const extra_offset = tree.errorOffset(parse_err); + msg.clearRetainingCapacity(); + try tree.renderError(parse_err, msg.writer(gpa)); + try astgen.appendErrorTokNotesOff(parse_err.token, extra_offset, "{s}", .{msg.items}, notes.items); +} + +const DeclarationName = union(enum) { + named: Ast.TokenIndex, + named_test: Ast.TokenIndex, + unnamed_test, + decltest: Zir.NullTerminatedString, + @"comptime", + @"usingnamespace", +}; + +/// Sets all extra data for a `declaration` instruction. +/// Unstacks `value_gz`, `align_gz`, `linksection_gz`, and `addrspace_gz`. +fn setDeclaration( + decl_inst: Zir.Inst.Index, + src_hash: std.zig.SrcHash, + name: DeclarationName, + line_offset: u32, + is_pub: bool, + is_export: bool, + doc_comment: Zir.NullTerminatedString, + value_gz: *GenZir, + /// May be `null` if all these blocks would be empty. + /// If `null`, then `value_gz` must have nothing stacked on it. + extra_gzs: ?struct { + /// Must be stacked on `value_gz`. + align_gz: *GenZir, + /// Must be stacked on `align_gz`. + linksection_gz: *GenZir, + /// Must be stacked on `linksection_gz`, and have nothing stacked on it. 
+ addrspace_gz: *GenZir, + }, +) !void { + const astgen = value_gz.astgen; + const gpa = astgen.gpa; + + const empty_body: []Zir.Inst.Index = &.{}; + const value_body, const align_body, const linksection_body, const addrspace_body = if (extra_gzs) |e| .{ + value_gz.instructionsSliceUpto(e.align_gz), + e.align_gz.instructionsSliceUpto(e.linksection_gz), + e.linksection_gz.instructionsSliceUpto(e.addrspace_gz), + e.addrspace_gz.instructionsSlice(), + } else .{ value_gz.instructionsSlice(), empty_body, empty_body, empty_body }; + + const value_len = astgen.countBodyLenAfterFixups(value_body); + const align_len = astgen.countBodyLenAfterFixups(align_body); + const linksection_len = astgen.countBodyLenAfterFixups(linksection_body); + const addrspace_len = astgen.countBodyLenAfterFixups(addrspace_body); + + const true_doc_comment: Zir.NullTerminatedString = switch (name) { + .decltest => |test_name| test_name, + else => doc_comment, + }; + + const src_hash_arr: [4]u32 = @bitCast(src_hash); + + const extra: Zir.Inst.Declaration = .{ + .src_hash_0 = src_hash_arr[0], + .src_hash_1 = src_hash_arr[1], + .src_hash_2 = src_hash_arr[2], + .src_hash_3 = src_hash_arr[3], + .name = switch (name) { + .named => |tok| @enumFromInt(@intFromEnum(try astgen.identAsString(tok))), + .named_test => |tok| @enumFromInt(@intFromEnum(try astgen.testNameString(tok))), + .unnamed_test => .unnamed_test, + .decltest => .decltest, + .@"comptime" => .@"comptime", + .@"usingnamespace" => .@"usingnamespace", + }, + .line_offset = line_offset, + .flags = .{ + .value_body_len = @intCast(value_len), + .is_pub = is_pub, + .is_export = is_export, + .has_doc_comment = true_doc_comment != .empty, + .has_align_linksection_addrspace = align_len != 0 or linksection_len != 0 or addrspace_len != 0, + }, + }; + astgen.instructions.items(.data)[@intFromEnum(decl_inst)].pl_node.payload_index = try astgen.addExtra(extra); + if (extra.flags.has_doc_comment) { + try astgen.extra.append(gpa, @intFromEnum(true_doc_comment)); + } + if (extra.flags.has_align_linksection_addrspace) { + try astgen.extra.appendSlice(gpa, &.{ + align_len, + linksection_len, + addrspace_len, + }); + } + try astgen.extra.ensureUnusedCapacity(gpa, value_len + align_len + linksection_len + addrspace_len); + astgen.appendBodyWithFixups(value_body); + if (extra.flags.has_align_linksection_addrspace) { + astgen.appendBodyWithFixups(align_body); + astgen.appendBodyWithFixups(linksection_body); + astgen.appendBodyWithFixups(addrspace_body); + } + + if (extra_gzs) |e| { + e.addrspace_gz.unstack(); + e.linksection_gz.unstack(); + e.align_gz.unstack(); + } + value_gz.unstack(); +} diff --git a/src/AstGen.zig b/src/AstGen.zig deleted file mode 100644 index 20b1077420..0000000000 --- a/src/AstGen.zig +++ /dev/null @@ -1,13661 +0,0 @@ -//! Ingests an AST and produces ZIR code. -const AstGen = @This(); - -const std = @import("std"); -const Ast = std.zig.Ast; -const mem = std.mem; -const Allocator = std.mem.Allocator; -const assert = std.debug.assert; -const ArrayListUnmanaged = std.ArrayListUnmanaged; -const StringIndexAdapter = std.hash_map.StringIndexAdapter; -const StringIndexContext = std.hash_map.StringIndexContext; - -const isPrimitive = std.zig.primitives.isPrimitive; - -const Zir = std.zig.Zir; -const BuiltinFn = std.zig.BuiltinFn; -const AstRlAnnotate = std.zig.AstRlAnnotate; - -gpa: Allocator, -tree: *const Ast, -/// The set of nodes which, given the choice, must expose a result pointer to -/// sub-expressions. See `AstRlAnnotate` for details. 
-nodes_need_rl: *const AstRlAnnotate.RlNeededSet, -instructions: std.MultiArrayList(Zir.Inst) = .{}, -extra: ArrayListUnmanaged(u32) = .{}, -string_bytes: ArrayListUnmanaged(u8) = .{}, -/// Tracks the current byte offset within the source file. -/// Used to populate line deltas in the ZIR. AstGen maintains -/// this "cursor" throughout the entire AST lowering process in order -/// to avoid starting over the line/column scan for every declaration, which -/// would be O(N^2). -source_offset: u32 = 0, -/// Tracks the corresponding line of `source_offset`. -/// This value is absolute. -source_line: u32 = 0, -/// Tracks the corresponding column of `source_offset`. -/// This value is absolute. -source_column: u32 = 0, -/// Used for temporary allocations; freed after AstGen is complete. -/// The resulting ZIR code has no references to anything in this arena. -arena: Allocator, -string_table: std.HashMapUnmanaged(u32, void, StringIndexContext, std.hash_map.default_max_load_percentage) = .{}, -compile_errors: ArrayListUnmanaged(Zir.Inst.CompileErrors.Item) = .{}, -/// The topmost block of the current function. -fn_block: ?*GenZir = null, -fn_var_args: bool = false, -/// The return type of the current function. This may be a trivial `Ref`, or -/// otherwise it refers to a `ret_type` instruction. -fn_ret_ty: Zir.Inst.Ref = .none, -/// Maps string table indexes to the first `@import` ZIR instruction -/// that uses this string as the operand. -imports: std.AutoArrayHashMapUnmanaged(Zir.NullTerminatedString, Ast.TokenIndex) = .{}, -/// Used for temporary storage when building payloads. -scratch: std.ArrayListUnmanaged(u32) = .{}, -/// Whenever a `ref` instruction is needed, it is created and saved in this -/// table instead of being immediately appended to the current block body. -/// Then, when the instruction is being added to the parent block (typically from -/// setBlockBody), if it has a ref_table entry, then the ref instruction is added -/// there. This makes sure two properties are upheld: -/// 1. All pointers to the same locals return the same address. This is required -/// to be compliant with the language specification. -/// 2. `ref` instructions will dominate their uses. This is a required property -/// of ZIR. -/// The key is the ref operand; the value is the ref instruction. 
-ref_table: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{}, - -const InnerError = error{ OutOfMemory, AnalysisFail }; - -fn addExtra(astgen: *AstGen, extra: anytype) Allocator.Error!u32 { - const fields = std.meta.fields(@TypeOf(extra)); - try astgen.extra.ensureUnusedCapacity(astgen.gpa, fields.len); - return addExtraAssumeCapacity(astgen, extra); -} - -fn addExtraAssumeCapacity(astgen: *AstGen, extra: anytype) u32 { - const fields = std.meta.fields(@TypeOf(extra)); - const extra_index: u32 = @intCast(astgen.extra.items.len); - astgen.extra.items.len += fields.len; - setExtra(astgen, extra_index, extra); - return extra_index; -} - -fn setExtra(astgen: *AstGen, index: usize, extra: anytype) void { - const fields = std.meta.fields(@TypeOf(extra)); - var i = index; - inline for (fields) |field| { - astgen.extra.items[i] = switch (field.type) { - u32 => @field(extra, field.name), - - Zir.Inst.Ref, - Zir.Inst.Index, - Zir.Inst.Declaration.Name, - Zir.NullTerminatedString, - => @intFromEnum(@field(extra, field.name)), - - i32, - Zir.Inst.Call.Flags, - Zir.Inst.BuiltinCall.Flags, - Zir.Inst.SwitchBlock.Bits, - Zir.Inst.SwitchBlockErrUnion.Bits, - Zir.Inst.FuncFancy.Bits, - Zir.Inst.Declaration.Flags, - => @bitCast(@field(extra, field.name)), - - else => @compileError("bad field type"), - }; - i += 1; - } -} - -fn reserveExtra(astgen: *AstGen, size: usize) Allocator.Error!u32 { - const extra_index: u32 = @intCast(astgen.extra.items.len); - try astgen.extra.resize(astgen.gpa, extra_index + size); - return extra_index; -} - -fn appendRefs(astgen: *AstGen, refs: []const Zir.Inst.Ref) !void { - return astgen.extra.appendSlice(astgen.gpa, @ptrCast(refs)); -} - -fn appendRefsAssumeCapacity(astgen: *AstGen, refs: []const Zir.Inst.Ref) void { - astgen.extra.appendSliceAssumeCapacity(@ptrCast(refs)); -} - -pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir { - var arena = std.heap.ArenaAllocator.init(gpa); - defer arena.deinit(); - - var nodes_need_rl = try AstRlAnnotate.annotate(gpa, arena.allocator(), tree); - defer nodes_need_rl.deinit(gpa); - - var astgen: AstGen = .{ - .gpa = gpa, - .arena = arena.allocator(), - .tree = &tree, - .nodes_need_rl = &nodes_need_rl, - }; - defer astgen.deinit(gpa); - - // String table index 0 is reserved for `NullTerminatedString.empty`. - try astgen.string_bytes.append(gpa, 0); - - // We expect at least as many ZIR instructions and extra data items - // as AST nodes. - try astgen.instructions.ensureTotalCapacity(gpa, tree.nodes.len); - - // First few indexes of extra are reserved and set at the end. - const reserved_count = @typeInfo(Zir.ExtraIndex).Enum.fields.len; - try astgen.extra.ensureTotalCapacity(gpa, tree.nodes.len + reserved_count); - astgen.extra.items.len += reserved_count; - - var top_scope: Scope.Top = .{}; - - var gz_instructions: std.ArrayListUnmanaged(Zir.Inst.Index) = .{}; - var gen_scope: GenZir = .{ - .is_comptime = true, - .parent = &top_scope.base, - .anon_name_strategy = .parent, - .decl_node_index = 0, - .decl_line = 0, - .astgen = &astgen, - .instructions = &gz_instructions, - .instructions_top = 0, - }; - defer gz_instructions.deinit(gpa); - - // The AST -> ZIR lowering process assumes an AST that does not have any - // parse errors. - if (tree.errors.len == 0) { - if (AstGen.structDeclInner( - &gen_scope, - &gen_scope.base, - 0, - tree.containerDeclRoot(), - .Auto, - 0, - )) |struct_decl_ref| { - assert(struct_decl_ref.toIndex().? 
== .main_struct_inst); - } else |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - error.AnalysisFail => {}, // Handled via compile_errors below. - } - } else { - try lowerAstErrors(&astgen); - } - - const err_index = @intFromEnum(Zir.ExtraIndex.compile_errors); - if (astgen.compile_errors.items.len == 0) { - astgen.extra.items[err_index] = 0; - } else { - try astgen.extra.ensureUnusedCapacity(gpa, 1 + astgen.compile_errors.items.len * - @typeInfo(Zir.Inst.CompileErrors.Item).Struct.fields.len); - - astgen.extra.items[err_index] = astgen.addExtraAssumeCapacity(Zir.Inst.CompileErrors{ - .items_len = @intCast(astgen.compile_errors.items.len), - }); - - for (astgen.compile_errors.items) |item| { - _ = astgen.addExtraAssumeCapacity(item); - } - } - - const imports_index = @intFromEnum(Zir.ExtraIndex.imports); - if (astgen.imports.count() == 0) { - astgen.extra.items[imports_index] = 0; - } else { - try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.Imports).Struct.fields.len + - astgen.imports.count() * @typeInfo(Zir.Inst.Imports.Item).Struct.fields.len); - - astgen.extra.items[imports_index] = astgen.addExtraAssumeCapacity(Zir.Inst.Imports{ - .imports_len = @intCast(astgen.imports.count()), - }); - - var it = astgen.imports.iterator(); - while (it.next()) |entry| { - _ = astgen.addExtraAssumeCapacity(Zir.Inst.Imports.Item{ - .name = entry.key_ptr.*, - .token = entry.value_ptr.*, - }); - } - } - - return Zir{ - .instructions = astgen.instructions.toOwnedSlice(), - .string_bytes = try astgen.string_bytes.toOwnedSlice(gpa), - .extra = try astgen.extra.toOwnedSlice(gpa), - }; -} - -fn deinit(astgen: *AstGen, gpa: Allocator) void { - astgen.instructions.deinit(gpa); - astgen.extra.deinit(gpa); - astgen.string_table.deinit(gpa); - astgen.string_bytes.deinit(gpa); - astgen.compile_errors.deinit(gpa); - astgen.imports.deinit(gpa); - astgen.scratch.deinit(gpa); - astgen.ref_table.deinit(gpa); -} - -const ResultInfo = struct { - /// The semantics requested for the result location - rl: Loc, - - /// The "operator" consuming the result location - ctx: Context = .none, - - /// Turns a `coerced_ty` back into a `ty`. Should be called at branch points - /// such as if and switch expressions. - fn br(ri: ResultInfo) ResultInfo { - return switch (ri.rl) { - .coerced_ty => |ty| .{ - .rl = .{ .ty = ty }, - .ctx = ri.ctx, - }, - else => ri, - }; - } - - fn zirTag(ri: ResultInfo) Zir.Inst.Tag { - switch (ri.rl) { - .ty => return switch (ri.ctx) { - .shift_op => .as_shift_operand, - else => .as_node, - }, - else => unreachable, - } - } - - const Loc = union(enum) { - /// The expression is the right-hand side of assignment to `_`. Only the side-effects of the - /// expression should be generated. The result instruction from the expression must - /// be ignored. - discard, - /// The expression has an inferred type, and it will be evaluated as an rvalue. - none, - /// The expression will be coerced into this type, but it will be evaluated as an rvalue. - ty: Zir.Inst.Ref, - /// Same as `ty` but it is guaranteed that Sema will additionally perform the coercion, - /// so no `as` instruction needs to be emitted. - coerced_ty: Zir.Inst.Ref, - /// The expression must generate a pointer rather than a value. For example, the left hand side - /// of an assignment uses this kind of result location. 
- ref, - /// The expression must generate a pointer rather than a value, and the pointer will be coerced - /// by other code to this type, which is guaranteed by earlier instructions to be a pointer type. - ref_coerced_ty: Zir.Inst.Ref, - /// The expression must store its result into this typed pointer. The result instruction - /// from the expression must be ignored. - ptr: PtrResultLoc, - /// The expression must store its result into this allocation, which has an inferred type. - /// The result instruction from the expression must be ignored. - /// Always an instruction with tag `alloc_inferred`. - inferred_ptr: Zir.Inst.Ref, - /// The expression has a sequence of pointers to store its results into due to a destructure - /// operation. Each of these pointers may or may not have an inferred type. - destructure: struct { - /// The AST node of the destructure operation itself. - src_node: Ast.Node.Index, - /// The pointers to store results into. - components: []const DestructureComponent, - }, - - const DestructureComponent = union(enum) { - typed_ptr: PtrResultLoc, - inferred_ptr: Zir.Inst.Ref, - discard, - }; - - const PtrResultLoc = struct { - inst: Zir.Inst.Ref, - src_node: ?Ast.Node.Index = null, - }; - - /// Find the result type for a cast builtin given the result location. - /// If the location does not have a known result type, emits an error on - /// the given node. - fn resultType(rl: Loc, gz: *GenZir, node: Ast.Node.Index) !?Zir.Inst.Ref { - return switch (rl) { - .discard, .none, .ref, .inferred_ptr, .destructure => null, - .ty, .coerced_ty => |ty_ref| ty_ref, - .ref_coerced_ty => |ptr_ty| try gz.addUnNode(.elem_type, ptr_ty, node), - .ptr => |ptr| { - const ptr_ty = try gz.addUnNode(.typeof, ptr.inst, node); - return try gz.addUnNode(.elem_type, ptr_ty, node); - }, - }; - } - - fn resultTypeForCast(rl: Loc, gz: *GenZir, node: Ast.Node.Index, builtin_name: []const u8) !Zir.Inst.Ref { - const astgen = gz.astgen; - if (try rl.resultType(gz, node)) |ty| return ty; - switch (rl) { - .destructure => |destructure| return astgen.failNodeNotes(node, "{s} must have a known result type", .{builtin_name}, &.{ - try astgen.errNoteNode(destructure.src_node, "destructure expressions do not provide a single result type", .{}), - try astgen.errNoteNode(node, "use @as to provide explicit result type", .{}), - }), - else => return astgen.failNodeNotes(node, "{s} must have a known result type", .{builtin_name}, &.{ - try astgen.errNoteNode(node, "use @as to provide explicit result type", .{}), - }), - } - } - }; - - const Context = enum { - /// The expression is the operand to a return expression. - @"return", - /// The expression is the input to an error-handling operator (if-else, try, or catch). - error_handling_expr, - /// The expression is the right-hand side of a shift operation. - shift_op, - /// The expression is an argument in a function call. - fn_arg, - /// The expression is the right-hand side of an initializer for a `const` variable - const_init, - /// The expression is the right-hand side of an assignment expression. - assignment, - /// No specific operator in particular. 
- none, - }; -}; - -const coerced_align_ri: ResultInfo = .{ .rl = .{ .coerced_ty = .u29_type } }; -const coerced_addrspace_ri: ResultInfo = .{ .rl = .{ .coerced_ty = .address_space_type } }; -const coerced_linksection_ri: ResultInfo = .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }; -const coerced_type_ri: ResultInfo = .{ .rl = .{ .coerced_ty = .type_type } }; -const coerced_bool_ri: ResultInfo = .{ .rl = .{ .coerced_ty = .bool_type } }; - -fn typeExpr(gz: *GenZir, scope: *Scope, type_node: Ast.Node.Index) InnerError!Zir.Inst.Ref { - return comptimeExpr(gz, scope, coerced_type_ri, type_node); -} - -fn reachableTypeExpr( - gz: *GenZir, - scope: *Scope, - type_node: Ast.Node.Index, - reachable_node: Ast.Node.Index, -) InnerError!Zir.Inst.Ref { - return reachableExprComptime(gz, scope, coerced_type_ri, type_node, reachable_node, true); -} - -/// Same as `expr` but fails with a compile error if the result type is `noreturn`. -fn reachableExpr( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, - reachable_node: Ast.Node.Index, -) InnerError!Zir.Inst.Ref { - return reachableExprComptime(gz, scope, ri, node, reachable_node, false); -} - -fn reachableExprComptime( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, - reachable_node: Ast.Node.Index, - force_comptime: bool, -) InnerError!Zir.Inst.Ref { - const result_inst = if (force_comptime) - try comptimeExpr(gz, scope, ri, node) - else - try expr(gz, scope, ri, node); - - if (gz.refIsNoReturn(result_inst)) { - try gz.astgen.appendErrorNodeNotes(reachable_node, "unreachable code", .{}, &[_]u32{ - try gz.astgen.errNoteNode(node, "control flow is diverted here", .{}), - }); - } - return result_inst; -} - -fn lvalExpr(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - const tree = astgen.tree; - const node_tags = tree.nodes.items(.tag); - const main_tokens = tree.nodes.items(.main_token); - switch (node_tags[node]) { - .root => unreachable, - .@"usingnamespace" => unreachable, - .test_decl => unreachable, - .global_var_decl => unreachable, - .local_var_decl => unreachable, - .simple_var_decl => unreachable, - .aligned_var_decl => unreachable, - .switch_case => unreachable, - .switch_case_inline => unreachable, - .switch_case_one => unreachable, - .switch_case_inline_one => unreachable, - .container_field_init => unreachable, - .container_field_align => unreachable, - .container_field => unreachable, - .asm_output => unreachable, - .asm_input => unreachable, - - .assign, - .assign_destructure, - .assign_bit_and, - .assign_bit_or, - .assign_shl, - .assign_shl_sat, - .assign_shr, - .assign_bit_xor, - .assign_div, - .assign_sub, - .assign_sub_wrap, - .assign_sub_sat, - .assign_mod, - .assign_add, - .assign_add_wrap, - .assign_add_sat, - .assign_mul, - .assign_mul_wrap, - .assign_mul_sat, - .add, - .add_wrap, - .add_sat, - .sub, - .sub_wrap, - .sub_sat, - .mul, - .mul_wrap, - .mul_sat, - .div, - .mod, - .bit_and, - .bit_or, - .shl, - .shl_sat, - .shr, - .bit_xor, - .bang_equal, - .equal_equal, - .greater_than, - .greater_or_equal, - .less_than, - .less_or_equal, - .array_cat, - .array_mult, - .bool_and, - .bool_or, - .@"asm", - .asm_simple, - .string_literal, - .number_literal, - .call, - .call_comma, - .async_call, - .async_call_comma, - .call_one, - .call_one_comma, - .async_call_one, - .async_call_one_comma, - .unreachable_literal, - .@"return", - .@"if", - .if_simple, - .@"while", - .while_simple, - .while_cont, - .bool_not, - .address_of, - 
.optional_type, - .block, - .block_semicolon, - .block_two, - .block_two_semicolon, - .@"break", - .ptr_type_aligned, - .ptr_type_sentinel, - .ptr_type, - .ptr_type_bit_range, - .array_type, - .array_type_sentinel, - .enum_literal, - .multiline_string_literal, - .char_literal, - .@"defer", - .@"errdefer", - .@"catch", - .error_union, - .merge_error_sets, - .switch_range, - .for_range, - .@"await", - .bit_not, - .negation, - .negation_wrap, - .@"resume", - .@"try", - .slice, - .slice_open, - .slice_sentinel, - .array_init_one, - .array_init_one_comma, - .array_init_dot_two, - .array_init_dot_two_comma, - .array_init_dot, - .array_init_dot_comma, - .array_init, - .array_init_comma, - .struct_init_one, - .struct_init_one_comma, - .struct_init_dot_two, - .struct_init_dot_two_comma, - .struct_init_dot, - .struct_init_dot_comma, - .struct_init, - .struct_init_comma, - .@"switch", - .switch_comma, - .@"for", - .for_simple, - .@"suspend", - .@"continue", - .fn_proto_simple, - .fn_proto_multi, - .fn_proto_one, - .fn_proto, - .fn_decl, - .anyframe_type, - .anyframe_literal, - .error_set_decl, - .container_decl, - .container_decl_trailing, - .container_decl_two, - .container_decl_two_trailing, - .container_decl_arg, - .container_decl_arg_trailing, - .tagged_union, - .tagged_union_trailing, - .tagged_union_two, - .tagged_union_two_trailing, - .tagged_union_enum_tag, - .tagged_union_enum_tag_trailing, - .@"comptime", - .@"nosuspend", - .error_value, - => return astgen.failNode(node, "invalid left-hand side to assignment", .{}), - - .builtin_call, - .builtin_call_comma, - .builtin_call_two, - .builtin_call_two_comma, - => { - const builtin_token = main_tokens[node]; - const builtin_name = tree.tokenSlice(builtin_token); - // If the builtin is an invalid name, we don't cause an error here; instead - // let it pass, and the error will be "invalid builtin function" later. - if (BuiltinFn.list.get(builtin_name)) |info| { - if (!info.allows_lvalue) { - return astgen.failNode(node, "invalid left-hand side to assignment", .{}); - } - } - }, - - // These can be assigned to. - .unwrap_optional, - .deref, - .field_access, - .array_access, - .identifier, - .grouped_expression, - .@"orelse", - => {}, - } - return expr(gz, scope, .{ .rl = .ref }, node); -} - -/// Turn Zig AST into untyped ZIR instructions. -/// When `rl` is discard, ptr, inferred_ptr, or inferred_ptr, the -/// result instruction can be used to inspect whether it is isNoReturn() but that is it, -/// it must otherwise not be used. -fn expr(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - const tree = astgen.tree; - const main_tokens = tree.nodes.items(.main_token); - const token_tags = tree.tokens.items(.tag); - const node_datas = tree.nodes.items(.data); - const node_tags = tree.nodes.items(.tag); - - const prev_anon_name_strategy = gz.anon_name_strategy; - defer gz.anon_name_strategy = prev_anon_name_strategy; - if (!nodeUsesAnonNameStrategy(tree, node)) { - gz.anon_name_strategy = .anon; - } - - switch (node_tags[node]) { - .root => unreachable, // Top-level declaration. - .@"usingnamespace" => unreachable, // Top-level declaration. - .test_decl => unreachable, // Top-level declaration. - .container_field_init => unreachable, // Top-level declaration. - .container_field_align => unreachable, // Top-level declaration. - .container_field => unreachable, // Top-level declaration. - .fn_decl => unreachable, // Top-level declaration. 
- - .global_var_decl => unreachable, // Handled in `blockExpr`. - .local_var_decl => unreachable, // Handled in `blockExpr`. - .simple_var_decl => unreachable, // Handled in `blockExpr`. - .aligned_var_decl => unreachable, // Handled in `blockExpr`. - .@"defer" => unreachable, // Handled in `blockExpr`. - .@"errdefer" => unreachable, // Handled in `blockExpr`. - - .switch_case => unreachable, // Handled in `switchExpr`. - .switch_case_inline => unreachable, // Handled in `switchExpr`. - .switch_case_one => unreachable, // Handled in `switchExpr`. - .switch_case_inline_one => unreachable, // Handled in `switchExpr`. - .switch_range => unreachable, // Handled in `switchExpr`. - - .asm_output => unreachable, // Handled in `asmExpr`. - .asm_input => unreachable, // Handled in `asmExpr`. - - .for_range => unreachable, // Handled in `forExpr`. - - .assign => { - try assign(gz, scope, node); - return rvalue(gz, ri, .void_value, node); - }, - - .assign_destructure => { - // Note that this variant does not declare any new var/const: that - // variant is handled by `blockExprStmts`. - try assignDestructure(gz, scope, node); - return rvalue(gz, ri, .void_value, node); - }, - - .assign_shl => { - try assignShift(gz, scope, node, .shl); - return rvalue(gz, ri, .void_value, node); - }, - .assign_shl_sat => { - try assignShiftSat(gz, scope, node); - return rvalue(gz, ri, .void_value, node); - }, - .assign_shr => { - try assignShift(gz, scope, node, .shr); - return rvalue(gz, ri, .void_value, node); - }, - - .assign_bit_and => { - try assignOp(gz, scope, node, .bit_and); - return rvalue(gz, ri, .void_value, node); - }, - .assign_bit_or => { - try assignOp(gz, scope, node, .bit_or); - return rvalue(gz, ri, .void_value, node); - }, - .assign_bit_xor => { - try assignOp(gz, scope, node, .xor); - return rvalue(gz, ri, .void_value, node); - }, - .assign_div => { - try assignOp(gz, scope, node, .div); - return rvalue(gz, ri, .void_value, node); - }, - .assign_sub => { - try assignOp(gz, scope, node, .sub); - return rvalue(gz, ri, .void_value, node); - }, - .assign_sub_wrap => { - try assignOp(gz, scope, node, .subwrap); - return rvalue(gz, ri, .void_value, node); - }, - .assign_sub_sat => { - try assignOp(gz, scope, node, .sub_sat); - return rvalue(gz, ri, .void_value, node); - }, - .assign_mod => { - try assignOp(gz, scope, node, .mod_rem); - return rvalue(gz, ri, .void_value, node); - }, - .assign_add => { - try assignOp(gz, scope, node, .add); - return rvalue(gz, ri, .void_value, node); - }, - .assign_add_wrap => { - try assignOp(gz, scope, node, .addwrap); - return rvalue(gz, ri, .void_value, node); - }, - .assign_add_sat => { - try assignOp(gz, scope, node, .add_sat); - return rvalue(gz, ri, .void_value, node); - }, - .assign_mul => { - try assignOp(gz, scope, node, .mul); - return rvalue(gz, ri, .void_value, node); - }, - .assign_mul_wrap => { - try assignOp(gz, scope, node, .mulwrap); - return rvalue(gz, ri, .void_value, node); - }, - .assign_mul_sat => { - try assignOp(gz, scope, node, .mul_sat); - return rvalue(gz, ri, .void_value, node); - }, - - // zig fmt: off - .shl => return shiftOp(gz, scope, ri, node, node_datas[node].lhs, node_datas[node].rhs, .shl), - .shr => return shiftOp(gz, scope, ri, node, node_datas[node].lhs, node_datas[node].rhs, .shr), - - .add => return simpleBinOp(gz, scope, ri, node, .add), - .add_wrap => return simpleBinOp(gz, scope, ri, node, .addwrap), - .add_sat => return simpleBinOp(gz, scope, ri, node, .add_sat), - .sub => return simpleBinOp(gz, scope, ri, node, .sub), - 
.sub_wrap => return simpleBinOp(gz, scope, ri, node, .subwrap), - .sub_sat => return simpleBinOp(gz, scope, ri, node, .sub_sat), - .mul => return simpleBinOp(gz, scope, ri, node, .mul), - .mul_wrap => return simpleBinOp(gz, scope, ri, node, .mulwrap), - .mul_sat => return simpleBinOp(gz, scope, ri, node, .mul_sat), - .div => return simpleBinOp(gz, scope, ri, node, .div), - .mod => return simpleBinOp(gz, scope, ri, node, .mod_rem), - .shl_sat => return simpleBinOp(gz, scope, ri, node, .shl_sat), - - .bit_and => return simpleBinOp(gz, scope, ri, node, .bit_and), - .bit_or => return simpleBinOp(gz, scope, ri, node, .bit_or), - .bit_xor => return simpleBinOp(gz, scope, ri, node, .xor), - .bang_equal => return simpleBinOp(gz, scope, ri, node, .cmp_neq), - .equal_equal => return simpleBinOp(gz, scope, ri, node, .cmp_eq), - .greater_than => return simpleBinOp(gz, scope, ri, node, .cmp_gt), - .greater_or_equal => return simpleBinOp(gz, scope, ri, node, .cmp_gte), - .less_than => return simpleBinOp(gz, scope, ri, node, .cmp_lt), - .less_or_equal => return simpleBinOp(gz, scope, ri, node, .cmp_lte), - .array_cat => return simpleBinOp(gz, scope, ri, node, .array_cat), - - .array_mult => { - // This syntax form does not currently use the result type in the language specification. - // However, the result type can be used to emit more optimal code for large multiplications by - // having Sema perform a coercion before the multiplication operation. - const result = try gz.addPlNode(.array_mul, node, Zir.Inst.ArrayMul{ - .res_ty = if (try ri.rl.resultType(gz, node)) |t| t else .none, - .lhs = try expr(gz, scope, .{ .rl = .none }, node_datas[node].lhs), - .rhs = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, node_datas[node].rhs), - }); - return rvalue(gz, ri, result, node); - }, - - .error_union => return simpleBinOp(gz, scope, ri, node, .error_union_type), - .merge_error_sets => return simpleBinOp(gz, scope, ri, node, .merge_error_sets), - - .bool_and => return boolBinOp(gz, scope, ri, node, .bool_br_and), - .bool_or => return boolBinOp(gz, scope, ri, node, .bool_br_or), - - .bool_not => return simpleUnOp(gz, scope, ri, node, coerced_bool_ri, node_datas[node].lhs, .bool_not), - .bit_not => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, node_datas[node].lhs, .bit_not), - - .negation => return negation(gz, scope, ri, node), - .negation_wrap => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, node_datas[node].lhs, .negate_wrap), - - .identifier => return identifier(gz, scope, ri, node), - - .asm_simple, - .@"asm", - => return asmExpr(gz, scope, ri, node, tree.fullAsm(node).?), - - .string_literal => return stringLiteral(gz, ri, node), - .multiline_string_literal => return multilineStringLiteral(gz, ri, node), - - .number_literal => return numberLiteral(gz, ri, node, node, .positive), - // zig fmt: on - - .builtin_call_two, .builtin_call_two_comma => { - if (node_datas[node].lhs == 0) { - const params = [_]Ast.Node.Index{}; - return builtinCall(gz, scope, ri, node, ¶ms); - } else if (node_datas[node].rhs == 0) { - const params = [_]Ast.Node.Index{node_datas[node].lhs}; - return builtinCall(gz, scope, ri, node, ¶ms); - } else { - const params = [_]Ast.Node.Index{ node_datas[node].lhs, node_datas[node].rhs }; - return builtinCall(gz, scope, ri, node, ¶ms); - } - }, - .builtin_call, .builtin_call_comma => { - const params = tree.extra_data[node_datas[node].lhs..node_datas[node].rhs]; - return builtinCall(gz, scope, ri, node, params); - }, - - .call_one, - 
.call_one_comma, - .async_call_one, - .async_call_one_comma, - .call, - .call_comma, - .async_call, - .async_call_comma, - => { - var buf: [1]Ast.Node.Index = undefined; - return callExpr(gz, scope, ri, node, tree.fullCall(&buf, node).?); - }, - - .unreachable_literal => { - try emitDbgNode(gz, node); - _ = try gz.addAsIndex(.{ - .tag = .@"unreachable", - .data = .{ .@"unreachable" = .{ - .src_node = gz.nodeIndexToRelative(node), - } }, - }); - return Zir.Inst.Ref.unreachable_value; - }, - .@"return" => return ret(gz, scope, node), - .field_access => return fieldAccess(gz, scope, ri, node), - - .if_simple, - .@"if", - => { - const if_full = tree.fullIf(node).?; - no_switch_on_err: { - const error_token = if_full.error_token orelse break :no_switch_on_err; - switch (node_tags[if_full.ast.else_expr]) { - .@"switch", .switch_comma => {}, - else => break :no_switch_on_err, - } - const switch_operand = node_datas[if_full.ast.else_expr].lhs; - if (node_tags[switch_operand] != .identifier) break :no_switch_on_err; - if (!mem.eql(u8, tree.tokenSlice(error_token), tree.tokenSlice(main_tokens[switch_operand]))) break :no_switch_on_err; - return switchExprErrUnion(gz, scope, ri.br(), node, .@"if"); - } - return ifExpr(gz, scope, ri.br(), node, if_full); - }, - - .while_simple, - .while_cont, - .@"while", - => return whileExpr(gz, scope, ri.br(), node, tree.fullWhile(node).?, false), - - .for_simple, .@"for" => return forExpr(gz, scope, ri.br(), node, tree.fullFor(node).?, false), - - .slice_open => { - const lhs = try expr(gz, scope, .{ .rl = .ref }, node_datas[node].lhs); - - const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); - const start = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, node_datas[node].rhs); - try emitDbgStmt(gz, cursor); - const result = try gz.addPlNode(.slice_start, node, Zir.Inst.SliceStart{ - .lhs = lhs, - .start = start, - }); - return rvalue(gz, ri, result, node); - }, - .slice => { - const extra = tree.extraData(node_datas[node].rhs, Ast.Node.Slice); - const lhs_node = node_datas[node].lhs; - const lhs_tag = node_tags[lhs_node]; - const lhs_is_slice_sentinel = lhs_tag == .slice_sentinel; - const lhs_is_open_slice = lhs_tag == .slice_open or - (lhs_is_slice_sentinel and tree.extraData(node_datas[lhs_node].rhs, Ast.Node.SliceSentinel).end == 0); - if (lhs_is_open_slice and nodeIsTriviallyZero(tree, extra.start)) { - const lhs = try expr(gz, scope, .{ .rl = .ref }, node_datas[lhs_node].lhs); - - const start = if (lhs_is_slice_sentinel) start: { - const lhs_extra = tree.extraData(node_datas[lhs_node].rhs, Ast.Node.SliceSentinel); - break :start try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, lhs_extra.start); - } else try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, node_datas[lhs_node].rhs); - - const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); - const len = if (extra.end != 0) try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, extra.end) else .none; - try emitDbgStmt(gz, cursor); - const result = try gz.addPlNode(.slice_length, node, Zir.Inst.SliceLength{ - .lhs = lhs, - .start = start, - .len = len, - .start_src_node_offset = gz.nodeIndexToRelative(lhs_node), - .sentinel = .none, - }); - return rvalue(gz, ri, result, node); - } - const lhs = try expr(gz, scope, .{ .rl = .ref }, node_datas[node].lhs); - - const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); - const start = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, extra.start); - const end = try expr(gz, scope, .{ 
.rl = .{ .coerced_ty = .usize_type } }, extra.end); - try emitDbgStmt(gz, cursor); - const result = try gz.addPlNode(.slice_end, node, Zir.Inst.SliceEnd{ - .lhs = lhs, - .start = start, - .end = end, - }); - return rvalue(gz, ri, result, node); - }, - .slice_sentinel => { - const extra = tree.extraData(node_datas[node].rhs, Ast.Node.SliceSentinel); - const lhs_node = node_datas[node].lhs; - const lhs_tag = node_tags[lhs_node]; - const lhs_is_slice_sentinel = lhs_tag == .slice_sentinel; - const lhs_is_open_slice = lhs_tag == .slice_open or - (lhs_is_slice_sentinel and tree.extraData(node_datas[lhs_node].rhs, Ast.Node.SliceSentinel).end == 0); - if (lhs_is_open_slice and nodeIsTriviallyZero(tree, extra.start)) { - const lhs = try expr(gz, scope, .{ .rl = .ref }, node_datas[lhs_node].lhs); - - const start = if (lhs_is_slice_sentinel) start: { - const lhs_extra = tree.extraData(node_datas[lhs_node].rhs, Ast.Node.SliceSentinel); - break :start try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, lhs_extra.start); - } else try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, node_datas[lhs_node].rhs); - - const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); - const len = if (extra.end != 0) try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, extra.end) else .none; - const sentinel = try expr(gz, scope, .{ .rl = .none }, extra.sentinel); - try emitDbgStmt(gz, cursor); - const result = try gz.addPlNode(.slice_length, node, Zir.Inst.SliceLength{ - .lhs = lhs, - .start = start, - .len = len, - .start_src_node_offset = gz.nodeIndexToRelative(lhs_node), - .sentinel = sentinel, - }); - return rvalue(gz, ri, result, node); - } - const lhs = try expr(gz, scope, .{ .rl = .ref }, node_datas[node].lhs); - - const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); - const start = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, extra.start); - const end = if (extra.end != 0) try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, extra.end) else .none; - const sentinel = try expr(gz, scope, .{ .rl = .none }, extra.sentinel); - try emitDbgStmt(gz, cursor); - const result = try gz.addPlNode(.slice_sentinel, node, Zir.Inst.SliceSentinel{ - .lhs = lhs, - .start = start, - .end = end, - .sentinel = sentinel, - }); - return rvalue(gz, ri, result, node); - }, - - .deref => { - const lhs = try expr(gz, scope, .{ .rl = .none }, node_datas[node].lhs); - _ = try gz.addUnNode(.validate_deref, lhs, node); - switch (ri.rl) { - .ref, .ref_coerced_ty => return lhs, - else => { - const result = try gz.addUnNode(.load, lhs, node); - return rvalue(gz, ri, result, node); - }, - } - }, - .address_of => { - const operand_rl: ResultInfo.Loc = if (try ri.rl.resultType(gz, node)) |res_ty_inst| rl: { - _ = try gz.addUnTok(.validate_ref_ty, res_ty_inst, tree.firstToken(node)); - break :rl .{ .ref_coerced_ty = res_ty_inst }; - } else .ref; - const result = try expr(gz, scope, .{ .rl = operand_rl }, node_datas[node].lhs); - return rvalue(gz, ri, result, node); - }, - .optional_type => { - const operand = try typeExpr(gz, scope, node_datas[node].lhs); - const result = try gz.addUnNode(.optional_type, operand, node); - return rvalue(gz, ri, result, node); - }, - .unwrap_optional => switch (ri.rl) { - .ref, .ref_coerced_ty => { - const lhs = try expr(gz, scope, .{ .rl = .ref }, node_datas[node].lhs); - - const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); - try emitDbgStmt(gz, cursor); - - return gz.addUnNode(.optional_payload_safe_ptr, lhs, node); - }, - else => { 
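// Both arms of this switch unwrap `opt.?`; the `.ref`/`.ref_coerced_ty` arm
// runs when the surrounding expression needs a pointer to the payload. A
// minimal sketch of the two surface forms (assumed example, not part of the
// original file):
//
//     test "unwrap optional by value and by pointer" {
//         var opt: ?u32 = 5;
//         const p = &opt.?; // pointer needed: optional_payload_safe_ptr
//         p.* = 6;
//         const v = opt.?; // value needed: optional_payload_safe
//         try @import("std").testing.expectEqual(@as(u32, 6), v);
//     }
//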
- const lhs = try expr(gz, scope, .{ .rl = .none }, node_datas[node].lhs); - - const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); - try emitDbgStmt(gz, cursor); - - return rvalue(gz, ri, try gz.addUnNode(.optional_payload_safe, lhs, node), node); - }, - }, - .block_two, .block_two_semicolon => { - const statements = [2]Ast.Node.Index{ node_datas[node].lhs, node_datas[node].rhs }; - if (node_datas[node].lhs == 0) { - return blockExpr(gz, scope, ri, node, statements[0..0]); - } else if (node_datas[node].rhs == 0) { - return blockExpr(gz, scope, ri, node, statements[0..1]); - } else { - return blockExpr(gz, scope, ri, node, statements[0..2]); - } - }, - .block, .block_semicolon => { - const statements = tree.extra_data[node_datas[node].lhs..node_datas[node].rhs]; - return blockExpr(gz, scope, ri, node, statements); - }, - .enum_literal => return simpleStrTok(gz, ri, main_tokens[node], node, .enum_literal), - .error_value => return simpleStrTok(gz, ri, node_datas[node].rhs, node, .error_value), - // TODO restore this when implementing https://github.com/ziglang/zig/issues/6025 - // .anyframe_literal => return rvalue(gz, ri, .anyframe_type, node), - .anyframe_literal => { - const result = try gz.addUnNode(.anyframe_type, .void_type, node); - return rvalue(gz, ri, result, node); - }, - .anyframe_type => { - const return_type = try typeExpr(gz, scope, node_datas[node].rhs); - const result = try gz.addUnNode(.anyframe_type, return_type, node); - return rvalue(gz, ri, result, node); - }, - .@"catch" => { - const catch_token = main_tokens[node]; - const payload_token: ?Ast.TokenIndex = if (token_tags[catch_token + 1] == .pipe) - catch_token + 2 - else - null; - no_switch_on_err: { - const capture_token = payload_token orelse break :no_switch_on_err; - switch (node_tags[node_datas[node].rhs]) { - .@"switch", .switch_comma => {}, - else => break :no_switch_on_err, - } - const switch_operand = node_datas[node_datas[node].rhs].lhs; - if (node_tags[switch_operand] != .identifier) break :no_switch_on_err; - if (!mem.eql(u8, tree.tokenSlice(capture_token), tree.tokenSlice(main_tokens[switch_operand]))) break :no_switch_on_err; - return switchExprErrUnion(gz, scope, ri.br(), node, .@"catch"); - } - switch (ri.rl) { - .ref, .ref_coerced_ty => return orelseCatchExpr( - gz, - scope, - ri, - node, - node_datas[node].lhs, - .is_non_err_ptr, - .err_union_payload_unsafe_ptr, - .err_union_code_ptr, - node_datas[node].rhs, - payload_token, - ), - else => return orelseCatchExpr( - gz, - scope, - ri, - node, - node_datas[node].lhs, - .is_non_err, - .err_union_payload_unsafe, - .err_union_code, - node_datas[node].rhs, - payload_token, - ), - } - }, - .@"orelse" => switch (ri.rl) { - .ref, .ref_coerced_ty => return orelseCatchExpr( - gz, - scope, - ri, - node, - node_datas[node].lhs, - .is_non_null_ptr, - .optional_payload_unsafe_ptr, - undefined, - node_datas[node].rhs, - null, - ), - else => return orelseCatchExpr( - gz, - scope, - ri, - node, - node_datas[node].lhs, - .is_non_null, - .optional_payload_unsafe, - undefined, - node_datas[node].rhs, - null, - ), - }, - - .ptr_type_aligned, - .ptr_type_sentinel, - .ptr_type, - .ptr_type_bit_range, - => return ptrType(gz, scope, ri, node, tree.fullPtrType(node).?), - - .container_decl, - .container_decl_trailing, - .container_decl_arg, - .container_decl_arg_trailing, - .container_decl_two, - .container_decl_two_trailing, - .tagged_union, - .tagged_union_trailing, - .tagged_union_enum_tag, - .tagged_union_enum_tag_trailing, - .tagged_union_two, - 
.tagged_union_two_trailing, - => { - var buf: [2]Ast.Node.Index = undefined; - return containerDecl(gz, scope, ri, node, tree.fullContainerDecl(&buf, node).?); - }, - - .@"break" => return breakExpr(gz, scope, node), - .@"continue" => return continueExpr(gz, scope, node), - .grouped_expression => return expr(gz, scope, ri, node_datas[node].lhs), - .array_type => return arrayType(gz, scope, ri, node), - .array_type_sentinel => return arrayTypeSentinel(gz, scope, ri, node), - .char_literal => return charLiteral(gz, ri, node), - .error_set_decl => return errorSetDecl(gz, ri, node), - .array_access => return arrayAccess(gz, scope, ri, node), - .@"comptime" => return comptimeExprAst(gz, scope, ri, node), - .@"switch", .switch_comma => return switchExpr(gz, scope, ri.br(), node), - - .@"nosuspend" => return nosuspendExpr(gz, scope, ri, node), - .@"suspend" => return suspendExpr(gz, scope, node), - .@"await" => return awaitExpr(gz, scope, ri, node), - .@"resume" => return resumeExpr(gz, scope, ri, node), - - .@"try" => return tryExpr(gz, scope, ri, node, node_datas[node].lhs), - - .array_init_one, - .array_init_one_comma, - .array_init_dot_two, - .array_init_dot_two_comma, - .array_init_dot, - .array_init_dot_comma, - .array_init, - .array_init_comma, - => { - var buf: [2]Ast.Node.Index = undefined; - return arrayInitExpr(gz, scope, ri, node, tree.fullArrayInit(&buf, node).?); - }, - - .struct_init_one, - .struct_init_one_comma, - .struct_init_dot_two, - .struct_init_dot_two_comma, - .struct_init_dot, - .struct_init_dot_comma, - .struct_init, - .struct_init_comma, - => { - var buf: [2]Ast.Node.Index = undefined; - return structInitExpr(gz, scope, ri, node, tree.fullStructInit(&buf, node).?); - }, - - .fn_proto_simple, - .fn_proto_multi, - .fn_proto_one, - .fn_proto, - => { - var buf: [1]Ast.Node.Index = undefined; - return fnProtoExpr(gz, scope, ri, node, tree.fullFnProto(&buf, node).?); - }, - } -} - -fn nosuspendExpr( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, -) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - const tree = astgen.tree; - const node_datas = tree.nodes.items(.data); - const body_node = node_datas[node].lhs; - assert(body_node != 0); - if (gz.nosuspend_node != 0) { - try astgen.appendErrorNodeNotes(node, "redundant nosuspend block", .{}, &[_]u32{ - try astgen.errNoteNode(gz.nosuspend_node, "other nosuspend block here", .{}), - }); - } - gz.nosuspend_node = node; - defer gz.nosuspend_node = 0; - return expr(gz, scope, ri, body_node); -} - -fn suspendExpr( - gz: *GenZir, - scope: *Scope, - node: Ast.Node.Index, -) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - const gpa = astgen.gpa; - const tree = astgen.tree; - const node_datas = tree.nodes.items(.data); - const body_node = node_datas[node].lhs; - - if (gz.nosuspend_node != 0) { - return astgen.failNodeNotes(node, "suspend inside nosuspend block", .{}, &[_]u32{ - try astgen.errNoteNode(gz.nosuspend_node, "nosuspend block here", .{}), - }); - } - if (gz.suspend_node != 0) { - return astgen.failNodeNotes(node, "cannot suspend inside suspend block", .{}, &[_]u32{ - try astgen.errNoteNode(gz.suspend_node, "other suspend block here", .{}), - }); - } - assert(body_node != 0); - - const suspend_inst = try gz.makeBlockInst(.suspend_block, node); - try gz.instructions.append(gpa, suspend_inst); - - var suspend_scope = gz.makeSubBlock(scope); - suspend_scope.suspend_node = node; - defer suspend_scope.unstack(); - - const body_result = try expr(&suspend_scope, &suspend_scope.base, .{ .rl = 
.none }, body_node); - if (!gz.refIsNoReturn(body_result)) { - _ = try suspend_scope.addBreak(.break_inline, suspend_inst, .void_value); - } - try suspend_scope.setBlockBody(suspend_inst); - - return suspend_inst.toRef(); -} - -fn awaitExpr( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, -) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - const tree = astgen.tree; - const node_datas = tree.nodes.items(.data); - const rhs_node = node_datas[node].lhs; - - if (gz.suspend_node != 0) { - return astgen.failNodeNotes(node, "cannot await inside suspend block", .{}, &[_]u32{ - try astgen.errNoteNode(gz.suspend_node, "suspend block here", .{}), - }); - } - const operand = try expr(gz, scope, .{ .rl = .ref }, rhs_node); - const result = if (gz.nosuspend_node != 0) - try gz.addExtendedPayload(.await_nosuspend, Zir.Inst.UnNode{ - .node = gz.nodeIndexToRelative(node), - .operand = operand, - }) - else - try gz.addUnNode(.@"await", operand, node); - - return rvalue(gz, ri, result, node); -} - -fn resumeExpr( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, -) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - const tree = astgen.tree; - const node_datas = tree.nodes.items(.data); - const rhs_node = node_datas[node].lhs; - const operand = try expr(gz, scope, .{ .rl = .ref }, rhs_node); - const result = try gz.addUnNode(.@"resume", operand, node); - return rvalue(gz, ri, result, node); -} - -fn fnProtoExpr( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, - fn_proto: Ast.full.FnProto, -) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - const tree = astgen.tree; - const token_tags = tree.tokens.items(.tag); - - if (fn_proto.name_token) |some| { - return astgen.failTok(some, "function type cannot have a name", .{}); - } - - const is_extern = blk: { - const maybe_extern_token = fn_proto.extern_export_inline_token orelse break :blk false; - break :blk token_tags[maybe_extern_token] == .keyword_extern; - }; - assert(!is_extern); - - var block_scope = gz.makeSubBlock(scope); - defer block_scope.unstack(); - - const block_inst = try gz.makeBlockInst(.block_inline, node); - - var noalias_bits: u32 = 0; - const is_var_args = is_var_args: { - var param_type_i: usize = 0; - var it = fn_proto.iterate(tree); - while (it.next()) |param| : (param_type_i += 1) { - const is_comptime = if (param.comptime_noalias) |token| switch (token_tags[token]) { - .keyword_noalias => is_comptime: { - noalias_bits |= @as(u32, 1) << (std.math.cast(u5, param_type_i) orelse - return astgen.failTok(token, "this compiler implementation only supports 'noalias' on the first 32 parameters", .{})); - break :is_comptime false; - }, - .keyword_comptime => true, - else => false, - } else false; - - const is_anytype = if (param.anytype_ellipsis3) |token| blk: { - switch (token_tags[token]) { - .keyword_anytype => break :blk true, - .ellipsis3 => break :is_var_args true, - else => unreachable, - } - } else false; - - const param_name = if (param.name_token) |name_token| blk: { - if (mem.eql(u8, "_", tree.tokenSlice(name_token))) - break :blk .empty; - - break :blk try astgen.identAsString(name_token); - } else .empty; - - if (is_anytype) { - const name_token = param.name_token orelse param.anytype_ellipsis3.?; - - const tag: Zir.Inst.Tag = if (is_comptime) - .param_anytype_comptime - else - .param_anytype; - _ = try block_scope.addStrTok(tag, param_name, name_token); - } else { - const param_type_node = param.type_expr; - assert(param_type_node != 0); - var 
param_gz = block_scope.makeSubBlock(scope);
- defer param_gz.unstack();
- const param_type = try expr(&param_gz, scope, coerced_type_ri, param_type_node);
- const param_inst_expected: Zir.Inst.Index = @enumFromInt(astgen.instructions.len + 1);
- _ = try param_gz.addBreakWithSrcNode(.break_inline, param_inst_expected, param_type, param_type_node);
- const main_tokens = tree.nodes.items(.main_token);
- const name_token = param.name_token orelse main_tokens[param_type_node];
- const tag: Zir.Inst.Tag = if (is_comptime) .param_comptime else .param;
- const param_inst = try block_scope.addParam(&param_gz, tag, name_token, param_name, param.first_doc_comment);
- assert(param_inst_expected == param_inst);
- }
- }
- break :is_var_args false;
- };
-
- const align_ref: Zir.Inst.Ref = if (fn_proto.ast.align_expr == 0) .none else inst: {
- break :inst try expr(&block_scope, scope, coerced_align_ri, fn_proto.ast.align_expr);
- };
-
- if (fn_proto.ast.addrspace_expr != 0) {
- return astgen.failNode(fn_proto.ast.addrspace_expr, "addrspace not allowed on function prototypes", .{});
- }
-
- if (fn_proto.ast.section_expr != 0) {
- return astgen.failNode(fn_proto.ast.section_expr, "linksection not allowed on function prototypes", .{});
- }
-
- const cc: Zir.Inst.Ref = if (fn_proto.ast.callconv_expr != 0)
- try expr(
- &block_scope,
- scope,
- .{ .rl = .{ .coerced_ty = .calling_convention_type } },
- fn_proto.ast.callconv_expr,
- )
- else
- Zir.Inst.Ref.none;
-
- const maybe_bang = tree.firstToken(fn_proto.ast.return_type) - 1;
- const is_inferred_error = token_tags[maybe_bang] == .bang;
- if (is_inferred_error) {
- return astgen.failTok(maybe_bang, "function prototype may not have inferred error set", .{});
- }
- const ret_ty = try expr(&block_scope, scope, coerced_type_ri, fn_proto.ast.return_type);
-
- const result = try block_scope.addFunc(.{
- .src_node = fn_proto.ast.proto_node,
-
- .cc_ref = cc,
- .cc_gz = null,
- .align_ref = align_ref,
- .align_gz = null,
- .ret_ref = ret_ty,
- .ret_gz = null,
- .section_ref = .none,
- .section_gz = null,
- .addrspace_ref = .none,
- .addrspace_gz = null,
-
- .param_block = block_inst,
- .body_gz = null,
- .lib_name = .empty,
- .is_var_args = is_var_args,
- .is_inferred_error = false,
- .is_test = false,
- .is_extern = false,
- .is_noinline = false,
- .noalias_bits = noalias_bits,
- });
-
- _ = try block_scope.addBreak(.break_inline, block_inst, result);
- try block_scope.setBlockBody(block_inst);
- try gz.instructions.append(astgen.gpa, block_inst);
-
- return rvalue(gz, ri, block_inst.toRef(), fn_proto.ast.proto_node);
-}
-
-fn arrayInitExpr(
- gz: *GenZir,
- scope: *Scope,
- ri: ResultInfo,
- node: Ast.Node.Index,
- array_init: Ast.full.ArrayInit,
-) InnerError!Zir.Inst.Ref {
- const astgen = gz.astgen;
- const tree = astgen.tree;
- const node_tags = tree.nodes.items(.tag);
- const main_tokens = tree.nodes.items(.main_token);
-
- assert(array_init.ast.elements.len != 0); // Otherwise it would be struct init.
-
- const array_ty: Zir.Inst.Ref, const elem_ty: Zir.Inst.Ref = inst: {
- if (array_init.ast.type_expr == 0) break :inst .{ .none, .none };
-
- infer: {
- const array_type: Ast.full.ArrayType = tree.fullArrayType(array_init.ast.type_expr) orelse break :infer;
- // This intentionally does not support `@"_"` syntax.
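// As a usage sketch (assumed example, not from the original file), this is
// the `_`-length inference the branch below detects:
//
//     test "array init with inferred length" {
//         const a = [_]u8{ 1, 2, 3 }; // `_` becomes the element count, 3
//         try @import("std").testing.expectEqual(@as(usize, 3), a.len);
//     }
//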
- if (node_tags[array_type.ast.elem_count] == .identifier and - mem.eql(u8, tree.tokenSlice(main_tokens[array_type.ast.elem_count]), "_")) - { - const len_inst = try gz.addInt(array_init.ast.elements.len); - const elem_type = try typeExpr(gz, scope, array_type.ast.elem_type); - if (array_type.ast.sentinel == 0) { - const array_type_inst = try gz.addPlNode(.array_type, array_init.ast.type_expr, Zir.Inst.Bin{ - .lhs = len_inst, - .rhs = elem_type, - }); - break :inst .{ array_type_inst, elem_type }; - } else { - const sentinel = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = elem_type } }, array_type.ast.sentinel); - const array_type_inst = try gz.addPlNode( - .array_type_sentinel, - array_init.ast.type_expr, - Zir.Inst.ArrayTypeSentinel{ - .len = len_inst, - .elem_type = elem_type, - .sentinel = sentinel, - }, - ); - break :inst .{ array_type_inst, elem_type }; - } - } - } - const array_type_inst = try typeExpr(gz, scope, array_init.ast.type_expr); - _ = try gz.addPlNode(.validate_array_init_ty, node, Zir.Inst.ArrayInit{ - .ty = array_type_inst, - .init_count = @intCast(array_init.ast.elements.len), - }); - break :inst .{ array_type_inst, .none }; - }; - - if (array_ty != .none) { - // Typed inits do not use RLS for language simplicity. - switch (ri.rl) { - .discard => { - if (elem_ty != .none) { - const elem_ri: ResultInfo = .{ .rl = .{ .ty = elem_ty } }; - for (array_init.ast.elements) |elem_init| { - _ = try expr(gz, scope, elem_ri, elem_init); - } - } else { - for (array_init.ast.elements, 0..) |elem_init, i| { - const this_elem_ty = try gz.add(.{ - .tag = .array_init_elem_type, - .data = .{ .bin = .{ - .lhs = array_ty, - .rhs = @enumFromInt(i), - } }, - }); - _ = try expr(gz, scope, .{ .rl = .{ .ty = this_elem_ty } }, elem_init); - } - } - return .void_value; - }, - .ref => return arrayInitExprTyped(gz, scope, node, array_init.ast.elements, array_ty, elem_ty, true), - else => { - const array_inst = try arrayInitExprTyped(gz, scope, node, array_init.ast.elements, array_ty, elem_ty, false); - return rvalue(gz, ri, array_inst, node); - }, - } - } - - switch (ri.rl) { - .none => return arrayInitExprAnon(gz, scope, node, array_init.ast.elements), - .discard => { - for (array_init.ast.elements) |elem_init| { - _ = try expr(gz, scope, .{ .rl = .discard }, elem_init); - } - return Zir.Inst.Ref.void_value; - }, - .ref => { - const result = try arrayInitExprAnon(gz, scope, node, array_init.ast.elements); - return gz.addUnTok(.ref, result, tree.firstToken(node)); - }, - .ref_coerced_ty => |ptr_ty_inst| { - const dest_arr_ty_inst = try gz.addPlNode(.validate_array_init_ref_ty, node, Zir.Inst.ArrayInitRefTy{ - .ptr_ty = ptr_ty_inst, - .elem_count = @intCast(array_init.ast.elements.len), - }); - return arrayInitExprTyped(gz, scope, node, array_init.ast.elements, dest_arr_ty_inst, .none, true); - }, - .ty, .coerced_ty => |result_ty_inst| { - _ = try gz.addPlNode(.validate_array_init_result_ty, node, Zir.Inst.ArrayInit{ - .ty = result_ty_inst, - .init_count = @intCast(array_init.ast.elements.len), - }); - return arrayInitExprTyped(gz, scope, node, array_init.ast.elements, result_ty_inst, .none, false); - }, - .ptr => |ptr| { - try arrayInitExprPtr(gz, scope, node, array_init.ast.elements, ptr.inst); - return .void_value; - }, - .inferred_ptr => { - // We can't get elem pointers of an untyped inferred alloc, so must perform a - // standard anonymous initialization followed by an rvalue store. - // See corresponding logic in structInitExpr. 
- const result = try arrayInitExprAnon(gz, scope, node, array_init.ast.elements); - return rvalue(gz, ri, result, node); - }, - .destructure => |destructure| { - // Untyped init - destructure directly into result pointers - if (array_init.ast.elements.len != destructure.components.len) { - return astgen.failNodeNotes(node, "expected {} elements for destructure, found {}", .{ - destructure.components.len, - array_init.ast.elements.len, - }, &.{ - try astgen.errNoteNode(destructure.src_node, "result destructured here", .{}), - }); - } - for (array_init.ast.elements, destructure.components) |elem_init, ds_comp| { - const elem_ri: ResultInfo = .{ .rl = switch (ds_comp) { - .typed_ptr => |ptr_rl| .{ .ptr = ptr_rl }, - .inferred_ptr => |ptr_inst| .{ .inferred_ptr = ptr_inst }, - .discard => .discard, - } }; - _ = try expr(gz, scope, elem_ri, elem_init); - } - return .void_value; - }, - } -} - -/// An array initialization expression using an `array_init_anon` instruction. -fn arrayInitExprAnon( - gz: *GenZir, - scope: *Scope, - node: Ast.Node.Index, - elements: []const Ast.Node.Index, -) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - - const payload_index = try addExtra(astgen, Zir.Inst.MultiOp{ - .operands_len = @intCast(elements.len), - }); - var extra_index = try reserveExtra(astgen, elements.len); - - for (elements) |elem_init| { - const elem_ref = try expr(gz, scope, .{ .rl = .none }, elem_init); - astgen.extra.items[extra_index] = @intFromEnum(elem_ref); - extra_index += 1; - } - return try gz.addPlNodePayloadIndex(.array_init_anon, node, payload_index); -} - -/// An array initialization expression using an `array_init` or `array_init_ref` instruction. -fn arrayInitExprTyped( - gz: *GenZir, - scope: *Scope, - node: Ast.Node.Index, - elements: []const Ast.Node.Index, - ty_inst: Zir.Inst.Ref, - maybe_elem_ty_inst: Zir.Inst.Ref, - is_ref: bool, -) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - - const len = elements.len + 1; // +1 for type - const payload_index = try addExtra(astgen, Zir.Inst.MultiOp{ - .operands_len = @intCast(len), - }); - var extra_index = try reserveExtra(astgen, len); - astgen.extra.items[extra_index] = @intFromEnum(ty_inst); - extra_index += 1; - - if (maybe_elem_ty_inst != .none) { - const elem_ri: ResultInfo = .{ .rl = .{ .coerced_ty = maybe_elem_ty_inst } }; - for (elements) |elem_init| { - const elem_inst = try expr(gz, scope, elem_ri, elem_init); - astgen.extra.items[extra_index] = @intFromEnum(elem_inst); - extra_index += 1; - } - } else { - for (elements, 0..) |elem_init, i| { - const ri: ResultInfo = .{ .rl = .{ .coerced_ty = try gz.add(.{ - .tag = .array_init_elem_type, - .data = .{ .bin = .{ - .lhs = ty_inst, - .rhs = @enumFromInt(i), - } }, - }) } }; - - const elem_inst = try expr(gz, scope, ri, elem_init); - astgen.extra.items[extra_index] = @intFromEnum(elem_inst); - extra_index += 1; - } - } - - const tag: Zir.Inst.Tag = if (is_ref) .array_init_ref else .array_init; - return try gz.addPlNodePayloadIndex(tag, node, payload_index); -} - -/// An array initialization expression using element pointers. 
-fn arrayInitExprPtr( - gz: *GenZir, - scope: *Scope, - node: Ast.Node.Index, - elements: []const Ast.Node.Index, - ptr_inst: Zir.Inst.Ref, -) InnerError!void { - const astgen = gz.astgen; - - const array_ptr_inst = try gz.addUnNode(.opt_eu_base_ptr_init, ptr_inst, node); - - const payload_index = try addExtra(astgen, Zir.Inst.Block{ - .body_len = @intCast(elements.len), - }); - var extra_index = try reserveExtra(astgen, elements.len); - - for (elements, 0..) |elem_init, i| { - const elem_ptr_inst = try gz.addPlNode(.array_init_elem_ptr, elem_init, Zir.Inst.ElemPtrImm{ - .ptr = array_ptr_inst, - .index = @intCast(i), - }); - astgen.extra.items[extra_index] = @intFromEnum(elem_ptr_inst.toIndex().?); - extra_index += 1; - _ = try expr(gz, scope, .{ .rl = .{ .ptr = .{ .inst = elem_ptr_inst } } }, elem_init); - } - - _ = try gz.addPlNodePayloadIndex(.validate_ptr_array_init, node, payload_index); -} - -fn structInitExpr( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, - struct_init: Ast.full.StructInit, -) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - const tree = astgen.tree; - - if (struct_init.ast.type_expr == 0) { - if (struct_init.ast.fields.len == 0) { - // Anonymous init with no fields. - switch (ri.rl) { - .discard => return .void_value, - .ref_coerced_ty => |ptr_ty_inst| return gz.addUnNode(.struct_init_empty_ref_result, ptr_ty_inst, node), - .ty, .coerced_ty => |ty_inst| return gz.addUnNode(.struct_init_empty_result, ty_inst, node), - .ptr => { - // TODO: should we modify this to use RLS for the field stores here? - const ty_inst = (try ri.rl.resultType(gz, node)).?; - const val = try gz.addUnNode(.struct_init_empty_result, ty_inst, node); - return rvalue(gz, ri, val, node); - }, - .none, .ref, .inferred_ptr => { - return rvalue(gz, ri, .empty_struct, node); - }, - .destructure => |destructure| { - return astgen.failNodeNotes(node, "empty initializer cannot be destructured", .{}, &.{ - try astgen.errNoteNode(destructure.src_node, "result destructured here", .{}), - }); - }, - } - } - } else array: { - const node_tags = tree.nodes.items(.tag); - const main_tokens = tree.nodes.items(.main_token); - const array_type: Ast.full.ArrayType = tree.fullArrayType(struct_init.ast.type_expr) orelse { - if (struct_init.ast.fields.len == 0) { - const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr); - const result = try gz.addUnNode(.struct_init_empty, ty_inst, node); - return rvalue(gz, ri, result, node); - } - break :array; - }; - const is_inferred_array_len = node_tags[array_type.ast.elem_count] == .identifier and - // This intentionally does not support `@"_"` syntax. 
- mem.eql(u8, tree.tokenSlice(main_tokens[array_type.ast.elem_count]), "_"); - if (struct_init.ast.fields.len == 0) { - if (is_inferred_array_len) { - const elem_type = try typeExpr(gz, scope, array_type.ast.elem_type); - const array_type_inst = if (array_type.ast.sentinel == 0) blk: { - break :blk try gz.addPlNode(.array_type, struct_init.ast.type_expr, Zir.Inst.Bin{ - .lhs = .zero_usize, - .rhs = elem_type, - }); - } else blk: { - const sentinel = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = elem_type } }, array_type.ast.sentinel); - break :blk try gz.addPlNode( - .array_type_sentinel, - struct_init.ast.type_expr, - Zir.Inst.ArrayTypeSentinel{ - .len = .zero_usize, - .elem_type = elem_type, - .sentinel = sentinel, - }, - ); - }; - const result = try gz.addUnNode(.struct_init_empty, array_type_inst, node); - return rvalue(gz, ri, result, node); - } - const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr); - const result = try gz.addUnNode(.struct_init_empty, ty_inst, node); - return rvalue(gz, ri, result, node); - } else { - return astgen.failNode( - struct_init.ast.type_expr, - "initializing array with struct syntax", - .{}, - ); - } - } - - { - var sfba = std.heap.stackFallback(256, astgen.arena); - const sfba_allocator = sfba.get(); - - var duplicate_names = std.AutoArrayHashMap(Zir.NullTerminatedString, ArrayListUnmanaged(Ast.TokenIndex)).init(sfba_allocator); - try duplicate_names.ensureTotalCapacity(@intCast(struct_init.ast.fields.len)); - - // When there aren't errors, use this to avoid a second iteration. - var any_duplicate = false; - - for (struct_init.ast.fields) |field| { - const name_token = tree.firstToken(field) - 2; - const name_index = try astgen.identAsString(name_token); - - const gop = try duplicate_names.getOrPut(name_index); - - if (gop.found_existing) { - try gop.value_ptr.append(sfba_allocator, name_token); - any_duplicate = true; - } else { - gop.value_ptr.* = .{}; - try gop.value_ptr.append(sfba_allocator, name_token); - } - } - - if (any_duplicate) { - var it = duplicate_names.iterator(); - - while (it.next()) |entry| { - const record = entry.value_ptr.*; - if (record.items.len > 1) { - var error_notes = std.ArrayList(u32).init(astgen.arena); - - for (record.items[1..]) |duplicate| { - try error_notes.append(try astgen.errNoteTok(duplicate, "duplicate name here", .{})); - } - - try error_notes.append(try astgen.errNoteNode(node, "struct declared here", .{})); - - try astgen.appendErrorTokNotes( - record.items[0], - "duplicate struct field name", - .{}, - error_notes.items, - ); - } - } - - return error.AnalysisFail; - } - } - - if (struct_init.ast.type_expr != 0) { - // Typed inits do not use RLS for language simplicity. - const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr); - _ = try gz.addUnNode(.validate_struct_init_ty, ty_inst, node); - switch (ri.rl) { - .ref => return structInitExprTyped(gz, scope, node, struct_init, ty_inst, true), - else => { - const struct_inst = try structInitExprTyped(gz, scope, node, struct_init, ty_inst, false); - return rvalue(gz, ri, struct_inst, node); - }, - } - } - - switch (ri.rl) { - .none => return structInitExprAnon(gz, scope, node, struct_init), - .discard => { - // Even if discarding we must perform side-effects. 
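// For instance (assumed illustration, not part of the original file), the
// field initializer below must still run even though the result is unused:
//
//     test "discarded struct init still evaluates its fields" {
//         var calls: u32 = 0;
//         const S = struct {
//             fn bump(c: *u32) u32 {
//                 c.* += 1;
//                 return c.*;
//             }
//         };
//         _ = .{ .a = S.bump(&calls) };
//         try @import("std").testing.expectEqual(@as(u32, 1), calls);
//     }
//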
- for (struct_init.ast.fields) |field_init| { - _ = try expr(gz, scope, .{ .rl = .discard }, field_init); - } - return .void_value; - }, - .ref => { - const result = try structInitExprAnon(gz, scope, node, struct_init); - return gz.addUnTok(.ref, result, tree.firstToken(node)); - }, - .ref_coerced_ty => |ptr_ty_inst| { - const result_ty_inst = try gz.addUnNode(.elem_type, ptr_ty_inst, node); - _ = try gz.addUnNode(.validate_struct_init_result_ty, result_ty_inst, node); - return structInitExprTyped(gz, scope, node, struct_init, result_ty_inst, true); - }, - .ty, .coerced_ty => |result_ty_inst| { - _ = try gz.addUnNode(.validate_struct_init_result_ty, result_ty_inst, node); - return structInitExprTyped(gz, scope, node, struct_init, result_ty_inst, false); - }, - .ptr => |ptr| { - try structInitExprPtr(gz, scope, node, struct_init, ptr.inst); - return .void_value; - }, - .inferred_ptr => { - // We can't get field pointers of an untyped inferred alloc, so must perform a - // standard anonymous initialization followed by an rvalue store. - // See corresponding logic in arrayInitExpr. - const struct_inst = try structInitExprAnon(gz, scope, node, struct_init); - return rvalue(gz, ri, struct_inst, node); - }, - .destructure => |destructure| { - // This is an untyped init, so is an actual struct, which does - // not support destructuring. - return astgen.failNodeNotes(node, "struct value cannot be destructured", .{}, &.{ - try astgen.errNoteNode(destructure.src_node, "result destructured here", .{}), - }); - }, - } -} - -/// A struct initialization expression using a `struct_init_anon` instruction. -fn structInitExprAnon( - gz: *GenZir, - scope: *Scope, - node: Ast.Node.Index, - struct_init: Ast.full.StructInit, -) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - const tree = astgen.tree; - - const payload_index = try addExtra(astgen, Zir.Inst.StructInitAnon{ - .fields_len = @intCast(struct_init.ast.fields.len), - }); - const field_size = @typeInfo(Zir.Inst.StructInitAnon.Item).Struct.fields.len; - var extra_index: usize = try reserveExtra(astgen, struct_init.ast.fields.len * field_size); - - for (struct_init.ast.fields) |field_init| { - const name_token = tree.firstToken(field_init) - 2; - const str_index = try astgen.identAsString(name_token); - setExtra(astgen, extra_index, Zir.Inst.StructInitAnon.Item{ - .field_name = str_index, - .init = try expr(gz, scope, .{ .rl = .none }, field_init), - }); - extra_index += field_size; - } - - return gz.addPlNodePayloadIndex(.struct_init_anon, node, payload_index); -} - -/// A struct initialization expression using a `struct_init` or `struct_init_ref` instruction. 
-fn structInitExprTyped( - gz: *GenZir, - scope: *Scope, - node: Ast.Node.Index, - struct_init: Ast.full.StructInit, - ty_inst: Zir.Inst.Ref, - is_ref: bool, -) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - const tree = astgen.tree; - - const payload_index = try addExtra(astgen, Zir.Inst.StructInit{ - .fields_len = @intCast(struct_init.ast.fields.len), - }); - const field_size = @typeInfo(Zir.Inst.StructInit.Item).Struct.fields.len; - var extra_index: usize = try reserveExtra(astgen, struct_init.ast.fields.len * field_size); - - for (struct_init.ast.fields) |field_init| { - const name_token = tree.firstToken(field_init) - 2; - const str_index = try astgen.identAsString(name_token); - const field_ty_inst = try gz.addPlNode(.struct_init_field_type, field_init, Zir.Inst.FieldType{ - .container_type = ty_inst, - .name_start = str_index, - }); - setExtra(astgen, extra_index, Zir.Inst.StructInit.Item{ - .field_type = field_ty_inst.toIndex().?, - .init = try expr(gz, scope, .{ .rl = .{ .coerced_ty = field_ty_inst } }, field_init), - }); - extra_index += field_size; - } - - const tag: Zir.Inst.Tag = if (is_ref) .struct_init_ref else .struct_init; - return gz.addPlNodePayloadIndex(tag, node, payload_index); -} - -/// A struct initialization expression using field pointers. -fn structInitExprPtr( - gz: *GenZir, - scope: *Scope, - node: Ast.Node.Index, - struct_init: Ast.full.StructInit, - ptr_inst: Zir.Inst.Ref, -) InnerError!void { - const astgen = gz.astgen; - const tree = astgen.tree; - - const struct_ptr_inst = try gz.addUnNode(.opt_eu_base_ptr_init, ptr_inst, node); - - const payload_index = try addExtra(astgen, Zir.Inst.Block{ - .body_len = @intCast(struct_init.ast.fields.len), - }); - var extra_index = try reserveExtra(astgen, struct_init.ast.fields.len); - - for (struct_init.ast.fields) |field_init| { - const name_token = tree.firstToken(field_init) - 2; - const str_index = try astgen.identAsString(name_token); - const field_ptr = try gz.addPlNode(.struct_init_field_ptr, field_init, Zir.Inst.Field{ - .lhs = struct_ptr_inst, - .field_name_start = str_index, - }); - astgen.extra.items[extra_index] = @intFromEnum(field_ptr.toIndex().?); - extra_index += 1; - _ = try expr(gz, scope, .{ .rl = .{ .ptr = .{ .inst = field_ptr } } }, field_init); - } - - _ = try gz.addPlNodePayloadIndex(.validate_ptr_struct_init, node, payload_index); -} - -/// This explicitly calls expr in a comptime scope by wrapping it in a `block_comptime` if -/// necessary. It should be used whenever we need to force compile-time evaluation of something, -/// such as a type. -/// The function corresponding to `comptime` expression syntax is `comptimeExprAst`. -fn comptimeExpr( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, -) InnerError!Zir.Inst.Ref { - if (gz.is_comptime) { - // No need to change anything! - return expr(gz, scope, ri, node); - } - - // There's an optimization here: if the body will be evaluated at comptime regardless, there's - // no need to wrap it in a block. This is hard to determine in general, but we can identify a - // common subset of trivially comptime expressions to take down the size of the ZIR a bit. - const tree = gz.astgen.tree; - const main_tokens = tree.nodes.items(.main_token); - const node_tags = tree.nodes.items(.tag); - switch (node_tags[node]) { - // Any identifier in `primitive_instrs` is trivially comptime. In particular, this includes - // some common types, so we can elide `block_comptime` for a few common type annotations. 
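// For example (assumed illustration, not part of the original file), the
// `u32` annotation below is such a trivially comptime identifier, so no
// wrapping block_comptime needs to be emitted for it:
//
//     test "primitive type annotation" {
//         const x: u32 = 123;
//         try @import("std").testing.expectEqual(@as(u32, 123), x);
//     }
//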
- .identifier => { - const ident_token = main_tokens[node]; - const ident_name_raw = tree.tokenSlice(ident_token); - if (primitive_instrs.get(ident_name_raw)) |zir_const_ref| { - // No need to worry about result location here, we're not creating a comptime block! - return rvalue(gz, ri, zir_const_ref, node); - } - }, - - // We can also avoid the block for a few trivial AST tags which are always comptime-known. - .number_literal, .string_literal, .multiline_string_literal, .enum_literal, .error_value => { - // No need to worry about result location here, we're not creating a comptime block! - return expr(gz, scope, ri, node); - }, - - // Lastly, for labelled blocks, avoid emitting a labelled block directly inside this - // comptime block, because that would be silly! Note that we don't bother doing this for - // unlabelled blocks, since they don't generate blocks at comptime anyway (see `blockExpr`). - .block_two, .block_two_semicolon, .block, .block_semicolon => { - const token_tags = tree.tokens.items(.tag); - const lbrace = main_tokens[node]; - // Careful! We can't pass in the real result location here, since it may - // refer to runtime memory. A runtime-to-comptime boundary has to remove - // result location information, compute the result, and copy it to the true - // result location at runtime. We do this below as well. - const ty_only_ri: ResultInfo = .{ - .ctx = ri.ctx, - .rl = if (try ri.rl.resultType(gz, node)) |res_ty| - .{ .coerced_ty = res_ty } - else - .none, - }; - if (token_tags[lbrace - 1] == .colon and - token_tags[lbrace - 2] == .identifier) - { - const node_datas = tree.nodes.items(.data); - switch (node_tags[node]) { - .block_two, .block_two_semicolon => { - const stmts: [2]Ast.Node.Index = .{ node_datas[node].lhs, node_datas[node].rhs }; - const stmt_slice = if (stmts[0] == 0) - stmts[0..0] - else if (stmts[1] == 0) - stmts[0..1] - else - stmts[0..2]; - - const block_ref = try labeledBlockExpr(gz, scope, ty_only_ri, node, stmt_slice, true); - return rvalue(gz, ri, block_ref, node); - }, - .block, .block_semicolon => { - const stmts = tree.extra_data[node_datas[node].lhs..node_datas[node].rhs]; - // Replace result location and copy back later - see above. - const block_ref = try labeledBlockExpr(gz, scope, ty_only_ri, node, stmts, true); - return rvalue(gz, ri, block_ref, node); - }, - else => unreachable, - } - } - }, - - // In other cases, we don't optimize anything - we need a wrapper comptime block. - else => {}, - } - - var block_scope = gz.makeSubBlock(scope); - block_scope.is_comptime = true; - defer block_scope.unstack(); - - const block_inst = try gz.makeBlockInst(.block_comptime, node); - // Replace result location and copy back later - see above. - const ty_only_ri: ResultInfo = .{ - .ctx = ri.ctx, - .rl = if (try ri.rl.resultType(gz, node)) |res_ty| - .{ .coerced_ty = res_ty } - else - .none, - }; - const block_result = try expr(&block_scope, scope, ty_only_ri, node); - if (!gz.refIsNoReturn(block_result)) { - _ = try block_scope.addBreak(.@"break", block_inst, block_result); - } - try block_scope.setBlockBody(block_inst); - try gz.instructions.append(gz.astgen.gpa, block_inst); - - return rvalue(gz, ri, block_inst.toRef(), node); -} - -/// This one is for an actual `comptime` syntax, and will emit a compile error if -/// the scope is already known to be comptime-evaluated. -/// See `comptimeExpr` for the helper function for calling expr in a comptime scope. 
-fn comptimeExprAst( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, -) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - if (gz.is_comptime) { - return astgen.failNode(node, "redundant comptime keyword in already comptime scope", .{}); - } - const tree = astgen.tree; - const node_datas = tree.nodes.items(.data); - const body_node = node_datas[node].lhs; - return comptimeExpr(gz, scope, ri, body_node); -} - -/// Restore the error return trace index. Performs the restore only if the result is a non-error or -/// if the result location is a non-error-handling expression. -fn restoreErrRetIndex( - gz: *GenZir, - bt: GenZir.BranchTarget, - ri: ResultInfo, - node: Ast.Node.Index, - result: Zir.Inst.Ref, -) !void { - const op = switch (nodeMayEvalToError(gz.astgen.tree, node)) { - .always => return, // never restore/pop - .never => .none, // always restore/pop - .maybe => switch (ri.ctx) { - .error_handling_expr, .@"return", .fn_arg, .const_init => switch (ri.rl) { - .ptr => |ptr_res| try gz.addUnNode(.load, ptr_res.inst, node), - .inferred_ptr => blk: { - // This is a terrible workaround for Sema's inability to load from a .alloc_inferred ptr - // before its type has been resolved. There is no valid operand to use here, so error - // traces will be popped prematurely. - // TODO: Update this to do a proper load from the rl_ptr, once Sema can support it. - break :blk .none; - }, - .destructure => return, // value must be a tuple or array, so never restore/pop - else => result, - }, - else => .none, // always restore/pop - }, - }; - _ = try gz.addRestoreErrRetIndex(bt, .{ .if_non_error = op }, node); -} - -fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { - const astgen = parent_gz.astgen; - const tree = astgen.tree; - const node_datas = tree.nodes.items(.data); - const break_label = node_datas[node].lhs; - const rhs = node_datas[node].rhs; - - // Look for the label in the scope. - var scope = parent_scope; - while (true) { - switch (scope.tag) { - .gen_zir => { - const block_gz = scope.cast(GenZir).?; - - if (block_gz.cur_defer_node != 0) { - // We are breaking out of a `defer` block. 
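// The rejected form looks like this (assumed illustration; it intentionally
// does not compile, which is exactly what the error below reports):
//
//     fn invalid() void {
//         blk: {
//             defer break :blk; // error: cannot break out of defer expression
//         }
//     }
//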
- return astgen.failNodeNotes(node, "cannot break out of defer expression", .{}, &.{ - try astgen.errNoteNode( - block_gz.cur_defer_node, - "defer expression here", - .{}, - ), - }); - } - - const block_inst = blk: { - if (break_label != 0) { - if (block_gz.label) |*label| { - if (try astgen.tokenIdentEql(label.token, break_label)) { - label.used = true; - break :blk label.block_inst; - } - } - } else if (block_gz.break_block.unwrap()) |i| { - break :blk i; - } - // If not the target, start over with the parent - scope = block_gz.parent; - continue; - }; - // If we made it here, this block is the target of the break expr - - const break_tag: Zir.Inst.Tag = if (block_gz.is_inline) - .break_inline - else - .@"break"; - - if (rhs == 0) { - _ = try rvalue(parent_gz, block_gz.break_result_info, .void_value, node); - - try genDefers(parent_gz, scope, parent_scope, .normal_only); - - // As our last action before the break, "pop" the error trace if needed - if (!block_gz.is_comptime) - _ = try parent_gz.addRestoreErrRetIndex(.{ .block = block_inst }, .always, node); - - _ = try parent_gz.addBreak(break_tag, block_inst, .void_value); - return Zir.Inst.Ref.unreachable_value; - } - - const operand = try reachableExpr(parent_gz, parent_scope, block_gz.break_result_info, rhs, node); - - try genDefers(parent_gz, scope, parent_scope, .normal_only); - - // As our last action before the break, "pop" the error trace if needed - if (!block_gz.is_comptime) - try restoreErrRetIndex(parent_gz, .{ .block = block_inst }, block_gz.break_result_info, rhs, operand); - - switch (block_gz.break_result_info.rl) { - .ptr => { - // In this case we don't have any mechanism to intercept it; - // we assume the result location is written, and we break with void. - _ = try parent_gz.addBreak(break_tag, block_inst, .void_value); - }, - .discard => { - _ = try parent_gz.addBreak(break_tag, block_inst, .void_value); - }, - else => { - _ = try parent_gz.addBreakWithSrcNode(break_tag, block_inst, operand, rhs); - }, - } - return Zir.Inst.Ref.unreachable_value; - }, - .local_val => scope = scope.cast(Scope.LocalVal).?.parent, - .local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent, - .namespace, .enum_namespace => break, - .defer_normal, .defer_error => scope = scope.cast(Scope.Defer).?.parent, - .top => unreachable, - } - } - if (break_label != 0) { - const label_name = try astgen.identifierTokenString(break_label); - return astgen.failTok(break_label, "label not found: '{s}'", .{label_name}); - } else { - return astgen.failNode(node, "break expression outside loop", .{}); - } -} - -fn continueExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { - const astgen = parent_gz.astgen; - const tree = astgen.tree; - const node_datas = tree.nodes.items(.data); - const break_label = node_datas[node].lhs; - - // Look for the label in the scope. 
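// A labeled continue that this lookup resolves (assumed example, not part of
// the original file):
//
//     test "labeled continue targets the outer loop" {
//         var sum: usize = 0;
//         outer: for (0..3) |i| {
//             for (0..3) |j| {
//                 if (j == 1) continue :outer;
//                 sum += i + j;
//             }
//         }
//         try @import("std").testing.expectEqual(@as(usize, 3), sum);
//     }
//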
- var scope = parent_scope; - while (true) { - switch (scope.tag) { - .gen_zir => { - const gen_zir = scope.cast(GenZir).?; - - if (gen_zir.cur_defer_node != 0) { - return astgen.failNodeNotes(node, "cannot continue out of defer expression", .{}, &.{ - try astgen.errNoteNode( - gen_zir.cur_defer_node, - "defer expression here", - .{}, - ), - }); - } - const continue_block = gen_zir.continue_block.unwrap() orelse { - scope = gen_zir.parent; - continue; - }; - if (break_label != 0) blk: { - if (gen_zir.label) |*label| { - if (try astgen.tokenIdentEql(label.token, break_label)) { - label.used = true; - break :blk; - } - } - // found continue but either it has a different label, or no label - scope = gen_zir.parent; - continue; - } - - const break_tag: Zir.Inst.Tag = if (gen_zir.is_inline) - .break_inline - else - .@"break"; - if (break_tag == .break_inline) { - _ = try parent_gz.addUnNode(.check_comptime_control_flow, continue_block.toRef(), node); - } - - // As our last action before the continue, "pop" the error trace if needed - if (!gen_zir.is_comptime) - _ = try parent_gz.addRestoreErrRetIndex(.{ .block = continue_block }, .always, node); - - _ = try parent_gz.addBreak(break_tag, continue_block, .void_value); - return Zir.Inst.Ref.unreachable_value; - }, - .local_val => scope = scope.cast(Scope.LocalVal).?.parent, - .local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent, - .defer_normal => { - const defer_scope = scope.cast(Scope.Defer).?; - scope = defer_scope.parent; - try parent_gz.addDefer(defer_scope.index, defer_scope.len); - }, - .defer_error => scope = scope.cast(Scope.Defer).?.parent, - .namespace, .enum_namespace => break, - .top => unreachable, - } - } - if (break_label != 0) { - const label_name = try astgen.identifierTokenString(break_label); - return astgen.failTok(break_label, "label not found: '{s}'", .{label_name}); - } else { - return astgen.failNode(node, "continue expression outside loop", .{}); - } -} - -fn blockExpr( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - block_node: Ast.Node.Index, - statements: []const Ast.Node.Index, -) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - const tree = astgen.tree; - const main_tokens = tree.nodes.items(.main_token); - const token_tags = tree.tokens.items(.tag); - - const lbrace = main_tokens[block_node]; - if (token_tags[lbrace - 1] == .colon and - token_tags[lbrace - 2] == .identifier) - { - return labeledBlockExpr(gz, scope, ri, block_node, statements, false); - } - - if (!gz.is_comptime) { - // Since this block is unlabeled, its control flow is effectively linear and we - // can *almost* get away with inlining the block here. However, we actually need - // to preserve the .block for Sema, to properly pop the error return trace. 
- - const block_tag: Zir.Inst.Tag = .block; - const block_inst = try gz.makeBlockInst(block_tag, block_node); - try gz.instructions.append(astgen.gpa, block_inst); - - var block_scope = gz.makeSubBlock(scope); - defer block_scope.unstack(); - - try blockExprStmts(&block_scope, &block_scope.base, statements); - - if (!block_scope.endsWithNoReturn()) { - // As our last action before the break, "pop" the error trace if needed - _ = try gz.addRestoreErrRetIndex(.{ .block = block_inst }, .always, block_node); - _ = try block_scope.addBreak(.@"break", block_inst, .void_value); - } - - try block_scope.setBlockBody(block_inst); - } else { - var sub_gz = gz.makeSubBlock(scope); - try blockExprStmts(&sub_gz, &sub_gz.base, statements); - } - - return rvalue(gz, ri, .void_value, block_node); -} - -fn checkLabelRedefinition(astgen: *AstGen, parent_scope: *Scope, label: Ast.TokenIndex) !void { - // Look for the label in the scope. - var scope = parent_scope; - while (true) { - switch (scope.tag) { - .gen_zir => { - const gen_zir = scope.cast(GenZir).?; - if (gen_zir.label) |prev_label| { - if (try astgen.tokenIdentEql(label, prev_label.token)) { - const label_name = try astgen.identifierTokenString(label); - return astgen.failTokNotes(label, "redefinition of label '{s}'", .{ - label_name, - }, &[_]u32{ - try astgen.errNoteTok( - prev_label.token, - "previous definition here", - .{}, - ), - }); - } - } - scope = gen_zir.parent; - }, - .local_val => scope = scope.cast(Scope.LocalVal).?.parent, - .local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent, - .defer_normal, .defer_error => scope = scope.cast(Scope.Defer).?.parent, - .namespace, .enum_namespace => break, - .top => unreachable, - } - } -} - -fn labeledBlockExpr( - gz: *GenZir, - parent_scope: *Scope, - ri: ResultInfo, - block_node: Ast.Node.Index, - statements: []const Ast.Node.Index, - force_comptime: bool, -) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - const tree = astgen.tree; - const main_tokens = tree.nodes.items(.main_token); - const token_tags = tree.tokens.items(.tag); - - const lbrace = main_tokens[block_node]; - const label_token = lbrace - 2; - assert(token_tags[label_token] == .identifier); - - try astgen.checkLabelRedefinition(parent_scope, label_token); - - const need_rl = astgen.nodes_need_rl.contains(block_node); - const block_ri: ResultInfo = if (need_rl) ri else .{ - .rl = switch (ri.rl) { - .ptr => .{ .ty = (try ri.rl.resultType(gz, block_node)).? }, - .inferred_ptr => .none, - else => ri.rl, - }, - .ctx = ri.ctx, - }; - // We need to call `rvalue` to write through to the pointer only if we had a - // result pointer and aren't forwarding it. - const LocTag = @typeInfo(ResultInfo.Loc).Union.tag_type.?; - const need_result_rvalue = @as(LocTag, block_ri.rl) != @as(LocTag, ri.rl); - - // Reserve the Block ZIR instruction index so that we can put it into the GenZir struct - // so that break statements can reference it. 
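// That forward reference is what lets a labeled break yield the block's
// value (assumed example, not part of the original file):
//
//     test "labeled block yields a value via break" {
//         const v = blk: {
//             break :blk @as(u32, 42);
//         };
//         try @import("std").testing.expectEqual(@as(u32, 42), v);
//     }
//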
- const block_tag: Zir.Inst.Tag = if (force_comptime) .block_comptime else .block; - const block_inst = try gz.makeBlockInst(block_tag, block_node); - try gz.instructions.append(astgen.gpa, block_inst); - var block_scope = gz.makeSubBlock(parent_scope); - block_scope.label = GenZir.Label{ - .token = label_token, - .block_inst = block_inst, - }; - block_scope.setBreakResultInfo(block_ri); - if (force_comptime) block_scope.is_comptime = true; - defer block_scope.unstack(); - - try blockExprStmts(&block_scope, &block_scope.base, statements); - if (!block_scope.endsWithNoReturn()) { - // As our last action before the return, "pop" the error trace if needed - _ = try gz.addRestoreErrRetIndex(.{ .block = block_inst }, .always, block_node); - _ = try block_scope.addBreak(.@"break", block_inst, .void_value); - } - - if (!block_scope.label.?.used) { - try astgen.appendErrorTok(label_token, "unused block label", .{}); - } - - try block_scope.setBlockBody(block_inst); - if (need_result_rvalue) { - return rvalue(gz, ri, block_inst.toRef(), block_node); - } else { - return block_inst.toRef(); - } -} - -fn blockExprStmts(gz: *GenZir, parent_scope: *Scope, statements: []const Ast.Node.Index) !void { - const astgen = gz.astgen; - const tree = astgen.tree; - const node_tags = tree.nodes.items(.tag); - const node_data = tree.nodes.items(.data); - - if (statements.len == 0) return; - - var block_arena = std.heap.ArenaAllocator.init(gz.astgen.gpa); - defer block_arena.deinit(); - const block_arena_allocator = block_arena.allocator(); - - var noreturn_src_node: Ast.Node.Index = 0; - var scope = parent_scope; - for (statements) |statement| { - if (noreturn_src_node != 0) { - try astgen.appendErrorNodeNotes( - statement, - "unreachable code", - .{}, - &[_]u32{ - try astgen.errNoteNode( - noreturn_src_node, - "control flow is diverted here", - .{}, - ), - }, - ); - } - var inner_node = statement; - while (true) { - switch (node_tags[inner_node]) { - // zig fmt: off - .global_var_decl, - .local_var_decl, - .simple_var_decl, - .aligned_var_decl, => scope = try varDecl(gz, scope, statement, block_arena_allocator, tree.fullVarDecl(statement).?), - - .assign_destructure => scope = try assignDestructureMaybeDecls(gz, scope, statement, block_arena_allocator), - - .@"defer" => scope = try deferStmt(gz, scope, statement, block_arena_allocator, .defer_normal), - .@"errdefer" => scope = try deferStmt(gz, scope, statement, block_arena_allocator, .defer_error), - - .assign => try assign(gz, scope, statement), - - .assign_shl => try assignShift(gz, scope, statement, .shl), - .assign_shr => try assignShift(gz, scope, statement, .shr), - - .assign_bit_and => try assignOp(gz, scope, statement, .bit_and), - .assign_bit_or => try assignOp(gz, scope, statement, .bit_or), - .assign_bit_xor => try assignOp(gz, scope, statement, .xor), - .assign_div => try assignOp(gz, scope, statement, .div), - .assign_sub => try assignOp(gz, scope, statement, .sub), - .assign_sub_wrap => try assignOp(gz, scope, statement, .subwrap), - .assign_mod => try assignOp(gz, scope, statement, .mod_rem), - .assign_add => try assignOp(gz, scope, statement, .add), - .assign_add_wrap => try assignOp(gz, scope, statement, .addwrap), - .assign_mul => try assignOp(gz, scope, statement, .mul), - .assign_mul_wrap => try assignOp(gz, scope, statement, .mulwrap), - - .grouped_expression => { - inner_node = node_data[statement].lhs; - continue; - }, - - .while_simple, - .while_cont, - .@"while", => _ = try whileExpr(gz, scope, .{ .rl = .none }, inner_node, 
tree.fullWhile(inner_node).?, true), - - .for_simple, - .@"for", => _ = try forExpr(gz, scope, .{ .rl = .none }, inner_node, tree.fullFor(inner_node).?, true), - - else => noreturn_src_node = try unusedResultExpr(gz, scope, inner_node), - // zig fmt: on - } - break; - } - } - - try genDefers(gz, parent_scope, scope, .normal_only); - try checkUsed(gz, parent_scope, scope); -} - -/// Returns AST source node of the thing that is noreturn if the statement is -/// definitely `noreturn`. Otherwise returns 0. -fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) InnerError!Ast.Node.Index { - try emitDbgNode(gz, statement); - // We need to emit an error if the result is not `noreturn` or `void`, but - // we want to avoid adding the ZIR instruction if possible for performance. - const maybe_unused_result = try expr(gz, scope, .{ .rl = .none }, statement); - return addEnsureResult(gz, maybe_unused_result, statement); -} - -fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: Ast.Node.Index) InnerError!Ast.Node.Index { - var noreturn_src_node: Ast.Node.Index = 0; - const elide_check = if (maybe_unused_result.toIndex()) |inst| b: { - // Note that this array becomes invalid after appending more items to it - // in the above while loop. - const zir_tags = gz.astgen.instructions.items(.tag); - switch (zir_tags[@intFromEnum(inst)]) { - // For some instructions, modify the zir data - // so we can avoid a separate ensure_result_used instruction. - .call, .field_call => { - const break_extra = gz.astgen.instructions.items(.data)[@intFromEnum(inst)].pl_node.payload_index; - comptime assert(std.meta.fieldIndex(Zir.Inst.Call, "flags") == - std.meta.fieldIndex(Zir.Inst.FieldCall, "flags")); - const flags: *Zir.Inst.Call.Flags = @ptrCast(&gz.astgen.extra.items[ - break_extra + std.meta.fieldIndex(Zir.Inst.Call, "flags").? - ]); - flags.ensure_result_used = true; - break :b true; - }, - .builtin_call => { - const break_extra = gz.astgen.instructions.items(.data)[@intFromEnum(inst)].pl_node.payload_index; - const flags: *Zir.Inst.BuiltinCall.Flags = @ptrCast(&gz.astgen.extra.items[ - break_extra + std.meta.fieldIndex(Zir.Inst.BuiltinCall, "flags").? - ]); - flags.ensure_result_used = true; - break :b true; - }, - - // ZIR instructions that might be a type other than `noreturn` or `void`. 
- .add, - .addwrap, - .add_sat, - .add_unsafe, - .param, - .param_comptime, - .param_anytype, - .param_anytype_comptime, - .alloc, - .alloc_mut, - .alloc_comptime_mut, - .alloc_inferred, - .alloc_inferred_mut, - .alloc_inferred_comptime, - .alloc_inferred_comptime_mut, - .make_ptr_const, - .array_cat, - .array_mul, - .array_type, - .array_type_sentinel, - .elem_type, - .indexable_ptr_elem_type, - .vector_elem_type, - .vector_type, - .indexable_ptr_len, - .anyframe_type, - .as_node, - .as_shift_operand, - .bit_and, - .bitcast, - .bit_or, - .block, - .block_comptime, - .block_inline, - .declaration, - .suspend_block, - .loop, - .bool_br_and, - .bool_br_or, - .bool_not, - .cmp_lt, - .cmp_lte, - .cmp_eq, - .cmp_gte, - .cmp_gt, - .cmp_neq, - .decl_ref, - .decl_val, - .load, - .div, - .elem_ptr, - .elem_val, - .elem_ptr_node, - .elem_val_node, - .elem_val_imm, - .field_ptr, - .field_val, - .field_ptr_named, - .field_val_named, - .func, - .func_inferred, - .func_fancy, - .int, - .int_big, - .float, - .float128, - .int_type, - .is_non_null, - .is_non_null_ptr, - .is_non_err, - .is_non_err_ptr, - .ret_is_non_err, - .mod_rem, - .mul, - .mulwrap, - .mul_sat, - .ref, - .shl, - .shl_sat, - .shr, - .str, - .sub, - .subwrap, - .sub_sat, - .negate, - .negate_wrap, - .typeof, - .typeof_builtin, - .xor, - .optional_type, - .optional_payload_safe, - .optional_payload_unsafe, - .optional_payload_safe_ptr, - .optional_payload_unsafe_ptr, - .err_union_payload_unsafe, - .err_union_payload_unsafe_ptr, - .err_union_code, - .err_union_code_ptr, - .ptr_type, - .enum_literal, - .merge_error_sets, - .error_union_type, - .bit_not, - .error_value, - .slice_start, - .slice_end, - .slice_sentinel, - .slice_length, - .import, - .switch_block, - .switch_block_ref, - .switch_block_err_union, - .union_init, - .field_type_ref, - .error_set_decl, - .error_set_decl_anon, - .error_set_decl_func, - .enum_from_int, - .int_from_enum, - .type_info, - .size_of, - .bit_size_of, - .typeof_log2_int_type, - .int_from_ptr, - .align_of, - .int_from_bool, - .embed_file, - .error_name, - .sqrt, - .sin, - .cos, - .tan, - .exp, - .exp2, - .log, - .log2, - .log10, - .abs, - .floor, - .ceil, - .trunc, - .round, - .tag_name, - .type_name, - .frame_type, - .frame_size, - .int_from_float, - .float_from_int, - .ptr_from_int, - .float_cast, - .int_cast, - .ptr_cast, - .truncate, - .has_decl, - .has_field, - .clz, - .ctz, - .pop_count, - .byte_swap, - .bit_reverse, - .div_exact, - .div_floor, - .div_trunc, - .mod, - .rem, - .shl_exact, - .shr_exact, - .bit_offset_of, - .offset_of, - .splat, - .reduce, - .shuffle, - .atomic_load, - .atomic_rmw, - .mul_add, - .field_parent_ptr, - .max, - .min, - .c_import, - .@"resume", - .@"await", - .ret_err_value_code, - .closure_get, - .ret_ptr, - .ret_type, - .for_len, - .@"try", - .try_ptr, - .opt_eu_base_ptr_init, - .coerce_ptr_elem_ty, - .struct_init_empty, - .struct_init_empty_result, - .struct_init_empty_ref_result, - .struct_init_anon, - .struct_init, - .struct_init_ref, - .struct_init_field_type, - .struct_init_field_ptr, - .array_init_anon, - .array_init, - .array_init_ref, - .validate_array_init_ref_ty, - .array_init_elem_type, - .array_init_elem_ptr, - => break :b false, - - .extended => switch (gz.astgen.instructions.items(.data)[@intFromEnum(inst)].extended.opcode) { - .breakpoint, - .fence, - .set_float_mode, - .set_align_stack, - .set_cold, - => break :b true, - else => break :b false, - }, - - // ZIR instructions that are always `noreturn`. 
- .@"break", - .break_inline, - .condbr, - .condbr_inline, - .compile_error, - .ret_node, - .ret_load, - .ret_implicit, - .ret_err_value, - .@"unreachable", - .repeat, - .repeat_inline, - .panic, - .trap, - .check_comptime_control_flow, - => { - noreturn_src_node = statement; - break :b true; - }, - - // ZIR instructions that are always `void`. - .dbg_stmt, - .dbg_var_ptr, - .dbg_var_val, - .ensure_result_used, - .ensure_result_non_error, - .ensure_err_union_payload_void, - .@"export", - .export_value, - .set_eval_branch_quota, - .atomic_store, - .store_node, - .store_to_inferred_ptr, - .resolve_inferred_alloc, - .set_runtime_safety, - .closure_capture, - .memcpy, - .memset, - .validate_deref, - .validate_destructure, - .save_err_ret_index, - .restore_err_ret_index_unconditional, - .restore_err_ret_index_fn_entry, - .validate_struct_init_ty, - .validate_struct_init_result_ty, - .validate_ptr_struct_init, - .validate_array_init_ty, - .validate_array_init_result_ty, - .validate_ptr_array_init, - .validate_ref_ty, - => break :b true, - - .@"defer" => unreachable, - .defer_err_code => unreachable, - } - } else switch (maybe_unused_result) { - .none => unreachable, - - .unreachable_value => b: { - noreturn_src_node = statement; - break :b true; - }, - - .void_value => true, - - else => false, - }; - if (!elide_check) { - _ = try gz.addUnNode(.ensure_result_used, maybe_unused_result, statement); - } - return noreturn_src_node; -} - -fn countDefers(outer_scope: *Scope, inner_scope: *Scope) struct { - have_any: bool, - have_normal: bool, - have_err: bool, - need_err_code: bool, -} { - var have_normal = false; - var have_err = false; - var need_err_code = false; - var scope = inner_scope; - while (scope != outer_scope) { - switch (scope.tag) { - .gen_zir => scope = scope.cast(GenZir).?.parent, - .local_val => scope = scope.cast(Scope.LocalVal).?.parent, - .local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent, - .defer_normal => { - const defer_scope = scope.cast(Scope.Defer).?; - scope = defer_scope.parent; - - have_normal = true; - }, - .defer_error => { - const defer_scope = scope.cast(Scope.Defer).?; - scope = defer_scope.parent; - - have_err = true; - - const have_err_payload = defer_scope.remapped_err_code != .none; - need_err_code = need_err_code or have_err_payload; - }, - .namespace, .enum_namespace => unreachable, - .top => unreachable, - } - } - return .{ - .have_any = have_normal or have_err, - .have_normal = have_normal, - .have_err = have_err, - .need_err_code = need_err_code, - }; -} - -const DefersToEmit = union(enum) { - both: Zir.Inst.Ref, // err code - both_sans_err, - normal_only, -}; - -fn genDefers( - gz: *GenZir, - outer_scope: *Scope, - inner_scope: *Scope, - which_ones: DefersToEmit, -) InnerError!void { - const gpa = gz.astgen.gpa; - - var scope = inner_scope; - while (scope != outer_scope) { - switch (scope.tag) { - .gen_zir => scope = scope.cast(GenZir).?.parent, - .local_val => scope = scope.cast(Scope.LocalVal).?.parent, - .local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent, - .defer_normal => { - const defer_scope = scope.cast(Scope.Defer).?; - scope = defer_scope.parent; - try gz.addDefer(defer_scope.index, defer_scope.len); - }, - .defer_error => { - const defer_scope = scope.cast(Scope.Defer).?; - scope = defer_scope.parent; - switch (which_ones) { - .both_sans_err => { - try gz.addDefer(defer_scope.index, defer_scope.len); - }, - .both => |err_code| { - if (defer_scope.remapped_err_code.unwrap()) |remapped_err_code| { - try 
gz.instructions.ensureUnusedCapacity(gpa, 1); - try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); - - const payload_index = try gz.astgen.addExtra(Zir.Inst.DeferErrCode{ - .remapped_err_code = remapped_err_code, - .index = defer_scope.index, - .len = defer_scope.len, - }); - const new_index: Zir.Inst.Index = @enumFromInt(gz.astgen.instructions.len); - gz.astgen.instructions.appendAssumeCapacity(.{ - .tag = .defer_err_code, - .data = .{ .defer_err_code = .{ - .err_code = err_code, - .payload_index = payload_index, - } }, - }); - gz.instructions.appendAssumeCapacity(new_index); - } else { - try gz.addDefer(defer_scope.index, defer_scope.len); - } - }, - .normal_only => continue, - } - }, - .namespace, .enum_namespace => unreachable, - .top => unreachable, - } - } -} - -fn checkUsed(gz: *GenZir, outer_scope: *Scope, inner_scope: *Scope) InnerError!void { - const astgen = gz.astgen; - - var scope = inner_scope; - while (scope != outer_scope) { - switch (scope.tag) { - .gen_zir => scope = scope.cast(GenZir).?.parent, - .local_val => { - const s = scope.cast(Scope.LocalVal).?; - if (s.used == 0 and s.discarded == 0) { - try astgen.appendErrorTok(s.token_src, "unused {s}", .{@tagName(s.id_cat)}); - } else if (s.used != 0 and s.discarded != 0) { - try astgen.appendErrorTokNotes(s.discarded, "pointless discard of {s}", .{@tagName(s.id_cat)}, &[_]u32{ - try gz.astgen.errNoteTok(s.used, "used here", .{}), - }); - } - scope = s.parent; - }, - .local_ptr => { - const s = scope.cast(Scope.LocalPtr).?; - if (s.used == 0 and s.discarded == 0) { - try astgen.appendErrorTok(s.token_src, "unused {s}", .{@tagName(s.id_cat)}); - } else { - if (s.used != 0 and s.discarded != 0) { - try astgen.appendErrorTokNotes(s.discarded, "pointless discard of {s}", .{@tagName(s.id_cat)}, &[_]u32{ - try astgen.errNoteTok(s.used, "used here", .{}), - }); - } - if (s.id_cat == .@"local variable" and !s.used_as_lvalue) { - try astgen.appendErrorTokNotes(s.token_src, "local variable is never mutated", .{}, &.{ - try astgen.errNoteTok(s.token_src, "consider using 'const'", .{}), - }); - } - } - - scope = s.parent; - }, - .defer_normal, .defer_error => scope = scope.cast(Scope.Defer).?.parent, - .namespace, .enum_namespace => unreachable, - .top => unreachable, - } - } -} - -fn deferStmt( - gz: *GenZir, - scope: *Scope, - node: Ast.Node.Index, - block_arena: Allocator, - scope_tag: Scope.Tag, -) InnerError!*Scope { - var defer_gen = gz.makeSubBlock(scope); - defer_gen.cur_defer_node = node; - defer_gen.any_defer_node = node; - defer defer_gen.unstack(); - - const tree = gz.astgen.tree; - const node_datas = tree.nodes.items(.data); - const expr_node = node_datas[node].rhs; - - const payload_token = node_datas[node].lhs; - var local_val_scope: Scope.LocalVal = undefined; - var opt_remapped_err_code: Zir.Inst.OptionalIndex = .none; - const have_err_code = scope_tag == .defer_error and payload_token != 0; - const sub_scope = if (!have_err_code) &defer_gen.base else blk: { - const ident_name = try gz.astgen.identAsString(payload_token); - const remapped_err_code: Zir.Inst.Index = @enumFromInt(gz.astgen.instructions.len); - opt_remapped_err_code = remapped_err_code.toOptional(); - try gz.astgen.instructions.append(gz.astgen.gpa, .{ - .tag = .extended, - .data = .{ .extended = .{ - .opcode = .value_placeholder, - .small = undefined, - .operand = undefined, - } }, - }); - const remapped_err_code_ref = remapped_err_code.toRef(); - local_val_scope = .{ - .parent = &defer_gen.base, - .gen_zir = gz, - .name = ident_name, - .inst = 
remapped_err_code_ref, - .token_src = payload_token, - .id_cat = .capture, - }; - try gz.addDbgVar(.dbg_var_val, ident_name, remapped_err_code_ref); - break :blk &local_val_scope.base; - }; - _ = try unusedResultExpr(&defer_gen, sub_scope, expr_node); - try checkUsed(gz, scope, sub_scope); - _ = try defer_gen.addBreak(.break_inline, @enumFromInt(0), .void_value); - - // We must handle ref_table for remapped_err_code manually. - const body = defer_gen.instructionsSlice(); - const body_len = blk: { - var refs: u32 = 0; - if (opt_remapped_err_code.unwrap()) |remapped_err_code| { - var cur_inst = remapped_err_code; - while (gz.astgen.ref_table.get(cur_inst)) |ref_inst| { - refs += 1; - cur_inst = ref_inst; - } - } - break :blk gz.astgen.countBodyLenAfterFixups(body) + refs; - }; - - const index: u32 = @intCast(gz.astgen.extra.items.len); - try gz.astgen.extra.ensureUnusedCapacity(gz.astgen.gpa, body_len); - if (opt_remapped_err_code.unwrap()) |remapped_err_code| { - if (gz.astgen.ref_table.fetchRemove(remapped_err_code)) |kv| { - gz.astgen.appendPossiblyRefdBodyInst(&gz.astgen.extra, kv.value); - } - } - gz.astgen.appendBodyWithFixups(body); - - const defer_scope = try block_arena.create(Scope.Defer); - - defer_scope.* = .{ - .base = .{ .tag = scope_tag }, - .parent = scope, - .index = index, - .len = body_len, - .remapped_err_code = opt_remapped_err_code, - }; - return &defer_scope.base; -} - -fn varDecl( - gz: *GenZir, - scope: *Scope, - node: Ast.Node.Index, - block_arena: Allocator, - var_decl: Ast.full.VarDecl, -) InnerError!*Scope { - try emitDbgNode(gz, node); - const astgen = gz.astgen; - const tree = astgen.tree; - const token_tags = tree.tokens.items(.tag); - const main_tokens = tree.nodes.items(.main_token); - - const name_token = var_decl.ast.mut_token + 1; - const ident_name_raw = tree.tokenSlice(name_token); - if (mem.eql(u8, ident_name_raw, "_")) { - return astgen.failTok(name_token, "'_' used as an identifier without @\"_\" syntax", .{}); - } - const ident_name = try astgen.identAsString(name_token); - - try astgen.detectLocalShadowing( - scope, - ident_name, - name_token, - ident_name_raw, - if (token_tags[var_decl.ast.mut_token] == .keyword_const) .@"local constant" else .@"local variable", - ); - - if (var_decl.ast.init_node == 0) { - return astgen.failNode(node, "variables must be initialized", .{}); - } - - if (var_decl.ast.addrspace_node != 0) { - return astgen.failTok(main_tokens[var_decl.ast.addrspace_node], "cannot set address space of local variable '{s}'", .{ident_name_raw}); - } - - if (var_decl.ast.section_node != 0) { - return astgen.failTok(main_tokens[var_decl.ast.section_node], "cannot set section of local variable '{s}'", .{ident_name_raw}); - } - - const align_inst: Zir.Inst.Ref = if (var_decl.ast.align_node != 0) - try expr(gz, scope, coerced_align_ri, var_decl.ast.align_node) - else - .none; - - switch (token_tags[var_decl.ast.mut_token]) { - .keyword_const => { - if (var_decl.comptime_token) |comptime_token| { - try astgen.appendErrorTok(comptime_token, "'comptime const' is redundant; instead wrap the initialization expression with 'comptime'", .{}); - } - - // Depending on the type of AST the initialization expression is, we may need an lvalue - // or an rvalue as a result location. If it is an rvalue, we can use the instruction as - // the variable, no memory location needed. 
- const type_node = var_decl.ast.type_node; - if (align_inst == .none and - !astgen.nodes_need_rl.contains(node)) - { - const result_info: ResultInfo = if (type_node != 0) .{ - .rl = .{ .ty = try typeExpr(gz, scope, type_node) }, - .ctx = .const_init, - } else .{ .rl = .none, .ctx = .const_init }; - const prev_anon_name_strategy = gz.anon_name_strategy; - gz.anon_name_strategy = .dbg_var; - const init_inst = try reachableExpr(gz, scope, result_info, var_decl.ast.init_node, node); - gz.anon_name_strategy = prev_anon_name_strategy; - - try gz.addDbgVar(.dbg_var_val, ident_name, init_inst); - - // The const init expression may have modified the error return trace, so signal - // to Sema that it should save the new index for restoring later. - if (nodeMayAppendToErrorTrace(tree, var_decl.ast.init_node)) - _ = try gz.addSaveErrRetIndex(.{ .if_of_error_type = init_inst }); - - const sub_scope = try block_arena.create(Scope.LocalVal); - sub_scope.* = .{ - .parent = scope, - .gen_zir = gz, - .name = ident_name, - .inst = init_inst, - .token_src = name_token, - .id_cat = .@"local constant", - }; - return &sub_scope.base; - } - - const is_comptime = gz.is_comptime or - tree.nodes.items(.tag)[var_decl.ast.init_node] == .@"comptime"; - - var resolve_inferred_alloc: Zir.Inst.Ref = .none; - var opt_type_inst: Zir.Inst.Ref = .none; - const init_rl: ResultInfo.Loc = if (type_node != 0) init_rl: { - const type_inst = try typeExpr(gz, scope, type_node); - opt_type_inst = type_inst; - if (align_inst == .none) { - break :init_rl .{ .ptr = .{ .inst = try gz.addUnNode(.alloc, type_inst, node) } }; - } else { - break :init_rl .{ .ptr = .{ .inst = try gz.addAllocExtended(.{ - .node = node, - .type_inst = type_inst, - .align_inst = align_inst, - .is_const = true, - .is_comptime = is_comptime, - }) } }; - } - } else init_rl: { - const alloc_inst = if (align_inst == .none) ptr: { - const tag: Zir.Inst.Tag = if (is_comptime) - .alloc_inferred_comptime - else - .alloc_inferred; - break :ptr try gz.addNode(tag, node); - } else ptr: { - break :ptr try gz.addAllocExtended(.{ - .node = node, - .type_inst = .none, - .align_inst = align_inst, - .is_const = true, - .is_comptime = is_comptime, - }); - }; - resolve_inferred_alloc = alloc_inst; - break :init_rl .{ .inferred_ptr = alloc_inst }; - }; - const var_ptr = switch (init_rl) { - .ptr => |ptr| ptr.inst, - .inferred_ptr => |inst| inst, - else => unreachable, - }; - const init_result_info: ResultInfo = .{ .rl = init_rl, .ctx = .const_init }; - - const prev_anon_name_strategy = gz.anon_name_strategy; - gz.anon_name_strategy = .dbg_var; - defer gz.anon_name_strategy = prev_anon_name_strategy; - const init_inst = try reachableExpr(gz, scope, init_result_info, var_decl.ast.init_node, node); - - // The const init expression may have modified the error return trace, so signal - // to Sema that it should save the new index for restoring later. 
- if (nodeMayAppendToErrorTrace(tree, var_decl.ast.init_node)) - _ = try gz.addSaveErrRetIndex(.{ .if_of_error_type = init_inst }); - - const const_ptr = if (resolve_inferred_alloc != .none) p: { - _ = try gz.addUnNode(.resolve_inferred_alloc, resolve_inferred_alloc, node); - break :p var_ptr; - } else try gz.addUnNode(.make_ptr_const, var_ptr, node); - - try gz.addDbgVar(.dbg_var_ptr, ident_name, const_ptr); - - const sub_scope = try block_arena.create(Scope.LocalPtr); - sub_scope.* = .{ - .parent = scope, - .gen_zir = gz, - .name = ident_name, - .ptr = const_ptr, - .token_src = name_token, - .maybe_comptime = true, - .id_cat = .@"local constant", - }; - return &sub_scope.base; - }, - .keyword_var => { - if (var_decl.comptime_token != null and gz.is_comptime) - return astgen.failTok(var_decl.comptime_token.?, "'comptime var' is redundant in comptime scope", .{}); - const is_comptime = var_decl.comptime_token != null or gz.is_comptime; - var resolve_inferred_alloc: Zir.Inst.Ref = .none; - const alloc: Zir.Inst.Ref, const result_info: ResultInfo = if (var_decl.ast.type_node != 0) a: { - const type_inst = try typeExpr(gz, scope, var_decl.ast.type_node); - const alloc = alloc: { - if (align_inst == .none) { - const tag: Zir.Inst.Tag = if (is_comptime) - .alloc_comptime_mut - else - .alloc_mut; - break :alloc try gz.addUnNode(tag, type_inst, node); - } else { - break :alloc try gz.addAllocExtended(.{ - .node = node, - .type_inst = type_inst, - .align_inst = align_inst, - .is_const = false, - .is_comptime = is_comptime, - }); - } - }; - break :a .{ alloc, .{ .rl = .{ .ptr = .{ .inst = alloc } } } }; - } else a: { - const alloc = alloc: { - if (align_inst == .none) { - const tag: Zir.Inst.Tag = if (is_comptime) - .alloc_inferred_comptime_mut - else - .alloc_inferred_mut; - break :alloc try gz.addNode(tag, node); - } else { - break :alloc try gz.addAllocExtended(.{ - .node = node, - .type_inst = .none, - .align_inst = align_inst, - .is_const = false, - .is_comptime = is_comptime, - }); - } - }; - resolve_inferred_alloc = alloc; - break :a .{ alloc, .{ .rl = .{ .inferred_ptr = alloc } } }; - }; - const prev_anon_name_strategy = gz.anon_name_strategy; - gz.anon_name_strategy = .dbg_var; - _ = try reachableExprComptime(gz, scope, result_info, var_decl.ast.init_node, node, is_comptime); - gz.anon_name_strategy = prev_anon_name_strategy; - if (resolve_inferred_alloc != .none) { - _ = try gz.addUnNode(.resolve_inferred_alloc, resolve_inferred_alloc, node); - } - - try gz.addDbgVar(.dbg_var_ptr, ident_name, alloc); - - const sub_scope = try block_arena.create(Scope.LocalPtr); - sub_scope.* = .{ - .parent = scope, - .gen_zir = gz, - .name = ident_name, - .ptr = alloc, - .token_src = name_token, - .maybe_comptime = is_comptime, - .id_cat = .@"local variable", - }; - return &sub_scope.base; - }, - else => unreachable, - } -} - -fn emitDbgNode(gz: *GenZir, node: Ast.Node.Index) !void { - // The instruction emitted here is for debugging runtime code. - // If the current block will be evaluated only during semantic analysis - // then no dbg_stmt ZIR instruction is needed. 
- if (gz.is_comptime) return; - const astgen = gz.astgen; - astgen.advanceSourceCursorToNode(node); - const line = astgen.source_line - gz.decl_line; - const column = astgen.source_column; - try emitDbgStmt(gz, .{ line, column }); -} - -fn assign(gz: *GenZir, scope: *Scope, infix_node: Ast.Node.Index) InnerError!void { - try emitDbgNode(gz, infix_node); - const astgen = gz.astgen; - const tree = astgen.tree; - const node_datas = tree.nodes.items(.data); - const main_tokens = tree.nodes.items(.main_token); - const node_tags = tree.nodes.items(.tag); - - const lhs = node_datas[infix_node].lhs; - const rhs = node_datas[infix_node].rhs; - if (node_tags[lhs] == .identifier) { - // This intentionally does not support `@"_"` syntax. - const ident_name = tree.tokenSlice(main_tokens[lhs]); - if (mem.eql(u8, ident_name, "_")) { - _ = try expr(gz, scope, .{ .rl = .discard, .ctx = .assignment }, rhs); - return; - } - } - const lvalue = try lvalExpr(gz, scope, lhs); - _ = try expr(gz, scope, .{ .rl = .{ .ptr = .{ - .inst = lvalue, - .src_node = infix_node, - } } }, rhs); -} - -/// Handles destructure assignments where no LHS is a `const` or `var` decl. -fn assignDestructure(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!void { - try emitDbgNode(gz, node); - const astgen = gz.astgen; - const tree = astgen.tree; - const token_tags = tree.tokens.items(.tag); - const node_datas = tree.nodes.items(.data); - const main_tokens = tree.nodes.items(.main_token); - const node_tags = tree.nodes.items(.tag); - - const extra_index = node_datas[node].lhs; - const lhs_count = tree.extra_data[extra_index]; - const lhs_nodes: []const Ast.Node.Index = @ptrCast(tree.extra_data[extra_index + 1 ..][0..lhs_count]); - const rhs = node_datas[node].rhs; - - const maybe_comptime_token = tree.firstToken(node) - 1; - const declared_comptime = token_tags[maybe_comptime_token] == .keyword_comptime; - - if (declared_comptime and gz.is_comptime) { - return astgen.failNode(node, "redundant comptime keyword in already comptime scope", .{}); - } - - // If this expression is marked comptime, we must wrap the whole thing in a comptime block. - var gz_buf: GenZir = undefined; - const inner_gz = if (declared_comptime) bs: { - gz_buf = gz.makeSubBlock(scope); - gz_buf.is_comptime = true; - break :bs &gz_buf; - } else gz; - defer if (declared_comptime) inner_gz.unstack(); - - const rl_components = try astgen.arena.alloc(ResultInfo.Loc.DestructureComponent, lhs_nodes.len); - for (rl_components, lhs_nodes) |*lhs_rl, lhs_node| { - if (node_tags[lhs_node] == .identifier) { - // This intentionally does not support `@"_"` syntax. - const ident_name = tree.tokenSlice(main_tokens[lhs_node]); - if (mem.eql(u8, ident_name, "_")) { - lhs_rl.* = .discard; - continue; - } - } - lhs_rl.* = .{ .typed_ptr = .{ - .inst = try lvalExpr(inner_gz, scope, lhs_node), - .src_node = lhs_node, - } }; - } - - const ri: ResultInfo = .{ .rl = .{ .destructure = .{ - .src_node = node, - .components = rl_components, - } } }; - - _ = try expr(inner_gz, scope, ri, rhs); - - if (declared_comptime) { - const comptime_block_inst = try gz.makeBlockInst(.block_comptime, node); - _ = try inner_gz.addBreak(.@"break", comptime_block_inst, .void_value); - try inner_gz.setBlockBody(comptime_block_inst); - try gz.instructions.append(gz.astgen.gpa, comptime_block_inst); - } -} - -/// Handles destructure assignments where the LHS may contain `const` or `var` decls. 
-fn assignDestructureMaybeDecls( - gz: *GenZir, - scope: *Scope, - node: Ast.Node.Index, - block_arena: Allocator, -) InnerError!*Scope { - try emitDbgNode(gz, node); - const astgen = gz.astgen; - const tree = astgen.tree; - const token_tags = tree.tokens.items(.tag); - const node_datas = tree.nodes.items(.data); - const main_tokens = tree.nodes.items(.main_token); - const node_tags = tree.nodes.items(.tag); - - const extra_index = node_datas[node].lhs; - const lhs_count = tree.extra_data[extra_index]; - const lhs_nodes: []const Ast.Node.Index = @ptrCast(tree.extra_data[extra_index + 1 ..][0..lhs_count]); - const rhs = node_datas[node].rhs; - - const maybe_comptime_token = tree.firstToken(node) - 1; - const declared_comptime = token_tags[maybe_comptime_token] == .keyword_comptime; - if (declared_comptime and gz.is_comptime) { - return astgen.failNode(node, "redundant comptime keyword in already comptime scope", .{}); - } - - const is_comptime = declared_comptime or gz.is_comptime; - const rhs_is_comptime = tree.nodes.items(.tag)[rhs] == .@"comptime"; - - // When declaring consts via a destructure, we always use a result pointer. - // This avoids the need to create tuple types, and is also likely easier to - // optimize, since it's a bit tricky for the optimizer to "split up" the - // value into individual pointer writes down the line. - - // We know this rl information won't live past the evaluation of this - // expression, so it may as well go in the block arena. - const rl_components = try block_arena.alloc(ResultInfo.Loc.DestructureComponent, lhs_nodes.len); - var any_non_const_lhs = false; - var any_lvalue_expr = false; - for (rl_components, lhs_nodes) |*lhs_rl, lhs_node| { - switch (node_tags[lhs_node]) { - .identifier => { - // This intentionally does not support `@"_"` syntax. - const ident_name = tree.tokenSlice(main_tokens[lhs_node]); - if (mem.eql(u8, ident_name, "_")) { - any_non_const_lhs = true; - lhs_rl.* = .discard; - continue; - } - }, - .global_var_decl, .local_var_decl, .simple_var_decl, .aligned_var_decl => { - const full = tree.fullVarDecl(lhs_node).?; - - const name_token = full.ast.mut_token + 1; - const ident_name_raw = tree.tokenSlice(name_token); - if (mem.eql(u8, ident_name_raw, "_")) { - return astgen.failTok(name_token, "'_' used as an identifier without @\"_\" syntax", .{}); - } - - // We detect shadowing in the second pass over these, while we're creating scopes. - - if (full.ast.addrspace_node != 0) { - return astgen.failTok(main_tokens[full.ast.addrspace_node], "cannot set address space of local variable '{s}'", .{ident_name_raw}); - } - if (full.ast.section_node != 0) { - return astgen.failTok(main_tokens[full.ast.section_node], "cannot set section of local variable '{s}'", .{ident_name_raw}); - } - - const is_const = switch (token_tags[full.ast.mut_token]) { - .keyword_var => false, - .keyword_const => true, - else => unreachable, - }; - if (!is_const) any_non_const_lhs = true; - - // We also mark `const`s as comptime if the RHS is definitely comptime-known. 
- const this_lhs_comptime = is_comptime or (is_const and rhs_is_comptime); - - const align_inst: Zir.Inst.Ref = if (full.ast.align_node != 0) - try expr(gz, scope, coerced_align_ri, full.ast.align_node) - else - .none; - - if (full.ast.type_node != 0) { - // Typed alloc - const type_inst = try typeExpr(gz, scope, full.ast.type_node); - const ptr = if (align_inst == .none) ptr: { - const tag: Zir.Inst.Tag = if (is_const) - .alloc - else if (this_lhs_comptime) - .alloc_comptime_mut - else - .alloc_mut; - break :ptr try gz.addUnNode(tag, type_inst, node); - } else try gz.addAllocExtended(.{ - .node = node, - .type_inst = type_inst, - .align_inst = align_inst, - .is_const = is_const, - .is_comptime = this_lhs_comptime, - }); - lhs_rl.* = .{ .typed_ptr = .{ .inst = ptr } }; - } else { - // Inferred alloc - const ptr = if (align_inst == .none) ptr: { - const tag: Zir.Inst.Tag = if (is_const) tag: { - break :tag if (this_lhs_comptime) .alloc_inferred_comptime else .alloc_inferred; - } else tag: { - break :tag if (this_lhs_comptime) .alloc_inferred_comptime_mut else .alloc_inferred_mut; - }; - break :ptr try gz.addNode(tag, node); - } else try gz.addAllocExtended(.{ - .node = node, - .type_inst = .none, - .align_inst = align_inst, - .is_const = is_const, - .is_comptime = this_lhs_comptime, - }); - lhs_rl.* = .{ .inferred_ptr = ptr }; - } - - continue; - }, - else => {}, - } - // This LHS is just an lvalue expression. - // We will fill in its result pointer later, inside a comptime block. - any_non_const_lhs = true; - any_lvalue_expr = true; - lhs_rl.* = .{ .typed_ptr = .{ - .inst = undefined, - .src_node = lhs_node, - } }; - } - - if (declared_comptime and !any_non_const_lhs) { - try astgen.appendErrorTok(maybe_comptime_token, "'comptime const' is redundant; instead wrap the initialization expression with 'comptime'", .{}); - } - - // If this expression is marked comptime, we must wrap it in a comptime block. - var gz_buf: GenZir = undefined; - const inner_gz = if (declared_comptime) bs: { - gz_buf = gz.makeSubBlock(scope); - gz_buf.is_comptime = true; - break :bs &gz_buf; - } else gz; - defer if (declared_comptime) inner_gz.unstack(); - - if (any_lvalue_expr) { - // At least one LHS was an lvalue expr. Iterate again in order to - // evaluate the lvalues from within the possible block_comptime. - for (rl_components, lhs_nodes) |*lhs_rl, lhs_node| { - if (lhs_rl.* != .typed_ptr) continue; - switch (node_tags[lhs_node]) { - .global_var_decl, .local_var_decl, .simple_var_decl, .aligned_var_decl => continue, - else => {}, - } - lhs_rl.typed_ptr.inst = try lvalExpr(inner_gz, scope, lhs_node); - } - } - - // We can't give a reasonable anon name strategy for destructured inits, so - // leave it at its default of `.anon`. - _ = try reachableExpr(inner_gz, scope, .{ .rl = .{ .destructure = .{ - .src_node = node, - .components = rl_components, - } } }, rhs, node); - - if (declared_comptime) { - // Finish the block_comptime. Inferred alloc resolution etc will occur - // in the parent block. - const comptime_block_inst = try gz.makeBlockInst(.block_comptime, node); - _ = try inner_gz.addBreak(.@"break", comptime_block_inst, .void_value); - try inner_gz.setBlockBody(comptime_block_inst); - try gz.instructions.append(gz.astgen.gpa, comptime_block_inst); - } - - // Now, iterate over the LHS exprs to construct any new scopes. - // If there were any inferred allocations, resolve them. - // If there were any `const` decls, make the pointer constant. 
- var cur_scope = scope; - for (rl_components, lhs_nodes) |lhs_rl, lhs_node| { - switch (node_tags[lhs_node]) { - .local_var_decl, .simple_var_decl, .aligned_var_decl => {}, - else => continue, // We were mutating an existing lvalue - nothing to do - } - const full = tree.fullVarDecl(lhs_node).?; - const raw_ptr = switch (lhs_rl) { - .discard => unreachable, - .typed_ptr => |typed_ptr| typed_ptr.inst, - .inferred_ptr => |ptr_inst| ptr_inst, - }; - // If the alloc was inferred, resolve it. - if (full.ast.type_node == 0) { - _ = try gz.addUnNode(.resolve_inferred_alloc, raw_ptr, lhs_node); - } - const is_const = switch (token_tags[full.ast.mut_token]) { - .keyword_var => false, - .keyword_const => true, - else => unreachable, - }; - // If the alloc was const, make it const. - const var_ptr = if (is_const and full.ast.type_node != 0) make_const: { - // Note that we don't do this if type_node == 0 since `resolve_inferred_alloc` - // handles it for us. - break :make_const try gz.addUnNode(.make_ptr_const, raw_ptr, node); - } else raw_ptr; - const name_token = full.ast.mut_token + 1; - const ident_name_raw = tree.tokenSlice(name_token); - const ident_name = try astgen.identAsString(name_token); - try astgen.detectLocalShadowing( - cur_scope, - ident_name, - name_token, - ident_name_raw, - if (is_const) .@"local constant" else .@"local variable", - ); - try gz.addDbgVar(.dbg_var_ptr, ident_name, var_ptr); - // Finally, create the scope. - const sub_scope = try block_arena.create(Scope.LocalPtr); - sub_scope.* = .{ - .parent = cur_scope, - .gen_zir = gz, - .name = ident_name, - .ptr = var_ptr, - .token_src = name_token, - .maybe_comptime = is_const or is_comptime, - .id_cat = if (is_const) .@"local constant" else .@"local variable", - }; - cur_scope = &sub_scope.base; - } - - return cur_scope; -} - -fn assignOp( - gz: *GenZir, - scope: *Scope, - infix_node: Ast.Node.Index, - op_inst_tag: Zir.Inst.Tag, -) InnerError!void { - try emitDbgNode(gz, infix_node); - const astgen = gz.astgen; - const tree = astgen.tree; - const node_datas = tree.nodes.items(.data); - - const lhs_ptr = try lvalExpr(gz, scope, node_datas[infix_node].lhs); - - const cursor = switch (op_inst_tag) { - .add, .sub, .mul, .div, .mod_rem => maybeAdvanceSourceCursorToMainToken(gz, infix_node), - else => undefined, - }; - const lhs = try gz.addUnNode(.load, lhs_ptr, infix_node); - const lhs_type = try gz.addUnNode(.typeof, lhs, infix_node); - const rhs = try expr(gz, scope, .{ .rl = .{ .coerced_ty = lhs_type } }, node_datas[infix_node].rhs); - - switch (op_inst_tag) { - .add, .sub, .mul, .div, .mod_rem => { - try emitDbgStmt(gz, cursor); - }, - else => {}, - } - const result = try gz.addPlNode(op_inst_tag, infix_node, Zir.Inst.Bin{ - .lhs = lhs, - .rhs = rhs, - }); - _ = try gz.addPlNode(.store_node, infix_node, Zir.Inst.Bin{ - .lhs = lhs_ptr, - .rhs = result, - }); -} - -fn assignShift( - gz: *GenZir, - scope: *Scope, - infix_node: Ast.Node.Index, - op_inst_tag: Zir.Inst.Tag, -) InnerError!void { - try emitDbgNode(gz, infix_node); - const astgen = gz.astgen; - const tree = astgen.tree; - const node_datas = tree.nodes.items(.data); - - const lhs_ptr = try lvalExpr(gz, scope, node_datas[infix_node].lhs); - const lhs = try gz.addUnNode(.load, lhs_ptr, infix_node); - const rhs_type = try gz.addUnNode(.typeof_log2_int_type, lhs, infix_node); - const rhs = try expr(gz, scope, .{ .rl = .{ .ty = rhs_type } }, node_datas[infix_node].rhs); - - const result = try gz.addPlNode(op_inst_tag, infix_node, Zir.Inst.Bin{ - .lhs = lhs, - .rhs = rhs, - 
}); - _ = try gz.addPlNode(.store_node, infix_node, Zir.Inst.Bin{ - .lhs = lhs_ptr, - .rhs = result, - }); -} - -fn assignShiftSat(gz: *GenZir, scope: *Scope, infix_node: Ast.Node.Index) InnerError!void { - try emitDbgNode(gz, infix_node); - const astgen = gz.astgen; - const tree = astgen.tree; - const node_datas = tree.nodes.items(.data); - - const lhs_ptr = try lvalExpr(gz, scope, node_datas[infix_node].lhs); - const lhs = try gz.addUnNode(.load, lhs_ptr, infix_node); - // Saturating shift-left allows any integer type for both the LHS and RHS. - const rhs = try expr(gz, scope, .{ .rl = .none }, node_datas[infix_node].rhs); - - const result = try gz.addPlNode(.shl_sat, infix_node, Zir.Inst.Bin{ - .lhs = lhs, - .rhs = rhs, - }); - _ = try gz.addPlNode(.store_node, infix_node, Zir.Inst.Bin{ - .lhs = lhs_ptr, - .rhs = result, - }); -} - -fn ptrType( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, - ptr_info: Ast.full.PtrType, -) InnerError!Zir.Inst.Ref { - if (ptr_info.size == .C and ptr_info.allowzero_token != null) { - return gz.astgen.failTok(ptr_info.allowzero_token.?, "C pointers always allow address zero", .{}); - } - - const source_offset = gz.astgen.source_offset; - const source_line = gz.astgen.source_line; - const source_column = gz.astgen.source_column; - const elem_type = try typeExpr(gz, scope, ptr_info.ast.child_type); - - var sentinel_ref: Zir.Inst.Ref = .none; - var align_ref: Zir.Inst.Ref = .none; - var addrspace_ref: Zir.Inst.Ref = .none; - var bit_start_ref: Zir.Inst.Ref = .none; - var bit_end_ref: Zir.Inst.Ref = .none; - var trailing_count: u32 = 0; - - if (ptr_info.ast.sentinel != 0) { - // These attributes can appear in any order and they all come before the - // element type so we need to reset the source cursor before generating them. 
- gz.astgen.source_offset = source_offset; - gz.astgen.source_line = source_line; - gz.astgen.source_column = source_column; - - sentinel_ref = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = elem_type } }, ptr_info.ast.sentinel); - trailing_count += 1; - } - if (ptr_info.ast.addrspace_node != 0) { - gz.astgen.source_offset = source_offset; - gz.astgen.source_line = source_line; - gz.astgen.source_column = source_column; - - addrspace_ref = try expr(gz, scope, coerced_addrspace_ri, ptr_info.ast.addrspace_node); - trailing_count += 1; - } - if (ptr_info.ast.align_node != 0) { - gz.astgen.source_offset = source_offset; - gz.astgen.source_line = source_line; - gz.astgen.source_column = source_column; - - align_ref = try expr(gz, scope, coerced_align_ri, ptr_info.ast.align_node); - trailing_count += 1; - } - if (ptr_info.ast.bit_range_start != 0) { - assert(ptr_info.ast.bit_range_end != 0); - bit_start_ref = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .u16_type } }, ptr_info.ast.bit_range_start); - bit_end_ref = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .u16_type } }, ptr_info.ast.bit_range_end); - trailing_count += 2; - } - - const gpa = gz.astgen.gpa; - try gz.instructions.ensureUnusedCapacity(gpa, 1); - try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); - try gz.astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.PtrType).Struct.fields.len + - trailing_count); - - const payload_index = gz.astgen.addExtraAssumeCapacity(Zir.Inst.PtrType{ - .elem_type = elem_type, - .src_node = gz.nodeIndexToRelative(node), - }); - if (sentinel_ref != .none) { - gz.astgen.extra.appendAssumeCapacity(@intFromEnum(sentinel_ref)); - } - if (align_ref != .none) { - gz.astgen.extra.appendAssumeCapacity(@intFromEnum(align_ref)); - } - if (addrspace_ref != .none) { - gz.astgen.extra.appendAssumeCapacity(@intFromEnum(addrspace_ref)); - } - if (bit_start_ref != .none) { - gz.astgen.extra.appendAssumeCapacity(@intFromEnum(bit_start_ref)); - gz.astgen.extra.appendAssumeCapacity(@intFromEnum(bit_end_ref)); - } - - const new_index: Zir.Inst.Index = @enumFromInt(gz.astgen.instructions.len); - const result = new_index.toRef(); - gz.astgen.instructions.appendAssumeCapacity(.{ .tag = .ptr_type, .data = .{ - .ptr_type = .{ - .flags = .{ - .is_allowzero = ptr_info.allowzero_token != null, - .is_mutable = ptr_info.const_token == null, - .is_volatile = ptr_info.volatile_token != null, - .has_sentinel = sentinel_ref != .none, - .has_align = align_ref != .none, - .has_addrspace = addrspace_ref != .none, - .has_bit_range = bit_start_ref != .none, - }, - .size = ptr_info.size, - .payload_index = payload_index, - }, - } }); - gz.instructions.appendAssumeCapacity(new_index); - - return rvalue(gz, ri, result, node); -} - -fn arrayType(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) !Zir.Inst.Ref { - const astgen = gz.astgen; - const tree = astgen.tree; - const node_datas = tree.nodes.items(.data); - const node_tags = tree.nodes.items(.tag); - const main_tokens = tree.nodes.items(.main_token); - - const len_node = node_datas[node].lhs; - if (node_tags[len_node] == .identifier and - mem.eql(u8, tree.tokenSlice(main_tokens[len_node]), "_")) - { - return astgen.failNode(len_node, "unable to infer array size", .{}); - } - const len = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, len_node); - const elem_type = try typeExpr(gz, scope, node_datas[node].rhs); - - const result = try gz.addPlNode(.array_type, node, Zir.Inst.Bin{ - .lhs = len, - .rhs = elem_type, - }); - return rvalue(gz, ri, result, 
node); -} - -fn arrayTypeSentinel(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) !Zir.Inst.Ref { - const astgen = gz.astgen; - const tree = astgen.tree; - const node_datas = tree.nodes.items(.data); - const node_tags = tree.nodes.items(.tag); - const main_tokens = tree.nodes.items(.main_token); - const extra = tree.extraData(node_datas[node].rhs, Ast.Node.ArrayTypeSentinel); - - const len_node = node_datas[node].lhs; - if (node_tags[len_node] == .identifier and - mem.eql(u8, tree.tokenSlice(main_tokens[len_node]), "_")) - { - return astgen.failNode(len_node, "unable to infer array size", .{}); - } - const len = try reachableExpr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, len_node, node); - const elem_type = try typeExpr(gz, scope, extra.elem_type); - const sentinel = try reachableExprComptime(gz, scope, .{ .rl = .{ .coerced_ty = elem_type } }, extra.sentinel, node, true); - - const result = try gz.addPlNode(.array_type_sentinel, node, Zir.Inst.ArrayTypeSentinel{ - .len = len, - .elem_type = elem_type, - .sentinel = sentinel, - }); - return rvalue(gz, ri, result, node); -} - -const WipMembers = struct { - payload: *ArrayListUnmanaged(u32), - payload_top: usize, - field_bits_start: u32, - fields_start: u32, - fields_end: u32, - decl_index: u32 = 0, - field_index: u32 = 0, - - const Self = @This(); - - fn init(gpa: Allocator, payload: *ArrayListUnmanaged(u32), decl_count: u32, field_count: u32, comptime bits_per_field: u32, comptime max_field_size: u32) Allocator.Error!Self { - const payload_top: u32 = @intCast(payload.items.len); - const field_bits_start = payload_top + decl_count; - const fields_start = field_bits_start + if (bits_per_field > 0) blk: { - const fields_per_u32 = 32 / bits_per_field; - break :blk (field_count + fields_per_u32 - 1) / fields_per_u32; - } else 0; - const payload_end = fields_start + field_count * max_field_size; - try payload.resize(gpa, payload_end); - return .{ - .payload = payload, - .payload_top = payload_top, - .field_bits_start = field_bits_start, - .fields_start = fields_start, - .fields_end = fields_start, - }; - } - - fn nextDecl(self: *Self, decl_inst: Zir.Inst.Index) void { - self.payload.items[self.payload_top + self.decl_index] = @intFromEnum(decl_inst); - self.decl_index += 1; - } - - fn nextField(self: *Self, comptime bits_per_field: u32, bits: [bits_per_field]bool) void { - const fields_per_u32 = 32 / bits_per_field; - const index = self.field_bits_start + self.field_index / fields_per_u32; - assert(index < self.fields_start); - var bit_bag: u32 = if (self.field_index % fields_per_u32 == 0) 0 else self.payload.items[index]; - bit_bag >>= bits_per_field; - comptime var i = 0; - inline while (i < bits_per_field) : (i += 1) { - bit_bag |= @as(u32, @intFromBool(bits[i])) << (32 - bits_per_field + i); - } - self.payload.items[index] = bit_bag; - self.field_index += 1; - } - - fn appendToField(self: *Self, data: u32) void { - assert(self.fields_end < self.payload.items.len); - self.payload.items[self.fields_end] = data; - self.fields_end += 1; - } - - fn finishBits(self: *Self, comptime bits_per_field: u32) void { - if (bits_per_field > 0) { - const fields_per_u32 = 32 / bits_per_field; - const empty_field_slots = fields_per_u32 - (self.field_index % fields_per_u32); - if (self.field_index > 0 and empty_field_slots < fields_per_u32) { - const index = self.field_bits_start + self.field_index / fields_per_u32; - self.payload.items[index] >>= @intCast(empty_field_slots * bits_per_field); - } - } - } - - fn declsSlice(self: 
*Self) []u32 { - return self.payload.items[self.payload_top..][0..self.decl_index]; - } - - fn fieldsSlice(self: *Self) []u32 { - return self.payload.items[self.field_bits_start..self.fields_end]; - } - - fn deinit(self: *Self) void { - self.payload.items.len = self.payload_top; - } -}; - -fn fnDecl( - astgen: *AstGen, - gz: *GenZir, - scope: *Scope, - wip_members: *WipMembers, - decl_node: Ast.Node.Index, - body_node: Ast.Node.Index, - fn_proto: Ast.full.FnProto, -) InnerError!void { - const tree = astgen.tree; - const token_tags = tree.tokens.items(.tag); - - // missing function name already happened in scanDecls() - const fn_name_token = fn_proto.name_token orelse return error.AnalysisFail; - - // We insert this at the beginning so that its instruction index marks the - // start of the top level declaration. - const decl_inst = try gz.makeBlockInst(.declaration, fn_proto.ast.proto_node); - astgen.advanceSourceCursorToNode(decl_node); - - var decl_gz: GenZir = .{ - .is_comptime = true, - .decl_node_index = fn_proto.ast.proto_node, - .decl_line = astgen.source_line, - .parent = scope, - .astgen = astgen, - .instructions = gz.instructions, - .instructions_top = gz.instructions.items.len, - }; - defer decl_gz.unstack(); - - var fn_gz: GenZir = .{ - .is_comptime = false, - .decl_node_index = fn_proto.ast.proto_node, - .decl_line = decl_gz.decl_line, - .parent = &decl_gz.base, - .astgen = astgen, - .instructions = gz.instructions, - .instructions_top = GenZir.unstacked_top, - }; - defer fn_gz.unstack(); - - const is_pub = fn_proto.visib_token != null; - const is_export = blk: { - const maybe_export_token = fn_proto.extern_export_inline_token orelse break :blk false; - break :blk token_tags[maybe_export_token] == .keyword_export; - }; - const is_extern = blk: { - const maybe_extern_token = fn_proto.extern_export_inline_token orelse break :blk false; - break :blk token_tags[maybe_extern_token] == .keyword_extern; - }; - const has_inline_keyword = blk: { - const maybe_inline_token = fn_proto.extern_export_inline_token orelse break :blk false; - break :blk token_tags[maybe_inline_token] == .keyword_inline; - }; - const is_noinline = blk: { - const maybe_noinline_token = fn_proto.extern_export_inline_token orelse break :blk false; - break :blk token_tags[maybe_noinline_token] == .keyword_noinline; - }; - - const doc_comment_index = try astgen.docCommentAsString(fn_proto.firstToken()); - - wip_members.nextDecl(decl_inst); - - var noalias_bits: u32 = 0; - var params_scope = &fn_gz.base; - const is_var_args = is_var_args: { - var param_type_i: usize = 0; - var it = fn_proto.iterate(tree); - while (it.next()) |param| : (param_type_i += 1) { - const is_comptime = if (param.comptime_noalias) |token| switch (token_tags[token]) { - .keyword_noalias => is_comptime: { - noalias_bits |= @as(u32, 1) << (std.math.cast(u5, param_type_i) orelse - return astgen.failTok(token, "this compiler implementation only supports 'noalias' on the first 32 parameters", .{})); - break :is_comptime false; - }, - .keyword_comptime => true, - else => false, - } else false; - - const is_anytype = if (param.anytype_ellipsis3) |token| blk: { - switch (token_tags[token]) { - .keyword_anytype => break :blk true, - .ellipsis3 => break :is_var_args true, - else => unreachable, - } - } else false; - - const param_name: Zir.NullTerminatedString = if (param.name_token) |name_token| blk: { - const name_bytes = tree.tokenSlice(name_token); - if (mem.eql(u8, "_", name_bytes)) - break :blk .empty; - - const param_name = try 
astgen.identAsString(name_token);
- if (!is_extern) {
- try astgen.detectLocalShadowing(params_scope, param_name, name_token, name_bytes, .@"function parameter");
- }
- break :blk param_name;
- } else if (!is_extern) {
- if (param.anytype_ellipsis3) |tok| {
- return astgen.failTok(tok, "missing parameter name", .{});
- } else {
- ambiguous: {
- if (tree.nodes.items(.tag)[param.type_expr] != .identifier) break :ambiguous;
- const main_token = tree.nodes.items(.main_token)[param.type_expr];
- const identifier_str = tree.tokenSlice(main_token);
- if (isPrimitive(identifier_str)) break :ambiguous;
- return astgen.failNodeNotes(
- param.type_expr,
- "missing parameter name or type",
- .{},
- &[_]u32{
- try astgen.errNoteNode(
- param.type_expr,
- "if this is a name, annotate its type '{s}: T'",
- .{identifier_str},
- ),
- try astgen.errNoteNode(
- param.type_expr,
- "if this is a type, give it a name ': {s}'",
- .{identifier_str},
- ),
- },
- );
- }
- return astgen.failNode(param.type_expr, "missing parameter name", .{});
- }
- } else .empty;
-
- const param_inst = if (is_anytype) param: {
- const name_token = param.name_token orelse param.anytype_ellipsis3.?;
- const tag: Zir.Inst.Tag = if (is_comptime)
- .param_anytype_comptime
- else
- .param_anytype;
- break :param try decl_gz.addStrTok(tag, param_name, name_token);
- } else param: {
- const param_type_node = param.type_expr;
- assert(param_type_node != 0);
- var param_gz = decl_gz.makeSubBlock(scope);
- defer param_gz.unstack();
- const param_type = try expr(&param_gz, params_scope, coerced_type_ri, param_type_node);
- const param_inst_expected: Zir.Inst.Index = @enumFromInt(astgen.instructions.len + 1);
- _ = try param_gz.addBreakWithSrcNode(.break_inline, param_inst_expected, param_type, param_type_node);
-
- const main_tokens = tree.nodes.items(.main_token);
- const name_token = param.name_token orelse main_tokens[param_type_node];
- const tag: Zir.Inst.Tag = if (is_comptime) .param_comptime else .param;
- const param_inst = try decl_gz.addParam(&param_gz, tag, name_token, param_name, param.first_doc_comment);
- assert(param_inst_expected == param_inst);
- break :param param_inst.toRef();
- };
-
- if (param_name == .empty or is_extern) continue;
-
- const sub_scope = try astgen.arena.create(Scope.LocalVal);
- sub_scope.* = .{
- .parent = params_scope,
- .gen_zir = &decl_gz,
- .name = param_name,
- .inst = param_inst,
- .token_src = param.name_token.?,
- .id_cat = .@"function parameter",
- };
- params_scope = &sub_scope.base;
- }
- break :is_var_args false;
- };
-
- const lib_name = if (fn_proto.lib_name) |lib_name_token| blk: {
- const lib_name_str = try astgen.strLitAsString(lib_name_token);
- const lib_name_slice = astgen.string_bytes.items[@intFromEnum(lib_name_str.index)..][0..lib_name_str.len];
- if (mem.indexOfScalar(u8, lib_name_slice, 0) != null) {
- return astgen.failTok(lib_name_token, "library name cannot contain null bytes", .{});
- } else if (lib_name_str.len == 0) {
- return astgen.failTok(lib_name_token, "library name cannot be empty", .{});
- }
- break :blk lib_name_str.index;
- } else .empty;
-
- const maybe_bang = tree.firstToken(fn_proto.ast.return_type) - 1;
- const is_inferred_error = token_tags[maybe_bang] == .bang;
-
- // After creating the function ZIR instruction, it will need to update the break
- // instructions inside the expression blocks for align, addrspace, cc, and ret_ty
- // to use the function instruction as the "block" to break from.
- - var align_gz = decl_gz.makeSubBlock(params_scope); - defer align_gz.unstack(); - const align_ref: Zir.Inst.Ref = if (fn_proto.ast.align_expr == 0) .none else inst: { - const inst = try expr(&decl_gz, params_scope, coerced_align_ri, fn_proto.ast.align_expr); - if (align_gz.instructionsSlice().len == 0) { - // In this case we will send a len=0 body which can be encoded more efficiently. - break :inst inst; - } - _ = try align_gz.addBreak(.break_inline, @enumFromInt(0), inst); - break :inst inst; - }; - - var addrspace_gz = decl_gz.makeSubBlock(params_scope); - defer addrspace_gz.unstack(); - const addrspace_ref: Zir.Inst.Ref = if (fn_proto.ast.addrspace_expr == 0) .none else inst: { - const inst = try expr(&decl_gz, params_scope, coerced_addrspace_ri, fn_proto.ast.addrspace_expr); - if (addrspace_gz.instructionsSlice().len == 0) { - // In this case we will send a len=0 body which can be encoded more efficiently. - break :inst inst; - } - _ = try addrspace_gz.addBreak(.break_inline, @enumFromInt(0), inst); - break :inst inst; - }; - - var section_gz = decl_gz.makeSubBlock(params_scope); - defer section_gz.unstack(); - const section_ref: Zir.Inst.Ref = if (fn_proto.ast.section_expr == 0) .none else inst: { - const inst = try expr(&decl_gz, params_scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, fn_proto.ast.section_expr); - if (section_gz.instructionsSlice().len == 0) { - // In this case we will send a len=0 body which can be encoded more efficiently. - break :inst inst; - } - _ = try section_gz.addBreak(.break_inline, @enumFromInt(0), inst); - break :inst inst; - }; - - var cc_gz = decl_gz.makeSubBlock(params_scope); - defer cc_gz.unstack(); - const cc_ref: Zir.Inst.Ref = blk: { - if (fn_proto.ast.callconv_expr != 0) { - if (has_inline_keyword) { - return astgen.failNode( - fn_proto.ast.callconv_expr, - "explicit callconv incompatible with inline keyword", - .{}, - ); - } - const inst = try expr( - &decl_gz, - params_scope, - .{ .rl = .{ .coerced_ty = .calling_convention_type } }, - fn_proto.ast.callconv_expr, - ); - if (cc_gz.instructionsSlice().len == 0) { - // In this case we will send a len=0 body which can be encoded more efficiently. - break :blk inst; - } - _ = try cc_gz.addBreak(.break_inline, @enumFromInt(0), inst); - break :blk inst; - } else if (is_extern) { - // note: https://github.com/ziglang/zig/issues/5269 - break :blk .calling_convention_c; - } else if (has_inline_keyword) { - break :blk .calling_convention_inline; - } else { - break :blk .none; - } - }; - - var ret_gz = decl_gz.makeSubBlock(params_scope); - defer ret_gz.unstack(); - const ret_ref: Zir.Inst.Ref = inst: { - const inst = try expr(&ret_gz, params_scope, coerced_type_ri, fn_proto.ast.return_type); - if (ret_gz.instructionsSlice().len == 0) { - // In this case we will send a len=0 body which can be encoded more efficiently. 
- break :inst inst;
- }
- _ = try ret_gz.addBreak(.break_inline, @enumFromInt(0), inst);
- break :inst inst;
- };
-
- const func_inst: Zir.Inst.Ref = if (body_node == 0) func: {
- if (!is_extern) {
- return astgen.failTok(fn_proto.ast.fn_token, "non-extern function has no body", .{});
- }
- if (is_inferred_error) {
- return astgen.failTok(maybe_bang, "function prototype may not have inferred error set", .{});
- }
- break :func try decl_gz.addFunc(.{
- .src_node = decl_node,
- .cc_ref = cc_ref,
- .cc_gz = &cc_gz,
- .align_ref = align_ref,
- .align_gz = &align_gz,
- .ret_ref = ret_ref,
- .ret_gz = &ret_gz,
- .section_ref = section_ref,
- .section_gz = &section_gz,
- .addrspace_ref = addrspace_ref,
- .addrspace_gz = &addrspace_gz,
- .param_block = decl_inst,
- .body_gz = null,
- .lib_name = lib_name,
- .is_var_args = is_var_args,
- .is_inferred_error = false,
- .is_test = false,
- .is_extern = true,
- .is_noinline = is_noinline,
- .noalias_bits = noalias_bits,
- });
- } else func: {
- // as a scope, fn_gz encloses ret_gz, but for instruction list, fn_gz stacks on ret_gz
- fn_gz.instructions_top = ret_gz.instructions.items.len;
-
- const prev_fn_block = astgen.fn_block;
- const prev_fn_ret_ty = astgen.fn_ret_ty;
- astgen.fn_block = &fn_gz;
- astgen.fn_ret_ty = if (is_inferred_error or ret_ref.toIndex() != null) r: {
- // We're essentially guaranteed to need the return type at some point,
- // since the return type is likely not `void` or `noreturn` so there
- // will probably be an explicit return requiring RLS. Fetch this
- // return type now so the rest of the function can use it.
- break :r try fn_gz.addNode(.ret_type, decl_node);
- } else ret_ref;
- defer {
- astgen.fn_block = prev_fn_block;
- astgen.fn_ret_ty = prev_fn_ret_ty;
- }
-
- const prev_var_args = astgen.fn_var_args;
- astgen.fn_var_args = is_var_args;
- defer astgen.fn_var_args = prev_var_args;
-
- astgen.advanceSourceCursorToNode(body_node);
- const lbrace_line = astgen.source_line - decl_gz.decl_line;
- const lbrace_column = astgen.source_column;
-
- _ = try expr(&fn_gz, params_scope, .{ .rl = .none }, body_node);
- try checkUsed(gz, &fn_gz.base, params_scope);
-
- if (!fn_gz.endsWithNoReturn()) {
- // As our last action before the return, "pop" the error trace if needed
- _ = try fn_gz.addRestoreErrRetIndex(.ret, .always, decl_node);
-
- // Add implicit return at end of function.
- _ = try fn_gz.addUnTok(.ret_implicit, .void_value, tree.lastToken(body_node));
- }
-
- break :func try decl_gz.addFunc(.{
- .src_node = decl_node,
- .cc_ref = cc_ref,
- .cc_gz = &cc_gz,
- .align_ref = align_ref,
- .align_gz = &align_gz,
- .ret_ref = ret_ref,
- .ret_gz = &ret_gz,
- .section_ref = section_ref,
- .section_gz = &section_gz,
- .addrspace_ref = addrspace_ref,
- .addrspace_gz = &addrspace_gz,
- .lbrace_line = lbrace_line,
- .lbrace_column = lbrace_column,
- .param_block = decl_inst,
- .body_gz = &fn_gz,
- .lib_name = lib_name,
- .is_var_args = is_var_args,
- .is_inferred_error = is_inferred_error,
- .is_test = false,
- .is_extern = false,
- .is_noinline = is_noinline,
- .noalias_bits = noalias_bits,
- });
- };
-
- // We add this at the end so that its instruction index marks the end range
- // of the top level declaration. addFunc already unstacked fn_gz and ret_gz.
- _ = try decl_gz.addBreak(.break_inline, decl_inst, func_inst); - - try setDeclaration( - decl_inst, - std.zig.hashSrc(tree.getNodeSource(decl_node)), - .{ .named = fn_name_token }, - decl_gz.decl_line - gz.decl_line, - is_pub, - is_export, - doc_comment_index, - &decl_gz, - // align, linksection, and addrspace are passed in the func instruction in this case. - // TODO: move them from the function instruction to the declaration instruction? - null, - ); -} - -fn globalVarDecl( - astgen: *AstGen, - gz: *GenZir, - scope: *Scope, - wip_members: *WipMembers, - node: Ast.Node.Index, - var_decl: Ast.full.VarDecl, -) InnerError!void { - const tree = astgen.tree; - const token_tags = tree.tokens.items(.tag); - - const is_mutable = token_tags[var_decl.ast.mut_token] == .keyword_var; - // We do this at the beginning so that the instruction index marks the range start - // of the top level declaration. - const decl_inst = try gz.makeBlockInst(.declaration, node); - - const name_token = var_decl.ast.mut_token + 1; - astgen.advanceSourceCursorToNode(node); - - var block_scope: GenZir = .{ - .parent = scope, - .decl_node_index = node, - .decl_line = astgen.source_line, - .astgen = astgen, - .is_comptime = true, - .anon_name_strategy = .parent, - .instructions = gz.instructions, - .instructions_top = gz.instructions.items.len, - }; - defer block_scope.unstack(); - - const is_pub = var_decl.visib_token != null; - const is_export = blk: { - const maybe_export_token = var_decl.extern_export_token orelse break :blk false; - break :blk token_tags[maybe_export_token] == .keyword_export; - }; - const is_extern = blk: { - const maybe_extern_token = var_decl.extern_export_token orelse break :blk false; - break :blk token_tags[maybe_extern_token] == .keyword_extern; - }; - wip_members.nextDecl(decl_inst); - - const is_threadlocal = if (var_decl.threadlocal_token) |tok| blk: { - if (!is_mutable) { - return astgen.failTok(tok, "threadlocal variable cannot be constant", .{}); - } - break :blk true; - } else false; - - const lib_name = if (var_decl.lib_name) |lib_name_token| blk: { - const lib_name_str = try astgen.strLitAsString(lib_name_token); - const lib_name_slice = astgen.string_bytes.items[@intFromEnum(lib_name_str.index)..][0..lib_name_str.len]; - if (mem.indexOfScalar(u8, lib_name_slice, 0) != null) { - return astgen.failTok(lib_name_token, "library name cannot contain null bytes", .{}); - } else if (lib_name_str.len == 0) { - return astgen.failTok(lib_name_token, "library name cannot be empty", .{}); - } - break :blk lib_name_str.index; - } else .empty; - - const doc_comment_index = try astgen.docCommentAsString(var_decl.firstToken()); - - assert(var_decl.comptime_token == null); // handled by parser - - const var_inst: Zir.Inst.Ref = if (var_decl.ast.init_node != 0) vi: { - if (is_extern) { - return astgen.failNode( - var_decl.ast.init_node, - "extern variables have no initializers", - .{}, - ); - } - - const type_inst: Zir.Inst.Ref = if (var_decl.ast.type_node != 0) - try expr( - &block_scope, - &block_scope.base, - coerced_type_ri, - var_decl.ast.type_node, - ) - else - .none; - - const init_inst = try expr( - &block_scope, - &block_scope.base, - if (type_inst != .none) .{ .rl = .{ .ty = type_inst } } else .{ .rl = .none }, - var_decl.ast.init_node, - ); - - if (is_mutable) { - const var_inst = try block_scope.addVar(.{ - .var_type = type_inst, - .lib_name = .empty, - .align_inst = .none, // passed via the decls data - .init = init_inst, - .is_extern = false, - .is_const = !is_mutable, - .is_threadlocal 
= is_threadlocal, - }); - break :vi var_inst; - } else { - break :vi init_inst; - } - } else if (!is_extern) { - return astgen.failNode(node, "variables must be initialized", .{}); - } else if (var_decl.ast.type_node != 0) vi: { - // Extern variable which has an explicit type. - const type_inst = try typeExpr(&block_scope, &block_scope.base, var_decl.ast.type_node); - - const var_inst = try block_scope.addVar(.{ - .var_type = type_inst, - .lib_name = lib_name, - .align_inst = .none, // passed via the decls data - .init = .none, - .is_extern = true, - .is_const = !is_mutable, - .is_threadlocal = is_threadlocal, - }); - break :vi var_inst; - } else { - return astgen.failNode(node, "unable to infer variable type", .{}); - }; - - // We do this at the end so that the instruction index marks the end - // range of a top level declaration. - _ = try block_scope.addBreakWithSrcNode(.break_inline, decl_inst, var_inst, node); - - var align_gz = block_scope.makeSubBlock(scope); - if (var_decl.ast.align_node != 0) { - const align_inst = try expr(&align_gz, &align_gz.base, coerced_align_ri, var_decl.ast.align_node); - _ = try align_gz.addBreakWithSrcNode(.break_inline, decl_inst, align_inst, node); - } - - var linksection_gz = align_gz.makeSubBlock(scope); - if (var_decl.ast.section_node != 0) { - const linksection_inst = try expr(&linksection_gz, &linksection_gz.base, coerced_linksection_ri, var_decl.ast.section_node); - _ = try linksection_gz.addBreakWithSrcNode(.break_inline, decl_inst, linksection_inst, node); - } - - var addrspace_gz = linksection_gz.makeSubBlock(scope); - if (var_decl.ast.addrspace_node != 0) { - const addrspace_inst = try expr(&addrspace_gz, &addrspace_gz.base, coerced_addrspace_ri, var_decl.ast.addrspace_node); - _ = try addrspace_gz.addBreakWithSrcNode(.break_inline, decl_inst, addrspace_inst, node); - } - - try setDeclaration( - decl_inst, - std.zig.hashSrc(tree.getNodeSource(node)), - .{ .named = name_token }, - block_scope.decl_line - gz.decl_line, - is_pub, - is_export, - doc_comment_index, - &block_scope, - .{ - .align_gz = &align_gz, - .linksection_gz = &linksection_gz, - .addrspace_gz = &addrspace_gz, - }, - ); -} - -fn comptimeDecl( - astgen: *AstGen, - gz: *GenZir, - scope: *Scope, - wip_members: *WipMembers, - node: Ast.Node.Index, -) InnerError!void { - const tree = astgen.tree; - const node_datas = tree.nodes.items(.data); - const body_node = node_datas[node].lhs; - - // Up top so the ZIR instruction index marks the start range of this - // top-level declaration. 
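// All of the declaration forms in this file (functions, globals, comptime,
// usingnamespace, test) follow the same bracketing protocol, which the
// comment below alludes to. Schematically, condensed from the surrounding
// functions rather than new logic:
//
//     const decl_inst = try gz.makeBlockInst(.declaration, node); // marks range start
//     // ... emit the body into a sub-GenZir ...
//     _ = try decl_block.addBreak(.break_inline, decl_inst, result); // marks range end
//     try setDeclaration(decl_inst, ...);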
- const decl_inst = try gz.makeBlockInst(.declaration, node); - wip_members.nextDecl(decl_inst); - astgen.advanceSourceCursorToNode(node); - - var decl_block: GenZir = .{ - .is_comptime = true, - .decl_node_index = node, - .decl_line = astgen.source_line, - .parent = scope, - .astgen = astgen, - .instructions = gz.instructions, - .instructions_top = gz.instructions.items.len, - }; - defer decl_block.unstack(); - - const block_result = try expr(&decl_block, &decl_block.base, .{ .rl = .none }, body_node); - if (decl_block.isEmpty() or !decl_block.refIsNoReturn(block_result)) { - _ = try decl_block.addBreak(.break_inline, decl_inst, .void_value); - } - - try setDeclaration( - decl_inst, - std.zig.hashSrc(tree.getNodeSource(node)), - .@"comptime", - decl_block.decl_line - gz.decl_line, - false, - false, - .empty, - &decl_block, - null, - ); -} - -fn usingnamespaceDecl( - astgen: *AstGen, - gz: *GenZir, - scope: *Scope, - wip_members: *WipMembers, - node: Ast.Node.Index, -) InnerError!void { - const tree = astgen.tree; - const node_datas = tree.nodes.items(.data); - - const type_expr = node_datas[node].lhs; - const is_pub = blk: { - const main_tokens = tree.nodes.items(.main_token); - const token_tags = tree.tokens.items(.tag); - const main_token = main_tokens[node]; - break :blk (main_token > 0 and token_tags[main_token - 1] == .keyword_pub); - }; - // Up top so the ZIR instruction index marks the start range of this - // top-level declaration. - const decl_inst = try gz.makeBlockInst(.declaration, node); - wip_members.nextDecl(decl_inst); - astgen.advanceSourceCursorToNode(node); - - var decl_block: GenZir = .{ - .is_comptime = true, - .decl_node_index = node, - .decl_line = astgen.source_line, - .parent = scope, - .astgen = astgen, - .instructions = gz.instructions, - .instructions_top = gz.instructions.items.len, - }; - defer decl_block.unstack(); - - const namespace_inst = try typeExpr(&decl_block, &decl_block.base, type_expr); - _ = try decl_block.addBreak(.break_inline, decl_inst, namespace_inst); - - try setDeclaration( - decl_inst, - std.zig.hashSrc(tree.getNodeSource(node)), - .@"usingnamespace", - decl_block.decl_line - gz.decl_line, - is_pub, - false, - .empty, - &decl_block, - null, - ); -} - -fn testDecl( - astgen: *AstGen, - gz: *GenZir, - scope: *Scope, - wip_members: *WipMembers, - node: Ast.Node.Index, -) InnerError!void { - const tree = astgen.tree; - const node_datas = tree.nodes.items(.data); - const body_node = node_datas[node].rhs; - - // Up top so the ZIR instruction index marks the start range of this - // top-level declaration. 
- const decl_inst = try gz.makeBlockInst(.declaration, node); - - wip_members.nextDecl(decl_inst); - astgen.advanceSourceCursorToNode(node); - - var decl_block: GenZir = .{ - .is_comptime = true, - .decl_node_index = node, - .decl_line = astgen.source_line, - .parent = scope, - .astgen = astgen, - .instructions = gz.instructions, - .instructions_top = gz.instructions.items.len, - }; - defer decl_block.unstack(); - - const main_tokens = tree.nodes.items(.main_token); - const token_tags = tree.tokens.items(.tag); - const test_token = main_tokens[node]; - const test_name_token = test_token + 1; - const test_name: DeclarationName = switch (token_tags[test_name_token]) { - else => .unnamed_test, - .string_literal => .{ .named_test = test_name_token }, - .identifier => blk: { - const ident_name_raw = tree.tokenSlice(test_name_token); - - if (mem.eql(u8, ident_name_raw, "_")) return astgen.failTok(test_name_token, "'_' used as an identifier without @\"_\" syntax", .{}); - - // if not @"" syntax, just use raw token slice - if (ident_name_raw[0] != '@') { - if (isPrimitive(ident_name_raw)) return astgen.failTok(test_name_token, "cannot test a primitive", .{}); - } - - // Local variables, including function parameters. - const name_str_index = try astgen.identAsString(test_name_token); - var s = scope; - var found_already: ?Ast.Node.Index = null; // we have found a decl with the same name already - var num_namespaces_out: u32 = 0; - var capturing_namespace: ?*Scope.Namespace = null; - while (true) switch (s.tag) { - .local_val => { - const local_val = s.cast(Scope.LocalVal).?; - if (local_val.name == name_str_index) { - local_val.used = test_name_token; - return astgen.failTokNotes(test_name_token, "cannot test a {s}", .{ - @tagName(local_val.id_cat), - }, &[_]u32{ - try astgen.errNoteTok(local_val.token_src, "{s} declared here", .{ - @tagName(local_val.id_cat), - }), - }); - } - s = local_val.parent; - }, - .local_ptr => { - const local_ptr = s.cast(Scope.LocalPtr).?; - if (local_ptr.name == name_str_index) { - local_ptr.used = test_name_token; - return astgen.failTokNotes(test_name_token, "cannot test a {s}", .{ - @tagName(local_ptr.id_cat), - }, &[_]u32{ - try astgen.errNoteTok(local_ptr.token_src, "{s} declared here", .{ - @tagName(local_ptr.id_cat), - }), - }); - } - s = local_ptr.parent; - }, - .gen_zir => s = s.cast(GenZir).?.parent, - .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent, - .namespace, .enum_namespace => { - const ns = s.cast(Scope.Namespace).?; - if (ns.decls.get(name_str_index)) |i| { - if (found_already) |f| { - return astgen.failTokNotes(test_name_token, "ambiguous reference", .{}, &.{ - try astgen.errNoteNode(f, "declared here", .{}), - try astgen.errNoteNode(i, "also declared here", .{}), - }); - } - // We found a match but must continue looking for ambiguous references to decls. 
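// This switch is the standard lexical-scope walk used throughout AstGen:
// follow parent pointers from the innermost scope outward until `.top`,
// checking locals for an exact name match and namespaces for decl matches
// (continuing past the first namespace hit so ambiguous references can be
// diagnosed). A minimal standalone sketch of the parent-pointer walk itself,
// with hypothetical types, std-only:
//
//     const Scope = struct { name: ?[]const u8, parent: ?*const Scope };
//     fn lookup(innermost: *const Scope, name: []const u8) ?*const Scope {
//         var s: ?*const Scope = innermost;
//         while (s) |cur| : (s = cur.parent) {
//             if (cur.name) |n| if (std.mem.eql(u8, n, name)) return cur;
//         }
//         return null;
//     }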
- found_already = i; - } - num_namespaces_out += 1; - capturing_namespace = ns; - s = ns.parent; - }, - .top => break, - }; - if (found_already == null) { - const ident_name = try astgen.identifierTokenString(test_name_token); - return astgen.failTok(test_name_token, "use of undeclared identifier '{s}'", .{ident_name}); - } - - break :blk .{ .decltest = name_str_index }; - }, - }; - - var fn_block: GenZir = .{ - .is_comptime = false, - .decl_node_index = node, - .decl_line = decl_block.decl_line, - .parent = &decl_block.base, - .astgen = astgen, - .instructions = decl_block.instructions, - .instructions_top = decl_block.instructions.items.len, - }; - defer fn_block.unstack(); - - const prev_fn_block = astgen.fn_block; - const prev_fn_ret_ty = astgen.fn_ret_ty; - astgen.fn_block = &fn_block; - astgen.fn_ret_ty = .anyerror_void_error_union_type; - defer { - astgen.fn_block = prev_fn_block; - astgen.fn_ret_ty = prev_fn_ret_ty; - } - - astgen.advanceSourceCursorToNode(body_node); - const lbrace_line = astgen.source_line - decl_block.decl_line; - const lbrace_column = astgen.source_column; - - const block_result = try expr(&fn_block, &fn_block.base, .{ .rl = .none }, body_node); - if (fn_block.isEmpty() or !fn_block.refIsNoReturn(block_result)) { - - // As our last action before the return, "pop" the error trace if needed - _ = try fn_block.addRestoreErrRetIndex(.ret, .always, node); - - // Add implicit return at end of function. - _ = try fn_block.addUnTok(.ret_implicit, .void_value, tree.lastToken(body_node)); - } - - const func_inst = try decl_block.addFunc(.{ - .src_node = node, - - .cc_ref = .none, - .cc_gz = null, - .align_ref = .none, - .align_gz = null, - .ret_ref = .anyerror_void_error_union_type, - .ret_gz = null, - .section_ref = .none, - .section_gz = null, - .addrspace_ref = .none, - .addrspace_gz = null, - - .lbrace_line = lbrace_line, - .lbrace_column = lbrace_column, - .param_block = decl_inst, - .body_gz = &fn_block, - .lib_name = .empty, - .is_var_args = false, - .is_inferred_error = false, - .is_test = true, - .is_extern = false, - .is_noinline = false, - .noalias_bits = 0, - }); - - _ = try decl_block.addBreak(.break_inline, decl_inst, func_inst); - - try setDeclaration( - decl_inst, - std.zig.hashSrc(tree.getNodeSource(node)), - test_name, - decl_block.decl_line - gz.decl_line, - false, - false, - .empty, - &decl_block, - null, - ); -} - -fn structDeclInner( - gz: *GenZir, - scope: *Scope, - node: Ast.Node.Index, - container_decl: Ast.full.ContainerDecl, - layout: std.builtin.Type.ContainerLayout, - backing_int_node: Ast.Node.Index, -) InnerError!Zir.Inst.Ref { - const decl_inst = try gz.reserveInstructionIndex(); - - if (container_decl.ast.members.len == 0 and backing_int_node == 0) { - try gz.setStruct(decl_inst, .{ - .src_node = node, - .layout = layout, - .fields_len = 0, - .decls_len = 0, - .backing_int_ref = .none, - .backing_int_body_len = 0, - .known_non_opv = false, - .known_comptime_only = false, - .is_tuple = false, - .any_comptime_fields = false, - .any_default_inits = false, - .any_aligned_fields = false, - .fields_hash = std.zig.hashSrc(@tagName(layout)), - }); - return decl_inst.toRef(); - } - - const astgen = gz.astgen; - const gpa = astgen.gpa; - const tree = astgen.tree; - - var namespace: Scope.Namespace = .{ - .parent = scope, - .node = node, - .inst = decl_inst, - .declaring_gz = gz, - }; - defer namespace.deinit(gpa); - - // The struct_decl instruction introduces a scope in which the decls of the struct - // are in scope, so that field types, 
alignments, and default value expressions - // can refer to decls within the struct itself. - astgen.advanceSourceCursorToNode(node); - var block_scope: GenZir = .{ - .parent = &namespace.base, - .decl_node_index = node, - .decl_line = gz.decl_line, - .astgen = astgen, - .is_comptime = true, - .instructions = gz.instructions, - .instructions_top = gz.instructions.items.len, - }; - defer block_scope.unstack(); - - const scratch_top = astgen.scratch.items.len; - defer astgen.scratch.items.len = scratch_top; - - var backing_int_body_len: usize = 0; - const backing_int_ref: Zir.Inst.Ref = blk: { - if (backing_int_node != 0) { - if (layout != .Packed) { - return astgen.failNode(backing_int_node, "non-packed struct does not support backing integer type", .{}); - } else { - const backing_int_ref = try typeExpr(&block_scope, &namespace.base, backing_int_node); - if (!block_scope.isEmpty()) { - if (!block_scope.endsWithNoReturn()) { - _ = try block_scope.addBreak(.break_inline, decl_inst, backing_int_ref); - } - - const body = block_scope.instructionsSlice(); - const old_scratch_len = astgen.scratch.items.len; - try astgen.scratch.ensureUnusedCapacity(gpa, countBodyLenAfterFixups(astgen, body)); - appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body); - backing_int_body_len = astgen.scratch.items.len - old_scratch_len; - block_scope.instructions.items.len = block_scope.instructions_top; - } - break :blk backing_int_ref; - } - } else { - break :blk .none; - } - }; - - const decl_count = try astgen.scanDecls(&namespace, container_decl.ast.members); - const field_count: u32 = @intCast(container_decl.ast.members.len - decl_count); - - const bits_per_field = 4; - const max_field_size = 5; - var wip_members = try WipMembers.init(gpa, &astgen.scratch, decl_count, field_count, bits_per_field, max_field_size); - defer wip_members.deinit(); - - // We will use the scratch buffer, starting here, for the bodies: - // bodies: { // for every fields_len - // field_type_body_inst: Inst, // for each field_type_body_len - // align_body_inst: Inst, // for each align_body_len - // init_body_inst: Inst, // for each init_body_len - // } - // Note that the scratch buffer is simultaneously being used by WipMembers, however - // it will not access any elements beyond this point in the ArrayList. It also - // accesses via the ArrayList items field so it can handle the scratch buffer being - // reallocated. - // No defer needed here because it is handled by `wip_members.deinit()` above. 
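// Each body is flushed into the scratch buffer with the same sequence,
// repeated below for the field type, alignment, and default init bodies.
// Condensed from those copies (not new logic):
//
//     const body = block_scope.instructionsSlice();
//     const old_scratch_len = astgen.scratch.items.len;
//     try astgen.scratch.ensureUnusedCapacity(gpa, countBodyLenAfterFixups(astgen, body));
//     appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body);
//     wip_members.appendToField(@intCast(astgen.scratch.items.len - old_scratch_len));
//     block_scope.instructions.items.len = block_scope.instructions_top; // reset for reuse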
- const bodies_start = astgen.scratch.items.len; - - const node_tags = tree.nodes.items(.tag); - const is_tuple = for (container_decl.ast.members) |member_node| { - const container_field = tree.fullContainerField(member_node) orelse continue; - if (container_field.ast.tuple_like) break true; - } else false; - - if (is_tuple) switch (layout) { - .Auto => {}, - .Extern => return astgen.failNode(node, "extern tuples are not supported", .{}), - .Packed => return astgen.failNode(node, "packed tuples are not supported", .{}), - }; - - if (is_tuple) for (container_decl.ast.members) |member_node| { - switch (node_tags[member_node]) { - .container_field_init, - .container_field_align, - .container_field, - .@"comptime", - .test_decl, - => continue, - else => { - const tuple_member = for (container_decl.ast.members) |maybe_tuple| switch (node_tags[maybe_tuple]) { - .container_field_init, - .container_field_align, - .container_field, - => break maybe_tuple, - else => {}, - } else unreachable; - return astgen.failNodeNotes( - member_node, - "tuple declarations cannot contain declarations", - .{}, - &[_]u32{ - try astgen.errNoteNode(tuple_member, "tuple field here", .{}), - }, - ); - }, - } - }; - - var fields_hasher = std.zig.SrcHasher.init(.{}); - fields_hasher.update(@tagName(layout)); - if (backing_int_node != 0) { - fields_hasher.update(tree.getNodeSource(backing_int_node)); - } - - var sfba = std.heap.stackFallback(256, astgen.arena); - const sfba_allocator = sfba.get(); - - var duplicate_names = std.AutoArrayHashMap(Zir.NullTerminatedString, std.ArrayListUnmanaged(Ast.TokenIndex)).init(sfba_allocator); - try duplicate_names.ensureTotalCapacity(field_count); - - // When there aren't errors, use this to avoid a second iteration. - var any_duplicate = false; - - var known_non_opv = false; - var known_comptime_only = false; - var any_comptime_fields = false; - var any_aligned_fields = false; - var any_default_inits = false; - for (container_decl.ast.members) |member_node| { - var member = switch (try containerMember(&block_scope, &namespace.base, &wip_members, member_node)) { - .decl => continue, - .field => |field| field, - }; - - fields_hasher.update(tree.getNodeSource(member_node)); - - if (!is_tuple) { - const field_name = try astgen.identAsString(member.ast.main_token); - - member.convertToNonTupleLike(astgen.tree.nodes); - assert(!member.ast.tuple_like); - - wip_members.appendToField(@intFromEnum(field_name)); - - const gop = try duplicate_names.getOrPut(field_name); - - if (gop.found_existing) { - try gop.value_ptr.append(sfba_allocator, member.ast.main_token); - any_duplicate = true; - } else { - gop.value_ptr.* = .{}; - try gop.value_ptr.append(sfba_allocator, member.ast.main_token); - } - } else if (!member.ast.tuple_like) { - return astgen.failTok(member.ast.main_token, "tuple field has a name", .{}); - } - - const doc_comment_index = try astgen.docCommentAsString(member.firstToken()); - wip_members.appendToField(@intFromEnum(doc_comment_index)); - - if (member.ast.type_expr == 0) { - return astgen.failTok(member.ast.main_token, "struct field missing type", .{}); - } - - const field_type = try typeExpr(&block_scope, &namespace.base, member.ast.type_expr); - const have_type_body = !block_scope.isEmpty(); - const have_align = member.ast.align_expr != 0; - const have_value = member.ast.value_expr != 0; - const is_comptime = member.comptime_token != null; - - if (is_comptime) { - switch (layout) { - .Packed => return astgen.failTok(member.comptime_token.?, "packed struct fields cannot be 
marked comptime", .{}),
-                .Extern => return astgen.failTok(member.comptime_token.?, "extern struct fields cannot be marked comptime", .{}),
-                .Auto => any_comptime_fields = true,
-            }
-        } else {
-            known_non_opv = known_non_opv or
-                nodeImpliesMoreThanOnePossibleValue(tree, member.ast.type_expr);
-            known_comptime_only = known_comptime_only or
-                nodeImpliesComptimeOnly(tree, member.ast.type_expr);
-        }
-        wip_members.nextField(bits_per_field, .{ have_align, have_value, is_comptime, have_type_body });
-
-        if (have_type_body) {
-            if (!block_scope.endsWithNoReturn()) {
-                _ = try block_scope.addBreak(.break_inline, decl_inst, field_type);
-            }
-            const body = block_scope.instructionsSlice();
-            const old_scratch_len = astgen.scratch.items.len;
-            try astgen.scratch.ensureUnusedCapacity(gpa, countBodyLenAfterFixups(astgen, body));
-            appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body);
-            wip_members.appendToField(@intCast(astgen.scratch.items.len - old_scratch_len));
-            block_scope.instructions.items.len = block_scope.instructions_top;
-        } else {
-            wip_members.appendToField(@intFromEnum(field_type));
-        }
-
-        if (have_align) {
-            if (layout == .Packed) {
-                try astgen.appendErrorNode(member.ast.align_expr, "unable to override alignment of packed struct fields", .{});
-            }
-            any_aligned_fields = true;
-            const align_ref = try expr(&block_scope, &namespace.base, coerced_align_ri, member.ast.align_expr);
-            if (!block_scope.endsWithNoReturn()) {
-                _ = try block_scope.addBreak(.break_inline, decl_inst, align_ref);
-            }
-            const body = block_scope.instructionsSlice();
-            const old_scratch_len = astgen.scratch.items.len;
-            try astgen.scratch.ensureUnusedCapacity(gpa, countBodyLenAfterFixups(astgen, body));
-            appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body);
-            wip_members.appendToField(@intCast(astgen.scratch.items.len - old_scratch_len));
-            block_scope.instructions.items.len = block_scope.instructions_top;
-        }
-
-        if (have_value) {
-            any_default_inits = true;
-
-            // The decl_inst is used as the result type here so that we can easily
-            // reconstruct a mapping between it and the field type when the field
-            // inits are analyzed.
- const ri: ResultInfo = .{ .rl = if (field_type == .none) .none else .{ .coerced_ty = decl_inst.toRef() } }; - - const default_inst = try expr(&block_scope, &namespace.base, ri, member.ast.value_expr); - if (!block_scope.endsWithNoReturn()) { - _ = try block_scope.addBreak(.break_inline, decl_inst, default_inst); - } - const body = block_scope.instructionsSlice(); - const old_scratch_len = astgen.scratch.items.len; - try astgen.scratch.ensureUnusedCapacity(gpa, countBodyLenAfterFixups(astgen, body)); - appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body); - wip_members.appendToField(@intCast(astgen.scratch.items.len - old_scratch_len)); - block_scope.instructions.items.len = block_scope.instructions_top; - } else if (member.comptime_token) |comptime_token| { - return astgen.failTok(comptime_token, "comptime field without default initialization value", .{}); - } - } - - if (any_duplicate) { - var it = duplicate_names.iterator(); - - while (it.next()) |entry| { - const record = entry.value_ptr.*; - if (record.items.len > 1) { - var error_notes = std.ArrayList(u32).init(astgen.arena); - - for (record.items[1..]) |duplicate| { - try error_notes.append(try astgen.errNoteTok(duplicate, "duplicate field here", .{})); - } - - try error_notes.append(try astgen.errNoteNode(node, "struct declared here", .{})); - - try astgen.appendErrorTokNotes( - record.items[0], - "duplicate struct field name", - .{}, - error_notes.items, - ); - } - } - - return error.AnalysisFail; - } - - var fields_hash: std.zig.SrcHash = undefined; - fields_hasher.final(&fields_hash); - - try gz.setStruct(decl_inst, .{ - .src_node = node, - .layout = layout, - .fields_len = field_count, - .decls_len = decl_count, - .backing_int_ref = backing_int_ref, - .backing_int_body_len = @intCast(backing_int_body_len), - .known_non_opv = known_non_opv, - .known_comptime_only = known_comptime_only, - .is_tuple = is_tuple, - .any_comptime_fields = any_comptime_fields, - .any_default_inits = any_default_inits, - .any_aligned_fields = any_aligned_fields, - .fields_hash = fields_hash, - }); - - wip_members.finishBits(bits_per_field); - const decls_slice = wip_members.declsSlice(); - const fields_slice = wip_members.fieldsSlice(); - const bodies_slice = astgen.scratch.items[bodies_start..]; - try astgen.extra.ensureUnusedCapacity(gpa, backing_int_body_len + - decls_slice.len + fields_slice.len + bodies_slice.len); - astgen.extra.appendSliceAssumeCapacity(astgen.scratch.items[scratch_top..][0..backing_int_body_len]); - astgen.extra.appendSliceAssumeCapacity(decls_slice); - astgen.extra.appendSliceAssumeCapacity(fields_slice); - astgen.extra.appendSliceAssumeCapacity(bodies_slice); - - block_scope.unstack(); - try gz.addNamespaceCaptures(&namespace); - return decl_inst.toRef(); -} - -fn unionDeclInner( - gz: *GenZir, - scope: *Scope, - node: Ast.Node.Index, - members: []const Ast.Node.Index, - layout: std.builtin.Type.ContainerLayout, - arg_node: Ast.Node.Index, - auto_enum_tok: ?Ast.TokenIndex, -) InnerError!Zir.Inst.Ref { - const decl_inst = try gz.reserveInstructionIndex(); - - const astgen = gz.astgen; - const gpa = astgen.gpa; - - var namespace: Scope.Namespace = .{ - .parent = scope, - .node = node, - .inst = decl_inst, - .declaring_gz = gz, - }; - defer namespace.deinit(gpa); - - // The union_decl instruction introduces a scope in which the decls of the union - // are in scope, so that field types, alignments, and default value expressions - // can refer to decls within the union itself. 
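// The layout checks a few lines below only permit a tag for auto-layout
// unions: both the `union(enum)` form and an explicit `union(T)` argument
// are rejected for extern/packed layouts. For example:
//
//     const A = union(enum) { x: u32, y: f32 };        // ok: auto layout, inferred tag
//     const B = extern union(enum) { x: u32, y: f32 }; // rejected: "extern union does
//                                                      // not support enum tag type"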
- astgen.advanceSourceCursorToNode(node); - var block_scope: GenZir = .{ - .parent = &namespace.base, - .decl_node_index = node, - .decl_line = gz.decl_line, - .astgen = astgen, - .is_comptime = true, - .instructions = gz.instructions, - .instructions_top = gz.instructions.items.len, - }; - defer block_scope.unstack(); - - const decl_count = try astgen.scanDecls(&namespace, members); - const field_count: u32 = @intCast(members.len - decl_count); - - if (layout != .Auto and (auto_enum_tok != null or arg_node != 0)) { - const layout_str = if (layout == .Extern) "extern" else "packed"; - if (arg_node != 0) { - return astgen.failNode(arg_node, "{s} union does not support enum tag type", .{layout_str}); - } else { - return astgen.failTok(auto_enum_tok.?, "{s} union does not support enum tag type", .{layout_str}); - } - } - - const arg_inst: Zir.Inst.Ref = if (arg_node != 0) - try typeExpr(&block_scope, &namespace.base, arg_node) - else - .none; - - const bits_per_field = 4; - const max_field_size = 5; - var any_aligned_fields = false; - var wip_members = try WipMembers.init(gpa, &astgen.scratch, decl_count, field_count, bits_per_field, max_field_size); - defer wip_members.deinit(); - - var fields_hasher = std.zig.SrcHasher.init(.{}); - fields_hasher.update(@tagName(layout)); - fields_hasher.update(&.{@intFromBool(auto_enum_tok != null)}); - if (arg_node != 0) { - fields_hasher.update(astgen.tree.getNodeSource(arg_node)); - } - - var sfba = std.heap.stackFallback(256, astgen.arena); - const sfba_allocator = sfba.get(); - - var duplicate_names = std.AutoArrayHashMap(Zir.NullTerminatedString, std.ArrayListUnmanaged(Ast.TokenIndex)).init(sfba_allocator); - try duplicate_names.ensureTotalCapacity(field_count); - - // When there aren't errors, use this to avoid a second iteration. 
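// Duplicate-name detection here is two-phase: the loop below records every
// occurrence per name via getOrPut and merely sets a flag on collision, and
// the error-reporting pass runs only when the flag is set, so the common
// error-free case pays for a single iteration. A standalone sketch of the
// same pattern (hypothetical names; the real code allocates the inner lists
// from a stack-fallback arena):
//
//     var seen = std.AutoArrayHashMap(u32, std.ArrayListUnmanaged(u32)).init(gpa);
//     var any_duplicate = false;
//     for (names, 0..) |name, i| {
//         const gop = try seen.getOrPut(name);
//         if (!gop.found_existing) gop.value_ptr.* = .{};
//         try gop.value_ptr.append(gpa, @intCast(i));
//         any_duplicate = any_duplicate or gop.found_existing;
//     }
//     if (any_duplicate) {
//         // second pass: report each name whose occurrence list has > 1 entry
//     }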
- var any_duplicate = false; - - for (members) |member_node| { - var member = switch (try containerMember(&block_scope, &namespace.base, &wip_members, member_node)) { - .decl => continue, - .field => |field| field, - }; - fields_hasher.update(astgen.tree.getNodeSource(member_node)); - member.convertToNonTupleLike(astgen.tree.nodes); - if (member.ast.tuple_like) { - return astgen.failTok(member.ast.main_token, "union field missing name", .{}); - } - if (member.comptime_token) |comptime_token| { - return astgen.failTok(comptime_token, "union fields cannot be marked comptime", .{}); - } - - const field_name = try astgen.identAsString(member.ast.main_token); - wip_members.appendToField(@intFromEnum(field_name)); - - const gop = try duplicate_names.getOrPut(field_name); - - if (gop.found_existing) { - try gop.value_ptr.append(sfba_allocator, member.ast.main_token); - any_duplicate = true; - } else { - gop.value_ptr.* = .{}; - try gop.value_ptr.append(sfba_allocator, member.ast.main_token); - } - - const doc_comment_index = try astgen.docCommentAsString(member.firstToken()); - wip_members.appendToField(@intFromEnum(doc_comment_index)); - - const have_type = member.ast.type_expr != 0; - const have_align = member.ast.align_expr != 0; - const have_value = member.ast.value_expr != 0; - const unused = false; - wip_members.nextField(bits_per_field, .{ have_type, have_align, have_value, unused }); - - if (have_type) { - const field_type = try typeExpr(&block_scope, &namespace.base, member.ast.type_expr); - wip_members.appendToField(@intFromEnum(field_type)); - } else if (arg_inst == .none and auto_enum_tok == null) { - return astgen.failNode(member_node, "union field missing type", .{}); - } - if (have_align) { - const align_inst = try expr(&block_scope, &block_scope.base, coerced_align_ri, member.ast.align_expr); - wip_members.appendToField(@intFromEnum(align_inst)); - any_aligned_fields = true; - } - if (have_value) { - if (arg_inst == .none) { - return astgen.failNodeNotes( - node, - "explicitly valued tagged union missing integer tag type", - .{}, - &[_]u32{ - try astgen.errNoteNode( - member.ast.value_expr, - "tag value specified here", - .{}, - ), - }, - ); - } - if (auto_enum_tok == null) { - return astgen.failNodeNotes( - node, - "explicitly valued tagged union requires inferred enum tag type", - .{}, - &[_]u32{ - try astgen.errNoteNode( - member.ast.value_expr, - "tag value specified here", - .{}, - ), - }, - ); - } - const tag_value = try expr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = arg_inst } }, member.ast.value_expr); - wip_members.appendToField(@intFromEnum(tag_value)); - } - } - - if (any_duplicate) { - var it = duplicate_names.iterator(); - - while (it.next()) |entry| { - const record = entry.value_ptr.*; - if (record.items.len > 1) { - var error_notes = std.ArrayList(u32).init(astgen.arena); - - for (record.items[1..]) |duplicate| { - try error_notes.append(try astgen.errNoteTok(duplicate, "duplicate field here", .{})); - } - - try error_notes.append(try astgen.errNoteNode(node, "union declared here", .{})); - - try astgen.appendErrorTokNotes( - record.items[0], - "duplicate union field name", - .{}, - error_notes.items, - ); - } - } - - return error.AnalysisFail; - } - - var fields_hash: std.zig.SrcHash = undefined; - fields_hasher.final(&fields_hash); - - if (!block_scope.isEmpty()) { - _ = try block_scope.addBreak(.break_inline, decl_inst, .void_value); - } - - const body = block_scope.instructionsSlice(); - const body_len = astgen.countBodyLenAfterFixups(body); - - try 
gz.setUnion(decl_inst, .{ - .src_node = node, - .layout = layout, - .tag_type = arg_inst, - .body_len = body_len, - .fields_len = field_count, - .decls_len = decl_count, - .auto_enum_tag = auto_enum_tok != null, - .any_aligned_fields = any_aligned_fields, - .fields_hash = fields_hash, - }); - - wip_members.finishBits(bits_per_field); - const decls_slice = wip_members.declsSlice(); - const fields_slice = wip_members.fieldsSlice(); - try astgen.extra.ensureUnusedCapacity(gpa, decls_slice.len + body_len + fields_slice.len); - astgen.extra.appendSliceAssumeCapacity(decls_slice); - astgen.appendBodyWithFixups(body); - astgen.extra.appendSliceAssumeCapacity(fields_slice); - - block_scope.unstack(); - try gz.addNamespaceCaptures(&namespace); - return decl_inst.toRef(); -} - -fn containerDecl( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, - container_decl: Ast.full.ContainerDecl, -) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - const gpa = astgen.gpa; - const tree = astgen.tree; - const token_tags = tree.tokens.items(.tag); - - const prev_fn_block = astgen.fn_block; - astgen.fn_block = null; - defer astgen.fn_block = prev_fn_block; - - // We must not create any types until Sema. Here the goal is only to generate - // ZIR for all the field types, alignments, and default value expressions. - - switch (token_tags[container_decl.ast.main_token]) { - .keyword_struct => { - const layout = if (container_decl.layout_token) |t| switch (token_tags[t]) { - .keyword_packed => std.builtin.Type.ContainerLayout.Packed, - .keyword_extern => std.builtin.Type.ContainerLayout.Extern, - else => unreachable, - } else std.builtin.Type.ContainerLayout.Auto; - - const result = try structDeclInner(gz, scope, node, container_decl, layout, container_decl.ast.arg); - return rvalue(gz, ri, result, node); - }, - .keyword_union => { - const layout = if (container_decl.layout_token) |t| switch (token_tags[t]) { - .keyword_packed => std.builtin.Type.ContainerLayout.Packed, - .keyword_extern => std.builtin.Type.ContainerLayout.Extern, - else => unreachable, - } else std.builtin.Type.ContainerLayout.Auto; - - const result = try unionDeclInner(gz, scope, node, container_decl.ast.members, layout, container_decl.ast.arg, container_decl.ast.enum_token); - return rvalue(gz, ri, result, node); - }, - .keyword_enum => { - if (container_decl.layout_token) |t| { - return astgen.failTok(t, "enums do not support 'packed' or 'extern'; instead provide an explicit integer tag type", .{}); - } - // Count total fields as well as how many have explicitly provided tag values. 
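// The counting pass below also validates non-exhaustive enums: a `_` field
// marks the enum non-exhaustive, must be the last field, cannot carry a
// value, and (checked after the loop) requires an explicit integer tag type.
// The accepted form, for example:
//
//     const E = enum(u8) { a, b = 5, _ };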
- const counts = blk: { - var values: usize = 0; - var total_fields: usize = 0; - var decls: usize = 0; - var nonexhaustive_node: Ast.Node.Index = 0; - var nonfinal_nonexhaustive = false; - for (container_decl.ast.members) |member_node| { - var member = tree.fullContainerField(member_node) orelse { - decls += 1; - continue; - }; - member.convertToNonTupleLike(astgen.tree.nodes); - if (member.ast.tuple_like) { - return astgen.failTok(member.ast.main_token, "enum field missing name", .{}); - } - if (member.comptime_token) |comptime_token| { - return astgen.failTok(comptime_token, "enum fields cannot be marked comptime", .{}); - } - if (member.ast.type_expr != 0) { - return astgen.failNodeNotes( - member.ast.type_expr, - "enum fields do not have types", - .{}, - &[_]u32{ - try astgen.errNoteNode( - node, - "consider 'union(enum)' here to make it a tagged union", - .{}, - ), - }, - ); - } - if (member.ast.align_expr != 0) { - return astgen.failNode(member.ast.align_expr, "enum fields cannot be aligned", .{}); - } - - const name_token = member.ast.main_token; - if (mem.eql(u8, tree.tokenSlice(name_token), "_")) { - if (nonexhaustive_node != 0) { - return astgen.failNodeNotes( - member_node, - "redundant non-exhaustive enum mark", - .{}, - &[_]u32{ - try astgen.errNoteNode( - nonexhaustive_node, - "other mark here", - .{}, - ), - }, - ); - } - nonexhaustive_node = member_node; - if (member.ast.value_expr != 0) { - return astgen.failNode(member.ast.value_expr, "'_' is used to mark an enum as non-exhaustive and cannot be assigned a value", .{}); - } - continue; - } else if (nonexhaustive_node != 0) { - nonfinal_nonexhaustive = true; - } - total_fields += 1; - if (member.ast.value_expr != 0) { - if (container_decl.ast.arg == 0) { - return astgen.failNode(member.ast.value_expr, "value assigned to enum tag with inferred tag type", .{}); - } - values += 1; - } - } - if (nonfinal_nonexhaustive) { - return astgen.failNode(nonexhaustive_node, "'_' field of non-exhaustive enum must be last", .{}); - } - break :blk .{ - .total_fields = total_fields, - .values = values, - .decls = decls, - .nonexhaustive_node = nonexhaustive_node, - }; - }; - if (counts.nonexhaustive_node != 0 and container_decl.ast.arg == 0) { - try astgen.appendErrorNodeNotes( - node, - "non-exhaustive enum missing integer tag type", - .{}, - &[_]u32{ - try astgen.errNoteNode( - counts.nonexhaustive_node, - "marked non-exhaustive here", - .{}, - ), - }, - ); - } - // In this case we must generate ZIR code for the tag values, similar to - // how structs are handled above. - const nonexhaustive = counts.nonexhaustive_node != 0; - - const decl_inst = try gz.reserveInstructionIndex(); - - var namespace: Scope.Namespace = .{ - .parent = scope, - .node = node, - .inst = decl_inst, - .declaring_gz = gz, - }; - defer namespace.deinit(gpa); - - // The enum_decl instruction introduces a scope in which the decls of the enum - // are in scope, so that tag values can refer to decls within the enum itself. 
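// For example, the namespace set up below is what lets a tag value name a
// decl of the same enum:
//
//     const E = enum(u8) {
//         const base = 4; // decl, in scope for the tag value exprs below
//         a = base,
//         b,
//     };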
- astgen.advanceSourceCursorToNode(node); - var block_scope: GenZir = .{ - .parent = &namespace.base, - .decl_node_index = node, - .decl_line = gz.decl_line, - .astgen = astgen, - .is_comptime = true, - .instructions = gz.instructions, - .instructions_top = gz.instructions.items.len, - }; - defer block_scope.unstack(); - - _ = try astgen.scanDecls(&namespace, container_decl.ast.members); - namespace.base.tag = .enum_namespace; - - const arg_inst: Zir.Inst.Ref = if (container_decl.ast.arg != 0) - try comptimeExpr(&block_scope, &namespace.base, coerced_type_ri, container_decl.ast.arg) - else - .none; - - const bits_per_field = 1; - const max_field_size = 3; - var wip_members = try WipMembers.init(gpa, &astgen.scratch, @intCast(counts.decls), @intCast(counts.total_fields), bits_per_field, max_field_size); - defer wip_members.deinit(); - - var fields_hasher = std.zig.SrcHasher.init(.{}); - if (container_decl.ast.arg != 0) { - fields_hasher.update(tree.getNodeSource(container_decl.ast.arg)); - } - fields_hasher.update(&.{@intFromBool(nonexhaustive)}); - - var sfba = std.heap.stackFallback(256, astgen.arena); - const sfba_allocator = sfba.get(); - - var duplicate_names = std.AutoArrayHashMap(Zir.NullTerminatedString, std.ArrayListUnmanaged(Ast.TokenIndex)).init(sfba_allocator); - try duplicate_names.ensureTotalCapacity(counts.total_fields); - - // When there aren't errors, use this to avoid a second iteration. - var any_duplicate = false; - - for (container_decl.ast.members) |member_node| { - if (member_node == counts.nonexhaustive_node) - continue; - fields_hasher.update(tree.getNodeSource(member_node)); - namespace.base.tag = .namespace; - var member = switch (try containerMember(&block_scope, &namespace.base, &wip_members, member_node)) { - .decl => continue, - .field => |field| field, - }; - member.convertToNonTupleLike(astgen.tree.nodes); - assert(member.comptime_token == null); - assert(member.ast.type_expr == 0); - assert(member.ast.align_expr == 0); - - const field_name = try astgen.identAsString(member.ast.main_token); - wip_members.appendToField(@intFromEnum(field_name)); - - const gop = try duplicate_names.getOrPut(field_name); - - if (gop.found_existing) { - try gop.value_ptr.append(sfba_allocator, member.ast.main_token); - any_duplicate = true; - } else { - gop.value_ptr.* = .{}; - try gop.value_ptr.append(sfba_allocator, member.ast.main_token); - } - - const doc_comment_index = try astgen.docCommentAsString(member.firstToken()); - wip_members.appendToField(@intFromEnum(doc_comment_index)); - - const have_value = member.ast.value_expr != 0; - wip_members.nextField(bits_per_field, .{have_value}); - - if (have_value) { - if (arg_inst == .none) { - return astgen.failNodeNotes( - node, - "explicitly valued enum missing integer tag type", - .{}, - &[_]u32{ - try astgen.errNoteNode( - member.ast.value_expr, - "tag value specified here", - .{}, - ), - }, - ); - } - namespace.base.tag = .enum_namespace; - const tag_value_inst = try expr(&block_scope, &namespace.base, .{ .rl = .{ .ty = arg_inst } }, member.ast.value_expr); - wip_members.appendToField(@intFromEnum(tag_value_inst)); - } - } - - if (any_duplicate) { - var it = duplicate_names.iterator(); - - while (it.next()) |entry| { - const record = entry.value_ptr.*; - if (record.items.len > 1) { - var error_notes = std.ArrayList(u32).init(astgen.arena); - - for (record.items[1..]) |duplicate| { - try error_notes.append(try astgen.errNoteTok(duplicate, "duplicate field here", .{})); - } - - try error_notes.append(try 
astgen.errNoteNode(node, "enum declared here", .{})); - - try astgen.appendErrorTokNotes( - record.items[0], - "duplicate enum field name", - .{}, - error_notes.items, - ); - } - } - - return error.AnalysisFail; - } - - if (!block_scope.isEmpty()) { - _ = try block_scope.addBreak(.break_inline, decl_inst, .void_value); - } - - var fields_hash: std.zig.SrcHash = undefined; - fields_hasher.final(&fields_hash); - - const body = block_scope.instructionsSlice(); - const body_len = astgen.countBodyLenAfterFixups(body); - - try gz.setEnum(decl_inst, .{ - .src_node = node, - .nonexhaustive = nonexhaustive, - .tag_type = arg_inst, - .body_len = body_len, - .fields_len = @intCast(counts.total_fields), - .decls_len = @intCast(counts.decls), - .fields_hash = fields_hash, - }); - - wip_members.finishBits(bits_per_field); - const decls_slice = wip_members.declsSlice(); - const fields_slice = wip_members.fieldsSlice(); - try astgen.extra.ensureUnusedCapacity(gpa, decls_slice.len + body_len + fields_slice.len); - astgen.extra.appendSliceAssumeCapacity(decls_slice); - astgen.appendBodyWithFixups(body); - astgen.extra.appendSliceAssumeCapacity(fields_slice); - - block_scope.unstack(); - try gz.addNamespaceCaptures(&namespace); - return rvalue(gz, ri, decl_inst.toRef(), node); - }, - .keyword_opaque => { - assert(container_decl.ast.arg == 0); - - const decl_inst = try gz.reserveInstructionIndex(); - - var namespace: Scope.Namespace = .{ - .parent = scope, - .node = node, - .inst = decl_inst, - .declaring_gz = gz, - }; - defer namespace.deinit(gpa); - - astgen.advanceSourceCursorToNode(node); - var block_scope: GenZir = .{ - .parent = &namespace.base, - .decl_node_index = node, - .decl_line = gz.decl_line, - .astgen = astgen, - .is_comptime = true, - .instructions = gz.instructions, - .instructions_top = gz.instructions.items.len, - }; - defer block_scope.unstack(); - - const decl_count = try astgen.scanDecls(&namespace, container_decl.ast.members); - - var wip_members = try WipMembers.init(gpa, &astgen.scratch, decl_count, 0, 0, 0); - defer wip_members.deinit(); - - for (container_decl.ast.members) |member_node| { - const res = try containerMember(&block_scope, &namespace.base, &wip_members, member_node); - if (res == .field) { - return astgen.failNode(member_node, "opaque types cannot have fields", .{}); - } - } - - try gz.setOpaque(decl_inst, .{ - .src_node = node, - .decls_len = decl_count, - }); - - wip_members.finishBits(0); - const decls_slice = wip_members.declsSlice(); - try astgen.extra.ensureUnusedCapacity(gpa, decls_slice.len); - astgen.extra.appendSliceAssumeCapacity(decls_slice); - - block_scope.unstack(); - try gz.addNamespaceCaptures(&namespace); - return rvalue(gz, ri, decl_inst.toRef(), node); - }, - else => unreachable, - } -} - -const ContainerMemberResult = union(enum) { decl, field: Ast.full.ContainerField }; - -fn containerMember( - gz: *GenZir, - scope: *Scope, - wip_members: *WipMembers, - member_node: Ast.Node.Index, -) InnerError!ContainerMemberResult { - const astgen = gz.astgen; - const tree = astgen.tree; - const node_tags = tree.nodes.items(.tag); - const node_datas = tree.nodes.items(.data); - switch (node_tags[member_node]) { - .container_field_init, - .container_field_align, - .container_field, - => return ContainerMemberResult{ .field = tree.fullContainerField(member_node).? 
}, - - .fn_proto, - .fn_proto_multi, - .fn_proto_one, - .fn_proto_simple, - .fn_decl, - => { - var buf: [1]Ast.Node.Index = undefined; - const full = tree.fullFnProto(&buf, member_node).?; - const body = if (node_tags[member_node] == .fn_decl) node_datas[member_node].rhs else 0; - - astgen.fnDecl(gz, scope, wip_members, member_node, body, full) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - error.AnalysisFail => {}, - }; - }, - - .global_var_decl, - .local_var_decl, - .simple_var_decl, - .aligned_var_decl, - => { - astgen.globalVarDecl(gz, scope, wip_members, member_node, tree.fullVarDecl(member_node).?) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - error.AnalysisFail => {}, - }; - }, - - .@"comptime" => { - astgen.comptimeDecl(gz, scope, wip_members, member_node) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - error.AnalysisFail => {}, - }; - }, - .@"usingnamespace" => { - astgen.usingnamespaceDecl(gz, scope, wip_members, member_node) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - error.AnalysisFail => {}, - }; - }, - .test_decl => { - astgen.testDecl(gz, scope, wip_members, member_node) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - error.AnalysisFail => {}, - }; - }, - else => unreachable, - } - return .decl; -} - -fn errorSetDecl(gz: *GenZir, ri: ResultInfo, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - const gpa = astgen.gpa; - const tree = astgen.tree; - const main_tokens = tree.nodes.items(.main_token); - const token_tags = tree.tokens.items(.tag); - - const payload_index = try reserveExtra(astgen, @typeInfo(Zir.Inst.ErrorSetDecl).Struct.fields.len); - var fields_len: usize = 0; - { - var idents: std.AutoHashMapUnmanaged(Zir.NullTerminatedString, Ast.TokenIndex) = .{}; - defer idents.deinit(gpa); - - const error_token = main_tokens[node]; - var tok_i = error_token + 2; - while (true) : (tok_i += 1) { - switch (token_tags[tok_i]) { - .doc_comment, .comma => {}, - .identifier => { - const str_index = try astgen.identAsString(tok_i); - const gop = try idents.getOrPut(gpa, str_index); - if (gop.found_existing) { - const name = try gpa.dupe(u8, mem.span(astgen.nullTerminatedString(str_index))); - defer gpa.free(name); - return astgen.failTokNotes( - tok_i, - "duplicate error set field '{s}'", - .{name}, - &[_]u32{ - try astgen.errNoteTok( - gop.value_ptr.*, - "previous declaration here", - .{}, - ), - }, - ); - } - gop.value_ptr.* = tok_i; - - try astgen.extra.ensureUnusedCapacity(gpa, 2); - astgen.extra.appendAssumeCapacity(@intFromEnum(str_index)); - const doc_comment_index = try astgen.docCommentAsString(tok_i); - astgen.extra.appendAssumeCapacity(@intFromEnum(doc_comment_index)); - fields_len += 1; - }, - .r_brace => break, - else => unreachable, - } - } - } - - setExtra(astgen, payload_index, Zir.Inst.ErrorSetDecl{ - .fields_len = @intCast(fields_len), - }); - const result = try gz.addPlNodePayloadIndex(.error_set_decl, node, payload_index); - return rvalue(gz, ri, result, node); -} - -fn tryExpr( - parent_gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, - operand_node: Ast.Node.Index, -) InnerError!Zir.Inst.Ref { - const astgen = parent_gz.astgen; - - const fn_block = astgen.fn_block orelse { - return astgen.failNode(node, "'try' outside function scope", .{}); - }; - - if (parent_gz.any_defer_node != 0) { - return astgen.failNodeNotes(node, "'try' not allowed inside defer 
expression", .{}, &.{ - try astgen.errNoteNode( - parent_gz.any_defer_node, - "defer expression here", - .{}, - ), - }); - } - - // Ensure debug line/column information is emitted for this try expression. - // Then we will save the line/column so that we can emit another one that goes - // "backwards" because we want to evaluate the operand, but then put the debug - // info back at the try keyword for error return tracing. - if (!parent_gz.is_comptime) { - try emitDbgNode(parent_gz, node); - } - const try_lc = LineColumn{ astgen.source_line - parent_gz.decl_line, astgen.source_column }; - - const operand_ri: ResultInfo = switch (ri.rl) { - .ref, .ref_coerced_ty => .{ .rl = .ref, .ctx = .error_handling_expr }, - else => .{ .rl = .none, .ctx = .error_handling_expr }, - }; - // This could be a pointer or value depending on the `ri` parameter. - const operand = try reachableExpr(parent_gz, scope, operand_ri, operand_node, node); - const block_tag: Zir.Inst.Tag = if (operand_ri.rl == .ref) .try_ptr else .@"try"; - const try_inst = try parent_gz.makeBlockInst(block_tag, node); - try parent_gz.instructions.append(astgen.gpa, try_inst); - - var else_scope = parent_gz.makeSubBlock(scope); - defer else_scope.unstack(); - - const err_tag = switch (ri.rl) { - .ref, .ref_coerced_ty => Zir.Inst.Tag.err_union_code_ptr, - else => Zir.Inst.Tag.err_union_code, - }; - const err_code = try else_scope.addUnNode(err_tag, operand, node); - try genDefers(&else_scope, &fn_block.base, scope, .{ .both = err_code }); - try emitDbgStmt(&else_scope, try_lc); - _ = try else_scope.addUnNode(.ret_node, err_code, node); - - try else_scope.setTryBody(try_inst, operand); - const result = try_inst.toRef(); - switch (ri.rl) { - .ref, .ref_coerced_ty => return result, - else => return rvalue(parent_gz, ri, result, node), - } -} - -fn orelseCatchExpr( - parent_gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, - lhs: Ast.Node.Index, - cond_op: Zir.Inst.Tag, - unwrap_op: Zir.Inst.Tag, - unwrap_code_op: Zir.Inst.Tag, - rhs: Ast.Node.Index, - payload_token: ?Ast.TokenIndex, -) InnerError!Zir.Inst.Ref { - const astgen = parent_gz.astgen; - const tree = astgen.tree; - - const need_rl = astgen.nodes_need_rl.contains(node); - const block_ri: ResultInfo = if (need_rl) ri else .{ - .rl = switch (ri.rl) { - .ptr => .{ .ty = (try ri.rl.resultType(parent_gz, node)).? }, - .inferred_ptr => .none, - else => ri.rl, - }, - .ctx = ri.ctx, - }; - // We need to call `rvalue` to write through to the pointer only if we had a - // result pointer and aren't forwarding it. - const LocTag = @typeInfo(ResultInfo.Loc).Union.tag_type.?; - const need_result_rvalue = @as(LocTag, block_ri.rl) != @as(LocTag, ri.rl); - - const do_err_trace = astgen.fn_block != null and (cond_op == .is_non_err or cond_op == .is_non_err_ptr); - - var block_scope = parent_gz.makeSubBlock(scope); - block_scope.setBreakResultInfo(block_ri); - defer block_scope.unstack(); - - const operand_ri: ResultInfo = switch (block_scope.break_result_info.rl) { - .ref, .ref_coerced_ty => .{ .rl = .ref, .ctx = if (do_err_trace) .error_handling_expr else .none }, - else => .{ .rl = .none, .ctx = if (do_err_trace) .error_handling_expr else .none }, - }; - // This could be a pointer or value depending on the `operand_ri` parameter. - // We cannot use `block_scope.break_result_info` because that has the bare - // type, whereas this expression has the optional type. Later we make - // up for this fact by calling rvalue on the else branch. 
-    const operand = try reachableExpr(&block_scope, &block_scope.base, operand_ri, lhs, rhs);
-    const cond = try block_scope.addUnNode(cond_op, operand, node);
-    const condbr = try block_scope.addCondBr(.condbr, node);
-
-    const block = try parent_gz.makeBlockInst(.block, node);
-    try block_scope.setBlockBody(block);
-    // block_scope unstacked now, can add new instructions to parent_gz
-    try parent_gz.instructions.append(astgen.gpa, block);
-
-    var then_scope = block_scope.makeSubBlock(scope);
-    defer then_scope.unstack();
-
-    // This could be a pointer or value depending on `unwrap_op`.
-    const unwrapped_payload = try then_scope.addUnNode(unwrap_op, operand, node);
-    const then_result = switch (ri.rl) {
-        .ref, .ref_coerced_ty => unwrapped_payload,
-        else => try rvalue(&then_scope, block_scope.break_result_info, unwrapped_payload, node),
-    };
-    _ = try then_scope.addBreakWithSrcNode(.@"break", block, then_result, node);
-
-    var else_scope = block_scope.makeSubBlock(scope);
-    defer else_scope.unstack();
-
-    // We know that the operand (almost certainly) modified the error return trace,
-    // so signal to Sema that it should save the new index for restoring later.
-    if (do_err_trace and nodeMayAppendToErrorTrace(tree, lhs))
-        _ = try else_scope.addSaveErrRetIndex(.always);
-
-    var err_val_scope: Scope.LocalVal = undefined;
-    const else_sub_scope = blk: {
-        const payload = payload_token orelse break :blk &else_scope.base;
-        const err_str = tree.tokenSlice(payload);
-        if (mem.eql(u8, err_str, "_")) {
-            return astgen.failTok(payload, "discard of error capture; omit it instead", .{});
-        }
-        const err_name = try astgen.identAsString(payload);
-
-        try astgen.detectLocalShadowing(scope, err_name, payload, err_str, .capture);
-
-        err_val_scope = .{
-            .parent = &else_scope.base,
-            .gen_zir = &else_scope,
-            .name = err_name,
-            .inst = try else_scope.addUnNode(unwrap_code_op, operand, node),
-            .token_src = payload,
-            .id_cat = .capture,
-        };
-        break :blk &err_val_scope.base;
-    };
-
-    const else_result = try expr(&else_scope, else_sub_scope, block_scope.break_result_info, rhs);
-    if (!else_scope.endsWithNoReturn()) {
-        // As our last action before the break, "pop" the error trace if needed
-        if (do_err_trace)
-            try restoreErrRetIndex(&else_scope, .{ .block = block }, block_scope.break_result_info, rhs, else_result);
-
-        _ = try else_scope.addBreakWithSrcNode(.@"break", block, else_result, rhs);
-    }
-    try checkUsed(parent_gz, &else_scope.base, else_sub_scope);
-
-    try setCondBrPayload(condbr, cond, &then_scope, &else_scope);
-
-    if (need_result_rvalue) {
-        return rvalue(parent_gz, ri, block.toRef(), node);
-    } else {
-        return block.toRef();
-    }
-}
-
-/// Return whether the identifier names of two tokens are equal.
-/// Resolves @"" tokens. In theory this could be done without allocating,
-/// but this implementation allocates when the @"" form is used.
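// Zig lets keywords and arbitrary strings serve as identifiers via @"..."
// syntax, which is why name comparisons go through identifierTokenString
// rather than raw token slices. For example, both spellings below name the
// same identifier, so the following function reports them equal:
//
//     const @"foo" = 1;
//     const x = foo; // `foo` and `@"foo"` resolve to the same name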
-fn tokenIdentEql(astgen: *AstGen, token1: Ast.TokenIndex, token2: Ast.TokenIndex) !bool { - const ident_name_1 = try astgen.identifierTokenString(token1); - const ident_name_2 = try astgen.identifierTokenString(token2); - return mem.eql(u8, ident_name_1, ident_name_2); -} - -fn fieldAccess( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, -) InnerError!Zir.Inst.Ref { - switch (ri.rl) { - .ref, .ref_coerced_ty => return addFieldAccess(.field_ptr, gz, scope, .{ .rl = .ref }, node), - else => { - const access = try addFieldAccess(.field_val, gz, scope, .{ .rl = .none }, node); - return rvalue(gz, ri, access, node); - }, - } -} - -fn addFieldAccess( - tag: Zir.Inst.Tag, - gz: *GenZir, - scope: *Scope, - lhs_ri: ResultInfo, - node: Ast.Node.Index, -) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - const tree = astgen.tree; - const main_tokens = tree.nodes.items(.main_token); - const node_datas = tree.nodes.items(.data); - - const object_node = node_datas[node].lhs; - const dot_token = main_tokens[node]; - const field_ident = dot_token + 1; - const str_index = try astgen.identAsString(field_ident); - const lhs = try expr(gz, scope, lhs_ri, object_node); - - const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); - try emitDbgStmt(gz, cursor); - - return gz.addPlNode(tag, node, Zir.Inst.Field{ - .lhs = lhs, - .field_name_start = str_index, - }); -} - -fn arrayAccess( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, -) InnerError!Zir.Inst.Ref { - const tree = gz.astgen.tree; - const node_datas = tree.nodes.items(.data); - switch (ri.rl) { - .ref, .ref_coerced_ty => { - const lhs = try expr(gz, scope, .{ .rl = .ref }, node_datas[node].lhs); - - const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); - - const rhs = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, node_datas[node].rhs); - try emitDbgStmt(gz, cursor); - - return gz.addPlNode(.elem_ptr_node, node, Zir.Inst.Bin{ .lhs = lhs, .rhs = rhs }); - }, - else => { - const lhs = try expr(gz, scope, .{ .rl = .none }, node_datas[node].lhs); - - const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); - - const rhs = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, node_datas[node].rhs); - try emitDbgStmt(gz, cursor); - - return rvalue(gz, ri, try gz.addPlNode(.elem_val_node, node, Zir.Inst.Bin{ .lhs = lhs, .rhs = rhs }), node); - }, - } -} - -fn simpleBinOp( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, - op_inst_tag: Zir.Inst.Tag, -) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - const tree = astgen.tree; - const node_datas = tree.nodes.items(.data); - - if (op_inst_tag == .cmp_neq or op_inst_tag == .cmp_eq) { - const node_tags = tree.nodes.items(.tag); - const str = if (op_inst_tag == .cmp_eq) "==" else "!="; - if (node_tags[node_datas[node].lhs] == .string_literal or - node_tags[node_datas[node].rhs] == .string_literal) - return astgen.failNode(node, "cannot compare strings with {s}", .{str}); - } - - const lhs = try reachableExpr(gz, scope, .{ .rl = .none }, node_datas[node].lhs, node); - const cursor = switch (op_inst_tag) { - .add, .sub, .mul, .div, .mod_rem => maybeAdvanceSourceCursorToMainToken(gz, node), - else => undefined, - }; - const rhs = try reachableExpr(gz, scope, .{ .rl = .none }, node_datas[node].rhs, node); - - switch (op_inst_tag) { - .add, .sub, .mul, .div, .mod_rem => { - try emitDbgStmt(gz, cursor); - }, - else => {}, - } - const result = try gz.addPlNode(op_inst_tag, node, 
Zir.Inst.Bin{ .lhs = lhs, .rhs = rhs }); - return rvalue(gz, ri, result, node); -} - -fn simpleStrTok( - gz: *GenZir, - ri: ResultInfo, - ident_token: Ast.TokenIndex, - node: Ast.Node.Index, - op_inst_tag: Zir.Inst.Tag, -) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - const str_index = try astgen.identAsString(ident_token); - const result = try gz.addStrTok(op_inst_tag, str_index, ident_token); - return rvalue(gz, ri, result, node); -} - -fn boolBinOp( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, - zir_tag: Zir.Inst.Tag, -) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - const tree = astgen.tree; - const node_datas = tree.nodes.items(.data); - - const lhs = try expr(gz, scope, coerced_bool_ri, node_datas[node].lhs); - const bool_br = (try gz.addPlNodePayloadIndex(zir_tag, node, undefined)).toIndex().?; - - var rhs_scope = gz.makeSubBlock(scope); - defer rhs_scope.unstack(); - const rhs = try expr(&rhs_scope, &rhs_scope.base, coerced_bool_ri, node_datas[node].rhs); - if (!gz.refIsNoReturn(rhs)) { - _ = try rhs_scope.addBreakWithSrcNode(.break_inline, bool_br, rhs, node_datas[node].rhs); - } - try rhs_scope.setBoolBrBody(bool_br, lhs); - - const block_ref = bool_br.toRef(); - return rvalue(gz, ri, block_ref, node); -} - -fn ifExpr( - parent_gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, - if_full: Ast.full.If, -) InnerError!Zir.Inst.Ref { - const astgen = parent_gz.astgen; - const tree = astgen.tree; - const token_tags = tree.tokens.items(.tag); - - const do_err_trace = astgen.fn_block != null and if_full.error_token != null; - - const need_rl = astgen.nodes_need_rl.contains(node); - const block_ri: ResultInfo = if (need_rl) ri else .{ - .rl = switch (ri.rl) { - .ptr => .{ .ty = (try ri.rl.resultType(parent_gz, node)).? }, - .inferred_ptr => .none, - else => ri.rl, - }, - .ctx = ri.ctx, - }; - // We need to call `rvalue` to write through to the pointer only if we had a - // result pointer and aren't forwarding it. 
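- // Comparing only the result-location tags suffices here: the tags differ
- // exactly when the rewrite above changed them (e.g. `.ptr` to `.ty`), in
- // which case the block result is written back through `rvalue` below.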
- const LocTag = @typeInfo(ResultInfo.Loc).Union.tag_type.?; - const need_result_rvalue = @as(LocTag, block_ri.rl) != @as(LocTag, ri.rl); - - var block_scope = parent_gz.makeSubBlock(scope); - block_scope.setBreakResultInfo(block_ri); - defer block_scope.unstack(); - - const payload_is_ref = if (if_full.payload_token) |payload_token| - token_tags[payload_token] == .asterisk - else - false; - - try emitDbgNode(parent_gz, if_full.ast.cond_expr); - const cond: struct { - inst: Zir.Inst.Ref, - bool_bit: Zir.Inst.Ref, - } = c: { - if (if_full.error_token) |_| { - const cond_ri: ResultInfo = .{ .rl = if (payload_is_ref) .ref else .none, .ctx = .error_handling_expr }; - const err_union = try expr(&block_scope, &block_scope.base, cond_ri, if_full.ast.cond_expr); - const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_err_ptr else .is_non_err; - break :c .{ - .inst = err_union, - .bool_bit = try block_scope.addUnNode(tag, err_union, if_full.ast.cond_expr), - }; - } else if (if_full.payload_token) |_| { - const cond_ri: ResultInfo = .{ .rl = if (payload_is_ref) .ref else .none }; - const optional = try expr(&block_scope, &block_scope.base, cond_ri, if_full.ast.cond_expr); - const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_null_ptr else .is_non_null; - break :c .{ - .inst = optional, - .bool_bit = try block_scope.addUnNode(tag, optional, if_full.ast.cond_expr), - }; - } else { - const cond = try expr(&block_scope, &block_scope.base, coerced_bool_ri, if_full.ast.cond_expr); - break :c .{ - .inst = cond, - .bool_bit = cond, - }; - } - }; - - const condbr = try block_scope.addCondBr(.condbr, node); - - const block = try parent_gz.makeBlockInst(.block, node); - try block_scope.setBlockBody(block); - // block_scope unstacked now, can add new instructions to parent_gz - try parent_gz.instructions.append(astgen.gpa, block); - - var then_scope = parent_gz.makeSubBlock(scope); - defer then_scope.unstack(); - - var payload_val_scope: Scope.LocalVal = undefined; - - const then_node = if_full.ast.then_expr; - const then_sub_scope = s: { - if (if_full.error_token != null) { - if (if_full.payload_token) |payload_token| { - const tag: Zir.Inst.Tag = if (payload_is_ref) - .err_union_payload_unsafe_ptr - else - .err_union_payload_unsafe; - const payload_inst = try then_scope.addUnNode(tag, cond.inst, then_node); - const token_name_index = payload_token + @intFromBool(payload_is_ref); - const ident_name = try astgen.identAsString(token_name_index); - const token_name_str = tree.tokenSlice(token_name_index); - if (mem.eql(u8, "_", token_name_str)) - break :s &then_scope.base; - try astgen.detectLocalShadowing(&then_scope.base, ident_name, token_name_index, token_name_str, .capture); - payload_val_scope = .{ - .parent = &then_scope.base, - .gen_zir = &then_scope, - .name = ident_name, - .inst = payload_inst, - .token_src = token_name_index, - .id_cat = .capture, - }; - try then_scope.addDbgVar(.dbg_var_val, ident_name, payload_inst); - break :s &payload_val_scope.base; - } else { - _ = try then_scope.addUnNode(.ensure_err_union_payload_void, cond.inst, node); - break :s &then_scope.base; - } - } else if (if_full.payload_token) |payload_token| { - const ident_token = if (payload_is_ref) payload_token + 1 else payload_token; - const tag: Zir.Inst.Tag = if (payload_is_ref) - .optional_payload_unsafe_ptr - else - .optional_payload_unsafe; - const ident_bytes = tree.tokenSlice(ident_token); - if (mem.eql(u8, "_", ident_bytes)) - break :s &then_scope.base; - const payload_inst = try then_scope.addUnNode(tag, cond.inst, 
then_node); - const ident_name = try astgen.identAsString(ident_token); - try astgen.detectLocalShadowing(&then_scope.base, ident_name, ident_token, ident_bytes, .capture); - payload_val_scope = .{ - .parent = &then_scope.base, - .gen_zir = &then_scope, - .name = ident_name, - .inst = payload_inst, - .token_src = ident_token, - .id_cat = .capture, - }; - try then_scope.addDbgVar(.dbg_var_val, ident_name, payload_inst); - break :s &payload_val_scope.base; - } else { - break :s &then_scope.base; - } - }; - - const then_result = try expr(&then_scope, then_sub_scope, block_scope.break_result_info, then_node); - try checkUsed(parent_gz, &then_scope.base, then_sub_scope); - if (!then_scope.endsWithNoReturn()) { - _ = try then_scope.addBreakWithSrcNode(.@"break", block, then_result, then_node); - } - - var else_scope = parent_gz.makeSubBlock(scope); - defer else_scope.unstack(); - - // We know that the operand (almost certainly) modified the error return trace, - // so signal to Sema that it should save the new index for restoring later. - if (do_err_trace and nodeMayAppendToErrorTrace(tree, if_full.ast.cond_expr)) - _ = try else_scope.addSaveErrRetIndex(.always); - - const else_node = if_full.ast.else_expr; - if (else_node != 0) { - const sub_scope = s: { - if (if_full.error_token) |error_token| { - const tag: Zir.Inst.Tag = if (payload_is_ref) - .err_union_code_ptr - else - .err_union_code; - const payload_inst = try else_scope.addUnNode(tag, cond.inst, if_full.ast.cond_expr); - const ident_name = try astgen.identAsString(error_token); - const error_token_str = tree.tokenSlice(error_token); - if (mem.eql(u8, "_", error_token_str)) - break :s &else_scope.base; - try astgen.detectLocalShadowing(&else_scope.base, ident_name, error_token, error_token_str, .capture); - payload_val_scope = .{ - .parent = &else_scope.base, - .gen_zir = &else_scope, - .name = ident_name, - .inst = payload_inst, - .token_src = error_token, - .id_cat = .capture, - }; - try else_scope.addDbgVar(.dbg_var_val, ident_name, payload_inst); - break :s &payload_val_scope.base; - } else { - break :s &else_scope.base; - } - }; - const else_result = try expr(&else_scope, sub_scope, block_scope.break_result_info, else_node); - if (!else_scope.endsWithNoReturn()) { - // As our last action before the break, "pop" the error trace if needed - if (do_err_trace) - try restoreErrRetIndex(&else_scope, .{ .block = block }, block_scope.break_result_info, else_node, else_result); - _ = try else_scope.addBreakWithSrcNode(.@"break", block, else_result, else_node); - } - try checkUsed(parent_gz, &else_scope.base, sub_scope); - } else { - const result = try rvalue(&else_scope, ri, .void_value, node); - _ = try else_scope.addBreak(.@"break", block, result); - } - - try setCondBrPayload(condbr, cond.bool_bit, &then_scope, &else_scope); - - if (need_result_rvalue) { - return rvalue(parent_gz, ri, block.toRef(), node); - } else { - return block.toRef(); - } -} - -/// Supports `else_scope` stacked on `then_scope`. Unstacks `else_scope` then `then_scope`. 
-fn setCondBrPayload( - condbr: Zir.Inst.Index, - cond: Zir.Inst.Ref, - then_scope: *GenZir, - else_scope: *GenZir, -) !void { - defer then_scope.unstack(); - defer else_scope.unstack(); - const astgen = then_scope.astgen; - const then_body = then_scope.instructionsSliceUpto(else_scope); - const else_body = else_scope.instructionsSlice(); - const then_body_len = astgen.countBodyLenAfterFixups(then_body); - const else_body_len = astgen.countBodyLenAfterFixups(else_body); - try astgen.extra.ensureUnusedCapacity( - astgen.gpa, - @typeInfo(Zir.Inst.CondBr).Struct.fields.len + then_body_len + else_body_len, - ); - - const zir_datas = astgen.instructions.items(.data); - zir_datas[@intFromEnum(condbr)].pl_node.payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.CondBr{ - .condition = cond, - .then_body_len = then_body_len, - .else_body_len = else_body_len, - }); - astgen.appendBodyWithFixups(then_body); - astgen.appendBodyWithFixups(else_body); -} - -fn whileExpr( - parent_gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, - while_full: Ast.full.While, - is_statement: bool, -) InnerError!Zir.Inst.Ref { - const astgen = parent_gz.astgen; - const tree = astgen.tree; - const token_tags = tree.tokens.items(.tag); - - const need_rl = astgen.nodes_need_rl.contains(node); - const block_ri: ResultInfo = if (need_rl) ri else .{ - .rl = switch (ri.rl) { - .ptr => .{ .ty = (try ri.rl.resultType(parent_gz, node)).? }, - .inferred_ptr => .none, - else => ri.rl, - }, - .ctx = ri.ctx, - }; - // We need to call `rvalue` to write through to the pointer only if we had a - // result pointer and aren't forwarding it. - const LocTag = @typeInfo(ResultInfo.Loc).Union.tag_type.?; - const need_result_rvalue = @as(LocTag, block_ri.rl) != @as(LocTag, ri.rl); - - if (while_full.label_token) |label_token| { - try astgen.checkLabelRedefinition(scope, label_token); - } - - const is_inline = while_full.inline_token != null; - if (parent_gz.is_comptime and is_inline) { - return astgen.failTok(while_full.inline_token.?, "redundant inline keyword in comptime scope", .{}); - } - const loop_tag: Zir.Inst.Tag = if (is_inline) .block_inline else .loop; - const loop_block = try parent_gz.makeBlockInst(loop_tag, node); - try parent_gz.instructions.append(astgen.gpa, loop_block); - - var loop_scope = parent_gz.makeSubBlock(scope); - loop_scope.is_inline = is_inline; - loop_scope.setBreakResultInfo(block_ri); - defer loop_scope.unstack(); - - var cond_scope = parent_gz.makeSubBlock(&loop_scope.base); - defer cond_scope.unstack(); - - const payload_is_ref = if (while_full.payload_token) |payload_token| - token_tags[payload_token] == .asterisk - else - false; - - try emitDbgNode(parent_gz, while_full.ast.cond_expr); - const cond: struct { - inst: Zir.Inst.Ref, - bool_bit: Zir.Inst.Ref, - } = c: { - if (while_full.error_token) |_| { - const cond_ri: ResultInfo = .{ .rl = if (payload_is_ref) .ref else .none }; - const err_union = try expr(&cond_scope, &cond_scope.base, cond_ri, while_full.ast.cond_expr); - const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_err_ptr else .is_non_err; - break :c .{ - .inst = err_union, - .bool_bit = try cond_scope.addUnNode(tag, err_union, while_full.ast.cond_expr), - }; - } else if (while_full.payload_token) |_| { - const cond_ri: ResultInfo = .{ .rl = if (payload_is_ref) .ref else .none }; - const optional = try expr(&cond_scope, &cond_scope.base, cond_ri, while_full.ast.cond_expr); - const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_null_ptr else .is_non_null; - break :c .{ - 
.inst = optional, - .bool_bit = try cond_scope.addUnNode(tag, optional, while_full.ast.cond_expr), - }; - } else { - const cond = try expr(&cond_scope, &cond_scope.base, coerced_bool_ri, while_full.ast.cond_expr); - break :c .{ - .inst = cond, - .bool_bit = cond, - }; - } - }; - - const condbr_tag: Zir.Inst.Tag = if (is_inline) .condbr_inline else .condbr; - const condbr = try cond_scope.addCondBr(condbr_tag, node); - const block_tag: Zir.Inst.Tag = if (is_inline) .block_inline else .block; - const cond_block = try loop_scope.makeBlockInst(block_tag, node); - try cond_scope.setBlockBody(cond_block); - // cond_scope unstacked now, can add new instructions to loop_scope - try loop_scope.instructions.append(astgen.gpa, cond_block); - - // make scope now but don't stack on parent_gz until loop_scope - // gets unstacked after cont_expr is emitted and added below - var then_scope = parent_gz.makeSubBlock(&cond_scope.base); - then_scope.instructions_top = GenZir.unstacked_top; - defer then_scope.unstack(); - - var dbg_var_name: Zir.NullTerminatedString = .empty; - var dbg_var_inst: Zir.Inst.Ref = undefined; - var opt_payload_inst: Zir.Inst.OptionalIndex = .none; - var payload_val_scope: Scope.LocalVal = undefined; - const then_sub_scope = s: { - if (while_full.error_token != null) { - if (while_full.payload_token) |payload_token| { - const tag: Zir.Inst.Tag = if (payload_is_ref) - .err_union_payload_unsafe_ptr - else - .err_union_payload_unsafe; - // will add this instruction to then_scope.instructions below - const payload_inst = try then_scope.makeUnNode(tag, cond.inst, while_full.ast.cond_expr); - opt_payload_inst = payload_inst.toOptional(); - const ident_token = payload_token + @intFromBool(payload_is_ref); - const ident_bytes = tree.tokenSlice(ident_token); - if (mem.eql(u8, "_", ident_bytes)) - break :s &then_scope.base; - const ident_name = try astgen.identAsString(ident_token); - try astgen.detectLocalShadowing(&then_scope.base, ident_name, ident_token, ident_bytes, .capture); - payload_val_scope = .{ - .parent = &then_scope.base, - .gen_zir = &then_scope, - .name = ident_name, - .inst = payload_inst.toRef(), - .token_src = ident_token, - .id_cat = .capture, - }; - dbg_var_name = ident_name; - dbg_var_inst = payload_inst.toRef(); - break :s &payload_val_scope.base; - } else { - _ = try then_scope.addUnNode(.ensure_err_union_payload_void, cond.inst, node); - break :s &then_scope.base; - } - } else if (while_full.payload_token) |payload_token| { - const ident_token = if (payload_is_ref) payload_token + 1 else payload_token; - const tag: Zir.Inst.Tag = if (payload_is_ref) - .optional_payload_unsafe_ptr - else - .optional_payload_unsafe; - // will add this instruction to then_scope.instructions below - const payload_inst = try then_scope.makeUnNode(tag, cond.inst, while_full.ast.cond_expr); - opt_payload_inst = payload_inst.toOptional(); - const ident_name = try astgen.identAsString(ident_token); - const ident_bytes = tree.tokenSlice(ident_token); - if (mem.eql(u8, "_", ident_bytes)) - break :s &then_scope.base; - try astgen.detectLocalShadowing(&then_scope.base, ident_name, ident_token, ident_bytes, .capture); - payload_val_scope = .{ - .parent = &then_scope.base, - .gen_zir = &then_scope, - .name = ident_name, - .inst = payload_inst.toRef(), - .token_src = ident_token, - .id_cat = .capture, - }; - dbg_var_name = ident_name; - dbg_var_inst = payload_inst.toRef(); - break :s &payload_val_scope.base; - } else { - break :s &then_scope.base; - } - }; - - var continue_scope = 
parent_gz.makeSubBlock(then_sub_scope); - continue_scope.instructions_top = GenZir.unstacked_top; - defer continue_scope.unstack(); - const continue_block = try then_scope.makeBlockInst(block_tag, node); - - const repeat_tag: Zir.Inst.Tag = if (is_inline) .repeat_inline else .repeat; - _ = try loop_scope.addNode(repeat_tag, node); - - try loop_scope.setBlockBody(loop_block); - loop_scope.break_block = loop_block.toOptional(); - loop_scope.continue_block = continue_block.toOptional(); - if (while_full.label_token) |label_token| { - loop_scope.label = .{ - .token = label_token, - .block_inst = loop_block, - }; - } - - // done adding instructions to loop_scope, can now stack then_scope - then_scope.instructions_top = then_scope.instructions.items.len; - - const then_node = while_full.ast.then_expr; - if (opt_payload_inst.unwrap()) |payload_inst| { - try then_scope.instructions.append(astgen.gpa, payload_inst); - } - if (dbg_var_name != .empty) try then_scope.addDbgVar(.dbg_var_val, dbg_var_name, dbg_var_inst); - try then_scope.instructions.append(astgen.gpa, continue_block); - // This code could be improved to avoid emitting the continue expr when there - // are no jumps to it. This happens when the last statement of a while body is noreturn - // and there are no `continue` statements. - // Tracking issue: https://github.com/ziglang/zig/issues/9185 - if (while_full.ast.cont_expr != 0) { - _ = try unusedResultExpr(&then_scope, then_sub_scope, while_full.ast.cont_expr); - } - - continue_scope.instructions_top = continue_scope.instructions.items.len; - _ = try unusedResultExpr(&continue_scope, &continue_scope.base, then_node); - try checkUsed(parent_gz, &then_scope.base, then_sub_scope); - const break_tag: Zir.Inst.Tag = if (is_inline) .break_inline else .@"break"; - if (!continue_scope.endsWithNoReturn()) { - _ = try continue_scope.addBreak(break_tag, continue_block, .void_value); - } - try continue_scope.setBlockBody(continue_block); - _ = try then_scope.addBreak(break_tag, cond_block, .void_value); - - var else_scope = parent_gz.makeSubBlock(&cond_scope.base); - defer else_scope.unstack(); - - const else_node = while_full.ast.else_expr; - if (else_node != 0) { - const sub_scope = s: { - if (while_full.error_token) |error_token| { - const tag: Zir.Inst.Tag = if (payload_is_ref) - .err_union_code_ptr - else - .err_union_code; - const else_payload_inst = try else_scope.addUnNode(tag, cond.inst, while_full.ast.cond_expr); - const ident_name = try astgen.identAsString(error_token); - const ident_bytes = tree.tokenSlice(error_token); - if (mem.eql(u8, ident_bytes, "_")) - break :s &else_scope.base; - try astgen.detectLocalShadowing(&else_scope.base, ident_name, error_token, ident_bytes, .capture); - payload_val_scope = .{ - .parent = &else_scope.base, - .gen_zir = &else_scope, - .name = ident_name, - .inst = else_payload_inst, - .token_src = error_token, - .id_cat = .capture, - }; - try else_scope.addDbgVar(.dbg_var_val, ident_name, else_payload_inst); - break :s &payload_val_scope.base; - } else { - break :s &else_scope.base; - } - }; - // Remove the continue block and break block so that `continue` and `break` - // control flow apply to outer loops; not this one. 
- loop_scope.continue_block = .none; - loop_scope.break_block = .none; - const else_result = try expr(&else_scope, sub_scope, loop_scope.break_result_info, else_node); - if (is_statement) { - _ = try addEnsureResult(&else_scope, else_result, else_node); - } - - try checkUsed(parent_gz, &else_scope.base, sub_scope); - if (!else_scope.endsWithNoReturn()) { - _ = try else_scope.addBreakWithSrcNode(break_tag, loop_block, else_result, else_node); - } - } else { - const result = try rvalue(&else_scope, ri, .void_value, node); - _ = try else_scope.addBreak(break_tag, loop_block, result); - } - - if (loop_scope.label) |some| { - if (!some.used) { - try astgen.appendErrorTok(some.token, "unused while loop label", .{}); - } - } - - try setCondBrPayload(condbr, cond.bool_bit, &then_scope, &else_scope); - - const result = if (need_result_rvalue) - try rvalue(parent_gz, ri, loop_block.toRef(), node) - else - loop_block.toRef(); - - if (is_statement) { - _ = try parent_gz.addUnNode(.ensure_result_used, result, node); - } - - return result; -} - -fn forExpr( - parent_gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, - for_full: Ast.full.For, - is_statement: bool, -) InnerError!Zir.Inst.Ref { - const astgen = parent_gz.astgen; - - if (for_full.label_token) |label_token| { - try astgen.checkLabelRedefinition(scope, label_token); - } - - const need_rl = astgen.nodes_need_rl.contains(node); - const block_ri: ResultInfo = if (need_rl) ri else .{ - .rl = switch (ri.rl) { - .ptr => .{ .ty = (try ri.rl.resultType(parent_gz, node)).? }, - .inferred_ptr => .none, - else => ri.rl, - }, - .ctx = ri.ctx, - }; - // We need to call `rvalue` to write through to the pointer only if we had a - // result pointer and aren't forwarding it. - const LocTag = @typeInfo(ResultInfo.Loc).Union.tag_type.?; - const need_result_rvalue = @as(LocTag, block_ri.rl) != @as(LocTag, ri.rl); - - const is_inline = for_full.inline_token != null; - if (parent_gz.is_comptime and is_inline) { - return astgen.failTok(for_full.inline_token.?, "redundant inline keyword in comptime scope", .{}); - } - const tree = astgen.tree; - const token_tags = tree.tokens.items(.tag); - const node_tags = tree.nodes.items(.tag); - const node_data = tree.nodes.items(.data); - const gpa = astgen.gpa; - - // For counters, this is the start value; for indexables, this is the base - // pointer that can be used with elem_ptr and similar instructions. - // Special value `none` means that this is a counter and its start value is - // zero, indicating that the main index counter can be used directly. - const indexables = try gpa.alloc(Zir.Inst.Ref, for_full.ast.inputs.len); - defer gpa.free(indexables); - // elements of this array can be `none`, indicating no length check. - const lens = try gpa.alloc(Zir.Inst.Ref, for_full.ast.inputs.len); - defer gpa.free(lens); - - // We will use a single zero-based counter no matter how many indexables there are. 
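- // A single `usize` counter drives every input; e.g. `for (slice, 0..) |x, i|`
- // loads the counter once per iteration and applies it to both inputs.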
- const index_ptr = blk: { - const alloc_tag: Zir.Inst.Tag = if (is_inline) .alloc_comptime_mut else .alloc; - const index_ptr = try parent_gz.addUnNode(alloc_tag, .usize_type, node); - // initialize to zero - _ = try parent_gz.addPlNode(.store_node, node, Zir.Inst.Bin{ - .lhs = index_ptr, - .rhs = .zero_usize, - }); - break :blk index_ptr; - }; - - var any_len_checks = false; - - { - var capture_token = for_full.payload_token; - for (for_full.ast.inputs, indexables, lens) |input, *indexable_ref, *len_ref| { - const capture_is_ref = token_tags[capture_token] == .asterisk; - const ident_tok = capture_token + @intFromBool(capture_is_ref); - const is_discard = mem.eql(u8, tree.tokenSlice(ident_tok), "_"); - - if (is_discard and capture_is_ref) { - return astgen.failTok(capture_token, "pointer modifier invalid on discard", .{}); - } - // Skip over the comma, and on to the next capture (or the ending pipe character). - capture_token = ident_tok + 2; - - try emitDbgNode(parent_gz, input); - if (node_tags[input] == .for_range) { - if (capture_is_ref) { - return astgen.failTok(ident_tok, "cannot capture reference to range", .{}); - } - const start_node = node_data[input].lhs; - const start_val = try expr(parent_gz, scope, .{ .rl = .{ .ty = .usize_type } }, start_node); - - const end_node = node_data[input].rhs; - const end_val = if (end_node != 0) - try expr(parent_gz, scope, .{ .rl = .{ .ty = .usize_type } }, node_data[input].rhs) - else - .none; - - if (end_val == .none and is_discard) { - return astgen.failTok(ident_tok, "discard of unbounded counter", .{}); - } - - const start_is_zero = nodeIsTriviallyZero(tree, start_node); - const range_len = if (end_val == .none or start_is_zero) - end_val - else - try parent_gz.addPlNode(.sub, input, Zir.Inst.Bin{ - .lhs = end_val, - .rhs = start_val, - }); - - any_len_checks = any_len_checks or range_len != .none; - indexable_ref.* = if (start_is_zero) .none else start_val; - len_ref.* = range_len; - } else { - const indexable = try expr(parent_gz, scope, .{ .rl = .none }, input); - - any_len_checks = true; - indexable_ref.* = indexable; - len_ref.* = indexable; - } - } - } - - if (!any_len_checks) { - return astgen.failNode(node, "unbounded for loop", .{}); - } - - // We use a dedicated ZIR instruction to assert the lengths to assist with - // nicer error reporting as well as fewer ZIR bytes emitted. - const len: Zir.Inst.Ref = len: { - const lens_len: u32 = @intCast(lens.len); - try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.MultiOp).Struct.fields.len + lens_len); - const len = try parent_gz.addPlNode(.for_len, node, Zir.Inst.MultiOp{ - .operands_len = lens_len, - }); - appendRefsAssumeCapacity(astgen, lens); - break :len len; - }; - - const loop_tag: Zir.Inst.Tag = if (is_inline) .block_inline else .loop; - const loop_block = try parent_gz.makeBlockInst(loop_tag, node); - try parent_gz.instructions.append(gpa, loop_block); - - var loop_scope = parent_gz.makeSubBlock(scope); - loop_scope.is_inline = is_inline; - loop_scope.setBreakResultInfo(block_ri); - defer loop_scope.unstack(); - - // We need to finish loop_scope later once we have the deferred refs from then_scope. However, the - // load must be removed from instructions in the meantime or it appears to be part of parent_gz. - const index = try loop_scope.addUnNode(.load, index_ptr, node); - _ = loop_scope.instructions.pop(); - - var cond_scope = parent_gz.makeSubBlock(&loop_scope.base); - defer cond_scope.unstack(); - - // Check the condition. 
- const cond = try cond_scope.addPlNode(.cmp_lt, node, Zir.Inst.Bin{ - .lhs = index, - .rhs = len, - }); - - const condbr_tag: Zir.Inst.Tag = if (is_inline) .condbr_inline else .condbr; - const condbr = try cond_scope.addCondBr(condbr_tag, node); - const block_tag: Zir.Inst.Tag = if (is_inline) .block_inline else .block; - const cond_block = try loop_scope.makeBlockInst(block_tag, node); - try cond_scope.setBlockBody(cond_block); - - loop_scope.break_block = loop_block.toOptional(); - loop_scope.continue_block = cond_block.toOptional(); - if (for_full.label_token) |label_token| { - loop_scope.label = .{ - .token = label_token, - .block_inst = loop_block, - }; - } - - const then_node = for_full.ast.then_expr; - var then_scope = parent_gz.makeSubBlock(&cond_scope.base); - defer then_scope.unstack(); - - const capture_scopes = try gpa.alloc(Scope.LocalVal, for_full.ast.inputs.len); - defer gpa.free(capture_scopes); - - const then_sub_scope = blk: { - var capture_token = for_full.payload_token; - var capture_sub_scope: *Scope = &then_scope.base; - for (for_full.ast.inputs, indexables, capture_scopes) |input, indexable_ref, *capture_scope| { - const capture_is_ref = token_tags[capture_token] == .asterisk; - const ident_tok = capture_token + @intFromBool(capture_is_ref); - const capture_name = tree.tokenSlice(ident_tok); - // Skip over the comma, and on to the next capture (or the ending pipe character). - capture_token = ident_tok + 2; - - if (mem.eql(u8, capture_name, "_")) continue; - - const name_str_index = try astgen.identAsString(ident_tok); - try astgen.detectLocalShadowing(capture_sub_scope, name_str_index, ident_tok, capture_name, .capture); - - const capture_inst = inst: { - const is_counter = node_tags[input] == .for_range; - - if (indexable_ref == .none) { - // Special case: the main index can be used directly. - assert(is_counter); - assert(!capture_is_ref); - break :inst index; - } - - // For counters, we add the index variable to the start value; for - // indexables, we use it as an element index. This is so similar - // that they can share the same code paths, branching only on the - // ZIR tag. 
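- // The two flags pack into a u2: bit 1 is `capture_is_ref` and bit 0 is
- // `is_counter`, selecting between `.elem_val`, `.add`, and `.elem_ptr` below.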
- const switch_cond = (@as(u2, @intFromBool(capture_is_ref)) << 1) | @intFromBool(is_counter); - const tag: Zir.Inst.Tag = switch (switch_cond) { - 0b00 => .elem_val, - 0b01 => .add, - 0b10 => .elem_ptr, - 0b11 => unreachable, // compile error emitted already - }; - break :inst try then_scope.addPlNode(tag, input, Zir.Inst.Bin{ - .lhs = indexable_ref, - .rhs = index, - }); - }; - - capture_scope.* = .{ - .parent = capture_sub_scope, - .gen_zir = &then_scope, - .name = name_str_index, - .inst = capture_inst, - .token_src = ident_tok, - .id_cat = .capture, - }; - - try then_scope.addDbgVar(.dbg_var_val, name_str_index, capture_inst); - capture_sub_scope = &capture_scope.base; - } - - break :blk capture_sub_scope; - }; - - const then_result = try expr(&then_scope, then_sub_scope, .{ .rl = .none }, then_node); - _ = try addEnsureResult(&then_scope, then_result, then_node); - - try checkUsed(parent_gz, &then_scope.base, then_sub_scope); - - const break_tag: Zir.Inst.Tag = if (is_inline) .break_inline else .@"break"; - - _ = try then_scope.addBreak(break_tag, cond_block, .void_value); - - var else_scope = parent_gz.makeSubBlock(&cond_scope.base); - defer else_scope.unstack(); - - const else_node = for_full.ast.else_expr; - if (else_node != 0) { - const sub_scope = &else_scope.base; - // Remove the continue block and break block so that `continue` and `break` - // control flow apply to outer loops; not this one. - loop_scope.continue_block = .none; - loop_scope.break_block = .none; - const else_result = try expr(&else_scope, sub_scope, loop_scope.break_result_info, else_node); - if (is_statement) { - _ = try addEnsureResult(&else_scope, else_result, else_node); - } - if (!else_scope.endsWithNoReturn()) { - _ = try else_scope.addBreakWithSrcNode(break_tag, loop_block, else_result, else_node); - } - } else { - const result = try rvalue(&else_scope, ri, .void_value, node); - _ = try else_scope.addBreak(break_tag, loop_block, result); - } - - if (loop_scope.label) |some| { - if (!some.used) { - try astgen.appendErrorTok(some.token, "unused for loop label", .{}); - } - } - - try setCondBrPayload(condbr, cond, &then_scope, &else_scope); - - // then_block and else_block unstacked now, can resurrect loop_scope to finally finish it - { - loop_scope.instructions_top = loop_scope.instructions.items.len; - try loop_scope.instructions.appendSlice(gpa, &.{ index.toIndex().?, cond_block }); - - // Increment the index variable. 
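- // `.add_unsafe` is used because the condition above only reaches this
- // increment when `index < len`, so `index + 1` cannot overflow.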
- const index_plus_one = try loop_scope.addPlNode(.add_unsafe, node, Zir.Inst.Bin{ - .lhs = index, - .rhs = .one_usize, - }); - _ = try loop_scope.addPlNode(.store_node, node, Zir.Inst.Bin{ - .lhs = index_ptr, - .rhs = index_plus_one, - }); - const repeat_tag: Zir.Inst.Tag = if (is_inline) .repeat_inline else .repeat; - _ = try loop_scope.addNode(repeat_tag, node); - - try loop_scope.setBlockBody(loop_block); - } - - const result = if (need_result_rvalue) - try rvalue(parent_gz, ri, loop_block.toRef(), node) - else - loop_block.toRef(); - - if (is_statement) { - _ = try parent_gz.addUnNode(.ensure_result_used, result, node); - } - return result; -} - -fn switchExprErrUnion( - parent_gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - catch_or_if_node: Ast.Node.Index, - node_ty: enum { @"catch", @"if" }, -) InnerError!Zir.Inst.Ref { - const astgen = parent_gz.astgen; - const gpa = astgen.gpa; - const tree = astgen.tree; - const node_datas = tree.nodes.items(.data); - const node_tags = tree.nodes.items(.tag); - const main_tokens = tree.nodes.items(.main_token); - const token_tags = tree.tokens.items(.tag); - - const if_full = switch (node_ty) { - .@"catch" => undefined, - .@"if" => tree.fullIf(catch_or_if_node).?, - }; - - const switch_node, const operand_node, const error_payload = switch (node_ty) { - .@"catch" => .{ - node_datas[catch_or_if_node].rhs, - node_datas[catch_or_if_node].lhs, - main_tokens[catch_or_if_node] + 2, - }, - .@"if" => .{ - if_full.ast.else_expr, - if_full.ast.cond_expr, - if_full.error_token.?, - }, - }; - assert(node_tags[switch_node] == .@"switch" or node_tags[switch_node] == .switch_comma); - - const do_err_trace = astgen.fn_block != null; - - const extra = tree.extraData(node_datas[switch_node].rhs, Ast.Node.SubRange); - const case_nodes = tree.extra_data[extra.start..extra.end]; - - const need_rl = astgen.nodes_need_rl.contains(catch_or_if_node); - const block_ri: ResultInfo = if (need_rl) ri else .{ - .rl = switch (ri.rl) { - .ptr => .{ .ty = (try ri.rl.resultType(parent_gz, catch_or_if_node)).? }, - .inferred_ptr => .none, - else => ri.rl, - }, - .ctx = ri.ctx, - }; - - const payload_is_ref = node_ty == .@"if" and - if_full.payload_token != null and token_tags[if_full.payload_token.?] == .asterisk; - - // We need to call `rvalue` to write through to the pointer only if we had a - // result pointer and aren't forwarding it. 
- const LocTag = @typeInfo(ResultInfo.Loc).Union.tag_type.?; - const need_result_rvalue = @as(LocTag, block_ri.rl) != @as(LocTag, ri.rl); - var scalar_cases_len: u32 = 0; - var multi_cases_len: u32 = 0; - var inline_cases_len: u32 = 0; - var has_else = false; - var else_node: Ast.Node.Index = 0; - var else_src: ?Ast.TokenIndex = null; - for (case_nodes) |case_node| { - const case = tree.fullSwitchCase(case_node).?; - - if (case.ast.values.len == 0) { - const case_src = case.ast.arrow_token - 1; - if (else_src) |src| { - return astgen.failTokNotes( - case_src, - "multiple else prongs in switch expression", - .{}, - &[_]u32{ - try astgen.errNoteTok( - src, - "previous else prong here", - .{}, - ), - }, - ); - } - has_else = true; - else_node = case_node; - else_src = case_src; - continue; - } else if (case.ast.values.len == 1 and - node_tags[case.ast.values[0]] == .identifier and - mem.eql(u8, tree.tokenSlice(main_tokens[case.ast.values[0]]), "_")) - { - const case_src = case.ast.arrow_token - 1; - return astgen.failTokNotes( - case_src, - "'_' prong is not allowed when switching on errors", - .{}, - &[_]u32{ - try astgen.errNoteTok( - case_src, - "consider using 'else'", - .{}, - ), - }, - ); - } - - for (case.ast.values) |val| { - if (node_tags[val] == .string_literal) - return astgen.failNode(val, "cannot switch on strings", .{}); - } - - if (case.ast.values.len == 1 and node_tags[case.ast.values[0]] != .switch_range) { - scalar_cases_len += 1; - } else { - multi_cases_len += 1; - } - if (case.inline_token != null) { - inline_cases_len += 1; - } - } - - const operand_ri: ResultInfo = .{ - .rl = if (payload_is_ref) .ref else .none, - .ctx = .error_handling_expr, - }; - - astgen.advanceSourceCursorToNode(operand_node); - const operand_lc = LineColumn{ astgen.source_line - parent_gz.decl_line, astgen.source_column }; - - const raw_operand = try reachableExpr(parent_gz, scope, operand_ri, operand_node, switch_node); - const item_ri: ResultInfo = .{ .rl = .none }; - - // This contains the data that goes into the `extra` array for the SwitchBlockErrUnion, except - // the first cases_nodes.len slots are a table that indexes payloads later in the array, - // with the non-error and else case indices coming first, then scalar_cases_len indexes, then - // multi_cases_len indexes - const payloads = &astgen.scratch; - const scratch_top = astgen.scratch.items.len; - const case_table_start = scratch_top; - const scalar_case_table = case_table_start + 1 + @intFromBool(has_else); - const multi_case_table = scalar_case_table + scalar_cases_len; - const case_table_end = multi_case_table + multi_cases_len; - - try astgen.scratch.resize(gpa, case_table_end); - defer astgen.scratch.items.len = scratch_top; - - var block_scope = parent_gz.makeSubBlock(scope); - // block_scope not used for collecting instructions - block_scope.instructions_top = GenZir.unstacked_top; - block_scope.setBreakResultInfo(block_ri); - - // Sema expects a dbg_stmt immediately before switch_block_err_union - try emitDbgStmtForceCurrentIndex(parent_gz, operand_lc); - // This gets added to the parent block later, after the item expressions. - const switch_block = try parent_gz.makeBlockInst(.switch_block_err_union, switch_node); - - // We re-use this same scope for all cases, including the special prong, if any. 
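- // The scope starts unstacked (`instructions_top = unstacked_top`), so it
- // collects no instructions until each case temporarily stacks it on parent_gz.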
- var case_scope = parent_gz.makeSubBlock(&block_scope.base); - case_scope.instructions_top = GenZir.unstacked_top; - - { - const body_len_index: u32 = @intCast(payloads.items.len); - payloads.items[case_table_start] = body_len_index; - try payloads.resize(gpa, body_len_index + 1); // body_len - - case_scope.instructions_top = parent_gz.instructions.items.len; - defer case_scope.unstack(); - - const unwrap_payload_tag: Zir.Inst.Tag = if (payload_is_ref) - .err_union_payload_unsafe_ptr - else - .err_union_payload_unsafe; - - const unwrapped_payload = try case_scope.addUnNode( - unwrap_payload_tag, - raw_operand, - catch_or_if_node, - ); - - switch (node_ty) { - .@"catch" => { - const case_result = switch (ri.rl) { - .ref, .ref_coerced_ty => unwrapped_payload, - else => try rvalue( - &case_scope, - block_scope.break_result_info, - unwrapped_payload, - catch_or_if_node, - ), - }; - _ = try case_scope.addBreakWithSrcNode( - .@"break", - switch_block, - case_result, - catch_or_if_node, - ); - }, - .@"if" => { - var payload_val_scope: Scope.LocalVal = undefined; - - const then_node = if_full.ast.then_expr; - const then_sub_scope = s: { - assert(if_full.error_token != null); - if (if_full.payload_token) |payload_token| { - const token_name_index = payload_token + @intFromBool(payload_is_ref); - const ident_name = try astgen.identAsString(token_name_index); - const token_name_str = tree.tokenSlice(token_name_index); - if (mem.eql(u8, "_", token_name_str)) - break :s &case_scope.base; - try astgen.detectLocalShadowing( - &case_scope.base, - ident_name, - token_name_index, - token_name_str, - .capture, - ); - payload_val_scope = .{ - .parent = &case_scope.base, - .gen_zir = &case_scope, - .name = ident_name, - .inst = unwrapped_payload, - .token_src = token_name_index, - .id_cat = .capture, - }; - try case_scope.addDbgVar(.dbg_var_val, ident_name, unwrapped_payload); - break :s &payload_val_scope.base; - } else { - _ = try case_scope.addUnNode( - .ensure_err_union_payload_void, - raw_operand, - catch_or_if_node, - ); - break :s &case_scope.base; - } - }; - const then_result = try expr( - &case_scope, - then_sub_scope, - block_scope.break_result_info, - then_node, - ); - try checkUsed(parent_gz, &case_scope.base, then_sub_scope); - if (!case_scope.endsWithNoReturn()) { - _ = try case_scope.addBreakWithSrcNode( - .@"break", - switch_block, - then_result, - then_node, - ); - } - }, - } - - const case_slice = case_scope.instructionsSlice(); - // Since we use the switch_block_err_union instruction itself to refer - // to the capture, which will not be added to the child block, we need - // to handle ref_table manually. 
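- // `ref_table` entries chain from one instruction to its `ref` fixup, so
- // walking the chain counts how many extra instructions the body needs.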
- const refs_len = refs: { - var n: usize = 0; - var check_inst = switch_block; - while (astgen.ref_table.get(check_inst)) |ref_inst| { - n += 1; - check_inst = ref_inst; - } - break :refs n; - }; - const body_len = refs_len + astgen.countBodyLenAfterFixups(case_slice); - try payloads.ensureUnusedCapacity(gpa, body_len); - const capture: Zir.Inst.SwitchBlock.ProngInfo.Capture = switch (node_ty) { - .@"catch" => .none, - .@"if" => if (if_full.payload_token == null) - .none - else if (payload_is_ref) - .by_ref - else - .by_val, - }; - payloads.items[body_len_index] = @bitCast(Zir.Inst.SwitchBlock.ProngInfo{ - .body_len = @intCast(body_len), - .capture = capture, - .is_inline = false, - .has_tag_capture = false, - }); - if (astgen.ref_table.fetchRemove(switch_block)) |kv| { - appendPossiblyRefdBodyInst(astgen, payloads, kv.value); - } - appendBodyWithFixupsArrayList(astgen, payloads, case_slice); - } - - const err_name = blk: { - const err_str = tree.tokenSlice(error_payload); - if (mem.eql(u8, err_str, "_")) { - return astgen.failTok(error_payload, "discard of error capture; omit it instead", .{}); - } - const err_name = try astgen.identAsString(error_payload); - try astgen.detectLocalShadowing(scope, err_name, error_payload, err_str, .capture); - - break :blk err_name; - }; - - // allocate a shared dummy instruction for the error capture - const err_inst = err_inst: { - const inst: Zir.Inst.Index = @enumFromInt(astgen.instructions.len); - try astgen.instructions.append(astgen.gpa, .{ - .tag = .extended, - .data = .{ .extended = .{ - .opcode = .value_placeholder, - .small = undefined, - .operand = undefined, - } }, - }); - break :err_inst inst; - }; - - // In this pass we generate all the item and prong expressions for error cases. - var multi_case_index: u32 = 0; - var scalar_case_index: u32 = 0; - var any_uses_err_capture = false; - for (case_nodes) |case_node| { - const case = tree.fullSwitchCase(case_node).?; - - const is_multi_case = case.ast.values.len > 1 or - (case.ast.values.len == 1 and node_tags[case.ast.values[0]] == .switch_range); - - var dbg_var_name: Zir.NullTerminatedString = .empty; - var dbg_var_inst: Zir.Inst.Ref = undefined; - var err_scope: Scope.LocalVal = undefined; - var capture_scope: Scope.LocalVal = undefined; - - const sub_scope = blk: { - err_scope = .{ - .parent = &case_scope.base, - .gen_zir = &case_scope, - .name = err_name, - .inst = err_inst.toRef(), - .token_src = error_payload, - .id_cat = .capture, - }; - - const capture_token = case.payload_token orelse break :blk &err_scope.base; - if (token_tags[capture_token] != .identifier) { - return astgen.failTok(capture_token + 1, "error set cannot be captured by reference", .{}); - } - - const capture_slice = tree.tokenSlice(capture_token); - if (mem.eql(u8, capture_slice, "_")) { - return astgen.failTok(capture_token, "discard of error capture; omit it instead", .{}); - } - const tag_name = try astgen.identAsString(capture_token); - try astgen.detectLocalShadowing(&case_scope.base, tag_name, capture_token, capture_slice, .capture); - - capture_scope = .{ - .parent = &case_scope.base, - .gen_zir = &case_scope, - .name = tag_name, - .inst = switch_block.toRef(), - .token_src = capture_token, - .id_cat = .capture, - }; - dbg_var_name = tag_name; - dbg_var_inst = switch_block.toRef(); - - err_scope.parent = &capture_scope.base; - - break :blk &err_scope.base; - }; - - const header_index: u32 = @intCast(payloads.items.len); - const body_len_index = if (is_multi_case) blk: { - payloads.items[multi_case_table + 
multi_case_index] = header_index; - multi_case_index += 1; - try payloads.resize(gpa, header_index + 3); // items_len, ranges_len, body_len - - // items - var items_len: u32 = 0; - for (case.ast.values) |item_node| { - if (node_tags[item_node] == .switch_range) continue; - items_len += 1; - - const item_inst = try comptimeExpr(parent_gz, scope, item_ri, item_node); - try payloads.append(gpa, @intFromEnum(item_inst)); - } - - // ranges - var ranges_len: u32 = 0; - for (case.ast.values) |range| { - if (node_tags[range] != .switch_range) continue; - ranges_len += 1; - - const first = try comptimeExpr(parent_gz, scope, item_ri, node_datas[range].lhs); - const last = try comptimeExpr(parent_gz, scope, item_ri, node_datas[range].rhs); - try payloads.appendSlice(gpa, &[_]u32{ - @intFromEnum(first), @intFromEnum(last), - }); - } - - payloads.items[header_index] = items_len; - payloads.items[header_index + 1] = ranges_len; - break :blk header_index + 2; - } else if (case_node == else_node) blk: { - payloads.items[case_table_start + 1] = header_index; - try payloads.resize(gpa, header_index + 1); // body_len - break :blk header_index; - } else blk: { - payloads.items[scalar_case_table + scalar_case_index] = header_index; - scalar_case_index += 1; - try payloads.resize(gpa, header_index + 2); // item, body_len - const item_node = case.ast.values[0]; - const item_inst = try comptimeExpr(parent_gz, scope, item_ri, item_node); - payloads.items[header_index] = @intFromEnum(item_inst); - break :blk header_index + 1; - }; - - { - // temporarily stack case_scope on parent_gz - case_scope.instructions_top = parent_gz.instructions.items.len; - defer case_scope.unstack(); - - if (do_err_trace and nodeMayAppendToErrorTrace(tree, operand_node)) - _ = try case_scope.addSaveErrRetIndex(.always); - - if (dbg_var_name != .empty) { - try case_scope.addDbgVar(.dbg_var_val, dbg_var_name, dbg_var_inst); - } - - const target_expr_node = case.ast.target_expr; - const case_result = try expr(&case_scope, sub_scope, block_scope.break_result_info, target_expr_node); - // check capture_scope, not err_scope to avoid false positive unused error capture - try checkUsed(parent_gz, &case_scope.base, err_scope.parent); - const uses_err = err_scope.used != 0 or err_scope.discarded != 0; - if (uses_err) { - try case_scope.addDbgVar(.dbg_var_val, err_name, err_inst.toRef()); - any_uses_err_capture = true; - } - - if (!parent_gz.refIsNoReturn(case_result)) { - if (do_err_trace) - try restoreErrRetIndex( - &case_scope, - .{ .block = switch_block }, - block_scope.break_result_info, - target_expr_node, - case_result, - ); - - _ = try case_scope.addBreakWithSrcNode(.@"break", switch_block, case_result, target_expr_node); - } - - const case_slice = case_scope.instructionsSlice(); - // Since we use the switch_block_err_union instruction itself to refer - // to the capture, which will not be added to the child block, we need - // to handle ref_table manually. 
- const refs_len = refs: { - var n: usize = 0; - var check_inst = switch_block; - while (astgen.ref_table.get(check_inst)) |ref_inst| { - n += 1; - check_inst = ref_inst; - } - if (uses_err) { - check_inst = err_inst; - while (astgen.ref_table.get(check_inst)) |ref_inst| { - n += 1; - check_inst = ref_inst; - } - } - break :refs n; - }; - const body_len = refs_len + astgen.countBodyLenAfterFixups(case_slice); - try payloads.ensureUnusedCapacity(gpa, body_len); - payloads.items[body_len_index] = @bitCast(Zir.Inst.SwitchBlock.ProngInfo{ - .body_len = @intCast(body_len), - .capture = if (case.payload_token != null) .by_val else .none, - .is_inline = case.inline_token != null, - .has_tag_capture = false, - }); - if (astgen.ref_table.fetchRemove(switch_block)) |kv| { - appendPossiblyRefdBodyInst(astgen, payloads, kv.value); - } - if (uses_err) { - if (astgen.ref_table.fetchRemove(err_inst)) |kv| { - appendPossiblyRefdBodyInst(astgen, payloads, kv.value); - } - } - appendBodyWithFixupsArrayList(astgen, payloads, case_slice); - } - } - // Now that the item expressions are generated we can add this. - try parent_gz.instructions.append(gpa, switch_block); - - try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.SwitchBlockErrUnion).Struct.fields.len + - @intFromBool(multi_cases_len != 0) + - payloads.items.len - case_table_end + - (case_table_end - case_table_start) * @typeInfo(Zir.Inst.As).Struct.fields.len); - - const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.SwitchBlockErrUnion{ - .operand = raw_operand, - .bits = Zir.Inst.SwitchBlockErrUnion.Bits{ - .has_multi_cases = multi_cases_len != 0, - .has_else = has_else, - .scalar_cases_len = @intCast(scalar_cases_len), - .any_uses_err_capture = any_uses_err_capture, - .payload_is_ref = payload_is_ref, - }, - .main_src_node_offset = parent_gz.nodeIndexToRelative(catch_or_if_node), - }); - - if (multi_cases_len != 0) { - astgen.extra.appendAssumeCapacity(multi_cases_len); - } - - if (any_uses_err_capture) { - astgen.extra.appendAssumeCapacity(@intFromEnum(err_inst)); - } - - const zir_datas = astgen.instructions.items(.data); - zir_datas[@intFromEnum(switch_block)].pl_node.payload_index = payload_index; - - for (payloads.items[case_table_start..case_table_end], 0..) 
|start_index, i| { - var body_len_index = start_index; - var end_index = start_index; - const table_index = case_table_start + i; - if (table_index < scalar_case_table) { - end_index += 1; - } else if (table_index < multi_case_table) { - body_len_index += 1; - end_index += 2; - } else { - body_len_index += 2; - const items_len = payloads.items[start_index]; - const ranges_len = payloads.items[start_index + 1]; - end_index += 3 + items_len + 2 * ranges_len; - } - const prong_info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(payloads.items[body_len_index]); - end_index += prong_info.body_len; - astgen.extra.appendSliceAssumeCapacity(payloads.items[start_index..end_index]); - } - - if (need_result_rvalue) { - return rvalue(parent_gz, ri, switch_block.toRef(), switch_node); - } else { - return switch_block.toRef(); - } -} - -fn switchExpr( - parent_gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - switch_node: Ast.Node.Index, -) InnerError!Zir.Inst.Ref { - const astgen = parent_gz.astgen; - const gpa = astgen.gpa; - const tree = astgen.tree; - const node_datas = tree.nodes.items(.data); - const node_tags = tree.nodes.items(.tag); - const main_tokens = tree.nodes.items(.main_token); - const token_tags = tree.tokens.items(.tag); - const operand_node = node_datas[switch_node].lhs; - const extra = tree.extraData(node_datas[switch_node].rhs, Ast.Node.SubRange); - const case_nodes = tree.extra_data[extra.start..extra.end]; - - const need_rl = astgen.nodes_need_rl.contains(switch_node); - const block_ri: ResultInfo = if (need_rl) ri else .{ - .rl = switch (ri.rl) { - .ptr => .{ .ty = (try ri.rl.resultType(parent_gz, switch_node)).? }, - .inferred_ptr => .none, - else => ri.rl, - }, - .ctx = ri.ctx, - }; - // We need to call `rvalue` to write through to the pointer only if we had a - // result pointer and aren't forwarding it. - const LocTag = @typeInfo(ResultInfo.Loc).Union.tag_type.?; - const need_result_rvalue = @as(LocTag, block_ri.rl) != @as(LocTag, ri.rl); - - // We perform two passes over the AST. This first pass is to collect information - // for the following variables, make note of the special prong AST node index, - // and bail out with a compile error if there are multiple special prongs present. - var any_payload_is_ref = false; - var any_has_tag_capture = false; - var scalar_cases_len: u32 = 0; - var multi_cases_len: u32 = 0; - var inline_cases_len: u32 = 0; - var special_prong: Zir.SpecialProng = .none; - var special_node: Ast.Node.Index = 0; - var else_src: ?Ast.TokenIndex = null; - var underscore_src: ?Ast.TokenIndex = null; - for (case_nodes) |case_node| { - const case = tree.fullSwitchCase(case_node).?; - if (case.payload_token) |payload_token| { - const ident = if (token_tags[payload_token] == .asterisk) blk: { - any_payload_is_ref = true; - break :blk payload_token + 1; - } else payload_token; - if (token_tags[ident + 1] == .comma) { - any_has_tag_capture = true; - } - } - // Check for else/`_` prong. 
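- // A prong with no values is an `else` prong; a lone `_` identifier is the
- // under prong. The checks below reject duplicates and the else/`_` mix.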
- if (case.ast.values.len == 0) { - const case_src = case.ast.arrow_token - 1; - if (else_src) |src| { - return astgen.failTokNotes( - case_src, - "multiple else prongs in switch expression", - .{}, - &[_]u32{ - try astgen.errNoteTok( - src, - "previous else prong here", - .{}, - ), - }, - ); - } else if (underscore_src) |some_underscore| { - return astgen.failNodeNotes( - switch_node, - "else and '_' prong in switch expression", - .{}, - &[_]u32{ - try astgen.errNoteTok( - case_src, - "else prong here", - .{}, - ), - try astgen.errNoteTok( - some_underscore, - "'_' prong here", - .{}, - ), - }, - ); - } - special_node = case_node; - special_prong = .@"else"; - else_src = case_src; - continue; - } else if (case.ast.values.len == 1 and - node_tags[case.ast.values[0]] == .identifier and - mem.eql(u8, tree.tokenSlice(main_tokens[case.ast.values[0]]), "_")) - { - const case_src = case.ast.arrow_token - 1; - if (underscore_src) |src| { - return astgen.failTokNotes( - case_src, - "multiple '_' prongs in switch expression", - .{}, - &[_]u32{ - try astgen.errNoteTok( - src, - "previous '_' prong here", - .{}, - ), - }, - ); - } else if (else_src) |some_else| { - return astgen.failNodeNotes( - switch_node, - "else and '_' prong in switch expression", - .{}, - &[_]u32{ - try astgen.errNoteTok( - some_else, - "else prong here", - .{}, - ), - try astgen.errNoteTok( - case_src, - "'_' prong here", - .{}, - ), - }, - ); - } - if (case.inline_token != null) { - return astgen.failTok(case_src, "cannot inline '_' prong", .{}); - } - special_node = case_node; - special_prong = .under; - underscore_src = case_src; - continue; - } - - for (case.ast.values) |val| { - if (node_tags[val] == .string_literal) - return astgen.failNode(val, "cannot switch on strings", .{}); - } - - if (case.ast.values.len == 1 and node_tags[case.ast.values[0]] != .switch_range) { - scalar_cases_len += 1; - } else { - multi_cases_len += 1; - } - if (case.inline_token != null) { - inline_cases_len += 1; - } - } - - const operand_ri: ResultInfo = .{ .rl = if (any_payload_is_ref) .ref else .none }; - - astgen.advanceSourceCursorToNode(operand_node); - const operand_lc = LineColumn{ astgen.source_line - parent_gz.decl_line, astgen.source_column }; - - const raw_operand = try expr(parent_gz, scope, operand_ri, operand_node); - const item_ri: ResultInfo = .{ .rl = .none }; - - // This contains the data that goes into the `extra` array for the SwitchBlock/SwitchBlockMulti, - // except the first cases_nodes.len slots are a table that indexes payloads later in the array, with - // the special case index coming first, then scalar_case_len indexes, then multi_cases_len indexes - const payloads = &astgen.scratch; - const scratch_top = astgen.scratch.items.len; - const case_table_start = scratch_top; - const scalar_case_table = case_table_start + @intFromBool(special_prong != .none); - const multi_case_table = scalar_case_table + scalar_cases_len; - const case_table_end = multi_case_table + multi_cases_len; - try astgen.scratch.resize(gpa, case_table_end); - defer astgen.scratch.items.len = scratch_top; - - var block_scope = parent_gz.makeSubBlock(scope); - // block_scope not used for collecting instructions - block_scope.instructions_top = GenZir.unstacked_top; - block_scope.setBreakResultInfo(block_ri); - - // Sema expects a dbg_stmt immediately before switch_block(_ref) - try emitDbgStmtForceCurrentIndex(parent_gz, operand_lc); - // This gets added to the parent block later, after the item expressions. 
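- // A by-reference capture anywhere forces the `_ref` variant, since the
- // operand was evaluated as a pointer in that case.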
- const switch_tag: Zir.Inst.Tag = if (any_payload_is_ref) .switch_block_ref else .switch_block; - const switch_block = try parent_gz.makeBlockInst(switch_tag, switch_node); - - // We re-use this same scope for all cases, including the special prong, if any. - var case_scope = parent_gz.makeSubBlock(&block_scope.base); - case_scope.instructions_top = GenZir.unstacked_top; - - // If any prong has an inline tag capture, allocate a shared dummy instruction for it - const tag_inst = if (any_has_tag_capture) tag_inst: { - const inst: Zir.Inst.Index = @enumFromInt(astgen.instructions.len); - try astgen.instructions.append(astgen.gpa, .{ - .tag = .extended, - .data = .{ .extended = .{ - .opcode = .value_placeholder, - .small = undefined, - .operand = undefined, - } }, - }); - break :tag_inst inst; - } else undefined; - - // In this pass we generate all the item and prong expressions. - var multi_case_index: u32 = 0; - var scalar_case_index: u32 = 0; - for (case_nodes) |case_node| { - const case = tree.fullSwitchCase(case_node).?; - - const is_multi_case = case.ast.values.len > 1 or - (case.ast.values.len == 1 and node_tags[case.ast.values[0]] == .switch_range); - - var dbg_var_name: Zir.NullTerminatedString = .empty; - var dbg_var_inst: Zir.Inst.Ref = undefined; - var dbg_var_tag_name: Zir.NullTerminatedString = .empty; - var dbg_var_tag_inst: Zir.Inst.Ref = undefined; - var has_tag_capture = false; - var capture_val_scope: Scope.LocalVal = undefined; - var tag_scope: Scope.LocalVal = undefined; - - var capture: Zir.Inst.SwitchBlock.ProngInfo.Capture = .none; - - const sub_scope = blk: { - const payload_token = case.payload_token orelse break :blk &case_scope.base; - const ident = if (token_tags[payload_token] == .asterisk) - payload_token + 1 - else - payload_token; - - const is_ptr = ident != payload_token; - capture = if (is_ptr) .by_ref else .by_val; - - const ident_slice = tree.tokenSlice(ident); - var payload_sub_scope: *Scope = undefined; - if (mem.eql(u8, ident_slice, "_")) { - if (is_ptr) { - return astgen.failTok(payload_token, "pointer modifier invalid on discard", .{}); - } - payload_sub_scope = &case_scope.base; - } else { - const capture_name = try astgen.identAsString(ident); - try astgen.detectLocalShadowing(&case_scope.base, capture_name, ident, ident_slice, .capture); - capture_val_scope = .{ - .parent = &case_scope.base, - .gen_zir = &case_scope, - .name = capture_name, - .inst = switch_block.toRef(), - .token_src = ident, - .id_cat = .capture, - }; - dbg_var_name = capture_name; - dbg_var_inst = switch_block.toRef(); - payload_sub_scope = &capture_val_scope.base; - } - - const tag_token = if (token_tags[ident + 1] == .comma) - ident + 2 - else - break :blk payload_sub_scope; - const tag_slice = tree.tokenSlice(tag_token); - if (mem.eql(u8, tag_slice, "_")) { - return astgen.failTok(tag_token, "discard of tag capture; omit it instead", .{}); - } else if (case.inline_token == null) { - return astgen.failTok(tag_token, "tag capture on non-inline prong", .{}); - } - const tag_name = try astgen.identAsString(tag_token); - try astgen.detectLocalShadowing(payload_sub_scope, tag_name, tag_token, tag_slice, .@"switch tag capture"); - - assert(any_has_tag_capture); - has_tag_capture = true; - - tag_scope = .{ - .parent = payload_sub_scope, - .gen_zir = &case_scope, - .name = tag_name, - .inst = tag_inst.toRef(), - .token_src = tag_token, - .id_cat = .@"switch tag capture", - }; - dbg_var_tag_name = tag_name; - dbg_var_tag_inst = tag_inst.toRef(); - break :blk &tag_scope.base; - }; - - 
const header_index: u32 = @intCast(payloads.items.len); - const body_len_index = if (is_multi_case) blk: { - payloads.items[multi_case_table + multi_case_index] = header_index; - multi_case_index += 1; - try payloads.resize(gpa, header_index + 3); // items_len, ranges_len, body_len - - // items - var items_len: u32 = 0; - for (case.ast.values) |item_node| { - if (node_tags[item_node] == .switch_range) continue; - items_len += 1; - - const item_inst = try comptimeExpr(parent_gz, scope, item_ri, item_node); - try payloads.append(gpa, @intFromEnum(item_inst)); - } - - // ranges - var ranges_len: u32 = 0; - for (case.ast.values) |range| { - if (node_tags[range] != .switch_range) continue; - ranges_len += 1; - - const first = try comptimeExpr(parent_gz, scope, item_ri, node_datas[range].lhs); - const last = try comptimeExpr(parent_gz, scope, item_ri, node_datas[range].rhs); - try payloads.appendSlice(gpa, &[_]u32{ - @intFromEnum(first), @intFromEnum(last), - }); - } - - payloads.items[header_index] = items_len; - payloads.items[header_index + 1] = ranges_len; - break :blk header_index + 2; - } else if (case_node == special_node) blk: { - payloads.items[case_table_start] = header_index; - try payloads.resize(gpa, header_index + 1); // body_len - break :blk header_index; - } else blk: { - payloads.items[scalar_case_table + scalar_case_index] = header_index; - scalar_case_index += 1; - try payloads.resize(gpa, header_index + 2); // item, body_len - const item_node = case.ast.values[0]; - const item_inst = try comptimeExpr(parent_gz, scope, item_ri, item_node); - payloads.items[header_index] = @intFromEnum(item_inst); - break :blk header_index + 1; - }; - - { - // temporarily stack case_scope on parent_gz - case_scope.instructions_top = parent_gz.instructions.items.len; - defer case_scope.unstack(); - - if (dbg_var_name != .empty) { - try case_scope.addDbgVar(.dbg_var_val, dbg_var_name, dbg_var_inst); - } - if (dbg_var_tag_name != .empty) { - try case_scope.addDbgVar(.dbg_var_val, dbg_var_tag_name, dbg_var_tag_inst); - } - const target_expr_node = case.ast.target_expr; - const case_result = try expr(&case_scope, sub_scope, block_scope.break_result_info, target_expr_node); - try checkUsed(parent_gz, &case_scope.base, sub_scope); - if (!parent_gz.refIsNoReturn(case_result)) { - _ = try case_scope.addBreakWithSrcNode(.@"break", switch_block, case_result, target_expr_node); - } - - const case_slice = case_scope.instructionsSlice(); - // Since we use the switch_block instruction itself to refer to the - // capture, which will not be added to the child block, we need to - // handle ref_table manually, and the same for the inline tag - // capture instruction. 
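Reading the header-writing code earlier in this hunk, the scratch payload for a multi case such as `2, 3, 5...9 => ...` ends up laid out as follows (a sketch derived from the code, not stated in the diff):

    header: items_len=2, ranges_len=1, body_len
    items:  ref(2), ref(3)
    ranges: ref(5), ref(9)
    body:   prong instructions, appended after ref fixups
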
- const refs_len = refs: { - var n: usize = 0; - var check_inst = switch_block; - while (astgen.ref_table.get(check_inst)) |ref_inst| { - n += 1; - check_inst = ref_inst; - } - if (has_tag_capture) { - check_inst = tag_inst; - while (astgen.ref_table.get(check_inst)) |ref_inst| { - n += 1; - check_inst = ref_inst; - } - } - break :refs n; - }; - const body_len = refs_len + astgen.countBodyLenAfterFixups(case_slice); - try payloads.ensureUnusedCapacity(gpa, body_len); - payloads.items[body_len_index] = @bitCast(Zir.Inst.SwitchBlock.ProngInfo{ - .body_len = @intCast(body_len), - .capture = capture, - .is_inline = case.inline_token != null, - .has_tag_capture = has_tag_capture, - }); - if (astgen.ref_table.fetchRemove(switch_block)) |kv| { - appendPossiblyRefdBodyInst(astgen, payloads, kv.value); - } - if (has_tag_capture) { - if (astgen.ref_table.fetchRemove(tag_inst)) |kv| { - appendPossiblyRefdBodyInst(astgen, payloads, kv.value); - } - } - appendBodyWithFixupsArrayList(astgen, payloads, case_slice); - } - } - // Now that the item expressions are generated we can add this. - try parent_gz.instructions.append(gpa, switch_block); - - try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.SwitchBlock).Struct.fields.len + - @intFromBool(multi_cases_len != 0) + - @intFromBool(any_has_tag_capture) + - payloads.items.len - case_table_end + - (case_table_end - case_table_start) * @typeInfo(Zir.Inst.As).Struct.fields.len); - - const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.SwitchBlock{ - .operand = raw_operand, - .bits = Zir.Inst.SwitchBlock.Bits{ - .has_multi_cases = multi_cases_len != 0, - .has_else = special_prong == .@"else", - .has_under = special_prong == .under, - .any_has_tag_capture = any_has_tag_capture, - .scalar_cases_len = @intCast(scalar_cases_len), - }, - }); - - if (multi_cases_len != 0) { - astgen.extra.appendAssumeCapacity(multi_cases_len); - } - - if (any_has_tag_capture) { - astgen.extra.appendAssumeCapacity(@intFromEnum(tag_inst)); - } - - const zir_datas = astgen.instructions.items(.data); - zir_datas[@intFromEnum(switch_block)].pl_node.payload_index = payload_index; - - for (payloads.items[case_table_start..case_table_end], 0..) 
|start_index, i| { - var body_len_index = start_index; - var end_index = start_index; - const table_index = case_table_start + i; - if (table_index < scalar_case_table) { - end_index += 1; - } else if (table_index < multi_case_table) { - body_len_index += 1; - end_index += 2; - } else { - body_len_index += 2; - const items_len = payloads.items[start_index]; - const ranges_len = payloads.items[start_index + 1]; - end_index += 3 + items_len + 2 * ranges_len; - } - const prong_info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(payloads.items[body_len_index]); - end_index += prong_info.body_len; - astgen.extra.appendSliceAssumeCapacity(payloads.items[start_index..end_index]); - } - - if (need_result_rvalue) { - return rvalue(parent_gz, ri, switch_block.toRef(), switch_node); - } else { - return switch_block.toRef(); - } -} - -fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - const tree = astgen.tree; - const node_datas = tree.nodes.items(.data); - const node_tags = tree.nodes.items(.tag); - - if (astgen.fn_block == null) { - return astgen.failNode(node, "'return' outside function scope", .{}); - } - - if (gz.any_defer_node != 0) { - return astgen.failNodeNotes(node, "cannot return from defer expression", .{}, &.{ - try astgen.errNoteNode( - gz.any_defer_node, - "defer expression here", - .{}, - ), - }); - } - - // Ensure debug line/column information is emitted for this return expression. - // Then we will save the line/column so that we can emit another one that goes - // "backwards" because we want to evaluate the operand, but then put the debug - // info back at the return keyword for error return tracing. - if (!gz.is_comptime) { - try emitDbgNode(gz, node); - } - const ret_lc = LineColumn{ astgen.source_line - gz.decl_line, astgen.source_column }; - - const defer_outer = &astgen.fn_block.?.base; - - const operand_node = node_datas[node].lhs; - if (operand_node == 0) { - // Returning a void value; skip error defers. - try genDefers(gz, defer_outer, scope, .normal_only); - - // As our last action before the return, "pop" the error trace if needed - _ = try gz.addRestoreErrRetIndex(.ret, .always, node); - - _ = try gz.addUnNode(.ret_node, .void_value, node); - return Zir.Inst.Ref.unreachable_value; - } - - if (node_tags[operand_node] == .error_value) { - // Hot path for `return error.Foo`. This bypasses result location logic as well as logic - // for detecting whether to add something to the function's inferred error set. 
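The hot path that begins here applies when the return operand is literally an error value; a user-level sketch, not from the diff:

    fn check(ok: bool) !void {
        if (!ok) return error.Failed; // matched as `.error_value`, emits ret_err_value
    }
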
- const ident_token = node_datas[operand_node].rhs; - const err_name_str_index = try astgen.identAsString(ident_token); - const defer_counts = countDefers(defer_outer, scope); - if (!defer_counts.need_err_code) { - try genDefers(gz, defer_outer, scope, .both_sans_err); - try emitDbgStmt(gz, ret_lc); - _ = try gz.addStrTok(.ret_err_value, err_name_str_index, ident_token); - return Zir.Inst.Ref.unreachable_value; - } - const err_code = try gz.addStrTok(.ret_err_value_code, err_name_str_index, ident_token); - try genDefers(gz, defer_outer, scope, .{ .both = err_code }); - try emitDbgStmt(gz, ret_lc); - _ = try gz.addUnNode(.ret_node, err_code, node); - return Zir.Inst.Ref.unreachable_value; - } - - const ri: ResultInfo = if (astgen.nodes_need_rl.contains(node)) .{ - .rl = .{ .ptr = .{ .inst = try gz.addNode(.ret_ptr, node) } }, - .ctx = .@"return", - } else .{ - .rl = .{ .coerced_ty = astgen.fn_ret_ty }, - .ctx = .@"return", - }; - const prev_anon_name_strategy = gz.anon_name_strategy; - gz.anon_name_strategy = .func; - const operand = try reachableExpr(gz, scope, ri, operand_node, node); - gz.anon_name_strategy = prev_anon_name_strategy; - - switch (nodeMayEvalToError(tree, operand_node)) { - .never => { - // Returning a value that cannot be an error; skip error defers. - try genDefers(gz, defer_outer, scope, .normal_only); - - // As our last action before the return, "pop" the error trace if needed - _ = try gz.addRestoreErrRetIndex(.ret, .always, node); - - try emitDbgStmt(gz, ret_lc); - try gz.addRet(ri, operand, node); - return Zir.Inst.Ref.unreachable_value; - }, - .always => { - // Value is always an error. Emit both error defers and regular defers. - const err_code = if (ri.rl == .ptr) try gz.addUnNode(.load, ri.rl.ptr.inst, node) else operand; - try genDefers(gz, defer_outer, scope, .{ .both = err_code }); - try emitDbgStmt(gz, ret_lc); - try gz.addRet(ri, operand, node); - return Zir.Inst.Ref.unreachable_value; - }, - .maybe => { - const defer_counts = countDefers(defer_outer, scope); - if (!defer_counts.have_err) { - // Only regular defers; no branch needed. - try genDefers(gz, defer_outer, scope, .normal_only); - try emitDbgStmt(gz, ret_lc); - - // As our last action before the return, "pop" the error trace if needed - const result = if (ri.rl == .ptr) try gz.addUnNode(.load, ri.rl.ptr.inst, node) else operand; - _ = try gz.addRestoreErrRetIndex(.ret, .{ .if_non_error = result }, node); - - try gz.addRet(ri, operand, node); - return Zir.Inst.Ref.unreachable_value; - } - - // Emit conditional branch for generating errdefers. 
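The conditional branch emitted below serves bodies where the operand may or may not be an error while errdefers are pending; a minimal user-level sketch:

    const std = @import("std");

    fn f(ok: bool) !u32 {
        defer std.debug.print("done\n", .{}); // normal defer: runs on both paths
        errdefer std.debug.print("failed\n", .{}); // errdefer: error path only
        return if (ok) 7 else error.Nope;
    }
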
- const result = if (ri.rl == .ptr) try gz.addUnNode(.load, ri.rl.ptr.inst, node) else operand; - const is_non_err = try gz.addUnNode(.ret_is_non_err, result, node); - const condbr = try gz.addCondBr(.condbr, node); - - var then_scope = gz.makeSubBlock(scope); - defer then_scope.unstack(); - - try genDefers(&then_scope, defer_outer, scope, .normal_only); - - // As our last action before the return, "pop" the error trace if needed - _ = try then_scope.addRestoreErrRetIndex(.ret, .always, node); - - try emitDbgStmt(&then_scope, ret_lc); - try then_scope.addRet(ri, operand, node); - - var else_scope = gz.makeSubBlock(scope); - defer else_scope.unstack(); - - const which_ones: DefersToEmit = if (!defer_counts.need_err_code) .both_sans_err else .{ - .both = try else_scope.addUnNode(.err_union_code, result, node), - }; - try genDefers(&else_scope, defer_outer, scope, which_ones); - try emitDbgStmt(&else_scope, ret_lc); - try else_scope.addRet(ri, operand, node); - - try setCondBrPayload(condbr, is_non_err, &then_scope, &else_scope); - - return Zir.Inst.Ref.unreachable_value; - }, - } -} - -/// Parses the string `buf` as a base 10 integer of type `u16`. -/// -/// Unlike std.fmt.parseInt, does not allow the '_' character in `buf`. -fn parseBitCount(buf: []const u8) std.fmt.ParseIntError!u16 { - if (buf.len == 0) return error.InvalidCharacter; - - var x: u16 = 0; - - for (buf) |c| { - const digit = switch (c) { - '0'...'9' => c - '0', - else => return error.InvalidCharacter, - }; - - if (x != 0) x = try std.math.mul(u16, x, 10); - x = try std.math.add(u16, x, digit); - } - - return x; -} - -fn identifier( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - ident: Ast.Node.Index, -) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - const tree = astgen.tree; - const main_tokens = tree.nodes.items(.main_token); - - const ident_token = main_tokens[ident]; - const ident_name_raw = tree.tokenSlice(ident_token); - if (mem.eql(u8, ident_name_raw, "_")) { - return astgen.failNode(ident, "'_' used as an identifier without @\"_\" syntax", .{}); - } - - // if not @"" syntax, just use raw token slice - if (ident_name_raw[0] != '@') { - if (primitive_instrs.get(ident_name_raw)) |zir_const_ref| { - return rvalue(gz, ri, zir_const_ref, ident); - } - - if (ident_name_raw.len >= 2) integer: { - const first_c = ident_name_raw[0]; - if (first_c == 'i' or first_c == 'u') { - const signedness: std.builtin.Signedness = switch (first_c == 'i') { - true => .signed, - false => .unsigned, - }; - if (ident_name_raw.len >= 3 and ident_name_raw[1] == '0') { - return astgen.failNode( - ident, - "primitive integer type '{s}' has leading zero", - .{ident_name_raw}, - ); - } - const bit_count = parseBitCount(ident_name_raw[1..]) catch |err| switch (err) { - error.Overflow => return astgen.failNode( - ident, - "primitive integer type '{s}' exceeds maximum bit width of 65535", - .{ident_name_raw}, - ), - error.InvalidCharacter => break :integer, - }; - const result = try gz.add(.{ - .tag = .int_type, - .data = .{ .int_type = .{ - .src_node = gz.nodeIndexToRelative(ident), - .signedness = signedness, - .bit_count = bit_count, - } }, - }); - return rvalue(gz, ri, result, ident); - } - } - } - - // Local variables, including function parameters. 
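A sketch of which identifier spellings the arbitrary-bit-width integer lookup above accepts and rejects (the rejected lines are commented out):

    const A = u7; // ok: arbitrary bit width resolves to an int_type instruction
    const B = i65535; // ok: 65535 is the maximum bit width
    // const C = u065; // error: primitive integer type 'u065' has leading zero
    // const D = u65536; // error: exceeds maximum bit width of 65535
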
- return localVarRef(gz, scope, ri, ident, ident_token); -} - -fn localVarRef( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - ident: Ast.Node.Index, - ident_token: Ast.TokenIndex, -) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - const gpa = astgen.gpa; - const name_str_index = try astgen.identAsString(ident_token); - var s = scope; - var found_already: ?Ast.Node.Index = null; // we have found a decl with the same name already - var num_namespaces_out: u32 = 0; - var capturing_namespace: ?*Scope.Namespace = null; - while (true) switch (s.tag) { - .local_val => { - const local_val = s.cast(Scope.LocalVal).?; - - if (local_val.name == name_str_index) { - // Locals cannot shadow anything, so we do not need to look for ambiguous - // references in this case. - if (ri.rl == .discard and ri.ctx == .assignment) { - local_val.discarded = ident_token; - } else { - local_val.used = ident_token; - } - - const value_inst = try tunnelThroughClosure( - gz, - ident, - num_namespaces_out, - capturing_namespace, - local_val.inst, - local_val.token_src, - gpa, - ); - - return rvalueNoCoercePreRef(gz, ri, value_inst, ident); - } - s = local_val.parent; - }, - .local_ptr => { - const local_ptr = s.cast(Scope.LocalPtr).?; - if (local_ptr.name == name_str_index) { - if (ri.rl == .discard and ri.ctx == .assignment) { - local_ptr.discarded = ident_token; - } else { - local_ptr.used = ident_token; - } - - // Can't close over a runtime variable - if (num_namespaces_out != 0 and !local_ptr.maybe_comptime and !gz.is_typeof) { - const ident_name = try astgen.identifierTokenString(ident_token); - return astgen.failNodeNotes(ident, "mutable '{s}' not accessible from here", .{ident_name}, &.{ - try astgen.errNoteTok(local_ptr.token_src, "declared mutable here", .{}), - try astgen.errNoteNode(capturing_namespace.?.node, "crosses namespace boundary here", .{}), - }); - } - - const ptr_inst = try tunnelThroughClosure( - gz, - ident, - num_namespaces_out, - capturing_namespace, - local_ptr.ptr, - local_ptr.token_src, - gpa, - ); - - switch (ri.rl) { - .ref, .ref_coerced_ty => { - local_ptr.used_as_lvalue = true; - return ptr_inst; - }, - else => { - const loaded = try gz.addUnNode(.load, ptr_inst, ident); - return rvalueNoCoercePreRef(gz, ri, loaded, ident); - }, - } - } - s = local_ptr.parent; - }, - .gen_zir => s = s.cast(GenZir).?.parent, - .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent, - .namespace, .enum_namespace => { - const ns = s.cast(Scope.Namespace).?; - if (ns.decls.get(name_str_index)) |i| { - if (found_already) |f| { - return astgen.failNodeNotes(ident, "ambiguous reference", .{}, &.{ - try astgen.errNoteNode(f, "declared here", .{}), - try astgen.errNoteNode(i, "also declared here", .{}), - }); - } - // We found a match but must continue looking for ambiguous references to decls. - found_already = i; - } - if (s.tag == .namespace) num_namespaces_out += 1; - capturing_namespace = ns; - s = ns.parent; - }, - .top => break, - }; - if (found_already == null) { - const ident_name = try astgen.identifierTokenString(ident_token); - return astgen.failNode(ident, "use of undeclared identifier '{s}'", .{ident_name}); - } - - // Decl references happen by name rather than ZIR index so that when unrelated - // decls are modified, ZIR code containing references to them can be unmodified. 
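The namespace walk above is what produces Zig's "ambiguous reference" error when a name is declared in more than one enclosing namespace; a user-level sketch:

    const x = 1;
    const Inner = struct {
        const x = 2;
        fn get() u32 {
            return x; // error: ambiguous reference, with a note at each decl
        }
    };
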
- switch (ri.rl) { - .ref, .ref_coerced_ty => return gz.addStrTok(.decl_ref, name_str_index, ident_token), - else => { - const result = try gz.addStrTok(.decl_val, name_str_index, ident_token); - return rvalueNoCoercePreRef(gz, ri, result, ident); - }, - } -} - -/// Adds a capture to a namespace, if needed. -/// Returns the index of the closure_capture instruction. -fn tunnelThroughClosure( - gz: *GenZir, - inner_ref_node: Ast.Node.Index, - num_tunnels: u32, - ns: ?*Scope.Namespace, - value: Zir.Inst.Ref, - token: Ast.TokenIndex, - gpa: Allocator, -) !Zir.Inst.Ref { - // For trivial values, we don't need a tunnel. - // Just return the ref. - if (num_tunnels == 0 or value.toIndex() == null) { - return value; - } - - // Otherwise we need a tunnel. Check if this namespace - // already has one for this value. - const gop = try ns.?.captures.getOrPut(gpa, value.toIndex().?); - if (!gop.found_existing) { - // Make a new capture for this value but don't add it to the declaring_gz yet - try gz.astgen.instructions.append(gz.astgen.gpa, .{ - .tag = .closure_capture, - .data = .{ .un_tok = .{ - .operand = value, - .src_tok = ns.?.declaring_gz.?.tokenIndexToRelative(token), - } }, - }); - gop.value_ptr.* = @enumFromInt(gz.astgen.instructions.len - 1); - } - - // Add an instruction to get the value from the closure into - // our current context - return try gz.addInstNode(.closure_get, gop.value_ptr.*, inner_ref_node); -} - -fn stringLiteral( - gz: *GenZir, - ri: ResultInfo, - node: Ast.Node.Index, -) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - const tree = astgen.tree; - const main_tokens = tree.nodes.items(.main_token); - const str_lit_token = main_tokens[node]; - const str = try astgen.strLitAsString(str_lit_token); - const result = try gz.add(.{ - .tag = .str, - .data = .{ .str = .{ - .start = str.index, - .len = str.len, - } }, - }); - return rvalue(gz, ri, result, node); -} - -fn multilineStringLiteral( - gz: *GenZir, - ri: ResultInfo, - node: Ast.Node.Index, -) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - const str = try astgen.strLitNodeAsString(node); - const result = try gz.add(.{ - .tag = .str, - .data = .{ .str = .{ - .start = str.index, - .len = str.len, - } }, - }); - return rvalue(gz, ri, result, node); -} - -fn charLiteral(gz: *GenZir, ri: ResultInfo, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - const tree = astgen.tree; - const main_tokens = tree.nodes.items(.main_token); - const main_token = main_tokens[node]; - const slice = tree.tokenSlice(main_token); - - switch (std.zig.parseCharLiteral(slice)) { - .success => |codepoint| { - const result = try gz.addInt(codepoint); - return rvalue(gz, ri, result, node); - }, - .failure => |err| return astgen.failWithStrLitError(err, main_token, slice, 0), - } -} - -const Sign = enum { negative, positive }; - -fn numberLiteral(gz: *GenZir, ri: ResultInfo, node: Ast.Node.Index, source_node: Ast.Node.Index, sign: Sign) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - const tree = astgen.tree; - const main_tokens = tree.nodes.items(.main_token); - const num_token = main_tokens[node]; - const bytes = tree.tokenSlice(num_token); - - const result: Zir.Inst.Ref = switch (std.zig.parseNumberLiteral(bytes)) { - .int => |num| switch (num) { - 0 => if (sign == .positive) .zero else return astgen.failTokNotes( - num_token, - "integer literal '-0' is ambiguous", - .{}, - &.{ - try astgen.errNoteTok(num_token, "use '0' for an integer zero", .{}), - try astgen.errNoteTok(num_token, "use '-0.0' for a 
floating-point signed zero", .{}), - }, - ), - 1 => .one, - else => try gz.addInt(num), - }, - .big_int => |base| big: { - const gpa = astgen.gpa; - var big_int = try std.math.big.int.Managed.init(gpa); - defer big_int.deinit(); - const prefix_offset: usize = if (base == .decimal) 0 else 2; - big_int.setString(@intFromEnum(base), bytes[prefix_offset..]) catch |err| switch (err) { - error.InvalidCharacter => unreachable, // caught in `parseNumberLiteral` - error.InvalidBase => unreachable, // we only pass 16, 8, 2, see above - error.OutOfMemory => return error.OutOfMemory, - }; - - const limbs = big_int.limbs[0..big_int.len()]; - assert(big_int.isPositive()); - break :big try gz.addIntBig(limbs); - }, - .float => { - const unsigned_float_number = std.fmt.parseFloat(f128, bytes) catch |err| switch (err) { - error.InvalidCharacter => unreachable, // validated by tokenizer - }; - const float_number = switch (sign) { - .negative => -unsigned_float_number, - .positive => unsigned_float_number, - }; - // If the value fits into a f64 without losing any precision, store it that way. - @setFloatMode(.Strict); - const smaller_float: f64 = @floatCast(float_number); - const bigger_again: f128 = smaller_float; - if (bigger_again == float_number) { - const result = try gz.addFloat(smaller_float); - return rvalue(gz, ri, result, source_node); - } - // We need to use 128 bits. Break the float into 4 u32 values so we can - // put it into the `extra` array. - const int_bits: u128 = @bitCast(float_number); - const result = try gz.addPlNode(.float128, node, Zir.Inst.Float128{ - .piece0 = @truncate(int_bits), - .piece1 = @truncate(int_bits >> 32), - .piece2 = @truncate(int_bits >> 64), - .piece3 = @truncate(int_bits >> 96), - }); - return rvalue(gz, ri, result, source_node); - }, - .failure => |err| return astgen.failWithNumberError(err, num_token, bytes), - }; - - if (sign == .positive) { - return rvalue(gz, ri, result, source_node); - } else { - const negated = try gz.addUnNode(.negate, result, source_node); - return rvalue(gz, ri, negated, source_node); - } -} - -fn failWithNumberError(astgen: *AstGen, err: std.zig.number_literal.Error, token: Ast.TokenIndex, bytes: []const u8) InnerError { - const is_float = std.mem.indexOfScalar(u8, bytes, '.') != null; - switch (err) { - .leading_zero => if (is_float) { - return astgen.failTok(token, "number '{s}' has leading zero", .{bytes}); - } else { - return astgen.failTokNotes(token, "number '{s}' has leading zero", .{bytes}, &.{ - try astgen.errNoteTok(token, "use '0o' prefix for octal literals", .{}), - }); - }, - .digit_after_base => return astgen.failTok(token, "expected a digit after base prefix", .{}), - .upper_case_base => |i| return astgen.failOff(token, @intCast(i), "base prefix must be lowercase", .{}), - .invalid_float_base => |i| return astgen.failOff(token, @intCast(i), "invalid base for float literal", .{}), - .repeated_underscore => |i| return astgen.failOff(token, @intCast(i), "repeated digit separator", .{}), - .invalid_underscore_after_special => |i| return astgen.failOff(token, @intCast(i), "expected digit before digit separator", .{}), - .invalid_digit => |info| return astgen.failOff(token, @intCast(info.i), "invalid digit '{c}' for {s} base", .{ bytes[info.i], @tagName(info.base) }), - .invalid_digit_exponent => |i| return astgen.failOff(token, @intCast(i), "invalid digit '{c}' in exponent", .{bytes[i]}), - .duplicate_exponent => |i| return astgen.failOff(token, @intCast(i), "duplicate exponent", .{}), - .exponent_after_underscore => |i| return 
astgen.failOff(token, @intCast(i), "expected digit before exponent", .{}), - .special_after_underscore => |i| return astgen.failOff(token, @intCast(i), "expected digit before '{c}'", .{bytes[i]}), - .trailing_special => |i| return astgen.failOff(token, @intCast(i), "expected digit after '{c}'", .{bytes[i - 1]}), - .trailing_underscore => |i| return astgen.failOff(token, @intCast(i), "trailing digit separator", .{}), - .duplicate_period => unreachable, // Validated by tokenizer - .invalid_character => unreachable, // Validated by tokenizer - .invalid_exponent_sign => |i| { - assert(bytes.len >= 2 and bytes[0] == '0' and bytes[1] == 'x'); // Validated by tokenizer - return astgen.failOff(token, @intCast(i), "sign '{c}' cannot follow digit '{c}' in hex base", .{ bytes[i], bytes[i - 1] }); - }, - } -} - -fn asmExpr( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, - full: Ast.full.Asm, -) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - const tree = astgen.tree; - const main_tokens = tree.nodes.items(.main_token); - const node_datas = tree.nodes.items(.data); - const node_tags = tree.nodes.items(.tag); - const token_tags = tree.tokens.items(.tag); - - const TagAndTmpl = struct { tag: Zir.Inst.Extended, tmpl: Zir.NullTerminatedString }; - const tag_and_tmpl: TagAndTmpl = switch (node_tags[full.ast.template]) { - .string_literal => .{ - .tag = .@"asm", - .tmpl = (try astgen.strLitAsString(main_tokens[full.ast.template])).index, - }, - .multiline_string_literal => .{ - .tag = .@"asm", - .tmpl = (try astgen.strLitNodeAsString(full.ast.template)).index, - }, - else => .{ - .tag = .asm_expr, - .tmpl = @enumFromInt(@intFromEnum(try comptimeExpr(gz, scope, .{ .rl = .none }, full.ast.template))), - }, - }; - - // See https://github.com/ziglang/zig/issues/215 and related issues discussing - // possible inline assembly improvements. Until then here is status quo AstGen - // for assembly syntax. It's used by std lib crypto aesni.zig. - const is_container_asm = astgen.fn_block == null; - if (is_container_asm) { - if (full.volatile_token) |t| - return astgen.failTok(t, "volatile is meaningless on global assembly", .{}); - if (full.outputs.len != 0 or full.inputs.len != 0 or full.first_clobber != null) - return astgen.failNode(node, "global assembly cannot have inputs, outputs, or clobbers", .{}); - } else { - if (full.outputs.len == 0 and full.volatile_token == null) { - return astgen.failNode(node, "assembly expression with no output must be marked volatile", .{}); - } - } - if (full.outputs.len > 32) { - return astgen.failNode(full.outputs[32], "too many asm outputs", .{}); - } - var outputs_buffer: [32]Zir.Inst.Asm.Output = undefined; - const outputs = outputs_buffer[0..full.outputs.len]; - - var output_type_bits: u32 = 0; - - for (full.outputs, 0..) 
|output_node, i| { - const symbolic_name = main_tokens[output_node]; - const name = try astgen.identAsString(symbolic_name); - const constraint_token = symbolic_name + 2; - const constraint = (try astgen.strLitAsString(constraint_token)).index; - const has_arrow = token_tags[symbolic_name + 4] == .arrow; - if (has_arrow) { - if (output_type_bits != 0) { - return astgen.failNode(output_node, "inline assembly allows up to one output value", .{}); - } - output_type_bits |= @as(u32, 1) << @intCast(i); - const out_type_node = node_datas[output_node].lhs; - const out_type_inst = try typeExpr(gz, scope, out_type_node); - outputs[i] = .{ - .name = name, - .constraint = constraint, - .operand = out_type_inst, - }; - } else { - const ident_token = symbolic_name + 4; - // TODO have a look at #215 and related issues and decide how to - // handle outputs. Do we want this to be identifiers? - // Or maybe we want to force this to be expressions with a pointer type. - outputs[i] = .{ - .name = name, - .constraint = constraint, - .operand = try localVarRef(gz, scope, .{ .rl = .ref }, node, ident_token), - }; - } - } - - if (full.inputs.len > 32) { - return astgen.failNode(full.inputs[32], "too many asm inputs", .{}); - } - var inputs_buffer: [32]Zir.Inst.Asm.Input = undefined; - const inputs = inputs_buffer[0..full.inputs.len]; - - for (full.inputs, 0..) |input_node, i| { - const symbolic_name = main_tokens[input_node]; - const name = try astgen.identAsString(symbolic_name); - const constraint_token = symbolic_name + 2; - const constraint = (try astgen.strLitAsString(constraint_token)).index; - const operand = try expr(gz, scope, .{ .rl = .none }, node_datas[input_node].lhs); - inputs[i] = .{ - .name = name, - .constraint = constraint, - .operand = operand, - }; - } - - var clobbers_buffer: [32]u32 = undefined; - var clobber_i: usize = 0; - if (full.first_clobber) |first_clobber| clobbers: { - // asm ("foo" ::: "a", "b") - // asm ("foo" ::: "a", "b",) - var tok_i = first_clobber; - while (true) : (tok_i += 1) { - if (clobber_i >= clobbers_buffer.len) { - return astgen.failTok(tok_i, "too many asm clobbers", .{}); - } - clobbers_buffer[clobber_i] = @intFromEnum((try astgen.strLitAsString(tok_i)).index); - clobber_i += 1; - tok_i += 1; - switch (token_tags[tok_i]) { - .r_paren => break :clobbers, - .comma => { - if (token_tags[tok_i + 1] == .r_paren) { - break :clobbers; - } else { - continue; - } - }, - else => unreachable, - } - } - } - - const result = try gz.addAsm(.{ - .tag = tag_and_tmpl.tag, - .node = node, - .asm_source = tag_and_tmpl.tmpl, - .is_volatile = full.volatile_token != null, - .output_type_bits = output_type_bits, - .outputs = outputs, - .inputs = inputs, - .clobbers = clobbers_buffer[0..clobber_i], - }); - return rvalue(gz, ri, result, node); -} - -fn as( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, - lhs: Ast.Node.Index, - rhs: Ast.Node.Index, -) InnerError!Zir.Inst.Ref { - const dest_type = try typeExpr(gz, scope, lhs); - const result = try reachableExpr(gz, scope, .{ .rl = .{ .ty = dest_type } }, rhs, node); - return rvalue(gz, ri, result, node); -} - -fn unionInit( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, - params: []const Ast.Node.Index, -) InnerError!Zir.Inst.Ref { - const union_type = try typeExpr(gz, scope, params[0]); - const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[1]); - const field_type = try gz.addPlNode(.field_type_ref, node, Zir.Inst.FieldTypeRef{ - 
.container_type = union_type,
- .field_name = field_name,
- });
- const init = try reachableExpr(gz, scope, .{ .rl = .{ .ty = field_type } }, params[2], node);
- const result = try gz.addPlNode(.union_init, node, Zir.Inst.UnionInit{
- .union_type = union_type,
- .init = init,
- .field_name = field_name,
- });
- return rvalue(gz, ri, result, node);
-}
-
-fn bitCast(
- gz: *GenZir,
- scope: *Scope,
- ri: ResultInfo,
- node: Ast.Node.Index,
- operand_node: Ast.Node.Index,
-) InnerError!Zir.Inst.Ref {
- const dest_type = try ri.rl.resultTypeForCast(gz, node, "@bitCast");
- const operand = try reachableExpr(gz, scope, .{ .rl = .none }, operand_node, node);
- const result = try gz.addPlNode(.bitcast, node, Zir.Inst.Bin{
- .lhs = dest_type,
- .rhs = operand,
- });
- return rvalue(gz, ri, result, node);
-}
-
-/// Handle one or more nested pointer cast builtins:
-/// * @ptrCast
-/// * @alignCast
-/// * @addrSpaceCast
-/// * @constCast
-/// * @volatileCast
-/// Any sequence of such builtins is treated as a single operation. This allows
-/// for sequences like `@ptrCast(@alignCast(ptr))` to work correctly despite the
-/// intermediate result type being unknown.
-fn ptrCast(
- gz: *GenZir,
- scope: *Scope,
- ri: ResultInfo,
- root_node: Ast.Node.Index,
-) InnerError!Zir.Inst.Ref {
- const astgen = gz.astgen;
- const tree = astgen.tree;
- const main_tokens = tree.nodes.items(.main_token);
- const node_datas = tree.nodes.items(.data);
- const node_tags = tree.nodes.items(.tag);
-
- var flags: Zir.Inst.FullPtrCastFlags = .{};
-
- // Note that all pointer cast builtins have one parameter, so we only need
- // to handle `builtin_call_two`.
- var node = root_node;
- while (true) {
- switch (node_tags[node]) {
- .builtin_call_two, .builtin_call_two_comma => {},
- .grouped_expression => {
- // Handle the chaining even with redundant parentheses
- node = node_datas[node].lhs;
- continue;
- },
- else => break,
- }
-
- if (node_datas[node].lhs == 0) break; // 0 args
- if (node_datas[node].rhs != 0) break; // 2 args
-
- const builtin_token = main_tokens[node];
- const builtin_name = tree.tokenSlice(builtin_token);
- const info = BuiltinFn.list.get(builtin_name) orelse break;
- if (info.param_count != 1) break;
-
- switch (info.tag) {
- else => break,
- inline .ptr_cast,
- .align_cast,
- .addrspace_cast,
- .const_cast,
- .volatile_cast,
- => |tag| {
- if (@field(flags, @tagName(tag))) {
- return astgen.failNode(node, "redundant {s}", .{builtin_name});
- }
- @field(flags, @tagName(tag)) = true;
- },
- }
-
- node = node_datas[node].lhs;
- }
-
- const flags_i: u5 = @bitCast(flags);
- assert(flags_i != 0);
-
- const ptr_only: Zir.Inst.FullPtrCastFlags = .{ .ptr_cast = true };
- if (flags_i == @as(u5, @bitCast(ptr_only))) {
- // Special case: simpler representation
- return typeCast(gz, scope, ri, root_node, node, .ptr_cast, "@ptrCast");
- }
-
- const no_result_ty_flags: Zir.Inst.FullPtrCastFlags = .{
- .const_cast = true,
- .volatile_cast = true,
- };
- if ((flags_i & ~@as(u5, @bitCast(no_result_ty_flags))) == 0) {
- // Result type not needed
- const cursor = maybeAdvanceSourceCursorToMainToken(gz, root_node);
- const operand = try expr(gz, scope, .{ .rl = .none }, node);
- try emitDbgStmt(gz, cursor);
- const result = try gz.addExtendedPayloadSmall(.ptr_cast_no_dest, flags_i, Zir.Inst.UnNode{
- .node = gz.nodeIndexToRelative(root_node),
- .operand = operand,
- });
- return rvalue(gz, ri, result, root_node);
- }
-
- // Full cast including result type
-
- const cursor = maybeAdvanceSourceCursorToMainToken(gz,
root_node); - const result_type = try ri.rl.resultTypeForCast(gz, root_node, flags.needResultTypeBuiltinName()); - const operand = try expr(gz, scope, .{ .rl = .none }, node); - try emitDbgStmt(gz, cursor); - const result = try gz.addExtendedPayloadSmall(.ptr_cast_full, flags_i, Zir.Inst.BinNode{ - .node = gz.nodeIndexToRelative(root_node), - .lhs = result_type, - .rhs = operand, - }); - return rvalue(gz, ri, result, root_node); -} - -fn typeOf( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, - args: []const Ast.Node.Index, -) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - if (args.len < 1) { - return astgen.failNode(node, "expected at least 1 argument, found 0", .{}); - } - const gpa = astgen.gpa; - if (args.len == 1) { - const typeof_inst = try gz.makeBlockInst(.typeof_builtin, node); - - var typeof_scope = gz.makeSubBlock(scope); - typeof_scope.is_comptime = false; - typeof_scope.is_typeof = true; - typeof_scope.c_import = false; - defer typeof_scope.unstack(); - - const ty_expr = try reachableExpr(&typeof_scope, &typeof_scope.base, .{ .rl = .none }, args[0], node); - if (!gz.refIsNoReturn(ty_expr)) { - _ = try typeof_scope.addBreak(.break_inline, typeof_inst, ty_expr); - } - try typeof_scope.setBlockBody(typeof_inst); - - // typeof_scope unstacked now, can add new instructions to gz - try gz.instructions.append(gpa, typeof_inst); - return rvalue(gz, ri, typeof_inst.toRef(), node); - } - const payload_size: u32 = std.meta.fields(Zir.Inst.TypeOfPeer).len; - const payload_index = try reserveExtra(astgen, payload_size + args.len); - const args_index = payload_index + payload_size; - - const typeof_inst = try gz.addExtendedMultiOpPayloadIndex(.typeof_peer, payload_index, args.len); - - var typeof_scope = gz.makeSubBlock(scope); - typeof_scope.is_comptime = false; - - for (args, 0..) 
|arg, i| {
- const param_ref = try reachableExpr(&typeof_scope, &typeof_scope.base, .{ .rl = .none }, arg, node);
- astgen.extra.items[args_index + i] = @intFromEnum(param_ref);
- }
- _ = try typeof_scope.addBreak(.break_inline, typeof_inst.toIndex().?, .void_value);
-
- const body = typeof_scope.instructionsSlice();
- const body_len = astgen.countBodyLenAfterFixups(body);
- astgen.setExtra(payload_index, Zir.Inst.TypeOfPeer{
- .body_len = @intCast(body_len),
- .body_index = @intCast(astgen.extra.items.len),
- .src_node = gz.nodeIndexToRelative(node),
- });
- try astgen.extra.ensureUnusedCapacity(gpa, body_len);
- astgen.appendBodyWithFixups(body);
- typeof_scope.unstack();
-
- return rvalue(gz, ri, typeof_inst, node);
-}
-
-fn minMax(
- gz: *GenZir,
- scope: *Scope,
- ri: ResultInfo,
- node: Ast.Node.Index,
- args: []const Ast.Node.Index,
- comptime op: enum { min, max },
-) InnerError!Zir.Inst.Ref {
- const astgen = gz.astgen;
- if (args.len < 2) {
- return astgen.failNode(node, "expected at least 2 arguments, found {d}", .{args.len});
- }
- if (args.len == 2) {
- const tag: Zir.Inst.Tag = switch (op) {
- .min => .min,
- .max => .max,
- };
- const a = try expr(gz, scope, .{ .rl = .none }, args[0]);
- const b = try expr(gz, scope, .{ .rl = .none }, args[1]);
- const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{
- .lhs = a,
- .rhs = b,
- });
- return rvalue(gz, ri, result, node);
- }
- const payload_index = try addExtra(astgen, Zir.Inst.NodeMultiOp{
- .src_node = gz.nodeIndexToRelative(node),
- });
- var extra_index = try reserveExtra(gz.astgen, args.len);
- for (args) |arg| {
- const arg_ref = try expr(gz, scope, .{ .rl = .none }, arg);
- astgen.extra.items[extra_index] = @intFromEnum(arg_ref);
- extra_index += 1;
- }
- const tag: Zir.Inst.Extended = switch (op) {
- .min => .min_multi,
- .max => .max_multi,
- };
- const result = try gz.addExtendedMultiOpPayloadIndex(tag, payload_index, args.len);
- return rvalue(gz, ri, result, node);
-}
-
-fn builtinCall(
- gz: *GenZir,
- scope: *Scope,
- ri: ResultInfo,
- node: Ast.Node.Index,
- params: []const Ast.Node.Index,
-) InnerError!Zir.Inst.Ref {
- const astgen = gz.astgen;
- const tree = astgen.tree;
- const main_tokens = tree.nodes.items(.main_token);
-
- const builtin_token = main_tokens[node];
- const builtin_name = tree.tokenSlice(builtin_token);
-
- // We handle the different builtins manually because they have different semantics depending
- // on the function. For example, `@as` and others participate in result location semantics,
- // and `@cImport` creates a special scope that collects a .c source code text buffer.
- // Also, some builtins have a variable number of parameters.
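`@min` and `@max` are among the variable-arity builtins referred to above: two arguments use the plain `Zir.Inst.Bin` encoding, while three or more take the `NodeMultiOp` path. A sketch:

    const two = @min(1, 2); // .min with Zir.Inst.Bin
    const three = @max(1, 2, 3); // .max_multi via Zir.Inst.NodeMultiOp
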
- - const info = BuiltinFn.list.get(builtin_name) orelse { - return astgen.failNode(node, "invalid builtin function: '{s}'", .{ - builtin_name, - }); - }; - if (info.param_count) |expected| { - if (expected != params.len) { - const s = if (expected == 1) "" else "s"; - return astgen.failNode(node, "expected {d} argument{s}, found {d}", .{ - expected, s, params.len, - }); - } - } - - // Check function scope-only builtins - - if (astgen.fn_block == null and info.illegal_outside_function) - return astgen.failNode(node, "'{s}' outside function scope", .{builtin_name}); - - switch (info.tag) { - .import => { - const node_tags = tree.nodes.items(.tag); - const operand_node = params[0]; - - if (node_tags[operand_node] != .string_literal) { - // Spec reference: https://github.com/ziglang/zig/issues/2206 - return astgen.failNode(operand_node, "@import operand must be a string literal", .{}); - } - const str_lit_token = main_tokens[operand_node]; - const str = try astgen.strLitAsString(str_lit_token); - const str_slice = astgen.string_bytes.items[@intFromEnum(str.index)..][0..str.len]; - if (mem.indexOfScalar(u8, str_slice, 0) != null) { - return astgen.failTok(str_lit_token, "import path cannot contain null bytes", .{}); - } else if (str.len == 0) { - return astgen.failTok(str_lit_token, "import path cannot be empty", .{}); - } - const result = try gz.addStrTok(.import, str.index, str_lit_token); - const gop = try astgen.imports.getOrPut(astgen.gpa, str.index); - if (!gop.found_existing) { - gop.value_ptr.* = str_lit_token; - } - return rvalue(gz, ri, result, node); - }, - .compile_log => { - const payload_index = try addExtra(gz.astgen, Zir.Inst.NodeMultiOp{ - .src_node = gz.nodeIndexToRelative(node), - }); - var extra_index = try reserveExtra(gz.astgen, params.len); - for (params) |param| { - const param_ref = try expr(gz, scope, .{ .rl = .none }, param); - astgen.extra.items[extra_index] = @intFromEnum(param_ref); - extra_index += 1; - } - const result = try gz.addExtendedMultiOpPayloadIndex(.compile_log, payload_index, params.len); - return rvalue(gz, ri, result, node); - }, - .field => { - if (ri.rl == .ref or ri.rl == .ref_coerced_ty) { - return gz.addPlNode(.field_ptr_named, node, Zir.Inst.FieldNamed{ - .lhs = try expr(gz, scope, .{ .rl = .ref }, params[0]), - .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[1]), - }); - } - const result = try gz.addPlNode(.field_val_named, node, Zir.Inst.FieldNamed{ - .lhs = try expr(gz, scope, .{ .rl = .none }, params[0]), - .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[1]), - }); - return rvalue(gz, ri, result, node); - }, - - // zig fmt: off - .as => return as( gz, scope, ri, node, params[0], params[1]), - .bit_cast => return bitCast( gz, scope, ri, node, params[0]), - .TypeOf => return typeOf( gz, scope, ri, node, params), - .union_init => return unionInit(gz, scope, ri, node, params), - .c_import => return cImport( gz, scope, node, params[0]), - .min => return minMax( gz, scope, ri, node, params, .min), - .max => return minMax( gz, scope, ri, node, params, .max), - // zig fmt: on - - .@"export" => { - const node_tags = tree.nodes.items(.tag); - const node_datas = tree.nodes.items(.data); - // This function causes a Decl to be exported. The first parameter is not an expression, - // but an identifier of the Decl to be exported. 
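As of this commit, `@export`'s first operand names the declaration itself rather than being an ordinary expression, so the shape handled below looks like this sketch (using only the `.name` option):

    var counter: u32 = 0;

    comptime {
        @export(counter, .{ .name = "counter" });
    }
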
- var namespace: Zir.Inst.Ref = .none; - var decl_name: Zir.NullTerminatedString = .empty; - switch (node_tags[params[0]]) { - .identifier => { - const ident_token = main_tokens[params[0]]; - if (isPrimitive(tree.tokenSlice(ident_token))) { - return astgen.failTok(ident_token, "unable to export primitive value", .{}); - } - decl_name = try astgen.identAsString(ident_token); - - var s = scope; - var found_already: ?Ast.Node.Index = null; // we have found a decl with the same name already - while (true) switch (s.tag) { - .local_val => { - const local_val = s.cast(Scope.LocalVal).?; - if (local_val.name == decl_name) { - local_val.used = ident_token; - _ = try gz.addPlNode(.export_value, node, Zir.Inst.ExportValue{ - .operand = local_val.inst, - .options = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .export_options_type } }, params[1]), - }); - return rvalue(gz, ri, .void_value, node); - } - s = local_val.parent; - }, - .local_ptr => { - const local_ptr = s.cast(Scope.LocalPtr).?; - if (local_ptr.name == decl_name) { - if (!local_ptr.maybe_comptime) - return astgen.failNode(params[0], "unable to export runtime-known value", .{}); - local_ptr.used = ident_token; - const loaded = try gz.addUnNode(.load, local_ptr.ptr, node); - _ = try gz.addPlNode(.export_value, node, Zir.Inst.ExportValue{ - .operand = loaded, - .options = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .export_options_type } }, params[1]), - }); - return rvalue(gz, ri, .void_value, node); - } - s = local_ptr.parent; - }, - .gen_zir => s = s.cast(GenZir).?.parent, - .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent, - .namespace, .enum_namespace => { - const ns = s.cast(Scope.Namespace).?; - if (ns.decls.get(decl_name)) |i| { - if (found_already) |f| { - return astgen.failNodeNotes(node, "ambiguous reference", .{}, &.{ - try astgen.errNoteNode(f, "declared here", .{}), - try astgen.errNoteNode(i, "also declared here", .{}), - }); - } - // We found a match but must continue looking for ambiguous references to decls. 
- found_already = i; - } - s = ns.parent; - }, - .top => break, - }; - if (found_already == null) { - const ident_name = try astgen.identifierTokenString(ident_token); - return astgen.failNode(params[0], "use of undeclared identifier '{s}'", .{ident_name}); - } - }, - .field_access => { - const namespace_node = node_datas[params[0]].lhs; - namespace = try typeExpr(gz, scope, namespace_node); - const dot_token = main_tokens[params[0]]; - const field_ident = dot_token + 1; - decl_name = try astgen.identAsString(field_ident); - }, - else => return astgen.failNode(params[0], "symbol to export must identify a declaration", .{}), - } - const options = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .export_options_type } }, params[1]); - _ = try gz.addPlNode(.@"export", node, Zir.Inst.Export{ - .namespace = namespace, - .decl_name = decl_name, - .options = options, - }); - return rvalue(gz, ri, .void_value, node); - }, - .@"extern" => { - const type_inst = try typeExpr(gz, scope, params[0]); - const options = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .extern_options_type } }, params[1]); - const result = try gz.addExtendedPayload(.builtin_extern, Zir.Inst.BinNode{ - .node = gz.nodeIndexToRelative(node), - .lhs = type_inst, - .rhs = options, - }); - return rvalue(gz, ri, result, node); - }, - .fence => { - const order = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[0]); - _ = try gz.addExtendedPayload(.fence, Zir.Inst.UnNode{ - .node = gz.nodeIndexToRelative(node), - .operand = order, - }); - return rvalue(gz, ri, .void_value, node); - }, - .set_float_mode => { - const order = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .float_mode_type } }, params[0]); - _ = try gz.addExtendedPayload(.set_float_mode, Zir.Inst.UnNode{ - .node = gz.nodeIndexToRelative(node), - .operand = order, - }); - return rvalue(gz, ri, .void_value, node); - }, - .set_align_stack => { - const order = try expr(gz, scope, coerced_align_ri, params[0]); - _ = try gz.addExtendedPayload(.set_align_stack, Zir.Inst.UnNode{ - .node = gz.nodeIndexToRelative(node), - .operand = order, - }); - return rvalue(gz, ri, .void_value, node); - }, - .set_cold => { - const order = try expr(gz, scope, ri, params[0]); - _ = try gz.addExtendedPayload(.set_cold, Zir.Inst.UnNode{ - .node = gz.nodeIndexToRelative(node), - .operand = order, - }); - return rvalue(gz, ri, .void_value, node); - }, - - .src => { - const token_starts = tree.tokens.items(.start); - const node_start = token_starts[tree.firstToken(node)]; - astgen.advanceSourceCursor(node_start); - const result = try gz.addExtendedPayload(.builtin_src, Zir.Inst.Src{ - .node = gz.nodeIndexToRelative(node), - .line = astgen.source_line, - .column = astgen.source_column, - }); - return rvalue(gz, ri, result, node); - }, - - // zig fmt: off - .This => return rvalue(gz, ri, try gz.addNodeExtended(.this, node), node), - .return_address => return rvalue(gz, ri, try gz.addNodeExtended(.ret_addr, node), node), - .error_return_trace => return rvalue(gz, ri, try gz.addNodeExtended(.error_return_trace, node), node), - .frame => return rvalue(gz, ri, try gz.addNodeExtended(.frame, node), node), - .frame_address => return rvalue(gz, ri, try gz.addNodeExtended(.frame_address, node), node), - .breakpoint => return rvalue(gz, ri, try gz.addNodeExtended(.breakpoint, node), node), - .in_comptime => return rvalue(gz, ri, try gz.addNodeExtended(.in_comptime, node), node), - - .type_info => return simpleUnOpType(gz, scope, ri, node, params[0], .type_info), - .size_of 
=> return simpleUnOpType(gz, scope, ri, node, params[0], .size_of), - .bit_size_of => return simpleUnOpType(gz, scope, ri, node, params[0], .bit_size_of), - .align_of => return simpleUnOpType(gz, scope, ri, node, params[0], .align_of), - - .int_from_ptr => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .int_from_ptr), - .compile_error => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[0], .compile_error), - .set_eval_branch_quota => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0], .set_eval_branch_quota), - .int_from_enum => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .int_from_enum), - .int_from_bool => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .int_from_bool), - .embed_file => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[0], .embed_file), - .error_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .anyerror_type } }, params[0], .error_name), - .set_runtime_safety => return simpleUnOp(gz, scope, ri, node, coerced_bool_ri, params[0], .set_runtime_safety), - .sqrt => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .sqrt), - .sin => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .sin), - .cos => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .cos), - .tan => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .tan), - .exp => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .exp), - .exp2 => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .exp2), - .log => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .log), - .log2 => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .log2), - .log10 => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .log10), - .abs => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .abs), - .floor => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .floor), - .ceil => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .ceil), - .trunc => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .trunc), - .round => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .round), - .tag_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .tag_name), - .type_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .type_name), - .Frame => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .frame_type), - .frame_size => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .frame_size), - - .int_from_float => return typeCast(gz, scope, ri, node, params[0], .int_from_float, builtin_name), - .float_from_int => return typeCast(gz, scope, ri, node, params[0], .float_from_int, builtin_name), - .ptr_from_int => return typeCast(gz, scope, ri, node, params[0], .ptr_from_int, builtin_name), - .enum_from_int => return typeCast(gz, scope, ri, node, params[0], .enum_from_int, builtin_name), - .float_cast => return typeCast(gz, scope, ri, node, params[0], .float_cast, builtin_name), - .int_cast => return typeCast(gz, scope, ri, node, params[0], .int_cast, builtin_name), - .truncate => return typeCast(gz, scope, ri, node, params[0], .truncate, builtin_name), - // zig fmt: on - - .Type => { - const operand = try expr(gz, scope, .{ .rl 
= .{ .coerced_ty = .type_info_type } }, params[0]); - - const gpa = gz.astgen.gpa; - - try gz.instructions.ensureUnusedCapacity(gpa, 1); - try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); - - const payload_index = try gz.astgen.addExtra(Zir.Inst.UnNode{ - .node = gz.nodeIndexToRelative(node), - .operand = operand, - }); - const new_index: Zir.Inst.Index = @enumFromInt(gz.astgen.instructions.len); - gz.astgen.instructions.appendAssumeCapacity(.{ - .tag = .extended, - .data = .{ .extended = .{ - .opcode = .reify, - .small = @intFromEnum(gz.anon_name_strategy), - .operand = payload_index, - } }, - }); - gz.instructions.appendAssumeCapacity(new_index); - const result = new_index.toRef(); - return rvalue(gz, ri, result, node); - }, - .panic => { - try emitDbgNode(gz, node); - return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[0], .panic); - }, - .trap => { - try emitDbgNode(gz, node); - _ = try gz.addNode(.trap, node); - return rvalue(gz, ri, .unreachable_value, node); - }, - .int_from_error => { - const operand = try expr(gz, scope, .{ .rl = .none }, params[0]); - const result = try gz.addExtendedPayload(.int_from_error, Zir.Inst.UnNode{ - .node = gz.nodeIndexToRelative(node), - .operand = operand, - }); - return rvalue(gz, ri, result, node); - }, - .error_from_int => { - const operand = try expr(gz, scope, .{ .rl = .none }, params[0]); - const result = try gz.addExtendedPayload(.error_from_int, Zir.Inst.UnNode{ - .node = gz.nodeIndexToRelative(node), - .operand = operand, - }); - return rvalue(gz, ri, result, node); - }, - .error_cast => { - try emitDbgNode(gz, node); - - const result = try gz.addExtendedPayload(.error_cast, Zir.Inst.BinNode{ - .lhs = try ri.rl.resultTypeForCast(gz, node, "@errorCast"), - .rhs = try expr(gz, scope, .{ .rl = .none }, params[0]), - .node = gz.nodeIndexToRelative(node), - }); - return rvalue(gz, ri, result, node); - }, - .ptr_cast, - .align_cast, - .addrspace_cast, - .const_cast, - .volatile_cast, - => return ptrCast(gz, scope, ri, node), - - // zig fmt: off - .has_decl => return hasDeclOrField(gz, scope, ri, node, params[0], params[1], .has_decl), - .has_field => return hasDeclOrField(gz, scope, ri, node, params[0], params[1], .has_field), - - .clz => return bitBuiltin(gz, scope, ri, node, params[0], .clz), - .ctz => return bitBuiltin(gz, scope, ri, node, params[0], .ctz), - .pop_count => return bitBuiltin(gz, scope, ri, node, params[0], .pop_count), - .byte_swap => return bitBuiltin(gz, scope, ri, node, params[0], .byte_swap), - .bit_reverse => return bitBuiltin(gz, scope, ri, node, params[0], .bit_reverse), - - .div_exact => return divBuiltin(gz, scope, ri, node, params[0], params[1], .div_exact), - .div_floor => return divBuiltin(gz, scope, ri, node, params[0], params[1], .div_floor), - .div_trunc => return divBuiltin(gz, scope, ri, node, params[0], params[1], .div_trunc), - .mod => return divBuiltin(gz, scope, ri, node, params[0], params[1], .mod), - .rem => return divBuiltin(gz, scope, ri, node, params[0], params[1], .rem), - - .shl_exact => return shiftOp(gz, scope, ri, node, params[0], params[1], .shl_exact), - .shr_exact => return shiftOp(gz, scope, ri, node, params[0], params[1], .shr_exact), - - .bit_offset_of => return offsetOf(gz, scope, ri, node, params[0], params[1], .bit_offset_of), - .offset_of => return offsetOf(gz, scope, ri, node, params[0], params[1], .offset_of), - - .c_undef => return simpleCBuiltin(gz, scope, ri, node, params[0], .c_undef), - .c_include => return simpleCBuiltin(gz, 
scope, ri, node, params[0], .c_include), - - .cmpxchg_strong => return cmpxchg(gz, scope, ri, node, params, 1), - .cmpxchg_weak => return cmpxchg(gz, scope, ri, node, params, 0), - // zig fmt: on - - .wasm_memory_size => { - const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0]); - const result = try gz.addExtendedPayload(.wasm_memory_size, Zir.Inst.UnNode{ - .node = gz.nodeIndexToRelative(node), - .operand = operand, - }); - return rvalue(gz, ri, result, node); - }, - .wasm_memory_grow => { - const index_arg = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0]); - const delta_arg = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .u32_type } }, params[1]); - const result = try gz.addExtendedPayload(.wasm_memory_grow, Zir.Inst.BinNode{ - .node = gz.nodeIndexToRelative(node), - .lhs = index_arg, - .rhs = delta_arg, - }); - return rvalue(gz, ri, result, node); - }, - .c_define => { - if (!gz.c_import) return gz.astgen.failNode(node, "C define valid only inside C import block", .{}); - const name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[0]); - const value = try comptimeExpr(gz, scope, .{ .rl = .none }, params[1]); - const result = try gz.addExtendedPayload(.c_define, Zir.Inst.BinNode{ - .node = gz.nodeIndexToRelative(node), - .lhs = name, - .rhs = value, - }); - return rvalue(gz, ri, result, node); - }, - - .splat => { - const result_type = try ri.rl.resultTypeForCast(gz, node, "@splat"); - const elem_type = try gz.addUnNode(.vector_elem_type, result_type, node); - const scalar = try expr(gz, scope, .{ .rl = .{ .ty = elem_type } }, params[0]); - const result = try gz.addPlNode(.splat, node, Zir.Inst.Bin{ - .lhs = result_type, - .rhs = scalar, - }); - return rvalue(gz, ri, result, node); - }, - .reduce => { - const op = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .reduce_op_type } }, params[0]); - const scalar = try expr(gz, scope, .{ .rl = .none }, params[1]); - const result = try gz.addPlNode(.reduce, node, Zir.Inst.Bin{ - .lhs = op, - .rhs = scalar, - }); - return rvalue(gz, ri, result, node); - }, - - .add_with_overflow => return overflowArithmetic(gz, scope, ri, node, params, .add_with_overflow), - .sub_with_overflow => return overflowArithmetic(gz, scope, ri, node, params, .sub_with_overflow), - .mul_with_overflow => return overflowArithmetic(gz, scope, ri, node, params, .mul_with_overflow), - .shl_with_overflow => return overflowArithmetic(gz, scope, ri, node, params, .shl_with_overflow), - - .atomic_load => { - const result = try gz.addPlNode(.atomic_load, node, Zir.Inst.AtomicLoad{ - // zig fmt: off - .elem_type = try typeExpr(gz, scope, params[0]), - .ptr = try expr (gz, scope, .{ .rl = .none }, params[1]), - .ordering = try expr (gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[2]), - // zig fmt: on - }); - return rvalue(gz, ri, result, node); - }, - .atomic_rmw => { - const int_type = try typeExpr(gz, scope, params[0]); - const result = try gz.addPlNode(.atomic_rmw, node, Zir.Inst.AtomicRmw{ - // zig fmt: off - .ptr = try expr(gz, scope, .{ .rl = .none }, params[1]), - .operation = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_rmw_op_type } }, params[2]), - .operand = try expr(gz, scope, .{ .rl = .{ .ty = int_type } }, params[3]), - .ordering = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[4]), - // zig fmt: on - }); - return rvalue(gz, ri, result, node); - }, - .atomic_store => { - const int_type = try 
typeExpr(gz, scope, params[0]); - _ = try gz.addPlNode(.atomic_store, node, Zir.Inst.AtomicStore{ - // zig fmt: off - .ptr = try expr(gz, scope, .{ .rl = .none }, params[1]), - .operand = try expr(gz, scope, .{ .rl = .{ .ty = int_type } }, params[2]), - .ordering = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[3]), - // zig fmt: on - }); - return rvalue(gz, ri, .void_value, node); - }, - .mul_add => { - const float_type = try typeExpr(gz, scope, params[0]); - const mulend1 = try expr(gz, scope, .{ .rl = .{ .coerced_ty = float_type } }, params[1]); - const mulend2 = try expr(gz, scope, .{ .rl = .{ .coerced_ty = float_type } }, params[2]); - const addend = try expr(gz, scope, .{ .rl = .{ .ty = float_type } }, params[3]); - const result = try gz.addPlNode(.mul_add, node, Zir.Inst.MulAdd{ - .mulend1 = mulend1, - .mulend2 = mulend2, - .addend = addend, - }); - return rvalue(gz, ri, result, node); - }, - .call => { - const modifier = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .call_modifier_type } }, params[0]); - const callee = try expr(gz, scope, .{ .rl = .none }, params[1]); - const args = try expr(gz, scope, .{ .rl = .none }, params[2]); - const result = try gz.addPlNode(.builtin_call, node, Zir.Inst.BuiltinCall{ - .modifier = modifier, - .callee = callee, - .args = args, - .flags = .{ - .is_nosuspend = gz.nosuspend_node != 0, - .ensure_result_used = false, - }, - }); - return rvalue(gz, ri, result, node); - }, - .field_parent_ptr => { - const parent_type = try typeExpr(gz, scope, params[0]); - const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[1]); - const result = try gz.addPlNode(.field_parent_ptr, node, Zir.Inst.FieldParentPtr{ - .parent_type = parent_type, - .field_name = field_name, - .field_ptr = try expr(gz, scope, .{ .rl = .none }, params[2]), - }); - return rvalue(gz, ri, result, node); - }, - .memcpy => { - _ = try gz.addPlNode(.memcpy, node, Zir.Inst.Bin{ - .lhs = try expr(gz, scope, .{ .rl = .none }, params[0]), - .rhs = try expr(gz, scope, .{ .rl = .none }, params[1]), - }); - return rvalue(gz, ri, .void_value, node); - }, - .memset => { - const lhs = try expr(gz, scope, .{ .rl = .none }, params[0]); - const lhs_ty = try gz.addUnNode(.typeof, lhs, params[0]); - const elem_ty = try gz.addUnNode(.indexable_ptr_elem_type, lhs_ty, params[0]); - _ = try gz.addPlNode(.memset, node, Zir.Inst.Bin{ - .lhs = lhs, - .rhs = try expr(gz, scope, .{ .rl = .{ .coerced_ty = elem_ty } }, params[1]), - }); - return rvalue(gz, ri, .void_value, node); - }, - .shuffle => { - const result = try gz.addPlNode(.shuffle, node, Zir.Inst.Shuffle{ - .elem_type = try typeExpr(gz, scope, params[0]), - .a = try expr(gz, scope, .{ .rl = .none }, params[1]), - .b = try expr(gz, scope, .{ .rl = .none }, params[2]), - .mask = try comptimeExpr(gz, scope, .{ .rl = .none }, params[3]), - }); - return rvalue(gz, ri, result, node); - }, - .select => { - const result = try gz.addExtendedPayload(.select, Zir.Inst.Select{ - .node = gz.nodeIndexToRelative(node), - .elem_type = try typeExpr(gz, scope, params[0]), - .pred = try expr(gz, scope, .{ .rl = .none }, params[1]), - .a = try expr(gz, scope, .{ .rl = .none }, params[2]), - .b = try expr(gz, scope, .{ .rl = .none }, params[3]), - }); - return rvalue(gz, ri, result, node); - }, - .async_call => { - const result = try gz.addExtendedPayload(.builtin_async_call, Zir.Inst.AsyncCall{ - .node = gz.nodeIndexToRelative(node), - .frame_buffer = try expr(gz, scope, .{ .rl = .none }, 
params[0]), - .result_ptr = try expr(gz, scope, .{ .rl = .none }, params[1]), - .fn_ptr = try expr(gz, scope, .{ .rl = .none }, params[2]), - .args = try expr(gz, scope, .{ .rl = .none }, params[3]), - }); - return rvalue(gz, ri, result, node); - }, - .Vector => { - const result = try gz.addPlNode(.vector_type, node, Zir.Inst.Bin{ - .lhs = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0]), - .rhs = try typeExpr(gz, scope, params[1]), - }); - return rvalue(gz, ri, result, node); - }, - .prefetch => { - const ptr = try expr(gz, scope, .{ .rl = .none }, params[0]); - const options = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .prefetch_options_type } }, params[1]); - _ = try gz.addExtendedPayload(.prefetch, Zir.Inst.BinNode{ - .node = gz.nodeIndexToRelative(node), - .lhs = ptr, - .rhs = options, - }); - return rvalue(gz, ri, .void_value, node); - }, - .c_va_arg => { - const result = try gz.addExtendedPayload(.c_va_arg, Zir.Inst.BinNode{ - .node = gz.nodeIndexToRelative(node), - .lhs = try expr(gz, scope, .{ .rl = .none }, params[0]), - .rhs = try typeExpr(gz, scope, params[1]), - }); - return rvalue(gz, ri, result, node); - }, - .c_va_copy => { - const result = try gz.addExtendedPayload(.c_va_copy, Zir.Inst.UnNode{ - .node = gz.nodeIndexToRelative(node), - .operand = try expr(gz, scope, .{ .rl = .none }, params[0]), - }); - return rvalue(gz, ri, result, node); - }, - .c_va_end => { - const result = try gz.addExtendedPayload(.c_va_end, Zir.Inst.UnNode{ - .node = gz.nodeIndexToRelative(node), - .operand = try expr(gz, scope, .{ .rl = .none }, params[0]), - }); - return rvalue(gz, ri, result, node); - }, - .c_va_start => { - if (!astgen.fn_var_args) { - return astgen.failNode(node, "'@cVaStart' in a non-variadic function", .{}); - } - return rvalue(gz, ri, try gz.addNodeExtended(.c_va_start, node), node); - }, - - .work_item_id => { - const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0]); - const result = try gz.addExtendedPayload(.work_item_id, Zir.Inst.UnNode{ - .node = gz.nodeIndexToRelative(node), - .operand = operand, - }); - return rvalue(gz, ri, result, node); - }, - .work_group_size => { - const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0]); - const result = try gz.addExtendedPayload(.work_group_size, Zir.Inst.UnNode{ - .node = gz.nodeIndexToRelative(node), - .operand = operand, - }); - return rvalue(gz, ri, result, node); - }, - .work_group_id => { - const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0]); - const result = try gz.addExtendedPayload(.work_group_id, Zir.Inst.UnNode{ - .node = gz.nodeIndexToRelative(node), - .operand = operand, - }); - return rvalue(gz, ri, result, node); - }, - } -} - -fn hasDeclOrField( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, - lhs_node: Ast.Node.Index, - rhs_node: Ast.Node.Index, - tag: Zir.Inst.Tag, -) InnerError!Zir.Inst.Ref { - const container_type = try typeExpr(gz, scope, lhs_node); - const name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, rhs_node); - const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ - .lhs = container_type, - .rhs = name, - }); - return rvalue(gz, ri, result, node); -} - -fn typeCast( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, - operand_node: Ast.Node.Index, - tag: Zir.Inst.Tag, - builtin_name: []const u8, -) InnerError!Zir.Inst.Ref { - const cursor = 
maybeAdvanceSourceCursorToMainToken(gz, node); - const result_type = try ri.rl.resultTypeForCast(gz, node, builtin_name); - const operand = try expr(gz, scope, .{ .rl = .none }, operand_node); - - try emitDbgStmt(gz, cursor); - const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ - .lhs = result_type, - .rhs = operand, - }); - return rvalue(gz, ri, result, node); -} - -fn simpleUnOpType( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, - operand_node: Ast.Node.Index, - tag: Zir.Inst.Tag, -) InnerError!Zir.Inst.Ref { - const operand = try typeExpr(gz, scope, operand_node); - const result = try gz.addUnNode(tag, operand, node); - return rvalue(gz, ri, result, node); -} - -fn simpleUnOp( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, - operand_ri: ResultInfo, - operand_node: Ast.Node.Index, - tag: Zir.Inst.Tag, -) InnerError!Zir.Inst.Ref { - const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); - const operand = if (tag == .compile_error) - try comptimeExpr(gz, scope, operand_ri, operand_node) - else - try expr(gz, scope, operand_ri, operand_node); - switch (tag) { - .tag_name, .error_name, .int_from_ptr => try emitDbgStmt(gz, cursor), - else => {}, - } - const result = try gz.addUnNode(tag, operand, node); - return rvalue(gz, ri, result, node); -} - -fn negation( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, -) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - const tree = astgen.tree; - const node_tags = tree.nodes.items(.tag); - const node_datas = tree.nodes.items(.data); - - // Check for float literal as the sub-expression because we want to preserve - // its negativity rather than having it go through comptime subtraction. - const operand_node = node_datas[node].lhs; - if (node_tags[operand_node] == .number_literal) { - return numberLiteral(gz, ri, operand_node, node, .negative); - } - - const operand = try expr(gz, scope, .{ .rl = .none }, operand_node); - const result = try gz.addUnNode(.negate, operand, node); - return rvalue(gz, ri, result, node); -} - -fn cmpxchg( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, - params: []const Ast.Node.Index, - small: u16, -) InnerError!Zir.Inst.Ref { - const int_type = try typeExpr(gz, scope, params[0]); - const result = try gz.addExtendedPayloadSmall(.cmpxchg, small, Zir.Inst.Cmpxchg{ - // zig fmt: off - .node = gz.nodeIndexToRelative(node), - .ptr = try expr(gz, scope, .{ .rl = .none }, params[1]), - .expected_value = try expr(gz, scope, .{ .rl = .{ .ty = int_type } }, params[2]), - .new_value = try expr(gz, scope, .{ .rl = .{ .coerced_ty = int_type } }, params[3]), - .success_order = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[4]), - .failure_order = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[5]), - // zig fmt: on - }); - return rvalue(gz, ri, result, node); -} - -fn bitBuiltin( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, - operand_node: Ast.Node.Index, - tag: Zir.Inst.Tag, -) InnerError!Zir.Inst.Ref { - const operand = try expr(gz, scope, .{ .rl = .none }, operand_node); - const result = try gz.addUnNode(tag, operand, node); - return rvalue(gz, ri, result, node); -} - -fn divBuiltin( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, - lhs_node: Ast.Node.Index, - rhs_node: Ast.Node.Index, - tag: Zir.Inst.Tag, -) InnerError!Zir.Inst.Ref { - const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); - 
const lhs = try expr(gz, scope, .{ .rl = .none }, lhs_node); - const rhs = try expr(gz, scope, .{ .rl = .none }, rhs_node); - - try emitDbgStmt(gz, cursor); - const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ .lhs = lhs, .rhs = rhs }); - return rvalue(gz, ri, result, node); -} - -fn simpleCBuiltin( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, - operand_node: Ast.Node.Index, - tag: Zir.Inst.Extended, -) InnerError!Zir.Inst.Ref { - const name: []const u8 = if (tag == .c_undef) "C undef" else "C include"; - if (!gz.c_import) return gz.astgen.failNode(node, "{s} valid only inside C import block", .{name}); - const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, operand_node); - _ = try gz.addExtendedPayload(tag, Zir.Inst.UnNode{ - .node = gz.nodeIndexToRelative(node), - .operand = operand, - }); - return rvalue(gz, ri, .void_value, node); -} - -fn offsetOf( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, - lhs_node: Ast.Node.Index, - rhs_node: Ast.Node.Index, - tag: Zir.Inst.Tag, -) InnerError!Zir.Inst.Ref { - const type_inst = try typeExpr(gz, scope, lhs_node); - const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, rhs_node); - const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ - .lhs = type_inst, - .rhs = field_name, - }); - return rvalue(gz, ri, result, node); -} - -fn shiftOp( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, - lhs_node: Ast.Node.Index, - rhs_node: Ast.Node.Index, - tag: Zir.Inst.Tag, -) InnerError!Zir.Inst.Ref { - const lhs = try expr(gz, scope, .{ .rl = .none }, lhs_node); - - const cursor = switch (gz.astgen.tree.nodes.items(.tag)[node]) { - .shl, .shr => maybeAdvanceSourceCursorToMainToken(gz, node), - else => undefined, - }; - - const log2_int_type = try gz.addUnNode(.typeof_log2_int_type, lhs, lhs_node); - const rhs = try expr(gz, scope, .{ .rl = .{ .ty = log2_int_type }, .ctx = .shift_op }, rhs_node); - - switch (gz.astgen.tree.nodes.items(.tag)[node]) { - .shl, .shr => try emitDbgStmt(gz, cursor), - else => undefined, - } - - const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ - .lhs = lhs, - .rhs = rhs, - }); - return rvalue(gz, ri, result, node); -} - -fn cImport( - gz: *GenZir, - scope: *Scope, - node: Ast.Node.Index, - body_node: Ast.Node.Index, -) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - const gpa = astgen.gpa; - - if (gz.c_import) return gz.astgen.failNode(node, "cannot nest @cImport", .{}); - - var block_scope = gz.makeSubBlock(scope); - block_scope.is_comptime = true; - block_scope.c_import = true; - defer block_scope.unstack(); - - const block_inst = try gz.makeBlockInst(.c_import, node); - const block_result = try expr(&block_scope, &block_scope.base, .{ .rl = .none }, body_node); - _ = try gz.addUnNode(.ensure_result_used, block_result, node); - if (!gz.refIsNoReturn(block_result)) { - _ = try block_scope.addBreak(.break_inline, block_inst, .void_value); - } - try block_scope.setBlockBody(block_inst); - // block_scope unstacked now, can add new instructions to gz - try gz.instructions.append(gpa, block_inst); - - return block_inst.toRef(); -} - -fn overflowArithmetic( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, - params: []const Ast.Node.Index, - tag: Zir.Inst.Extended, -) InnerError!Zir.Inst.Ref { - const lhs = try expr(gz, scope, .{ .rl = .none }, params[0]); - const rhs = try expr(gz, scope, .{ .rl = .none }, params[1]); - 
const result = try gz.addExtendedPayload(tag, Zir.Inst.BinNode{ - .node = gz.nodeIndexToRelative(node), - .lhs = lhs, - .rhs = rhs, - }); - return rvalue(gz, ri, result, node); -} - -fn callExpr( - gz: *GenZir, - scope: *Scope, - ri: ResultInfo, - node: Ast.Node.Index, - call: Ast.full.Call, -) InnerError!Zir.Inst.Ref { - const astgen = gz.astgen; - - const callee = try calleeExpr(gz, scope, call.ast.fn_expr); - const modifier: std.builtin.CallModifier = blk: { - if (gz.is_comptime) { - break :blk .compile_time; - } - if (call.async_token != null) { - break :blk .async_kw; - } - if (gz.nosuspend_node != 0) { - break :blk .no_async; - } - break :blk .auto; - }; - - { - astgen.advanceSourceCursor(astgen.tree.tokens.items(.start)[call.ast.lparen]); - const line = astgen.source_line - gz.decl_line; - const column = astgen.source_column; - // Sema expects a dbg_stmt immediately before call, - try emitDbgStmtForceCurrentIndex(gz, .{ line, column }); - } - - switch (callee) { - .direct => |obj| assert(obj != .none), - .field => |field| assert(field.obj_ptr != .none), - } - assert(node != 0); - - const call_index: Zir.Inst.Index = @enumFromInt(astgen.instructions.len); - const call_inst = call_index.toRef(); - try gz.astgen.instructions.append(astgen.gpa, undefined); - try gz.instructions.append(astgen.gpa, call_index); - - const scratch_top = astgen.scratch.items.len; - defer astgen.scratch.items.len = scratch_top; - - var scratch_index = scratch_top; - try astgen.scratch.resize(astgen.gpa, scratch_top + call.ast.params.len); - - for (call.ast.params) |param_node| { - var arg_block = gz.makeSubBlock(scope); - defer arg_block.unstack(); - - // `call_inst` is reused to provide the param type. - const arg_ref = try expr(&arg_block, &arg_block.base, .{ .rl = .{ .coerced_ty = call_inst }, .ctx = .fn_arg }, param_node); - _ = try arg_block.addBreakWithSrcNode(.break_inline, call_index, arg_ref, param_node); - - const body = arg_block.instructionsSlice(); - try astgen.scratch.ensureUnusedCapacity(astgen.gpa, countBodyLenAfterFixups(astgen, body)); - appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body); - - astgen.scratch.items[scratch_index] = @intCast(astgen.scratch.items.len - scratch_top); - scratch_index += 1; - } - - // If our result location is a try/catch/error-union-if/return, a function argument, - // or an initializer for a `const` variable, the error trace propagates. - // Otherwise, it should always be popped (handled in Sema). 
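// Illustration of those contexts (a sketch, assuming some `fn f() !u32`):
//   try f()         -> .error_handling_expr -> trace propagates
//   return f();     -> .@"return"           -> trace propagates
//   g(f())          -> .fn_arg              -> trace propagates
//   const x = f();  -> .const_init          -> trace propagates
// Any other context (e.g. `var x = f();`) has the trace popped in Sema.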
- const propagate_error_trace = switch (ri.ctx) { - .error_handling_expr, .@"return", .fn_arg, .const_init => true, - else => false, - }; - - switch (callee) { - .direct => |callee_obj| { - const payload_index = try addExtra(astgen, Zir.Inst.Call{ - .callee = callee_obj, - .flags = .{ - .pop_error_return_trace = !propagate_error_trace, - .packed_modifier = @intCast(@intFromEnum(modifier)), - .args_len = @intCast(call.ast.params.len), - }, - }); - if (call.ast.params.len != 0) { - try astgen.extra.appendSlice(astgen.gpa, astgen.scratch.items[scratch_top..]); - } - gz.astgen.instructions.set(@intFromEnum(call_index), .{ - .tag = .call, - .data = .{ .pl_node = .{ - .src_node = gz.nodeIndexToRelative(node), - .payload_index = payload_index, - } }, - }); - }, - .field => |callee_field| { - const payload_index = try addExtra(astgen, Zir.Inst.FieldCall{ - .obj_ptr = callee_field.obj_ptr, - .field_name_start = callee_field.field_name_start, - .flags = .{ - .pop_error_return_trace = !propagate_error_trace, - .packed_modifier = @intCast(@intFromEnum(modifier)), - .args_len = @intCast(call.ast.params.len), - }, - }); - if (call.ast.params.len != 0) { - try astgen.extra.appendSlice(astgen.gpa, astgen.scratch.items[scratch_top..]); - } - gz.astgen.instructions.set(@intFromEnum(call_index), .{ - .tag = .field_call, - .data = .{ .pl_node = .{ - .src_node = gz.nodeIndexToRelative(node), - .payload_index = payload_index, - } }, - }); - }, - } - return rvalue(gz, ri, call_inst, node); // TODO function call with result location -} - -const Callee = union(enum) { - field: struct { - /// A *pointer* to the object the field is fetched on, so that we can - /// promote the lvalue to an address if the first parameter requires it. - obj_ptr: Zir.Inst.Ref, - /// Offset into `string_bytes`. - field_name_start: Zir.NullTerminatedString, - }, - direct: Zir.Inst.Ref, -}; - -/// calleeExpr generates the function part of a call expression (f in f(x)), but -/// *not* the callee argument to the @call() builtin. Its purpose is to -/// distinguish between standard calls and method call syntax `a.b()`. Thus, if -/// the lhs is a field access, we return using the `field` union field; -/// otherwise, we use the `direct` union field. -fn calleeExpr( - gz: *GenZir, - scope: *Scope, - node: Ast.Node.Index, -) InnerError!Callee { - const astgen = gz.astgen; - const tree = astgen.tree; - - const tag = tree.nodes.items(.tag)[node]; - switch (tag) { - .field_access => { - const main_tokens = tree.nodes.items(.main_token); - const node_datas = tree.nodes.items(.data); - const object_node = node_datas[node].lhs; - const dot_token = main_tokens[node]; - const field_ident = dot_token + 1; - const str_index = try astgen.identAsString(field_ident); - // Capture the object by reference so we can promote it to an - // address in Sema if needed. 
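// Sketch of why the `.ref` capture matters (assuming a user-defined `List`
// with these methods):
//   var list: List = .{};
//   list.append(1); // if `append` takes `*List`, Sema can pass `&list` directly
//   list.count();   // if `count` takes `List`, Sema loads the value through the pointer
// Capturing an lvalue-pointer up front keeps both cases possible.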
- const lhs = try expr(gz, scope, .{ .rl = .ref }, object_node); - - const cursor = maybeAdvanceSourceCursorToMainToken(gz, node); - try emitDbgStmt(gz, cursor); - - return .{ .field = .{ - .obj_ptr = lhs, - .field_name_start = str_index, - } }; - }, - else => return .{ .direct = try expr(gz, scope, .{ .rl = .none }, node) }, - } -} - -const primitive_instrs = std.ComptimeStringMap(Zir.Inst.Ref, .{ - .{ "anyerror", .anyerror_type }, - .{ "anyframe", .anyframe_type }, - .{ "anyopaque", .anyopaque_type }, - .{ "bool", .bool_type }, - .{ "c_int", .c_int_type }, - .{ "c_long", .c_long_type }, - .{ "c_longdouble", .c_longdouble_type }, - .{ "c_longlong", .c_longlong_type }, - .{ "c_char", .c_char_type }, - .{ "c_short", .c_short_type }, - .{ "c_uint", .c_uint_type }, - .{ "c_ulong", .c_ulong_type }, - .{ "c_ulonglong", .c_ulonglong_type }, - .{ "c_ushort", .c_ushort_type }, - .{ "comptime_float", .comptime_float_type }, - .{ "comptime_int", .comptime_int_type }, - .{ "f128", .f128_type }, - .{ "f16", .f16_type }, - .{ "f32", .f32_type }, - .{ "f64", .f64_type }, - .{ "f80", .f80_type }, - .{ "false", .bool_false }, - .{ "i16", .i16_type }, - .{ "i32", .i32_type }, - .{ "i64", .i64_type }, - .{ "i128", .i128_type }, - .{ "i8", .i8_type }, - .{ "isize", .isize_type }, - .{ "noreturn", .noreturn_type }, - .{ "null", .null_value }, - .{ "true", .bool_true }, - .{ "type", .type_type }, - .{ "u16", .u16_type }, - .{ "u29", .u29_type }, - .{ "u32", .u32_type }, - .{ "u64", .u64_type }, - .{ "u128", .u128_type }, - .{ "u1", .u1_type }, - .{ "u8", .u8_type }, - .{ "undefined", .undef }, - .{ "usize", .usize_type }, - .{ "void", .void_type }, -}); - -comptime { - // These checks ensure that std.zig.primitives stays in sync with the primitive->Zir map. - const primitives = std.zig.primitives; - for (primitive_instrs.kvs) |kv| { - if (!primitives.isPrimitive(kv.key)) { - @compileError("std.zig.isPrimitive() is not aware of Zir instr '" ++ @tagName(kv.value) ++ "'"); - } - } - for (primitives.names.kvs) |kv| { - if (primitive_instrs.get(kv.key) == null) { - @compileError("std.zig.primitives entry '" ++ kv.key ++ "' does not have a corresponding Zir instr"); - } - } -} - -fn nodeIsTriviallyZero(tree: *const Ast, node: Ast.Node.Index) bool { - const node_tags = tree.nodes.items(.tag); - const main_tokens = tree.nodes.items(.main_token); - - switch (node_tags[node]) { - .number_literal => { - const ident = main_tokens[node]; - return switch (std.zig.parseNumberLiteral(tree.tokenSlice(ident))) { - .int => |number| switch (number) { - 0 => true, - else => false, - }, - else => false, - }; - }, - else => return false, - } -} - -fn nodeMayAppendToErrorTrace(tree: *const Ast, start_node: Ast.Node.Index) bool { - const node_tags = tree.nodes.items(.tag); - const node_datas = tree.nodes.items(.data); - - var node = start_node; - while (true) { - switch (node_tags[node]) { - // These don't have the opportunity to call any runtime functions. - .error_value, - .identifier, - .@"comptime", - => return false, - - // Forward the question to the LHS sub-expression. - .grouped_expression, - .@"try", - .@"nosuspend", - .unwrap_optional, - => node = node_datas[node].lhs, - - // Anything that does not eval to an error is guaranteed to pop any - // additions to the error trace, so it effectively does not append. 
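// Illustrative answers (a sketch; `g` is assumed to return an error union):
//   error.Foo -> false: no runtime call happens, so nothing can append
//   try g()   -> forwarded to `g()`, which may append
//   x + y     -> can never evaluate to an error, so any inner additions are popped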
- else => return nodeMayEvalToError(tree, start_node) != .never, - } - } -} - -fn nodeMayEvalToError(tree: *const Ast, start_node: Ast.Node.Index) BuiltinFn.EvalToError { - const node_tags = tree.nodes.items(.tag); - const node_datas = tree.nodes.items(.data); - const main_tokens = tree.nodes.items(.main_token); - const token_tags = tree.tokens.items(.tag); - - var node = start_node; - while (true) { - switch (node_tags[node]) { - .root, - .@"usingnamespace", - .test_decl, - .switch_case, - .switch_case_inline, - .switch_case_one, - .switch_case_inline_one, - .container_field_init, - .container_field_align, - .container_field, - .asm_output, - .asm_input, - => unreachable, - - .error_value => return .always, - - .@"asm", - .asm_simple, - .identifier, - .field_access, - .deref, - .array_access, - .while_simple, - .while_cont, - .for_simple, - .if_simple, - .@"while", - .@"if", - .@"for", - .@"switch", - .switch_comma, - .call_one, - .call_one_comma, - .async_call_one, - .async_call_one_comma, - .call, - .call_comma, - .async_call, - .async_call_comma, - => return .maybe, - - .@"return", - .@"break", - .@"continue", - .bit_not, - .bool_not, - .global_var_decl, - .local_var_decl, - .simple_var_decl, - .aligned_var_decl, - .@"defer", - .@"errdefer", - .address_of, - .optional_type, - .negation, - .negation_wrap, - .@"resume", - .array_type, - .array_type_sentinel, - .ptr_type_aligned, - .ptr_type_sentinel, - .ptr_type, - .ptr_type_bit_range, - .@"suspend", - .fn_proto_simple, - .fn_proto_multi, - .fn_proto_one, - .fn_proto, - .fn_decl, - .anyframe_type, - .anyframe_literal, - .number_literal, - .enum_literal, - .string_literal, - .multiline_string_literal, - .char_literal, - .unreachable_literal, - .error_set_decl, - .container_decl, - .container_decl_trailing, - .container_decl_two, - .container_decl_two_trailing, - .container_decl_arg, - .container_decl_arg_trailing, - .tagged_union, - .tagged_union_trailing, - .tagged_union_two, - .tagged_union_two_trailing, - .tagged_union_enum_tag, - .tagged_union_enum_tag_trailing, - .add, - .add_wrap, - .add_sat, - .array_cat, - .array_mult, - .assign, - .assign_destructure, - .assign_bit_and, - .assign_bit_or, - .assign_shl, - .assign_shl_sat, - .assign_shr, - .assign_bit_xor, - .assign_div, - .assign_sub, - .assign_sub_wrap, - .assign_sub_sat, - .assign_mod, - .assign_add, - .assign_add_wrap, - .assign_add_sat, - .assign_mul, - .assign_mul_wrap, - .assign_mul_sat, - .bang_equal, - .bit_and, - .bit_or, - .shl, - .shl_sat, - .shr, - .bit_xor, - .bool_and, - .bool_or, - .div, - .equal_equal, - .error_union, - .greater_or_equal, - .greater_than, - .less_or_equal, - .less_than, - .merge_error_sets, - .mod, - .mul, - .mul_wrap, - .mul_sat, - .switch_range, - .for_range, - .sub, - .sub_wrap, - .sub_sat, - .slice, - .slice_open, - .slice_sentinel, - .array_init_one, - .array_init_one_comma, - .array_init_dot_two, - .array_init_dot_two_comma, - .array_init_dot, - .array_init_dot_comma, - .array_init, - .array_init_comma, - .struct_init_one, - .struct_init_one_comma, - .struct_init_dot_two, - .struct_init_dot_two_comma, - .struct_init_dot, - .struct_init_dot_comma, - .struct_init, - .struct_init_comma, - => return .never, - - // Forward the question to the LHS sub-expression. 
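// e.g. `(g())`, `try g()`, `await g()`, and `g().?` answer exactly as `g()` does.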
- .grouped_expression, - .@"try", - .@"await", - .@"comptime", - .@"nosuspend", - .unwrap_optional, - => node = node_datas[node].lhs, - - // LHS sub-expression may still be an error under the outer optional or error union - .@"catch", - .@"orelse", - => return .maybe, - - .block_two, - .block_two_semicolon, - .block, - .block_semicolon, - => { - const lbrace = main_tokens[node]; - if (token_tags[lbrace - 1] == .colon) { - // Labeled blocks may need a memory location to forward - // to their break statements. - return .maybe; - } else { - return .never; - } - }, - - .builtin_call, - .builtin_call_comma, - .builtin_call_two, - .builtin_call_two_comma, - => { - const builtin_token = main_tokens[node]; - const builtin_name = tree.tokenSlice(builtin_token); - // If the builtin is an invalid name, we don't cause an error here; instead - // let it pass, and the error will be "invalid builtin function" later. - const builtin_info = BuiltinFn.list.get(builtin_name) orelse return .maybe; - return builtin_info.eval_to_error; - }, - } - } -} - -/// Returns `true` if it is known the type expression has more than one possible value; -/// `false` otherwise. -fn nodeImpliesMoreThanOnePossibleValue(tree: *const Ast, start_node: Ast.Node.Index) bool { - const node_tags = tree.nodes.items(.tag); - const node_datas = tree.nodes.items(.data); - - var node = start_node; - while (true) { - switch (node_tags[node]) { - .root, - .@"usingnamespace", - .test_decl, - .switch_case, - .switch_case_inline, - .switch_case_one, - .switch_case_inline_one, - .container_field_init, - .container_field_align, - .container_field, - .asm_output, - .asm_input, - .global_var_decl, - .local_var_decl, - .simple_var_decl, - .aligned_var_decl, - => unreachable, - - .@"return", - .@"break", - .@"continue", - .bit_not, - .bool_not, - .@"defer", - .@"errdefer", - .address_of, - .negation, - .negation_wrap, - .@"resume", - .array_type, - .@"suspend", - .fn_decl, - .anyframe_literal, - .number_literal, - .enum_literal, - .string_literal, - .multiline_string_literal, - .char_literal, - .unreachable_literal, - .error_set_decl, - .container_decl, - .container_decl_trailing, - .container_decl_two, - .container_decl_two_trailing, - .container_decl_arg, - .container_decl_arg_trailing, - .tagged_union, - .tagged_union_trailing, - .tagged_union_two, - .tagged_union_two_trailing, - .tagged_union_enum_tag, - .tagged_union_enum_tag_trailing, - .@"asm", - .asm_simple, - .add, - .add_wrap, - .add_sat, - .array_cat, - .array_mult, - .assign, - .assign_destructure, - .assign_bit_and, - .assign_bit_or, - .assign_shl, - .assign_shl_sat, - .assign_shr, - .assign_bit_xor, - .assign_div, - .assign_sub, - .assign_sub_wrap, - .assign_sub_sat, - .assign_mod, - .assign_add, - .assign_add_wrap, - .assign_add_sat, - .assign_mul, - .assign_mul_wrap, - .assign_mul_sat, - .bang_equal, - .bit_and, - .bit_or, - .shl, - .shl_sat, - .shr, - .bit_xor, - .bool_and, - .bool_or, - .div, - .equal_equal, - .error_union, - .greater_or_equal, - .greater_than, - .less_or_equal, - .less_than, - .merge_error_sets, - .mod, - .mul, - .mul_wrap, - .mul_sat, - .switch_range, - .for_range, - .field_access, - .sub, - .sub_wrap, - .sub_sat, - .slice, - .slice_open, - .slice_sentinel, - .deref, - .array_access, - .error_value, - .while_simple, - .while_cont, - .for_simple, - .if_simple, - .@"catch", - .@"orelse", - .array_init_one, - .array_init_one_comma, - .array_init_dot_two, - .array_init_dot_two_comma, - .array_init_dot, - .array_init_dot_comma, - .array_init, - .array_init_comma, - 
.struct_init_one, - .struct_init_one_comma, - .struct_init_dot_two, - .struct_init_dot_two_comma, - .struct_init_dot, - .struct_init_dot_comma, - .struct_init, - .struct_init_comma, - .@"while", - .@"if", - .@"for", - .@"switch", - .switch_comma, - .call_one, - .call_one_comma, - .async_call_one, - .async_call_one_comma, - .call, - .call_comma, - .async_call, - .async_call_comma, - .block_two, - .block_two_semicolon, - .block, - .block_semicolon, - .builtin_call, - .builtin_call_comma, - .builtin_call_two, - .builtin_call_two_comma, - // these are function bodies, not pointers - .fn_proto_simple, - .fn_proto_multi, - .fn_proto_one, - .fn_proto, - => return false, - - // Forward the question to the LHS sub-expression. - .grouped_expression, - .@"try", - .@"await", - .@"comptime", - .@"nosuspend", - .unwrap_optional, - => node = node_datas[node].lhs, - - .ptr_type_aligned, - .ptr_type_sentinel, - .ptr_type, - .ptr_type_bit_range, - .optional_type, - .anyframe_type, - .array_type_sentinel, - => return true, - - .identifier => { - const main_tokens = tree.nodes.items(.main_token); - const ident_bytes = tree.tokenSlice(main_tokens[node]); - if (primitive_instrs.get(ident_bytes)) |primitive| switch (primitive) { - .anyerror_type, - .anyframe_type, - .anyopaque_type, - .bool_type, - .c_int_type, - .c_long_type, - .c_longdouble_type, - .c_longlong_type, - .c_char_type, - .c_short_type, - .c_uint_type, - .c_ulong_type, - .c_ulonglong_type, - .c_ushort_type, - .comptime_float_type, - .comptime_int_type, - .f16_type, - .f32_type, - .f64_type, - .f80_type, - .f128_type, - .i16_type, - .i32_type, - .i64_type, - .i128_type, - .i8_type, - .isize_type, - .type_type, - .u16_type, - .u29_type, - .u32_type, - .u64_type, - .u128_type, - .u1_type, - .u8_type, - .usize_type, - => return true, - - .void_type, - .bool_false, - .bool_true, - .null_value, - .undef, - .noreturn_type, - => return false, - - else => unreachable, // that's all the values from `primitives`. - } else { - return false; - } - }, - } - } -} - -/// Returns `true` if it is known the expression is a type that cannot be used at runtime; -/// `false` otherwise. 
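// Illustrative answers for the function below (a sketch):
//   comptime_int, comptime_float, type -> true (comptime-only types)
//   u32, bool, *u8                     -> false (usable at runtime)
//   fn () void                         -> true (a function body type, not a pointer)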
-fn nodeImpliesComptimeOnly(tree: *const Ast, start_node: Ast.Node.Index) bool { - const node_tags = tree.nodes.items(.tag); - const node_datas = tree.nodes.items(.data); - - var node = start_node; - while (true) { - switch (node_tags[node]) { - .root, - .@"usingnamespace", - .test_decl, - .switch_case, - .switch_case_inline, - .switch_case_one, - .switch_case_inline_one, - .container_field_init, - .container_field_align, - .container_field, - .asm_output, - .asm_input, - .global_var_decl, - .local_var_decl, - .simple_var_decl, - .aligned_var_decl, - => unreachable, - - .@"return", - .@"break", - .@"continue", - .bit_not, - .bool_not, - .@"defer", - .@"errdefer", - .address_of, - .negation, - .negation_wrap, - .@"resume", - .array_type, - .@"suspend", - .fn_decl, - .anyframe_literal, - .number_literal, - .enum_literal, - .string_literal, - .multiline_string_literal, - .char_literal, - .unreachable_literal, - .error_set_decl, - .container_decl, - .container_decl_trailing, - .container_decl_two, - .container_decl_two_trailing, - .container_decl_arg, - .container_decl_arg_trailing, - .tagged_union, - .tagged_union_trailing, - .tagged_union_two, - .tagged_union_two_trailing, - .tagged_union_enum_tag, - .tagged_union_enum_tag_trailing, - .@"asm", - .asm_simple, - .add, - .add_wrap, - .add_sat, - .array_cat, - .array_mult, - .assign, - .assign_destructure, - .assign_bit_and, - .assign_bit_or, - .assign_shl, - .assign_shl_sat, - .assign_shr, - .assign_bit_xor, - .assign_div, - .assign_sub, - .assign_sub_wrap, - .assign_sub_sat, - .assign_mod, - .assign_add, - .assign_add_wrap, - .assign_add_sat, - .assign_mul, - .assign_mul_wrap, - .assign_mul_sat, - .bang_equal, - .bit_and, - .bit_or, - .shl, - .shl_sat, - .shr, - .bit_xor, - .bool_and, - .bool_or, - .div, - .equal_equal, - .error_union, - .greater_or_equal, - .greater_than, - .less_or_equal, - .less_than, - .merge_error_sets, - .mod, - .mul, - .mul_wrap, - .mul_sat, - .switch_range, - .for_range, - .field_access, - .sub, - .sub_wrap, - .sub_sat, - .slice, - .slice_open, - .slice_sentinel, - .deref, - .array_access, - .error_value, - .while_simple, - .while_cont, - .for_simple, - .if_simple, - .@"catch", - .@"orelse", - .array_init_one, - .array_init_one_comma, - .array_init_dot_two, - .array_init_dot_two_comma, - .array_init_dot, - .array_init_dot_comma, - .array_init, - .array_init_comma, - .struct_init_one, - .struct_init_one_comma, - .struct_init_dot_two, - .struct_init_dot_two_comma, - .struct_init_dot, - .struct_init_dot_comma, - .struct_init, - .struct_init_comma, - .@"while", - .@"if", - .@"for", - .@"switch", - .switch_comma, - .call_one, - .call_one_comma, - .async_call_one, - .async_call_one_comma, - .call, - .call_comma, - .async_call, - .async_call_comma, - .block_two, - .block_two_semicolon, - .block, - .block_semicolon, - .builtin_call, - .builtin_call_comma, - .builtin_call_two, - .builtin_call_two_comma, - .ptr_type_aligned, - .ptr_type_sentinel, - .ptr_type, - .ptr_type_bit_range, - .optional_type, - .anyframe_type, - .array_type_sentinel, - => return false, - - // these are function bodies, not pointers - .fn_proto_simple, - .fn_proto_multi, - .fn_proto_one, - .fn_proto, - => return true, - - // Forward the question to the LHS sub-expression. 
- .grouped_expression, - .@"try", - .@"await", - .@"comptime", - .@"nosuspend", - .unwrap_optional, - => node = node_datas[node].lhs, - - .identifier => { - const main_tokens = tree.nodes.items(.main_token); - const ident_bytes = tree.tokenSlice(main_tokens[node]); - if (primitive_instrs.get(ident_bytes)) |primitive| switch (primitive) { - .anyerror_type, - .anyframe_type, - .anyopaque_type, - .bool_type, - .c_int_type, - .c_long_type, - .c_longdouble_type, - .c_longlong_type, - .c_char_type, - .c_short_type, - .c_uint_type, - .c_ulong_type, - .c_ulonglong_type, - .c_ushort_type, - .f16_type, - .f32_type, - .f64_type, - .f80_type, - .f128_type, - .i16_type, - .i32_type, - .i64_type, - .i128_type, - .i8_type, - .isize_type, - .u16_type, - .u29_type, - .u32_type, - .u64_type, - .u128_type, - .u1_type, - .u8_type, - .usize_type, - .void_type, - .bool_false, - .bool_true, - .null_value, - .undef, - .noreturn_type, - => return false, - - .comptime_float_type, - .comptime_int_type, - .type_type, - => return true, - - else => unreachable, // that's all the values from `primitives`. - } else { - return false; - } - }, - } - } -} - -/// Returns `true` if the node uses `gz.anon_name_strategy`. -fn nodeUsesAnonNameStrategy(tree: *const Ast, node: Ast.Node.Index) bool { - const node_tags = tree.nodes.items(.tag); - switch (node_tags[node]) { - .container_decl, - .container_decl_trailing, - .container_decl_two, - .container_decl_two_trailing, - .container_decl_arg, - .container_decl_arg_trailing, - .tagged_union, - .tagged_union_trailing, - .tagged_union_two, - .tagged_union_two_trailing, - .tagged_union_enum_tag, - .tagged_union_enum_tag_trailing, - => return true, - .builtin_call_two, .builtin_call_two_comma, .builtin_call, .builtin_call_comma => { - const builtin_token = tree.nodes.items(.main_token)[node]; - const builtin_name = tree.tokenSlice(builtin_token); - return std.mem.eql(u8, builtin_name, "@Type"); - }, - else => return false, - } -} - -/// Applies `rl` semantics to `result`. Expressions which do not do their own handling of -/// result locations must call this function on their result. -/// As an example, if `ri.rl` is `.ptr`, it will write the result to the pointer. -/// If `ri.rl` is `.ty`, it will coerce the result to the type. -/// Assumes nothing stacked on `gz`. -fn rvalue( - gz: *GenZir, - ri: ResultInfo, - raw_result: Zir.Inst.Ref, - src_node: Ast.Node.Index, -) InnerError!Zir.Inst.Ref { - return rvalueInner(gz, ri, raw_result, src_node, true); -} - -/// Like `rvalue`, but refuses to perform coercions before taking references for -/// the `ref_coerced_ty` result type. This is used for local variables which do -/// not have `alloc`s, because we want variables to have consistent addresses, -/// i.e. we want them to act like lvalues. 
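// The invariant being preserved, sketched:
//   const x: u8 = 5; // const local without an `alloc`
//   const p = &x;
//   const q = &x;    // must observe the same address as `p`
// Coercing `x` before each `ref` would materialize fresh temporaries and
// hand out distinct addresses, so the pre-`ref` coercion is skipped here.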
-fn rvalueNoCoercePreRef( - gz: *GenZir, - ri: ResultInfo, - raw_result: Zir.Inst.Ref, - src_node: Ast.Node.Index, -) InnerError!Zir.Inst.Ref { - return rvalueInner(gz, ri, raw_result, src_node, false); -} - -fn rvalueInner( - gz: *GenZir, - ri: ResultInfo, - raw_result: Zir.Inst.Ref, - src_node: Ast.Node.Index, - allow_coerce_pre_ref: bool, -) InnerError!Zir.Inst.Ref { - const result = r: { - if (raw_result.toIndex()) |result_index| { - const zir_tags = gz.astgen.instructions.items(.tag); - const data = gz.astgen.instructions.items(.data)[@intFromEnum(result_index)]; - if (zir_tags[@intFromEnum(result_index)].isAlwaysVoid(data)) { - break :r Zir.Inst.Ref.void_value; - } - } - break :r raw_result; - }; - if (gz.endsWithNoReturn()) return result; - switch (ri.rl) { - .none, .coerced_ty => return result, - .discard => { - // Emit a compile error for discarding error values. - _ = try gz.addUnNode(.ensure_result_non_error, result, src_node); - return .void_value; - }, - .ref, .ref_coerced_ty => { - const coerced_result = if (allow_coerce_pre_ref and ri.rl == .ref_coerced_ty) res: { - const ptr_ty = ri.rl.ref_coerced_ty; - break :res try gz.addPlNode(.coerce_ptr_elem_ty, src_node, Zir.Inst.Bin{ - .lhs = ptr_ty, - .rhs = result, - }); - } else result; - // We need a pointer but we have a value. - // Unfortunately it's not quite as simple as directly emitting a ref - // instruction here because we need subsequent address-of operator on - // const locals to return the same address. - const astgen = gz.astgen; - const tree = astgen.tree; - const src_token = tree.firstToken(src_node); - const result_index = coerced_result.toIndex() orelse - return gz.addUnTok(.ref, coerced_result, src_token); - const zir_tags = gz.astgen.instructions.items(.tag); - if (zir_tags[@intFromEnum(result_index)].isParam() or astgen.isInferred(coerced_result)) - return gz.addUnTok(.ref, coerced_result, src_token); - const gop = try astgen.ref_table.getOrPut(astgen.gpa, result_index); - if (!gop.found_existing) { - gop.value_ptr.* = try gz.makeUnTok(.ref, coerced_result, src_token); - } - return gop.value_ptr.*.toRef(); - }, - .ty => |ty_inst| { - // Quickly eliminate some common, unnecessary type coercion. 
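// The packing below fuses (destination type, result ref) into one u64 key so
// a single switch recognizes pairs that need no coercion, illustratively:
//   dest `type`  with result `.u8_type`    -> `u8` is already a `type`
//   dest `bool`  with result `.bool_true`  -> `true` is already a `bool`
//   dest `usize` with result `.zero_usize` -> already a `usize`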
- const as_ty = @as(u64, @intFromEnum(Zir.Inst.Ref.type_type)) << 32; - const as_comptime_int = @as(u64, @intFromEnum(Zir.Inst.Ref.comptime_int_type)) << 32; - const as_bool = @as(u64, @intFromEnum(Zir.Inst.Ref.bool_type)) << 32; - const as_usize = @as(u64, @intFromEnum(Zir.Inst.Ref.usize_type)) << 32; - const as_void = @as(u64, @intFromEnum(Zir.Inst.Ref.void_type)) << 32; - switch ((@as(u64, @intFromEnum(ty_inst)) << 32) | @as(u64, @intFromEnum(result))) { - as_ty | @intFromEnum(Zir.Inst.Ref.u1_type), - as_ty | @intFromEnum(Zir.Inst.Ref.u8_type), - as_ty | @intFromEnum(Zir.Inst.Ref.i8_type), - as_ty | @intFromEnum(Zir.Inst.Ref.u16_type), - as_ty | @intFromEnum(Zir.Inst.Ref.u29_type), - as_ty | @intFromEnum(Zir.Inst.Ref.i16_type), - as_ty | @intFromEnum(Zir.Inst.Ref.u32_type), - as_ty | @intFromEnum(Zir.Inst.Ref.i32_type), - as_ty | @intFromEnum(Zir.Inst.Ref.u64_type), - as_ty | @intFromEnum(Zir.Inst.Ref.i64_type), - as_ty | @intFromEnum(Zir.Inst.Ref.u128_type), - as_ty | @intFromEnum(Zir.Inst.Ref.i128_type), - as_ty | @intFromEnum(Zir.Inst.Ref.usize_type), - as_ty | @intFromEnum(Zir.Inst.Ref.isize_type), - as_ty | @intFromEnum(Zir.Inst.Ref.c_char_type), - as_ty | @intFromEnum(Zir.Inst.Ref.c_short_type), - as_ty | @intFromEnum(Zir.Inst.Ref.c_ushort_type), - as_ty | @intFromEnum(Zir.Inst.Ref.c_int_type), - as_ty | @intFromEnum(Zir.Inst.Ref.c_uint_type), - as_ty | @intFromEnum(Zir.Inst.Ref.c_long_type), - as_ty | @intFromEnum(Zir.Inst.Ref.c_ulong_type), - as_ty | @intFromEnum(Zir.Inst.Ref.c_longlong_type), - as_ty | @intFromEnum(Zir.Inst.Ref.c_ulonglong_type), - as_ty | @intFromEnum(Zir.Inst.Ref.c_longdouble_type), - as_ty | @intFromEnum(Zir.Inst.Ref.f16_type), - as_ty | @intFromEnum(Zir.Inst.Ref.f32_type), - as_ty | @intFromEnum(Zir.Inst.Ref.f64_type), - as_ty | @intFromEnum(Zir.Inst.Ref.f80_type), - as_ty | @intFromEnum(Zir.Inst.Ref.f128_type), - as_ty | @intFromEnum(Zir.Inst.Ref.anyopaque_type), - as_ty | @intFromEnum(Zir.Inst.Ref.bool_type), - as_ty | @intFromEnum(Zir.Inst.Ref.void_type), - as_ty | @intFromEnum(Zir.Inst.Ref.type_type), - as_ty | @intFromEnum(Zir.Inst.Ref.anyerror_type), - as_ty | @intFromEnum(Zir.Inst.Ref.comptime_int_type), - as_ty | @intFromEnum(Zir.Inst.Ref.comptime_float_type), - as_ty | @intFromEnum(Zir.Inst.Ref.noreturn_type), - as_ty | @intFromEnum(Zir.Inst.Ref.anyframe_type), - as_ty | @intFromEnum(Zir.Inst.Ref.null_type), - as_ty | @intFromEnum(Zir.Inst.Ref.undefined_type), - as_ty | @intFromEnum(Zir.Inst.Ref.enum_literal_type), - as_ty | @intFromEnum(Zir.Inst.Ref.atomic_order_type), - as_ty | @intFromEnum(Zir.Inst.Ref.atomic_rmw_op_type), - as_ty | @intFromEnum(Zir.Inst.Ref.calling_convention_type), - as_ty | @intFromEnum(Zir.Inst.Ref.address_space_type), - as_ty | @intFromEnum(Zir.Inst.Ref.float_mode_type), - as_ty | @intFromEnum(Zir.Inst.Ref.reduce_op_type), - as_ty | @intFromEnum(Zir.Inst.Ref.call_modifier_type), - as_ty | @intFromEnum(Zir.Inst.Ref.prefetch_options_type), - as_ty | @intFromEnum(Zir.Inst.Ref.export_options_type), - as_ty | @intFromEnum(Zir.Inst.Ref.extern_options_type), - as_ty | @intFromEnum(Zir.Inst.Ref.type_info_type), - as_ty | @intFromEnum(Zir.Inst.Ref.manyptr_u8_type), - as_ty | @intFromEnum(Zir.Inst.Ref.manyptr_const_u8_type), - as_ty | @intFromEnum(Zir.Inst.Ref.manyptr_const_u8_sentinel_0_type), - as_ty | @intFromEnum(Zir.Inst.Ref.single_const_pointer_to_comptime_int_type), - as_ty | @intFromEnum(Zir.Inst.Ref.slice_const_u8_type), - as_ty | @intFromEnum(Zir.Inst.Ref.slice_const_u8_sentinel_0_type), - as_ty | 
@intFromEnum(Zir.Inst.Ref.anyerror_void_error_union_type), - as_ty | @intFromEnum(Zir.Inst.Ref.generic_poison_type), - as_ty | @intFromEnum(Zir.Inst.Ref.empty_struct_type), - as_comptime_int | @intFromEnum(Zir.Inst.Ref.zero), - as_comptime_int | @intFromEnum(Zir.Inst.Ref.one), - as_bool | @intFromEnum(Zir.Inst.Ref.bool_true), - as_bool | @intFromEnum(Zir.Inst.Ref.bool_false), - as_usize | @intFromEnum(Zir.Inst.Ref.zero_usize), - as_usize | @intFromEnum(Zir.Inst.Ref.one_usize), - as_void | @intFromEnum(Zir.Inst.Ref.void_value), - => return result, // type of result is already correct - - // Need an explicit type coercion instruction. - else => return gz.addPlNode(ri.zirTag(), src_node, Zir.Inst.As{ - .dest_type = ty_inst, - .operand = result, - }), - } - }, - .ptr => |ptr_res| { - _ = try gz.addPlNode(.store_node, ptr_res.src_node orelse src_node, Zir.Inst.Bin{ - .lhs = ptr_res.inst, - .rhs = result, - }); - return .void_value; - }, - .inferred_ptr => |alloc| { - _ = try gz.addPlNode(.store_to_inferred_ptr, src_node, Zir.Inst.Bin{ - .lhs = alloc, - .rhs = result, - }); - return .void_value; - }, - .destructure => |destructure| { - const components = destructure.components; - _ = try gz.addPlNode(.validate_destructure, src_node, Zir.Inst.ValidateDestructure{ - .operand = result, - .destructure_node = gz.nodeIndexToRelative(destructure.src_node), - .expect_len = @intCast(components.len), - }); - for (components, 0..) |component, i| { - if (component == .discard) continue; - const elem_val = try gz.add(.{ - .tag = .elem_val_imm, - .data = .{ .elem_val_imm = .{ - .operand = result, - .idx = @intCast(i), - } }, - }); - switch (component) { - .typed_ptr => |ptr_res| { - _ = try gz.addPlNode(.store_node, ptr_res.src_node orelse src_node, Zir.Inst.Bin{ - .lhs = ptr_res.inst, - .rhs = elem_val, - }); - }, - .inferred_ptr => |ptr_inst| { - _ = try gz.addPlNode(.store_to_inferred_ptr, src_node, Zir.Inst.Bin{ - .lhs = ptr_inst, - .rhs = elem_val, - }); - }, - .discard => unreachable, - } - } - return .void_value; - }, - } -} - -/// Given an identifier token, obtain the string for it. -/// If the token uses @"" syntax, parses as a string, reports errors if applicable, -/// and allocates the result within `astgen.arena`. -/// Otherwise, returns a reference to the source code bytes directly. -/// See also `appendIdentStr` and `parseStrLit`. -fn identifierTokenString(astgen: *AstGen, token: Ast.TokenIndex) InnerError![]const u8 { - const tree = astgen.tree; - const token_tags = tree.tokens.items(.tag); - assert(token_tags[token] == .identifier); - const ident_name = tree.tokenSlice(token); - if (!mem.startsWith(u8, ident_name, "@")) { - return ident_name; - } - var buf: ArrayListUnmanaged(u8) = .{}; - defer buf.deinit(astgen.gpa); - try astgen.parseStrLit(token, &buf, ident_name, 1); - if (mem.indexOfScalar(u8, buf.items, 0) != null) { - return astgen.failTok(token, "identifier cannot contain null bytes", .{}); - } else if (buf.items.len == 0) { - return astgen.failTok(token, "identifier cannot be empty", .{}); - } - const duped = try astgen.arena.dupe(u8, buf.items); - return duped; -} - -/// Given an identifier token, obtain the string for it (possibly parsing as a string -/// literal if it is @"" syntax), and append the string to `buf`. -/// See also `identifierTokenString` and `parseStrLit`. 
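// Sketch of the two paths through the function below:
//   foo      -> the raw source bytes "foo" are appended as-is
//   @"while" -> parsed as a string literal, appending the bytes "while"
//   @""      -> rejected: identifiers cannot be empty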
-fn appendIdentStr( - astgen: *AstGen, - token: Ast.TokenIndex, - buf: *ArrayListUnmanaged(u8), -) InnerError!void { - const tree = astgen.tree; - const token_tags = tree.tokens.items(.tag); - assert(token_tags[token] == .identifier); - const ident_name = tree.tokenSlice(token); - if (!mem.startsWith(u8, ident_name, "@")) { - return buf.appendSlice(astgen.gpa, ident_name); - } else { - const start = buf.items.len; - try astgen.parseStrLit(token, buf, ident_name, 1); - const slice = buf.items[start..]; - if (mem.indexOfScalar(u8, slice, 0) != null) { - return astgen.failTok(token, "identifier cannot contain null bytes", .{}); - } else if (slice.len == 0) { - return astgen.failTok(token, "identifier cannot be empty", .{}); - } - } -} - -/// Appends the result to `buf`. -fn parseStrLit( - astgen: *AstGen, - token: Ast.TokenIndex, - buf: *ArrayListUnmanaged(u8), - bytes: []const u8, - offset: u32, -) InnerError!void { - const raw_string = bytes[offset..]; - var buf_managed = buf.toManaged(astgen.gpa); - const result = std.zig.string_literal.parseWrite(buf_managed.writer(), raw_string); - buf.* = buf_managed.moveToUnmanaged(); - switch (try result) { - .success => return, - .failure => |err| return astgen.failWithStrLitError(err, token, bytes, offset), - } -} - -fn failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token: Ast.TokenIndex, bytes: []const u8, offset: u32) InnerError { - const raw_string = bytes[offset..]; - switch (err) { - .invalid_escape_character => |bad_index| { - return astgen.failOff( - token, - offset + @as(u32, @intCast(bad_index)), - "invalid escape character: '{c}'", - .{raw_string[bad_index]}, - ); - }, - .expected_hex_digit => |bad_index| { - return astgen.failOff( - token, - offset + @as(u32, @intCast(bad_index)), - "expected hex digit, found '{c}'", - .{raw_string[bad_index]}, - ); - }, - .empty_unicode_escape_sequence => |bad_index| { - return astgen.failOff( - token, - offset + @as(u32, @intCast(bad_index)), - "empty unicode escape sequence", - .{}, - ); - }, - .expected_hex_digit_or_rbrace => |bad_index| { - return astgen.failOff( - token, - offset + @as(u32, @intCast(bad_index)), - "expected hex digit or '}}', found '{c}'", - .{raw_string[bad_index]}, - ); - }, - .invalid_unicode_codepoint => |bad_index| { - return astgen.failOff( - token, - offset + @as(u32, @intCast(bad_index)), - "unicode escape does not correspond to a valid codepoint", - .{}, - ); - }, - .expected_lbrace => |bad_index| { - return astgen.failOff( - token, - offset + @as(u32, @intCast(bad_index)), - "expected '{{', found '{c}'", - .{raw_string[bad_index]}, - ); - }, - .expected_rbrace => |bad_index| { - return astgen.failOff( - token, - offset + @as(u32, @intCast(bad_index)), - "expected '}}', found '{c}'", - .{raw_string[bad_index]}, - ); - }, - .expected_single_quote => |bad_index| { - return astgen.failOff( - token, - offset + @as(u32, @intCast(bad_index)), - "expected single quote ('), found '{c}'", - .{raw_string[bad_index]}, - ); - }, - .invalid_character => |bad_index| { - return astgen.failOff( - token, - offset + @as(u32, @intCast(bad_index)), - "invalid byte in string or character literal: '{c}'", - .{raw_string[bad_index]}, - ); - }, - } -} - -fn failNode( - astgen: *AstGen, - node: Ast.Node.Index, - comptime format: []const u8, - args: anytype, -) InnerError { - return astgen.failNodeNotes(node, format, args, &[0]u32{}); -} - -fn appendErrorNode( - astgen: *AstGen, - node: Ast.Node.Index, - comptime format: []const u8, - args: anytype, -) Allocator.Error!void { - 
try astgen.appendErrorNodeNotes(node, format, args, &[0]u32{}); -} - -fn appendErrorNodeNotes( - astgen: *AstGen, - node: Ast.Node.Index, - comptime format: []const u8, - args: anytype, - notes: []const u32, -) Allocator.Error!void { - @setCold(true); - const string_bytes = &astgen.string_bytes; - const msg: Zir.NullTerminatedString = @enumFromInt(string_bytes.items.len); - try string_bytes.writer(astgen.gpa).print(format ++ "\x00", args); - const notes_index: u32 = if (notes.len != 0) blk: { - const notes_start = astgen.extra.items.len; - try astgen.extra.ensureTotalCapacity(astgen.gpa, notes_start + 1 + notes.len); - astgen.extra.appendAssumeCapacity(@intCast(notes.len)); - astgen.extra.appendSliceAssumeCapacity(notes); - break :blk @intCast(notes_start); - } else 0; - try astgen.compile_errors.append(astgen.gpa, .{ - .msg = msg, - .node = node, - .token = 0, - .byte_offset = 0, - .notes = notes_index, - }); -} - -fn failNodeNotes( - astgen: *AstGen, - node: Ast.Node.Index, - comptime format: []const u8, - args: anytype, - notes: []const u32, -) InnerError { - try appendErrorNodeNotes(astgen, node, format, args, notes); - return error.AnalysisFail; -} - -fn failTok( - astgen: *AstGen, - token: Ast.TokenIndex, - comptime format: []const u8, - args: anytype, -) InnerError { - return astgen.failTokNotes(token, format, args, &[0]u32{}); -} - -fn appendErrorTok( - astgen: *AstGen, - token: Ast.TokenIndex, - comptime format: []const u8, - args: anytype, -) !void { - try astgen.appendErrorTokNotesOff(token, 0, format, args, &[0]u32{}); -} - -fn failTokNotes( - astgen: *AstGen, - token: Ast.TokenIndex, - comptime format: []const u8, - args: anytype, - notes: []const u32, -) InnerError { - try appendErrorTokNotesOff(astgen, token, 0, format, args, notes); - return error.AnalysisFail; -} - -fn appendErrorTokNotes( - astgen: *AstGen, - token: Ast.TokenIndex, - comptime format: []const u8, - args: anytype, - notes: []const u32, -) !void { - return appendErrorTokNotesOff(astgen, token, 0, format, args, notes); -} - -/// Same as `fail`, except given a token plus an offset from its starting byte -/// offset. 
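// Typical use, sketched: report at an exact byte inside a token, as the
// string-literal error paths above do, e.g.:
//   return astgen.failOff(token, offset + @as(u32, @intCast(bad_index)), "invalid escape character: '{c}'", .{raw_string[bad_index]});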
-fn failOff( - astgen: *AstGen, - token: Ast.TokenIndex, - byte_offset: u32, - comptime format: []const u8, - args: anytype, -) InnerError { - try appendErrorTokNotesOff(astgen, token, byte_offset, format, args, &.{}); - return error.AnalysisFail; -} - -fn appendErrorTokNotesOff( - astgen: *AstGen, - token: Ast.TokenIndex, - byte_offset: u32, - comptime format: []const u8, - args: anytype, - notes: []const u32, -) !void { - @setCold(true); - const gpa = astgen.gpa; - const string_bytes = &astgen.string_bytes; - const msg: Zir.NullTerminatedString = @enumFromInt(string_bytes.items.len); - try string_bytes.writer(gpa).print(format ++ "\x00", args); - const notes_index: u32 = if (notes.len != 0) blk: { - const notes_start = astgen.extra.items.len; - try astgen.extra.ensureTotalCapacity(gpa, notes_start + 1 + notes.len); - astgen.extra.appendAssumeCapacity(@intCast(notes.len)); - astgen.extra.appendSliceAssumeCapacity(notes); - break :blk @intCast(notes_start); - } else 0; - try astgen.compile_errors.append(gpa, .{ - .msg = msg, - .node = 0, - .token = token, - .byte_offset = byte_offset, - .notes = notes_index, - }); -} - -fn errNoteTok( - astgen: *AstGen, - token: Ast.TokenIndex, - comptime format: []const u8, - args: anytype, -) Allocator.Error!u32 { - return errNoteTokOff(astgen, token, 0, format, args); -} - -fn errNoteTokOff( - astgen: *AstGen, - token: Ast.TokenIndex, - byte_offset: u32, - comptime format: []const u8, - args: anytype, -) Allocator.Error!u32 { - @setCold(true); - const string_bytes = &astgen.string_bytes; - const msg: Zir.NullTerminatedString = @enumFromInt(string_bytes.items.len); - try string_bytes.writer(astgen.gpa).print(format ++ "\x00", args); - return astgen.addExtra(Zir.Inst.CompileErrors.Item{ - .msg = msg, - .node = 0, - .token = token, - .byte_offset = byte_offset, - .notes = 0, - }); -} - -fn errNoteNode( - astgen: *AstGen, - node: Ast.Node.Index, - comptime format: []const u8, - args: anytype, -) Allocator.Error!u32 { - @setCold(true); - const string_bytes = &astgen.string_bytes; - const msg: Zir.NullTerminatedString = @enumFromInt(string_bytes.items.len); - try string_bytes.writer(astgen.gpa).print(format ++ "\x00", args); - return astgen.addExtra(Zir.Inst.CompileErrors.Item{ - .msg = msg, - .node = node, - .token = 0, - .byte_offset = 0, - .notes = 0, - }); -} - -fn identAsString(astgen: *AstGen, ident_token: Ast.TokenIndex) !Zir.NullTerminatedString { - const gpa = astgen.gpa; - const string_bytes = &astgen.string_bytes; - const str_index: u32 = @intCast(string_bytes.items.len); - try astgen.appendIdentStr(ident_token, string_bytes); - const key: []const u8 = string_bytes.items[str_index..]; - const gop = try astgen.string_table.getOrPutContextAdapted(gpa, key, StringIndexAdapter{ - .bytes = string_bytes, - }, StringIndexContext{ - .bytes = string_bytes, - }); - if (gop.found_existing) { - string_bytes.shrinkRetainingCapacity(str_index); - return @enumFromInt(gop.key_ptr.*); - } else { - gop.key_ptr.* = str_index; - try string_bytes.append(gpa, 0); - return @enumFromInt(str_index); - } -} - -/// Adds a doc comment block to `string_bytes` by walking backwards from `end_token`. -/// `end_token` must point at the first token after the last doc comment line. -/// Returns `.empty` if no doc comment is present. 
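// Walked example (a sketch): given
//   /// Adds two numbers.
//   /// Returns the sum.
//   pub fn add(a: u32, b: u32) u32 { ... }
// with `end_token` at the `pub` token, the functions below back up over both
// `doc_comment` tokens, strip each leading `///`, and join the lines with
// '\n' before interning the result into `string_bytes`.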
-fn docCommentAsString(astgen: *AstGen, end_token: Ast.TokenIndex) !Zir.NullTerminatedString { - if (end_token == 0) return .empty; - - const token_tags = astgen.tree.tokens.items(.tag); - - var tok = end_token - 1; - while (token_tags[tok] == .doc_comment) { - if (tok == 0) break; - tok -= 1; - } else { - tok += 1; - } - - return docCommentAsStringFromFirst(astgen, end_token, tok); -} - -/// end_token must be > the index of the last doc comment. -fn docCommentAsStringFromFirst( - astgen: *AstGen, - end_token: Ast.TokenIndex, - start_token: Ast.TokenIndex, -) !Zir.NullTerminatedString { - if (start_token == end_token) return .empty; - - const gpa = astgen.gpa; - const string_bytes = &astgen.string_bytes; - const str_index: u32 = @intCast(string_bytes.items.len); - const token_starts = astgen.tree.tokens.items(.start); - const token_tags = astgen.tree.tokens.items(.tag); - - const total_bytes = token_starts[end_token] - token_starts[start_token]; - try string_bytes.ensureUnusedCapacity(gpa, total_bytes); - - var current_token = start_token; - while (current_token < end_token) : (current_token += 1) { - switch (token_tags[current_token]) { - .doc_comment => { - const tok_bytes = astgen.tree.tokenSlice(current_token)[3..]; - string_bytes.appendSliceAssumeCapacity(tok_bytes); - if (current_token != end_token - 1) { - string_bytes.appendAssumeCapacity('\n'); - } - }, - else => break, - } - } - - const key: []const u8 = string_bytes.items[str_index..]; - const gop = try astgen.string_table.getOrPutContextAdapted(gpa, key, StringIndexAdapter{ - .bytes = string_bytes, - }, StringIndexContext{ - .bytes = string_bytes, - }); - - if (gop.found_existing) { - string_bytes.shrinkRetainingCapacity(str_index); - return @enumFromInt(gop.key_ptr.*); - } else { - gop.key_ptr.* = str_index; - try string_bytes.append(gpa, 0); - return @enumFromInt(str_index); - } -} - -const IndexSlice = struct { index: Zir.NullTerminatedString, len: u32 }; - -fn strLitAsString(astgen: *AstGen, str_lit_token: Ast.TokenIndex) !IndexSlice { - const gpa = astgen.gpa; - const string_bytes = &astgen.string_bytes; - const str_index: u32 = @intCast(string_bytes.items.len); - const token_bytes = astgen.tree.tokenSlice(str_lit_token); - try astgen.parseStrLit(str_lit_token, string_bytes, token_bytes, 0); - const key: []const u8 = string_bytes.items[str_index..]; - if (std.mem.indexOfScalar(u8, key, 0)) |_| return .{ - .index = @enumFromInt(str_index), - .len = @intCast(key.len), - }; - const gop = try astgen.string_table.getOrPutContextAdapted(gpa, key, StringIndexAdapter{ - .bytes = string_bytes, - }, StringIndexContext{ - .bytes = string_bytes, - }); - if (gop.found_existing) { - string_bytes.shrinkRetainingCapacity(str_index); - return .{ - .index = @enumFromInt(gop.key_ptr.*), - .len = @intCast(key.len), - }; - } else { - gop.key_ptr.* = str_index; - // Still need a null byte because we are using the same table - // to lookup null terminated strings, so if we get a match, it has to - // be null terminated for that to work. - try string_bytes.append(gpa, 0); - return .{ - .index = @enumFromInt(str_index), - .len = @intCast(key.len), - }; - } -} - -fn strLitNodeAsString(astgen: *AstGen, node: Ast.Node.Index) !IndexSlice { - const tree = astgen.tree; - const node_datas = tree.nodes.items(.data); - - const start = node_datas[node].lhs; - const end = node_datas[node].rhs; - - const gpa = astgen.gpa; - const string_bytes = &astgen.string_bytes; - const str_index = string_bytes.items.len; - - // First line: do not append a newline. 
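// Sketch of the input being assembled (a two-line multiline string literal):
//   const s =
//       \\first
//       \\second
//   ;
// Each token contributes its text with the leading `\\` and the trailing
// line ending ('\n' or '\r\n') removed; lines after the first get a '\n'
// prepended, yielding "first\nsecond".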
-    var tok_i = start;
-    {
-        const slice = tree.tokenSlice(tok_i);
-        const carriage_return_ending: usize = if (slice[slice.len - 2] == '\r') 2 else 1;
-        const line_bytes = slice[2 .. slice.len - carriage_return_ending];
-        try string_bytes.appendSlice(gpa, line_bytes);
-        tok_i += 1;
-    }
-    // Following lines: each line prepends a newline.
-    while (tok_i <= end) : (tok_i += 1) {
-        const slice = tree.tokenSlice(tok_i);
-        const carriage_return_ending: usize = if (slice[slice.len - 2] == '\r') 2 else 1;
-        const line_bytes = slice[2 .. slice.len - carriage_return_ending];
-        try string_bytes.ensureUnusedCapacity(gpa, line_bytes.len + 1);
-        string_bytes.appendAssumeCapacity('\n');
-        string_bytes.appendSliceAssumeCapacity(line_bytes);
-    }
-    const len = string_bytes.items.len - str_index;
-    try string_bytes.append(gpa, 0);
-    return IndexSlice{
-        .index = @enumFromInt(str_index),
-        .len = @intCast(len),
-    };
-}
-
-fn testNameString(astgen: *AstGen, str_lit_token: Ast.TokenIndex) !Zir.NullTerminatedString {
-    const gpa = astgen.gpa;
-    const string_bytes = &astgen.string_bytes;
-    const str_index: u32 = @intCast(string_bytes.items.len);
-    const token_bytes = astgen.tree.tokenSlice(str_lit_token);
-    try string_bytes.append(gpa, 0); // Indicates this is a test.
-    try astgen.parseStrLit(str_lit_token, string_bytes, token_bytes, 0);
-    const slice = string_bytes.items[str_index + 1 ..];
-    if (mem.indexOfScalar(u8, slice, 0) != null) {
-        return astgen.failTok(str_lit_token, "test name cannot contain null bytes", .{});
-    } else if (slice.len == 0) {
-        return astgen.failTok(str_lit_token, "empty test name must be omitted", .{});
-    }
-    try string_bytes.append(gpa, 0);
-    return @enumFromInt(str_index);
-}
-
-const Scope = struct {
-    tag: Tag,
-
-    fn cast(base: *Scope, comptime T: type) ?*T {
-        if (T == Defer) {
-            switch (base.tag) {
-                .defer_normal, .defer_error => return @fieldParentPtr(T, "base", base),
-                else => return null,
-            }
-        }
-        if (T == Namespace) {
-            switch (base.tag) {
-                .namespace, .enum_namespace => return @fieldParentPtr(T, "base", base),
-                else => return null,
-            }
-        }
-        if (base.tag != T.base_tag)
-            return null;
-
-        return @fieldParentPtr(T, "base", base);
-    }
-
-    fn parent(base: *Scope) ?*Scope {
-        return switch (base.tag) {
-            .gen_zir => base.cast(GenZir).?.parent,
-            .local_val => base.cast(LocalVal).?.parent,
-            .local_ptr => base.cast(LocalPtr).?.parent,
-            .defer_normal, .defer_error => base.cast(Defer).?.parent,
-            .namespace, .enum_namespace => base.cast(Namespace).?.parent,
-            .top => null,
-        };
-    }
-
-    const Tag = enum {
-        gen_zir,
-        local_val,
-        local_ptr,
-        defer_normal,
-        defer_error,
-        namespace,
-        enum_namespace,
-        top,
-    };
-
-    /// The category of identifier. These tag names are user-visible in compile errors.
-    const IdCat = enum {
-        @"function parameter",
-        @"local constant",
-        @"local variable",
-        @"switch tag capture",
-        capture,
-    };
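
    // A minimal sketch of the intrusive downcast that `cast` performs above:
    // each concrete scope embeds a `base: Scope` field, so a tag-checked
    // `*Scope` can be converted back to its container via `@fieldParentPtr`.
    // `Base` and `Leaf` are illustrative stand-ins, not compiler types.
    //
    //     const Base = struct { tag: enum { leaf } };
    //     const Leaf = struct {
    //         base: Base = .{ .tag = .leaf },
    //         payload: u32 = 0,
    //     };
    //     fn asLeaf(base: *Base) ?*Leaf {
    //         if (base.tag != .leaf) return null; // Tag guards the downcast.
    //         return @fieldParentPtr(Leaf, "base", base);
    //     }

-    /// This is always a `const` local and, importantly, the `inst` is a value type, not a pointer.
-    /// This structure lives as long as the AST generation of the Block
-    /// node that contains the variable.
-    const LocalVal = struct {
-        const base_tag: Tag = .local_val;
-        base: Scope = Scope{ .tag = base_tag },
-        /// Parents can be: `LocalVal`, `LocalPtr`, `GenZir`, `Defer`, `Namespace`.
-        parent: *Scope,
-        gen_zir: *GenZir,
-        inst: Zir.Inst.Ref,
-        /// Source location of the corresponding variable declaration.
-        token_src: Ast.TokenIndex,
-        /// Track the first identifier where it is referenced.
-        /// 0 means never referenced.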
-        used: Ast.TokenIndex = 0,
-        /// Track the identifier where it is discarded, like this `_ = foo;`.
-        /// 0 means never discarded.
-        discarded: Ast.TokenIndex = 0,
-        /// String table index.
-        name: Zir.NullTerminatedString,
-        id_cat: IdCat,
-    };
-
-    /// This could be a `const` or `var` local. It has a pointer instead of a value.
-    /// This structure lives as long as the AST generation of the Block
-    /// node that contains the variable.
-    const LocalPtr = struct {
-        const base_tag: Tag = .local_ptr;
-        base: Scope = Scope{ .tag = base_tag },
-        /// Parents can be: `LocalVal`, `LocalPtr`, `GenZir`, `Defer`, `Namespace`.
-        parent: *Scope,
-        gen_zir: *GenZir,
-        ptr: Zir.Inst.Ref,
-        /// Source location of the corresponding variable declaration.
-        token_src: Ast.TokenIndex,
-        /// Track the first identifier where it is referenced.
-        /// 0 means never referenced.
-        used: Ast.TokenIndex = 0,
-        /// Track the identifier where it is discarded, like this `_ = foo;`.
-        /// 0 means never discarded.
-        discarded: Ast.TokenIndex = 0,
-        /// Whether this value is used as an lvalue after initialization.
-        /// If not, we know it can be `const`, so will emit a compile error if it is `var`.
-        used_as_lvalue: bool = false,
-        /// String table index.
-        name: Zir.NullTerminatedString,
-        id_cat: IdCat,
-        /// true means we find out during Sema whether the value is comptime.
-        /// false means it is already known at AstGen the value is runtime-known.
-        maybe_comptime: bool,
-    };
-
-    const Defer = struct {
-        base: Scope,
-        /// Parents can be: `LocalVal`, `LocalPtr`, `GenZir`, `Defer`, `Namespace`.
-        parent: *Scope,
-        index: u32,
-        len: u32,
-        remapped_err_code: Zir.Inst.OptionalIndex = .none,
-    };
-
-    /// Represents a global scope that has any number of declarations in it.
-    /// Each declaration has this as the parent scope.
-    const Namespace = struct {
-        const base_tag: Tag = .namespace;
-        base: Scope = Scope{ .tag = base_tag },
-
-        /// Parents can be: `LocalVal`, `LocalPtr`, `GenZir`, `Defer`, `Namespace`.
-        parent: *Scope,
-        /// Maps string table index to the source location of declaration,
-        /// for the purposes of reporting name shadowing compile errors.
-        decls: std.AutoHashMapUnmanaged(Zir.NullTerminatedString, Ast.Node.Index) = .{},
-        node: Ast.Node.Index,
-        inst: Zir.Inst.Index,
-
-        /// The astgen scope containing this namespace.
-        /// Only valid during astgen.
-        declaring_gz: ?*GenZir,
-
-        /// Map from the raw captured value to the instruction
-        /// ref of the capture for decls in this namespace
-        captures: std.AutoArrayHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{},
-
-        fn deinit(self: *Namespace, gpa: Allocator) void {
-            self.decls.deinit(gpa);
-            self.captures.deinit(gpa);
-            self.* = undefined;
-        }
-    };
-
-    const Top = struct {
-        const base_tag: Scope.Tag = .top;
-        base: Scope = Scope{ .tag = base_tag },
-    };
-};
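
// A minimal sketch of how these scopes chain during AstGen, assuming a block
// like `{ const a = 1; var b: u32 = 2; ... }`: `a` gets a `LocalVal` (a value
// ref), `b` gets a `LocalPtr` (a pointer to its alloc), and each new scope
// links to the previous one, so identifier lookup walks `parent` outward:
//
//     GenZir (block scope)
//        ^
//     LocalVal{ .name = "a", .inst = <ref to 1>, .parent = &block.base }
//        ^
//     LocalPtr{ .name = "b", .ptr = <ref to alloc>, .parent = &a_val.base }
//
// The innermost match wins, which is also what makes shadowing detection
// (see `detectLocalShadowing` further below) a simple outward walk.

-/// This is a temporary structure; references to it are valid only
-/// while constructing a `Zir`.
-const GenZir = struct {
-    const base_tag: Scope.Tag = .gen_zir;
-    base: Scope = Scope{ .tag = base_tag },
-    /// Whether we're already in a scope known to be comptime. This is set
-    /// whenever we know Sema will analyze the current block with `is_comptime`,
-    /// for instance when we're within a `struct_decl` or a `block_comptime`.
-    is_comptime: bool,
-    /// Whether we're in an expression within a `@TypeOf` operand. In this case, closure of runtime
-    /// variables is permitted where it is usually not.
-    is_typeof: bool = false,
-    /// This is set to true for inline loops; false otherwise.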
-    is_inline: bool = false,
-    c_import: bool = false,
-    /// How decls created in this scope should be named.
-    anon_name_strategy: Zir.Inst.NameStrategy = .anon,
-    /// The containing decl AST node.
-    decl_node_index: Ast.Node.Index,
-    /// The containing decl line index, absolute.
-    decl_line: u32,
-    /// Parents can be: `LocalVal`, `LocalPtr`, `GenZir`, `Defer`, `Namespace`.
-    parent: *Scope,
-    /// All `GenZir` scopes for the same ZIR share this.
-    astgen: *AstGen,
-    /// Keeps track of the list of instructions in this scope. Possibly shared.
-    /// Indexes to instructions in `astgen`.
-    instructions: *ArrayListUnmanaged(Zir.Inst.Index),
-    /// A sub-block may share its instructions ArrayList with containing GenZir,
-    /// if use is strictly nested. This saves prior size of list for unstacking.
-    instructions_top: usize,
-    label: ?Label = null,
-    break_block: Zir.Inst.OptionalIndex = .none,
-    continue_block: Zir.Inst.OptionalIndex = .none,
-    /// Only valid when setBreakResultInfo is called.
-    break_result_info: AstGen.ResultInfo = undefined,
-
-    suspend_node: Ast.Node.Index = 0,
-    nosuspend_node: Ast.Node.Index = 0,
-    /// Set if this GenZir is a defer.
-    cur_defer_node: Ast.Node.Index = 0,
-    /// Set if this GenZir is a defer or it is inside a defer.
-    any_defer_node: Ast.Node.Index = 0,
-
-    /// Namespace members are lazy. When executing a decl within a namespace,
-    /// any references to external instructions need to be treated specially.
-    /// This list tracks those references. See also .closure_capture and .closure_get.
-    /// Keys are the raw instruction index, values are the closure_capture instruction.
-    captures: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{},
-
-    const unstacked_top = std.math.maxInt(usize);
-    /// Call unstack before adding any new instructions to containing GenZir.
-    fn unstack(self: *GenZir) void {
-        if (self.instructions_top != unstacked_top) {
-            self.instructions.items.len = self.instructions_top;
-            self.instructions_top = unstacked_top;
-        }
-    }
-
-    fn isEmpty(self: *const GenZir) bool {
-        return (self.instructions_top == unstacked_top) or
-            (self.instructions.items.len == self.instructions_top);
-    }
-
-    fn instructionsSlice(self: *const GenZir) []Zir.Inst.Index {
-        return if (self.instructions_top == unstacked_top)
-            &[0]Zir.Inst.Index{}
-        else
-            self.instructions.items[self.instructions_top..];
-    }
-
-    fn instructionsSliceUpto(self: *const GenZir, stacked_gz: *GenZir) []Zir.Inst.Index {
-        return if (self.instructions_top == unstacked_top)
-            &[0]Zir.Inst.Index{}
-        else if (self.instructions == stacked_gz.instructions and stacked_gz.instructions_top != unstacked_top)
-            self.instructions.items[self.instructions_top..stacked_gz.instructions_top]
-        else
-            self.instructions.items[self.instructions_top..];
-    }
-
-    fn makeSubBlock(gz: *GenZir, scope: *Scope) GenZir {
-        return .{
-            .is_comptime = gz.is_comptime,
-            .is_typeof = gz.is_typeof,
-            .c_import = gz.c_import,
-            .decl_node_index = gz.decl_node_index,
-            .decl_line = gz.decl_line,
-            .parent = scope,
-            .astgen = gz.astgen,
-            .suspend_node = gz.suspend_node,
-            .nosuspend_node = gz.nosuspend_node,
-            .any_defer_node = gz.any_defer_node,
-            .instructions = gz.instructions,
-            .instructions_top = gz.instructions.items.len,
-        };
-    }
-
-    const Label = struct {
-        token: Ast.TokenIndex,
-        block_inst: Zir.Inst.Index,
-        used: bool = false,
-    };
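
    // A minimal sketch of the stacking discipline, assuming a parent `gz`
    // whose `instructions` list is shared with its sub-blocks:
    //
    //     var sub = gz.makeSubBlock(scope);     // instructions_top = items.len
    //     // ... emit ZIR into `sub` ...
    //     const body = sub.instructionsSlice(); // items[instructions_top..]
    //     // ... copy `body` into astgen.extra ...
    //     sub.unstack();                        // truncate the shared list
    //
    // Because sub-blocks share one list, they must be unstacked innermost-first;
    // hence the "order is important when unstacking" notes in `addFunc` below.

-    /// Assumes nothing stacked on `gz`.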
- fn endsWithNoReturn(gz: GenZir) bool { - if (gz.isEmpty()) return false; - const tags = gz.astgen.instructions.items(.tag); - const last_inst = gz.instructions.items[gz.instructions.items.len - 1]; - return tags[@intFromEnum(last_inst)].isNoReturn(); - } - - /// TODO all uses of this should be replaced with uses of `endsWithNoReturn`. - fn refIsNoReturn(gz: GenZir, inst_ref: Zir.Inst.Ref) bool { - if (inst_ref == .unreachable_value) return true; - if (inst_ref.toIndex()) |inst_index| { - return gz.astgen.instructions.items(.tag)[@intFromEnum(inst_index)].isNoReturn(); - } - return false; - } - - fn nodeIndexToRelative(gz: GenZir, node_index: Ast.Node.Index) i32 { - return @as(i32, @bitCast(node_index)) - @as(i32, @bitCast(gz.decl_node_index)); - } - - fn tokenIndexToRelative(gz: GenZir, token: Ast.TokenIndex) u32 { - return token - gz.srcToken(); - } - - fn srcToken(gz: GenZir) Ast.TokenIndex { - return gz.astgen.tree.firstToken(gz.decl_node_index); - } - - fn setBreakResultInfo(gz: *GenZir, parent_ri: AstGen.ResultInfo) void { - // Depending on whether the result location is a pointer or value, different - // ZIR needs to be generated. In the former case we rely on storing to the - // pointer to communicate the result, and use breakvoid; in the latter case - // the block break instructions will have the result values. - switch (parent_ri.rl) { - .coerced_ty => |ty_inst| { - // Type coercion needs to happen before breaks. - gz.break_result_info = .{ .rl = .{ .ty = ty_inst }, .ctx = parent_ri.ctx }; - }, - .discard => { - // We don't forward the result context here. This prevents - // "unnecessary discard" errors from being caused by expressions - // far from the actual discard, such as a `break` from a - // discarded block. - gz.break_result_info = .{ .rl = .discard }; - }, - else => { - gz.break_result_info = parent_ri; - }, - } - } - - /// Assumes nothing stacked on `gz`. Unstacks `gz`. - fn setBoolBrBody(gz: *GenZir, bool_br: Zir.Inst.Index, bool_br_lhs: Zir.Inst.Ref) !void { - const astgen = gz.astgen; - const gpa = astgen.gpa; - const body = gz.instructionsSlice(); - const body_len = astgen.countBodyLenAfterFixups(body); - try astgen.extra.ensureUnusedCapacity( - gpa, - @typeInfo(Zir.Inst.BoolBr).Struct.fields.len + body_len, - ); - const zir_datas = astgen.instructions.items(.data); - zir_datas[@intFromEnum(bool_br)].pl_node.payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.BoolBr{ - .lhs = bool_br_lhs, - .body_len = body_len, - }); - astgen.appendBodyWithFixups(body); - gz.unstack(); - } - - /// Assumes nothing stacked on `gz`. Unstacks `gz`. - fn setBlockBody(gz: *GenZir, inst: Zir.Inst.Index) !void { - const astgen = gz.astgen; - const gpa = astgen.gpa; - const body = gz.instructionsSlice(); - const body_len = astgen.countBodyLenAfterFixups(body); - try astgen.extra.ensureUnusedCapacity( - gpa, - @typeInfo(Zir.Inst.Block).Struct.fields.len + body_len, - ); - const zir_datas = astgen.instructions.items(.data); - zir_datas[@intFromEnum(inst)].pl_node.payload_index = astgen.addExtraAssumeCapacity( - Zir.Inst.Block{ .body_len = body_len }, - ); - astgen.appendBodyWithFixups(body); - gz.unstack(); - } - - /// Assumes nothing stacked on `gz`. Unstacks `gz`. 
- fn setTryBody(gz: *GenZir, inst: Zir.Inst.Index, operand: Zir.Inst.Ref) !void { - const astgen = gz.astgen; - const gpa = astgen.gpa; - const body = gz.instructionsSlice(); - const body_len = astgen.countBodyLenAfterFixups(body); - try astgen.extra.ensureUnusedCapacity( - gpa, - @typeInfo(Zir.Inst.Try).Struct.fields.len + body_len, - ); - const zir_datas = astgen.instructions.items(.data); - zir_datas[@intFromEnum(inst)].pl_node.payload_index = astgen.addExtraAssumeCapacity( - Zir.Inst.Try{ - .operand = operand, - .body_len = body_len, - }, - ); - astgen.appendBodyWithFixups(body); - gz.unstack(); - } - - /// Must be called with the following stack set up: - /// * gz (bottom) - /// * align_gz - /// * addrspace_gz - /// * section_gz - /// * cc_gz - /// * ret_gz - /// * body_gz (top) - /// Unstacks all of those except for `gz`. - fn addFunc(gz: *GenZir, args: struct { - src_node: Ast.Node.Index, - lbrace_line: u32 = 0, - lbrace_column: u32 = 0, - param_block: Zir.Inst.Index, - - align_gz: ?*GenZir, - addrspace_gz: ?*GenZir, - section_gz: ?*GenZir, - cc_gz: ?*GenZir, - ret_gz: ?*GenZir, - body_gz: ?*GenZir, - - align_ref: Zir.Inst.Ref, - addrspace_ref: Zir.Inst.Ref, - section_ref: Zir.Inst.Ref, - cc_ref: Zir.Inst.Ref, - ret_ref: Zir.Inst.Ref, - - lib_name: Zir.NullTerminatedString, - noalias_bits: u32, - is_var_args: bool, - is_inferred_error: bool, - is_test: bool, - is_extern: bool, - is_noinline: bool, - }) !Zir.Inst.Ref { - assert(args.src_node != 0); - const astgen = gz.astgen; - const gpa = astgen.gpa; - const ret_ref = if (args.ret_ref == .void_type) .none else args.ret_ref; - const new_index: Zir.Inst.Index = @enumFromInt(astgen.instructions.len); - - try astgen.instructions.ensureUnusedCapacity(gpa, 1); - - var body: []Zir.Inst.Index = &[0]Zir.Inst.Index{}; - var ret_body: []Zir.Inst.Index = &[0]Zir.Inst.Index{}; - var src_locs_and_hash_buffer: [7]u32 = undefined; - var src_locs_and_hash: []u32 = src_locs_and_hash_buffer[0..0]; - if (args.body_gz) |body_gz| { - const tree = astgen.tree; - const node_tags = tree.nodes.items(.tag); - const node_datas = tree.nodes.items(.data); - const token_starts = tree.tokens.items(.start); - const fn_decl = args.src_node; - assert(node_tags[fn_decl] == .fn_decl or node_tags[fn_decl] == .test_decl); - const block = node_datas[fn_decl].rhs; - const rbrace_start = token_starts[tree.lastToken(block)]; - astgen.advanceSourceCursor(rbrace_start); - const rbrace_line: u32 = @intCast(astgen.source_line - gz.decl_line); - const rbrace_column: u32 = @intCast(astgen.source_column); - - const columns = args.lbrace_column | (rbrace_column << 16); - - const proto_hash: std.zig.SrcHash = switch (node_tags[fn_decl]) { - .fn_decl => sig_hash: { - const proto_node = node_datas[fn_decl].lhs; - break :sig_hash std.zig.hashSrc(tree.getNodeSource(proto_node)); - }, - .test_decl => std.zig.hashSrc(""), // tests don't have a prototype - else => unreachable, - }; - const proto_hash_arr: [4]u32 = @bitCast(proto_hash); - - src_locs_and_hash_buffer = .{ - args.lbrace_line, - rbrace_line, - columns, - proto_hash_arr[0], - proto_hash_arr[1], - proto_hash_arr[2], - proto_hash_arr[3], - }; - src_locs_and_hash = &src_locs_and_hash_buffer; - - body = body_gz.instructionsSlice(); - if (args.ret_gz) |ret_gz| - ret_body = ret_gz.instructionsSliceUpto(body_gz); - } else { - if (args.ret_gz) |ret_gz| - ret_body = ret_gz.instructionsSlice(); - } - const body_len = astgen.countBodyLenAfterFixups(body); - - if (args.cc_ref != .none or args.lib_name != .empty or args.is_var_args or 
args.is_test or - args.is_extern or args.align_ref != .none or args.section_ref != .none or - args.addrspace_ref != .none or args.noalias_bits != 0 or args.is_noinline) - { - var align_body: []Zir.Inst.Index = &.{}; - var addrspace_body: []Zir.Inst.Index = &.{}; - var section_body: []Zir.Inst.Index = &.{}; - var cc_body: []Zir.Inst.Index = &.{}; - if (args.ret_gz != null) { - align_body = args.align_gz.?.instructionsSliceUpto(args.addrspace_gz.?); - addrspace_body = args.addrspace_gz.?.instructionsSliceUpto(args.section_gz.?); - section_body = args.section_gz.?.instructionsSliceUpto(args.cc_gz.?); - cc_body = args.cc_gz.?.instructionsSliceUpto(args.ret_gz.?); - } - - try astgen.extra.ensureUnusedCapacity( - gpa, - @typeInfo(Zir.Inst.FuncFancy).Struct.fields.len + - fancyFnExprExtraLen(astgen, align_body, args.align_ref) + - fancyFnExprExtraLen(astgen, addrspace_body, args.addrspace_ref) + - fancyFnExprExtraLen(astgen, section_body, args.section_ref) + - fancyFnExprExtraLen(astgen, cc_body, args.cc_ref) + - fancyFnExprExtraLen(astgen, ret_body, ret_ref) + - body_len + src_locs_and_hash.len + - @intFromBool(args.lib_name != .empty) + - @intFromBool(args.noalias_bits != 0), - ); - const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.FuncFancy{ - .param_block = args.param_block, - .body_len = body_len, - .bits = .{ - .is_var_args = args.is_var_args, - .is_inferred_error = args.is_inferred_error, - .is_test = args.is_test, - .is_extern = args.is_extern, - .is_noinline = args.is_noinline, - .has_lib_name = args.lib_name != .empty, - .has_any_noalias = args.noalias_bits != 0, - - .has_align_ref = args.align_ref != .none, - .has_addrspace_ref = args.addrspace_ref != .none, - .has_section_ref = args.section_ref != .none, - .has_cc_ref = args.cc_ref != .none, - .has_ret_ty_ref = ret_ref != .none, - - .has_align_body = align_body.len != 0, - .has_addrspace_body = addrspace_body.len != 0, - .has_section_body = section_body.len != 0, - .has_cc_body = cc_body.len != 0, - .has_ret_ty_body = ret_body.len != 0, - }, - }); - if (args.lib_name != .empty) { - astgen.extra.appendAssumeCapacity(@intFromEnum(args.lib_name)); - } - - const zir_datas = astgen.instructions.items(.data); - if (align_body.len != 0) { - astgen.extra.appendAssumeCapacity(countBodyLenAfterFixups(astgen, align_body)); - astgen.appendBodyWithFixups(align_body); - const break_extra = zir_datas[@intFromEnum(align_body[align_body.len - 1])].@"break".payload_index; - astgen.extra.items[break_extra + std.meta.fieldIndex(Zir.Inst.Break, "block_inst").?] = - @intFromEnum(new_index); - } else if (args.align_ref != .none) { - astgen.extra.appendAssumeCapacity(@intFromEnum(args.align_ref)); - } - if (addrspace_body.len != 0) { - astgen.extra.appendAssumeCapacity(countBodyLenAfterFixups(astgen, addrspace_body)); - astgen.appendBodyWithFixups(addrspace_body); - const break_extra = - zir_datas[@intFromEnum(addrspace_body[addrspace_body.len - 1])].@"break".payload_index; - astgen.extra.items[break_extra + std.meta.fieldIndex(Zir.Inst.Break, "block_inst").?] 
= - @intFromEnum(new_index); - } else if (args.addrspace_ref != .none) { - astgen.extra.appendAssumeCapacity(@intFromEnum(args.addrspace_ref)); - } - if (section_body.len != 0) { - astgen.extra.appendAssumeCapacity(countBodyLenAfterFixups(astgen, section_body)); - astgen.appendBodyWithFixups(section_body); - const break_extra = - zir_datas[@intFromEnum(section_body[section_body.len - 1])].@"break".payload_index; - astgen.extra.items[break_extra + std.meta.fieldIndex(Zir.Inst.Break, "block_inst").?] = - @intFromEnum(new_index); - } else if (args.section_ref != .none) { - astgen.extra.appendAssumeCapacity(@intFromEnum(args.section_ref)); - } - if (cc_body.len != 0) { - astgen.extra.appendAssumeCapacity(countBodyLenAfterFixups(astgen, cc_body)); - astgen.appendBodyWithFixups(cc_body); - const break_extra = zir_datas[@intFromEnum(cc_body[cc_body.len - 1])].@"break".payload_index; - astgen.extra.items[break_extra + std.meta.fieldIndex(Zir.Inst.Break, "block_inst").?] = - @intFromEnum(new_index); - } else if (args.cc_ref != .none) { - astgen.extra.appendAssumeCapacity(@intFromEnum(args.cc_ref)); - } - if (ret_body.len != 0) { - astgen.extra.appendAssumeCapacity(countBodyLenAfterFixups(astgen, ret_body)); - astgen.appendBodyWithFixups(ret_body); - const break_extra = zir_datas[@intFromEnum(ret_body[ret_body.len - 1])].@"break".payload_index; - astgen.extra.items[break_extra + std.meta.fieldIndex(Zir.Inst.Break, "block_inst").?] = - @intFromEnum(new_index); - } else if (ret_ref != .none) { - astgen.extra.appendAssumeCapacity(@intFromEnum(ret_ref)); - } - - if (args.noalias_bits != 0) { - astgen.extra.appendAssumeCapacity(args.noalias_bits); - } - - astgen.appendBodyWithFixups(body); - astgen.extra.appendSliceAssumeCapacity(src_locs_and_hash); - - // Order is important when unstacking. - if (args.body_gz) |body_gz| body_gz.unstack(); - if (args.ret_gz != null) { - args.ret_gz.?.unstack(); - args.cc_gz.?.unstack(); - args.section_gz.?.unstack(); - args.addrspace_gz.?.unstack(); - args.align_gz.?.unstack(); - } - - try gz.instructions.ensureUnusedCapacity(gpa, 1); - - astgen.instructions.appendAssumeCapacity(.{ - .tag = .func_fancy, - .data = .{ .pl_node = .{ - .src_node = gz.nodeIndexToRelative(args.src_node), - .payload_index = payload_index, - } }, - }); - gz.instructions.appendAssumeCapacity(new_index); - return new_index.toRef(); - } else { - try astgen.extra.ensureUnusedCapacity( - gpa, - @typeInfo(Zir.Inst.Func).Struct.fields.len + 1 + - fancyFnExprExtraLen(astgen, ret_body, ret_ref) + - body_len + src_locs_and_hash.len, - ); - - const ret_body_len = if (ret_body.len != 0) - countBodyLenAfterFixups(astgen, ret_body) - else - @intFromBool(ret_ref != .none); - - const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.Func{ - .param_block = args.param_block, - .ret_body_len = ret_body_len, - .body_len = body_len, - }); - const zir_datas = astgen.instructions.items(.data); - if (ret_body.len != 0) { - astgen.appendBodyWithFixups(ret_body); - - const break_extra = zir_datas[@intFromEnum(ret_body[ret_body.len - 1])].@"break".payload_index; - astgen.extra.items[break_extra + std.meta.fieldIndex(Zir.Inst.Break, "block_inst").?] = - @intFromEnum(new_index); - } else if (ret_ref != .none) { - astgen.extra.appendAssumeCapacity(@intFromEnum(ret_ref)); - } - astgen.appendBodyWithFixups(body); - astgen.extra.appendSliceAssumeCapacity(src_locs_and_hash); - - // Order is important when unstacking. 
- if (args.body_gz) |body_gz| body_gz.unstack(); - if (args.ret_gz) |ret_gz| ret_gz.unstack(); - if (args.cc_gz) |cc_gz| cc_gz.unstack(); - if (args.section_gz) |section_gz| section_gz.unstack(); - if (args.addrspace_gz) |addrspace_gz| addrspace_gz.unstack(); - if (args.align_gz) |align_gz| align_gz.unstack(); - - try gz.instructions.ensureUnusedCapacity(gpa, 1); - - const tag: Zir.Inst.Tag = if (args.is_inferred_error) .func_inferred else .func; - astgen.instructions.appendAssumeCapacity(.{ - .tag = tag, - .data = .{ .pl_node = .{ - .src_node = gz.nodeIndexToRelative(args.src_node), - .payload_index = payload_index, - } }, - }); - gz.instructions.appendAssumeCapacity(new_index); - return new_index.toRef(); - } - } - - fn fancyFnExprExtraLen(astgen: *AstGen, body: []Zir.Inst.Index, ref: Zir.Inst.Ref) u32 { - // In the case of non-empty body, there is one for the body length, - // and then one for each instruction. - return countBodyLenAfterFixups(astgen, body) + @intFromBool(ref != .none); - } - - fn addVar(gz: *GenZir, args: struct { - align_inst: Zir.Inst.Ref, - lib_name: Zir.NullTerminatedString, - var_type: Zir.Inst.Ref, - init: Zir.Inst.Ref, - is_extern: bool, - is_const: bool, - is_threadlocal: bool, - }) !Zir.Inst.Ref { - const astgen = gz.astgen; - const gpa = astgen.gpa; - - try gz.instructions.ensureUnusedCapacity(gpa, 1); - try astgen.instructions.ensureUnusedCapacity(gpa, 1); - - try astgen.extra.ensureUnusedCapacity( - gpa, - @typeInfo(Zir.Inst.ExtendedVar).Struct.fields.len + - @intFromBool(args.lib_name != .empty) + - @intFromBool(args.align_inst != .none) + - @intFromBool(args.init != .none), - ); - const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.ExtendedVar{ - .var_type = args.var_type, - }); - if (args.lib_name != .empty) { - astgen.extra.appendAssumeCapacity(@intFromEnum(args.lib_name)); - } - if (args.align_inst != .none) { - astgen.extra.appendAssumeCapacity(@intFromEnum(args.align_inst)); - } - if (args.init != .none) { - astgen.extra.appendAssumeCapacity(@intFromEnum(args.init)); - } - - const new_index: Zir.Inst.Index = @enumFromInt(astgen.instructions.len); - astgen.instructions.appendAssumeCapacity(.{ - .tag = .extended, - .data = .{ .extended = .{ - .opcode = .variable, - .small = @bitCast(Zir.Inst.ExtendedVar.Small{ - .has_lib_name = args.lib_name != .empty, - .has_align = args.align_inst != .none, - .has_init = args.init != .none, - .is_extern = args.is_extern, - .is_const = args.is_const, - .is_threadlocal = args.is_threadlocal, - }), - .operand = payload_index, - } }, - }); - gz.instructions.appendAssumeCapacity(new_index); - return new_index.toRef(); - } - - fn addInt(gz: *GenZir, integer: u64) !Zir.Inst.Ref { - return gz.add(.{ - .tag = .int, - .data = .{ .int = integer }, - }); - } - - fn addIntBig(gz: *GenZir, limbs: []const std.math.big.Limb) !Zir.Inst.Ref { - const astgen = gz.astgen; - const gpa = astgen.gpa; - try gz.instructions.ensureUnusedCapacity(gpa, 1); - try astgen.instructions.ensureUnusedCapacity(gpa, 1); - try astgen.string_bytes.ensureUnusedCapacity(gpa, @sizeOf(std.math.big.Limb) * limbs.len); - - const new_index: Zir.Inst.Index = @enumFromInt(astgen.instructions.len); - astgen.instructions.appendAssumeCapacity(.{ - .tag = .int_big, - .data = .{ .str = .{ - .start = @enumFromInt(astgen.string_bytes.items.len), - .len = @intCast(limbs.len), - } }, - }); - gz.instructions.appendAssumeCapacity(new_index); - astgen.string_bytes.appendSliceAssumeCapacity(mem.sliceAsBytes(limbs)); - return new_index.toRef(); - } - - fn addFloat(gz: 
*GenZir, number: f64) !Zir.Inst.Ref { - return gz.add(.{ - .tag = .float, - .data = .{ .float = number }, - }); - } - - fn addUnNode( - gz: *GenZir, - tag: Zir.Inst.Tag, - operand: Zir.Inst.Ref, - /// Absolute node index. This function does the conversion to offset from Decl. - src_node: Ast.Node.Index, - ) !Zir.Inst.Ref { - assert(operand != .none); - return gz.add(.{ - .tag = tag, - .data = .{ .un_node = .{ - .operand = operand, - .src_node = gz.nodeIndexToRelative(src_node), - } }, - }); - } - - fn makeUnNode( - gz: *GenZir, - tag: Zir.Inst.Tag, - operand: Zir.Inst.Ref, - /// Absolute node index. This function does the conversion to offset from Decl. - src_node: Ast.Node.Index, - ) !Zir.Inst.Index { - assert(operand != .none); - const new_index: Zir.Inst.Index = @enumFromInt(gz.astgen.instructions.len); - try gz.astgen.instructions.append(gz.astgen.gpa, .{ - .tag = tag, - .data = .{ .un_node = .{ - .operand = operand, - .src_node = gz.nodeIndexToRelative(src_node), - } }, - }); - return new_index; - } - - fn addPlNode( - gz: *GenZir, - tag: Zir.Inst.Tag, - /// Absolute node index. This function does the conversion to offset from Decl. - src_node: Ast.Node.Index, - extra: anytype, - ) !Zir.Inst.Ref { - const gpa = gz.astgen.gpa; - try gz.instructions.ensureUnusedCapacity(gpa, 1); - try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); - - const payload_index = try gz.astgen.addExtra(extra); - const new_index: Zir.Inst.Index = @enumFromInt(gz.astgen.instructions.len); - gz.astgen.instructions.appendAssumeCapacity(.{ - .tag = tag, - .data = .{ .pl_node = .{ - .src_node = gz.nodeIndexToRelative(src_node), - .payload_index = payload_index, - } }, - }); - gz.instructions.appendAssumeCapacity(new_index); - return new_index.toRef(); - } - - fn addPlNodePayloadIndex( - gz: *GenZir, - tag: Zir.Inst.Tag, - /// Absolute node index. This function does the conversion to offset from Decl. - src_node: Ast.Node.Index, - payload_index: u32, - ) !Zir.Inst.Ref { - return try gz.add(.{ - .tag = tag, - .data = .{ .pl_node = .{ - .src_node = gz.nodeIndexToRelative(src_node), - .payload_index = payload_index, - } }, - }); - } - - /// Supports `param_gz` stacked on `gz`. Assumes nothing stacked on `param_gz`. Unstacks `param_gz`. - fn addParam( - gz: *GenZir, - param_gz: *GenZir, - tag: Zir.Inst.Tag, - /// Absolute token index. This function does the conversion to Decl offset. 
- abs_tok_index: Ast.TokenIndex, - name: Zir.NullTerminatedString, - first_doc_comment: ?Ast.TokenIndex, - ) !Zir.Inst.Index { - const gpa = gz.astgen.gpa; - const param_body = param_gz.instructionsSlice(); - const body_len = gz.astgen.countBodyLenAfterFixups(param_body); - try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); - try gz.astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.Param).Struct.fields.len + body_len); - - const doc_comment_index = if (first_doc_comment) |first| - try gz.astgen.docCommentAsStringFromFirst(abs_tok_index, first) - else - .empty; - - const payload_index = gz.astgen.addExtraAssumeCapacity(Zir.Inst.Param{ - .name = name, - .doc_comment = doc_comment_index, - .body_len = @intCast(body_len), - }); - gz.astgen.appendBodyWithFixups(param_body); - param_gz.unstack(); - - const new_index: Zir.Inst.Index = @enumFromInt(gz.astgen.instructions.len); - gz.astgen.instructions.appendAssumeCapacity(.{ - .tag = tag, - .data = .{ .pl_tok = .{ - .src_tok = gz.tokenIndexToRelative(abs_tok_index), - .payload_index = payload_index, - } }, - }); - gz.instructions.appendAssumeCapacity(new_index); - return new_index; - } - - fn addExtendedPayload(gz: *GenZir, opcode: Zir.Inst.Extended, extra: anytype) !Zir.Inst.Ref { - return addExtendedPayloadSmall(gz, opcode, undefined, extra); - } - - fn addExtendedPayloadSmall( - gz: *GenZir, - opcode: Zir.Inst.Extended, - small: u16, - extra: anytype, - ) !Zir.Inst.Ref { - const gpa = gz.astgen.gpa; - - try gz.instructions.ensureUnusedCapacity(gpa, 1); - try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); - - const payload_index = try gz.astgen.addExtra(extra); - const new_index: Zir.Inst.Index = @enumFromInt(gz.astgen.instructions.len); - gz.astgen.instructions.appendAssumeCapacity(.{ - .tag = .extended, - .data = .{ .extended = .{ - .opcode = opcode, - .small = small, - .operand = payload_index, - } }, - }); - gz.instructions.appendAssumeCapacity(new_index); - return new_index.toRef(); - } - - fn addExtendedMultiOp( - gz: *GenZir, - opcode: Zir.Inst.Extended, - node: Ast.Node.Index, - operands: []const Zir.Inst.Ref, - ) !Zir.Inst.Ref { - const astgen = gz.astgen; - const gpa = astgen.gpa; - - try gz.instructions.ensureUnusedCapacity(gpa, 1); - try astgen.instructions.ensureUnusedCapacity(gpa, 1); - try astgen.extra.ensureUnusedCapacity( - gpa, - @typeInfo(Zir.Inst.NodeMultiOp).Struct.fields.len + operands.len, - ); - - const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.NodeMultiOp{ - .src_node = gz.nodeIndexToRelative(node), - }); - const new_index: Zir.Inst.Index = @enumFromInt(astgen.instructions.len); - astgen.instructions.appendAssumeCapacity(.{ - .tag = .extended, - .data = .{ .extended = .{ - .opcode = opcode, - .small = @intCast(operands.len), - .operand = payload_index, - } }, - }); - gz.instructions.appendAssumeCapacity(new_index); - astgen.appendRefsAssumeCapacity(operands); - return new_index.toRef(); - } - - fn addExtendedMultiOpPayloadIndex( - gz: *GenZir, - opcode: Zir.Inst.Extended, - payload_index: u32, - trailing_len: usize, - ) !Zir.Inst.Ref { - const astgen = gz.astgen; - const gpa = astgen.gpa; - - try gz.instructions.ensureUnusedCapacity(gpa, 1); - try astgen.instructions.ensureUnusedCapacity(gpa, 1); - const new_index: Zir.Inst.Index = @enumFromInt(astgen.instructions.len); - astgen.instructions.appendAssumeCapacity(.{ - .tag = .extended, - .data = .{ .extended = .{ - .opcode = opcode, - .small = @intCast(trailing_len), - .operand = payload_index, - } }, - }); - 
gz.instructions.appendAssumeCapacity(new_index); - return new_index.toRef(); - } - - fn addUnTok( - gz: *GenZir, - tag: Zir.Inst.Tag, - operand: Zir.Inst.Ref, - /// Absolute token index. This function does the conversion to Decl offset. - abs_tok_index: Ast.TokenIndex, - ) !Zir.Inst.Ref { - assert(operand != .none); - return gz.add(.{ - .tag = tag, - .data = .{ .un_tok = .{ - .operand = operand, - .src_tok = gz.tokenIndexToRelative(abs_tok_index), - } }, - }); - } - - fn makeUnTok( - gz: *GenZir, - tag: Zir.Inst.Tag, - operand: Zir.Inst.Ref, - /// Absolute token index. This function does the conversion to Decl offset. - abs_tok_index: Ast.TokenIndex, - ) !Zir.Inst.Index { - const astgen = gz.astgen; - const new_index: Zir.Inst.Index = @enumFromInt(astgen.instructions.len); - assert(operand != .none); - try astgen.instructions.append(astgen.gpa, .{ - .tag = tag, - .data = .{ .un_tok = .{ - .operand = operand, - .src_tok = gz.tokenIndexToRelative(abs_tok_index), - } }, - }); - return new_index; - } - - fn addStrTok( - gz: *GenZir, - tag: Zir.Inst.Tag, - str_index: Zir.NullTerminatedString, - /// Absolute token index. This function does the conversion to Decl offset. - abs_tok_index: Ast.TokenIndex, - ) !Zir.Inst.Ref { - return gz.add(.{ - .tag = tag, - .data = .{ .str_tok = .{ - .start = str_index, - .src_tok = gz.tokenIndexToRelative(abs_tok_index), - } }, - }); - } - - fn addSaveErrRetIndex( - gz: *GenZir, - cond: union(enum) { - always: void, - if_of_error_type: Zir.Inst.Ref, - }, - ) !Zir.Inst.Index { - return gz.addAsIndex(.{ - .tag = .save_err_ret_index, - .data = .{ .save_err_ret_index = .{ - .operand = switch (cond) { - .if_of_error_type => |x| x, - else => .none, - }, - } }, - }); - } - - const BranchTarget = union(enum) { - ret, - block: Zir.Inst.Index, - }; - - fn addRestoreErrRetIndex( - gz: *GenZir, - bt: BranchTarget, - cond: union(enum) { - always: void, - if_non_error: Zir.Inst.Ref, - }, - src_node: Ast.Node.Index, - ) !Zir.Inst.Index { - switch (cond) { - .always => return gz.addAsIndex(.{ - .tag = .restore_err_ret_index_unconditional, - .data = .{ .un_node = .{ - .operand = switch (bt) { - .ret => .none, - .block => |b| b.toRef(), - }, - .src_node = gz.nodeIndexToRelative(src_node), - } }, - }), - .if_non_error => |operand| switch (bt) { - .ret => return gz.addAsIndex(.{ - .tag = .restore_err_ret_index_fn_entry, - .data = .{ .un_node = .{ - .operand = operand, - .src_node = gz.nodeIndexToRelative(src_node), - } }, - }), - .block => |block| return (try gz.addExtendedPayload( - .restore_err_ret_index, - Zir.Inst.RestoreErrRetIndex{ - .src_node = gz.nodeIndexToRelative(src_node), - .block = block.toRef(), - .operand = operand, - }, - )).toIndex().?, - }, - } - } - - fn addBreak( - gz: *GenZir, - tag: Zir.Inst.Tag, - block_inst: Zir.Inst.Index, - operand: Zir.Inst.Ref, - ) !Zir.Inst.Index { - const gpa = gz.astgen.gpa; - try gz.instructions.ensureUnusedCapacity(gpa, 1); - - const new_index = try gz.makeBreak(tag, block_inst, operand); - gz.instructions.appendAssumeCapacity(new_index); - return new_index; - } - - fn makeBreak( - gz: *GenZir, - tag: Zir.Inst.Tag, - block_inst: Zir.Inst.Index, - operand: Zir.Inst.Ref, - ) !Zir.Inst.Index { - return gz.makeBreakCommon(tag, block_inst, operand, null); - } - - fn addBreakWithSrcNode( - gz: *GenZir, - tag: Zir.Inst.Tag, - block_inst: Zir.Inst.Index, - operand: Zir.Inst.Ref, - operand_src_node: Ast.Node.Index, - ) !Zir.Inst.Index { - const gpa = gz.astgen.gpa; - try gz.instructions.ensureUnusedCapacity(gpa, 1); - - const new_index = 
try gz.makeBreakWithSrcNode(tag, block_inst, operand, operand_src_node); - gz.instructions.appendAssumeCapacity(new_index); - return new_index; - } - - fn makeBreakWithSrcNode( - gz: *GenZir, - tag: Zir.Inst.Tag, - block_inst: Zir.Inst.Index, - operand: Zir.Inst.Ref, - operand_src_node: Ast.Node.Index, - ) !Zir.Inst.Index { - return gz.makeBreakCommon(tag, block_inst, operand, operand_src_node); - } - - fn makeBreakCommon( - gz: *GenZir, - tag: Zir.Inst.Tag, - block_inst: Zir.Inst.Index, - operand: Zir.Inst.Ref, - operand_src_node: ?Ast.Node.Index, - ) !Zir.Inst.Index { - const gpa = gz.astgen.gpa; - try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); - try gz.astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.Break).Struct.fields.len); - - const new_index: Zir.Inst.Index = @enumFromInt(gz.astgen.instructions.len); - gz.astgen.instructions.appendAssumeCapacity(.{ - .tag = tag, - .data = .{ .@"break" = .{ - .operand = operand, - .payload_index = gz.astgen.addExtraAssumeCapacity(Zir.Inst.Break{ - .operand_src_node = if (operand_src_node) |src_node| - gz.nodeIndexToRelative(src_node) - else - Zir.Inst.Break.no_src_node, - .block_inst = block_inst, - }), - } }, - }); - return new_index; - } - - fn addBin( - gz: *GenZir, - tag: Zir.Inst.Tag, - lhs: Zir.Inst.Ref, - rhs: Zir.Inst.Ref, - ) !Zir.Inst.Ref { - assert(lhs != .none); - assert(rhs != .none); - return gz.add(.{ - .tag = tag, - .data = .{ .bin = .{ - .lhs = lhs, - .rhs = rhs, - } }, - }); - } - - fn addDefer(gz: *GenZir, index: u32, len: u32) !void { - _ = try gz.add(.{ - .tag = .@"defer", - .data = .{ .@"defer" = .{ - .index = index, - .len = len, - } }, - }); - } - - fn addDecl( - gz: *GenZir, - tag: Zir.Inst.Tag, - decl_index: u32, - src_node: Ast.Node.Index, - ) !Zir.Inst.Ref { - return gz.add(.{ - .tag = tag, - .data = .{ .pl_node = .{ - .src_node = gz.nodeIndexToRelative(src_node), - .payload_index = decl_index, - } }, - }); - } - - fn addNode( - gz: *GenZir, - tag: Zir.Inst.Tag, - /// Absolute node index. This function does the conversion to offset from Decl. - src_node: Ast.Node.Index, - ) !Zir.Inst.Ref { - return gz.add(.{ - .tag = tag, - .data = .{ .node = gz.nodeIndexToRelative(src_node) }, - }); - } - - fn addInstNode( - gz: *GenZir, - tag: Zir.Inst.Tag, - inst: Zir.Inst.Index, - /// Absolute node index. This function does the conversion to offset from Decl. - src_node: Ast.Node.Index, - ) !Zir.Inst.Ref { - return gz.add(.{ - .tag = tag, - .data = .{ .inst_node = .{ - .inst = inst, - .src_node = gz.nodeIndexToRelative(src_node), - } }, - }); - } - - fn addNodeExtended( - gz: *GenZir, - opcode: Zir.Inst.Extended, - /// Absolute node index. This function does the conversion to offset from Decl. - src_node: Ast.Node.Index, - ) !Zir.Inst.Ref { - return gz.add(.{ - .tag = .extended, - .data = .{ .extended = .{ - .opcode = opcode, - .small = undefined, - .operand = @bitCast(gz.nodeIndexToRelative(src_node)), - } }, - }); - } - - fn addAllocExtended( - gz: *GenZir, - args: struct { - /// Absolute node index. This function does the conversion to offset from Decl. 
- node: Ast.Node.Index, - type_inst: Zir.Inst.Ref, - align_inst: Zir.Inst.Ref, - is_const: bool, - is_comptime: bool, - }, - ) !Zir.Inst.Ref { - const astgen = gz.astgen; - const gpa = astgen.gpa; - - try gz.instructions.ensureUnusedCapacity(gpa, 1); - try astgen.instructions.ensureUnusedCapacity(gpa, 1); - try astgen.extra.ensureUnusedCapacity( - gpa, - @typeInfo(Zir.Inst.AllocExtended).Struct.fields.len + - @intFromBool(args.type_inst != .none) + - @intFromBool(args.align_inst != .none), - ); - const payload_index = gz.astgen.addExtraAssumeCapacity(Zir.Inst.AllocExtended{ - .src_node = gz.nodeIndexToRelative(args.node), - }); - if (args.type_inst != .none) { - astgen.extra.appendAssumeCapacity(@intFromEnum(args.type_inst)); - } - if (args.align_inst != .none) { - astgen.extra.appendAssumeCapacity(@intFromEnum(args.align_inst)); - } - - const has_type: u4 = @intFromBool(args.type_inst != .none); - const has_align: u4 = @intFromBool(args.align_inst != .none); - const is_const: u4 = @intFromBool(args.is_const); - const is_comptime: u4 = @intFromBool(args.is_comptime); - const small: u16 = has_type | (has_align << 1) | (is_const << 2) | (is_comptime << 3); - - const new_index: Zir.Inst.Index = @enumFromInt(astgen.instructions.len); - astgen.instructions.appendAssumeCapacity(.{ - .tag = .extended, - .data = .{ .extended = .{ - .opcode = .alloc, - .small = small, - .operand = payload_index, - } }, - }); - gz.instructions.appendAssumeCapacity(new_index); - return new_index.toRef(); - } - - fn addAsm( - gz: *GenZir, - args: struct { - tag: Zir.Inst.Extended, - /// Absolute node index. This function does the conversion to offset from Decl. - node: Ast.Node.Index, - asm_source: Zir.NullTerminatedString, - output_type_bits: u32, - is_volatile: bool, - outputs: []const Zir.Inst.Asm.Output, - inputs: []const Zir.Inst.Asm.Input, - clobbers: []const u32, - }, - ) !Zir.Inst.Ref { - const astgen = gz.astgen; - const gpa = astgen.gpa; - - try gz.instructions.ensureUnusedCapacity(gpa, 1); - try astgen.instructions.ensureUnusedCapacity(gpa, 1); - try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.Asm).Struct.fields.len + - args.outputs.len * @typeInfo(Zir.Inst.Asm.Output).Struct.fields.len + - args.inputs.len * @typeInfo(Zir.Inst.Asm.Input).Struct.fields.len + - args.clobbers.len); - - const payload_index = gz.astgen.addExtraAssumeCapacity(Zir.Inst.Asm{ - .src_node = gz.nodeIndexToRelative(args.node), - .asm_source = args.asm_source, - .output_type_bits = args.output_type_bits, - }); - for (args.outputs) |output| { - _ = gz.astgen.addExtraAssumeCapacity(output); - } - for (args.inputs) |input| { - _ = gz.astgen.addExtraAssumeCapacity(input); - } - gz.astgen.extra.appendSliceAssumeCapacity(args.clobbers); - - // * 0b00000000_000XXXXX - `outputs_len`. - // * 0b000000XX_XXX00000 - `inputs_len`. - // * 0b0XXXXX00_00000000 - `clobbers_len`. - // * 0bX0000000_00000000 - is volatile - const small: u16 = @as(u16, @intCast(args.outputs.len)) | - @as(u16, @intCast(args.inputs.len << 5)) | - @as(u16, @intCast(args.clobbers.len << 10)) | - (@as(u16, @intFromBool(args.is_volatile)) << 15); - - const new_index: Zir.Inst.Index = @enumFromInt(astgen.instructions.len); - astgen.instructions.appendAssumeCapacity(.{ - .tag = .extended, - .data = .{ .extended = .{ - .opcode = args.tag, - .small = small, - .operand = payload_index, - } }, - }); - gz.instructions.appendAssumeCapacity(new_index); - return new_index.toRef(); - } - - /// Note that this returns a `Zir.Inst.Index` not a ref. 
- /// Does *not* append the block instruction to the scope. - /// Leaves the `payload_index` field undefined. - fn makeBlockInst(gz: *GenZir, tag: Zir.Inst.Tag, node: Ast.Node.Index) !Zir.Inst.Index { - const new_index: Zir.Inst.Index = @enumFromInt(gz.astgen.instructions.len); - const gpa = gz.astgen.gpa; - try gz.astgen.instructions.append(gpa, .{ - .tag = tag, - .data = .{ .pl_node = .{ - .src_node = gz.nodeIndexToRelative(node), - .payload_index = undefined, - } }, - }); - return new_index; - } - - /// Note that this returns a `Zir.Inst.Index` not a ref. - /// Leaves the `payload_index` field undefined. - fn addCondBr(gz: *GenZir, tag: Zir.Inst.Tag, node: Ast.Node.Index) !Zir.Inst.Index { - const gpa = gz.astgen.gpa; - try gz.instructions.ensureUnusedCapacity(gpa, 1); - const new_index: Zir.Inst.Index = @enumFromInt(gz.astgen.instructions.len); - try gz.astgen.instructions.append(gpa, .{ - .tag = tag, - .data = .{ .pl_node = .{ - .src_node = gz.nodeIndexToRelative(node), - .payload_index = undefined, - } }, - }); - gz.instructions.appendAssumeCapacity(new_index); - return new_index; - } - - fn setStruct(gz: *GenZir, inst: Zir.Inst.Index, args: struct { - src_node: Ast.Node.Index, - fields_len: u32, - decls_len: u32, - backing_int_ref: Zir.Inst.Ref, - backing_int_body_len: u32, - layout: std.builtin.Type.ContainerLayout, - known_non_opv: bool, - known_comptime_only: bool, - is_tuple: bool, - any_comptime_fields: bool, - any_default_inits: bool, - any_aligned_fields: bool, - fields_hash: std.zig.SrcHash, - }) !void { - const astgen = gz.astgen; - const gpa = astgen.gpa; - - // Node 0 is valid for the root `struct_decl` of a file! - assert(args.src_node != 0 or gz.parent.tag == .top); - - const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash); - - try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.StructDecl).Struct.fields.len + 4); - const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.StructDecl{ - .fields_hash_0 = fields_hash_arr[0], - .fields_hash_1 = fields_hash_arr[1], - .fields_hash_2 = fields_hash_arr[2], - .fields_hash_3 = fields_hash_arr[3], - .src_node = gz.nodeIndexToRelative(args.src_node), - }); - - if (args.fields_len != 0) { - astgen.extra.appendAssumeCapacity(args.fields_len); - } - if (args.decls_len != 0) { - astgen.extra.appendAssumeCapacity(args.decls_len); - } - if (args.backing_int_ref != .none) { - astgen.extra.appendAssumeCapacity(args.backing_int_body_len); - if (args.backing_int_body_len == 0) { - astgen.extra.appendAssumeCapacity(@intFromEnum(args.backing_int_ref)); - } - } - astgen.instructions.set(@intFromEnum(inst), .{ - .tag = .extended, - .data = .{ .extended = .{ - .opcode = .struct_decl, - .small = @bitCast(Zir.Inst.StructDecl.Small{ - .has_fields_len = args.fields_len != 0, - .has_decls_len = args.decls_len != 0, - .has_backing_int = args.backing_int_ref != .none, - .known_non_opv = args.known_non_opv, - .known_comptime_only = args.known_comptime_only, - .is_tuple = args.is_tuple, - .name_strategy = gz.anon_name_strategy, - .layout = args.layout, - .any_comptime_fields = args.any_comptime_fields, - .any_default_inits = args.any_default_inits, - .any_aligned_fields = args.any_aligned_fields, - }), - .operand = payload_index, - } }, - }); - } - - fn setUnion(gz: *GenZir, inst: Zir.Inst.Index, args: struct { - src_node: Ast.Node.Index, - tag_type: Zir.Inst.Ref, - body_len: u32, - fields_len: u32, - decls_len: u32, - layout: std.builtin.Type.ContainerLayout, - auto_enum_tag: bool, - any_aligned_fields: bool, - fields_hash: 
std.zig.SrcHash, - }) !void { - const astgen = gz.astgen; - const gpa = astgen.gpa; - - assert(args.src_node != 0); - - const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash); - - try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.UnionDecl).Struct.fields.len + 4); - const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.UnionDecl{ - .fields_hash_0 = fields_hash_arr[0], - .fields_hash_1 = fields_hash_arr[1], - .fields_hash_2 = fields_hash_arr[2], - .fields_hash_3 = fields_hash_arr[3], - .src_node = gz.nodeIndexToRelative(args.src_node), - }); - - if (args.tag_type != .none) { - astgen.extra.appendAssumeCapacity(@intFromEnum(args.tag_type)); - } - if (args.body_len != 0) { - astgen.extra.appendAssumeCapacity(args.body_len); - } - if (args.fields_len != 0) { - astgen.extra.appendAssumeCapacity(args.fields_len); - } - if (args.decls_len != 0) { - astgen.extra.appendAssumeCapacity(args.decls_len); - } - astgen.instructions.set(@intFromEnum(inst), .{ - .tag = .extended, - .data = .{ .extended = .{ - .opcode = .union_decl, - .small = @bitCast(Zir.Inst.UnionDecl.Small{ - .has_tag_type = args.tag_type != .none, - .has_body_len = args.body_len != 0, - .has_fields_len = args.fields_len != 0, - .has_decls_len = args.decls_len != 0, - .name_strategy = gz.anon_name_strategy, - .layout = args.layout, - .auto_enum_tag = args.auto_enum_tag, - .any_aligned_fields = args.any_aligned_fields, - }), - .operand = payload_index, - } }, - }); - } - - fn setEnum(gz: *GenZir, inst: Zir.Inst.Index, args: struct { - src_node: Ast.Node.Index, - tag_type: Zir.Inst.Ref, - body_len: u32, - fields_len: u32, - decls_len: u32, - nonexhaustive: bool, - fields_hash: std.zig.SrcHash, - }) !void { - const astgen = gz.astgen; - const gpa = astgen.gpa; - - assert(args.src_node != 0); - - const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash); - - try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.EnumDecl).Struct.fields.len + 4); - const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.EnumDecl{ - .fields_hash_0 = fields_hash_arr[0], - .fields_hash_1 = fields_hash_arr[1], - .fields_hash_2 = fields_hash_arr[2], - .fields_hash_3 = fields_hash_arr[3], - .src_node = gz.nodeIndexToRelative(args.src_node), - }); - - if (args.tag_type != .none) { - astgen.extra.appendAssumeCapacity(@intFromEnum(args.tag_type)); - } - if (args.body_len != 0) { - astgen.extra.appendAssumeCapacity(args.body_len); - } - if (args.fields_len != 0) { - astgen.extra.appendAssumeCapacity(args.fields_len); - } - if (args.decls_len != 0) { - astgen.extra.appendAssumeCapacity(args.decls_len); - } - astgen.instructions.set(@intFromEnum(inst), .{ - .tag = .extended, - .data = .{ .extended = .{ - .opcode = .enum_decl, - .small = @bitCast(Zir.Inst.EnumDecl.Small{ - .has_tag_type = args.tag_type != .none, - .has_body_len = args.body_len != 0, - .has_fields_len = args.fields_len != 0, - .has_decls_len = args.decls_len != 0, - .name_strategy = gz.anon_name_strategy, - .nonexhaustive = args.nonexhaustive, - }), - .operand = payload_index, - } }, - }); - } - - fn setOpaque(gz: *GenZir, inst: Zir.Inst.Index, args: struct { - src_node: Ast.Node.Index, - decls_len: u32, - }) !void { - const astgen = gz.astgen; - const gpa = astgen.gpa; - - assert(args.src_node != 0); - - try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.OpaqueDecl).Struct.fields.len + 1); - const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.OpaqueDecl{ - .src_node = gz.nodeIndexToRelative(args.src_node), - }); - - if (args.decls_len != 0) { - 
astgen.extra.appendAssumeCapacity(args.decls_len);
-        }
-        astgen.instructions.set(@intFromEnum(inst), .{
-            .tag = .extended,
-            .data = .{ .extended = .{
-                .opcode = .opaque_decl,
-                .small = @bitCast(Zir.Inst.OpaqueDecl.Small{
-                    .has_decls_len = args.decls_len != 0,
-                    .name_strategy = gz.anon_name_strategy,
-                }),
-                .operand = payload_index,
-            } },
-        });
-    }
-
-    fn add(gz: *GenZir, inst: Zir.Inst) !Zir.Inst.Ref {
-        return (try gz.addAsIndex(inst)).toRef();
-    }
-
-    fn addAsIndex(gz: *GenZir, inst: Zir.Inst) !Zir.Inst.Index {
-        const gpa = gz.astgen.gpa;
-        try gz.instructions.ensureUnusedCapacity(gpa, 1);
-        try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1);
-
-        const new_index: Zir.Inst.Index = @enumFromInt(gz.astgen.instructions.len);
-        gz.astgen.instructions.appendAssumeCapacity(inst);
-        gz.instructions.appendAssumeCapacity(new_index);
-        return new_index;
-    }
-
-    fn reserveInstructionIndex(gz: *GenZir) !Zir.Inst.Index {
-        const gpa = gz.astgen.gpa;
-        try gz.instructions.ensureUnusedCapacity(gpa, 1);
-        try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1);
-
-        const new_index: Zir.Inst.Index = @enumFromInt(gz.astgen.instructions.len);
-        gz.astgen.instructions.len += 1;
-        gz.instructions.appendAssumeCapacity(new_index);
-        return new_index;
-    }
-
-    fn addRet(gz: *GenZir, ri: ResultInfo, operand: Zir.Inst.Ref, node: Ast.Node.Index) !void {
-        switch (ri.rl) {
-            .ptr => |ptr_res| _ = try gz.addUnNode(.ret_load, ptr_res.inst, node),
-            .coerced_ty => _ = try gz.addUnNode(.ret_node, operand, node),
-            else => unreachable,
-        }
-    }
-
-    fn addNamespaceCaptures(gz: *GenZir, namespace: *Scope.Namespace) !void {
-        if (namespace.captures.count() > 0) {
-            try gz.instructions.ensureUnusedCapacity(gz.astgen.gpa, namespace.captures.count());
-            for (namespace.captures.values()) |capture| {
-                gz.instructions.appendAssumeCapacity(capture);
-            }
-        }
-    }
-
-    fn addDbgVar(gz: *GenZir, tag: Zir.Inst.Tag, name: Zir.NullTerminatedString, inst: Zir.Inst.Ref) !void {
-        if (gz.is_comptime) return;
-
-        _ = try gz.add(.{ .tag = tag, .data = .{
-            .str_op = .{
-                .str = name,
-                .operand = inst,
-            },
-        } });
-    }
-};
-
-/// This can only be for short-lived references; the memory becomes invalidated
-/// when another string is added.
-fn nullTerminatedString(astgen: AstGen, index: Zir.NullTerminatedString) [*:0]const u8 {
-    return @ptrCast(astgen.string_bytes.items[@intFromEnum(index)..]);
-}
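
// A minimal sketch of the outward walk that `detectLocalShadowing` (below)
// performs, assuming a freshly declared identifier inside nested scopes:
//
//     var s = scope;
//     while (true) switch (s.tag) {
//         .local_val, .local_ptr => { ... compare the stored name; report ... },
//         .namespace, .enum_namespace => { ... check the Namespace.decls map ... },
//         .gen_zir => s = s.cast(GenZir).?.parent, // crossing a block boundary
//         .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent,
//         .top => break, // ran out of enclosing scopes
//     };
//
// Once the walk crosses a block or namespace boundary, the diagnostic switches
// from "redeclaration" to "shadows ... from outer scope", as in the body below.

-/// Local variable shadowing detection, including function parameters.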
-fn detectLocalShadowing( - astgen: *AstGen, - scope: *Scope, - ident_name: Zir.NullTerminatedString, - name_token: Ast.TokenIndex, - token_bytes: []const u8, - id_cat: Scope.IdCat, -) !void { - const gpa = astgen.gpa; - if (token_bytes[0] != '@' and isPrimitive(token_bytes)) { - return astgen.failTokNotes(name_token, "name shadows primitive '{s}'", .{ - token_bytes, - }, &[_]u32{ - try astgen.errNoteTok(name_token, "consider using @\"{s}\" to disambiguate", .{ - token_bytes, - }), - }); - } - - var s = scope; - var outer_scope = false; - while (true) switch (s.tag) { - .local_val => { - const local_val = s.cast(Scope.LocalVal).?; - if (local_val.name == ident_name) { - const name_slice = mem.span(astgen.nullTerminatedString(ident_name)); - const name = try gpa.dupe(u8, name_slice); - defer gpa.free(name); - if (outer_scope) { - return astgen.failTokNotes(name_token, "{s} '{s}' shadows {s} from outer scope", .{ - @tagName(id_cat), name, @tagName(local_val.id_cat), - }, &[_]u32{ - try astgen.errNoteTok( - local_val.token_src, - "previous declaration here", - .{}, - ), - }); - } - return astgen.failTokNotes(name_token, "redeclaration of {s} '{s}'", .{ - @tagName(local_val.id_cat), name, - }, &[_]u32{ - try astgen.errNoteTok( - local_val.token_src, - "previous declaration here", - .{}, - ), - }); - } - s = local_val.parent; - }, - .local_ptr => { - const local_ptr = s.cast(Scope.LocalPtr).?; - if (local_ptr.name == ident_name) { - const name_slice = mem.span(astgen.nullTerminatedString(ident_name)); - const name = try gpa.dupe(u8, name_slice); - defer gpa.free(name); - if (outer_scope) { - return astgen.failTokNotes(name_token, "{s} '{s}' shadows {s} from outer scope", .{ - @tagName(id_cat), name, @tagName(local_ptr.id_cat), - }, &[_]u32{ - try astgen.errNoteTok( - local_ptr.token_src, - "previous declaration here", - .{}, - ), - }); - } - return astgen.failTokNotes(name_token, "redeclaration of {s} '{s}'", .{ - @tagName(local_ptr.id_cat), name, - }, &[_]u32{ - try astgen.errNoteTok( - local_ptr.token_src, - "previous declaration here", - .{}, - ), - }); - } - s = local_ptr.parent; - }, - .namespace, .enum_namespace => { - outer_scope = true; - const ns = s.cast(Scope.Namespace).?; - const decl_node = ns.decls.get(ident_name) orelse { - s = ns.parent; - continue; - }; - const name_slice = mem.span(astgen.nullTerminatedString(ident_name)); - const name = try gpa.dupe(u8, name_slice); - defer gpa.free(name); - return astgen.failTokNotes(name_token, "{s} shadows declaration of '{s}'", .{ - @tagName(id_cat), name, - }, &[_]u32{ - try astgen.errNoteNode(decl_node, "declared here", .{}), - }); - }, - .gen_zir => { - s = s.cast(GenZir).?.parent; - outer_scope = true; - }, - .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent, - .top => break, - }; -} - -const LineColumn = struct { u32, u32 }; - -/// Advances the source cursor to the main token of `node` if not in comptime scope. -/// Usually paired with `emitDbgStmt`. -fn maybeAdvanceSourceCursorToMainToken(gz: *GenZir, node: Ast.Node.Index) LineColumn { - if (gz.is_comptime) return .{ gz.astgen.source_line - gz.decl_line, gz.astgen.source_column }; - - const tree = gz.astgen.tree; - const token_starts = tree.tokens.items(.start); - const main_tokens = tree.nodes.items(.main_token); - const node_start = token_starts[main_tokens[node]]; - gz.astgen.advanceSourceCursor(node_start); - - return .{ gz.astgen.source_line - gz.decl_line, gz.astgen.source_column }; -} - -/// Advances the source cursor to the beginning of `node`. 
-fn advanceSourceCursorToNode(astgen: *AstGen, node: Ast.Node.Index) void { - const tree = astgen.tree; - const token_starts = tree.tokens.items(.start); - const node_start = token_starts[tree.firstToken(node)]; - astgen.advanceSourceCursor(node_start); -} - -/// Advances the source cursor to an absolute byte offset `end` in the file. -fn advanceSourceCursor(astgen: *AstGen, end: usize) void { - const source = astgen.tree.source; - var i = astgen.source_offset; - var line = astgen.source_line; - var column = astgen.source_column; - assert(i <= end); - while (i < end) : (i += 1) { - if (source[i] == '\n') { - line += 1; - column = 0; - } else { - column += 1; - } - } - astgen.source_offset = i; - astgen.source_line = line; - astgen.source_column = column; -} - -fn scanDecls(astgen: *AstGen, namespace: *Scope.Namespace, members: []const Ast.Node.Index) !u32 { - const gpa = astgen.gpa; - const tree = astgen.tree; - const node_tags = tree.nodes.items(.tag); - const main_tokens = tree.nodes.items(.main_token); - const token_tags = tree.tokens.items(.tag); - var decl_count: u32 = 0; - for (members) |member_node| { - const name_token = switch (node_tags[member_node]) { - .global_var_decl, - .local_var_decl, - .simple_var_decl, - .aligned_var_decl, - => blk: { - decl_count += 1; - break :blk main_tokens[member_node] + 1; - }, - - .fn_proto_simple, - .fn_proto_multi, - .fn_proto_one, - .fn_proto, - .fn_decl, - => blk: { - decl_count += 1; - const ident = main_tokens[member_node] + 1; - if (token_tags[ident] != .identifier) { - switch (astgen.failNode(member_node, "missing function name", .{})) { - error.AnalysisFail => continue, - error.OutOfMemory => return error.OutOfMemory, - } - } - break :blk ident; - }, - - .@"comptime", .@"usingnamespace", .test_decl => { - decl_count += 1; - continue; - }, - - else => continue, - }; - - const token_bytes = astgen.tree.tokenSlice(name_token); - if (token_bytes[0] != '@' and isPrimitive(token_bytes)) { - switch (astgen.failTokNotes(name_token, "name shadows primitive '{s}'", .{ - token_bytes, - }, &[_]u32{ - try astgen.errNoteTok(name_token, "consider using @\"{s}\" to disambiguate", .{ - token_bytes, - }), - })) { - error.AnalysisFail => continue, - error.OutOfMemory => return error.OutOfMemory, - } - } - - const name_str_index = try astgen.identAsString(name_token); - const gop = try namespace.decls.getOrPut(gpa, name_str_index); - if (gop.found_existing) { - const name = try gpa.dupe(u8, mem.span(astgen.nullTerminatedString(name_str_index))); - defer gpa.free(name); - switch (astgen.failNodeNotes(member_node, "redeclaration of '{s}'", .{ - name, - }, &[_]u32{ - try astgen.errNoteNode(gop.value_ptr.*, "other declaration here", .{}), - })) { - error.AnalysisFail => continue, - error.OutOfMemory => return error.OutOfMemory, - } - } - - var s = namespace.parent; - while (true) switch (s.tag) { - .local_val => { - const local_val = s.cast(Scope.LocalVal).?; - if (local_val.name == name_str_index) { - return astgen.failTokNotes(name_token, "declaration '{s}' shadows {s} from outer scope", .{ - token_bytes, @tagName(local_val.id_cat), - }, &[_]u32{ - try astgen.errNoteTok( - local_val.token_src, - "previous declaration here", - .{}, - ), - }); - } - s = local_val.parent; - }, - .local_ptr => { - const local_ptr = s.cast(Scope.LocalPtr).?; - if (local_ptr.name == name_str_index) { - return astgen.failTokNotes(name_token, "declaration '{s}' shadows {s} from outer scope", .{ - token_bytes, @tagName(local_ptr.id_cat), - }, &[_]u32{ - try astgen.errNoteTok( - 
local_ptr.token_src, - "previous declaration here", - .{}, - ), - }); - } - s = local_ptr.parent; - }, - .namespace, .enum_namespace => s = s.cast(Scope.Namespace).?.parent, - .gen_zir => s = s.cast(GenZir).?.parent, - .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent, - .top => break, - }; - gop.value_ptr.* = member_node; - } - return decl_count; -} - -fn isInferred(astgen: *AstGen, ref: Zir.Inst.Ref) bool { - const inst = ref.toIndex() orelse return false; - const zir_tags = astgen.instructions.items(.tag); - return switch (zir_tags[@intFromEnum(inst)]) { - .alloc_inferred, - .alloc_inferred_mut, - .alloc_inferred_comptime, - .alloc_inferred_comptime_mut, - => true, - - .extended => { - const zir_data = astgen.instructions.items(.data); - if (zir_data[@intFromEnum(inst)].extended.opcode != .alloc) return false; - const small: Zir.Inst.AllocExtended.Small = @bitCast(zir_data[@intFromEnum(inst)].extended.small); - return !small.has_type; - }, - - else => false, - }; -} - -/// Assumes capacity for body has already been added. Needed capacity taking into -/// account fixups can be found with `countBodyLenAfterFixups`. -fn appendBodyWithFixups(astgen: *AstGen, body: []const Zir.Inst.Index) void { - return appendBodyWithFixupsArrayList(astgen, &astgen.extra, body); -} - -fn appendBodyWithFixupsArrayList( - astgen: *AstGen, - list: *std.ArrayListUnmanaged(u32), - body: []const Zir.Inst.Index, -) void { - for (body) |body_inst| { - appendPossiblyRefdBodyInst(astgen, list, body_inst); - } -} - -fn appendPossiblyRefdBodyInst( - astgen: *AstGen, - list: *std.ArrayListUnmanaged(u32), - body_inst: Zir.Inst.Index, -) void { - list.appendAssumeCapacity(@intFromEnum(body_inst)); - const kv = astgen.ref_table.fetchRemove(body_inst) orelse return; - const ref_inst = kv.value; - return appendPossiblyRefdBodyInst(astgen, list, ref_inst); -} - -fn countBodyLenAfterFixups(astgen: *AstGen, body: []const Zir.Inst.Index) u32 { - var count = body.len; - for (body) |body_inst| { - var check_inst = body_inst; - while (astgen.ref_table.get(check_inst)) |ref_inst| { - count += 1; - check_inst = ref_inst; - } - } - return @intCast(count); -} - -fn emitDbgStmt(gz: *GenZir, lc: LineColumn) !void { - if (gz.is_comptime) return; - if (gz.instructions.items.len > 0) { - const astgen = gz.astgen; - const last = gz.instructions.items[gz.instructions.items.len - 1]; - if (astgen.instructions.items(.tag)[@intFromEnum(last)] == .dbg_stmt) { - astgen.instructions.items(.data)[@intFromEnum(last)].dbg_stmt = .{ - .line = lc[0], - .column = lc[1], - }; - return; - } - } - - _ = try gz.add(.{ .tag = .dbg_stmt, .data = .{ - .dbg_stmt = .{ - .line = lc[0], - .column = lc[1], - }, - } }); -} - -/// In some cases, Sema expects us to generate a `dbg_stmt` at the instruction -/// *index* directly preceding the next instruction (e.g. if a call is %10, it -/// expects a dbg_stmt at %9). TODO: this logic may allow redundant dbg_stmt -/// instructions; fix up Sema so we don't need it! 
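// A sketch of the invariant described above, assuming Sema resolves a call
// instruction's source position from the immediately preceding ZIR index:
//
//     %8 = dbg_stmt(2, 5)  // line/column relative to the decl
//     %9 = call(...)       // Sema expects the dbg_stmt at exactly %8
//
// Both emitters (emitDbgStmt above, emitDbgStmtForceCurrentIndex below)
// therefore overwrite a trailing dbg_stmt in place rather than appending a
// redundant one.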
-fn emitDbgStmtForceCurrentIndex(gz: *GenZir, lc: LineColumn) !void { - const astgen = gz.astgen; - if (gz.instructions.items.len > 0 and - @intFromEnum(gz.instructions.items[gz.instructions.items.len - 1]) == astgen.instructions.len - 1) - { - const last = astgen.instructions.len - 1; - if (astgen.instructions.items(.tag)[last] == .dbg_stmt) { - astgen.instructions.items(.data)[last].dbg_stmt = .{ - .line = lc[0], - .column = lc[1], - }; - return; - } - } - - _ = try gz.add(.{ .tag = .dbg_stmt, .data = .{ - .dbg_stmt = .{ - .line = lc[0], - .column = lc[1], - }, - } }); -} - -fn lowerAstErrors(astgen: *AstGen) !void { - const tree = astgen.tree; - assert(tree.errors.len > 0); - - const gpa = astgen.gpa; - const parse_err = tree.errors[0]; - - var msg: std.ArrayListUnmanaged(u8) = .{}; - defer msg.deinit(gpa); - - const token_starts = tree.tokens.items(.start); - const token_tags = tree.tokens.items(.tag); - - var notes: std.ArrayListUnmanaged(u32) = .{}; - defer notes.deinit(gpa); - - if (token_tags[parse_err.token + @intFromBool(parse_err.token_is_prev)] == .invalid) { - const tok = parse_err.token + @intFromBool(parse_err.token_is_prev); - const bad_off: u32 = @intCast(tree.tokenSlice(parse_err.token + @intFromBool(parse_err.token_is_prev)).len); - const byte_abs = token_starts[parse_err.token + @intFromBool(parse_err.token_is_prev)] + bad_off; - try notes.append(gpa, try astgen.errNoteTokOff(tok, bad_off, "invalid byte: '{'}'", .{ - std.zig.fmtEscapes(tree.source[byte_abs..][0..1]), - })); - } - - for (tree.errors[1..]) |note| { - if (!note.is_note) break; - - msg.clearRetainingCapacity(); - try tree.renderError(note, msg.writer(gpa)); - try notes.append(gpa, try astgen.errNoteTok(note.token, "{s}", .{msg.items})); - } - - const extra_offset = tree.errorOffset(parse_err); - msg.clearRetainingCapacity(); - try tree.renderError(parse_err, msg.writer(gpa)); - try astgen.appendErrorTokNotesOff(parse_err.token, extra_offset, "{s}", .{msg.items}, notes.items); -} - -const DeclarationName = union(enum) { - named: Ast.TokenIndex, - named_test: Ast.TokenIndex, - unnamed_test, - decltest: Zir.NullTerminatedString, - @"comptime", - @"usingnamespace", -}; - -/// Sets all extra data for a `declaration` instruction. -/// Unstacks `value_gz`, `align_gz`, `linksection_gz`, and `addrspace_gz`. -fn setDeclaration( - decl_inst: Zir.Inst.Index, - src_hash: std.zig.SrcHash, - name: DeclarationName, - line_offset: u32, - is_pub: bool, - is_export: bool, - doc_comment: Zir.NullTerminatedString, - value_gz: *GenZir, - /// May be `null` if all these blocks would be empty. - /// If `null`, then `value_gz` must have nothing stacked on it. - extra_gzs: ?struct { - /// Must be stacked on `value_gz`. - align_gz: *GenZir, - /// Must be stacked on `align_gz`. - linksection_gz: *GenZir, - /// Must be stacked on `linksection_gz`, and have nothing stacked on it. 
- addrspace_gz: *GenZir, - }, -) !void { - const astgen = value_gz.astgen; - const gpa = astgen.gpa; - - const empty_body: []Zir.Inst.Index = &.{}; - const value_body, const align_body, const linksection_body, const addrspace_body = if (extra_gzs) |e| .{ - value_gz.instructionsSliceUpto(e.align_gz), - e.align_gz.instructionsSliceUpto(e.linksection_gz), - e.linksection_gz.instructionsSliceUpto(e.addrspace_gz), - e.addrspace_gz.instructionsSlice(), - } else .{ value_gz.instructionsSlice(), empty_body, empty_body, empty_body }; - - const value_len = astgen.countBodyLenAfterFixups(value_body); - const align_len = astgen.countBodyLenAfterFixups(align_body); - const linksection_len = astgen.countBodyLenAfterFixups(linksection_body); - const addrspace_len = astgen.countBodyLenAfterFixups(addrspace_body); - - const true_doc_comment: Zir.NullTerminatedString = switch (name) { - .decltest => |test_name| test_name, - else => doc_comment, - }; - - const src_hash_arr: [4]u32 = @bitCast(src_hash); - - const extra: Zir.Inst.Declaration = .{ - .src_hash_0 = src_hash_arr[0], - .src_hash_1 = src_hash_arr[1], - .src_hash_2 = src_hash_arr[2], - .src_hash_3 = src_hash_arr[3], - .name = switch (name) { - .named => |tok| @enumFromInt(@intFromEnum(try astgen.identAsString(tok))), - .named_test => |tok| @enumFromInt(@intFromEnum(try astgen.testNameString(tok))), - .unnamed_test => .unnamed_test, - .decltest => .decltest, - .@"comptime" => .@"comptime", - .@"usingnamespace" => .@"usingnamespace", - }, - .line_offset = line_offset, - .flags = .{ - .value_body_len = @intCast(value_len), - .is_pub = is_pub, - .is_export = is_export, - .has_doc_comment = true_doc_comment != .empty, - .has_align_linksection_addrspace = align_len != 0 or linksection_len != 0 or addrspace_len != 0, - }, - }; - astgen.instructions.items(.data)[@intFromEnum(decl_inst)].pl_node.payload_index = try astgen.addExtra(extra); - if (extra.flags.has_doc_comment) { - try astgen.extra.append(gpa, @intFromEnum(true_doc_comment)); - } - if (extra.flags.has_align_linksection_addrspace) { - try astgen.extra.appendSlice(gpa, &.{ - align_len, - linksection_len, - addrspace_len, - }); - } - try astgen.extra.ensureUnusedCapacity(gpa, value_len + align_len + linksection_len + addrspace_len); - astgen.appendBodyWithFixups(value_body); - if (extra.flags.has_align_linksection_addrspace) { - astgen.appendBodyWithFixups(align_body); - astgen.appendBodyWithFixups(linksection_body); - astgen.appendBodyWithFixups(addrspace_body); - } - - if (extra_gzs) |e| { - e.addrspace_gz.unstack(); - e.linksection_gz.unstack(); - e.align_gz.unstack(); - } - value_gz.unstack(); -} diff --git a/src/Builtin.zig b/src/Builtin.zig index fb0c1e9490..5c8577f4cb 100644 --- a/src/Builtin.zig +++ b/src/Builtin.zig @@ -296,7 +296,7 @@ const Allocator = std.mem.Allocator; const build_options = @import("build_options"); const Module = @import("Package/Module.zig"); const assert = std.debug.assert; -const AstGen = @import("AstGen.zig"); +const AstGen = std.zig.AstGen; const File = @import("Module.zig").File; const Compilation = @import("Compilation.zig"); const log = std.log.scoped(.builtin); diff --git a/src/Module.zig b/src/Module.zig index 6316979f4c..e3e97bb291 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -28,7 +28,7 @@ const link = @import("link.zig"); const Air = @import("Air.zig"); const Zir = std.zig.Zir; const trace = @import("tracy.zig").trace; -const AstGen = @import("AstGen.zig"); +const AstGen = std.zig.AstGen; const Sema = @import("Sema.zig"); const target_util = 
@import("target.zig"); const build_options = @import("build_options"); diff --git a/src/main.zig b/src/main.zig index 9522734dea..278a3939fc 100644 --- a/src/main.zig +++ b/src/main.zig @@ -25,7 +25,7 @@ const Cache = std.Build.Cache; const target_util = @import("target.zig"); const crash_report = @import("crash_report.zig"); const Module = @import("Module.zig"); -const AstGen = @import("AstGen.zig"); +const AstGen = std.zig.AstGen; const mingw = @import("mingw.zig"); const Server = std.zig.Server; diff --git a/src/reduce.zig b/src/reduce.zig index 35456fe7c3..0a52814ac1 100644 --- a/src/reduce.zig +++ b/src/reduce.zig @@ -5,7 +5,7 @@ const assert = std.debug.assert; const fatal = @import("./main.zig").fatal; const Ast = std.zig.Ast; const Walk = @import("reduce/Walk.zig"); -const AstGen = @import("AstGen.zig"); +const AstGen = std.zig.AstGen; const Zir = std.zig.Zir; const usage = -- cgit v1.2.3 From d661f0f35ba5c5600c3547b52e6fbca34991702b Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 26 Feb 2024 22:26:19 -0700 Subject: compiler: JIT zig fmt See #19063 --- lib/build_runner.zig | 2 +- lib/std/zig.zig | 110 +++++++- lib/std/zig/Ast.zig | 42 ++- lib/std/zig/ErrorBundle.zig | 84 ++++++ lib/std/zig/fmt.zig | 343 ++++++++++++++++++++++++- src/Compilation.zig | 81 +----- src/Module.zig | 174 +++++-------- src/Package/Fetch.zig | 3 +- src/main.zig | 609 ++++++++++---------------------------------- 9 files changed, 778 insertions(+), 670 deletions(-) (limited to 'src/Module.zig') diff --git a/lib/build_runner.zig b/lib/build_runner.zig index d4a503a5c5..071b56a71c 100644 --- a/lib/build_runner.zig +++ b/lib/build_runner.zig @@ -13,7 +13,7 @@ const Step = std.Build.Step; pub const dependencies = @import("@dependencies"); pub fn main() !void { - // Here we use an ArenaAllocator backed by a DirectAllocator because a build is a short-lived, + // Here we use an ArenaAllocator backed by a page allocator because a build is a short-lived, // one shot program. We don't need to waste time freeing memory and finding places to squish // bytes into. So we free everything all at once at the very end. var single_threaded_arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); diff --git a/lib/std/zig.zig b/lib/std/zig.zig index 9085b23de1..a195889d77 100644 --- a/lib/std/zig.zig +++ b/lib/std/zig.zig @@ -1,6 +1,3 @@ -/// Implementation of `zig fmt`. -pub const fmt = @import("zig/fmt.zig"); - pub const ErrorBundle = @import("zig/ErrorBundle.zig"); pub const Server = @import("zig/Server.zig"); pub const Client = @import("zig/Client.zig"); @@ -30,6 +27,36 @@ pub const c_translation = @import("zig/c_translation.zig"); pub const SrcHasher = std.crypto.hash.Blake3; pub const SrcHash = [16]u8; +pub const Color = enum { + /// Determine whether stderr is a terminal or not automatically. + auto, + /// Assume stderr is not a terminal. + off, + /// Assume stderr is a terminal. + on, + + pub fn get_tty_conf(color: Color) std.io.tty.Config { + return switch (color) { + .auto => std.io.tty.detectConfig(std.io.getStdErr()), + .on => .escape_codes, + .off => .no_color, + }; + } + + pub fn renderOptions(color: Color) std.zig.ErrorBundle.RenderOptions { + const ttyconf = get_tty_conf(color); + return .{ + .ttyconf = ttyconf, + .include_source_line = ttyconf != .no_color, + .include_reference_trace = ttyconf != .no_color, + }; + } +}; + +/// There are many assumptions in the entire codebase that Zig source files can +/// be byte-indexed with a u32 integer. 
+pub const max_src_size = std.math.maxInt(u32); + pub fn hashSrc(src: []const u8) SrcHash { var out: SrcHash = undefined; SrcHasher.hash(src, &out, .{}); @@ -801,6 +828,78 @@ test isValidId { try std.testing.expect(isValidId("i386")); } +pub fn readSourceFileToEndAlloc( + allocator: Allocator, + input: std.fs.File, + size_hint: ?usize, +) ![:0]u8 { + const source_code = input.readToEndAllocOptions( + allocator, + max_src_size, + size_hint, + @alignOf(u16), + 0, + ) catch |err| switch (err) { + error.ConnectionResetByPeer => unreachable, + error.ConnectionTimedOut => unreachable, + error.NotOpenForReading => unreachable, + else => |e| return e, + }; + errdefer allocator.free(source_code); + + // Detect unsupported file types with their Byte Order Mark + const unsupported_boms = [_][]const u8{ + "\xff\xfe\x00\x00", // UTF-32 little endian + "\xfe\xff\x00\x00", // UTF-32 big endian + "\xfe\xff", // UTF-16 big endian + }; + for (unsupported_boms) |bom| { + if (std.mem.startsWith(u8, source_code, bom)) { + return error.UnsupportedEncoding; + } + } + + // If the file starts with a UTF-16 little endian BOM, translate it to UTF-8 + if (std.mem.startsWith(u8, source_code, "\xff\xfe")) { + const source_code_utf16_le = std.mem.bytesAsSlice(u16, source_code); + const source_code_utf8 = std.unicode.utf16LeToUtf8AllocZ(allocator, source_code_utf16_le) catch |err| switch (err) { + error.DanglingSurrogateHalf => error.UnsupportedEncoding, + error.ExpectedSecondSurrogateHalf => error.UnsupportedEncoding, + error.UnexpectedSecondSurrogateHalf => error.UnsupportedEncoding, + else => |e| return e, + }; + + allocator.free(source_code); + return source_code_utf8; + } + + return source_code; +} + +pub fn printAstErrorsToStderr(gpa: Allocator, tree: Ast, path: []const u8, color: Color) !void { + var wip_errors: std.zig.ErrorBundle.Wip = undefined; + try wip_errors.init(gpa); + defer wip_errors.deinit(); + + try putAstErrorsIntoBundle(gpa, tree, path, &wip_errors); + + var error_bundle = try wip_errors.toOwnedBundle(""); + defer error_bundle.deinit(gpa); + error_bundle.renderToStdErr(color.renderOptions()); +} + +pub fn putAstErrorsIntoBundle( + gpa: Allocator, + tree: Ast, + path: []const u8, + wip_errors: *std.zig.ErrorBundle.Wip, +) Allocator.Error!void { + var zir = try AstGen.generate(gpa, tree); + defer zir.deinit(gpa); + + try wip_errors.addZirErrorMessages(zir, tree, tree.source, path); +} + test { _ = Ast; _ = AstRlAnnotate; @@ -808,9 +907,12 @@ test { _ = Client; _ = ErrorBundle; _ = Server; - _ = fmt; _ = number_literal; _ = primitives; _ = string_literal; _ = system; + + // This is not standard library API; it is the standalone executable + // implementation of `zig fmt`. 
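// A minimal sketch of how the helpers added above compose, assuming a
// caller holding `gpa: Allocator` and an open `file: std.fs.File`
// ("example.zig" stands in for the real display path):
//
//     const source = try std.zig.readSourceFileToEndAlloc(gpa, file, null);
//     defer gpa.free(source);
//     var tree = try std.zig.Ast.parse(gpa, source, .zig);
//     defer tree.deinit(gpa);
//     if (tree.errors.len != 0)
//         try std.zig.printAstErrorsToStderr(gpa, tree, "example.zig", .auto);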
+ _ = @import("zig/fmt.zig"); } diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig index d4e393bdf0..6f4afbbe4f 100644 --- a/lib/std/zig/Ast.zig +++ b/lib/std/zig/Ast.zig @@ -32,6 +32,12 @@ pub const Location = struct { line_end: usize, }; +pub const Span = struct { + start: u32, + end: u32, + main: u32, +}; + pub fn deinit(tree: *Ast, gpa: Allocator) void { tree.tokens.deinit(gpa); tree.nodes.deinit(gpa); @@ -3533,6 +3539,39 @@ pub const Node = struct { }; }; +pub fn nodeToSpan(tree: *const Ast, node: u32) Span { + return tokensToSpan( + tree, + tree.firstToken(node), + tree.lastToken(node), + tree.nodes.items(.main_token)[node], + ); +} + +pub fn tokenToSpan(tree: *const Ast, token: Ast.TokenIndex) Span { + return tokensToSpan(tree, token, token, token); +} + +pub fn tokensToSpan(tree: *const Ast, start: Ast.TokenIndex, end: Ast.TokenIndex, main: Ast.TokenIndex) Span { + const token_starts = tree.tokens.items(.start); + var start_tok = start; + var end_tok = end; + + if (tree.tokensOnSameLine(start, end)) { + // do nothing + } else if (tree.tokensOnSameLine(start, main)) { + end_tok = main; + } else if (tree.tokensOnSameLine(main, end)) { + start_tok = main; + } else { + start_tok = main; + end_tok = main; + } + const start_off = token_starts[start_tok]; + const end_off = token_starts[end_tok] + @as(u32, @intCast(tree.tokenSlice(end_tok).len)); + return Span{ .start = start_off, .end = end_off, .main = token_starts[main] }; +} + const std = @import("../std.zig"); const assert = std.debug.assert; const testing = std.testing; @@ -3544,5 +3583,6 @@ const Parse = @import("Parse.zig"); const private_render = @import("./render.zig"); test { - testing.refAllDecls(@This()); + _ = Parse; + _ = private_render; } diff --git a/lib/std/zig/ErrorBundle.zig b/lib/std/zig/ErrorBundle.zig index ff47e3794b..013d447ab1 100644 --- a/lib/std/zig/ErrorBundle.zig +++ b/lib/std/zig/ErrorBundle.zig @@ -459,6 +459,90 @@ pub const Wip = struct { return @intCast(wip.extra.items.len - notes_len); } + pub fn addZirErrorMessages( + eb: *ErrorBundle.Wip, + zir: std.zig.Zir, + tree: std.zig.Ast, + source: [:0]const u8, + src_path: []const u8, + ) !void { + const Zir = std.zig.Zir; + const payload_index = zir.extra[@intFromEnum(Zir.ExtraIndex.compile_errors)]; + assert(payload_index != 0); + + const header = zir.extraData(Zir.Inst.CompileErrors, payload_index); + const items_len = header.data.items_len; + var extra_index = header.end; + for (0..items_len) |_| { + const item = zir.extraData(Zir.Inst.CompileErrors.Item, extra_index); + extra_index = item.end; + const err_span = blk: { + if (item.data.node != 0) { + break :blk tree.nodeToSpan(item.data.node); + } + const token_starts = tree.tokens.items(.start); + const start = token_starts[item.data.token] + item.data.byte_offset; + const end = start + @as(u32, @intCast(tree.tokenSlice(item.data.token).len)) - item.data.byte_offset; + break :blk std.zig.Ast.Span{ .start = start, .end = end, .main = start }; + }; + const err_loc = std.zig.findLineColumn(source, err_span.main); + + { + const msg = zir.nullTerminatedString(item.data.msg); + try eb.addRootErrorMessage(.{ + .msg = try eb.addString(msg), + .src_loc = try eb.addSourceLocation(.{ + .src_path = try eb.addString(src_path), + .span_start = err_span.start, + .span_main = err_span.main, + .span_end = err_span.end, + .line = @intCast(err_loc.line), + .column = @intCast(err_loc.column), + .source_line = try eb.addString(err_loc.source_line), + }), + .notes_len = item.data.notesLen(zir), + }); + } + + if 
(item.data.notes != 0) { + const notes_start = try eb.reserveNotes(item.data.notes); + const block = zir.extraData(Zir.Inst.Block, item.data.notes); + const body = zir.extra[block.end..][0..block.data.body_len]; + for (notes_start.., body) |note_i, body_elem| { + const note_item = zir.extraData(Zir.Inst.CompileErrors.Item, body_elem); + const msg = zir.nullTerminatedString(note_item.data.msg); + const span = blk: { + if (note_item.data.node != 0) { + break :blk tree.nodeToSpan(note_item.data.node); + } + const token_starts = tree.tokens.items(.start); + const start = token_starts[note_item.data.token] + note_item.data.byte_offset; + const end = start + @as(u32, @intCast(tree.tokenSlice(note_item.data.token).len)) - item.data.byte_offset; + break :blk std.zig.Ast.Span{ .start = start, .end = end, .main = start }; + }; + const loc = std.zig.findLineColumn(source, span.main); + + eb.extra.items[note_i] = @intFromEnum(try eb.addErrorMessage(.{ + .msg = try eb.addString(msg), + .src_loc = try eb.addSourceLocation(.{ + .src_path = try eb.addString(src_path), + .span_start = span.start, + .span_main = span.main, + .span_end = span.end, + .line = @intCast(loc.line), + .column = @intCast(loc.column), + .source_line = if (loc.eql(err_loc)) + 0 + else + try eb.addString(loc.source_line), + }), + .notes_len = 0, // TODO rework this function to be recursive + })); + } + } + } + } + fn addOtherMessage(wip: *Wip, other: ErrorBundle, msg_index: MessageIndex) !MessageIndex { const other_msg = other.getErrorMessage(msg_index); const src_loc = try wip.addOtherSourceLocation(other, other_msg.src_loc); diff --git a/lib/std/zig/fmt.zig b/lib/std/zig/fmt.zig index f8841bfb5b..2fc04b7935 100644 --- a/lib/std/zig/fmt.zig +++ b/lib/std/zig/fmt.zig @@ -1 +1,342 @@ -const std = @import("../std.zig"); +const std = @import("std"); +const mem = std.mem; +const fs = std.fs; +const process = std.process; +const Allocator = std.mem.Allocator; +const warn = std.log.warn; +const Color = std.zig.Color; + +const usage_fmt = + \\Usage: zig fmt [file]... + \\ + \\ Formats the input files and modifies them in-place. + \\ Arguments can be files or directories, which are searched + \\ recursively. 
+    \\
+    \\Options:
+    \\   -h, --help             Print this help and exit
+    \\   --color [auto|off|on]  Enable or disable colored error messages
+    \\   --stdin                Format code from stdin; output to stdout
+    \\   --check                List non-conforming files and exit with an error
+    \\                          if the list is non-empty
+    \\   --ast-check            Run zig ast-check on every file
+    \\   --exclude [file]       Exclude file or directory from formatting
+    \\
+    \\
+;
+
+const Fmt = struct {
+    seen: SeenMap,
+    any_error: bool,
+    check_ast: bool,
+    color: Color,
+    gpa: Allocator,
+    arena: Allocator,
+    out_buffer: std.ArrayList(u8),
+
+    const SeenMap = std.AutoHashMap(fs.File.INode, void);
+};
+
+pub fn main() !void {
+    var arena_instance = std.heap.ArenaAllocator.init(std.heap.page_allocator);
+    defer arena_instance.deinit();
+    const arena = arena_instance.allocator();
+    const gpa = arena;
+
+    const args = try process.argsAlloc(arena);
+
+    var color: Color = .auto;
+    var stdin_flag: bool = false;
+    var check_flag: bool = false;
+    var check_ast_flag: bool = false;
+    var input_files = std.ArrayList([]const u8).init(gpa);
+    defer input_files.deinit();
+    var excluded_files = std.ArrayList([]const u8).init(gpa);
+    defer excluded_files.deinit();
+
+    {
+        var i: usize = 1;
+        while (i < args.len) : (i += 1) {
+            const arg = args[i];
+            if (mem.startsWith(u8, arg, "-")) {
+                if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
+                    const stdout = std.io.getStdOut().writer();
+                    try stdout.writeAll(usage_fmt);
+                    return process.cleanExit();
+                } else if (mem.eql(u8, arg, "--color")) {
+                    if (i + 1 >= args.len) {
+                        fatal("expected [auto|on|off] after --color", .{});
+                    }
+                    i += 1;
+                    const next_arg = args[i];
+                    color = std.meta.stringToEnum(Color, next_arg) orelse {
+                        fatal("expected [auto|on|off] after --color, found '{s}'", .{next_arg});
+                    };
+                } else if (mem.eql(u8, arg, "--stdin")) {
+                    stdin_flag = true;
+                } else if (mem.eql(u8, arg, "--check")) {
+                    check_flag = true;
+                } else if (mem.eql(u8, arg, "--ast-check")) {
+                    check_ast_flag = true;
+                } else if (mem.eql(u8, arg, "--exclude")) {
+                    if (i + 1 >= args.len) {
+                        fatal("expected parameter after --exclude", .{});
+                    }
+                    i += 1;
+                    const next_arg = args[i];
+                    try excluded_files.append(next_arg);
+                } else {
+                    fatal("unrecognized parameter: '{s}'", .{arg});
+                }
+            } else {
+                try input_files.append(arg);
+            }
+        }
+    }
+
+    if (stdin_flag) {
+        if (input_files.items.len != 0) {
+            fatal("cannot use --stdin with positional arguments", .{});
+        }
+
+        const stdin = std.io.getStdIn();
+        const source_code = std.zig.readSourceFileToEndAlloc(gpa, stdin, null) catch |err| {
+            fatal("unable to read stdin: {}", .{err});
+        };
+        defer gpa.free(source_code);
+
+        var tree = std.zig.Ast.parse(gpa, source_code, .zig) catch |err| {
+            fatal("error parsing stdin: {}", .{err});
+        };
+        defer tree.deinit(gpa);
+
+        if (check_ast_flag) {
+            var zir = try std.zig.AstGen.generate(gpa, tree);
+
+            if (zir.hasCompileErrors()) {
+                var wip_errors: std.zig.ErrorBundle.Wip = undefined;
+                try wip_errors.init(gpa);
+                defer wip_errors.deinit();
+                try wip_errors.addZirErrorMessages(zir, tree, source_code, "<stdin>");
+                var error_bundle = try wip_errors.toOwnedBundle("");
+                defer error_bundle.deinit(gpa);
+                error_bundle.renderToStdErr(color.renderOptions());
+                process.exit(2);
+            }
+        } else if (tree.errors.len != 0) {
+            try std.zig.printAstErrorsToStderr(gpa, tree, "<stdin>", color);
+            process.exit(2);
+        }
+        const formatted = try tree.render(gpa);
+        defer gpa.free(formatted);
+
+        if (check_flag) {
+            const code: u8 = @intFromBool(!mem.eql(u8, formatted, source_code));
+            process.exit(code);
+        }
+
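// The --check exit status above follows the usage text: nonzero means the
// input is not in canonical form. Shell sketch (inputs are illustrative):
//
//     echo 'const a=1;' | zig fmt --stdin --check   # exits 1
//     zig fmt --stdin --check < formatted.zig       # exits 0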
+ return std.io.getStdOut().writeAll(formatted); + } + + if (input_files.items.len == 0) { + fatal("expected at least one source file argument", .{}); + } + + var fmt = Fmt{ + .gpa = gpa, + .arena = arena, + .seen = Fmt.SeenMap.init(gpa), + .any_error = false, + .check_ast = check_ast_flag, + .color = color, + .out_buffer = std.ArrayList(u8).init(gpa), + }; + defer fmt.seen.deinit(); + defer fmt.out_buffer.deinit(); + + // Mark any excluded files/directories as already seen, + // so that they are skipped later during actual processing + for (excluded_files.items) |file_path| { + const stat = fs.cwd().statFile(file_path) catch |err| switch (err) { + error.FileNotFound => continue, + // On Windows, statFile does not work for directories + error.IsDir => dir: { + var dir = try fs.cwd().openDir(file_path, .{}); + defer dir.close(); + break :dir try dir.stat(); + }, + else => |e| return e, + }; + try fmt.seen.put(stat.inode, {}); + } + + for (input_files.items) |file_path| { + try fmtPath(&fmt, file_path, check_flag, fs.cwd(), file_path); + } + if (fmt.any_error) { + process.exit(1); + } +} + +const FmtError = error{ + SystemResources, + OperationAborted, + IoPending, + BrokenPipe, + Unexpected, + WouldBlock, + FileClosed, + DestinationAddressRequired, + DiskQuota, + FileTooBig, + InputOutput, + NoSpaceLeft, + AccessDenied, + OutOfMemory, + RenameAcrossMountPoints, + ReadOnlyFileSystem, + LinkQuotaExceeded, + FileBusy, + EndOfStream, + Unseekable, + NotOpenForWriting, + UnsupportedEncoding, + ConnectionResetByPeer, + SocketNotConnected, + LockViolation, + NetNameDeleted, + InvalidArgument, +} || fs.File.OpenError; + +fn fmtPath(fmt: *Fmt, file_path: []const u8, check_mode: bool, dir: fs.Dir, sub_path: []const u8) FmtError!void { + fmtPathFile(fmt, file_path, check_mode, dir, sub_path) catch |err| switch (err) { + error.IsDir, error.AccessDenied => return fmtPathDir(fmt, file_path, check_mode, dir, sub_path), + else => { + warn("unable to format '{s}': {s}", .{ file_path, @errorName(err) }); + fmt.any_error = true; + return; + }, + }; +} + +fn fmtPathDir( + fmt: *Fmt, + file_path: []const u8, + check_mode: bool, + parent_dir: fs.Dir, + parent_sub_path: []const u8, +) FmtError!void { + var dir = try parent_dir.openDir(parent_sub_path, .{ .iterate = true }); + defer dir.close(); + + const stat = try dir.stat(); + if (try fmt.seen.fetchPut(stat.inode, {})) |_| return; + + var dir_it = dir.iterate(); + while (try dir_it.next()) |entry| { + const is_dir = entry.kind == .directory; + + if (is_dir and (mem.eql(u8, entry.name, "zig-cache") or mem.eql(u8, entry.name, "zig-out"))) continue; + + if (is_dir or entry.kind == .file and (mem.endsWith(u8, entry.name, ".zig") or mem.endsWith(u8, entry.name, ".zon"))) { + const full_path = try fs.path.join(fmt.gpa, &[_][]const u8{ file_path, entry.name }); + defer fmt.gpa.free(full_path); + + if (is_dir) { + try fmtPathDir(fmt, full_path, check_mode, dir, entry.name); + } else { + fmtPathFile(fmt, full_path, check_mode, dir, entry.name) catch |err| { + warn("unable to format '{s}': {s}", .{ full_path, @errorName(err) }); + fmt.any_error = true; + return; + }; + } + } + } +} + +fn fmtPathFile( + fmt: *Fmt, + file_path: []const u8, + check_mode: bool, + dir: fs.Dir, + sub_path: []const u8, +) FmtError!void { + const source_file = try dir.openFile(sub_path, .{}); + var file_closed = false; + errdefer if (!file_closed) source_file.close(); + + const stat = try source_file.stat(); + + if (stat.kind == .directory) + return error.IsDir; + + const gpa = fmt.gpa; + const 
source_code = try std.zig.readSourceFileToEndAlloc( + gpa, + source_file, + std.math.cast(usize, stat.size) orelse return error.FileTooBig, + ); + defer gpa.free(source_code); + + source_file.close(); + file_closed = true; + + // Add to set after no longer possible to get error.IsDir. + if (try fmt.seen.fetchPut(stat.inode, {})) |_| return; + + var tree = try std.zig.Ast.parse(gpa, source_code, .zig); + defer tree.deinit(gpa); + + if (tree.errors.len != 0) { + try std.zig.printAstErrorsToStderr(gpa, tree, file_path, fmt.color); + fmt.any_error = true; + return; + } + + if (fmt.check_ast) { + if (stat.size > std.zig.max_src_size) + return error.FileTooBig; + + var zir = try std.zig.AstGen.generate(gpa, tree); + defer zir.deinit(gpa); + + if (zir.hasCompileErrors()) { + var wip_errors: std.zig.ErrorBundle.Wip = undefined; + try wip_errors.init(gpa); + defer wip_errors.deinit(); + try wip_errors.addZirErrorMessages(zir, tree, source_code, file_path); + var error_bundle = try wip_errors.toOwnedBundle(""); + defer error_bundle.deinit(gpa); + error_bundle.renderToStdErr(fmt.color.renderOptions()); + fmt.any_error = true; + } + } + + // As a heuristic, we make enough capacity for the same as the input source. + fmt.out_buffer.shrinkRetainingCapacity(0); + try fmt.out_buffer.ensureTotalCapacity(source_code.len); + + try tree.renderToArrayList(&fmt.out_buffer, .{}); + if (mem.eql(u8, fmt.out_buffer.items, source_code)) + return; + + if (check_mode) { + const stdout = std.io.getStdOut().writer(); + try stdout.print("{s}\n", .{file_path}); + fmt.any_error = true; + } else { + var af = try dir.atomicFile(sub_path, .{ .mode = stat.mode }); + defer af.deinit(); + + try af.file.writeAll(fmt.out_buffer.items); + try af.finish(); + const stdout = std.io.getStdOut().writer(); + try stdout.print("{s}\n", .{file_path}); + } +} + +fn fatal(comptime format: []const u8, args: anytype) noreturn { + std.log.err(format, args); + process.exit(1); +} diff --git a/src/Compilation.zig b/src/Compilation.zig index d4d2826880..4f4c5f46ef 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -3322,85 +3322,10 @@ pub fn addZirErrorMessages(eb: *ErrorBundle.Wip, file: *Module.File) !void { assert(file.zir_loaded); assert(file.tree_loaded); assert(file.source_loaded); - const payload_index = file.zir.extra[@intFromEnum(Zir.ExtraIndex.compile_errors)]; - assert(payload_index != 0); const gpa = eb.gpa; - - const header = file.zir.extraData(Zir.Inst.CompileErrors, payload_index); - const items_len = header.data.items_len; - var extra_index = header.end; - for (0..items_len) |_| { - const item = file.zir.extraData(Zir.Inst.CompileErrors.Item, extra_index); - extra_index = item.end; - const err_span = blk: { - if (item.data.node != 0) { - break :blk Module.SrcLoc.nodeToSpan(&file.tree, item.data.node); - } - const token_starts = file.tree.tokens.items(.start); - const start = token_starts[item.data.token] + item.data.byte_offset; - const end = start + @as(u32, @intCast(file.tree.tokenSlice(item.data.token).len)) - item.data.byte_offset; - break :blk Module.SrcLoc.Span{ .start = start, .end = end, .main = start }; - }; - const err_loc = std.zig.findLineColumn(file.source, err_span.main); - - { - const msg = file.zir.nullTerminatedString(item.data.msg); - const src_path = try file.fullPath(gpa); - defer gpa.free(src_path); - try eb.addRootErrorMessage(.{ - .msg = try eb.addString(msg), - .src_loc = try eb.addSourceLocation(.{ - .src_path = try eb.addString(src_path), - .span_start = err_span.start, - .span_main = 
err_span.main, - .span_end = err_span.end, - .line = @as(u32, @intCast(err_loc.line)), - .column = @as(u32, @intCast(err_loc.column)), - .source_line = try eb.addString(err_loc.source_line), - }), - .notes_len = item.data.notesLen(file.zir), - }); - } - - if (item.data.notes != 0) { - const notes_start = try eb.reserveNotes(item.data.notes); - const block = file.zir.extraData(Zir.Inst.Block, item.data.notes); - const body = file.zir.extra[block.end..][0..block.data.body_len]; - for (notes_start.., body) |note_i, body_elem| { - const note_item = file.zir.extraData(Zir.Inst.CompileErrors.Item, body_elem); - const msg = file.zir.nullTerminatedString(note_item.data.msg); - const span = blk: { - if (note_item.data.node != 0) { - break :blk Module.SrcLoc.nodeToSpan(&file.tree, note_item.data.node); - } - const token_starts = file.tree.tokens.items(.start); - const start = token_starts[note_item.data.token] + note_item.data.byte_offset; - const end = start + @as(u32, @intCast(file.tree.tokenSlice(note_item.data.token).len)) - item.data.byte_offset; - break :blk Module.SrcLoc.Span{ .start = start, .end = end, .main = start }; - }; - const loc = std.zig.findLineColumn(file.source, span.main); - const src_path = try file.fullPath(gpa); - defer gpa.free(src_path); - - eb.extra.items[note_i] = @intFromEnum(try eb.addErrorMessage(.{ - .msg = try eb.addString(msg), - .src_loc = try eb.addSourceLocation(.{ - .src_path = try eb.addString(src_path), - .span_start = span.start, - .span_main = span.main, - .span_end = span.end, - .line = @as(u32, @intCast(loc.line)), - .column = @as(u32, @intCast(loc.column)), - .source_line = if (loc.eql(err_loc)) - 0 - else - try eb.addString(loc.source_line), - }), - .notes_len = 0, // TODO rework this function to be recursive - })); - } - } - } + const src_path = try file.fullPath(gpa); + defer gpa.free(src_path); + return eb.addZirErrorMessages(file.zir, file.tree, file.source, src_path); } pub fn performAllTheWork( diff --git a/src/Module.zig b/src/Module.zig index e3e97bb291..ad6487b1eb 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1255,11 +1255,7 @@ pub const SrcLoc = struct { return @bitCast(offset + @as(i32, @bitCast(src_loc.parent_decl_node))); } - pub const Span = struct { - start: u32, - end: u32, - main: u32, - }; + pub const Span = Ast.Span; pub fn span(src_loc: SrcLoc, gpa: Allocator) !Span { switch (src_loc.lazy) { @@ -1276,7 +1272,7 @@ pub const SrcLoc = struct { }, .node_abs => |node| { const tree = try src_loc.file_scope.getTree(gpa); - return nodeToSpan(tree, node); + return tree.nodeToSpan(node); }, .byte_offset => |byte_off| { const tree = try src_loc.file_scope.getTree(gpa); @@ -1297,25 +1293,24 @@ pub const SrcLoc = struct { const tree = try src_loc.file_scope.getTree(gpa); const node = src_loc.declRelativeToNodeIndex(node_off); assert(src_loc.file_scope.tree_loaded); - return nodeToSpan(tree, node); + return tree.nodeToSpan(node); }, .node_offset_main_token => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); const node = src_loc.declRelativeToNodeIndex(node_off); const main_token = tree.nodes.items(.main_token)[node]; - return tokensToSpan(tree, main_token, main_token, main_token); + return tree.tokensToSpan(main_token, main_token, main_token); }, .node_offset_bin_op => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); const node = src_loc.declRelativeToNodeIndex(node_off); assert(src_loc.file_scope.tree_loaded); - return nodeToSpan(tree, node); + return tree.nodeToSpan(node); }, .node_offset_initializer => 
|node_off| { const tree = try src_loc.file_scope.getTree(gpa); const node = src_loc.declRelativeToNodeIndex(node_off); - return tokensToSpan( - tree, + return tree.tokensToSpan( tree.firstToken(node) - 3, tree.lastToken(node), tree.nodes.items(.main_token)[node] - 2, @@ -1333,12 +1328,12 @@ pub const SrcLoc = struct { => tree.fullVarDecl(node).?, .@"usingnamespace" => { const node_data = tree.nodes.items(.data); - return nodeToSpan(tree, node_data[node].lhs); + return tree.nodeToSpan(node_data[node].lhs); }, else => unreachable, }; if (full.ast.type_node != 0) { - return nodeToSpan(tree, full.ast.type_node); + return tree.nodeToSpan(full.ast.type_node); } const tok_index = full.ast.mut_token + 1; // the name token const start = tree.tokens.items(.start)[tok_index]; @@ -1349,25 +1344,25 @@ pub const SrcLoc = struct { const tree = try src_loc.file_scope.getTree(gpa); const node = src_loc.declRelativeToNodeIndex(node_off); const full = tree.fullVarDecl(node).?; - return nodeToSpan(tree, full.ast.align_node); + return tree.nodeToSpan(full.ast.align_node); }, .node_offset_var_decl_section => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); const node = src_loc.declRelativeToNodeIndex(node_off); const full = tree.fullVarDecl(node).?; - return nodeToSpan(tree, full.ast.section_node); + return tree.nodeToSpan(full.ast.section_node); }, .node_offset_var_decl_addrspace => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); const node = src_loc.declRelativeToNodeIndex(node_off); const full = tree.fullVarDecl(node).?; - return nodeToSpan(tree, full.ast.addrspace_node); + return tree.nodeToSpan(full.ast.addrspace_node); }, .node_offset_var_decl_init => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); const node = src_loc.declRelativeToNodeIndex(node_off); const full = tree.fullVarDecl(node).?; - return nodeToSpan(tree, full.ast.init_node); + return tree.nodeToSpan(full.ast.init_node); }, .node_offset_builtin_call_arg0 => |n| return src_loc.byteOffsetBuiltinCallArg(gpa, n, 0), .node_offset_builtin_call_arg1 => |n| return src_loc.byteOffsetBuiltinCallArg(gpa, n, 1), @@ -1408,13 +1403,13 @@ pub const SrcLoc = struct { node = node_datas[node].lhs; } - return nodeToSpan(tree, node); + return tree.nodeToSpan(node); }, .node_offset_array_access_index => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); const node_datas = tree.nodes.items(.data); const node = src_loc.declRelativeToNodeIndex(node_off); - return nodeToSpan(tree, node_datas[node].rhs); + return tree.nodeToSpan(node_datas[node].rhs); }, .node_offset_slice_ptr, .node_offset_slice_start, @@ -1431,14 +1426,14 @@ pub const SrcLoc = struct { .node_offset_slice_sentinel => full.ast.sentinel, else => unreachable, }; - return nodeToSpan(tree, part_node); + return tree.nodeToSpan(part_node); }, .node_offset_call_func => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); const node = src_loc.declRelativeToNodeIndex(node_off); var buf: [1]Ast.Node.Index = undefined; const full = tree.fullCall(&buf, node).?; - return nodeToSpan(tree, full.ast.fn_expr); + return tree.nodeToSpan(full.ast.fn_expr); }, .node_offset_field_name => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); @@ -1477,13 +1472,13 @@ pub const SrcLoc = struct { .node_offset_deref_ptr => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); const node = src_loc.declRelativeToNodeIndex(node_off); - return nodeToSpan(tree, node); + return tree.nodeToSpan(node); }, .node_offset_asm_source => |node_off| { const tree 
= try src_loc.file_scope.getTree(gpa); const node = src_loc.declRelativeToNodeIndex(node_off); const full = tree.fullAsm(node).?; - return nodeToSpan(tree, full.ast.template); + return tree.nodeToSpan(full.ast.template); }, .node_offset_asm_ret_ty => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); @@ -1491,7 +1486,7 @@ pub const SrcLoc = struct { const full = tree.fullAsm(node).?; const asm_output = full.outputs[0]; const node_datas = tree.nodes.items(.data); - return nodeToSpan(tree, node_datas[asm_output].lhs); + return tree.nodeToSpan(node_datas[asm_output].lhs); }, .node_offset_if_cond => |node_off| { @@ -1514,21 +1509,21 @@ pub const SrcLoc = struct { const inputs = tree.fullFor(node).?.ast.inputs; const start = tree.firstToken(inputs[0]); const end = tree.lastToken(inputs[inputs.len - 1]); - return tokensToSpan(tree, start, end, start); + return tree.tokensToSpan(start, end, start); }, .@"orelse" => node, .@"catch" => node, else => unreachable, }; - return nodeToSpan(tree, src_node); + return tree.nodeToSpan(src_node); }, .for_input => |for_input| { const tree = try src_loc.file_scope.getTree(gpa); const node = src_loc.declRelativeToNodeIndex(for_input.for_node_offset); const for_full = tree.fullFor(node).?; const src_node = for_full.ast.inputs[for_input.input_index]; - return nodeToSpan(tree, src_node); + return tree.nodeToSpan(src_node); }, .for_capture_from_input => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); @@ -1554,12 +1549,12 @@ pub const SrcLoc = struct { }, .identifier => { if (count == 0) - return tokensToSpan(tree, tok, tok + 1, tok); + return tree.tokensToSpan(tok, tok + 1, tok); tok += 1; }, .asterisk => { if (count == 0) - return tokensToSpan(tree, tok, tok + 2, tok); + return tree.tokensToSpan(tok, tok + 2, tok); tok += 1; }, else => unreachable, @@ -1591,7 +1586,7 @@ pub const SrcLoc = struct { .array_init_comma, => { const full = tree.fullArrayInit(&buf, call_args_node).?.ast.elements; - return nodeToSpan(tree, full[call_arg.arg_index]); + return tree.nodeToSpan(full[call_arg.arg_index]); }, .struct_init_one, .struct_init_one_comma, @@ -1603,12 +1598,12 @@ pub const SrcLoc = struct { .struct_init_comma, => { const full = tree.fullStructInit(&buf, call_args_node).?.ast.fields; - return nodeToSpan(tree, full[call_arg.arg_index]); + return tree.nodeToSpan(full[call_arg.arg_index]); }, - else => return nodeToSpan(tree, call_args_node), + else => return tree.nodeToSpan(call_args_node), } }; - return nodeToSpan(tree, call_full.ast.params[call_arg.arg_index]); + return tree.nodeToSpan(call_full.ast.params[call_arg.arg_index]); }, .fn_proto_param => |fn_proto_param| { const tree = try src_loc.file_scope.getTree(gpa); @@ -1619,12 +1614,11 @@ pub const SrcLoc = struct { var i: usize = 0; while (it.next()) |param| : (i += 1) { if (i == fn_proto_param.param_index) { - if (param.anytype_ellipsis3) |token| return tokenToSpan(tree, token); + if (param.anytype_ellipsis3) |token| return tree.tokenToSpan(token); const first_token = param.comptime_noalias orelse param.name_token orelse tree.firstToken(param.type_expr); - return tokensToSpan( - tree, + return tree.tokensToSpan( first_token, tree.lastToken(param.type_expr), first_token, @@ -1637,13 +1631,13 @@ pub const SrcLoc = struct { const tree = try src_loc.file_scope.getTree(gpa); const node = src_loc.declRelativeToNodeIndex(node_off); const node_datas = tree.nodes.items(.data); - return nodeToSpan(tree, node_datas[node].lhs); + return tree.nodeToSpan(node_datas[node].lhs); }, .node_offset_bin_rhs 
=> |node_off| { const tree = try src_loc.file_scope.getTree(gpa); const node = src_loc.declRelativeToNodeIndex(node_off); const node_datas = tree.nodes.items(.data); - return nodeToSpan(tree, node_datas[node].rhs); + return tree.nodeToSpan(node_datas[node].rhs); }, .array_cat_lhs, .array_cat_rhs => |cat| { const tree = try src_loc.file_scope.getTree(gpa); @@ -1667,9 +1661,9 @@ pub const SrcLoc = struct { .array_init_comma, => { const full = tree.fullArrayInit(&buf, arr_node).?.ast.elements; - return nodeToSpan(tree, full[cat.elem_index]); + return tree.nodeToSpan(full[cat.elem_index]); }, - else => return nodeToSpan(tree, arr_node), + else => return tree.nodeToSpan(arr_node), } }, @@ -1677,7 +1671,7 @@ pub const SrcLoc = struct { const tree = try src_loc.file_scope.getTree(gpa); const node = src_loc.declRelativeToNodeIndex(node_off); const node_datas = tree.nodes.items(.data); - return nodeToSpan(tree, node_datas[node].lhs); + return tree.nodeToSpan(node_datas[node].lhs); }, .node_offset_switch_special_prong => |node_off| { @@ -1696,7 +1690,7 @@ pub const SrcLoc = struct { mem.eql(u8, tree.tokenSlice(main_tokens[case.ast.values[0]]), "_")); if (!is_special) continue; - return nodeToSpan(tree, case_node); + return tree.nodeToSpan(case_node); } else unreachable; }, @@ -1718,7 +1712,7 @@ pub const SrcLoc = struct { for (case.ast.values) |item_node| { if (node_tags[item_node] == .switch_range) { - return nodeToSpan(tree, item_node); + return tree.nodeToSpan(item_node); } } } else unreachable; @@ -1754,28 +1748,28 @@ pub const SrcLoc = struct { const node = src_loc.declRelativeToNodeIndex(node_off); var buf: [1]Ast.Node.Index = undefined; const full = tree.fullFnProto(&buf, node).?; - return nodeToSpan(tree, full.ast.align_expr); + return tree.nodeToSpan(full.ast.align_expr); }, .node_offset_fn_type_addrspace => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); const node = src_loc.declRelativeToNodeIndex(node_off); var buf: [1]Ast.Node.Index = undefined; const full = tree.fullFnProto(&buf, node).?; - return nodeToSpan(tree, full.ast.addrspace_expr); + return tree.nodeToSpan(full.ast.addrspace_expr); }, .node_offset_fn_type_section => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); const node = src_loc.declRelativeToNodeIndex(node_off); var buf: [1]Ast.Node.Index = undefined; const full = tree.fullFnProto(&buf, node).?; - return nodeToSpan(tree, full.ast.section_expr); + return tree.nodeToSpan(full.ast.section_expr); }, .node_offset_fn_type_cc => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); const node = src_loc.declRelativeToNodeIndex(node_off); var buf: [1]Ast.Node.Index = undefined; const full = tree.fullFnProto(&buf, node).?; - return nodeToSpan(tree, full.ast.callconv_expr); + return tree.nodeToSpan(full.ast.callconv_expr); }, .node_offset_fn_type_ret_ty => |node_off| { @@ -1783,7 +1777,7 @@ pub const SrcLoc = struct { const node = src_loc.declRelativeToNodeIndex(node_off); var buf: [1]Ast.Node.Index = undefined; const full = tree.fullFnProto(&buf, node).?; - return nodeToSpan(tree, full.ast.return_type); + return tree.nodeToSpan(full.ast.return_type); }, .node_offset_param => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); @@ -1795,8 +1789,7 @@ pub const SrcLoc = struct { .colon, .identifier, .keyword_comptime, .keyword_noalias => first_tok -= 1, else => break, }; - return tokensToSpan( - tree, + return tree.tokensToSpan( first_tok, tree.lastToken(node), first_tok, @@ -1813,8 +1806,7 @@ pub const SrcLoc = struct { .colon, 
.identifier, .keyword_comptime, .keyword_noalias => first_tok -= 1, else => break, }; - return tokensToSpan( - tree, + return tree.tokensToSpan( first_tok, tok_index, first_tok, @@ -1825,7 +1817,7 @@ pub const SrcLoc = struct { const tree = try src_loc.file_scope.getTree(gpa); const node_datas = tree.nodes.items(.data); const parent_node = src_loc.declRelativeToNodeIndex(node_off); - return nodeToSpan(tree, node_datas[parent_node].rhs); + return tree.nodeToSpan(node_datas[parent_node].rhs); }, .node_offset_lib_name => |node_off| { @@ -1844,70 +1836,70 @@ pub const SrcLoc = struct { const parent_node = src_loc.declRelativeToNodeIndex(node_off); const full = tree.fullArrayType(parent_node).?; - return nodeToSpan(tree, full.ast.elem_count); + return tree.nodeToSpan(full.ast.elem_count); }, .node_offset_array_type_sentinel => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); const parent_node = src_loc.declRelativeToNodeIndex(node_off); const full = tree.fullArrayType(parent_node).?; - return nodeToSpan(tree, full.ast.sentinel); + return tree.nodeToSpan(full.ast.sentinel); }, .node_offset_array_type_elem => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); const parent_node = src_loc.declRelativeToNodeIndex(node_off); const full = tree.fullArrayType(parent_node).?; - return nodeToSpan(tree, full.ast.elem_type); + return tree.nodeToSpan(full.ast.elem_type); }, .node_offset_un_op => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); const node_datas = tree.nodes.items(.data); const node = src_loc.declRelativeToNodeIndex(node_off); - return nodeToSpan(tree, node_datas[node].lhs); + return tree.nodeToSpan(node_datas[node].lhs); }, .node_offset_ptr_elem => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); const parent_node = src_loc.declRelativeToNodeIndex(node_off); const full = tree.fullPtrType(parent_node).?; - return nodeToSpan(tree, full.ast.child_type); + return tree.nodeToSpan(full.ast.child_type); }, .node_offset_ptr_sentinel => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); const parent_node = src_loc.declRelativeToNodeIndex(node_off); const full = tree.fullPtrType(parent_node).?; - return nodeToSpan(tree, full.ast.sentinel); + return tree.nodeToSpan(full.ast.sentinel); }, .node_offset_ptr_align => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); const parent_node = src_loc.declRelativeToNodeIndex(node_off); const full = tree.fullPtrType(parent_node).?; - return nodeToSpan(tree, full.ast.align_node); + return tree.nodeToSpan(full.ast.align_node); }, .node_offset_ptr_addrspace => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); const parent_node = src_loc.declRelativeToNodeIndex(node_off); const full = tree.fullPtrType(parent_node).?; - return nodeToSpan(tree, full.ast.addrspace_node); + return tree.nodeToSpan(full.ast.addrspace_node); }, .node_offset_ptr_bitoffset => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); const parent_node = src_loc.declRelativeToNodeIndex(node_off); const full = tree.fullPtrType(parent_node).?; - return nodeToSpan(tree, full.ast.bit_range_start); + return tree.nodeToSpan(full.ast.bit_range_start); }, .node_offset_ptr_hostsize => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); const parent_node = src_loc.declRelativeToNodeIndex(node_off); const full = tree.fullPtrType(parent_node).?; - return nodeToSpan(tree, full.ast.bit_range_end); + return tree.nodeToSpan(full.ast.bit_range_end); }, .node_offset_container_tag => |node_off| { const tree = 
try src_loc.file_scope.getTree(gpa); @@ -1917,13 +1909,12 @@ pub const SrcLoc = struct { switch (node_tags[parent_node]) { .container_decl_arg, .container_decl_arg_trailing => { const full = tree.containerDeclArg(parent_node); - return nodeToSpan(tree, full.ast.arg); + return tree.nodeToSpan(full.ast.arg); }, .tagged_union_enum_tag, .tagged_union_enum_tag_trailing => { const full = tree.taggedUnionEnumTag(parent_node); - return tokensToSpan( - tree, + return tree.tokensToSpan( tree.firstToken(full.ast.arg) - 2, tree.lastToken(full.ast.arg) + 1, tree.nodes.items(.main_token)[full.ast.arg], @@ -1942,7 +1933,7 @@ pub const SrcLoc = struct { .container_field_init => tree.containerFieldInit(parent_node), else => unreachable, }; - return nodeToSpan(tree, full.ast.value_expr); + return tree.nodeToSpan(full.ast.value_expr); }, .node_offset_init_ty => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); @@ -1950,7 +1941,7 @@ pub const SrcLoc = struct { var buf: [2]Ast.Node.Index = undefined; const full = tree.fullArrayInit(&buf, parent_node).?; - return nodeToSpan(tree, full.ast.type_expr); + return tree.nodeToSpan(full.ast.type_expr); }, .node_offset_store_ptr => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); @@ -1960,9 +1951,9 @@ pub const SrcLoc = struct { switch (node_tags[node]) { .assign => { - return nodeToSpan(tree, node_datas[node].lhs); + return tree.nodeToSpan(node_datas[node].lhs); }, - else => return nodeToSpan(tree, node), + else => return tree.nodeToSpan(node), } }, .node_offset_store_operand => |node_off| { @@ -1973,9 +1964,9 @@ pub const SrcLoc = struct { switch (node_tags[node]) { .assign => { - return nodeToSpan(tree, node_datas[node].rhs); + return tree.nodeToSpan(node_datas[node].rhs); }, - else => return nodeToSpan(tree, node), + else => return tree.nodeToSpan(node), } }, .node_offset_return_operand => |node_off| { @@ -1984,9 +1975,9 @@ pub const SrcLoc = struct { const node_tags = tree.nodes.items(.tag); const node_datas = tree.nodes.items(.data); if (node_tags[node] == .@"return" and node_datas[node].lhs != 0) { - return nodeToSpan(tree, node_datas[node].lhs); + return tree.nodeToSpan(node_datas[node].lhs); } - return nodeToSpan(tree, node); + return tree.nodeToSpan(node); }, } } @@ -2010,40 +2001,7 @@ pub const SrcLoc = struct { .builtin_call, .builtin_call_comma => tree.extra_data[node_datas[node].lhs + arg_index], else => unreachable, }; - return nodeToSpan(tree, param); - } - - pub fn nodeToSpan(tree: *const Ast, node: u32) Span { - return tokensToSpan( - tree, - tree.firstToken(node), - tree.lastToken(node), - tree.nodes.items(.main_token)[node], - ); - } - - fn tokenToSpan(tree: *const Ast, token: Ast.TokenIndex) Span { - return tokensToSpan(tree, token, token, token); - } - - fn tokensToSpan(tree: *const Ast, start: Ast.TokenIndex, end: Ast.TokenIndex, main: Ast.TokenIndex) Span { - const token_starts = tree.tokens.items(.start); - var start_tok = start; - var end_tok = end; - - if (tree.tokensOnSameLine(start, end)) { - // do nothing - } else if (tree.tokensOnSameLine(start, main)) { - end_tok = main; - } else if (tree.tokensOnSameLine(main, end)) { - start_tok = main; - } else { - start_tok = main; - end_tok = main; - } - const start_off = token_starts[start_tok]; - const end_off = token_starts[end_tok] + @as(u32, @intCast(tree.tokenSlice(end_tok).len)); - return Span{ .start = start_off, .end = end_off, .main = token_starts[main] }; + return tree.nodeToSpan(param); } }; diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig index 
diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig
index e4e944d186..ac21632a60 100644
--- a/src/Package/Fetch.zig
+++ b/src/Package/Fetch.zig
@@ -592,7 +592,7 @@ fn loadManifest(f: *Fetch, pkg_root: Package.Path) RunError!void {
     if (ast.errors.len > 0) {
         const file_path = try std.fmt.allocPrint(arena, "{}" ++ Manifest.basename, .{pkg_root});
-        try main.putAstErrorsIntoBundle(arena, ast.*, file_path, eb);
+        try std.zig.putAstErrorsIntoBundle(arena, ast.*, file_path, eb);
         return error.FetchFailed;
     }
 
@@ -1690,7 +1690,6 @@ const Cache = std.Build.Cache;
 const ThreadPool = std.Thread.Pool;
 const WaitGroup = std.Thread.WaitGroup;
 const Fetch = @This();
-const main = @import("../main.zig");
 const git = @import("Fetch/git.zig");
 const Package = @import("../Package.zig");
 const Manifest = Package.Manifest;
diff --git a/src/main.zig b/src/main.zig
index 278a3939fc..5ece1b97d9 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -8,6 +8,7 @@ const process = std.process;
 const Allocator = mem.Allocator;
 const ArrayList = std.ArrayList;
 const Ast = std.zig.Ast;
+const Color = std.zig.Color;
 const warn = std.log.warn;
 const ThreadPool = std.Thread.Pool;
 const cleanExit = std.process.cleanExit;
@@ -66,18 +67,8 @@ pub fn fatal(comptime format: []const u8, args: anytype) noreturn {
     process.exit(1);
 }
 
-/// There are many assumptions in the entire codebase that Zig source files can
-/// be byte-indexed with a u32 integer.
-const max_src_size = std.math.maxInt(u32);
-
 const debug_extensions_enabled = builtin.mode == .Debug;
 
-const Color = enum {
-    auto,
-    off,
-    on,
-};
-
 const normal_usage =
     \\Usage: zig [command] [options]
     \\
@@ -4501,7 +4492,7 @@ fn updateModule(comp: *Compilation, color: Color) !void {
         defer errors.deinit(comp.gpa);
 
         if (errors.errorMessageCount() > 0) {
-            errors.renderToStdErr(renderOptions(color));
+            errors.renderToStdErr(color.renderOptions());
             return error.SemanticAnalyzeFail;
         }
     }
@@ -4601,7 +4592,7 @@ fn cmdTranslateC(comp: *Compilation, arena: Allocator, fancy_output: ?*Compilati
                 p.errors = errors;
                 return;
             } else {
-                errors.renderToStdErr(renderOptions(color));
+                errors.renderToStdErr(color.renderOptions());
                 process.exit(1);
             }
         },
@@ -5528,7 +5519,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
 
     if (fetch.error_bundle.root_list.items.len > 0) {
         var errors = try fetch.error_bundle.toOwnedBundle("");
-        errors.renderToStdErr(renderOptions(color));
+        errors.renderToStdErr(color.renderOptions());
         process.exit(1);
     }
 
@@ -5719,470 +5710,155 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
     }
 }
 
-fn readSourceFileToEndAlloc(
-    allocator: Allocator,
-    input: *const fs.File,
-    size_hint: ?usize,
-) ![:0]u8 {
-    const source_code = input.readToEndAllocOptions(
-        allocator,
-        max_src_size,
-        size_hint,
-        @alignOf(u16),
-        0,
-    ) catch |err| switch (err) {
-        error.ConnectionResetByPeer => unreachable,
-        error.ConnectionTimedOut => unreachable,
-        error.NotOpenForReading => unreachable,
-        else => |e| return e,
-    };
-    errdefer allocator.free(source_code);
+fn cmdFmt(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
+    const color: Color = .auto;
 
-    // Detect unsupported file types with their Byte Order Mark
-    const unsupported_boms = [_][]const u8{
-        "\xff\xfe\x00\x00", // UTF-32 little endian
-        "\xfe\xff\x00\x00", // UTF-32 big endian
-        "\xfe\xff", // UTF-16 big endian
+    const target_query: std.Target.Query = .{};
+    const resolved_target: Package.Module.ResolvedTarget = .{
+        .result = resolveTargetQueryOrFatal(target_query),
+        .is_native_os = true,
+        .is_native_abi = true,
     };
-    for (unsupported_boms) |bom| {
-        if (mem.startsWith(u8, source_code, bom)) {
-            return error.UnsupportedEncoding;
-        }
-    }
 
-    // If the file starts with a UTF-16 little endian BOM, translate it to UTF-8
-    if (mem.startsWith(u8, source_code, "\xff\xfe")) {
-        const source_code_utf16_le = mem.bytesAsSlice(u16, source_code);
-        const source_code_utf8 = std.unicode.utf16LeToUtf8AllocZ(allocator, source_code_utf16_le) catch |err| switch (err) {
-            error.DanglingSurrogateHalf => error.UnsupportedEncoding,
-            error.ExpectedSecondSurrogateHalf => error.UnsupportedEncoding,
-            error.UnexpectedSecondSurrogateHalf => error.UnsupportedEncoding,
-            else => |e| return e,
-        };
+    const exe_basename = try std.zig.binNameAlloc(arena, .{
+        .root_name = "fmt",
+        .target = resolved_target.result,
+        .output_mode = .Exe,
+    });
+    const emit_bin: Compilation.EmitLoc = .{
+        .directory = null, // Use the global zig-cache.
+        .basename = exe_basename,
+    };
 
-        allocator.free(source_code);
-        return source_code_utf8;
-    }
+    const self_exe_path = introspect.findZigExePath(arena) catch |err| {
+        fatal("unable to find self exe path: {s}", .{@errorName(err)});
+    };
 
-    return source_code;
-}
+    const override_lib_dir: ?[]const u8 = try EnvVar.ZIG_LIB_DIR.get(arena);
+    const override_global_cache_dir: ?[]const u8 = try EnvVar.ZIG_GLOBAL_CACHE_DIR.get(arena);
 
-const usage_fmt =
-    \\Usage: zig fmt [file]...
-    \\
-    \\   Formats the input files and modifies them in-place.
-    \\   Arguments can be files or directories, which are searched
-    \\   recursively.
-    \\
-    \\Options:
-    \\   -h, --help             Print this help and exit
-    \\   --color [auto|off|on]  Enable or disable colored error messages
-    \\   --stdin                Format code from stdin; output to stdout
-    \\   --check                List non-conforming files and exit with an error
-    \\                          if the list is non-empty
-    \\   --ast-check            Run zig ast-check on every file
-    \\   --exclude [file]       Exclude file or directory from formatting
-    \\
-    \\
-;
+    var zig_lib_directory: Compilation.Directory = if (override_lib_dir) |lib_dir| .{
+        .path = lib_dir,
+        .handle = fs.cwd().openDir(lib_dir, .{}) catch |err| {
+            fatal("unable to open zig lib directory from 'zig-lib-dir' argument: '{s}': {s}", .{ lib_dir, @errorName(err) });
+        },
+    } else introspect.findZigLibDirFromSelfExe(arena, self_exe_path) catch |err| {
+        fatal("unable to find zig installation directory '{s}': {s}", .{ self_exe_path, @errorName(err) });
+    };
+    defer zig_lib_directory.handle.close();
 
-const Fmt = struct {
-    seen: SeenMap,
-    any_error: bool,
-    check_ast: bool,
-    color: Color,
-    gpa: Allocator,
-    arena: Allocator,
-    out_buffer: std.ArrayList(u8),
+    var global_cache_directory: Compilation.Directory = l: {
+        const p = override_global_cache_dir orelse try introspect.resolveGlobalCacheDir(arena);
+        break :l .{
+            .handle = try fs.cwd().makeOpenPath(p, .{}),
+            .path = p,
+        };
+    };
+    defer global_cache_directory.handle.close();
 
-    const SeenMap = std.AutoHashMap(fs.File.INode, void);
-};
+    var thread_pool: ThreadPool = undefined;
+    try thread_pool.init(.{ .allocator = gpa });
+    defer thread_pool.deinit();
 
-fn cmdFmt(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
-    var color: Color = .auto;
-    var stdin_flag: bool = false;
-    var check_flag: bool = false;
-    var check_ast_flag: bool = false;
-    var input_files = ArrayList([]const u8).init(gpa);
-    defer input_files.deinit();
-    var excluded_files = ArrayList([]const u8).init(gpa);
-    defer excluded_files.deinit();
+    var child_argv: std.ArrayListUnmanaged([]const u8) = .{};
+    try child_argv.ensureUnusedCapacity(arena, args.len + 1);
 
+    // We want to release all the locks before executing the child process, so we make a nice
+    // big block here to ensure the cleanup gets run when we extract out our argv.
     {
-        var i: usize = 0;
-        while (i < args.len) : (i += 1) {
-            const arg = args[i];
-            if (mem.startsWith(u8, arg, "-")) {
-                if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
-                    const stdout = io.getStdOut().writer();
-                    try stdout.writeAll(usage_fmt);
-                    return cleanExit();
-                } else if (mem.eql(u8, arg, "--color")) {
-                    if (i + 1 >= args.len) {
-                        fatal("expected [auto|on|off] after --color", .{});
-                    }
-                    i += 1;
-                    const next_arg = args[i];
-                    color = std.meta.stringToEnum(Color, next_arg) orelse {
-                        fatal("expected [auto|on|off] after --color, found '{s}'", .{next_arg});
-                    };
-                } else if (mem.eql(u8, arg, "--stdin")) {
-                    stdin_flag = true;
-                } else if (mem.eql(u8, arg, "--check")) {
-                    check_flag = true;
-                } else if (mem.eql(u8, arg, "--ast-check")) {
-                    check_ast_flag = true;
-                } else if (mem.eql(u8, arg, "--exclude")) {
-                    if (i + 1 >= args.len) {
-                        fatal("expected parameter after --exclude", .{});
-                    }
-                    i += 1;
-                    const next_arg = args[i];
-                    try excluded_files.append(next_arg);
-                } else {
-                    fatal("unrecognized parameter: '{s}'", .{arg});
-                }
-            } else {
-                try input_files.append(arg);
-            }
-        }
-    }
-
-    if (stdin_flag) {
-        if (input_files.items.len != 0) {
-            fatal("cannot use --stdin with positional arguments", .{});
-        }
-
-        const stdin = io.getStdIn();
-        const source_code = readSourceFileToEndAlloc(gpa, &stdin, null) catch |err| {
-            fatal("unable to read stdin: {}", .{err});
-        };
-        defer gpa.free(source_code);
-
-        var tree = Ast.parse(gpa, source_code, .zig) catch |err| {
-            fatal("error parsing stdin: {}", .{err});
+        const main_mod_paths: Package.Module.CreateOptions.Paths = .{
+            .root = .{
+                .root_dir = zig_lib_directory,
+                .sub_path = "std/zig",
+            },
+            .root_src_path = "fmt.zig",
         };
-        defer tree.deinit(gpa);
-
-        if (check_ast_flag) {
-            var file: Module.File = .{
-                .status = .never_loaded,
-                .source_loaded = true,
-                .zir_loaded = false,
-                .sub_file_path = "",
-                .source = source_code,
-                .stat = undefined,
-                .tree = tree,
-                .tree_loaded = true,
-                .zir = undefined,
-                .mod = undefined,
-                .root_decl = .none,
-            };
-
-            file.mod = try Package.Module.createLimited(arena, .{
-                .root = Package.Path.cwd(),
-                .root_src_path = file.sub_file_path,
-                .fully_qualified_name = "root",
-            });
-
-            file.zir = try AstGen.generate(gpa, file.tree);
-            file.zir_loaded = true;
-            defer file.zir.deinit(gpa);
-
-            if (file.zir.hasCompileErrors()) {
-                var wip_errors: std.zig.ErrorBundle.Wip = undefined;
-                try wip_errors.init(gpa);
-                defer wip_errors.deinit();
-                try Compilation.addZirErrorMessages(&wip_errors, &file);
-                var error_bundle = try wip_errors.toOwnedBundle("");
-                defer error_bundle.deinit(gpa);
-                error_bundle.renderToStdErr(renderOptions(color));
-                process.exit(2);
-            }
-        } else if (tree.errors.len != 0) {
-            try printAstErrorsToStderr(gpa, tree, "", color);
-            process.exit(2);
-        }
-        const formatted = try tree.render(gpa);
-        defer gpa.free(formatted);
-
-        if (check_flag) {
-            const code: u8 = @intFromBool(mem.eql(u8, formatted, source_code));
-            process.exit(code);
-        }
-        return io.getStdOut().writeAll(formatted);
-    }
+        const config = try Compilation.Config.resolve(.{
+            .output_mode = .Exe,
+            .root_optimize_mode = .ReleaseFast,
+            .resolved_target = resolved_target,
+            .have_zcu = true,
+            .emit_bin = true,
+            .is_test = false,
+        });
 
-    if (input_files.items.len == 0) {
-        fatal("expected at least one source file argument", .{});
-    }
+        const root_mod = try Package.Module.create(arena, .{
+            .global_cache_directory = global_cache_directory,
+            .paths = main_mod_paths,
+            .fully_qualified_name = "root",
+            .cc_argv = &.{},
+            .inherited = .{
+                .resolved_target = resolved_target,
+                .optimize_mode = .ReleaseFast,
+            },
+            .global = config,
+            .parent = null,
+            .builtin_mod = null,
+        });
 
-    var fmt = Fmt{
-        .gpa = gpa,
-        .arena = arena,
-        .seen = Fmt.SeenMap.init(gpa),
-        .any_error = false,
-        .check_ast = check_ast_flag,
-        .color = color,
-        .out_buffer = std.ArrayList(u8).init(gpa),
-    };
-    defer fmt.seen.deinit();
-    defer fmt.out_buffer.deinit();
+        const comp = Compilation.create(gpa, arena, .{
+            .zig_lib_directory = zig_lib_directory,
+            .local_cache_directory = global_cache_directory,
+            .global_cache_directory = global_cache_directory,
+            .root_name = "fmt",
+            .config = config,
+            .root_mod = root_mod,
+            .main_mod = root_mod,
+            .emit_bin = emit_bin,
+            .emit_h = null,
+            .self_exe_path = self_exe_path,
+            .thread_pool = &thread_pool,
+            .cache_mode = .whole,
+        }) catch |err| {
+            fatal("unable to create compilation: {s}", .{@errorName(err)});
+        };
+        defer comp.destroy();
 
-    // Mark any excluded files/directories as already seen,
-    // so that they are skipped later during actual processing
-    for (excluded_files.items) |file_path| {
-        const stat = fs.cwd().statFile(file_path) catch |err| switch (err) {
-            error.FileNotFound => continue,
-            // On Windows, statFile does not work for directories
-            error.IsDir => dir: {
-                var dir = try fs.cwd().openDir(file_path, .{});
-                defer dir.close();
-                break :dir try dir.stat();
-            },
+        updateModule(comp, color) catch |err| switch (err) {
+            error.SemanticAnalyzeFail => process.exit(2),
             else => |e| return e,
         };
-        try fmt.seen.put(stat.inode, {});
-    }
-
-    for (input_files.items) |file_path| {
-        try fmtPath(&fmt, file_path, check_flag, fs.cwd(), file_path);
-    }
-    if (fmt.any_error) {
-        process.exit(1);
-    }
-}
-
-const FmtError = error{
-    SystemResources,
-    OperationAborted,
-    IoPending,
-    BrokenPipe,
-    Unexpected,
-    WouldBlock,
-    FileClosed,
-    DestinationAddressRequired,
-    DiskQuota,
-    FileTooBig,
-    InputOutput,
-    NoSpaceLeft,
-    AccessDenied,
-    OutOfMemory,
-    RenameAcrossMountPoints,
-    ReadOnlyFileSystem,
-    LinkQuotaExceeded,
-    FileBusy,
-    EndOfStream,
-    Unseekable,
-    NotOpenForWriting,
-    UnsupportedEncoding,
-    ConnectionResetByPeer,
-    SocketNotConnected,
-    LockViolation,
-    NetNameDeleted,
-    InvalidArgument,
-} || fs.File.OpenError;
-
-fn fmtPath(fmt: *Fmt, file_path: []const u8, check_mode: bool, dir: fs.Dir, sub_path: []const u8) FmtError!void {
-    fmtPathFile(fmt, file_path, check_mode, dir, sub_path) catch |err| switch (err) {
-        error.IsDir, error.AccessDenied => return fmtPathDir(fmt, file_path, check_mode, dir, sub_path),
-        else => {
-            warn("unable to format '{s}': {s}", .{ file_path, @errorName(err) });
-            fmt.any_error = true;
-            return;
-        },
-    };
-}
-
-fn fmtPathDir(
-    fmt: *Fmt,
-    file_path: []const u8,
-    check_mode: bool,
-    parent_dir: fs.Dir,
-    parent_sub_path: []const u8,
-) FmtError!void {
-    var dir = try parent_dir.openDir(parent_sub_path, .{ .iterate = true });
-    defer dir.close();
-
-    const stat = try dir.stat();
-    if (try fmt.seen.fetchPut(stat.inode, {})) |_| return;
-
-    var dir_it = dir.iterate();
-    while (try dir_it.next()) |entry| {
-        const is_dir = entry.kind == .directory;
-
-        if (is_dir and (mem.eql(u8, entry.name, "zig-cache") or mem.eql(u8, entry.name, "zig-out"))) continue;
-
-        if (is_dir or entry.kind == .file and (mem.endsWith(u8, entry.name, ".zig") or mem.endsWith(u8, entry.name, ".zon"))) {
-            const full_path = try fs.path.join(fmt.gpa, &[_][]const u8{ file_path, entry.name });
-            defer fmt.gpa.free(full_path);
-
-            if (is_dir) {
-                try fmtPathDir(fmt, full_path, check_mode, dir, entry.name);
-            } else {
-                fmtPathFile(fmt, full_path, check_mode, dir, entry.name) catch |err| {
-                    warn("unable to format '{s}': {s}", .{ full_path, @errorName(err) });
-                    fmt.any_error = true;
-                    return;
-                };
-            }
-        }
-    }
-}
-
-fn fmtPathFile(
-    fmt: *Fmt,
-    file_path: []const u8,
-    check_mode: bool,
-    dir: fs.Dir,
-    sub_path: []const u8,
-) FmtError!void {
-    const source_file = try dir.openFile(sub_path, .{});
-    var file_closed = false;
-    errdefer if (!file_closed) source_file.close();
-
-    const stat = try source_file.stat();
-
-    if (stat.kind == .directory)
-        return error.IsDir;
-
-    const gpa = fmt.gpa;
-    const source_code = try readSourceFileToEndAlloc(
-        gpa,
-        &source_file,
-        std.math.cast(usize, stat.size) orelse return error.FileTooBig,
-    );
-    defer gpa.free(source_code);
-
-    source_file.close();
-    file_closed = true;
-
-    // Add to set after no longer possible to get error.IsDir.
-    if (try fmt.seen.fetchPut(stat.inode, {})) |_| return;
-
-    var tree = try Ast.parse(gpa, source_code, .zig);
-    defer tree.deinit(gpa);
-    if (tree.errors.len != 0) {
-        try printAstErrorsToStderr(gpa, tree, file_path, fmt.color);
-        fmt.any_error = true;
-        return;
+        const fmt_exe = try global_cache_directory.join(arena, &.{comp.cache_use.whole.bin_sub_path.?});
+        child_argv.appendAssumeCapacity(fmt_exe);
     }
 
-    if (fmt.check_ast) {
-        var file: Module.File = .{
-            .status = .never_loaded,
-            .source_loaded = true,
-            .zir_loaded = false,
-            .sub_file_path = file_path,
-            .source = source_code,
-            .stat = .{
-                .size = stat.size,
-                .inode = stat.inode,
-                .mtime = stat.mtime,
-            },
-            .tree = tree,
-            .tree_loaded = true,
-            .zir = undefined,
-            .mod = undefined,
-            .root_decl = .none,
-        };
+    child_argv.appendSliceAssumeCapacity(args);
 
-        file.mod = try Package.Module.createLimited(fmt.arena, .{
-            .root = Package.Path.cwd(),
-            .root_src_path = file.sub_file_path,
-            .fully_qualified_name = "root",
+    if (process.can_execv) {
+        const err = process.execv(gpa, child_argv.items);
+        const cmd = try std.mem.join(arena, " ", child_argv.items);
+        fatal("the following command failed to execve with '{s}':\n{s}", .{
+            @errorName(err),
+            cmd,
         });
-
-        if (stat.size > max_src_size)
-            return error.FileTooBig;
-
-        file.zir = try AstGen.generate(gpa, file.tree);
-        file.zir_loaded = true;
-        defer file.zir.deinit(gpa);
-
-        if (file.zir.hasCompileErrors()) {
-            var wip_errors: std.zig.ErrorBundle.Wip = undefined;
-            try wip_errors.init(gpa);
-            defer wip_errors.deinit();
-            try Compilation.addZirErrorMessages(&wip_errors, &file);
-            var error_bundle = try wip_errors.toOwnedBundle("");
-            defer error_bundle.deinit(gpa);
-            error_bundle.renderToStdErr(renderOptions(fmt.color));
-            fmt.any_error = true;
-        }
     }
 
-    // As a heuristic, we make enough capacity for the same as the input source.
-    fmt.out_buffer.shrinkRetainingCapacity(0);
-    try fmt.out_buffer.ensureTotalCapacity(source_code.len);
-
-    try tree.renderToArrayList(&fmt.out_buffer, .{});
-    if (mem.eql(u8, fmt.out_buffer.items, source_code))
-        return;
-
-    if (check_mode) {
-        const stdout = io.getStdOut().writer();
-        try stdout.print("{s}\n", .{file_path});
-        fmt.any_error = true;
-    } else {
-        var af = try dir.atomicFile(sub_path, .{ .mode = stat.mode });
-        defer af.deinit();
-
-        try af.file.writeAll(fmt.out_buffer.items);
-        try af.finish();
-        const stdout = io.getStdOut().writer();
-        try stdout.print("{s}\n", .{file_path});
+    if (!process.can_spawn) {
+        const cmd = try std.mem.join(arena, " ", child_argv.items);
+        fatal("the following command cannot be executed ({s} does not support spawning a child process):\n{s}", .{
+            @tagName(builtin.os.tag), cmd,
+        });
     }
-}
-
-fn printAstErrorsToStderr(gpa: Allocator, tree: Ast, path: []const u8, color: Color) !void {
-    var wip_errors: std.zig.ErrorBundle.Wip = undefined;
-    try wip_errors.init(gpa);
-    defer wip_errors.deinit();
-    try putAstErrorsIntoBundle(gpa, tree, path, &wip_errors);
+    var child = std.ChildProcess.init(child_argv.items, gpa);
+    child.stdin_behavior = .Inherit;
+    child.stdout_behavior = .Inherit;
+    child.stderr_behavior = .Inherit;
 
-    var error_bundle = try wip_errors.toOwnedBundle("");
-    defer error_bundle.deinit(gpa);
-    error_bundle.renderToStdErr(renderOptions(color));
-}
-
-pub fn putAstErrorsIntoBundle(
-    gpa: Allocator,
-    tree: Ast,
-    path: []const u8,
-    wip_errors: *std.zig.ErrorBundle.Wip,
-) Allocator.Error!void {
-    var file: Module.File = .{
-        .status = .never_loaded,
-        .source_loaded = true,
-        .zir_loaded = false,
-        .sub_file_path = path,
-        .source = tree.source,
-        .stat = .{
-            .size = 0,
-            .inode = 0,
-            .mtime = 0,
+    const term = try child.spawnAndWait();
+    switch (term) {
+        .Exited => |code| {
+            if (code == 0) return cleanExit();
+            const cmd = try std.mem.join(arena, " ", child_argv.items);
+            fatal("the following build command failed with exit code {d}:\n{s}", .{ code, cmd });
         },
-        .tree = tree,
-        .tree_loaded = true,
-        .zir = undefined,
-        .mod = try Package.Module.createLimited(gpa, .{
-            .root = Package.Path.cwd(),
-            .root_src_path = path,
-            .fully_qualified_name = "root",
-        }),
-        .root_decl = .none,
-    };
-    defer gpa.destroy(file.mod);
-
-    file.zir = try AstGen.generate(gpa, file.tree);
-    file.zir_loaded = true;
-    defer file.zir.deinit(gpa);
-
-    try Compilation.addZirErrorMessages(wip_errors, &file);
+        else => {
+            const cmd = try std.mem.join(arena, " ", child_argv.items);
+            fatal("the following build command crashed:\n{s}", .{cmd});
+        },
+    }
 }
 
 const info_zen =
@@ -6710,7 +6386,7 @@ fn cmdAstCheck(
 
         const stat = try f.stat();
 
-        if (stat.size > max_src_size)
+        if (stat.size > std.zig.max_src_size)
             return error.FileTooBig;
 
         const source = try arena.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0);
@@ -6728,7 +6404,7 @@ fn cmdAstCheck(
         };
     } else {
         const stdin = io.getStdIn();
-        const source = readSourceFileToEndAlloc(arena, &stdin, null) catch |err| {
+        const source = std.zig.readSourceFileToEndAlloc(arena, stdin, null) catch |err| {
            fatal("unable to read stdin: {}", .{err});
         };
         file.sub_file_path = "";
@@ -6758,7 +6434,7 @@ fn cmdAstCheck(
         try Compilation.addZirErrorMessages(&wip_errors, &file);
         var error_bundle = try wip_errors.toOwnedBundle("");
         defer error_bundle.deinit(gpa);
-        error_bundle.renderToStdErr(renderOptions(color));
+        error_bundle.renderToStdErr(color.renderOptions());
         process.exit(1);
     }
 
@@ -6889,7 +6565,7 @@ fn cmdChangelist(
 
     const stat = try f.stat();
 
-    if (stat.size > max_src_size)
+    if (stat.size > std.zig.max_src_size)
         return error.FileTooBig;
 
     var file: Module.File = .{
@@ -6938,7 +6614,7 @@ fn cmdChangelist(
     try Compilation.addZirErrorMessages(&wip_errors, &file);
     var error_bundle = try wip_errors.toOwnedBundle("");
     defer error_bundle.deinit(gpa);
-    error_bundle.renderToStdErr(renderOptions(color));
+    error_bundle.renderToStdErr(color.renderOptions());
    process.exit(1);
 }
 
@@ -6949,7 +6625,7 @@ fn cmdChangelist(
 
     const new_stat = try new_f.stat();
 
-    if (new_stat.size > max_src_size)
+    if (new_stat.size > std.zig.max_src_size)
         return error.FileTooBig;
 
     const new_source = try arena.allocSentinel(u8, @as(usize, @intCast(new_stat.size)), 0);
@@ -6973,7 +6649,7 @@ fn cmdChangelist(
     try Compilation.addZirErrorMessages(&wip_errors, &file);
     var error_bundle = try wip_errors.toOwnedBundle("");
     defer error_bundle.deinit(gpa);
-    error_bundle.renderToStdErr(renderOptions(color));
+    error_bundle.renderToStdErr(color.renderOptions());
     process.exit(1);
 }
 
@@ -7241,23 +6917,6 @@ const ClangSearchSanitizer = struct {
     };
 };
 
-fn get_tty_conf(color: Color) std.io.tty.Config {
-    return switch (color) {
-        .auto => std.io.tty.detectConfig(std.io.getStdErr()),
-        .on => .escape_codes,
-        .off => .no_color,
-    };
-}
-
-fn renderOptions(color: Color) std.zig.ErrorBundle.RenderOptions {
-    const ttyconf = get_tty_conf(color);
-    return .{
-        .ttyconf = ttyconf,
-        .include_source_line = ttyconf != .no_color,
-        .include_reference_trace = ttyconf != .no_color,
-    };
-}
-
 fn accessLibPath(
     test_path: *std.ArrayList(u8),
     checked_paths: *std.ArrayList(u8),
@@ -7498,7 +7157,7 @@ fn cmdFetch(
 
     if (fetch.error_bundle.root_list.items.len > 0) {
         var errors = try fetch.error_bundle.toOwnedBundle("");
-        errors.renderToStdErr(renderOptions(color));
+        errors.renderToStdErr(color.renderOptions());
         process.exit(1);
     }
 
@@ -7790,7 +7449,7 @@ fn loadManifest(
     errdefer ast.deinit(gpa);
 
     if (ast.errors.len > 0) {
-        try printAstErrorsToStderr(gpa, ast, Package.Manifest.basename, options.color);
+        try std.zig.printAstErrorsToStderr(gpa, ast, Package.Manifest.basename, options.color);
         process.exit(2);
     }
 
@@ -7807,7 +7466,7 @@ fn loadManifest(
 
     var error_bundle = try wip_errors.toOwnedBundle("");
     defer error_bundle.deinit(gpa);
-    error_bundle.renderToStdErr(renderOptions(options.color));
+    error_bundle.renderToStdErr(options.color.renderOptions());
     process.exit(2);
 }
-- cgit v1.2.3
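With the private `Color` enum and the `renderOptions`/`get_tty_conf` helpers
deleted from main.zig, every call site now goes through `std.zig.Color`. A
sketch of the shape `std.zig.Color` presumably has after this change,
reassembled from the helpers deleted above; the real std.zig code may differ:

    const std = @import("std");

    pub const Color = enum {
        auto,
        off,
        on,

        // Pick a tty configuration for stderr based on the requested mode;
        // same logic as the deleted free function get_tty_conf.
        pub fn get_tty_conf(color: Color) std.io.tty.Config {
            return switch (color) {
                .auto => std.io.tty.detectConfig(std.io.getStdErr()),
                .on => .escape_codes,
                .off => .no_color,
            };
        }

        // Derive ErrorBundle render options; source lines and reference
        // traces are suppressed whenever color output is disabled.
        pub fn renderOptions(color: Color) std.zig.ErrorBundle.RenderOptions {
            const ttyconf = color.get_tty_conf();
            return .{
                .ttyconf = ttyconf,
                .include_source_line = ttyconf != .no_color,
                .include_reference_trace = ttyconf != .no_color,
            };
        }
    };

Call sites then migrate mechanically, as in the hunks above:
`errors.renderToStdErr(renderOptions(color))` becomes
`errors.renderToStdErr(color.renderOptions())`.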