From 5d6f7b44c19b064a543b0c1eecb6ef5c671b612e Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 8 Jul 2021 20:42:47 -0700
Subject: stage2: rework AIR memory layout

This commit changes the AIR file and the documentation of the memory
layout. The actual work of modifying the surrounding code (in Sema and
codegen) is not yet done.
---
 src/link/Elf.zig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index d754b478b9..90224866ba 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -10,7 +10,7 @@ const log = std.log.scoped(.link);
 const DW = std.dwarf;
 const leb128 = std.leb;
 
-const ir = @import("../air.zig");
+const Air = @import("../Air.zig");
 const Module = @import("../Module.zig");
 const Compilation = @import("../Compilation.zig");
 const codegen = @import("../codegen.zig");
--
cgit v1.2.3

From 913393fd3b986dd262a8419341dced9ad5d9620d Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 12 Jul 2021 15:30:30 -0700
Subject: stage2: first pass over Module.zig for AIR memory layout

---
 BRANCH_TODO              | 122 ++++++++++++++++
 src/Air.zig              |  14 +-
 src/AstGen.zig           |   2 +-
 src/Module.zig           | 359 ++++------------------------------------------
 src/Sema.zig             | 114 ++++++++++++++-
 src/codegen.zig          | 196 ++++++++++++++------------
 src/codegen/spirv.zig    |  57 +++++---
 src/link/SpirV.zig       |   4 +
 src/register_manager.zig |  16 +--
 9 files changed, 429 insertions(+), 455 deletions(-)

diff --git a/BRANCH_TODO b/BRANCH_TODO
index be3959e035..585c8adf44 100644
--- a/BRANCH_TODO
+++ b/BRANCH_TODO
@@ -568,3 +568,125 @@ const DumpAir = struct {
         }
     }
 };
+
+pub fn constInst(mod: *Module, arena: *Allocator, src: LazySrcLoc, typed_value: TypedValue) !*ir.Inst {
+    _ = mod;
+    const const_inst = try arena.create(ir.Inst.Constant);
+    const_inst.* = .{
+        .base = .{
+            .tag = ir.Inst.Constant.base_tag,
+            .ty = typed_value.ty,
+            .src = src,
+        },
+        .val = typed_value.val,
+    };
+    return &const_inst.base;
+}
+
+pub fn constType(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type) !*ir.Inst {
+    return mod.constInst(arena, src, .{
+        .ty = Type.initTag(.type),
+        .val = try ty.toValue(arena),
+    });
+}
+
+pub fn constVoid(mod: *Module, arena: *Allocator, src: LazySrcLoc) !*ir.Inst {
+    return mod.constInst(arena, src, .{
+        .ty = Type.initTag(.void),
+        .val = Value.initTag(.void_value),
+    });
+}
+
+pub fn constNoReturn(mod: *Module, arena: *Allocator, src: LazySrcLoc) !*ir.Inst {
+    return mod.constInst(arena, src, .{
+        .ty = Type.initTag(.noreturn),
+        .val = Value.initTag(.unreachable_value),
+    });
+}
+
+pub fn constUndef(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type) !*ir.Inst {
+    return mod.constInst(arena, src, .{
+        .ty = ty,
+        .val = Value.initTag(.undef),
+    });
+}
+
+pub fn constBool(mod: *Module, arena: *Allocator, src: LazySrcLoc, v: bool) !*ir.Inst {
+    return mod.constInst(arena, src, .{
+        .ty = Type.initTag(.bool),
+        .val = ([2]Value{ Value.initTag(.bool_false), Value.initTag(.bool_true) })[@boolToInt(v)],
+    });
+}
+
+pub fn constIntUnsigned(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, int: u64) !*ir.Inst {
+    return mod.constInst(arena, src, .{
+        .ty = ty,
+        .val = try Value.Tag.int_u64.create(arena, int),
+    });
+}
+
+pub fn constIntSigned(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, int: i64) !*ir.Inst {
+    return mod.constInst(arena, src, .{
+        .ty = ty,
+        .val = try Value.Tag.int_i64.create(arena, int),
+    });
+}
+
+pub fn constIntBig(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, big_int: BigIntConst) !*ir.Inst {
+    if (big_int.positive) {
+        if (big_int.to(u64)) |x| {
+            return mod.constIntUnsigned(arena, src, ty, x);
+        } else |err| switch (err) {
+            error.NegativeIntoUnsigned => unreachable,
+            error.TargetTooSmall => {}, // handled below
+        }
+        return mod.constInst(arena, src, .{
+            .ty = ty,
+            .val = try Value.Tag.int_big_positive.create(arena, big_int.limbs),
+        });
+    } else {
+        if (big_int.to(i64)) |x| {
+            return mod.constIntSigned(arena, src, ty, x);
+        } else |err| switch (err) {
+            error.NegativeIntoUnsigned => unreachable,
+            error.TargetTooSmall => {}, // handled below
+        }
+        return mod.constInst(arena, src, .{
+            .ty = ty,
+            .val = try Value.Tag.int_big_negative.create(arena, big_int.limbs),
+        });
+    }
+}
+
+pub fn dumpInst(mod: *Module, scope: *Scope, inst: *ir.Inst) void {
+    const zir_module = scope.namespace();
+    const source = zir_module.getSource(mod) catch @panic("dumpInst failed to get source");
+    const loc = std.zig.findLineColumn(source, inst.src);
+    if (inst.tag == .constant) {
+        std.debug.print("constant ty={} val={} src={s}:{d}:{d}\n", .{
+            inst.ty,
+            inst.castTag(.constant).?.val,
+            zir_module.subFilePath(),
+            loc.line + 1,
+            loc.column + 1,
+        });
+    } else if (inst.deaths == 0) {
+        std.debug.print("{s} ty={} src={s}:{d}:{d}\n", .{
+            @tagName(inst.tag),
+            inst.ty,
+            zir_module.subFilePath(),
+            loc.line + 1,
+            loc.column + 1,
+        });
+    } else {
+        std.debug.print("{s} ty={} deaths={b} src={s}:{d}:{d}\n", .{
+            @tagName(inst.tag),
+            inst.ty,
+            inst.deaths,
+            zir_module.subFilePath(),
+            loc.line + 1,
+            loc.column + 1,
+        });
+    }
+}

diff --git a/src/Air.zig b/src/Air.zig
index 112845559d..e85f2e5c43 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -29,8 +29,11 @@ pub const Inst = struct {
     data: Data,
 
     pub const Tag = enum(u8) {
-        /// The first N instructions in Air must be one arg instruction per function parameter.
-        /// Uses the `ty` field.
+        /// The first N instructions in the main block must be one arg instruction per
+        /// function parameter. This makes function parameters participate in
+        /// liveness analysis without any special handling.
+        /// Uses the `ty_str` field.
+        /// The string is the parameter name.
         arg,
         /// Float or integer addition. For integers, wrapping is undefined behavior.
         /// Both operands are guaranteed to be the same type, and the result type
@@ -131,6 +134,8 @@ pub const Inst = struct {
         /// A comptime-known value. Uses the `ty_pl` field, payload is index of
         /// `values` array.
         constant,
+        /// A comptime-known type. Uses the `ty` field.
+        const_ty,
         /// Notes the beginning of a source code statement and marks the line and column.
         /// Result type is always void.
         /// Uses the `dbg_stmt` field.
@@ -289,6 +294,11 @@ pub const Inst = struct {
             // Index into a different array.
             payload: u32,
         },
+        ty_str: struct {
+            ty: Ref,
+            // ZIR string table index.
+            str: u32,
+        },
         br: struct {
             block_inst: Index,
             operand: Ref,

diff --git a/src/AstGen.zig b/src/AstGen.zig
index 19906c94d3..24766aaf60 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -9821,7 +9821,7 @@ fn advanceSourceCursor(astgen: *AstGen, source: []const u8, end: usize) void {
     astgen.source_column = column;
 }
 
-const ref_start_index = Zir.Inst.Ref.typed_value_map.len;
+const ref_start_index: u32 = Zir.Inst.Ref.typed_value_map.len;
 
 fn indexToRef(inst: Zir.Inst.Index) Zir.Inst.Ref {
     return @intToEnum(Zir.Inst.Ref, ref_start_index + inst);

diff --git a/src/Module.zig b/src/Module.zig
index 6273243ee2..8971a57487 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -1155,7 +1155,7 @@ pub const Scope = struct {
         /// This can vary during inline or comptime function calls. See `Sema.owner_decl`
         /// for the one that will be the same for all Block instances.
         src_decl: *Decl,
-        instructions: ArrayListUnmanaged(*ir.Inst),
+        instructions: ArrayListUnmanaged(Air.Inst.Index),
         label: ?*Label = null,
         inlining: ?*Inlining,
         /// If runtime_index is not 0 then one of these is guaranteed to be non null.
@@ -1187,14 +1187,14 @@ pub const Scope = struct {
         };
 
         pub const Merges = struct {
-            block_inst: *ir.Inst.Block,
+            block_inst: Air.Inst.Index,
             /// Separate array list from break_inst_list so that it can be passed directly
             /// to resolvePeerTypes.
-            results: ArrayListUnmanaged(*ir.Inst),
+            results: ArrayListUnmanaged(Air.Inst.Index),
             /// Keeps track of the break instructions so that the operand can be replaced
             /// if we need to add type coercion at the end of block analysis.
             /// Same indexes, capacity, length as `results`.
-            br_list: ArrayListUnmanaged(*ir.Inst.Br),
+            br_list: ArrayListUnmanaged(Air.Inst.Index),
         };
 
         /// For debugging purposes.
@@ -1230,187 +1230,6 @@ pub const Scope = struct {
         pub fn getFileScope(block: *Block) *Scope.File {
             return block.src_decl.namespace.file_scope;
         }
-
-        pub fn addNoOp(
-            block: *Scope.Block,
-            src: LazySrcLoc,
-            ty: Type,
-            comptime tag: ir.Inst.Tag,
-        ) !*ir.Inst {
-            const inst = try block.sema.arena.create(tag.Type());
-            inst.* = .{
-                .base = .{
-                    .tag = tag,
-                    .ty = ty,
-                    .src = src,
-                },
-            };
-            try block.instructions.append(block.sema.gpa, &inst.base);
-            return &inst.base;
-        }
-
-        pub fn addUnOp(
-            block: *Scope.Block,
-            src: LazySrcLoc,
-            ty: Type,
-            tag: ir.Inst.Tag,
-            operand: *ir.Inst,
-        ) !*ir.Inst {
-            const inst = try block.sema.arena.create(ir.Inst.UnOp);
-            inst.* = .{
-                .base = .{
-                    .tag = tag,
-                    .ty = ty,
-                    .src = src,
-                },
-                .operand = operand,
-            };
-            try block.instructions.append(block.sema.gpa, &inst.base);
-            return &inst.base;
-        }
-
-        pub fn addBinOp(
-            block: *Scope.Block,
-            src: LazySrcLoc,
-            ty: Type,
-            tag: ir.Inst.Tag,
-            lhs: *ir.Inst,
-            rhs: *ir.Inst,
-        ) !*ir.Inst {
-            const inst = try block.sema.arena.create(ir.Inst.BinOp);
-            inst.* = .{
-                .base = .{
-                    .tag = tag,
-                    .ty = ty,
-                    .src = src,
-                },
-                .lhs = lhs,
-                .rhs = rhs,
-            };
-            try block.instructions.append(block.sema.gpa, &inst.base);
-            return &inst.base;
-        }
-
-        pub fn addBr(
-            scope_block: *Scope.Block,
-            src: LazySrcLoc,
-            target_block: *ir.Inst.Block,
-            operand: *ir.Inst,
-        ) !*ir.Inst.Br {
-            const inst = try scope_block.sema.arena.create(ir.Inst.Br);
-            inst.* = .{
-                .base = .{
-                    .tag = .br,
-                    .ty = Type.initTag(.noreturn),
-                    .src = src,
-                },
-                .operand = operand,
-                .block = target_block,
-            };
-            try scope_block.instructions.append(scope_block.sema.gpa, &inst.base);
-            return inst;
-        }
-
-        pub fn addCondBr(
-            block: *Scope.Block,
-            src: LazySrcLoc,
-            condition: *ir.Inst,
-            then_body: ir.Body,
-            else_body: ir.Body,
-        ) !*ir.Inst {
-            const inst = try block.sema.arena.create(ir.Inst.CondBr);
-            inst.* = .{
-                .base = .{
-                    .tag = .condbr,
-                    .ty = Type.initTag(.noreturn),
-                    .src = src,
-                },
-                .condition = condition,
-                .then_body = then_body,
-                .else_body = else_body,
-            };
-            try block.instructions.append(block.sema.gpa, &inst.base);
-            return &inst.base;
-        }
-
-        pub fn addCall(
-            block: *Scope.Block,
-            src: LazySrcLoc,
-            ty: Type,
-            func: *ir.Inst,
-            args: []const *ir.Inst,
-        ) !*ir.Inst {
-            const inst = try block.sema.arena.create(ir.Inst.Call);
-            inst.* = .{
-                .base = .{
-                    .tag = .call,
-                    .ty = ty,
-                    .src = src,
-                },
-                .func = func,
-                .args = args,
-            };
-            try block.instructions.append(block.sema.gpa, &inst.base);
-            return &inst.base;
-        }
-
-        pub fn addSwitchBr(
-            block: *Scope.Block,
-            src: LazySrcLoc,
-            operand: *ir.Inst,
-            cases: []ir.Inst.SwitchBr.Case,
-            else_body: ir.Body,
-        ) !*ir.Inst {
-            const inst = try block.sema.arena.create(ir.Inst.SwitchBr);
-            inst.* = .{
-                .base = .{
-                    .tag = .switchbr,
-                    .ty = Type.initTag(.noreturn),
-                    .src = src,
-                },
-                .target = operand,
-                .cases = cases,
-                .else_body = else_body,
-            };
-            try block.instructions.append(block.sema.gpa, &inst.base);
-            return &inst.base;
-        }
-
-        pub fn addDbgStmt(block: *Scope.Block, src: LazySrcLoc, line: u32, column: u32) !*ir.Inst {
-            const inst = try block.sema.arena.create(ir.Inst.DbgStmt);
-            inst.* = .{
-                .base = .{
-                    .tag = .dbg_stmt,
-                    .ty = Type.initTag(.void),
-                    .src = src,
-                },
-                .line = line,
-                .column = column,
-            };
-            try block.instructions.append(block.sema.gpa, &inst.base);
-            return &inst.base;
-        }
-
-        pub fn addStructFieldPtr(
-            block: *Scope.Block,
-            src: LazySrcLoc,
-            ty: Type,
-            struct_ptr: *ir.Inst,
-            field_index: u32,
-        ) !*ir.Inst {
-            const inst = try block.sema.arena.create(ir.Inst.StructFieldPtr);
-            inst.* = .{
-                .base = .{
-                    .tag = .struct_field_ptr,
-                    .ty = ty,
-                    .src = src,
-                },
-                .struct_ptr = struct_ptr,
-                .field_index = field_index,
-            };
-            try block.instructions.append(block.sema.gpa, &inst.base);
-            return &inst.base;
-        }
     };
 };
 
@@ -3594,30 +3413,14 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air {
     defer decl.value_arena.?.* = arena.state;
 
     const fn_ty = decl.ty;
-    const param_inst_list = try gpa.alloc(*ir.Inst, fn_ty.fnParamLen());
+    const param_inst_list = try gpa.alloc(Air.Inst.Index, fn_ty.fnParamLen());
     defer gpa.free(param_inst_list);
 
-    for (param_inst_list) |*param_inst, param_index| {
-        const param_type = fn_ty.fnParamType(param_index);
-        const arg_inst = try arena.allocator.create(ir.Inst.Arg);
-        arg_inst.* = .{
-            .base = .{
-                .tag = .arg,
-                .ty = param_type,
-                .src = .unneeded,
-            },
-            .name = undefined, // Set in the semantic analysis of the arg instruction.
-        };
-        param_inst.* = &arg_inst.base;
-    }
-
-    const zir = decl.namespace.file_scope.zir;
-
     var sema: Sema = .{
         .mod = mod,
         .gpa = gpa,
         .arena = &arena.allocator,
-        .code = zir,
+        .code = decl.namespace.file_scope.zir,
         .owner_decl = decl,
         .namespace = decl.namespace,
         .func = func,
@@ -3641,7 +3444,21 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air {
     };
     defer inner_block.instructions.deinit(gpa);
 
-    // AIR currently requires the arg parameters to be the first N instructions
+    // AIR requires the arg parameters to be the first N instructions.
+    for (param_inst_list) |*param_inst, param_index| {
+        const param_type = fn_ty.fnParamType(param_index);
+        const ty_ref = try sema.addType(param_type);
+        param_inst.* = @intCast(u32, sema.air_instructions.len);
+        try sema.air_instructions.append(gpa, .{
+            .tag = .arg,
+            .data = .{
+                .ty_str = .{
+                    .ty = ty_ref,
+                    .str = undefined, // Set in the semantic analysis of the arg instruction.
+                },
+            },
+        });
+    }
     try inner_block.instructions.appendSlice(gpa, param_inst_list);
 
     func.state = .in_progress;
 
     try sema.analyzeFnBody(&inner_block, func.zir_body_inst);
 
     // Copy the block into place and mark that as the main block.
-    sema.air_extra.items[@enumToInt(Air.ExtraIndex.main_block)] = sema.air_extra.items.len;
-    try sema.air_extra.appendSlice(inner_block.instructions.items);
+    try sema.air_extra.ensureUnusedCapacity(gpa, inner_block.instructions.items.len + 1);
+    const main_block_index = sema.addExtraAssumeCapacity(Air.Block{
+        .body_len = @intCast(u32, inner_block.instructions.items.len),
+    });
+    sema.air_extra.appendSliceAssumeCapacity(inner_block.instructions.items);
+    sema.air_extra.items[@enumToInt(Air.ExtraIndex.main_block)] = main_block_index;
 
     func.state = .success;
     log.debug("set {s} to success", .{decl.name});
 
     return Air{
         .instructions = sema.air_instructions.toOwnedSlice(),
-        .extra = sema.air_extra.toOwnedSlice(),
-        .values = sema.air_values.toOwnedSlice(),
-        .variables = sema.air_variables.toOwnedSlice(),
+        .extra = sema.air_extra.toOwnedSlice(gpa),
+        .values = sema.air_values.toOwnedSlice(gpa),
+        .variables = sema.air_variables.toOwnedSlice(gpa),
     };
 }
 
@@ -3815,94 +3636,6 @@ pub fn analyzeExport(
     de_gop.value_ptr.*[de_gop.value_ptr.len - 1] = new_export;
     errdefer de_gop.value_ptr.* = mod.gpa.shrink(de_gop.value_ptr.*, de_gop.value_ptr.len - 1);
 }
-
-pub fn constInst(mod: *Module, arena: *Allocator, src: LazySrcLoc, typed_value: TypedValue) !*ir.Inst {
-    _ = mod;
-    const const_inst = try arena.create(ir.Inst.Constant);
-    const_inst.* = .{
-        .base = .{
-            .tag = ir.Inst.Constant.base_tag,
-            .ty = typed_value.ty,
-            .src = src,
-        },
-        .val = typed_value.val,
-    };
-    return &const_inst.base;
-}
-
-pub fn constType(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type) !*ir.Inst {
-    return mod.constInst(arena, src, .{
-        .ty = Type.initTag(.type),
-        .val = try ty.toValue(arena),
-    });
-}
-
-pub fn constVoid(mod: *Module, arena: *Allocator, src: LazySrcLoc) !*ir.Inst {
-    return mod.constInst(arena, src, .{
-        .ty = Type.initTag(.void),
-        .val = Value.initTag(.void_value),
-    });
-}
-
-pub fn constNoReturn(mod: *Module, arena: *Allocator, src: LazySrcLoc) !*ir.Inst {
-    return mod.constInst(arena, src, .{
-        .ty = Type.initTag(.noreturn),
-        .val = Value.initTag(.unreachable_value),
-    });
-}
-
-pub fn constUndef(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type) !*ir.Inst {
-    return mod.constInst(arena, src, .{
-        .ty = ty,
-        .val = Value.initTag(.undef),
-    });
-}
-
-pub fn constBool(mod: *Module, arena: *Allocator, src: LazySrcLoc, v: bool) !*ir.Inst {
-    return mod.constInst(arena, src, .{
-        .ty = Type.initTag(.bool),
-        .val = ([2]Value{ Value.initTag(.bool_false), Value.initTag(.bool_true) })[@boolToInt(v)],
-    });
-}
-
-pub fn constIntUnsigned(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, int: u64) !*ir.Inst {
-    return mod.constInst(arena, src, .{
-        .ty = ty,
-        .val = try Value.Tag.int_u64.create(arena, int),
-    });
-}
-
-pub fn constIntSigned(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, int: i64) !*ir.Inst {
-    return mod.constInst(arena, src, .{
-        .ty = ty,
-        .val = try Value.Tag.int_i64.create(arena, int),
-    });
-}
-
-pub fn constIntBig(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, big_int: BigIntConst) !*ir.Inst {
-    if (big_int.positive) {
-        if (big_int.to(u64)) |x| {
-            return mod.constIntUnsigned(arena, src, ty, x);
-        } else |err| switch (err) {
-            error.NegativeIntoUnsigned => unreachable,
-            error.TargetTooSmall => {}, // handled below
-        }
-        return mod.constInst(arena, src, .{
-            .ty = ty,
-            .val = try Value.Tag.int_big_positive.create(arena, big_int.limbs),
-        });
-    } else {
-        if (big_int.to(i64)) |x| {
-            return mod.constIntSigned(arena, src, ty, x);
-        } else |err| switch (err) {
-            error.NegativeIntoUnsigned => unreachable,
-            error.TargetTooSmall => {}, // handled below
-        }
-        return mod.constInst(arena, src, .{
-            .ty = ty,
-            .val = try Value.Tag.int_big_negative.create(arena, big_int.limbs),
-        });
-    }
-}
 
 pub fn deleteAnonDecl(mod: *Module, scope: *Scope, decl: *Decl) void {
     const scope_decl = scope.ownerDecl().?;
@@ -4438,38 +4171,6 @@ pub fn errorUnionType(
     });
 }
 
-pub fn dumpInst(mod: *Module, scope: *Scope, inst: *ir.Inst) void {
-    const zir_module = scope.namespace();
-    const source = zir_module.getSource(mod) catch @panic("dumpInst failed to get source");
-    const loc = std.zig.findLineColumn(source, inst.src);
-    if (inst.tag == .constant) {
-        std.debug.print("constant ty={} val={} src={s}:{d}:{d}\n", .{
-            inst.ty,
-            inst.castTag(.constant).?.val,
-            zir_module.subFilePath(),
-            loc.line + 1,
-            loc.column + 1,
-        });
-    } else if (inst.deaths == 0) {
-        std.debug.print("{s} ty={} src={s}:{d}:{d}\n", .{
-            @tagName(inst.tag),
-            inst.ty,
-            zir_module.subFilePath(),
-            loc.line + 1,
-            loc.column + 1,
-        });
-    } else {
-        std.debug.print("{s} ty={} deaths={b} src={s}:{d}:{d}\n", .{
-            @tagName(inst.tag),
-            inst.ty,
-            inst.deaths,
-            zir_module.subFilePath(),
-            loc.line + 1,
-            loc.column + 1,
-        });
-    }
-}
-
 pub fn getTarget(mod: Module) Target {
     return mod.comp.bin_file.options.target;
 }

diff --git a/src/Sema.zig b/src/Sema.zig
index b4e10837af..d7ec01696f 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -12,9 +12,9 @@ gpa: *Allocator,
 arena: *Allocator,
 code: Zir,
 air_instructions: std.MultiArrayList(Air.Inst) = .{},
-air_extra: ArrayListUnmanaged(u32) = .{},
-air_values: ArrayListUnmanaged(Value) = .{},
-air_variables: ArrayListUnmanaged(Module.Var) = .{},
+air_extra: std.ArrayListUnmanaged(u32) = .{},
+air_values: std.ArrayListUnmanaged(Value) = .{},
+air_variables: std.ArrayListUnmanaged(*Module.Var) = .{},
 /// Maps ZIR to AIR.
 inst_map: InstMap = .{},
 /// When analyzing an inline function call, owner_decl is the Decl of the caller
@@ -1263,15 +1263,16 @@ fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air
     sema.next_arg_index += 1;
 
     // TODO check if arg_name shadows a Decl
+    _ = arg_name;
 
     if (block.inlining) |_| {
         return sema.param_inst_list[arg_index];
     }
 
-    // Need to set the name of the Air.Arg instruction.
-    const air_arg = sema.param_inst_list[arg_index].castTag(.arg).?;
-    air_arg.name = arg_name;
-    return &air_arg.base;
+    // Set the name of the Air.Arg instruction for use by codegen debug info.
+    const air_arg = sema.param_inst_list[arg_index];
+    sema.air_instructions.items(.data)[air_arg].ty_str.str = inst_data.start;
+    return air_arg;
 }
 
 fn zirAllocExtended(
@@ -7940,3 +7941,102 @@ fn enumFieldSrcLoc(
         }
     } else unreachable;
 }
+
+pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref {
+    switch (ty.tag()) {
+        .u8 => return .u8_type,
+        .i8 => return .i8_type,
+        .u16 => return .u16_type,
+        .i16 => return .i16_type,
+        .u32 => return .u32_type,
+        .i32 => return .i32_type,
+        .u64 => return .u64_type,
+        .i64 => return .i64_type,
+        .u128 => return .u128_type,
+        .i128 => return .i128_type,
+        .usize => return .usize_type,
+        .isize => return .isize_type,
+        .c_short => return .c_short_type,
+        .c_ushort => return .c_ushort_type,
+        .c_int => return .c_int_type,
+        .c_uint => return .c_uint_type,
+        .c_long => return .c_long_type,
+        .c_ulong => return .c_ulong_type,
+        .c_longlong => return .c_longlong_type,
+        .c_ulonglong => return .c_ulonglong_type,
+        .c_longdouble => return .c_longdouble_type,
+        .f16 => return .f16_type,
+        .f32 => return .f32_type,
+        .f64 => return .f64_type,
+        .f128 => return .f128_type,
+        .c_void => return .c_void_type,
+        .bool => return .bool_type,
+        .void => return .void_type,
+        .type => return .type_type,
+        .anyerror => return .anyerror_type,
+        .comptime_int => return .comptime_int_type,
+        .comptime_float => return .comptime_float_type,
+        .noreturn => return .noreturn_type,
+        .@"anyframe" => return .anyframe_type,
+        .@"null" => return .null_type,
+        .@"undefined" => return .undefined_type,
+        .enum_literal => return .enum_literal_type,
+        .atomic_ordering => return .atomic_ordering_type,
+        .atomic_rmw_op => return .atomic_rmw_op_type,
+        .calling_convention => return .calling_convention_type,
+        .float_mode => return .float_mode_type,
+        .reduce_op => return .reduce_op_type,
+        .call_options => return .call_options_type,
+        .export_options => return .export_options_type,
+        .extern_options => return .extern_options_type,
+        .manyptr_u8 => return .manyptr_u8_type,
+        .manyptr_const_u8 => return .manyptr_const_u8_type,
+        .fn_noreturn_no_args => return .fn_noreturn_no_args_type,
+        .fn_void_no_args => return .fn_void_no_args_type,
+        .fn_naked_noreturn_no_args => return .fn_naked_noreturn_no_args_type,
+        .fn_ccc_void_no_args => return .fn_ccc_void_no_args_type,
+        .single_const_pointer_to_comptime_int => return .single_const_pointer_to_comptime_int_type,
+        .const_slice_u8 => return .const_slice_u8_type,
+        else => {},
+    }
+    try sema.air_instructions.append(sema.gpa, .{
+        .tag = .const_ty,
+        .data = .{ .ty = ty },
+    });
+    return indexToRef(@intCast(u32, sema.air_instructions.len - 1));
+}
+
+const ref_start_index: u32 = Air.Inst.Ref.typed_value_map.len;
+
+fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref {
+    return @intToEnum(Air.Inst.Ref, ref_start_index + inst);
+}
+
+fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index {
+    const ref_int = @enumToInt(inst);
+    if (ref_int >= ref_start_index) {
+        return ref_int - ref_start_index;
+    } else {
+        return null;
+    }
+}
+
+pub fn addExtra(sema: *Sema, extra: anytype) Allocator.Error!u32 {
+    const fields = std.meta.fields(@TypeOf(extra));
+    try sema.air_extra.ensureUnusedCapacity(sema.gpa, fields.len);
+    return addExtraAssumeCapacity(sema, extra);
+}
+
+pub fn addExtraAssumeCapacity(sema: *Sema, extra: anytype) u32 {
+    const fields = std.meta.fields(@TypeOf(extra));
+    const result = @intCast(u32, sema.air_extra.items.len);
+    inline for (fields) |field| {
+        sema.air_extra.appendAssumeCapacity(switch (field.field_type) {
+            u32 => @field(extra, field.name),
+            Air.Inst.Ref => @enumToInt(@field(extra, field.name)),
+            i32 => @bitCast(u32, @field(extra, field.name)),
+            else => @compileError("bad field type"),
+        });
+    }
+    return result;
+}

diff --git a/src/codegen.zig b/src/codegen.zig
index 65e85702e5..eaf910977e 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -3,6 +3,7 @@ const mem = std.mem;
 const math = std.math;
 const assert = std.debug.assert;
 const Air = @import("Air.zig");
+const Liveness = @import("Liveness.zig");
 const Type = @import("type.zig").Type;
 const Value = @import("value.zig").Value;
 const TypedValue = @import("TypedValue.zig");
@@ -45,6 +46,71 @@ pub const DebugInfoOutput = union(enum) {
     none,
 };
 
+pub fn generateFunction(
+    bin_file: *link.File,
+    src_loc: Module.SrcLoc,
+    func: *Module.Fn,
+    air: Air,
+    liveness: Liveness,
+    code: *std.ArrayList(u8),
+    debug_output: DebugInfoOutput,
+) GenerateSymbolError!Result {
+    switch (bin_file.options.target.cpu.arch) {
+        .wasm32 => unreachable, // has its own code path
+        .wasm64 => unreachable, // has its own code path
+        .arm => return Function(.arm).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        .armeb => return Function(.armeb).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        .aarch64 => return Function(.aarch64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        .aarch64_be => return Function(.aarch64_be).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        .aarch64_32 => return Function(.aarch64_32).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.arc => return Function(.arc).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.avr => return Function(.avr).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.bpfel => return Function(.bpfel).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.bpfeb => return Function(.bpfeb).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.hexagon => return Function(.hexagon).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.mips => return Function(.mips).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.mipsel => return Function(.mipsel).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.mips64 => return Function(.mips64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.mips64el => return Function(.mips64el).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.msp430 => return Function(.msp430).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.powerpc => return Function(.powerpc).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.powerpc64 => return Function(.powerpc64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.powerpc64le => return Function(.powerpc64le).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.r600 => return Function(.r600).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.amdgcn => return Function(.amdgcn).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.riscv32 => return Function(.riscv32).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        .riscv64 => return Function(.riscv64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.sparc => return Function(.sparc).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.sparcv9 => return Function(.sparcv9).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.sparcel => return Function(.sparcel).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.s390x => return Function(.s390x).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.tce => return Function(.tce).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.tcele => return Function(.tcele).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.thumb => return Function(.thumb).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.thumbeb => return Function(.thumbeb).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.i386 => return Function(.i386).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        .x86_64 => return Function(.x86_64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.xcore => return Function(.xcore).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.nvptx => return Function(.nvptx).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.nvptx64 => return Function(.nvptx64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.le32 => return Function(.le32).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.le64 => return Function(.le64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.amdil => return Function(.amdil).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.amdil64 => return Function(.amdil64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.hsail => return Function(.hsail).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.hsail64 => return Function(.hsail64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.spir => return Function(.spir).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.spir64 => return Function(.spir64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.kalimba => return Function(.kalimba).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.shave => return Function(.shave).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.lanai => return Function(.lanai).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.renderscript32 => return Function(.renderscript32).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.renderscript64 => return Function(.renderscript64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        //.ve => return Function(.ve).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+        else => @panic("Backend architectures that don't have good support yet are commented out, to improve compilation performance. If you are interested in one of these other backends feel free to uncomment them. Eventually these will be completed, but stage1 is slow and a memory hog."),
+    }
+}
+
 pub fn generateSymbol(
     bin_file: *link.File,
     src_loc: Module.SrcLoc,
@@ -57,60 +123,14 @@ pub fn generateSymbol(
 
     switch (typed_value.ty.zigTypeTag()) {
         .Fn => {
-            switch (bin_file.options.target.cpu.arch) {
-                .wasm32 => unreachable, // has its own code path
-                .wasm64 => unreachable, // has its own code path
-                .arm => return Function(.arm).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                .armeb => return Function(.armeb).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                .aarch64 => return Function(.aarch64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                .aarch64_be => return Function(.aarch64_be).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                .aarch64_32 => return Function(.aarch64_32).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.arc => return Function(.arc).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.avr => return Function(.avr).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.bpfel => return Function(.bpfel).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.bpfeb => return Function(.bpfeb).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.hexagon => return Function(.hexagon).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.mips => return Function(.mips).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.mipsel => return Function(.mipsel).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.mips64 => return Function(.mips64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.mips64el => return Function(.mips64el).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.msp430 => return Function(.msp430).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.powerpc => return Function(.powerpc).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.powerpc64 => return Function(.powerpc64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.powerpc64le => return Function(.powerpc64le).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.r600 => return Function(.r600).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.amdgcn => return Function(.amdgcn).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.riscv32 => return Function(.riscv32).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                .riscv64 => return Function(.riscv64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.sparc => return Function(.sparc).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.sparcv9 => return Function(.sparcv9).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.sparcel => return Function(.sparcel).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.s390x => return Function(.s390x).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.tce => return Function(.tce).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.tcele => return Function(.tcele).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.thumb => return Function(.thumb).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.thumbeb => return Function(.thumbeb).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.i386 => return Function(.i386).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                .x86_64 => return Function(.x86_64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.xcore => return Function(.xcore).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.nvptx => return Function(.nvptx).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.nvptx64 => return Function(.nvptx64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.le32 => return Function(.le32).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.le64 => return Function(.le64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.amdil => return Function(.amdil).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.amdil64 => return Function(.amdil64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.hsail => return Function(.hsail).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.hsail64 => return Function(.hsail64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.spir => return Function(.spir).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.spir64 => return Function(.spir64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.kalimba => return Function(.kalimba).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.shave => return Function(.shave).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.lanai => return Function(.lanai).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.renderscript32 => return Function(.renderscript32).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.renderscript64 => return Function(.renderscript64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                //.ve => return Function(.ve).generateSymbol(bin_file, src_loc, typed_value, code, debug_output),
-                else => @panic("Backend architectures that don't have good support yet are commented out, to improve compilation performance. If you are interested in one of these other backends feel free to uncomment them. Eventually these will be completed, but stage1 is slow and a memory hog."),
-            }
+            return Result{
+                .fail = try ErrorMsg.create(
+                    bin_file.allocator,
+                    src_loc,
+                    "TODO implement generateSymbol function pointers",
+                    .{},
+                ),
+            };
         },
         .Array => {
             // TODO populate .debug_info for the array
@@ -262,6 +282,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
     return struct {
         gpa: *Allocator,
+        air: *const Air,
         bin_file: *link.File,
         target: *const std.Target,
         mod_fn: *const Module.Fn,
@@ -421,10 +442,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
 
         const Self = @This();
 
-        fn generateSymbol(
+        fn generate(
             bin_file: *link.File,
             src_loc: Module.SrcLoc,
-            typed_value: TypedValue,
+            module_fn: *Module.Fn,
+            air: Air,
+            liveness: Liveness,
             code: *std.ArrayList(u8),
             debug_output: DebugInfoOutput,
         ) GenerateSymbolError!Result {
                 @panic("Attempted to compile for architecture that was disabled by build configuration");
             }
 
-            const module_fn = typed_value.val.castTag(.function).?.data;
-
             assert(module_fn.owner_decl.has_tv);
             const fn_type = module_fn.owner_decl.ty;
@@ -447,6 +468,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
 
             var function = Self{
                 .gpa = bin_file.allocator,
+                .air = &air,
+                .liveness = &liveness,
                 .target = &bin_file.options.target,
                 .bin_file = bin_file,
                 .mod_fn = module_fn,
@@ -2131,8 +2154,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
         }
 
         fn genArgDbgInfo(self: *Self, inst: Air.Inst.Index, mcv: MCValue) !void {
-            const name_with_null = inst.name[0 .. mem.lenZ(inst.name) + 1];
-            const ty = self.air.getType(inst);
+            const ty_str = self.air.instructions.items(.data)[inst].ty_str;
+            const zir = &self.mod_fn.owner_decl.namespace.file_scope.zir;
+            const name = zir.nullTerminatedString(ty_str.str);
+            const name_with_null = name.ptr[0 .. name.len + 1];
+            const ty = self.air.getRefType(ty_str.ty);
 
             switch (mcv) {
                 .register => |reg| {
@@ -2249,8 +2275,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
         }
 
         fn genCall(self: *Self, inst: Air.Inst.Index) !MCValue {
-            const inst_datas = self.air.instructions.items(.data);
-            const pl_op = inst_datas[inst].pl_op;
+            const pl_op = self.air.instructions.items(.data)[inst].pl_op;
             const fn_ty = self.air.getType(pl_op.operand);
             const callee = pl_op.operand;
             const extra = self.air.extraData(Air.Call, inst_data.payload);
@@ -2848,8 +2873,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
         }
 
         fn genCondBr(self: *Self, inst: Air.Inst.Index) !MCValue {
-            const inst_datas = self.air.instructions.items(.data);
-            const pl_op = inst_datas[inst].pl_op;
+            const pl_op = self.air.instructions.items(.data)[inst].pl_op;
             const cond = try self.resolveInst(pl_op.operand);
             const extra = self.air.extraData(Air.CondBr, inst_data.payload);
             const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len];
@@ -3101,16 +3125,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
 
         fn genIsNull(self: *Self, inst: Air.Inst.Index) !MCValue {
             if (self.liveness.isUnused(inst))
                 return MCValue.dead;
-            const inst_datas = self.air.instructions.items(.data);
-            const operand = try self.resolveInst(inst_datas[inst].un_op);
+            const un_op = self.air.instructions.items(.data)[inst].un_op;
+            const operand = try self.resolveInst(un_op);
             return self.isNull(operand);
         }
 
         fn genIsNullPtr(self: *Self, inst: Air.Inst.Index) !MCValue {
             if (self.liveness.isUnused(inst))
                 return MCValue.dead;
-            const inst_datas = self.air.instructions.items(.data);
-            const operand_ptr = try self.resolveInst(inst_datas[inst].un_op);
+            const un_op = self.air.instructions.items(.data)[inst].un_op;
+            const operand_ptr = try self.resolveInst(un_op);
             const operand: MCValue = blk: {
                 if (self.reuseOperand(inst, 0, operand_ptr)) {
                     // The MCValue that holds the pointer can be re-used as the value.
@@ -3126,16 +3150,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
 
         fn genIsNonNull(self: *Self, inst: Air.Inst.Index) !MCValue {
             if (self.liveness.isUnused(inst))
                 return MCValue.dead;
-            const inst_datas = self.air.instructions.items(.data);
-            const operand = try self.resolveInst(inst_datas[inst].un_op);
+            const un_op = self.air.instructions.items(.data)[inst].un_op;
+            const operand = try self.resolveInst(un_op);
             return self.isNonNull(operand);
         }
 
         fn genIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !MCValue {
             if (self.liveness.isUnused(inst))
                 return MCValue.dead;
-            const inst_datas = self.air.instructions.items(.data);
-            const operand_ptr = try self.resolveInst(inst_datas[inst].un_op);
+            const un_op = self.air.instructions.items(.data)[inst].un_op;
+            const operand_ptr = try self.resolveInst(un_op);
             const operand: MCValue = blk: {
                 if (self.reuseOperand(inst, 0, operand_ptr)) {
                     // The MCValue that holds the pointer can be re-used as the value.
@@ -3151,16 +3175,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
 
         fn genIsErr(self: *Self, inst: Air.Inst.Index) !MCValue {
             if (self.liveness.isUnused(inst))
                 return MCValue.dead;
-            const inst_datas = self.air.instructions.items(.data);
-            const operand = try self.resolveInst(inst_datas[inst].un_op);
+            const un_op = self.air.instructions.items(.data)[inst].un_op;
+            const operand = try self.resolveInst(un_op);
             return self.isErr(operand);
         }
 
         fn genIsErrPtr(self: *Self, inst: Air.Inst.Index) !MCValue {
             if (self.liveness.isUnused(inst))
                 return MCValue.dead;
-            const inst_datas = self.air.instructions.items(.data);
-            const operand_ptr = try self.resolveInst(inst_datas[inst].un_op);
+            const un_op = self.air.instructions.items(.data)[inst].un_op;
+            const operand_ptr = try self.resolveInst(un_op);
             const operand: MCValue = blk: {
                 if (self.reuseOperand(inst, 0, operand_ptr)) {
                     // The MCValue that holds the pointer can be re-used as the value.
@@ -3176,16 +3200,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
 
         fn genIsNonErr(self: *Self, inst: Air.Inst.Index) !MCValue {
            if (self.liveness.isUnused(inst))
                 return MCValue.dead;
-            const inst_datas = self.air.instructions.items(.data);
-            const operand = try self.resolveInst(inst_datas[inst].un_op);
+            const un_op = self.air.instructions.items(.data)[inst].un_op;
+            const operand = try self.resolveInst(un_op);
             return self.isNonErr(operand);
         }
 
         fn genIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !MCValue {
             if (self.liveness.isUnused(inst))
                 return MCValue.dead;
-            const inst_datas = self.air.instructions.items(.data);
-            const operand_ptr = try self.resolveInst(inst_datas[inst].un_op);
+            const un_op = self.air.instructions.items(.data)[inst].un_op;
+            const operand_ptr = try self.resolveInst(un_op);
             const operand: MCValue = blk: {
                 if (self.reuseOperand(inst, 0, operand_ptr)) {
                     // The MCValue that holds the pointer can be re-used as the value.
@@ -3200,8 +3224,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
 
         fn genLoop(self: *Self, inst: Air.Inst.Index) !MCValue {
             // A loop is a setup to be able to jump back to the beginning.
-            const inst_datas = self.air.instructions.items(.data);
-            const loop = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload);
+            const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+            const loop = self.air.extraData(Air.Block, ty_pl.payload);
             const body = self.air.extra[loop.end..][0..loop.data.body_len];
             const start_index = self.code.items.len;
             try self.genBody(body);
@@ -4377,13 +4401,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
         }
 
         fn genPtrToInt(self: *Self, inst: Air.Inst.Index) !MCValue {
-            const inst_datas = self.air.instructions.items(.data);
-            return self.resolveInst(inst_datas[inst].un_op);
+            const un_op = self.air.instructions.items(.data)[inst].un_op;
+            return self.resolveInst(un_op);
         }
 
         fn genBitCast(self: *Self, inst: Air.Inst.Index) !MCValue {
-            const inst_datas = self.air.instructions.items(.data);
-            return self.resolveInst(inst_datas[inst].ty_op.operand);
+            const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+            return self.resolveInst(ty_op.operand);
         }
 
         fn resolveInst(self: *Self, inst: Air.Inst.Index) !MCValue {

diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 4a9087d7f5..3d704a8dc5 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -159,7 +159,10 @@ pub const DeclGen = struct {
     /// The SPIR-V module code should be put in.
     spv: *SPIRVModule,
 
-    /// An array of function argument result-ids. Each index corresponds with the function argument of the same index.
+    air: *const Air,
+
+    /// An array of function argument result-ids. Each index corresponds with the
+    /// function argument of the same index.
     args: std.ArrayList(ResultId),
 
     /// A counter to keep track of how many `arg` instructions we've seen yet.
     next_arg_index: u32,
 
     /// A map keeping track of which instruction generated which result-id.
     inst_results: InstMap,
 
-    /// We need to keep track of result ids for block labels, as well as the 'incoming' blocks for a block.
+    /// We need to keep track of result ids for block labels, as well as the 'incoming'
+    /// blocks for a block.
     blocks: BlockMap,
 
     /// The label of the SPIR-V block we are currently generating.
     current_block_label_id: ResultId,
 
-    /// The actual instructions for this function. We need to declare all locals in the first block, and because we don't
-    /// know which locals there are going to be, we're just going to generate everything after the locals-section in this array.
-    /// Note: It will not contain OpFunction, OpFunctionParameter, OpVariable and the initial OpLabel. These will be generated
-    /// into spv.binary.fn_decls directly.
+    /// The actual instructions for this function. We need to declare all locals in
+    /// the first block, and because we don't know which locals there are going to be,
+    /// we're just going to generate everything after the locals-section in this array.
+    /// Note: It will not contain OpFunction, OpFunctionParameter, OpVariable and the
+    /// initial OpLabel. These will be generated into spv.binary.fn_decls directly.
     code: std.ArrayList(Word),
 
     /// The decl we are currently generating code for.
     decl: *Decl,
 
-    /// If `gen` returned `Error.AnalysisFail`, this contains an explanatory message. Memory is owned by
-    /// `module.gpa`.
+    /// If `gen` returned `Error.AnalysisFail`, this contains an explanatory message.
+    /// Memory is owned by `module.gpa`.
     error_msg: ?*Module.ErrorMsg,
 
     /// Possible errors the `gen` function may return.
     const Error = error{ AnalysisFail, OutOfMemory };
 
-    /// This structure is used to return information about a type typically used for arithmetic operations.
-    /// These types may either be integers, floats, or a vector of these. Most scalar operations also work on vectors,
-    /// so we can easily represent those as arithmetic types.
-    /// If the type is a scalar, 'inner type' refers to the scalar type. Otherwise, if its a vector, it refers
-    /// to the vector's element type.
+    /// This structure is used to return information about a type typically used for
+    /// arithmetic operations. These types may either be integers, floats, or a vector
+    /// of these. Most scalar operations also work on vectors, so we can easily represent
+    /// those as arithmetic types. If the type is a scalar, 'inner type' refers to the
+    /// scalar type. Otherwise, if it's a vector, it refers to the vector's element type.
     const ArithmeticTypeInfo = struct {
         /// A classification of the inner type.
         const Class = enum {
@@ -206,13 +211,14 @@ pub const DeclGen = struct {
             /// the relevant capability is enabled).
             integer,
 
-            /// A regular float. These are all required to be natively supported. Floating points for
-            /// which the relevant capability is not enabled are not emulated.
+            /// A regular float. These are all required to be natively supported. Floating points
+            /// for which the relevant capability is not enabled are not emulated.
             float,
 
-            /// An integer of a 'strange' size (which' bit size is not the same as its backing type. **Note**: this
-            /// may **also** include power-of-2 integers for which the relevant capability is not enabled), but still
-            /// within the limits of the largest natively supported integer type.
+            /// An integer of a 'strange' size (whose bit size is not the same as its backing
+            /// type. **Note**: this may **also** include power-of-2 integers for which the
+            /// relevant capability is not enabled), but still within the limits of the largest
+            /// natively supported integer type.
             strange_integer,
 
             /// An integer with more bits than the largest natively supported integer type.
             composite_integer,
         };
 
         /// The number of bits in the inner type.
-        /// Note: this is the actual number of bits of the type, not the size of the backing integer.
+        /// This is the actual number of bits of the type, not the size of the backing integer.
         bits: u16,
 
         /// Whether the type is a vector.
@@ -234,10 +240,12 @@ pub const DeclGen = struct {
         class: Class,
     };
 
-    /// Initialize the common resources of a DeclGen. Some fields are left uninitialized, only set when `gen` is called.
+    /// Initialize the common resources of a DeclGen. Some fields are left uninitialized,
+    /// only set when `gen` is called.
     pub fn init(spv: *SPIRVModule) DeclGen {
         return .{
             .spv = spv,
+            .air = undefined,
             .args = std.ArrayList(ResultId).init(spv.gpa),
             .next_arg_index = undefined,
             .inst_results = InstMap.init(spv.gpa),
@@ -252,8 +260,9 @@ pub const DeclGen = struct {
     /// Generate the code for `decl`. If a reportable error occured during code generation,
     /// a message is returned by this function. Callee owns the memory. If this function returns such
     /// a reportable error, it is valid to be called again for a different decl.
-    pub fn gen(self: *DeclGen, decl: *Decl) !?*Module.ErrorMsg {
+    pub fn gen(self: *DeclGen, decl: *Decl, air: Air) !?*Module.ErrorMsg {
         // Reset internal resources, we don't want to re-allocate these.
+        self.air = &air;
         self.args.items.len = 0;
         self.next_arg_index = 0;
         self.inst_results.clearRetainingCapacity();
@@ -680,7 +689,7 @@ pub const DeclGen = struct {
 
             .br => return self.genBr(inst),
             .breakpoint => return,
-            .condbr => return self.genCondBr(inst),
+            .cond_br => return self.genCondBr(inst),
             .constant => unreachable,
             .dbg_stmt => return self.genDbgStmt(inst),
             .loop => return self.genLoop(inst),
@@ -688,6 +697,10 @@ pub const DeclGen = struct {
             .store => return self.genStore(inst),
             .unreach => return self.genUnreach(),
             // zig fmt: on
+
+            else => |tag| return self.fail("TODO: SPIR-V backend: implement AIR tag {s}", .{
+                @tagName(tag),
+            }),
         };
 
         try self.inst_results.putNoClobber(inst, result_id);

diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig
index bfae799462..8a2e877d42 100644
--- a/src/link/SpirV.zig
+++ b/src/link/SpirV.zig
@@ -135,6 +135,10 @@ pub fn flushModule(self: *SpirV, comp: *Compilation) !void {
     const tracy = trace(@src());
     defer tracy.end();
 
+    if (build_options.skip_non_native) {
+        @panic("Attempted to compile for architecture that was disabled by build configuration");
+    }
+
     const module = self.base.options.module.?;
     const target = comp.getTarget();

diff --git a/src/register_manager.zig b/src/register_manager.zig
index 8aca7fcc3d..f0d128e7f9 100644
--- a/src/register_manager.zig
+++ b/src/register_manager.zig
@@ -20,7 +20,7 @@ pub fn RegisterManager(
 ) type {
     return struct {
         /// The key must be canonical register.
-        registers: [callee_preserved_regs.len]?*ir.Inst = [_]?*ir.Inst{null} ** callee_preserved_regs.len,
+        registers: [callee_preserved_regs.len]?Air.Inst.Index = [_]?Air.Inst.Index{null} ** callee_preserved_regs.len,
         free_registers: FreeRegInt = math.maxInt(FreeRegInt),
         /// Tracks all registers allocated in the course of this function
         allocated_registers: FreeRegInt = 0,
@@ -75,7 +75,7 @@ pub fn RegisterManager(
         pub fn tryAllocRegs(
             self: *Self,
             comptime count: comptime_int,
-            insts: [count]?*ir.Inst,
+            insts: [count]?Air.Inst.Index,
             exceptions: []const Register,
         ) ?[count]Register {
             comptime if (callee_preserved_regs.len == 0) return null;
@@ -113,7 +113,7 @@ pub fn RegisterManager(
         /// Allocates a register and optionally tracks it with a
         /// corresponding instruction. Returns `null` if all registers
         /// are allocated.
-        pub fn tryAllocReg(self: *Self, inst: ?*ir.Inst, exceptions: []const Register) ?Register {
+        pub fn tryAllocReg(self: *Self, inst: ?Air.Inst.Index, exceptions: []const Register) ?Register {
            return if (tryAllocRegs(self, 1, .{inst}, exceptions)) |regs| regs[0] else null;
         }
@@ -123,7 +123,7 @@ pub fn RegisterManager(
         pub fn allocRegs(
             self: *Self,
             comptime count: comptime_int,
-            insts: [count]?*ir.Inst,
+            insts: [count]?Air.Inst.Index,
             exceptions: []const Register,
         ) ![count]Register {
             comptime assert(count > 0 and count <= callee_preserved_regs.len);
@@ -168,14 +168,14 @@ pub fn RegisterManager(
 
         /// Allocates a register and optionally tracks it with a
         /// corresponding instruction.
-        pub fn allocReg(self: *Self, inst: ?*ir.Inst, exceptions: []const Register) !Register {
+        pub fn allocReg(self: *Self, inst: ?Air.Inst.Index, exceptions: []const Register) !Register {
             return (try self.allocRegs(1, .{inst}, exceptions))[0];
         }
 
         /// Spills the register if it is currently allocated. If a
         /// corresponding instruction is passed, will also track this
         /// register.
-        pub fn getReg(self: *Self, reg: Register, inst: ?*ir.Inst) !void {
+        pub fn getReg(self: *Self, reg: Register, inst: ?Air.Inst.Index) !void {
             const index = reg.allocIndex() orelse return;
 
             if (inst) |tracked_inst|
@@ -202,7 +202,7 @@ pub fn RegisterManager(
         /// Allocates the specified register with the specified
         /// instruction. Asserts that the register is free and no
         /// spilling is necessary.
-        pub fn getRegAssumeFree(self: *Self, reg: Register, inst: *ir.Inst) void {
+        pub fn getRegAssumeFree(self: *Self, reg: Register, inst: Air.Inst.Index) void {
             const index = reg.allocIndex() orelse return;
 
             assert(self.registers[index] == null);
@@ -264,7 +264,7 @@ fn MockFunction(comptime Register: type) type {
             self.spilled.deinit(self.allocator);
         }
 
-        pub fn spillInstruction(self: *Self, reg: Register, inst: *ir.Inst) !void {
+        pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void {
             _ = inst;
             try self.spilled.append(self.allocator, reg);
         }
--
cgit v1.2.3

From 0f38f686964664f68e013ec3c63cfe655001f165 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 12 Jul 2021 19:51:31 -0700
Subject: stage2: Air and Liveness are passed ephemerally to the link
 infrastructure, instead of being stored with Module.Fn.

This moves towards a strategy to make more efficient use of memory by
not storing Air or Liveness data in the Fn struct, but computing it on
demand, immediately sending it to the backend, and then immediately
freeing it. Backends which want to defer codegen until flush() such as
SPIR-V must move the Air/Liveness data upon `updateFunc` being called
and keep track of that data in the backend implementation itself.
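As a rough sketch, the per-function flow this enables looks like the
following (assembled from signatures elsewhere in this series:
`analyzeFnBody`, `Liveness.analyze`, and `liveness.deinit(gpa)` appear
in the diffs below, while `air.deinit` and the exact `updateFunc`
parameter list are assumptions for illustration):

    var air = try module.analyzeFnBody(decl, func);
    defer air.deinit(gpa); // assumed: Air owns and frees its arrays
    var liveness = try Liveness.analyze(gpa, air);
    defer liveness.deinit(gpa);
    // The backend consumes both immediately; nothing is kept on Module.Fn.
    try comp.bin_file.updateFunc(module, func, air, liveness);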
---
 BRANCH_TODO           |   5 +
 src/Compilation.zig   |   2 +-
 src/Liveness.zig      |   9 +-
 src/Module.zig        |   5 -
 src/Sema.zig          | 762 +++++++++++++++++++++++++-------------------------
 src/codegen.zig       |   7 +-
 src/codegen/c.zig     |   9 +-
 src/codegen/llvm.zig  |   3 +
 src/codegen/spirv.zig |   3 +-
 src/codegen/wasm.zig  |  88 +++---
 src/link.zig          |  34 ++-
 src/link/C.zig        |  28 +-
 src/link/Coff.zig     |  56 +++-
 src/link/Elf.zig      | 558 +++++++++++++++++++-----------------
 src/link/MachO.zig    |  55 ++++
 src/link/Plan9.zig    |  29 +-
 src/link/SpirV.zig    |  24 +-
 src/link/Wasm.zig     |  59 +++-
 18 files changed, 1023 insertions(+), 713 deletions(-)

diff --git a/BRANCH_TODO b/BRANCH_TODO
index 585c8adf44..c7f3923559 100644
--- a/BRANCH_TODO
+++ b/BRANCH_TODO
@@ -690,3 +690,8 @@ pub fn dumpInst(mod: *Module, scope: *Scope, inst: *ir.Inst) void {
         }
     }
 }
+
+    /// For debugging purposes.
+    pub fn dump(func: *Fn, mod: Module) void {
+        ir.dumpFn(mod, func);
+    }

diff --git a/src/Compilation.zig b/src/Compilation.zig
index 74ad7b2aae..90224a77d1 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -2027,7 +2027,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
                     defer liveness.deinit(gpa);
 
                     if (std.builtin.mode == .Debug and self.verbose_air) {
-                        func.dump(module.*);
+                        @panic("TODO implement dumping AIR and liveness");
                     }
 
                     assert(decl.ty.hasCodeGenBits());

diff --git a/src/Liveness.zig b/src/Liveness.zig
index 0cbac61118..1402a5997b 100644
--- a/src/Liveness.zig
+++ b/src/Liveness.zig
@@ -50,7 +50,7 @@ pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness {
 
     var a: Analysis = .{
         .gpa = gpa,
-        .air = &air,
+        .air = air,
         .table = .{},
         .tomb_bits = try gpa.alloc(
             usize,
@@ -65,7 +65,7 @@ pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness {
     defer a.table.deinit(gpa);
 
     const main_body = air.getMainBody();
-    try a.table.ensureTotalCapacity(main_body.len);
+    try a.table.ensureTotalCapacity(gpa, @intCast(u32, main_body.len));
     try analyzeWithContext(&a, null, main_body);
     return Liveness{
         .tomb_bits = a.tomb_bits,
@@ -108,9 +108,10 @@ const OperandInt = std.math.Log2Int(Bpi);
 /// In-progress data; on successful analysis converted into `Liveness`.
 const Analysis = struct {
     gpa: *Allocator,
-    air: *const Air,
+    air: Air,
     table: std.AutoHashMapUnmanaged(Air.Inst.Index, void),
     tomb_bits: []usize,
+    special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32),
     extra: std.ArrayListUnmanaged(u32),
 
     fn storeTombBits(a: *Analysis, inst: Air.Inst.Index, tomb_bits: Bpi) void {
@@ -165,7 +166,7 @@ fn analyzeWithContext(
 
 fn analyzeInst(
     a: *Analysis,
-    new_set: ?*std.AutoHashMap(Air.Inst.Index, void),
+    new_set: ?*std.AutoHashMapUnmanaged(Air.Inst.Index, void),
     inst: Air.Inst.Index,
 ) Allocator.Error!void {
     const gpa = a.gpa;

diff --git a/src/Module.zig b/src/Module.zig
index 8971a57487..5972c2bdcf 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -769,11 +769,6 @@ pub const Fn = struct {
         success,
     };
 
-    /// For debugging purposes.
- pub fn dump(func: *Fn, mod: Module) void { - ir.dumpFn(mod, func); - } - pub fn deinit(func: *Fn, gpa: *Allocator) void { if (func.getInferredErrorSet()) |map| { map.deinit(gpa); diff --git a/src/Sema.zig b/src/Sema.zig index d7ec01696f..54c42a482d 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -69,7 +69,7 @@ const LazySrcLoc = Module.LazySrcLoc; const RangeSet = @import("RangeSet.zig"); const target_util = @import("target.zig"); -pub const InstMap = std.AutoHashMapUnmanaged(Zir.Inst.Index, Air.Inst.Index); +pub const InstMap = std.AutoHashMapUnmanaged(Zir.Inst.Index, Air.Inst.Ref); pub fn deinit(sema: *Sema) void { const gpa = sema.gpa; @@ -158,344 +158,344 @@ pub fn analyzeBody( var i: usize = 0; while (true) { const inst = body[i]; - const air_inst = switch (tags[inst]) { + const air_inst: Air.Inst.Ref = switch (tags[inst]) { // zig fmt: off .arg => try sema.zirArg(block, inst), - .alloc => try sema.zirAlloc(block, inst), - .alloc_inferred => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_const)), - .alloc_inferred_mut => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_mut)), - .alloc_inferred_comptime => try sema.zirAllocInferredComptime(block, inst), - .alloc_mut => try sema.zirAllocMut(block, inst), - .alloc_comptime => try sema.zirAllocComptime(block, inst), - .anyframe_type => try sema.zirAnyframeType(block, inst), - .array_cat => try sema.zirArrayCat(block, inst), - .array_mul => try sema.zirArrayMul(block, inst), - .array_type => try sema.zirArrayType(block, inst), - .array_type_sentinel => try sema.zirArrayTypeSentinel(block, inst), - .vector_type => try sema.zirVectorType(block, inst), - .as => try sema.zirAs(block, inst), - .as_node => try sema.zirAsNode(block, inst), - .bit_and => try sema.zirBitwise(block, inst, .bit_and), - .bit_not => try sema.zirBitNot(block, inst), - .bit_or => try sema.zirBitwise(block, inst, .bit_or), - .bitcast => try sema.zirBitcast(block, inst), - .bitcast_result_ptr => try sema.zirBitcastResultPtr(block, inst), - .block => try sema.zirBlock(block, inst), - .suspend_block => try sema.zirSuspendBlock(block, inst), - .bool_not => try sema.zirBoolNot(block, inst), - .bool_and => try sema.zirBoolOp(block, inst, false), - .bool_or => try sema.zirBoolOp(block, inst, true), - .bool_br_and => try sema.zirBoolBr(block, inst, false), - .bool_br_or => try sema.zirBoolBr(block, inst, true), - .c_import => try sema.zirCImport(block, inst), - .call => try sema.zirCall(block, inst, .auto, false), - .call_chkused => try sema.zirCall(block, inst, .auto, true), - .call_compile_time => try sema.zirCall(block, inst, .compile_time, false), - .call_nosuspend => try sema.zirCall(block, inst, .no_async, false), - .call_async => try sema.zirCall(block, inst, .async_kw, false), - .cmp_eq => try sema.zirCmp(block, inst, .eq), - .cmp_gt => try sema.zirCmp(block, inst, .gt), - .cmp_gte => try sema.zirCmp(block, inst, .gte), - .cmp_lt => try sema.zirCmp(block, inst, .lt), - .cmp_lte => try sema.zirCmp(block, inst, .lte), - .cmp_neq => try sema.zirCmp(block, inst, .neq), - .coerce_result_ptr => try sema.zirCoerceResultPtr(block, inst), - .decl_ref => try sema.zirDeclRef(block, inst), - .decl_val => try sema.zirDeclVal(block, inst), - .load => try sema.zirLoad(block, inst), - .elem_ptr => try sema.zirElemPtr(block, inst), - .elem_ptr_node => try sema.zirElemPtrNode(block, inst), - .elem_val => try sema.zirElemVal(block, inst), - .elem_val_node => try sema.zirElemValNode(block, inst), - .elem_type => try sema.zirElemType(block, inst), - 
.enum_literal => try sema.zirEnumLiteral(block, inst), - .enum_to_int => try sema.zirEnumToInt(block, inst), - .int_to_enum => try sema.zirIntToEnum(block, inst), - .err_union_code => try sema.zirErrUnionCode(block, inst), - .err_union_code_ptr => try sema.zirErrUnionCodePtr(block, inst), - .err_union_payload_safe => try sema.zirErrUnionPayload(block, inst, true), - .err_union_payload_safe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, true), - .err_union_payload_unsafe => try sema.zirErrUnionPayload(block, inst, false), - .err_union_payload_unsafe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, false), - .error_union_type => try sema.zirErrorUnionType(block, inst), - .error_value => try sema.zirErrorValue(block, inst), - .error_to_int => try sema.zirErrorToInt(block, inst), - .int_to_error => try sema.zirIntToError(block, inst), - .field_ptr => try sema.zirFieldPtr(block, inst), - .field_ptr_named => try sema.zirFieldPtrNamed(block, inst), - .field_val => try sema.zirFieldVal(block, inst), - .field_val_named => try sema.zirFieldValNamed(block, inst), - .func => try sema.zirFunc(block, inst, false), - .func_inferred => try sema.zirFunc(block, inst, true), - .import => try sema.zirImport(block, inst), - .indexable_ptr_len => try sema.zirIndexablePtrLen(block, inst), - .int => try sema.zirInt(block, inst), - .int_big => try sema.zirIntBig(block, inst), - .float => try sema.zirFloat(block, inst), - .float128 => try sema.zirFloat128(block, inst), - .int_type => try sema.zirIntType(block, inst), - .is_non_err => try sema.zirIsNonErr(block, inst), - .is_non_err_ptr => try sema.zirIsNonErrPtr(block, inst), - .is_non_null => try sema.zirIsNonNull(block, inst), - .is_non_null_ptr => try sema.zirIsNonNullPtr(block, inst), - .loop => try sema.zirLoop(block, inst), - .merge_error_sets => try sema.zirMergeErrorSets(block, inst), - .negate => try sema.zirNegate(block, inst, .sub), - .negate_wrap => try sema.zirNegate(block, inst, .subwrap), - .optional_payload_safe => try sema.zirOptionalPayload(block, inst, true), - .optional_payload_safe_ptr => try sema.zirOptionalPayloadPtr(block, inst, true), - .optional_payload_unsafe => try sema.zirOptionalPayload(block, inst, false), - .optional_payload_unsafe_ptr => try sema.zirOptionalPayloadPtr(block, inst, false), - .optional_type => try sema.zirOptionalType(block, inst), - .param_type => try sema.zirParamType(block, inst), - .ptr_type => try sema.zirPtrType(block, inst), - .ptr_type_simple => try sema.zirPtrTypeSimple(block, inst), - .ref => try sema.zirRef(block, inst), - .ret_err_value_code => try sema.zirRetErrValueCode(block, inst), - .shl => try sema.zirShl(block, inst), - .shr => try sema.zirShr(block, inst), - .slice_end => try sema.zirSliceEnd(block, inst), - .slice_sentinel => try sema.zirSliceSentinel(block, inst), - .slice_start => try sema.zirSliceStart(block, inst), - .str => try sema.zirStr(block, inst), - .switch_block => try sema.zirSwitchBlock(block, inst, false, .none), - .switch_block_multi => try sema.zirSwitchBlockMulti(block, inst, false, .none), - .switch_block_else => try sema.zirSwitchBlock(block, inst, false, .@"else"), - .switch_block_else_multi => try sema.zirSwitchBlockMulti(block, inst, false, .@"else"), - .switch_block_under => try sema.zirSwitchBlock(block, inst, false, .under), - .switch_block_under_multi => try sema.zirSwitchBlockMulti(block, inst, false, .under), - .switch_block_ref => try sema.zirSwitchBlock(block, inst, true, .none), - .switch_block_ref_multi => try sema.zirSwitchBlockMulti(block, inst, true, 
.none), - .switch_block_ref_else => try sema.zirSwitchBlock(block, inst, true, .@"else"), - .switch_block_ref_else_multi => try sema.zirSwitchBlockMulti(block, inst, true, .@"else"), - .switch_block_ref_under => try sema.zirSwitchBlock(block, inst, true, .under), - .switch_block_ref_under_multi => try sema.zirSwitchBlockMulti(block, inst, true, .under), - .switch_capture => try sema.zirSwitchCapture(block, inst, false, false), - .switch_capture_ref => try sema.zirSwitchCapture(block, inst, false, true), - .switch_capture_multi => try sema.zirSwitchCapture(block, inst, true, false), - .switch_capture_multi_ref => try sema.zirSwitchCapture(block, inst, true, true), - .switch_capture_else => try sema.zirSwitchCaptureElse(block, inst, false), - .switch_capture_else_ref => try sema.zirSwitchCaptureElse(block, inst, true), - .type_info => try sema.zirTypeInfo(block, inst), - .size_of => try sema.zirSizeOf(block, inst), - .bit_size_of => try sema.zirBitSizeOf(block, inst), - .typeof => try sema.zirTypeof(block, inst), - .typeof_elem => try sema.zirTypeofElem(block, inst), - .log2_int_type => try sema.zirLog2IntType(block, inst), - .typeof_log2_int_type => try sema.zirTypeofLog2IntType(block, inst), - .xor => try sema.zirBitwise(block, inst, .xor), - .struct_init_empty => try sema.zirStructInitEmpty(block, inst), - .struct_init => try sema.zirStructInit(block, inst, false), - .struct_init_ref => try sema.zirStructInit(block, inst, true), - .struct_init_anon => try sema.zirStructInitAnon(block, inst, false), - .struct_init_anon_ref => try sema.zirStructInitAnon(block, inst, true), - .array_init => try sema.zirArrayInit(block, inst, false), - .array_init_ref => try sema.zirArrayInit(block, inst, true), - .array_init_anon => try sema.zirArrayInitAnon(block, inst, false), - .array_init_anon_ref => try sema.zirArrayInitAnon(block, inst, true), - .union_init_ptr => try sema.zirUnionInitPtr(block, inst), - .field_type => try sema.zirFieldType(block, inst), - .field_type_ref => try sema.zirFieldTypeRef(block, inst), - .ptr_to_int => try sema.zirPtrToInt(block, inst), - .align_of => try sema.zirAlignOf(block, inst), - .bool_to_int => try sema.zirBoolToInt(block, inst), - .embed_file => try sema.zirEmbedFile(block, inst), - .error_name => try sema.zirErrorName(block, inst), - .tag_name => try sema.zirTagName(block, inst), - .reify => try sema.zirReify(block, inst), - .type_name => try sema.zirTypeName(block, inst), - .frame_type => try sema.zirFrameType(block, inst), - .frame_size => try sema.zirFrameSize(block, inst), - .float_to_int => try sema.zirFloatToInt(block, inst), - .int_to_float => try sema.zirIntToFloat(block, inst), - .int_to_ptr => try sema.zirIntToPtr(block, inst), - .float_cast => try sema.zirFloatCast(block, inst), - .int_cast => try sema.zirIntCast(block, inst), - .err_set_cast => try sema.zirErrSetCast(block, inst), - .ptr_cast => try sema.zirPtrCast(block, inst), - .truncate => try sema.zirTruncate(block, inst), - .align_cast => try sema.zirAlignCast(block, inst), - .has_decl => try sema.zirHasDecl(block, inst), - .has_field => try sema.zirHasField(block, inst), - .clz => try sema.zirClz(block, inst), - .ctz => try sema.zirCtz(block, inst), - .pop_count => try sema.zirPopCount(block, inst), - .byte_swap => try sema.zirByteSwap(block, inst), - .bit_reverse => try sema.zirBitReverse(block, inst), - .div_exact => try sema.zirDivExact(block, inst), - .div_floor => try sema.zirDivFloor(block, inst), - .div_trunc => try sema.zirDivTrunc(block, inst), - .mod => try sema.zirMod(block, inst), - 
.rem => try sema.zirRem(block, inst), - .shl_exact => try sema.zirShlExact(block, inst), - .shr_exact => try sema.zirShrExact(block, inst), - .bit_offset_of => try sema.zirBitOffsetOf(block, inst), - .offset_of => try sema.zirOffsetOf(block, inst), - .cmpxchg_strong => try sema.zirCmpxchg(block, inst), - .cmpxchg_weak => try sema.zirCmpxchg(block, inst), - .splat => try sema.zirSplat(block, inst), - .reduce => try sema.zirReduce(block, inst), - .shuffle => try sema.zirShuffle(block, inst), - .atomic_load => try sema.zirAtomicLoad(block, inst), - .atomic_rmw => try sema.zirAtomicRmw(block, inst), - .atomic_store => try sema.zirAtomicStore(block, inst), - .mul_add => try sema.zirMulAdd(block, inst), - .builtin_call => try sema.zirBuiltinCall(block, inst), - .field_ptr_type => try sema.zirFieldPtrType(block, inst), - .field_parent_ptr => try sema.zirFieldParentPtr(block, inst), - .memcpy => try sema.zirMemcpy(block, inst), - .memset => try sema.zirMemset(block, inst), - .builtin_async_call => try sema.zirBuiltinAsyncCall(block, inst), - .@"resume" => try sema.zirResume(block, inst), - .@"await" => try sema.zirAwait(block, inst, false), - .await_nosuspend => try sema.zirAwait(block, inst, true), - .extended => try sema.zirExtended(block, inst), - - .sqrt => try sema.zirUnaryMath(block, inst), - .sin => try sema.zirUnaryMath(block, inst), - .cos => try sema.zirUnaryMath(block, inst), - .exp => try sema.zirUnaryMath(block, inst), - .exp2 => try sema.zirUnaryMath(block, inst), - .log => try sema.zirUnaryMath(block, inst), - .log2 => try sema.zirUnaryMath(block, inst), - .log10 => try sema.zirUnaryMath(block, inst), - .fabs => try sema.zirUnaryMath(block, inst), - .floor => try sema.zirUnaryMath(block, inst), - .ceil => try sema.zirUnaryMath(block, inst), - .trunc => try sema.zirUnaryMath(block, inst), - .round => try sema.zirUnaryMath(block, inst), - - .opaque_decl => try sema.zirOpaqueDecl(block, inst, .parent), - .opaque_decl_anon => try sema.zirOpaqueDecl(block, inst, .anon), - .opaque_decl_func => try sema.zirOpaqueDecl(block, inst, .func), - .error_set_decl => try sema.zirErrorSetDecl(block, inst, .parent), - .error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon), - .error_set_decl_func => try sema.zirErrorSetDecl(block, inst, .func), - - .add => try sema.zirArithmetic(block, inst), - .addwrap => try sema.zirArithmetic(block, inst), - .div => try sema.zirArithmetic(block, inst), - .mod_rem => try sema.zirArithmetic(block, inst), - .mul => try sema.zirArithmetic(block, inst), - .mulwrap => try sema.zirArithmetic(block, inst), - .sub => try sema.zirArithmetic(block, inst), - .subwrap => try sema.zirArithmetic(block, inst), - - // Instructions that we know to *always* be noreturn based solely on their tag. - // These functions match the return type of analyzeBody so that we can - // tail call them here. - .break_inline => return inst, - .condbr => return sema.zirCondbr(block, inst), - .@"break" => return sema.zirBreak(block, inst), - .compile_error => return sema.zirCompileError(block, inst), - .ret_coerce => return sema.zirRetCoerce(block, inst, true), - .ret_node => return sema.zirRetNode(block, inst), - .ret_err_value => return sema.zirRetErrValue(block, inst), - .@"unreachable" => return sema.zirUnreachable(block, inst), - .repeat => return sema.zirRepeat(block, inst), - .panic => return sema.zirPanic(block, inst), - // zig fmt: on - - // Instructions that we know can *never* be noreturn based solely on - // their tag. 
We avoid needlessly checking if they are noreturn and - // continue the loop. - // We also know that they cannot be referenced later, so we avoid - // putting them into the map. - .breakpoint => { - try sema.zirBreakpoint(block, inst); - i += 1; - continue; - }, - .fence => { - try sema.zirFence(block, inst); - i += 1; - continue; - }, - .dbg_stmt => { - try sema.zirDbgStmt(block, inst); - i += 1; - continue; - }, - .ensure_err_payload_void => { - try sema.zirEnsureErrPayloadVoid(block, inst); - i += 1; - continue; - }, - .ensure_result_non_error => { - try sema.zirEnsureResultNonError(block, inst); - i += 1; - continue; - }, - .ensure_result_used => { - try sema.zirEnsureResultUsed(block, inst); - i += 1; - continue; - }, - .set_eval_branch_quota => { - try sema.zirSetEvalBranchQuota(block, inst); - i += 1; - continue; - }, - .store => { - try sema.zirStore(block, inst); - i += 1; - continue; - }, - .store_node => { - try sema.zirStoreNode(block, inst); - i += 1; - continue; - }, - .store_to_block_ptr => { - try sema.zirStoreToBlockPtr(block, inst); - i += 1; - continue; - }, - .store_to_inferred_ptr => { - try sema.zirStoreToInferredPtr(block, inst); - i += 1; - continue; - }, - .resolve_inferred_alloc => { - try sema.zirResolveInferredAlloc(block, inst); - i += 1; - continue; - }, - .validate_struct_init_ptr => { - try sema.zirValidateStructInitPtr(block, inst); - i += 1; - continue; - }, - .validate_array_init_ptr => { - try sema.zirValidateArrayInitPtr(block, inst); - i += 1; - continue; - }, - .@"export" => { - try sema.zirExport(block, inst); - i += 1; - continue; - }, - .set_align_stack => { - try sema.zirSetAlignStack(block, inst); - i += 1; - continue; - }, - .set_cold => { - try sema.zirSetCold(block, inst); - i += 1; - continue; - }, - .set_float_mode => { - try sema.zirSetFloatMode(block, inst); - i += 1; - continue; - }, - .set_runtime_safety => { - try sema.zirSetRuntimeSafety(block, inst); - i += 1; - continue; - }, + //.alloc => try sema.zirAlloc(block, inst), + //.alloc_inferred => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_const)), + //.alloc_inferred_mut => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_mut)), + //.alloc_inferred_comptime => try sema.zirAllocInferredComptime(block, inst), + //.alloc_mut => try sema.zirAllocMut(block, inst), + //.alloc_comptime => try sema.zirAllocComptime(block, inst), + //.anyframe_type => try sema.zirAnyframeType(block, inst), + //.array_cat => try sema.zirArrayCat(block, inst), + //.array_mul => try sema.zirArrayMul(block, inst), + //.array_type => try sema.zirArrayType(block, inst), + //.array_type_sentinel => try sema.zirArrayTypeSentinel(block, inst), + //.vector_type => try sema.zirVectorType(block, inst), + //.as => try sema.zirAs(block, inst), + //.as_node => try sema.zirAsNode(block, inst), + //.bit_and => try sema.zirBitwise(block, inst, .bit_and), + //.bit_not => try sema.zirBitNot(block, inst), + //.bit_or => try sema.zirBitwise(block, inst, .bit_or), + //.bitcast => try sema.zirBitcast(block, inst), + //.bitcast_result_ptr => try sema.zirBitcastResultPtr(block, inst), + //.block => try sema.zirBlock(block, inst), + //.suspend_block => try sema.zirSuspendBlock(block, inst), + //.bool_not => try sema.zirBoolNot(block, inst), + //.bool_and => try sema.zirBoolOp(block, inst, false), + //.bool_or => try sema.zirBoolOp(block, inst, true), + //.bool_br_and => try sema.zirBoolBr(block, inst, false), + //.bool_br_or => try sema.zirBoolBr(block, inst, true), + //.c_import => try 
sema.zirCImport(block, inst), + //.call => try sema.zirCall(block, inst, .auto, false), + //.call_chkused => try sema.zirCall(block, inst, .auto, true), + //.call_compile_time => try sema.zirCall(block, inst, .compile_time, false), + //.call_nosuspend => try sema.zirCall(block, inst, .no_async, false), + //.call_async => try sema.zirCall(block, inst, .async_kw, false), + //.cmp_eq => try sema.zirCmp(block, inst, .eq), + //.cmp_gt => try sema.zirCmp(block, inst, .gt), + //.cmp_gte => try sema.zirCmp(block, inst, .gte), + //.cmp_lt => try sema.zirCmp(block, inst, .lt), + //.cmp_lte => try sema.zirCmp(block, inst, .lte), + //.cmp_neq => try sema.zirCmp(block, inst, .neq), + //.coerce_result_ptr => try sema.zirCoerceResultPtr(block, inst), + //.decl_ref => try sema.zirDeclRef(block, inst), + //.decl_val => try sema.zirDeclVal(block, inst), + //.load => try sema.zirLoad(block, inst), + //.elem_ptr => try sema.zirElemPtr(block, inst), + //.elem_ptr_node => try sema.zirElemPtrNode(block, inst), + //.elem_val => try sema.zirElemVal(block, inst), + //.elem_val_node => try sema.zirElemValNode(block, inst), + //.elem_type => try sema.zirElemType(block, inst), + //.enum_literal => try sema.zirEnumLiteral(block, inst), + //.enum_to_int => try sema.zirEnumToInt(block, inst), + //.int_to_enum => try sema.zirIntToEnum(block, inst), + //.err_union_code => try sema.zirErrUnionCode(block, inst), + //.err_union_code_ptr => try sema.zirErrUnionCodePtr(block, inst), + //.err_union_payload_safe => try sema.zirErrUnionPayload(block, inst, true), + //.err_union_payload_safe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, true), + //.err_union_payload_unsafe => try sema.zirErrUnionPayload(block, inst, false), + //.err_union_payload_unsafe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, false), + //.error_union_type => try sema.zirErrorUnionType(block, inst), + //.error_value => try sema.zirErrorValue(block, inst), + //.error_to_int => try sema.zirErrorToInt(block, inst), + //.int_to_error => try sema.zirIntToError(block, inst), + //.field_ptr => try sema.zirFieldPtr(block, inst), + //.field_ptr_named => try sema.zirFieldPtrNamed(block, inst), + //.field_val => try sema.zirFieldVal(block, inst), + //.field_val_named => try sema.zirFieldValNamed(block, inst), + //.func => try sema.zirFunc(block, inst, false), + //.func_inferred => try sema.zirFunc(block, inst, true), + //.import => try sema.zirImport(block, inst), + //.indexable_ptr_len => try sema.zirIndexablePtrLen(block, inst), + //.int => try sema.zirInt(block, inst), + //.int_big => try sema.zirIntBig(block, inst), + //.float => try sema.zirFloat(block, inst), + //.float128 => try sema.zirFloat128(block, inst), + //.int_type => try sema.zirIntType(block, inst), + //.is_non_err => try sema.zirIsNonErr(block, inst), + //.is_non_err_ptr => try sema.zirIsNonErrPtr(block, inst), + //.is_non_null => try sema.zirIsNonNull(block, inst), + //.is_non_null_ptr => try sema.zirIsNonNullPtr(block, inst), + //.loop => try sema.zirLoop(block, inst), + //.merge_error_sets => try sema.zirMergeErrorSets(block, inst), + //.negate => try sema.zirNegate(block, inst, .sub), + //.negate_wrap => try sema.zirNegate(block, inst, .subwrap), + //.optional_payload_safe => try sema.zirOptionalPayload(block, inst, true), + //.optional_payload_safe_ptr => try sema.zirOptionalPayloadPtr(block, inst, true), + //.optional_payload_unsafe => try sema.zirOptionalPayload(block, inst, false), + //.optional_payload_unsafe_ptr => try sema.zirOptionalPayloadPtr(block, inst, false), + 
//.optional_type => try sema.zirOptionalType(block, inst), + //.param_type => try sema.zirParamType(block, inst), + //.ptr_type => try sema.zirPtrType(block, inst), + //.ptr_type_simple => try sema.zirPtrTypeSimple(block, inst), + //.ref => try sema.zirRef(block, inst), + //.ret_err_value_code => try sema.zirRetErrValueCode(block, inst), + //.shl => try sema.zirShl(block, inst), + //.shr => try sema.zirShr(block, inst), + //.slice_end => try sema.zirSliceEnd(block, inst), + //.slice_sentinel => try sema.zirSliceSentinel(block, inst), + //.slice_start => try sema.zirSliceStart(block, inst), + //.str => try sema.zirStr(block, inst), + //.switch_block => try sema.zirSwitchBlock(block, inst, false, .none), + //.switch_block_multi => try sema.zirSwitchBlockMulti(block, inst, false, .none), + //.switch_block_else => try sema.zirSwitchBlock(block, inst, false, .@"else"), + //.switch_block_else_multi => try sema.zirSwitchBlockMulti(block, inst, false, .@"else"), + //.switch_block_under => try sema.zirSwitchBlock(block, inst, false, .under), + //.switch_block_under_multi => try sema.zirSwitchBlockMulti(block, inst, false, .under), + //.switch_block_ref => try sema.zirSwitchBlock(block, inst, true, .none), + //.switch_block_ref_multi => try sema.zirSwitchBlockMulti(block, inst, true, .none), + //.switch_block_ref_else => try sema.zirSwitchBlock(block, inst, true, .@"else"), + //.switch_block_ref_else_multi => try sema.zirSwitchBlockMulti(block, inst, true, .@"else"), + //.switch_block_ref_under => try sema.zirSwitchBlock(block, inst, true, .under), + //.switch_block_ref_under_multi => try sema.zirSwitchBlockMulti(block, inst, true, .under), + //.switch_capture => try sema.zirSwitchCapture(block, inst, false, false), + //.switch_capture_ref => try sema.zirSwitchCapture(block, inst, false, true), + //.switch_capture_multi => try sema.zirSwitchCapture(block, inst, true, false), + //.switch_capture_multi_ref => try sema.zirSwitchCapture(block, inst, true, true), + //.switch_capture_else => try sema.zirSwitchCaptureElse(block, inst, false), + //.switch_capture_else_ref => try sema.zirSwitchCaptureElse(block, inst, true), + //.type_info => try sema.zirTypeInfo(block, inst), + //.size_of => try sema.zirSizeOf(block, inst), + //.bit_size_of => try sema.zirBitSizeOf(block, inst), + //.typeof => try sema.zirTypeof(block, inst), + //.typeof_elem => try sema.zirTypeofElem(block, inst), + //.log2_int_type => try sema.zirLog2IntType(block, inst), + //.typeof_log2_int_type => try sema.zirTypeofLog2IntType(block, inst), + //.xor => try sema.zirBitwise(block, inst, .xor), + //.struct_init_empty => try sema.zirStructInitEmpty(block, inst), + //.struct_init => try sema.zirStructInit(block, inst, false), + //.struct_init_ref => try sema.zirStructInit(block, inst, true), + //.struct_init_anon => try sema.zirStructInitAnon(block, inst, false), + //.struct_init_anon_ref => try sema.zirStructInitAnon(block, inst, true), + //.array_init => try sema.zirArrayInit(block, inst, false), + //.array_init_ref => try sema.zirArrayInit(block, inst, true), + //.array_init_anon => try sema.zirArrayInitAnon(block, inst, false), + //.array_init_anon_ref => try sema.zirArrayInitAnon(block, inst, true), + //.union_init_ptr => try sema.zirUnionInitPtr(block, inst), + //.field_type => try sema.zirFieldType(block, inst), + //.field_type_ref => try sema.zirFieldTypeRef(block, inst), + //.ptr_to_int => try sema.zirPtrToInt(block, inst), + //.align_of => try sema.zirAlignOf(block, inst), + //.bool_to_int => try sema.zirBoolToInt(block, inst), + 
//.embed_file => try sema.zirEmbedFile(block, inst), + //.error_name => try sema.zirErrorName(block, inst), + //.tag_name => try sema.zirTagName(block, inst), + //.reify => try sema.zirReify(block, inst), + //.type_name => try sema.zirTypeName(block, inst), + //.frame_type => try sema.zirFrameType(block, inst), + //.frame_size => try sema.zirFrameSize(block, inst), + //.float_to_int => try sema.zirFloatToInt(block, inst), + //.int_to_float => try sema.zirIntToFloat(block, inst), + //.int_to_ptr => try sema.zirIntToPtr(block, inst), + //.float_cast => try sema.zirFloatCast(block, inst), + //.int_cast => try sema.zirIntCast(block, inst), + //.err_set_cast => try sema.zirErrSetCast(block, inst), + //.ptr_cast => try sema.zirPtrCast(block, inst), + //.truncate => try sema.zirTruncate(block, inst), + //.align_cast => try sema.zirAlignCast(block, inst), + //.has_decl => try sema.zirHasDecl(block, inst), + //.has_field => try sema.zirHasField(block, inst), + //.clz => try sema.zirClz(block, inst), + //.ctz => try sema.zirCtz(block, inst), + //.pop_count => try sema.zirPopCount(block, inst), + //.byte_swap => try sema.zirByteSwap(block, inst), + //.bit_reverse => try sema.zirBitReverse(block, inst), + //.div_exact => try sema.zirDivExact(block, inst), + //.div_floor => try sema.zirDivFloor(block, inst), + //.div_trunc => try sema.zirDivTrunc(block, inst), + //.mod => try sema.zirMod(block, inst), + //.rem => try sema.zirRem(block, inst), + //.shl_exact => try sema.zirShlExact(block, inst), + //.shr_exact => try sema.zirShrExact(block, inst), + //.bit_offset_of => try sema.zirBitOffsetOf(block, inst), + //.offset_of => try sema.zirOffsetOf(block, inst), + //.cmpxchg_strong => try sema.zirCmpxchg(block, inst), + //.cmpxchg_weak => try sema.zirCmpxchg(block, inst), + //.splat => try sema.zirSplat(block, inst), + //.reduce => try sema.zirReduce(block, inst), + //.shuffle => try sema.zirShuffle(block, inst), + //.atomic_load => try sema.zirAtomicLoad(block, inst), + //.atomic_rmw => try sema.zirAtomicRmw(block, inst), + //.atomic_store => try sema.zirAtomicStore(block, inst), + //.mul_add => try sema.zirMulAdd(block, inst), + //.builtin_call => try sema.zirBuiltinCall(block, inst), + //.field_ptr_type => try sema.zirFieldPtrType(block, inst), + //.field_parent_ptr => try sema.zirFieldParentPtr(block, inst), + //.memcpy => try sema.zirMemcpy(block, inst), + //.memset => try sema.zirMemset(block, inst), + //.builtin_async_call => try sema.zirBuiltinAsyncCall(block, inst), + //.@"resume" => try sema.zirResume(block, inst), + //.@"await" => try sema.zirAwait(block, inst, false), + //.await_nosuspend => try sema.zirAwait(block, inst, true), + //.extended => try sema.zirExtended(block, inst), + + //.sqrt => try sema.zirUnaryMath(block, inst), + //.sin => try sema.zirUnaryMath(block, inst), + //.cos => try sema.zirUnaryMath(block, inst), + //.exp => try sema.zirUnaryMath(block, inst), + //.exp2 => try sema.zirUnaryMath(block, inst), + //.log => try sema.zirUnaryMath(block, inst), + //.log2 => try sema.zirUnaryMath(block, inst), + //.log10 => try sema.zirUnaryMath(block, inst), + //.fabs => try sema.zirUnaryMath(block, inst), + //.floor => try sema.zirUnaryMath(block, inst), + //.ceil => try sema.zirUnaryMath(block, inst), + //.trunc => try sema.zirUnaryMath(block, inst), + //.round => try sema.zirUnaryMath(block, inst), + + //.opaque_decl => try sema.zirOpaqueDecl(block, inst, .parent), + //.opaque_decl_anon => try sema.zirOpaqueDecl(block, inst, .anon), + //.opaque_decl_func => try sema.zirOpaqueDecl(block, 
inst, .func), + //.error_set_decl => try sema.zirErrorSetDecl(block, inst, .parent), + //.error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon), + //.error_set_decl_func => try sema.zirErrorSetDecl(block, inst, .func), + + //.add => try sema.zirArithmetic(block, inst), + //.addwrap => try sema.zirArithmetic(block, inst), + //.div => try sema.zirArithmetic(block, inst), + //.mod_rem => try sema.zirArithmetic(block, inst), + //.mul => try sema.zirArithmetic(block, inst), + //.mulwrap => try sema.zirArithmetic(block, inst), + //.sub => try sema.zirArithmetic(block, inst), + //.subwrap => try sema.zirArithmetic(block, inst), + + //// Instructions that we know to *always* be noreturn based solely on their tag. + //// These functions match the return type of analyzeBody so that we can + //// tail call them here. + //.break_inline => return inst, + //.condbr => return sema.zirCondbr(block, inst), + //.@"break" => return sema.zirBreak(block, inst), + //.compile_error => return sema.zirCompileError(block, inst), + //.ret_coerce => return sema.zirRetCoerce(block, inst, true), + //.ret_node => return sema.zirRetNode(block, inst), + //.ret_err_value => return sema.zirRetErrValue(block, inst), + //.@"unreachable" => return sema.zirUnreachable(block, inst), + //.repeat => return sema.zirRepeat(block, inst), + //.panic => return sema.zirPanic(block, inst), + //// zig fmt: on + + //// Instructions that we know can *never* be noreturn based solely on + //// their tag. We avoid needlessly checking if they are noreturn and + //// continue the loop. + //// We also know that they cannot be referenced later, so we avoid + //// putting them into the map. + //.breakpoint => { + // try sema.zirBreakpoint(block, inst); + // i += 1; + // continue; + //}, + //.fence => { + // try sema.zirFence(block, inst); + // i += 1; + // continue; + //}, + //.dbg_stmt => { + // try sema.zirDbgStmt(block, inst); + // i += 1; + // continue; + //}, + //.ensure_err_payload_void => { + // try sema.zirEnsureErrPayloadVoid(block, inst); + // i += 1; + // continue; + //}, + //.ensure_result_non_error => { + // try sema.zirEnsureResultNonError(block, inst); + // i += 1; + // continue; + //}, + //.ensure_result_used => { + // try sema.zirEnsureResultUsed(block, inst); + // i += 1; + // continue; + //}, + //.set_eval_branch_quota => { + // try sema.zirSetEvalBranchQuota(block, inst); + // i += 1; + // continue; + //}, + //.store => { + // try sema.zirStore(block, inst); + // i += 1; + // continue; + //}, + //.store_node => { + // try sema.zirStoreNode(block, inst); + // i += 1; + // continue; + //}, + //.store_to_block_ptr => { + // try sema.zirStoreToBlockPtr(block, inst); + // i += 1; + // continue; + //}, + //.store_to_inferred_ptr => { + // try sema.zirStoreToInferredPtr(block, inst); + // i += 1; + // continue; + //}, + //.resolve_inferred_alloc => { + // try sema.zirResolveInferredAlloc(block, inst); + // i += 1; + // continue; + //}, + //.validate_struct_init_ptr => { + // try sema.zirValidateStructInitPtr(block, inst); + // i += 1; + // continue; + //}, + //.validate_array_init_ptr => { + // try sema.zirValidateArrayInitPtr(block, inst); + // i += 1; + // continue; + //}, + //.@"export" => { + // try sema.zirExport(block, inst); + // i += 1; + // continue; + //}, + //.set_align_stack => { + // try sema.zirSetAlignStack(block, inst); + // i += 1; + // continue; + //}, + //.set_cold => { + // try sema.zirSetCold(block, inst); + // i += 1; + // continue; + //}, + //.set_float_mode => { + // try sema.zirSetFloatMode(block, 
inst); + // i += 1; + // continue; + //}, + //.set_runtime_safety => { + // try sema.zirSetRuntimeSafety(block, inst); + // i += 1; + // continue; + //}, // Special case instructions to handle comptime control flow. .repeat_inline => { @@ -505,37 +505,38 @@ pub fn analyzeBody( i = 0; continue; }, - .block_inline => blk: { - // Directly analyze the block body without introducing a new block. - const inst_data = datas[inst].pl_node; - const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); - const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; - const break_inst = try sema.analyzeBody(block, inline_body); - const break_data = datas[break_inst].@"break"; - if (inst == break_data.block_inst) { - break :blk try sema.resolveInst(break_data.operand); - } else { - return break_inst; - } - }, - .condbr_inline => blk: { - const inst_data = datas[inst].pl_node; - const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; - const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); - const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len]; - const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; - const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition); - const inline_body = if (cond.val.toBool()) then_body else else_body; - const break_inst = try sema.analyzeBody(block, inline_body); - const break_data = datas[break_inst].@"break"; - if (inst == break_data.block_inst) { - break :blk try sema.resolveInst(break_data.operand); - } else { - return break_inst; - } - }, + //.block_inline => blk: { + // // Directly analyze the block body without introducing a new block. + // const inst_data = datas[inst].pl_node; + // const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); + // const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; + // const break_inst = try sema.analyzeBody(block, inline_body); + // const break_data = datas[break_inst].@"break"; + // if (inst == break_data.block_inst) { + // break :blk try sema.resolveInst(break_data.operand); + // } else { + // return break_inst; + // } + //}, + //.condbr_inline => blk: { + // const inst_data = datas[inst].pl_node; + // const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; + // const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); + // const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len]; + // const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; + // const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition); + // const inline_body = if (cond.val.toBool()) then_body else else_body; + // const break_inst = try sema.analyzeBody(block, inline_body); + // const break_data = datas[break_inst].@"break"; + // if (inst == break_data.block_inst) { + // break :blk try sema.resolveInst(break_data.operand); + // } else { + // return break_inst; + // } + //}, + else => @panic("TODO remove else prong"), }; - if (air_inst.ty.isNoReturn()) + if (sema.getAirType(air_inst).isNoReturn()) return always_noreturn; try map.put(sema.gpa, inst, air_inst); i += 1; @@ -577,18 +578,13 @@ fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro } } -/// TODO when we rework AIR memory layout, this function will no longer have a possible error. 
-pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) error{OutOfMemory}!Air.Inst.Index { +pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) Air.Inst.Ref { var i: usize = @enumToInt(zir_ref); // First section of indexes correspond to a set number of constant values. if (i < Zir.Inst.Ref.typed_value_map.len) { - // TODO when we rework AIR memory layout, this function can be as simple as: - // if (zir_ref < Zir.const_inst_list.len + sema.param_count) - // return zir_ref; - // Until then we allocate memory for a new, mutable `ir.Inst` to match what - // AIR expects. - return sema.mod.constInst(sema.arena, .unneeded, Zir.Inst.Ref.typed_value_map[i]); + // We intentionally map the same indexes to the same values between ZIR and AIR. + return zir_ref; } i -= Zir.Inst.Ref.typed_value_map.len; @@ -1256,7 +1252,7 @@ fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In return sema.analyzeLoad(block, src, result_ptr, result_ptr.src); } -fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const arg_name = inst_data.get(sema.code); const arg_index = sema.next_arg_index; @@ -1271,7 +1267,7 @@ fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air // Set the name of the Air.Arg instruction for use by codegen debug info. const air_arg = sema.param_inst_list[arg_index]; - sema.air.instructions.items(.data)[air_arg].ty_str.str = inst_data.start; + sema.air_instructions.items(.data)[air_arg].ty_str.str = inst_data.start; return air_arg; } @@ -7942,6 +7938,18 @@ fn enumFieldSrcLoc( } else unreachable; } +fn getAirType(sema: *Sema, air_ref: Air.Inst.Ref) Type { + var i: usize = @enumToInt(air_ref); + if (i < Air.Inst.Ref.typed_value_map.len) { + return Air.Inst.Ref.typed_value_map[i].val.toType(undefined) catch unreachable; + } + i -= Air.Inst.Ref.typed_value_map.len; + const air_tags = sema.air_instructions.items(.tag); + const air_datas = sema.air_instructions.items(.data); + assert(air_tags[i] == .const_ty); + return air_datas[i].ty; +} + pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { switch (ty.tag()) { .u8 => return .u8_type, diff --git a/src/codegen.zig b/src/codegen.zig index eaf910977e..a6c4b5ad3c 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -282,7 +282,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return struct { gpa: *Allocator, - air: *const Air, + air: Air, + liveness: Liveness, bin_file: *link.File, target: *const std.Target, mod_fn: *const Module.Fn, @@ -468,8 +469,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { var function = Self{ .gpa = bin_file.allocator, - .air = &air, - .liveness = &liveness, + .air = air, + .liveness = liveness, .target = &bin_file.options.target, .bin_file = bin_file, .mod_fn = module_fn, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index e3f2423746..4743494f35 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -6,7 +6,6 @@ const log = std.log.scoped(.c); const link = @import("../link.zig"); const Module = @import("../Module.zig"); const Compilation = @import("../Compilation.zig"); -const Air = @import("../Air.zig"); const Value = @import("../value.zig").Value; const Type = @import("../type.zig").Type; const TypedValue = @import("../TypedValue.zig"); @@ -14,6 +13,8 @@ const C = link.File.C; const Decl = Module.Decl; const trace = 
@import("../tracy.zig").trace; const LazySrcLoc = Module.LazySrcLoc; +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); const Mutability = enum { Const, Mut }; @@ -37,7 +38,7 @@ const BlockData = struct { result: CValue, }; -pub const CValueMap = std.AutoHashMap(*Inst, CValue); +pub const CValueMap = std.AutoHashMap(Air.Inst.Index, CValue); pub const TypedefMap = std.ArrayHashMap( Type, struct { name: []const u8, rendered: []u8 }, @@ -93,6 +94,8 @@ pub fn fmtIdent(ident: []const u8) std.fmt.Formatter(formatIdent) { /// It is not available when generating .h file. pub const Object = struct { dg: DeclGen, + air: Air, + liveness: Liveness, gpa: *mem.Allocator, code: std.ArrayList(u8), value_map: CValueMap, @@ -102,7 +105,7 @@ pub const Object = struct { next_block_index: usize = 0, indent_writer: IndentWriter(std.ArrayList(u8).Writer), - fn resolveInst(o: *Object, inst: *Inst) !CValue { + fn resolveInst(o: *Object, inst: Air.Inst.Index) !CValue { if (inst.value()) |_| { return CValue{ .constant = inst }; } diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 45ee2d9bb8..ddf2883259 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -277,6 +277,9 @@ pub const Object = struct { } pub fn updateDecl(self: *Object, module: *Module, decl: *Module.Decl) !void { + const tracy = trace(@src()); + defer tracy.end(); + var dg: DeclGen = .{ .object = self, .module = module, diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 3d704a8dc5..4da320b087 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -159,7 +159,8 @@ pub const DeclGen = struct { /// The SPIR-V module code should be put in. spv: *SPIRVModule, - air: *const Air, + air: Air, + liveness: Liveness, /// An array of function argument result-ids. Each index corresponds with the /// function argument of the same index. 
diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig index 45b00ddfad..912577a358 100644 --- a/src/codegen/wasm.zig +++ b/src/codegen/wasm.zig @@ -9,13 +9,14 @@ const wasm = std.wasm; const Module = @import("../Module.zig"); const Decl = Module.Decl; -const Air = @import("../Air.zig"); const Type = @import("../type.zig").Type; const Value = @import("../value.zig").Value; const Compilation = @import("../Compilation.zig"); const LazySrcLoc = Module.LazySrcLoc; const link = @import("../link.zig"); const TypedValue = @import("../TypedValue.zig"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); /// Wasm Value, created when generating an instruction const WValue = union(enum) { @@ -491,6 +492,8 @@ pub const Context = struct { /// Reference to the function declaration the code /// section belongs to decl: *Decl, + air: Air, + liveness: Liveness, gpa: *mem.Allocator, /// Table to save `WValue`'s generated by an `Inst` values: ValueTable, @@ -710,52 +713,53 @@ pub const Context = struct { } } + pub fn genFunc(self: *Context, func: *Module.Fn) InnerError!Result { + try self.genFunctype(); + + // Write instructions + // TODO: check for and handle death of instructions + + // Reserve space to write the size after generating the code as well as space for locals count + try self.code.resize(10); + + try self.genBody(func.body); + + // finally, write our local types at the 'offset' position + { + leb.writeUnsignedFixed(5, self.code.items[5..10], @intCast(u32, self.locals.items.len)); + + // offset into 'code' section where we will put our locals types + var local_offset: usize = 10; + + // emit the actual locals amount + for (self.locals.items) |local| { + var buf: [6]u8 = undefined; + leb.writeUnsignedFixed(5, buf[0..5], @as(u32, 1)); + buf[5] = local; + try self.code.insertSlice(local_offset, &buf); + local_offset += 6; + } + } + + const writer = self.code.writer(); + try writer.writeByte(wasm.opcode(.end)); + + // Fill in the size of the generated code to the reserved space at the + // beginning of the buffer. 
+ const size = self.code.items.len - 5 + self.decl.fn_link.wasm.idx_refs.items.len * 5; + leb.writeUnsignedFixed(5, self.code.items[0..5], @intCast(u32, size)); + + // codegen data has been appended to `code` + return Result.appended; + } + /// Generates the wasm bytecode for the function declaration belonging to `Context` pub fn gen(self: *Context, typed_value: TypedValue) InnerError!Result { switch (typed_value.ty.zigTypeTag()) { .Fn => { try self.genFunctype(); - - // Write instructions - // TODO: check for and handle death of instructions - const mod_fn = blk: { - if (typed_value.val.castTag(.function)) |func| break :blk func.data; - if (typed_value.val.castTag(.extern_fn)) |_| return Result.appended; // don't need code body for extern functions - unreachable; - }; - - // Reserve space to write the size after generating the code as well as space for locals count - try self.code.resize(10); - - try self.genBody(mod_fn.body); - - // finally, write our local types at the 'offset' position - { - leb.writeUnsignedFixed(5, self.code.items[5..10], @intCast(u32, self.locals.items.len)); - - // offset into 'code' section where we will put our locals types - var local_offset: usize = 10; - - // emit the actual locals amount - for (self.locals.items) |local| { - var buf: [6]u8 = undefined; - leb.writeUnsignedFixed(5, buf[0..5], @as(u32, 1)); - buf[5] = local; - try self.code.insertSlice(local_offset, &buf); - local_offset += 6; - } - } - - const writer = self.code.writer(); - try writer.writeByte(wasm.opcode(.end)); - - // Fill in the size of the generated code to the reserved space at the - // beginning of the buffer. - const size = self.code.items.len - 5 + self.decl.fn_link.wasm.idx_refs.items.len * 5; - leb.writeUnsignedFixed(5, self.code.items[0..5], @intCast(u32, size)); - - // codegen data has been appended to `code` - return Result.appended; + if (typed_value.val.castTag(.extern_fn)) |_| return Result.appended; // don't need code body for extern functions + return self.fail("TODO implement wasm codegen for function pointers", .{}); }, .Array => { if (typed_value.val.castTag(.bytes)) |payload| { diff --git a/src/link.zig b/src/link.zig index 02d9afaf07..2403180ec8 100644 --- a/src/link.zig +++ b/src/link.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const builtin = @import("builtin"); const mem = std.mem; const Allocator = std.mem.Allocator; const fs = std.fs; @@ -14,8 +15,10 @@ const Cache = @import("Cache.zig"); const build_options = @import("build_options"); const LibCInstallation = @import("libc_installation.zig").LibCInstallation; const wasi_libc = @import("wasi_libc.zig"); +const Air = @import("Air.zig"); +const Liveness = @import("Liveness.zig"); -pub const producer_string = if (std.builtin.is_test) "zig test" else "zig " ++ build_options.version; +pub const producer_string = if (builtin.is_test) "zig test" else "zig " ++ build_options.version; pub const Emit = struct { /// Where the output will go. 
@@ -313,13 +316,34 @@ pub const File = struct { log.debug("updateDecl {*} ({s}), type={}", .{ decl, decl.name, decl.ty }); assert(decl.has_tv); switch (base.tag) { - .coff => return @fieldParentPtr(Coff, "base", base).updateDecl(module, decl), - .elf => return @fieldParentPtr(Elf, "base", base).updateDecl(module, decl), + // zig fmt: off + .coff => return @fieldParentPtr(Coff, "base", base).updateDecl(module, decl), + .elf => return @fieldParentPtr(Elf, "base", base).updateDecl(module, decl), .macho => return @fieldParentPtr(MachO, "base", base).updateDecl(module, decl), - .c => return @fieldParentPtr(C, "base", base).updateDecl(module, decl), - .wasm => return @fieldParentPtr(Wasm, "base", base).updateDecl(module, decl), + .c => return @fieldParentPtr(C, "base", base).updateDecl(module, decl), + .wasm => return @fieldParentPtr(Wasm, "base", base).updateDecl(module, decl), .spirv => return @fieldParentPtr(SpirV, "base", base).updateDecl(module, decl), .plan9 => return @fieldParentPtr(Plan9, "base", base).updateDecl(module, decl), + // zig fmt: on + } + } + + /// May be called before or after updateDeclExports but must be called + /// after allocateDeclIndexes for any given Decl. + pub fn updateFunc(base: *File, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + log.debug("updateFunc {*} ({s}), type={}", .{ + func.owner_decl, func.owner_decl.name, func.owner_decl.ty, + }); + switch (base.tag) { + // zig fmt: off + .coff => return @fieldParentPtr(Coff, "base", base).updateFunc(module, func, air, liveness), + .elf => return @fieldParentPtr(Elf, "base", base).updateFunc(module, func, air, liveness), + .macho => return @fieldParentPtr(MachO, "base", base).updateFunc(module, func, air, liveness), + .c => return @fieldParentPtr(C, "base", base).updateFunc(module, func, air, liveness), + .wasm => return @fieldParentPtr(Wasm, "base", base).updateFunc(module, func, air, liveness), + .spirv => return @fieldParentPtr(SpirV, "base", base).updateFunc(module, func, air, liveness), + .plan9 => return @fieldParentPtr(Plan9, "base", base).updateFunc(module, func, air, liveness), + // zig fmt: on } } diff --git a/src/link/C.zig b/src/link/C.zig index 53561d16cd..09f789f7d1 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -2,14 +2,17 @@ const std = @import("std"); const mem = std.mem; const assert = std.debug.assert; const Allocator = std.mem.Allocator; +const fs = std.fs; + +const C = @This(); const Module = @import("../Module.zig"); const Compilation = @import("../Compilation.zig"); -const fs = std.fs; const codegen = @import("../codegen/c.zig"); const link = @import("../link.zig"); const trace = @import("../tracy.zig").trace; -const C = @This(); const Type = @import("../type.zig").Type; +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); pub const base_tag: link.File.Tag = .c; pub const zig_h = @embedFile("C/zig.h"); @@ -95,10 +98,7 @@ fn deinitDecl(gpa: *Allocator, decl: *Module.Decl) void { decl.fn_link.c.typedefs.deinit(gpa); } -pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void { - const tracy = trace(@src()); - defer tracy.end(); - +pub fn finishUpdateDecl(self: *C, module: *Module, decl: *Module.Decl, air: Air, liveness: Liveness) !void { // Keep track of all decls so we can iterate over them on flush(). 
    _ = try self.decl_table.getOrPut(self.base.allocator, decl);
@@ -126,6 +126,8 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
         .code = code.toManaged(module.gpa),
         .value_map = codegen.CValueMap.init(module.gpa),
         .indent_writer = undefined, // set later so we can get a pointer to object.code
+        .air = air,
+        .liveness = liveness,
     };
     object.indent_writer = .{ .underlying_writer = object.code.writer() };
     defer {
@@ -157,6 +159,20 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
     code.shrinkAndFree(module.gpa, code.items.len);
 }
 
+pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
+    const tracy = trace(@src());
+    defer tracy.end();
+
+    return self.finishUpdateDecl(module, func.owner_decl, air, liveness);
+}
+
+pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
+    const tracy = trace(@src());
+    defer tracy.end();
+
+    return self.finishUpdateDecl(module, decl, undefined, undefined);
+}
+
 pub fn updateDeclLineNumber(self: *C, module: *Module, decl: *Module.Decl) !void {
     // The C backend does not have the ability to fix line numbers without re-generating
     // the entire Decl.
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index b466cf9136..44442b73a3 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -1,6 +1,7 @@
 const Coff = @This();
 
 const std = @import("std");
+const builtin = @import("builtin");
 const log = std.log.scoped(.link);
 const Allocator = std.mem.Allocator;
 const assert = std.debug.assert;
@@ -17,6 +18,8 @@
 const build_options = @import("build_options");
 const Cache = @import("../Cache.zig");
 const mingw = @import("../mingw.zig");
 const llvm_backend = @import("../codegen/llvm.zig");
+const Air = @import("../Air.zig");
+const Liveness = @import("../Liveness.zig");
 
 const allocation_padding = 4 / 3;
 const minimum_text_block_size = 64 * allocation_padding;
@@ -653,19 +656,58 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void {
     }
 }
 
-pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void {
-    // TODO COFF/PE debug information
-    // TODO Implement exports
+pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
+    if (build_options.skip_non_native and builtin.object_format != .coff and builtin.object_format != .pe) {
+        @panic("Attempted to compile for object format that was disabled by build configuration");
+    }
+    if (build_options.have_llvm) {
+        if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness);
+    }
     const tracy = trace(@src());
     defer tracy.end();
 
-    if (build_options.have_llvm)
-        if (self.llvm_object) |llvm_object| return try llvm_object.updateDecl(module, decl);
+    const decl = func.owner_decl;
+    var code_buffer = std.ArrayList(u8).init(self.base.allocator);
+    defer code_buffer.deinit();
+
+    const res = try codegen.generateFunction(
+        &self.base,
+        decl.srcLoc(),
+        func,
+        air,
+        liveness,
+        &code_buffer,
+        .none,
+    );
+    const code = switch (res) {
+        .externally_managed => |x| x,
+        .appended => code_buffer.items,
+        .fail => |em| {
+            decl.analysis = .codegen_failure;
+            try module.failed_decls.put(module.gpa, decl, em);
+            return;
+        },
+    };
+
+    return self.finishUpdateDecl(module, decl, code);
+}
+
+pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void {
+    if (build_options.skip_non_native and builtin.object_format != .coff and builtin.object_format != .pe) {
+        @panic("Attempted to compile for object format that was disabled by build configuration");
+    }
+    if (build_options.have_llvm) {
+        if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl);
+    }
+    const tracy = trace(@src());
+    defer tracy.end();
 
     if (decl.val.tag() == .extern_fn) {
         return; // TODO Should we do more when front-end analyzed extern decl?
     }
 
+    // TODO COFF/PE debug information
+    // TODO Implement exports
+
     var code_buffer = std.ArrayList(u8).init(self.base.allocator);
     defer code_buffer.deinit();
 
@@ -683,6 +725,10 @@ pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void {
         },
     };
 
+    return self.finishUpdateDecl(module, decl, code);
+}
+
+fn finishUpdateDecl(self: *Coff, module: *Module, decl: *Module.Decl, code: []const u8) !void {
     const required_alignment = decl.ty.abiAlignment(self.base.options.target);
 
     const curr_size = decl.link.coff.size;
     if (curr_size != 0) {
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 90224866ba..0d05b97846 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -1,6 +1,7 @@
 const Elf = @This();
 
 const std = @import("std");
+const builtin = @import("builtin");
 const mem = std.mem;
 const assert = std.debug.assert;
 const Allocator = std.mem.Allocator;
@@ -10,7 +11,6 @@
 const log = std.log.scoped(.link);
 const DW = std.dwarf;
 const leb128 = std.leb;
 
-const Air = @import("../Air.zig");
 const Module = @import("../Module.zig");
 const Compilation = @import("../Compilation.zig");
 const codegen = @import("../codegen.zig");
@@ -26,6 +26,8 @@
 const glibc = @import("../glibc.zig");
 const musl = @import("../musl.zig");
 const Cache = @import("../Cache.zig");
 const llvm_backend = @import("../codegen/llvm.zig");
+const Air = @import("../Air.zig");
+const Liveness = @import("../Liveness.zig");
 
 const default_entry_addr = 0x8000000;
 
@@ -2155,138 +2157,17 @@ pub fn freeDecl(self: *Elf, decl: *Module.Decl) void {
     }
 }
 
-pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
-    const tracy = trace(@src());
-    defer tracy.end();
-
-    if (build_options.have_llvm)
-        if (self.llvm_object) |llvm_object| return try llvm_object.updateDecl(module, decl);
-
-    if (decl.val.tag() == .extern_fn) {
-        return; // TODO Should we do more when front-end analyzed extern decl?
-    }
-    if (decl.val.castTag(.variable)) |payload| {
-        const variable = payload.data;
-        if (variable.is_extern) {
-            return; // TODO Should we do more when front-end analyzed extern decl?
-        }
-    }
-
-    var code_buffer = std.ArrayList(u8).init(self.base.allocator);
-    defer code_buffer.deinit();
-
-    var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator);
-    defer dbg_line_buffer.deinit();
-
-    var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator);
-    defer dbg_info_buffer.deinit();
-
-    var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{};
-    defer {
-        var it = dbg_info_type_relocs.valueIterator();
-        while (it.next()) |value| {
-            value.relocs.deinit(self.base.allocator);
-        }
-        dbg_info_type_relocs.deinit(self.base.allocator);
-    }
-
-    const is_fn: bool = switch (decl.ty.zigTypeTag()) {
-        .Fn => true,
-        else => false,
-    };
-    if (is_fn) {
-        // For functions we need to add a prologue to the debug line program.
-        try dbg_line_buffer.ensureCapacity(26);
-
-        const func = decl.val.castTag(.function).?.data;
-        const line_off = @intCast(u28, decl.src_line + func.lbrace_line);
-
-        const ptr_width_bytes = self.ptrWidthBytes();
-        dbg_line_buffer.appendSliceAssumeCapacity(&[_]u8{
-            DW.LNS_extended_op,
-            ptr_width_bytes + 1,
-            DW.LNE_set_address,
-        });
-        // This is the "relocatable" vaddr, corresponding to `code_buffer` index `0`.
- assert(dbg_line_vaddr_reloc_index == dbg_line_buffer.items.len); - dbg_line_buffer.items.len += ptr_width_bytes; - - dbg_line_buffer.appendAssumeCapacity(DW.LNS_advance_line); - // This is the "relocatable" relative line offset from the previous function's end curly - // to this function's begin curly. - assert(self.getRelocDbgLineOff() == dbg_line_buffer.items.len); - // Here we use a ULEB128-fixed-4 to make sure this field can be overwritten later. - leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), line_off); - - dbg_line_buffer.appendAssumeCapacity(DW.LNS_set_file); - assert(self.getRelocDbgFileIndex() == dbg_line_buffer.items.len); - // Once we support more than one source file, this will have the ability to be more - // than one possible value. - const file_index = 1; - leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), file_index); - - // Emit a line for the begin curly with prologue_end=false. The codegen will - // do the work of setting prologue_end=true and epilogue_begin=true. - dbg_line_buffer.appendAssumeCapacity(DW.LNS_copy); - - // .debug_info subprogram - const decl_name_with_null = decl.name[0 .. mem.lenZ(decl.name) + 1]; - try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 25 + decl_name_with_null.len); - - const fn_ret_type = decl.ty.fnReturnType(); - const fn_ret_has_bits = fn_ret_type.hasCodeGenBits(); - if (fn_ret_has_bits) { - dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram); - } else { - dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram_retvoid); - } - // These get overwritten after generating the machine code. These values are - // "relocations" and have to be in this fixed place so that functions can be - // moved in virtual address space. - assert(dbg_info_low_pc_reloc_index == dbg_info_buffer.items.len); - dbg_info_buffer.items.len += ptr_width_bytes; // DW.AT_low_pc, DW.FORM_addr - assert(self.getRelocDbgInfoSubprogramHighPC() == dbg_info_buffer.items.len); - dbg_info_buffer.items.len += 4; // DW.AT_high_pc, DW.FORM_data4 - if (fn_ret_has_bits) { - const gop = try dbg_info_type_relocs.getOrPut(self.base.allocator, fn_ret_type); - if (!gop.found_existing) { - gop.value_ptr.* = .{ - .off = undefined, - .relocs = .{}, - }; - } - try gop.value_ptr.relocs.append(self.base.allocator, @intCast(u32, dbg_info_buffer.items.len)); - dbg_info_buffer.items.len += 4; // DW.AT_type, DW.FORM_ref4 - } - dbg_info_buffer.appendSliceAssumeCapacity(decl_name_with_null); // DW.AT_name, DW.FORM_string - } else { - // TODO implement .debug_info for global variables +fn deinitRelocs(gpa: *Allocator, table: *File.DbgInfoTypeRelocsTable) void { + var it = table.valueIterator(); + while (it.next()) |value| { + value.relocs.deinit(gpa); } - const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; - const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ - .ty = decl.ty, - .val = decl_val, - }, &code_buffer, .{ - .dwarf = .{ - .dbg_line = &dbg_line_buffer, - .dbg_info = &dbg_info_buffer, - .dbg_info_type_relocs = &dbg_info_type_relocs, - }, - }); - const code = switch (res) { - .externally_managed => |x| x, - .appended => code_buffer.items, - .fail => |em| { - decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl, em); - return; - }, - }; + table.deinit(gpa); +} +fn updateDeclCode(self: *Elf, decl: *Module.Decl, code: []const u8, stt_bits: u8) !*elf.Elf64_Sym { const required_alignment = decl.ty.abiAlignment(self.base.options.target); - 
const stt_bits: u8 = if (is_fn) elf.STT_FUNC else elf.STT_OBJECT; - assert(decl.link.elf.local_sym_index != 0); // Caller forgot to allocateDeclIndexes() const local_sym = &self.local_symbols.items[decl.link.elf.local_sym_index]; if (local_sym.st_size != 0) { @@ -2338,128 +2219,16 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { const file_offset = self.sections.items[self.text_section_index.?].sh_offset + section_offset; try self.base.file.?.pwriteAll(code, file_offset); - const target_endian = self.base.options.target.cpu.arch.endian(); - - const text_block = &decl.link.elf; - - // If the Decl is a function, we need to update the .debug_line program. - if (is_fn) { - // Perform the relocations based on vaddr. - switch (self.ptr_width) { - .p32 => { - { - const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..4]; - mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian); - } - { - const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..4]; - mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian); - } - }, - .p64 => { - { - const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..8]; - mem.writeInt(u64, ptr, local_sym.st_value, target_endian); - } - { - const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..8]; - mem.writeInt(u64, ptr, local_sym.st_value, target_endian); - } - }, - } - { - const ptr = dbg_info_buffer.items[self.getRelocDbgInfoSubprogramHighPC()..][0..4]; - mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_size), target_endian); - } - - try dbg_line_buffer.appendSlice(&[_]u8{ DW.LNS_extended_op, 1, DW.LNE_end_sequence }); - - // Now we have the full contents and may allocate a region to store it. - - // This logic is nearly identical to the logic below in `updateDeclDebugInfoAllocation` for - // `TextBlock` and the .debug_info. If you are editing this logic, you - // probably need to edit that logic too. - - const debug_line_sect = &self.sections.items[self.debug_line_section_index.?]; - const src_fn = &decl.fn_link.elf; - src_fn.len = @intCast(u32, dbg_line_buffer.items.len); - if (self.dbg_line_fn_last) |last| not_first: { - if (src_fn.next) |next| { - // Update existing function - non-last item. - if (src_fn.off + src_fn.len + min_nop_size > next.off) { - // It grew too big, so we move it to a new location. - if (src_fn.prev) |prev| { - self.dbg_line_fn_free_list.put(self.base.allocator, prev, {}) catch {}; - prev.next = src_fn.next; - } - assert(src_fn.prev != next); - next.prev = src_fn.prev; - src_fn.next = null; - // Populate where it used to be with NOPs. - const file_pos = debug_line_sect.sh_offset + src_fn.off; - try self.pwriteDbgLineNops(0, &[0]u8{}, src_fn.len, file_pos); - // TODO Look at the free list before appending at the end. - src_fn.prev = last; - last.next = src_fn; - self.dbg_line_fn_last = src_fn; - - src_fn.off = last.off + padToIdeal(last.len); - } - } else if (src_fn.prev == null) { - if (src_fn == last) { - // Special case: there is only 1 function and it is being updated. - // In this case there is nothing to do. The function's length has - // already been updated, and the logic below takes care of - // resizing the .debug_line section. - break :not_first; - } - // Append new function. - // TODO Look at the free list before appending at the end. 
- src_fn.prev = last; - last.next = src_fn; - self.dbg_line_fn_last = src_fn; - - src_fn.off = last.off + padToIdeal(last.len); - } - } else { - // This is the first function of the Line Number Program. - self.dbg_line_fn_first = src_fn; - self.dbg_line_fn_last = src_fn; - - src_fn.off = padToIdeal(self.dbgLineNeededHeaderBytes()); - } - - const last_src_fn = self.dbg_line_fn_last.?; - const needed_size = last_src_fn.off + last_src_fn.len; - if (needed_size != debug_line_sect.sh_size) { - if (needed_size > self.allocatedSize(debug_line_sect.sh_offset)) { - const new_offset = self.findFreeSpace(needed_size, 1); - const existing_size = last_src_fn.off; - log.debug("moving .debug_line section: {d} bytes from 0x{x} to 0x{x}", .{ - existing_size, - debug_line_sect.sh_offset, - new_offset, - }); - const amt = try self.base.file.?.copyRangeAll(debug_line_sect.sh_offset, self.base.file.?, new_offset, existing_size); - if (amt != existing_size) return error.InputOutput; - debug_line_sect.sh_offset = new_offset; - } - debug_line_sect.sh_size = needed_size; - self.shdr_table_dirty = true; // TODO look into making only the one section dirty - self.debug_line_header_dirty = true; - } - const prev_padding_size: u32 = if (src_fn.prev) |prev| src_fn.off - (prev.off + prev.len) else 0; - const next_padding_size: u32 = if (src_fn.next) |next| next.off - (src_fn.off + src_fn.len) else 0; - - // We only have support for one compilation unit so far, so the offsets are directly - // from the .debug_line section. - const file_pos = debug_line_sect.sh_offset + src_fn.off; - try self.pwriteDbgLineNops(prev_padding_size, dbg_line_buffer.items, next_padding_size, file_pos); - - // .debug_info - End the TAG_subprogram children. - try dbg_info_buffer.append(0); - } + return local_sym; +} +fn finishUpdateDecl( + self: *Elf, + module: *Module, + decl: *Module.Decl, + dbg_info_type_relocs: *File.DbgInfoTypeRelocsTable, + dbg_info_buffer: *std.ArrayList(u8), +) !void { // Now we emit the .debug_info types of the Decl. These will count towards the size of // the buffer, so we have to do it before computing the offset, and we can't perform the actual // relocations yet. @@ -2467,12 +2236,15 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { var it = dbg_info_type_relocs.iterator(); while (it.next()) |entry| { entry.value_ptr.off = @intCast(u32, dbg_info_buffer.items.len); - try self.addDbgInfoType(entry.key_ptr.*, &dbg_info_buffer); + try self.addDbgInfoType(entry.key_ptr.*, dbg_info_buffer); } } + const text_block = &decl.link.elf; try self.updateDeclDebugInfoAllocation(text_block, @intCast(u32, dbg_info_buffer.items.len)); + const target_endian = self.base.options.target.cpu.arch.endian(); + { // Now that we have the offset assigned we can finally perform type relocations. 
var it = dbg_info_type_relocs.valueIterator(); @@ -2495,6 +2267,290 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { return self.updateDeclExports(module, decl, decl_exports); } +pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native and builtin.object_format != .elf) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + } + + const tracy = trace(@src()); + defer tracy.end(); + + var code_buffer = std.ArrayList(u8).init(self.base.allocator); + defer code_buffer.deinit(); + + var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator); + defer dbg_line_buffer.deinit(); + + var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator); + defer dbg_info_buffer.deinit(); + + var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{}; + defer deinitRelocs(self.base.allocator, &dbg_info_type_relocs); + + // For functions we need to add a prologue to the debug line program. + try dbg_line_buffer.ensureCapacity(26); + + const decl = func.owner_decl; + const line_off = @intCast(u28, decl.src_line + func.lbrace_line); + + const ptr_width_bytes = self.ptrWidthBytes(); + dbg_line_buffer.appendSliceAssumeCapacity(&[_]u8{ + DW.LNS_extended_op, + ptr_width_bytes + 1, + DW.LNE_set_address, + }); + // This is the "relocatable" vaddr, corresponding to `code_buffer` index `0`. + assert(dbg_line_vaddr_reloc_index == dbg_line_buffer.items.len); + dbg_line_buffer.items.len += ptr_width_bytes; + + dbg_line_buffer.appendAssumeCapacity(DW.LNS_advance_line); + // This is the "relocatable" relative line offset from the previous function's end curly + // to this function's begin curly. + assert(self.getRelocDbgLineOff() == dbg_line_buffer.items.len); + // Here we use a ULEB128-fixed-4 to make sure this field can be overwritten later. + leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), line_off); + + dbg_line_buffer.appendAssumeCapacity(DW.LNS_set_file); + assert(self.getRelocDbgFileIndex() == dbg_line_buffer.items.len); + // Once we support more than one source file, this will have the ability to be more + // than one possible value. + const file_index = 1; + leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), file_index); + + // Emit a line for the begin curly with prologue_end=false. The codegen will + // do the work of setting prologue_end=true and epilogue_begin=true. + dbg_line_buffer.appendAssumeCapacity(DW.LNS_copy); + + // .debug_info subprogram + const decl_name_with_null = decl.name[0 .. mem.lenZ(decl.name) + 1]; + try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 25 + decl_name_with_null.len); + + const fn_ret_type = decl.ty.fnReturnType(); + const fn_ret_has_bits = fn_ret_type.hasCodeGenBits(); + if (fn_ret_has_bits) { + dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram); + } else { + dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram_retvoid); + } + // These get overwritten after generating the machine code. These values are + // "relocations" and have to be in this fixed place so that functions can be + // moved in virtual address space. 
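
// [Sketch, not from the patch: the reserve-now/patch-later "relocation"
// pattern the comment above describes, reduced to a standalone example.
// All names here (demo, dbg_info, low_pc_reloc_index) are made up.]
const std = @import("std");

fn demo(gpa: *std.mem.Allocator) !void {
    var dbg_info = std.ArrayList(u8).init(gpa);
    defer dbg_info.deinit();

    // Reserve a fixed-width placeholder and remember its offset.
    const low_pc_reloc_index = dbg_info.items.len;
    try dbg_info.ensureCapacity(dbg_info.items.len + 8);
    dbg_info.items.len += 8; // room for a 64-bit DW.AT_low_pc

    // ... machine code is generated and the symbol receives its final vaddr ...

    // Patch the placeholder in place; nothing after it has to move.
    const vaddr: u64 = 0x201000;
    std.mem.writeInt(u64, dbg_info.items[low_pc_reloc_index..][0..8], vaddr, .Little);
}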
+ assert(dbg_info_low_pc_reloc_index == dbg_info_buffer.items.len); + dbg_info_buffer.items.len += ptr_width_bytes; // DW.AT_low_pc, DW.FORM_addr + assert(self.getRelocDbgInfoSubprogramHighPC() == dbg_info_buffer.items.len); + dbg_info_buffer.items.len += 4; // DW.AT_high_pc, DW.FORM_data4 + if (fn_ret_has_bits) { + const gop = try dbg_info_type_relocs.getOrPut(self.base.allocator, fn_ret_type); + if (!gop.found_existing) { + gop.value_ptr.* = .{ + .off = undefined, + .relocs = .{}, + }; + } + try gop.value_ptr.relocs.append(self.base.allocator, @intCast(u32, dbg_info_buffer.items.len)); + dbg_info_buffer.items.len += 4; // DW.AT_type, DW.FORM_ref4 + } + dbg_info_buffer.appendSliceAssumeCapacity(decl_name_with_null); // DW.AT_name, DW.FORM_string + + const res = try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{ + .dwarf = .{ + .dbg_line = &dbg_line_buffer, + .dbg_info = &dbg_info_buffer, + .dbg_info_type_relocs = &dbg_info_type_relocs, + }, + }); + const code = switch (res) { + .externally_managed => |x| x, + .appended => code_buffer.items, + .fail => |em| { + decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, decl, em); + return; + }, + }; + + const local_sym = try self.updateDeclCode(decl, code, elf.STT_FUNC); + + const target_endian = self.base.options.target.cpu.arch.endian(); + + // Since the Decl is a function, we need to update the .debug_line program. + // Perform the relocations based on vaddr. + switch (self.ptr_width) { + .p32 => { + { + const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..4]; + mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian); + } + { + const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..4]; + mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian); + } + }, + .p64 => { + { + const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..8]; + mem.writeInt(u64, ptr, local_sym.st_value, target_endian); + } + { + const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..8]; + mem.writeInt(u64, ptr, local_sym.st_value, target_endian); + } + }, + } + { + const ptr = dbg_info_buffer.items[self.getRelocDbgInfoSubprogramHighPC()..][0..4]; + mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_size), target_endian); + } + + try dbg_line_buffer.appendSlice(&[_]u8{ DW.LNS_extended_op, 1, DW.LNE_end_sequence }); + + // Now we have the full contents and may allocate a region to store it. + + // This logic is nearly identical to the logic below in `updateDeclDebugInfoAllocation` for + // `TextBlock` and the .debug_info. If you are editing this logic, you + // probably need to edit that logic too. + + const debug_line_sect = &self.sections.items[self.debug_line_section_index.?]; + const src_fn = &decl.fn_link.elf; + src_fn.len = @intCast(u32, dbg_line_buffer.items.len); + if (self.dbg_line_fn_last) |last| not_first: { + if (src_fn.next) |next| { + // Update existing function - non-last item. + if (src_fn.off + src_fn.len + min_nop_size > next.off) { + // It grew too big, so we move it to a new location. + if (src_fn.prev) |prev| { + self.dbg_line_fn_free_list.put(self.base.allocator, prev, {}) catch {}; + prev.next = src_fn.next; + } + assert(src_fn.prev != next); + next.prev = src_fn.prev; + src_fn.next = null; + // Populate where it used to be with NOPs. 
+ const file_pos = debug_line_sect.sh_offset + src_fn.off; + try self.pwriteDbgLineNops(0, &[0]u8{}, src_fn.len, file_pos); + // TODO Look at the free list before appending at the end. + src_fn.prev = last; + last.next = src_fn; + self.dbg_line_fn_last = src_fn; + + src_fn.off = last.off + padToIdeal(last.len); + } + } else if (src_fn.prev == null) { + if (src_fn == last) { + // Special case: there is only 1 function and it is being updated. + // In this case there is nothing to do. The function's length has + // already been updated, and the logic below takes care of + // resizing the .debug_line section. + break :not_first; + } + // Append new function. + // TODO Look at the free list before appending at the end. + src_fn.prev = last; + last.next = src_fn; + self.dbg_line_fn_last = src_fn; + + src_fn.off = last.off + padToIdeal(last.len); + } + } else { + // This is the first function of the Line Number Program. + self.dbg_line_fn_first = src_fn; + self.dbg_line_fn_last = src_fn; + + src_fn.off = padToIdeal(self.dbgLineNeededHeaderBytes()); + } + + const last_src_fn = self.dbg_line_fn_last.?; + const needed_size = last_src_fn.off + last_src_fn.len; + if (needed_size != debug_line_sect.sh_size) { + if (needed_size > self.allocatedSize(debug_line_sect.sh_offset)) { + const new_offset = self.findFreeSpace(needed_size, 1); + const existing_size = last_src_fn.off; + log.debug("moving .debug_line section: {d} bytes from 0x{x} to 0x{x}", .{ + existing_size, + debug_line_sect.sh_offset, + new_offset, + }); + const amt = try self.base.file.?.copyRangeAll(debug_line_sect.sh_offset, self.base.file.?, new_offset, existing_size); + if (amt != existing_size) return error.InputOutput; + debug_line_sect.sh_offset = new_offset; + } + debug_line_sect.sh_size = needed_size; + self.shdr_table_dirty = true; // TODO look into making only the one section dirty + self.debug_line_header_dirty = true; + } + const prev_padding_size: u32 = if (src_fn.prev) |prev| src_fn.off - (prev.off + prev.len) else 0; + const next_padding_size: u32 = if (src_fn.next) |next| next.off - (src_fn.off + src_fn.len) else 0; + + // We only have support for one compilation unit so far, so the offsets are directly + // from the .debug_line section. + const file_pos = debug_line_sect.sh_offset + src_fn.off; + try self.pwriteDbgLineNops(prev_padding_size, dbg_line_buffer.items, next_padding_size, file_pos); + + // .debug_info - End the TAG_subprogram children. + try dbg_info_buffer.append(0); + + return self.finishUpdateDecl(module, decl, &dbg_info_type_relocs, &dbg_info_buffer); +} + +pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { + if (build_options.skip_non_native and builtin.object_format != .elf) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl); + } + + const tracy = trace(@src()); + defer tracy.end(); + + if (decl.val.tag() == .extern_fn) { + return; // TODO Should we do more when front-end analyzed extern decl? + } + if (decl.val.castTag(.variable)) |payload| { + const variable = payload.data; + if (variable.is_extern) { + return; // TODO Should we do more when front-end analyzed extern decl? 
+ } + } + + var code_buffer = std.ArrayList(u8).init(self.base.allocator); + defer code_buffer.deinit(); + + var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator); + defer dbg_info_buffer.deinit(); + + var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{}; + defer deinitRelocs(self.base.allocator, &dbg_info_type_relocs); + + // TODO implement .debug_info for global variables + const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; + const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ + .ty = decl.ty, + .val = decl_val, + }, &code_buffer, .{ + .dwarf = .{ + .dbg_line = &dbg_line_buffer, + .dbg_info = &dbg_info_buffer, + .dbg_info_type_relocs = &dbg_info_type_relocs, + }, + }); + const code = switch (res) { + .externally_managed => |x| x, + .appended => code_buffer.items, + .fail => |em| { + decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, decl, em); + return; + }, + }; + + _ = try self.updateDeclCode(decl, code, elf.STT_OBJECT); + return self.finishUpdateDecl(module, decl, &dbg_info_type_relocs, &dbg_info_buffer); +} + /// Asserts the type has codegen bits. fn addDbgInfoType(self: *Elf, ty: Type, dbg_info_buffer: *std.ArrayList(u8)) !void { switch (ty.zigTypeTag()) { diff --git a/src/link/MachO.zig b/src/link/MachO.zig index df2e0134e4..cd020c1b27 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1,6 +1,7 @@ const MachO = @This(); const std = @import("std"); +const builtin = @import("builtin"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; const fmt = std.fmt; @@ -22,6 +23,8 @@ const link = @import("../link.zig"); const File = link.File; const Cache = @import("../Cache.zig"); const target_util = @import("../target.zig"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); const DebugSymbols = @import("MachO/DebugSymbols.zig"); const Trie = @import("MachO/Trie.zig"); @@ -1132,7 +1135,55 @@ pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void { }; } +pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native and builtin.object_format != .macho) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + } + const tracy = trace(@src()); + defer tracy.end(); + + const decl = func.owner_decl; + + var code_buffer = std.ArrayList(u8).init(self.base.allocator); + defer code_buffer.deinit(); + + var debug_buffers = if (self.d_sym) |*ds| try ds.initDeclDebugBuffers(self.base.allocator, module, decl) else null; + defer { + if (debug_buffers) |*dbg| { + dbg.dbg_line_buffer.deinit(); + dbg.dbg_info_buffer.deinit(); + var it = dbg.dbg_info_type_relocs.valueIterator(); + while (it.next()) |value| { + value.relocs.deinit(self.base.allocator); + } + dbg.dbg_info_type_relocs.deinit(self.base.allocator); + } + } + + const res = if (debug_buffers) |*dbg| + try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{ + .dwarf = .{ + .dbg_line = &dbg.dbg_line_buffer, + .dbg_info = &dbg.dbg_info_buffer, + .dbg_info_type_relocs = &dbg.dbg_info_type_relocs, + }, + }) + else + try codegen.generateSymbol(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none); + + return self.finishUpdateDecl(module, decl, res); +} + pub fn updateDecl(self: 
*MachO, module: *Module, decl: *Module.Decl) !void { + if (build_options.skip_non_native and builtin.object_format != .macho) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl); + } const tracy = trace(@src()); defer tracy.end(); @@ -1173,6 +1224,10 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { .val = decl.val, }, &code_buffer, .none); + return self.finishUpdateDecl(module, decl, res); +} + +fn finishUpdateDecl(self: *MachO, module: *Module, decl: *Module.Decl, res: codegen.Result) !void { const code = switch (res) { .externally_managed => |x| x, .appended => code_buffer.items, diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 80a92f9cdb..bc044ce414 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -2,18 +2,21 @@ //! would be to add incremental linking in a similar way as ELF does. const Plan9 = @This(); - -const std = @import("std"); const link = @import("../link.zig"); const Module = @import("../Module.zig"); const Compilation = @import("../Compilation.zig"); const aout = @import("Plan9/aout.zig"); const codegen = @import("../codegen.zig"); const trace = @import("../tracy.zig").trace; -const mem = std.mem; const File = link.File; -const Allocator = std.mem.Allocator; +const build_options = @import("build_options"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); +const std = @import("std"); +const builtin = @import("builtin"); +const mem = std.mem; +const Allocator = std.mem.Allocator; const log = std.log.scoped(.link); const assert = std.debug.assert; @@ -120,6 +123,19 @@ pub fn createEmpty(gpa: *Allocator, options: link.Options) !*Plan9 { return self; } +pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native and builtin.object_format != .plan9) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + _ = module; + // Keep track of all decls so we can iterate over them on flush(). 
+ _ = try self.decl_table.getOrPut(self.base.allocator, func.owner_decl); + + _ = air; + _ = liveness; + @panic("TODO Plan9 needs to keep track of Air and Liveness so it can use them later"); +} + pub fn updateDecl(self: *Plan9, module: *Module, decl: *Module.Decl) !void { _ = module; _ = try self.decl_table.getOrPut(self.base.allocator, decl); @@ -138,6 +154,9 @@ pub fn flush(self: *Plan9, comp: *Compilation) !void { } pub fn flushModule(self: *Plan9, comp: *Compilation) !void { + if (build_options.skip_non_native and builtin.object_format != .plan9) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } _ = comp; const tracy = trace(@src()); defer tracy.end(); @@ -199,7 +218,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void { } } if (std.mem.eql(u8, exp.options.name, "_start")) { - std.debug.assert(decl.link.plan9.type == .t); // we tried to link a non-function as the entry + assert(decl.link.plan9.type == .t); // we tried to link a non-function as the entry self.entry_decl = decl; } if (exp.link.plan9) |i| { diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index 8a2e877d42..bc9e560582 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -36,6 +36,8 @@ const ResultId = codegen.ResultId; const trace = @import("../tracy.zig").trace; const build_options = @import("build_options"); const spec = @import("../codegen/spirv/spec.zig"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); // TODO: Should this struct be used at all rather than just a hashmap of aux data for every decl? pub const FnData = struct { @@ -101,7 +103,23 @@ pub fn deinit(self: *SpirV) void { self.decl_table.deinit(self.base.allocator); } +pub fn updateFunc(self: *SpirV, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native) { + @panic("Attempted to compile for architecture that was disabled by build configuration"); + } + _ = module; + // Keep track of all decls so we can iterate over them on flush(). + _ = try self.decl_table.getOrPut(self.base.allocator, func.owner_decl); + + _ = air; + _ = liveness; + @panic("TODO SPIR-V needs to keep track of Air and Liveness so it can use them later"); +} + pub fn updateDecl(self: *SpirV, module: *Module, decl: *Module.Decl) !void { + if (build_options.skip_non_native) { + @panic("Attempted to compile for architecture that was disabled by build configuration"); + } _ = module; // Keep track of all decls so we can iterate over them on flush(). 
_ = try self.decl_table.getOrPut(self.base.allocator, decl); @@ -132,13 +150,13 @@ pub fn flush(self: *SpirV, comp: *Compilation) !void { } pub fn flushModule(self: *SpirV, comp: *Compilation) !void { - const tracy = trace(@src()); - defer tracy.end(); - if (build_options.skip_non_native) { @panic("Attempted to compile for architecture that was disabled by build configuration"); } + const tracy = trace(@src()); + defer tracy.end(); + const module = self.base.options.module.?; const target = comp.getTarget(); diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 15a36a4bcc..be6ad78701 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -1,6 +1,7 @@ const Wasm = @This(); const std = @import("std"); +const builtin = @import("builtin"); const mem = std.mem; const Allocator = std.mem.Allocator; const assert = std.debug.assert; @@ -18,6 +19,8 @@ const build_options = @import("build_options"); const wasi_libc = @import("../wasi_libc.zig"); const Cache = @import("../Cache.zig"); const TypedValue = @import("../TypedValue.zig"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); pub const base_tag = link.File.Tag.wasm; @@ -186,11 +189,60 @@ pub fn allocateDeclIndexes(self: *Wasm, decl: *Module.Decl) !void { } } +pub fn updateFunc(self: *Wasm, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native and builtin.object_format != .wasm) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + } + const decl = func.owner_decl; + assert(decl.link.wasm.init); // Must call allocateDeclIndexes() + + const fn_data = &decl.fn_link.wasm; + fn_data.functype.items.len = 0; + fn_data.code.items.len = 0; + fn_data.idx_refs.items.len = 0; + + var context = codegen.Context{ + .gpa = self.base.allocator, + .air = air, + .liveness = liveness, + .values = .{}, + .code = fn_data.code.toManaged(self.base.allocator), + .func_type_data = fn_data.functype.toManaged(self.base.allocator), + .decl = decl, + .err_msg = undefined, + .locals = .{}, + .target = self.base.options.target, + .global_error_set = self.base.options.module.?.global_error_set, + }; + defer context.deinit(); + + // generate the 'code' section for the function declaration + const result = context.genFunc(func) catch |err| switch (err) { + error.CodegenFail => { + decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, decl, context.err_msg); + return; + }, + else => |e| return e, + }; + return self.finishUpdateDecl(decl, result); +} + // Generate code for the Decl, storing it in memory to be later written to // the file on flush(). 
pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void { - std.debug.assert(decl.link.wasm.init); // Must call allocateDeclIndexes() + if (build_options.skip_non_native and builtin.object_format != .wasm) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl); + } + assert(decl.link.wasm.init); // Must call allocateDeclIndexes() + // TODO don't use this for non-functions const fn_data = &decl.fn_link.wasm; fn_data.functype.items.len = 0; fn_data.code.items.len = 0; @@ -218,7 +270,10 @@ pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void { }, else => |e| return e, }; + return self.finishUpdateDecl(decl, result); +} +fn finishUpdateDecl(self: *Wasm, decl: *Module.Decl, result: codegen.Result) !void { const code: []const u8 = switch (result) { .appended => @as([]const u8, context.code.items), .externally_managed => |payload| payload, @@ -521,7 +576,7 @@ pub fn flushModule(self: *Wasm, comp: *Compilation) !void { var data_offset = offset_table_size; while (cur) |cur_block| : (cur = cur_block.next) { if (cur_block.size == 0) continue; - std.debug.assert(cur_block.init); + assert(cur_block.init); const offset = (cur_block.offset_index) * ptr_width; var buf: [4]u8 = undefined; -- cgit v1.2.3 From c09b973ec25f328f5e15e9e6eed4da7f5e4634af Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 13 Jul 2021 15:45:08 -0700 Subject: stage2: compile error fixes for AIR memory layout branch Now the branch is compiling again, provided that one uses `-Dskip-non-native`, but many code paths are disabled. The code paths can now be re-enabled one at a time and updated to conform to the new AIR memory layout. --- src/Air.zig | 30 +- src/Compilation.zig | 2 +- src/Liveness.zig | 71 ++-- src/Module.zig | 34 +- src/Sema.zig | 986 +++++++++++++++++++++++++++++----------------------- src/codegen.zig | 159 +++++---- src/codegen/c.zig | 204 +++++------ src/link/Elf.zig | 3 + src/value.zig | 2 +- 9 files changed, 851 insertions(+), 640 deletions(-) (limited to 'src/link') diff --git a/src/Air.zig b/src/Air.zig index e85f2e5c43..1f294c43f3 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -332,12 +332,12 @@ pub const Block = struct { body_len: u32, }; -/// Trailing is a list of `Ref` for every `args_len`. +/// Trailing is a list of `Inst.Ref` for every `args_len`. pub const Call = struct { args_len: u32, }; -/// This data is stored inside extra, with two sets of trailing `Ref`: +/// This data is stored inside extra, with two sets of trailing `Inst.Ref`: /// * 0. the then body, according to `then_body_len`. /// * 1. the else body, according to `else_body_len`. pub const CondBr = struct { @@ -355,19 +355,19 @@ pub const SwitchBr = struct { /// Trailing: /// * instruction index for each `body_len`. pub const Case = struct { - item: Ref, + item: Inst.Ref, body_len: u32, }; }; pub const StructField = struct { - struct_ptr: Ref, + struct_ptr: Inst.Ref, field_index: u32, }; /// Trailing: -/// 0. `Ref` for every outputs_len -/// 1. `Ref` for every inputs_len +/// 0. `Inst.Ref` for every outputs_len +/// 1. `Inst.Ref` for every inputs_len pub const Asm = struct { /// Index to the corresponding ZIR instruction. 
/// `asm_source`, `outputs_len`, `inputs_len`, `clobbers_len`, `is_volatile`, and @@ -381,6 +381,24 @@ pub fn getMainBody(air: Air) []const Air.Inst.Index { return air.extra[body_index..][0..body_len]; } +pub fn getType(air: Air, inst: Air.Inst.Index) Type { + _ = air; + _ = inst; + @panic("TODO Air getType"); +} + +pub fn getRefType(air: Air, ref: Air.Inst.Ref) Type { + var i: usize = @enumToInt(ref); + if (i < Air.Inst.Ref.typed_value_map.len) { + return Air.Inst.Ref.typed_value_map[i].val.toType(undefined) catch unreachable; + } + i -= Air.Inst.Ref.typed_value_map.len; + const air_tags = air.instructions.items(.tag); + const air_datas = air.instructions.items(.data); + assert(air_tags[i] == .const_ty); + return air_datas[i].ty; +} + /// Returns the requested data, as well as the new index which is at the start of the /// trailers for the object. pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end: usize } { diff --git a/src/Compilation.zig b/src/Compilation.zig index 90224a77d1..4a442a8b67 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2023,7 +2023,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor defer air.deinit(gpa); log.debug("analyze liveness of {s}", .{decl.name}); - var liveness = try Liveness.analyze(gpa, air); + var liveness = try Liveness.analyze(gpa, air, decl.namespace.file_scope.zir); defer liveness.deinit(gpa); if (std.builtin.mode == .Debug and self.verbose_air) { diff --git a/src/Liveness.zig b/src/Liveness.zig index 1402a5997b..838f19d4a1 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -7,11 +7,13 @@ //! * Switch Branches const Liveness = @This(); const std = @import("std"); -const Air = @import("Air.zig"); const trace = @import("tracy.zig").trace; const log = std.log.scoped(.liveness); const assert = std.debug.assert; const Allocator = std.mem.Allocator; +const Air = @import("Air.zig"); +const Zir = @import("Zir.zig"); +const Log2Int = std.math.Log2Int; /// This array is split into sets of 4 bits per AIR instruction. /// The MSB (0bX000) is whether the instruction is unreferenced. 
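
[A minimal sketch of the tomb-bit indexing the comment above describes --
illustrative only, not from the Zig tree. It assumes bpi = 4 bits per
instruction, so a 64-bit usize packs 16 instructions; the arithmetic mirrors
getTombBits/isUnused in the hunk below.]

const std = @import("std");
const bpi = 4; // bits per AIR instruction

fn tombBits(tomb_bits: []const usize, inst: u32) u4 {
    const per_word = @bitSizeOf(usize) / bpi; // 16 on 64-bit hosts
    const word = tomb_bits[(inst * bpi) / @bitSizeOf(usize)];
    const shift = @intCast(std.math.Log2Int(usize), (inst % per_word) * bpi);
    return @truncate(u4, word >> shift);
}

pub fn main() void {
    var words = [_]usize{0};
    // Mark instruction 1 unreferenced: its 4-bit set starts at bit 1 * bpi = 4,
    // and the MSB of the set (0b1000) is the "unreferenced" flag.
    words[0] |= @as(usize, 0b1000) << 4;
    std.debug.assert(tombBits(&words, 1) == 0b1000);
}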
@@ -44,7 +46,7 @@ pub const SwitchBr = struct { else_death_count: u32, }; -pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness { +pub fn analyze(gpa: *Allocator, air: Air, zir: Zir) Allocator.Error!Liveness { const tracy = trace(@src()); defer tracy.end(); @@ -58,6 +60,7 @@ pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness { ), .extra = .{}, .special = .{}, + .zir = &zir, }; errdefer gpa.free(a.tomb_bits); errdefer a.special.deinit(gpa); @@ -74,23 +77,32 @@ pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness { }; } +pub fn getTombBits(l: Liveness, inst: Air.Inst.Index) Bpi { + const usize_index = (inst * bpi) / @bitSizeOf(usize); + return @truncate(Bpi, l.tomb_bits[usize_index] >> + @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi)); +} + pub fn isUnused(l: Liveness, inst: Air.Inst.Index) bool { const usize_index = (inst * bpi) / @bitSizeOf(usize); - const mask = @as(usize, 1) << ((inst % (@bitSizeOf(usize) / bpi)) * bpi + (bpi - 1)); + const mask = @as(usize, 1) << + @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + (bpi - 1)); return (l.tomb_bits[usize_index] & mask) != 0; } pub fn operandDies(l: Liveness, inst: Air.Inst.Index, operand: OperandInt) bool { assert(operand < bpi - 1); const usize_index = (inst * bpi) / @bitSizeOf(usize); - const mask = @as(usize, 1) << ((inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); + const mask = @as(usize, 1) << + @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); return (l.tomb_bits[usize_index] & mask) != 0; } pub fn clearOperandDeath(l: *Liveness, inst: Air.Inst.Index, operand: OperandInt) void { assert(operand < bpi - 1); const usize_index = (inst * bpi) / @bitSizeOf(usize); - const mask = @as(usize, 1) << ((inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); + const mask = @as(usize, 1) << + @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); l.tomb_bits[usize_index] |= mask; } @@ -113,10 +125,12 @@ const Analysis = struct { tomb_bits: []usize, special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32), extra: std.ArrayListUnmanaged(u32), + zir: *const Zir, fn storeTombBits(a: *Analysis, inst: Air.Inst.Index, tomb_bits: Bpi) void { const usize_index = (inst * bpi) / @bitSizeOf(usize); - a.tomb_bits[usize_index] |= tomb_bits << (inst % (@bitSizeOf(usize) / bpi)) * bpi; + a.tomb_bits[usize_index] |= @as(usize, tomb_bits) << + @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi); } fn addExtra(a: *Analysis, extra: anytype) Allocator.Error!u32 { @@ -203,9 +217,11 @@ fn analyzeInst( return trackOperands(a, new_set, inst, main_tomb, .{ o.lhs, o.rhs, .none }); }, + .arg, .alloc, .br, .constant, + .const_ty, .breakpoint, .dbg_stmt, .varptr, @@ -255,15 +271,30 @@ fn analyzeInst( if (args.len <= bpi - 2) { var buf: [bpi - 1]Air.Inst.Ref = undefined; buf[0] = callee; - std.mem.copy(&buf, buf[1..], args); + std.mem.copy(Air.Inst.Ref, buf[1..], @bitCast([]const Air.Inst.Ref, args)); return trackOperands(a, new_set, inst, main_tomb, buf); } - @panic("TODO: liveness analysis for function with many args"); + @panic("TODO: liveness analysis for function with greater than 2 args"); }, .struct_field_ptr => { const extra = a.air.extraData(Air.StructField, inst_datas[inst].ty_pl.payload).data; return trackOperands(a, new_set, inst, main_tomb, .{ extra.struct_ptr, .none, .none }); }, + .assembly => { + const extra = a.air.extraData(Air.Asm, inst_datas[inst].ty_pl.payload); + const extended = 
a.zir.instructions.items(.data)[extra.data.zir_index].extended; + const outputs_len = @truncate(u5, extended.small); + const inputs_len = @truncate(u5, extended.small >> 5); + const outputs = a.air.extra[extra.end..][0..outputs_len]; + const inputs = a.air.extra[extra.end + outputs.len ..][0..inputs_len]; + if (outputs.len + inputs.len <= bpi - 1) { + var buf: [bpi - 1]Air.Inst.Ref = undefined; + std.mem.copy(Air.Inst.Ref, &buf, @bitCast([]const Air.Inst.Ref, outputs)); + std.mem.copy(Air.Inst.Ref, buf[outputs.len..], @bitCast([]const Air.Inst.Ref, inputs)); + return trackOperands(a, new_set, inst, main_tomb, buf); + } + @panic("TODO: liveness analysis for asm with greater than 3 args"); + }, .block => { const extra = a.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); const body = a.air.extra[extra.end..][0..extra.data.body_len]; @@ -287,8 +318,8 @@ fn analyzeInst( const then_body = a.air.extra[extra.end..][0..extra.data.then_body_len]; const else_body = a.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; - var then_table = std.AutoHashMap(Air.Inst.Index, void).init(gpa); - defer then_table.deinit(); + var then_table: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{}; + defer then_table.deinit(gpa); try analyzeWithContext(a, &then_table, then_body); // Reset the table back to its state from before the branch. @@ -299,8 +330,8 @@ fn analyzeInst( } } - var else_table = std.AutoHashMap(Air.Inst.Index, void).init(gpa); - defer else_table.deinit(); + var else_table: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{}; + defer else_table.deinit(gpa); try analyzeWithContext(a, &else_table, else_body); var then_entry_deaths = std.ArrayList(Air.Inst.Index).init(gpa); @@ -331,7 +362,7 @@ fn analyzeInst( } // Now we have to correctly populate new_set. if (new_set) |ns| { - try ns.ensureCapacity(@intCast(u32, ns.count() + then_table.count() + else_table.count())); + try ns.ensureCapacity(gpa, @intCast(u32, ns.count() + then_table.count() + else_table.count())); var it = then_table.keyIterator(); while (it.next()) |key| { _ = ns.putAssumeCapacity(key.*, {}); @@ -344,7 +375,7 @@ fn analyzeInst( const then_death_count = @intCast(u32, then_entry_deaths.items.len); const else_death_count = @intCast(u32, else_entry_deaths.items.len); - try a.extra.ensureUnusedCapacity(std.meta.fields(@TypeOf(CondBr)).len + + try a.extra.ensureUnusedCapacity(gpa, std.meta.fields(Air.CondBr).len + then_death_count + else_death_count); const extra_index = a.addExtraAssumeCapacity(CondBr{ .then_death_count = then_death_count, @@ -352,7 +383,7 @@ fn analyzeInst( }); a.extra.appendSliceAssumeCapacity(then_entry_deaths.items); a.extra.appendSliceAssumeCapacity(else_entry_deaths.items); - try a.special.put(inst, extra_index); + try a.special.put(gpa, inst, extra_index); // Continue on with the instruction analysis. The following code will find the condition // instruction, and the deaths flag for the CondBr instruction will indicate whether the @@ -438,12 +469,12 @@ fn analyzeInst( }); for (case_deaths[0 .. 
case_deaths.len - 1]) |*cd| { const case_death_count = @intCast(u32, cd.items.len); - try a.extra.ensureUnusedCapacity(1 + case_death_count + else_death_count); + try a.extra.ensureUnusedCapacity(gpa, 1 + case_death_count + else_death_count); a.extra.appendAssumeCapacity(case_death_count); a.extra.appendSliceAssumeCapacity(cd.items); } a.extra.appendSliceAssumeCapacity(case_deaths[case_deaths.len - 1].items); - try a.special.put(inst, extra_index); + try a.special.put(gpa, inst, extra_index); return trackOperands(a, new_set, inst, main_tomb, .{ condition, .none, .none }); }, @@ -452,7 +483,7 @@ fn analyzeInst( fn trackOperands( a: *Analysis, - new_set: ?*std.AutoHashMap(Air.Inst.Index, void), + new_set: ?*std.AutoHashMapUnmanaged(Air.Inst.Index, void), inst: Air.Inst.Index, main_tomb: bool, operands: [bpi - 1]Air.Inst.Ref, @@ -468,12 +499,12 @@ fn trackOperands( tomb_bits <<= 1; const op_int = @enumToInt(operands[i]); if (op_int < Air.Inst.Ref.typed_value_map.len) continue; - const operand: Air.Inst.Index = op_int - Air.Inst.Ref.typed_value_map.len; + const operand: Air.Inst.Index = op_int - @intCast(u32, Air.Inst.Ref.typed_value_map.len); const prev = try table.fetchPut(gpa, operand, {}); if (prev == null) { // Death. tomb_bits |= 1; - if (new_set) |ns| try ns.putNoClobber(operand, {}); + if (new_set) |ns| try ns.putNoClobber(gpa, operand, {}); } } a.storeTombBits(inst, tomb_bits); diff --git a/src/Module.zig b/src/Module.zig index 5972c2bdcf..7ec9c7e93d 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1225,6 +1225,30 @@ pub const Scope = struct { pub fn getFileScope(block: *Block) *Scope.File { return block.src_decl.namespace.file_scope; } + + pub fn addTyOp( + block: *Block, + tag: Air.Inst.Tag, + ty: Type, + operand: Air.Inst.Ref, + ) error{OutOfMemory}!Air.Inst.Ref { + const sema = block.sema; + const gpa = sema.gpa; + + try sema.air_instructions.ensureUnusedCapacity(gpa, 1); + try block.instructions.ensureUnusedCapacity(gpa, 1); + + const inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + sema.air_instructions.appendAssumeCapacity(.{ + .tag = tag, + .data = .{ .ty_op = .{ + .ty = try sema.addType(ty), + .operand = operand, + } }, + }); + block.instructions.appendAssumeCapacity(inst); + return Sema.indexToRef(inst); + } }; }; @@ -3408,7 +3432,7 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { defer decl.value_arena.?.* = arena.state; const fn_ty = decl.ty; - const param_inst_list = try gpa.alloc(Air.Inst.Index, fn_ty.fnParamLen()); + const param_inst_list = try gpa.alloc(Air.Inst.Ref, fn_ty.fnParamLen()); defer gpa.free(param_inst_list); var sema: Sema = .{ @@ -3440,10 +3464,13 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { defer inner_block.instructions.deinit(gpa); // AIR requires the arg parameters to be the first N instructions. 
+ try inner_block.instructions.ensureTotalCapacity(gpa, param_inst_list.len); for (param_inst_list) |*param_inst, param_index| { const param_type = fn_ty.fnParamType(param_index); const ty_ref = try sema.addType(param_type); - param_inst.* = @intCast(u32, sema.air_instructions.len); + const arg_index = @intCast(u32, sema.air_instructions.len); + inner_block.instructions.appendAssumeCapacity(arg_index); + param_inst.* = Sema.indexToRef(arg_index); try sema.air_instructions.append(gpa, .{ .tag = .arg, .data = .{ @@ -3454,7 +3481,6 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { }, }); } - try inner_block.instructions.appendSlice(gpa, param_inst_list); func.state = .in_progress; log.debug("set {s} to in_progress", .{decl.name}); @@ -4043,13 +4069,11 @@ pub fn floatMul( } pub fn simplePtrType( - mod: *Module, arena: *Allocator, elem_ty: Type, mutable: bool, size: std.builtin.TypeInfo.Pointer.Size, ) Allocator.Error!Type { - _ = mod; if (!mutable and size == .Slice and elem_ty.eql(Type.initTag(.u8))) { return Type.initTag(.const_slice_u8); } diff --git a/src/Sema.zig b/src/Sema.zig index 54c42a482d..fc130cd4a4 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -36,7 +36,7 @@ func: ?*Module.Fn, /// > Denormalized data to make `resolveInst` faster. This is 0 if not inside a function, /// > otherwise it is the number of parameters of the function. /// > param_count: u32 -param_inst_list: []const Air.Inst.Index, +param_inst_list: []const Air.Inst.Ref, branch_quota: u32 = 1000, branch_count: u32 = 0, /// This field is updated when a new source location becomes active, so that @@ -59,8 +59,6 @@ const TypedValue = @import("TypedValue.zig"); const Air = @import("Air.zig"); const Zir = @import("Zir.zig"); const Module = @import("Module.zig"); -const Inst = ir.Inst; -const Body = ir.Body; const trace = @import("tracy.zig").trace; const Scope = Module.Scope; const InnerError = Module.InnerError; @@ -117,7 +115,7 @@ pub fn analyzeFnBody( /// Returns only the result from the body that is specified. /// Only appropriate to call when it is determined at comptime that this body /// has no peers. 
-fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) InnerError!Air.Inst.Index { +fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) InnerError!Air.Inst.Ref { const break_inst = try sema.analyzeBody(block, body); const operand_ref = sema.code.instructions.items(.data)[break_inst].@"break".operand; return sema.resolveInst(operand_ref); @@ -513,7 +511,7 @@ pub fn analyzeBody( // const break_inst = try sema.analyzeBody(block, inline_body); // const break_data = datas[break_inst].@"break"; // if (inst == break_data.block_inst) { - // break :blk try sema.resolveInst(break_data.operand); + // break :blk sema.resolveInst(break_data.operand); // } else { // return break_inst; // } @@ -529,12 +527,12 @@ pub fn analyzeBody( // const break_inst = try sema.analyzeBody(block, inline_body); // const break_data = datas[break_inst].@"break"; // if (inst == break_data.block_inst) { - // break :blk try sema.resolveInst(break_data.operand); + // break :blk sema.resolveInst(break_data.operand); // } else { // return break_inst; // } //}, - else => @panic("TODO remove else prong"), + else => @panic("TODO finish updating Sema for AIR memory layout changes and then remove this else prong"), }; if (sema.getAirType(air_inst).isNoReturn()) return always_noreturn; @@ -543,7 +541,7 @@ pub fn analyzeBody( } } -fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const extended = sema.code.instructions.items(.data)[inst].extended; switch (extended.opcode) { // zig fmt: off @@ -598,7 +596,7 @@ fn resolveConstBool( src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) !bool { - const air_inst = try sema.resolveInst(zir_ref); + const air_inst = sema.resolveInst(zir_ref); const wanted_type = Type.initTag(.bool); const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst); @@ -611,7 +609,7 @@ fn resolveConstString( src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) ![]u8 { - const air_inst = try sema.resolveInst(zir_ref); + const air_inst = sema.resolveInst(zir_ref); const wanted_type = Type.initTag(.const_slice_u8); const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst); @@ -619,24 +617,39 @@ fn resolveConstString( } pub fn resolveType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) !Type { - const air_inst = try sema.resolveInst(zir_ref); + const air_inst = sema.resolveInst(zir_ref); return sema.resolveAirAsType(block, src, air_inst); } -fn resolveAirAsType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, air_inst: Air.Inst.Index) !Type { +fn resolveAirAsType( + sema: *Sema, + block: *Scope.Block, + src: LazySrcLoc, + air_inst: Air.Inst.Ref, +) !Type { const wanted_type = Type.initTag(.@"type"); const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst); return val.toType(sema.arena); } -fn resolveConstValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: Air.Inst.Index) !Value { - return (try sema.resolveDefinedValue(block, src, base)) orelse +fn resolveConstValue( + sema: *Sema, + block: *Scope.Block, + src: LazySrcLoc, + air_ref: Air.Inst.Ref, +) !Value { + return (try sema.resolveDefinedValue(block, src, air_ref)) orelse return sema.failWithNeededComptime(block, src); } 
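
// [Sketch, not shown in this diff: the patch calls Sema.indexToRef (in the
// Module.zig hunk above) and refToIndex (in zirArg below) without their
// definitions. A plausible pair, consistent with how getRefType and
// resolvePossiblyUndefinedValue treat refs below typed_value_map.len as
// well-known constants and everything past it as instruction indexes:]
const ref_start_index = @intCast(u32, Air.Inst.Ref.typed_value_map.len);

pub fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref {
    // Instruction indexes are offset past the block of well-known constants.
    return @intToEnum(Air.Inst.Ref, ref_start_index + inst);
}

pub fn refToIndex(ref: Air.Inst.Ref) ?Air.Inst.Index {
    const ref_int = @enumToInt(ref);
    if (ref_int >= ref_start_index) {
        return ref_int - ref_start_index;
    } else {
        return null; // a comptime-known value, not an instruction
    }
}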
-fn resolveDefinedValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: Air.Inst.Index) !?Value { - if (try sema.resolvePossiblyUndefinedValue(block, src, base)) |val| { +fn resolveDefinedValue( + sema: *Sema, + block: *Scope.Block, + src: LazySrcLoc, + air_ref: Air.Inst.Ref, +) !?Value { + if (try sema.resolvePossiblyUndefinedValue(block, src, air_ref)) |val| { if (val.isUndef()) { return sema.failWithUseOfUndef(block, src); } @@ -649,13 +662,29 @@ fn resolvePossiblyUndefinedValue( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - base: Air.Inst.Index, + air_ref: Air.Inst.Ref, ) !?Value { - if (try sema.typeHasOnePossibleValue(block, src, base.ty)) |opv| { + const ty = sema.getTypeOfAirRef(air_ref); + if (try sema.typeHasOnePossibleValue(block, src, ty)) |opv| { return opv; } - const inst = base.castTag(.constant) orelse return null; - return inst.val; + // First section of indexes correspond to a set number of constant values. + var i: usize = @enumToInt(air_ref); + if (i < Air.Inst.Ref.typed_value_map.len) { + return Air.Inst.Ref.typed_value_map[i].val; + } + i -= Air.Inst.Ref.typed_value_map.len; + + switch (sema.air_instructions.items(.tag)[i]) { + .constant => { + const ty_pl = sema.air_instructions.items(.data)[i].ty_pl; + return sema.air_values.items[ty_pl.payload]; + }, + .const_ty => { + return sema.air_instructions.items(.data)[i].ty.toValue(undefined) catch unreachable; + }, + else => return null, + } } fn failWithNeededComptime(sema: *Sema, block: *Scope.Block, src: LazySrcLoc) InnerError { @@ -677,7 +706,7 @@ fn resolveAlreadyCoercedInt( comptime Int: type, ) !Int { comptime assert(@typeInfo(Int).Int.bits <= 64); - const air_inst = try sema.resolveInst(zir_ref); + const air_inst = sema.resolveInst(zir_ref); const val = try sema.resolveConstValue(block, src, air_inst); switch (@typeInfo(Int).Int.signedness) { .signed => return @intCast(Int, val.toSignedInt()), @@ -692,7 +721,7 @@ fn resolveInt( zir_ref: Zir.Inst.Ref, dest_type: Type, ) !u64 { - const air_inst = try sema.resolveInst(zir_ref); + const air_inst = sema.resolveInst(zir_ref); const coerced = try sema.coerce(block, dest_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced); @@ -705,21 +734,21 @@ pub fn resolveInstConst( src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) InnerError!TypedValue { - const air_inst = try sema.resolveInst(zir_ref); - const val = try sema.resolveConstValue(block, src, air_inst); + const air_ref = sema.resolveInst(zir_ref); + const val = try sema.resolveConstValue(block, src, air_ref); return TypedValue{ - .ty = air_inst.ty, + .ty = sema.getTypeOfAirRef(air_ref), .val = val, }; } -fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO implement zir_sema.zirBitcastResultPtr", .{}); } -fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = inst; const tracy = trace(@src()); defer tracy.end(); @@ -754,7 +783,7 @@ fn zirStructDecl( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const small = 
@bitCast(Zir.Inst.StructDecl.Small, extended.small); const src: LazySrcLoc = if (small.has_src_node) blk: { const node_offset = @bitCast(i32, sema.code.extra[extended.operand]); @@ -825,7 +854,7 @@ fn zirEnumDecl( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1022,7 +1051,7 @@ fn zirUnionDecl( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1086,7 +1115,7 @@ fn zirOpaqueDecl( block: *Scope.Block, inst: Zir.Inst.Index, name_strategy: Zir.Inst.NameStrategy, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1106,7 +1135,7 @@ fn zirErrorSetDecl( block: *Scope.Block, inst: Zir.Inst.Index, name_strategy: Zir.Inst.NameStrategy, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1146,7 +1175,7 @@ fn zirRetPtr( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1154,16 +1183,16 @@ fn zirRetPtr( try sema.requireFunctionBlock(block, src); const fn_ty = sema.func.?.owner_decl.ty; const ret_type = fn_ty.fnReturnType(); - const ptr_type = try sema.mod.simplePtrType(sema.arena, ret_type, true, .One); + const ptr_type = try Module.simplePtrType(sema.arena, ret_type, true, .One); return block.addNoOp(src, ptr_type, .alloc); } -fn zirRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_tok; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); return sema.analyzeRef(block, inst_data.src(), operand); } @@ -1171,7 +1200,7 @@ fn zirRetType( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1187,7 +1216,7 @@ fn zirEnsureResultUsed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) I defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); const src = inst_data.src(); return sema.ensureResultUsed(block, operand, src); @@ -1196,7 +1225,7 @@ fn zirEnsureResultUsed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) I fn ensureResultUsed( sema: *Sema, block: *Scope.Block, - operand: Air.Inst.Index, + operand: Air.Inst.Ref, src: LazySrcLoc, ) InnerError!void { switch (operand.ty.zigTypeTag()) { @@ -1210,7 +1239,7 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); const src = inst_data.src(); switch (operand.ty.zigTypeTag()) { .ErrorSet, .ErrorUnion => return sema.mod.fail(&block.base, src, "error is discarded", .{}), @@ -1218,13 +1247,13 @@ fn zirEnsureResultNonError(sema: *Sema, 
block: *Scope.Block, inst: Zir.Inst.Inde } } -fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const array_ptr = try sema.resolveInst(inst_data.operand); + const array_ptr = sema.resolveInst(inst_data.operand); const elem_ty = array_ptr.ty.elemType(); if (!elem_ty.isIndexable()) { @@ -1267,7 +1296,7 @@ fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air // Set the name of the Air.Arg instruction for use by codegen debug info. const air_arg = sema.param_inst_list[arg_index]; - sema.air_instructions.items(.data)[air_arg].ty_str.str = inst_data.start; + sema.air_instructions.items(.data)[refToIndex(air_arg).?].ty_str.str = inst_data.start; return air_arg; } @@ -1275,13 +1304,13 @@ fn zirAllocExtended( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand); const src: LazySrcLoc = .{ .node_offset = extra.data.src_node }; return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocExtended", .{}); } -fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1289,7 +1318,7 @@ fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne const src = inst_data.src(); const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const var_type = try sema.resolveType(block, ty_src, inst_data.operand); - const ptr_type = try sema.mod.simplePtrType(sema.arena, var_type, true, .One); + const ptr_type = try Module.simplePtrType(sema.arena, var_type, true, .One); const val_payload = try sema.arena.create(Value.Payload.ComptimeAlloc); val_payload.* = .{ @@ -1304,13 +1333,13 @@ fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne }); } -fn zirAllocInferredComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAllocInferredComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const src_node = sema.code.instructions.items(.data)[inst].node; const src: LazySrcLoc = .{ .node_offset = src_node }; return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocInferredComptime", .{}); } -fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1318,12 +1347,12 @@ fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!A const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const var_decl_src = inst_data.src(); const var_type = try sema.resolveType(block, ty_src, inst_data.operand); - const ptr_type = try sema.mod.simplePtrType(sema.arena, var_type, true, .One); + const ptr_type = try Module.simplePtrType(sema.arena, var_type, true, .One); try sema.requireRuntimeBlock(block, var_decl_src); return block.addNoOp(var_decl_src, ptr_type, .alloc); } -fn 
zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1332,7 +1361,7 @@ fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const var_type = try sema.resolveType(block, ty_src, inst_data.operand); try sema.validateVarType(block, ty_src, var_type); - const ptr_type = try sema.mod.simplePtrType(sema.arena, var_type, true, .One); + const ptr_type = try Module.simplePtrType(sema.arena, var_type, true, .One); try sema.requireRuntimeBlock(block, var_decl_src); return block.addNoOp(var_decl_src, ptr_type, .alloc); } @@ -1342,7 +1371,7 @@ fn zirAllocInferred( block: *Scope.Block, inst: Zir.Inst.Index, inferred_alloc_ty: Type, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1372,7 +1401,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde const inst_data = sema.code.instructions.items(.data)[inst].un_node; const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; - const ptr = try sema.resolveInst(inst_data.operand); + const ptr = sema.resolveInst(inst_data.operand); const ptr_val = ptr.castTag(.constant).?.val; const inferred_alloc = ptr_val.castTag(.inferred_alloc).?; const peer_inst_list = inferred_alloc.data.stored_inst_list.items; @@ -1385,7 +1414,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde if (var_is_mut) { try sema.validateVarType(block, ty_src, final_elem_ty); } - const final_ptr_ty = try sema.mod.simplePtrType(sema.arena, final_elem_ty, true, .One); + const final_ptr_ty = try Module.simplePtrType(sema.arena, final_elem_ty, true, .One); // Change it to a normal alloc. ptr.ty = final_ptr_ty; @@ -1406,7 +1435,7 @@ fn zirValidateStructInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Ind const struct_obj: *Module.Struct = s: { const field_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node; const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; - const object_ptr = try sema.resolveInst(field_ptr_extra.lhs); + const object_ptr = sema.resolveInst(field_ptr_extra.lhs); break :s object_ptr.ty.elemType().castTag(.@"struct").?.data; }; @@ -1535,9 +1564,9 @@ fn zirStoreToBlockPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In // to omit it. return; } - const ptr = try sema.resolveInst(bin_inst.lhs); - const value = try sema.resolveInst(bin_inst.rhs); - const ptr_ty = try sema.mod.simplePtrType(sema.arena, value.ty, true, .One); + const ptr = sema.resolveInst(bin_inst.lhs); + const value = sema.resolveInst(bin_inst.rhs); + const ptr_ty = try Module.simplePtrType(sema.arena, value.ty, true, .One); // TODO detect when this store should be done at compile-time. For example, // if expressions should force it when the condition is compile-time known. 
const src: LazySrcLoc = .unneeded; @@ -1552,14 +1581,14 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) const src: LazySrcLoc = .unneeded; const bin_inst = sema.code.instructions.items(.data)[inst].bin; - const ptr = try sema.resolveInst(bin_inst.lhs); - const value = try sema.resolveInst(bin_inst.rhs); + const ptr = sema.resolveInst(bin_inst.lhs); + const value = sema.resolveInst(bin_inst.rhs); const inferred_alloc = ptr.castTag(.constant).?.val.castTag(.inferred_alloc).?; // Add the stored instruction to the set we will use to resolve peer types // for the inferred allocation. try inferred_alloc.data.stored_inst_list.append(sema.arena, value); // Create a runtime bitcast instruction with exactly the type the pointer wants. - const ptr_ty = try sema.mod.simplePtrType(sema.arena, value.ty, true, .One); + const ptr_ty = try Module.simplePtrType(sema.arena, value.ty, true, .One); try sema.requireRuntimeBlock(block, src); const bitcasted_ptr = try block.addUnOp(src, ptr_ty, .bitcast, ptr); return sema.storePtr(block, src, bitcasted_ptr, value); @@ -1578,8 +1607,8 @@ fn zirStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!v defer tracy.end(); const bin_inst = sema.code.instructions.items(.data)[inst].bin; - const ptr = try sema.resolveInst(bin_inst.lhs); - const value = try sema.resolveInst(bin_inst.rhs); + const ptr = sema.resolveInst(bin_inst.lhs); + const value = sema.resolveInst(bin_inst.rhs); return sema.storePtr(block, sema.src, ptr, value); } @@ -1590,18 +1619,18 @@ fn zirStoreNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const ptr = try sema.resolveInst(extra.lhs); - const value = try sema.resolveInst(extra.rhs); + const ptr = sema.resolveInst(extra.lhs); + const value = sema.resolveInst(extra.rhs); return sema.storePtr(block, src, ptr, value); } -fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const src: LazySrcLoc = .unneeded; const inst_data = sema.code.instructions.items(.data)[inst].param_type; - const fn_inst = try sema.resolveInst(inst_data.callee); + const fn_inst = sema.resolveInst(inst_data.callee); const param_index = inst_data.param_index; const fn_ty: Type = switch (fn_inst.ty.zigTypeTag()) { @@ -1631,7 +1660,7 @@ fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.mod.constType(sema.arena, src, param_type); } -fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1659,7 +1688,7 @@ fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air return sema.analyzeDeclRef(block, .unneeded, new_decl); } -fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -1668,7 +1697,7 @@ fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air return 
sema.mod.constIntUnsigned(sema.arena, .unneeded, Type.initTag(.comptime_int), int); } -fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -1686,7 +1715,7 @@ fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! }); } -fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].float; @@ -1699,7 +1728,7 @@ fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!A }); } -fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; @@ -1728,7 +1757,7 @@ fn zirCompileLog( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { var managed = sema.mod.compile_log_text.toManaged(sema.gpa); defer sema.mod.compile_log_text = managed.moveToUnmanaged(); const writer = managed.writer(); @@ -1741,7 +1770,7 @@ fn zirCompileLog( for (args) |arg_ref, i| { if (i != 0) try writer.print(", ", .{}); - const arg = try sema.resolveInst(arg_ref); + const arg = sema.resolveInst(arg_ref); if (try sema.resolvePossiblyUndefinedValue(block, src, arg)) |val| { try writer.print("@as({}, {})", .{ arg.ty, val }); } else { @@ -1773,12 +1802,12 @@ fn zirRepeat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
fn zirPanic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Zir.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src: LazySrcLoc = inst_data.src(); - const msg_inst = try sema.resolveInst(inst_data.operand); + const msg_inst = sema.resolveInst(inst_data.operand); return sema.panicWithMsg(block, src, msg_inst); } -fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1843,7 +1872,7 @@ fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerE return sema.analyzeBlockBody(parent_block, src, &child_block, merges); } -fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1853,13 +1882,13 @@ fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) Inn return sema.mod.fail(&parent_block.base, src, "TODO: implement Sema.zirCImport", .{}); } -fn zirSuspendBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirSuspendBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&parent_block.base, src, "TODO: implement Sema.zirSuspendBlock", .{}); } -fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1917,7 +1946,7 @@ fn resolveBlockBody( child_block: *Scope.Block, body: []const Zir.Inst.Index, merges: *Scope.Block.Merges, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { _ = try sema.analyzeBody(child_block, body); return sema.analyzeBlockBody(parent_block, src, child_block, merges); } @@ -1928,7 +1957,7 @@ fn analyzeBlockBody( src: LazySrcLoc, child_block: *Scope.Block, merges: *Scope.Block.Merges, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2088,7 +2117,7 @@ fn zirBreak(sema: *Sema, start_block: *Scope.Block, inst: Zir.Inst.Index) InnerE const inst_data = sema.code.instructions.items(.data)[inst].@"break"; const src = sema.src; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); const zir_block = inst_data.block_inst; var block = start_block; @@ -2136,7 +2165,7 @@ fn zirDbgStmt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError _ = try block.addDbgStmt(.unneeded, inst_data.line, inst_data.column); } -fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); const decl_name = inst_data.get(sema.code); @@ -2144,7 +2173,7 @@ fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.analyzeDeclRef(block, src, decl); } -fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: 
Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); const decl_name = inst_data.get(sema.code); @@ -2198,7 +2227,7 @@ fn zirCall( inst: Zir.Inst.Index, modifier: std.builtin.CallOptions.Modifier, ensure_result_used: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2208,12 +2237,12 @@ fn zirCall( const extra = sema.code.extraData(Zir.Inst.Call, inst_data.payload_index); const args = sema.code.refSlice(extra.end, extra.data.args_len); - const func = try sema.resolveInst(extra.data.callee); + const func = sema.resolveInst(extra.data.callee); // TODO handle function calls of generic functions - const resolved_args = try sema.arena.alloc(Air.Inst.Index, args.len); + const resolved_args = try sema.arena.alloc(Air.Inst.Ref, args.len); for (args) |zir_arg, i| { // the args are already casted to the result of a param type instruction. - resolved_args[i] = try sema.resolveInst(zir_arg); + resolved_args[i] = sema.resolveInst(zir_arg); } return sema.analyzeCall(block, func, func_src, call_src, modifier, ensure_result_used, resolved_args); @@ -2222,13 +2251,13 @@ fn zirCall( fn analyzeCall( sema: *Sema, block: *Scope.Block, - func: Air.Inst.Index, + func: Air.Inst.Ref, func_src: LazySrcLoc, call_src: LazySrcLoc, modifier: std.builtin.CallOptions.Modifier, ensure_result_used: bool, - args: []const Air.Inst.Index, -) InnerError!Air.Inst.Index { + args: []const Air.Inst.Ref, +) InnerError!Air.Inst.Ref { if (func.ty.zigTypeTag() != .Fn) return sema.mod.fail(&block.base, func_src, "type '{}' not a function", .{func.ty}); @@ -2285,7 +2314,7 @@ fn analyzeCall( const is_comptime_call = block.is_comptime or modifier == .compile_time; const is_inline_call = is_comptime_call or modifier == .always_inline or func.ty.fnCallingConvention() == .Inline; - const result: Air.Inst.Index = if (is_inline_call) res: { + const result: Air.Inst.Ref = if (is_inline_call) res: { const func_val = try sema.resolveConstValue(block, func_src, func); const module_fn = switch (func_val.tag()) { .function => func_val.castTag(.function).?.data, @@ -2383,7 +2412,7 @@ fn analyzeCall( return result; } -fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2395,7 +2424,7 @@ fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.mod.constType(sema.arena, src, ty); } -fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2407,7 +2436,7 @@ fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner return sema.mod.constType(sema.arena, src, opt_type); } -fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const array_type = try sema.resolveType(block, src, inst_data.operand); @@ -2415,7 +2444,7 
@@ fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.mod.constType(sema.arena, src, elem_type); } -fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -2430,7 +2459,7 @@ fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return sema.mod.constType(sema.arena, src, vector_type); } -fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2443,7 +2472,7 @@ fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.mod.constType(sema.arena, .unneeded, array_ty); } -fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2458,7 +2487,7 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) return sema.mod.constType(sema.arena, .unneeded, array_ty); } -fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2471,7 +2500,7 @@ fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner return sema.mod.constType(sema.arena, src, anyframe_type); } -fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2492,7 +2521,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn return sema.mod.constType(sema.arena, src, err_union_ty); } -fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2511,14 +2540,14 @@ fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr }); } -fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const op = try sema.resolveInst(inst_data.operand); + const op = sema.resolveInst(inst_data.operand); const op_coerced = try sema.coerce(block, Type.initTag(.anyerror), op, operand_src); const result_ty = Type.initTag(.u16); @@ -2541,7 +2570,7 @@ fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return block.addUnOp(src, result_ty, 
.bitcast, op_coerced); } -fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2549,7 +2578,7 @@ fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const op = try sema.resolveInst(inst_data.operand); + const op = sema.resolveInst(inst_data.operand); if (try sema.resolveDefinedValue(block, operand_src, op)) |value| { const int = value.toUnsignedInt(); @@ -2574,7 +2603,7 @@ fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return block.addUnOp(src, Type.initTag(.anyerror), .bitcast, op); } -fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2583,8 +2612,8 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; - const lhs = try sema.resolveInst(extra.lhs); - const rhs = try sema.resolveInst(extra.rhs); + const lhs = sema.resolveInst(extra.lhs); + const rhs = sema.resolveInst(extra.rhs); if (rhs.ty.zigTypeTag() == .Bool and lhs.ty.zigTypeTag() == .Bool) { const msg = msg: { const msg = try sema.mod.errMsg(&block.base, lhs_src, "expected error set type, found 'bool'", .{}); @@ -2664,7 +2693,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn }); } -fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2678,15 +2707,15 @@ fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE }); } -fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const mod = sema.mod; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); - const enum_tag: Air.Inst.Index = switch (operand.ty.zigTypeTag()) { + const enum_tag: Air.Inst.Ref = switch (operand.ty.zigTypeTag()) { .Enum => operand, .Union => { //if (!operand.ty.unionHasTag()) { @@ -2760,7 +2789,7 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return block.addUnOp(src, int_tag_ty, .bitcast, enum_tag); } -fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const mod = sema.mod; const target = mod.getTarget(); const arena = sema.arena; @@ -2770,7 +2799,7 @@ fn zirIntToEnum(sema: 
*Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); - const operand = try sema.resolveInst(extra.rhs); + const operand = sema.resolveInst(extra.rhs); if (dest_ty.zigTypeTag() != .Enum) { return mod.fail(&block.base, dest_ty_src, "expected enum, found {}", .{dest_ty}); @@ -2821,12 +2850,12 @@ fn zirOptionalPayloadPtr( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const optional_ptr = try sema.resolveInst(inst_data.operand); + const optional_ptr = sema.resolveInst(inst_data.operand); assert(optional_ptr.ty.zigTypeTag() == .Pointer); const src = inst_data.src(); @@ -2836,7 +2865,7 @@ fn zirOptionalPayloadPtr( } const child_type = try opt_type.optionalChildAlloc(sema.arena); - const child_pointer = try sema.mod.simplePtrType(sema.arena, child_type, !optional_ptr.ty.isConstPtr(), .One); + const child_pointer = try Module.simplePtrType(sema.arena, child_type, !optional_ptr.ty.isConstPtr(), .One); if (optional_ptr.value()) |pointer_val| { const val = try pointer_val.pointerDeref(sema.arena); @@ -2864,13 +2893,13 @@ fn zirOptionalPayload( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); const opt_type = operand.ty; if (opt_type.zigTypeTag() != .Optional) { return sema.mod.fail(&block.base, src, "expected optional type, found {}", .{opt_type}); @@ -2902,13 +2931,13 @@ fn zirErrUnionPayload( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); if (operand.ty.zigTypeTag() != .ErrorUnion) return sema.mod.fail(&block.base, operand.src, "expected error union type, found '{}'", .{operand.ty}); @@ -2936,19 +2965,19 @@ fn zirErrUnionPayloadPtr( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); assert(operand.ty.zigTypeTag() == .Pointer); if (operand.ty.elemType().zigTypeTag() != .ErrorUnion) return sema.mod.fail(&block.base, src, "expected error union type, found {}", .{operand.ty.elemType()}); - const operand_pointer_ty = try sema.mod.simplePtrType(sema.arena, operand.ty.elemType().castTag(.error_union).?.data.payload, !operand.ty.isConstPtr(), .One); + const operand_pointer_ty = try Module.simplePtrType(sema.arena, operand.ty.elemType().castTag(.error_union).?.data.payload, 
!operand.ty.isConstPtr(), .One); if (operand.value()) |pointer_val| { const val = try pointer_val.pointerDeref(sema.arena); @@ -2975,13 +3004,13 @@ fn zirErrUnionPayloadPtr( } /// Value in, value out -fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); if (operand.ty.zigTypeTag() != .ErrorUnion) return sema.mod.fail(&block.base, src, "expected error union type, found '{}'", .{operand.ty}); @@ -3001,13 +3030,13 @@ fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner } /// Pointer in, value out -fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); assert(operand.ty.zigTypeTag() == .Pointer); if (operand.ty.elemType().zigTypeTag() != .ErrorUnion) @@ -3035,7 +3064,7 @@ fn zirEnsureErrPayloadVoid(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde const inst_data = sema.code.instructions.items(.data)[inst].un_tok; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); if (operand.ty.zigTypeTag() != .ErrorUnion) return sema.mod.fail(&block.base, src, "expected error union type, found '{}'", .{operand.ty}); if (operand.ty.castTag(.error_union).?.data.payload.zigTypeTag() != .Void) { @@ -3048,7 +3077,7 @@ fn zirFunc( block: *Scope.Block, inst: Zir.Inst.Index, inferred_error_set: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3099,7 +3128,7 @@ fn funcCommon( is_extern: bool, src_locs: Zir.Inst.Func.SrcLocs, opt_lib_name: ?[]const u8, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = src_node_offset }; const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset }; const bare_return_type = try sema.resolveType(block, ret_ty_src, zir_return_type); @@ -3240,7 +3269,7 @@ fn funcCommon( return result; } -fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3248,7 +3277,7 @@ fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air. 
return sema.analyzeAs(block, .unneeded, bin_inst.lhs, bin_inst.rhs); } -fn zirAsNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAsNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3264,18 +3293,18 @@ fn analyzeAs( src: LazySrcLoc, zir_dest_type: Zir.Inst.Ref, zir_operand: Zir.Inst.Ref, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const dest_type = try sema.resolveType(block, src, zir_dest_type); - const operand = try sema.resolveInst(zir_operand); + const operand = sema.resolveInst(zir_operand); return sema.coerce(block, dest_type, operand, src); } -fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const ptr = try sema.resolveInst(inst_data.operand); + const ptr = sema.resolveInst(inst_data.operand); if (ptr.ty.zigTypeTag() != .Pointer) { const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; return sema.mod.fail(&block.base, ptr_src, "expected pointer, found '{}'", .{ptr.ty}); @@ -3287,7 +3316,7 @@ fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return block.addUnOp(src, ty, .ptrtoint, ptr); } -fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3296,7 +3325,7 @@ fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; const field_name = sema.code.nullTerminatedString(extra.field_name_start); - const object = try sema.resolveInst(extra.lhs); + const object = sema.resolveInst(extra.lhs); const object_ptr = if (object.ty.zigTypeTag() == .Pointer) object else @@ -3305,7 +3334,7 @@ fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.analyzeLoad(block, src, result_ptr, result_ptr.src); } -fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3314,11 +3343,11 @@ fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; const field_name = sema.code.nullTerminatedString(extra.field_name_start); - const object_ptr = try sema.resolveInst(extra.lhs); + const object_ptr = sema.resolveInst(extra.lhs); return sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src); } -fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3326,14 +3355,14 @@ fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
Inne const src = inst_data.src(); const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data; - const object = try sema.resolveInst(extra.lhs); + const object = sema.resolveInst(extra.lhs); const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name); const object_ptr = try sema.analyzeRef(block, src, object); const result_ptr = try sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src); return sema.analyzeLoad(block, src, result_ptr, src); } -fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3341,12 +3370,12 @@ fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne const src = inst_data.src(); const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data; - const object_ptr = try sema.resolveInst(extra.lhs); + const object_ptr = sema.resolveInst(extra.lhs); const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name); return sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src); } -fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3357,7 +3386,7 @@ fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const dest_type = try sema.resolveType(block, dest_ty_src, extra.lhs); - const operand = try sema.resolveInst(extra.rhs); + const operand = sema.resolveInst(extra.rhs); const dest_is_comptime_int = switch (dest_type.zigTypeTag()) { .ComptimeInt => true, @@ -3389,20 +3418,21 @@ fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten int", .{}); } -fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const dest_type = try sema.resolveType(block, dest_ty_src, extra.lhs); - const operand = try sema.resolveInst(extra.rhs); - return sema.bitcast(block, dest_type, operand); + const operand = sema.resolveInst(extra.rhs); + return sema.bitcast(block, dest_type, operand, operand_src); } -fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3413,7 +3443,7 @@ fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr const extra = 
sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const dest_type = try sema.resolveType(block, dest_ty_src, extra.lhs); - const operand = try sema.resolveInst(extra.rhs); + const operand = sema.resolveInst(extra.rhs); const dest_is_comptime_float = switch (dest_type.zigTypeTag()) { .ComptimeFloat => true, @@ -3445,22 +3475,22 @@ fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten float", .{}); } -fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const bin_inst = sema.code.instructions.items(.data)[inst].bin; - const array = try sema.resolveInst(bin_inst.lhs); + const array = sema.resolveInst(bin_inst.lhs); const array_ptr = if (array.ty.zigTypeTag() == .Pointer) array else try sema.analyzeRef(block, sema.src, array); - const elem_index = try sema.resolveInst(bin_inst.rhs); + const elem_index = sema.resolveInst(bin_inst.rhs); const result_ptr = try sema.elemPtr(block, sema.src, array_ptr, elem_index, sema.src); return sema.analyzeLoad(block, sema.src, result_ptr, sema.src); } -fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3468,27 +3498,27 @@ fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE const src = inst_data.src(); const elem_index_src: LazySrcLoc = .{ .node_offset_array_access_index = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const array = try sema.resolveInst(extra.lhs); + const array = sema.resolveInst(extra.lhs); const array_ptr = if (array.ty.zigTypeTag() == .Pointer) array else try sema.analyzeRef(block, src, array); - const elem_index = try sema.resolveInst(extra.rhs); + const elem_index = sema.resolveInst(extra.rhs); const result_ptr = try sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src); return sema.analyzeLoad(block, src, result_ptr, src); } -fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const bin_inst = sema.code.instructions.items(.data)[inst].bin; - const array_ptr = try sema.resolveInst(bin_inst.lhs); - const elem_index = try sema.resolveInst(bin_inst.rhs); + const array_ptr = sema.resolveInst(bin_inst.lhs); + const elem_index = sema.resolveInst(bin_inst.rhs); return sema.elemPtr(block, sema.src, array_ptr, elem_index, sema.src); } -fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3496,39 +3526,39 @@ fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE const src = inst_data.src(); const elem_index_src: LazySrcLoc = .{ .node_offset_array_access_index = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const array_ptr = try sema.resolveInst(extra.lhs); - const elem_index = try 
sema.resolveInst(extra.rhs); + const array_ptr = sema.resolveInst(extra.lhs); + const elem_index = sema.resolveInst(extra.rhs); return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src); } -fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.SliceStart, inst_data.payload_index).data; - const array_ptr = try sema.resolveInst(extra.lhs); - const start = try sema.resolveInst(extra.start); + const array_ptr = sema.resolveInst(extra.lhs); + const start = sema.resolveInst(extra.start); return sema.analyzeSlice(block, src, array_ptr, start, null, null, .unneeded); } -fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.SliceEnd, inst_data.payload_index).data; - const array_ptr = try sema.resolveInst(extra.lhs); - const start = try sema.resolveInst(extra.start); - const end = try sema.resolveInst(extra.end); + const array_ptr = sema.resolveInst(extra.lhs); + const start = sema.resolveInst(extra.start); + const end = sema.resolveInst(extra.end); return sema.analyzeSlice(block, src, array_ptr, start, end, null, .unneeded); } -fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3536,10 +3566,10 @@ fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne const src = inst_data.src(); const sentinel_src: LazySrcLoc = .{ .node_offset_slice_sentinel = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.SliceSentinel, inst_data.payload_index).data; - const array_ptr = try sema.resolveInst(extra.lhs); - const start = try sema.resolveInst(extra.start); - const end = try sema.resolveInst(extra.end); - const sentinel = try sema.resolveInst(extra.sentinel); + const array_ptr = sema.resolveInst(extra.lhs); + const start = sema.resolveInst(extra.start); + const end = sema.resolveInst(extra.end); + const sentinel = sema.resolveInst(extra.sentinel); return sema.analyzeSlice(block, src, array_ptr, start, end, sentinel, sentinel_src); } @@ -3550,7 +3580,7 @@ fn zirSwitchCapture( inst: Zir.Inst.Index, is_multi: bool, is_ref: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3569,7 +3599,7 @@ fn zirSwitchCaptureElse( block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3588,7 +3618,7 @@ fn zirSwitchBlock( inst: Zir.Inst.Index, is_ref: bool, special_prong: Zir.SpecialProng, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3597,7 +3627,7 @@ fn zirSwitchBlock( const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = inst_data.src_node }; const extra = 
sema.code.extraData(Zir.Inst.SwitchBlock, inst_data.payload_index); - const operand_ptr = try sema.resolveInst(extra.data.operand); + const operand_ptr = sema.resolveInst(extra.data.operand); const operand = if (is_ref) try sema.analyzeLoad(block, src, operand_ptr, operand_src) else @@ -3621,7 +3651,7 @@ fn zirSwitchBlockMulti( inst: Zir.Inst.Index, is_ref: bool, special_prong: Zir.SpecialProng, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3630,7 +3660,7 @@ fn zirSwitchBlockMulti( const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.SwitchBlockMulti, inst_data.payload_index); - const operand_ptr = try sema.resolveInst(extra.data.operand); + const operand_ptr = sema.resolveInst(extra.data.operand); const operand = if (is_ref) try sema.analyzeLoad(block, src, operand_ptr, operand_src) else @@ -3651,14 +3681,14 @@ fn zirSwitchBlockMulti( fn analyzeSwitch( sema: *Sema, block: *Scope.Block, - operand: Air.Inst.Index, + operand: Air.Inst.Ref, extra_end: usize, special_prong: Zir.SpecialProng, scalar_cases_len: usize, multi_cases_len: usize, switch_inst: Zir.Inst.Index, src_node_offset: i32, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const gpa = sema.gpa; const mod = sema.mod; @@ -4217,7 +4247,7 @@ fn analyzeSwitch( const bool_ty = comptime Type.initTag(.bool); for (items) |item_ref| { - const item = try sema.resolveInst(item_ref); + const item = sema.resolveInst(item_ref); _ = try sema.resolveConstValue(&child_block, item.src, item); const cmp_ok = try case_block.addBinOp(item.src, bool_ty, .cmp_eq, operand, item); @@ -4235,8 +4265,8 @@ fn analyzeSwitch( const last_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; - const item_first = try sema.resolveInst(first_ref); - const item_last = try sema.resolveInst(last_ref); + const item_first = sema.resolveInst(first_ref); + const item_last = sema.resolveInst(last_ref); _ = try sema.resolveConstValue(&child_block, item_first.src, item_first); _ = try sema.resolveConstValue(&child_block, item_last.src, item_last); @@ -4334,7 +4364,7 @@ fn resolveSwitchItemVal( switch_prong_src: Module.SwitchProngSrc, range_expand: Module.SwitchProngSrc.RangeExpand, ) InnerError!TypedValue { - const item = try sema.resolveInst(item_ref); + const item = sema.resolveInst(item_ref); // We have to avoid the other helper functions here because we cannot construct a LazySrcLoc // because we only have the switch AST node. Only if we know for sure we need to report // a compile error do we resolve the full source locations. 
@@ -4513,7 +4543,7 @@ fn validateSwitchNoRange( return sema.mod.failWithOwnedErrorMsg(&block.base, msg); } -fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; _ = extra; @@ -4522,7 +4552,7 @@ fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.mod.fail(&block.base, src, "TODO implement zirHasField", .{}); } -fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); @@ -4547,7 +4577,7 @@ fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return mod.constBool(arena, src, false); } -fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4572,13 +4602,13 @@ fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! return mod.constType(sema.arena, src, file_root_decl.ty); } -fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; _ = inst; return sema.mod.fail(&block.base, sema.src, "TODO implement zirRetErrValueCode", .{}); } -fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4587,7 +4617,7 @@ fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air return sema.mod.fail(&block.base, sema.src, "TODO implement zirShl", .{}); } -fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4599,8 +4629,8 @@ fn zirBitwise( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, - ir_tag: ir.Inst.Tag, -) InnerError!Air.Inst.Index { + air_tag: Air.Inst.Tag, +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4609,8 +4639,8 @@ fn zirBitwise( const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const lhs = try sema.resolveInst(extra.lhs); - const rhs = try sema.resolveInst(extra.rhs); + const lhs = sema.resolveInst(extra.lhs); + const rhs = sema.resolveInst(extra.rhs); const instructions = &[_]Air.Inst.Index{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions); @@ -4655,10 +4685,10 @@ fn zirBitwise( } try sema.requireRuntimeBlock(block, src); - return block.addBinOp(src, scalar_type, ir_tag, casted_lhs, casted_rhs); + return 
block.addBinOp(src, scalar_type, air_tag, casted_lhs, casted_rhs); } -fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4666,7 +4696,7 @@ fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! return sema.mod.fail(&block.base, sema.src, "TODO implement zirBitNot", .{}); } -fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4674,7 +4704,7 @@ fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.mod.fail(&block.base, sema.src, "TODO implement zirArrayCat", .{}); } -fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4687,7 +4717,7 @@ fn zirNegate( block: *Scope.Block, inst: Zir.Inst.Index, tag_override: Zir.Inst.Tag, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4695,13 +4725,13 @@ fn zirNegate( const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; - const lhs = try sema.resolveInst(.zero); - const rhs = try sema.resolveInst(inst_data.operand); + const lhs = sema.resolveInst(.zero); + const rhs = sema.resolveInst(inst_data.operand); return sema.analyzeArithmetic(block, tag_override, lhs, rhs, src, lhs_src, rhs_src); } -fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4711,8 +4741,8 @@ fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const lhs = try sema.resolveInst(extra.lhs); - const rhs = try sema.resolveInst(extra.rhs); + const lhs = sema.resolveInst(extra.lhs); + const rhs = sema.resolveInst(extra.rhs); return sema.analyzeArithmetic(block, tag_override, lhs, rhs, sema.src, lhs_src, rhs_src); } @@ -4721,7 +4751,7 @@ fn zirOverflowArithmetic( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4735,12 +4765,12 @@ fn analyzeArithmetic( sema: *Sema, block: *Scope.Block, zir_tag: Zir.Inst.Tag, - lhs: Air.Inst.Index, - rhs: Air.Inst.Index, + lhs: Air.Inst.Ref, + rhs: Air.Inst.Ref, src: LazySrcLoc, lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const instructions = &[_]Air.Inst.Index{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions); const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); @@ -4850,14 +4880,14 @@ fn 
analyzeArithmetic( return block.addBinOp(src, scalar_type, ir_tag, casted_lhs, casted_rhs); } -fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ptr_src: LazySrcLoc = .{ .node_offset_deref_ptr = inst_data.src_node }; - const ptr = try sema.resolveInst(inst_data.operand); + const ptr = sema.resolveInst(inst_data.operand); return sema.analyzeLoad(block, src, ptr, ptr_src); } @@ -4865,7 +4895,7 @@ fn zirAsm( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4915,7 +4945,7 @@ fn zirAsm( const name = sema.code.nullTerminatedString(input.data.name); _ = name; // TODO: use the name - arg.* = try sema.resolveInst(input.data.operand); + arg.* = sema.resolveInst(input.data.operand); inputs[arg_i] = sema.code.nullTerminatedString(input.data.constraint); } @@ -4949,7 +4979,7 @@ fn zirCmp( block: *Scope.Block, inst: Zir.Inst.Index, op: std.math.CompareOperator, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4960,8 +4990,8 @@ fn zirCmp( const src: LazySrcLoc = inst_data.src(); const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; - const lhs = try sema.resolveInst(extra.lhs); - const rhs = try sema.resolveInst(extra.rhs); + const lhs = sema.resolveInst(extra.lhs); + const rhs = sema.resolveInst(extra.rhs); const is_equality_cmp = switch (op) { .eq, .neq => true, @@ -5047,7 +5077,7 @@ fn zirCmp( return block.addBinOp(src, bool_type, tag, casted_lhs, casted_rhs); } -fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -5057,7 +5087,7 @@ fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
return sema.mod.constIntUnsigned(sema.arena, src, Type.initTag(.comptime_int), abi_size); } -fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -5071,7 +5101,7 @@ fn zirThis( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirThis", .{}); } @@ -5080,7 +5110,7 @@ fn zirRetAddr( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirRetAddr", .{}); } @@ -5089,12 +5119,12 @@ fn zirBuiltinSrc( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirBuiltinSrc", .{}); } -fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ty = try sema.resolveType(block, src, inst_data.operand); @@ -5137,31 +5167,31 @@ fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro } } -fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const zir_datas = sema.code.instructions.items(.data); const inst_data = zir_datas[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); return sema.mod.constType(sema.arena, src, operand.ty); } -fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand_ptr = try sema.resolveInst(inst_data.operand); + const operand_ptr = sema.resolveInst(inst_data.operand); const elem_ty = operand_ptr.ty.elemType(); return sema.mod.constType(sema.arena, src, elem_ty); } -fn zirTypeofLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirTypeofLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirTypeofLog2IntType", .{}); } -fn zirLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirLog2IntType", .{}); @@ -5171,7 +5201,7 @@ fn zirTypeofPeer( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5183,20 +5213,20 @@ fn zirTypeofPeer( defer sema.gpa.free(inst_list); for (args) |arg_ref, i| { - inst_list[i] = try sema.resolveInst(arg_ref); + inst_list[i] = sema.resolveInst(arg_ref); } const result_type = try sema.resolvePeerTypes(block, src, inst_list); return sema.mod.constType(sema.arena, src, result_type); } -fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const uncasted_operand = try sema.resolveInst(inst_data.operand); + const uncasted_operand = sema.resolveInst(inst_data.operand); const bool_type = Type.initTag(.bool); const operand = try sema.coerce(block, bool_type, uncasted_operand, uncasted_operand.src); @@ -5212,16 +5242,16 @@ fn zirBoolOp( block: *Scope.Block, inst: Zir.Inst.Index, comptime is_bool_or: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const src: LazySrcLoc = .unneeded; const bool_type = Type.initTag(.bool); const bin_inst = sema.code.instructions.items(.data)[inst].bin; - const uncasted_lhs = try sema.resolveInst(bin_inst.lhs); + const uncasted_lhs = sema.resolveInst(bin_inst.lhs); const lhs = try sema.coerce(block, bool_type, uncasted_lhs, uncasted_lhs.src); - const uncasted_rhs = try sema.resolveInst(bin_inst.rhs); + const uncasted_rhs = sema.resolveInst(bin_inst.rhs); const rhs = try sema.coerce(block, bool_type, uncasted_rhs, uncasted_rhs.src); if (lhs.value()) |lhs_val| { @@ -5234,7 +5264,7 @@ fn zirBoolOp( } } try sema.requireRuntimeBlock(block, src); - const tag: ir.Inst.Tag = if (is_bool_or) .bool_or else .bool_and; + const tag: Air.Inst.Tag = if (is_bool_or) .bool_or else .bool_and; return block.addBinOp(src, bool_type, tag, lhs, rhs); } @@ -5243,14 +5273,14 @@ fn zirBoolBr( parent_block: *Scope.Block, inst: Zir.Inst.Index, is_bool_or: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const datas = sema.code.instructions.items(.data); const inst_data = datas[inst].bool_br; const src: LazySrcLoc = .unneeded; - const lhs = try sema.resolveInst(inst_data.lhs); + const lhs = sema.resolveInst(inst_data.lhs); const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); const body = sema.code.extra[extra.end..][0..extra.data.body_len]; @@ -5313,13 +5343,13 @@ fn zirIsNonNull( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); return sema.analyzeIsNull(block, src, operand, true); } @@ -5327,33 +5357,33 @@ fn zirIsNonNullPtr( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, -) 
InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const ptr = try sema.resolveInst(inst_data.operand); + const ptr = sema.resolveInst(inst_data.operand); const loaded = try sema.analyzeLoad(block, src, ptr, src); return sema.analyzeIsNull(block, src, loaded, true); } -fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); return sema.analyzeIsNonErr(block, inst_data.src(), operand); } -fn zirIsNonErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIsNonErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const ptr = try sema.resolveInst(inst_data.operand); + const ptr = sema.resolveInst(inst_data.operand); const loaded = try sema.analyzeLoad(block, src, ptr, src); return sema.analyzeIsNonErr(block, src, loaded); } @@ -5374,7 +5404,7 @@ fn zirCondbr( const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len]; const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; - const uncasted_cond = try sema.resolveInst(extra.data.condition); + const uncasted_cond = sema.resolveInst(extra.data.condition); const cond = try sema.coerce(parent_block, Type.initTag(.bool), uncasted_cond, cond_src); if (try sema.resolveDefinedValue(parent_block, src, cond)) |cond_val| { @@ -5456,7 +5486,7 @@ fn zirRetCoerce( defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_tok; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); const src = inst_data.src(); return sema.analyzeRet(block, operand, src, need_coercion); @@ -5467,7 +5497,7 @@ fn zirRetNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); const src = inst_data.src(); return sema.analyzeRet(block, operand, src, false); @@ -5476,7 +5506,7 @@ fn zirRetNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError fn analyzeRet( sema: *Sema, block: *Scope.Block, - operand: Air.Inst.Index, + operand: Air.Inst.Ref, src: LazySrcLoc, need_coercion: bool, ) InnerError!Zir.Inst.Index { @@ -5511,7 +5541,7 @@ fn floatOpAllowed(tag: Zir.Inst.Tag) bool { }; } -fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5532,7 +5562,7 @@ fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne return sema.mod.constType(sema.arena, .unneeded, ty); } -fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { 
+fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5586,7 +5616,7 @@ fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.mod.constType(sema.arena, src, ty); } -fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5600,13 +5630,13 @@ fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In }); } -fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirUnionInitPtr", .{}); } -fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index { +fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; const zir_datas = sema.code.instructions.items(.data); @@ -5657,7 +5687,7 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: return mod.failWithOwnedErrorMsg(&block.base, msg); } found_fields[field_index] = item.data.field_type; - field_inits[field_index] = try sema.resolveInst(item.data.init); + field_inits[field_index] = sema.resolveInst(item.data.init); } var root_msg: ?*Module.ErrorMsg = null; @@ -5719,7 +5749,7 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: return mod.fail(&block.base, src, "TODO: Sema.zirStructInit for runtime-known struct values", .{}); } -fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index { +fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5727,7 +5757,7 @@ fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ return sema.mod.fail(&block.base, src, "TODO: Sema.zirStructInitAnon", .{}); } -fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index { +fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5735,7 +5765,7 @@ fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInit", .{}); } -fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index { +fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5743,13 +5773,13 @@ fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_r return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInitAnon", .{}); } -fn zirFieldTypeRef(sema: *Sema, block: *Scope.Block, 
inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldTypeRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldTypeRef", .{}); } -fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.FieldType, inst_data.payload_index).data; const src = inst_data.src(); @@ -5771,7 +5801,7 @@ fn zirErrorReturnTrace( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrorReturnTrace", .{}); } @@ -5780,7 +5810,7 @@ fn zirFrame( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrame", .{}); } @@ -5789,91 +5819,91 @@ fn zirFrameAddress( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameAddress", .{}); } -fn zirAlignOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAlignOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAlignOf", .{}); } -fn zirBoolToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBoolToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBoolToInt", .{}); } -fn zirEmbedFile(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirEmbedFile(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirEmbedFile", .{}); } -fn zirErrorName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrorName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrorName", .{}); } -fn zirUnaryMath(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirUnaryMath(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirUnaryMath", .{}); } -fn 
zirTagName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirTagName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTagName", .{}); } -fn zirReify(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirReify(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirReify", .{}); } -fn zirTypeName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirTypeName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTypeName", .{}); } -fn zirFrameType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFrameType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameType", .{}); } -fn zirFrameSize(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFrameSize(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameSize", .{}); } -fn zirFloatToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFloatToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFloatToInt", .{}); } -fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirIntToFloat", .{}); } -fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; - const operand_res = try sema.resolveInst(extra.rhs); + const operand_res = sema.resolveInst(extra.rhs); const operand_coerced = try sema.coerce(block, Type.initTag(.usize), operand_res, operand_src); const type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -5929,199 +5959,199 @@ fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return block.addUnOp(src, type_res, .bitcast, operand_coerced); } -fn zirErrSetCast(sema: *Sema, block: 
*Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrSetCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrSetCast", .{}); } -fn zirPtrCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirPtrCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirPtrCast", .{}); } -fn zirTruncate(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirTruncate(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTruncate", .{}); } -fn zirAlignCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAlignCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAlignCast", .{}); } -fn zirClz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirClz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirClz", .{}); } -fn zirCtz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirCtz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirCtz", .{}); } -fn zirPopCount(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirPopCount(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirPopCount", .{}); } -fn zirByteSwap(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirByteSwap(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirByteSwap", .{}); } -fn zirBitReverse(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBitReverse(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBitReverse", .{}); } -fn zirDivExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirDivExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = 
inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivExact", .{}); } -fn zirDivFloor(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirDivFloor(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivFloor", .{}); } -fn zirDivTrunc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirDivTrunc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivTrunc", .{}); } -fn zirMod(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirMod(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMod", .{}); } -fn zirRem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirRem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirRem", .{}); } -fn zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirShlExact", .{}); } -fn zirShrExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirShrExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirShrExact", .{}); } -fn zirBitOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBitOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBitOffsetOf", .{}); } -fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirOffsetOf", .{}); } -fn zirCmpxchg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirCmpxchg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirCmpxchg", .{}); } -fn zirSplat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirSplat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirSplat", .{}); } -fn zirReduce(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirReduce(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirReduce", .{}); } -fn zirShuffle(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirShuffle(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirShuffle", .{}); } -fn zirAtomicLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAtomicLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicLoad", .{}); } -fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicRmw", .{}); } -fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicStore", .{}); } -fn zirMulAdd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirMulAdd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMulAdd", .{}); } -fn zirBuiltinCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBuiltinCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBuiltinCall", .{}); } -fn zirFieldPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldPtrType", .{}); } -fn zirFieldParentPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldParentPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldParentPtr", .{}); 
} -fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemcpy", .{}); } -fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemset", .{}); } -fn zirBuiltinAsyncCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBuiltinAsyncCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBuiltinAsyncCall", .{}); } -fn zirResume(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirResume(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirResume", .{}); @@ -6132,7 +6162,7 @@ fn zirAwait( block: *Scope.Block, inst: Zir.Inst.Index, is_nosuspend: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); @@ -6144,7 +6174,7 @@ fn zirVarExtended( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand); const src = sema.src; const ty_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at type @@ -6210,7 +6240,7 @@ fn zirFuncExtended( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -6277,7 +6307,7 @@ fn zirCUndef( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCUndef", .{}); @@ -6287,7 +6317,7 @@ fn zirCInclude( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCInclude", .{}); @@ -6297,7 +6327,7 @@ fn zirCDefine( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCDefine", .{}); @@ -6307,7 +6337,7 @@ fn zirWasmMemorySize( sema: *Sema, block: *Scope.Block, extended: 
Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirWasmMemorySize", .{}); @@ -6317,7 +6347,7 @@ fn zirWasmMemoryGrow( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirWasmMemoryGrow", .{}); @@ -6327,7 +6357,7 @@ fn zirBuiltinExtern( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirBuiltinExtern", .{}); @@ -6361,7 +6391,7 @@ pub const PanicId = enum { invalid_error_code, }; -fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: Air.Inst.Index, panic_id: PanicId) !void { +fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: Air.Inst.Ref, panic_id: PanicId) !void { const block_inst = try sema.arena.create(Inst.Block); block_inst.* = .{ .base = .{ @@ -6423,7 +6453,7 @@ fn panicWithMsg( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - msg_inst: Air.Inst.Index, + msg_inst: Air.Inst.Ref, ) !Zir.Inst.Index { const mod = sema.mod; const arena = sema.arena; @@ -6439,7 +6469,7 @@ fn panicWithMsg( const panic_fn = try sema.getBuiltin(block, src, "panic"); const unresolved_stack_trace_ty = try sema.getBuiltinType(block, src, "StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(block, src, unresolved_stack_trace_ty); - const ptr_stack_trace_ty = try mod.simplePtrType(arena, stack_trace_ty, true, .One); + const ptr_stack_trace_ty = try Module.simplePtrType(arena, stack_trace_ty, true, .One); const null_stack_trace = try mod.constInst(arena, src, .{ .ty = try mod.optionalType(arena, ptr_stack_trace_ty), .val = Value.initTag(.null_value), @@ -6500,10 +6530,10 @@ fn namedFieldPtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - object_ptr: Air.Inst.Index, + object_ptr: Air.Inst.Ref, field_name: []const u8, field_name_src: LazySrcLoc, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const mod = sema.mod; const arena = sema.arena; @@ -6579,7 +6609,7 @@ fn namedFieldPtr( } else (try mod.getErrorValue(field_name)).key; return mod.constInst(arena, src, .{ - .ty = try mod.simplePtrType(arena, child_type, false, .One), + .ty = try Module.simplePtrType(arena, child_type, false, .One), .val = try Value.Tag.ref_val.create( arena, try Value.Tag.@"error".create(arena, .{ @@ -6633,7 +6663,7 @@ fn namedFieldPtr( const field_index_u32 = @intCast(u32, field_index); const enum_val = try Value.Tag.enum_field_index.create(arena, field_index_u32); return mod.constInst(arena, src, .{ - .ty = try mod.simplePtrType(arena, child_type, false, .One), + .ty = try Module.simplePtrType(arena, child_type, false, .One), .val = try Value.Tag.ref_val.create(arena, enum_val), }); }, @@ -6653,7 +6683,7 @@ fn analyzeNamespaceLookup( src: LazySrcLoc, namespace: *Scope.Namespace, decl_name: []const u8, -) InnerError!?Air.Inst.Index { +) InnerError!?Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; if (try 
sema.lookupInNamespace(namespace, decl_name)) |decl| { @@ -6677,11 +6707,11 @@ fn analyzeStructFieldPtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - struct_ptr: Air.Inst.Index, + struct_ptr: Air.Inst.Ref, field_name: []const u8, field_name_src: LazySrcLoc, unresolved_struct_ty: Type, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const mod = sema.mod; const arena = sema.arena; assert(unresolved_struct_ty.zigTypeTag() == .Struct); @@ -6692,7 +6722,7 @@ fn analyzeStructFieldPtr( const field_index = struct_obj.fields.getIndex(field_name) orelse return sema.failWithBadFieldAccess(block, struct_obj, field_name_src, field_name); const field = struct_obj.fields.values()[field_index]; - const ptr_field_ty = try mod.simplePtrType(arena, field.ty, true, .One); + const ptr_field_ty = try Module.simplePtrType(arena, field.ty, true, .One); if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| { return mod.constInst(arena, src, .{ @@ -6712,11 +6742,11 @@ fn analyzeUnionFieldPtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - union_ptr: Air.Inst.Index, + union_ptr: Air.Inst.Ref, field_name: []const u8, field_name_src: LazySrcLoc, unresolved_union_ty: Type, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const mod = sema.mod; const arena = sema.arena; assert(unresolved_union_ty.zigTypeTag() == .Union); @@ -6728,7 +6758,7 @@ fn analyzeUnionFieldPtr( return sema.failWithBadUnionFieldAccess(block, union_obj, field_name_src, field_name); const field = union_obj.fields.values()[field_index]; - const ptr_field_ty = try mod.simplePtrType(arena, field.ty, true, .One); + const ptr_field_ty = try Module.simplePtrType(arena, field.ty, true, .One); if (try sema.resolveDefinedValue(block, src, union_ptr)) |union_ptr_val| { // TODO detect inactive union field and emit compile error @@ -6749,10 +6779,10 @@ fn elemPtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - array_ptr: Air.Inst.Index, - elem_index: Air.Inst.Index, + array_ptr: Air.Inst.Ref, + elem_index: Air.Inst.Ref, elem_index_src: LazySrcLoc, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const array_ty = switch (array_ptr.ty.zigTypeTag()) { .Pointer => array_ptr.ty.elemType(), else => return sema.mod.fail(&block.base, array_ptr.src, "expected pointer, found '{}'", .{array_ptr.ty}), @@ -6776,10 +6806,10 @@ fn elemPtrArray( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - array_ptr: Air.Inst.Index, - elem_index: Air.Inst.Index, + array_ptr: Air.Inst.Ref, + elem_index: Air.Inst.Ref, elem_index_src: LazySrcLoc, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { if (array_ptr.value()) |array_ptr_val| { if (elem_index.value()) |index_val| { // Both array pointer and index are compile-time known. @@ -6804,35 +6834,41 @@ fn coerce( sema: *Sema, block: *Scope.Block, dest_type: Type, - inst: Air.Inst.Index, + inst: Air.Inst.Ref, inst_src: LazySrcLoc, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { if (dest_type.tag() == .var_args_param) { - return sema.coerceVarArgParam(block, inst); + return sema.coerceVarArgParam(block, inst, inst_src); } + + const inst_ty = sema.getTypeOfAirRef(inst); // If the types are the same, we can return the operand. 
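// A minimal aside (not part of this patch's hunks): `getTypeOfAirRef` above
// leans on the Ref/Index split this rework introduces. Refs below
// `Air.Inst.Ref.typed_value_map.len` are static well-known entries; every
// ref at or past that length encodes an instruction index shifted by the map
// length, mirroring `indexToRef`/`refToIndex` later in this patch. With a
// hypothetical map length of 55:
//
//     const ref_start_index: u32 = Air.Inst.Ref.typed_value_map.len; // 55 here
//     const first = @intToEnum(Air.Inst.Ref, ref_start_index + 0); // inst 0 -> ref 55
//     // and back again: @enumToInt(first) - ref_start_index == 0, whereas a
//     // static entry such as .u8_type sits below 55 and maps to no index.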
- if (dest_type.eql(inst.ty)) + if (dest_type.eql(inst_ty)) return inst; - const in_memory_result = coerceInMemoryAllowed(dest_type, inst.ty); + const in_memory_result = coerceInMemoryAllowed(dest_type, inst_ty); if (in_memory_result == .ok) { - return sema.bitcast(block, dest_type, inst); + return sema.bitcast(block, dest_type, inst, inst_src); } const mod = sema.mod; const arena = sema.arena; // undefined to anything - if (inst.value()) |val| { - if (val.isUndef() or inst.ty.zigTypeTag() == .Undefined) { - return mod.constInst(arena, inst_src, .{ .ty = dest_type, .val = val }); + if (try sema.resolvePossiblyUndefinedValue(block, inst_src, inst)) |val| { + if (val.isUndef() or inst_ty.zigTypeTag() == .Undefined) { + return sema.addConstant(dest_type, val); } } - assert(inst.ty.zigTypeTag() != .Undefined); + assert(inst_ty.zigTypeTag() != .Undefined); + + if (true) { + @panic("TODO finish AIR memory layout rework"); + } // T to E!T or E to E!T if (dest_type.tag() == .error_union) { - return try sema.wrapErrorUnion(block, dest_type, inst); + return try sema.wrapErrorUnion(block, dest_type, inst, inst_src); } // comptime known number to other number @@ -6844,14 +6880,14 @@ fn coerce( switch (dest_type.zigTypeTag()) { .Optional => { // null to ?T - if (inst.ty.zigTypeTag() == .Null) { + if (inst_ty.zigTypeTag() == .Null) { return mod.constInst(arena, inst_src, .{ .ty = dest_type, .val = Value.initTag(.null_value) }); } // T to ?T var buf: Type.Payload.ElemType = undefined; const child_type = dest_type.optionalChild(&buf); - if (child_type.eql(inst.ty)) { + if (child_type.eql(inst_ty)) { return sema.wrapOptional(block, dest_type, inst); } else if (try sema.coerceNum(block, child_type, inst)) |some| { return sema.wrapOptional(block, dest_type, some); @@ -6860,12 +6896,12 @@ fn coerce( .Pointer => { // Coercions where the source is a single pointer to an array. 
src_array_ptr: { - if (!inst.ty.isSinglePointer()) break :src_array_ptr; - const array_type = inst.ty.elemType(); + if (!inst_ty.isSinglePointer()) break :src_array_ptr; + const array_type = inst_ty.elemType(); if (array_type.zigTypeTag() != .Array) break :src_array_ptr; const array_elem_type = array_type.elemType(); - if (inst.ty.isConstPtr() and !dest_type.isConstPtr()) break :src_array_ptr; - if (inst.ty.isVolatilePtr() and !dest_type.isVolatilePtr()) break :src_array_ptr; + if (inst_ty.isConstPtr() and !dest_type.isConstPtr()) break :src_array_ptr; + if (inst_ty.isVolatilePtr() and !dest_type.isVolatilePtr()) break :src_array_ptr; const dst_elem_type = dest_type.elemType(); switch (coerceInMemoryAllowed(dst_elem_type, array_elem_type)) { @@ -6904,11 +6940,11 @@ fn coerce( }, .Int => { // integer widening - if (inst.ty.zigTypeTag() == .Int) { + if (inst_ty.zigTypeTag() == .Int) { assert(inst.value() == null); // handled above const dst_info = dest_type.intInfo(target); - const src_info = inst.ty.intInfo(target); + const src_info = inst_ty.intInfo(target); if ((src_info.signedness == dst_info.signedness and dst_info.bits >= src_info.bits) or // small enough unsigned ints can get casted to large enough signed ints (src_info.signedness == .signed and dst_info.signedness == .unsigned and dst_info.bits > src_info.bits)) @@ -6920,10 +6956,10 @@ fn coerce( }, .Float => { // float widening - if (inst.ty.zigTypeTag() == .Float) { + if (inst_ty.zigTypeTag() == .Float) { assert(inst.value() == null); // handled above - const src_bits = inst.ty.floatBits(target); + const src_bits = inst_ty.floatBits(target); const dst_bits = dest_type.floatBits(target); if (dst_bits >= src_bits) { try sema.requireRuntimeBlock(block, inst_src); @@ -6933,7 +6969,7 @@ fn coerce( }, .Enum => { // enum literal to enum - if (inst.ty.zigTypeTag() == .EnumLiteral) { + if (inst_ty.zigTypeTag() == .EnumLiteral) { const val = try sema.resolveConstValue(block, inst_src, inst); const bytes = val.castTag(.enum_literal).?.data; const resolved_dest_type = try sema.resolveTypeFields(block, inst_src, dest_type); @@ -6965,7 +7001,7 @@ fn coerce( else => {}, } - return mod.fail(&block.base, inst_src, "expected {}, found {}", .{ dest_type, inst.ty }); + return mod.fail(&block.base, inst_src, "expected {}, found {}", .{ dest_type, inst_ty }); } const InMemoryCoercionResult = enum { @@ -6982,7 +7018,7 @@ fn coerceInMemoryAllowed(dest_type: Type, src_type: Type) InMemoryCoercionResult return .no_match; } -fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) InnerError!?Air.Inst.Index { +fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) InnerError!?Air.Inst.Index { const val = inst.value() orelse return null; const src_zig_tag = inst.ty.zigTypeTag(); const dst_zig_tag = dest_type.zigTypeTag(); @@ -7020,9 +7056,15 @@ fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.I return null; } -fn coerceVarArgParam(sema: *Sema, block: *Scope.Block, inst: Air.Inst.Index) !Air.Inst.Index { - switch (inst.ty.zigTypeTag()) { - .ComptimeInt, .ComptimeFloat => return sema.mod.fail(&block.base, inst.src, "integer and float literals in var args function must be casted", .{}), +fn coerceVarArgParam( + sema: *Sema, + block: *Scope.Block, + inst: Air.Inst.Ref, + inst_src: LazySrcLoc, +) !Air.Inst.Ref { + const inst_ty = sema.getTypeOfAirRef(inst); + switch (inst_ty.zigTypeTag()) { + .ComptimeInt, .ComptimeFloat => return sema.mod.fail(&block.base, inst_src, "integer 
and float literals in var args function must be casted", .{}), else => {}, } // TODO implement more of this function. @@ -7033,8 +7075,8 @@ fn storePtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - ptr: Air.Inst.Index, - uncasted_value: Air.Inst.Index, + ptr: Air.Inst.Ref, + uncasted_value: Air.Inst.Ref, ) !void { if (ptr.ty.isConstPtr()) return sema.mod.fail(&block.base, src, "cannot assign to constant", .{}); @@ -7082,17 +7124,23 @@ fn storePtr( _ = try block.addBinOp(src, Type.initTag(.void), .store, ptr, value); } -fn bitcast(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { - if (inst.value()) |val| { +fn bitcast( + sema: *Sema, + block: *Scope.Block, + dest_type: Type, + inst: Air.Inst.Ref, + inst_src: LazySrcLoc, +) InnerError!Air.Inst.Ref { + if (try sema.resolvePossiblyUndefinedValue(block, inst_src, inst)) |val| { // Keep the comptime Value representation; take the new type. - return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); + return sema.addConstant(dest_type, val); } // TODO validate the type size and other compile errors - try sema.requireRuntimeBlock(block, inst.src); - return block.addUnOp(inst.src, dest_type, .bitcast, inst); + try sema.requireRuntimeBlock(block, inst_src); + return block.addTyOp(.bitcast, dest_type, inst); } -fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { +fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) InnerError!Air.Inst.Ref { if (inst.value()) |val| { // The comptime Value representation is compatible with both types. return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); @@ -7100,7 +7148,7 @@ fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst return sema.mod.fail(&block.base, inst.src, "TODO implement coerceArrayPtrToSlice runtime instruction", .{}); } -fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { +fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) !Air.Inst.Ref { if (inst.value()) |val| { // The comptime Value representation is compatible with both types. 
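// (Same comptime/runtime split as `bitcast` above: a comptime-known operand
// keeps its Value and merely takes the destination Type, emitting no
// instruction; a runtime operand requires a runtime block and a typed
// instruction such as `.bitcast` via `addTyOp`.)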
return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); @@ -7108,12 +7156,12 @@ fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: return sema.mod.fail(&block.base, inst.src, "TODO implement coerceArrayPtrToMany runtime instruction", .{}); } -fn analyzeDeclVal(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Index { +fn analyzeDeclVal(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Ref { const decl_ref = try sema.analyzeDeclRef(block, src, decl); return sema.analyzeLoad(block, src, decl_ref, src); } -fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Index { +fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Ref { try sema.mod.declareDeclDependency(sema.owner_decl, decl); sema.mod.ensureDeclAnalyzed(decl) catch |err| { if (sema.func) |func| { @@ -7128,43 +7176,41 @@ fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl if (decl_tv.val.tag() == .variable) { return sema.analyzeVarRef(block, src, decl_tv); } - return sema.mod.constInst(sema.arena, src, .{ - .ty = try sema.mod.simplePtrType(sema.arena, decl_tv.ty, false, .One), - .val = try Value.Tag.decl_ref.create(sema.arena, decl), - }); + return sema.addConstant( + try Module.simplePtrType(sema.arena, decl_tv.ty, false, .One), + try Value.Tag.decl_ref.create(sema.arena, decl), + ); } -fn analyzeVarRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, tv: TypedValue) InnerError!Air.Inst.Index { +fn analyzeVarRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, tv: TypedValue) InnerError!Air.Inst.Ref { const variable = tv.val.castTag(.variable).?.data; - const ty = try sema.mod.simplePtrType(sema.arena, tv.ty, variable.is_mutable, .One); + const ty = try Module.simplePtrType(sema.arena, tv.ty, variable.is_mutable, .One); if (!variable.is_mutable and !variable.is_extern) { - return sema.mod.constInst(sema.arena, src, .{ - .ty = ty, - .val = try Value.Tag.ref_val.create(sema.arena, variable.init), - }); + return sema.addConstant(ty, try Value.Tag.ref_val.create(sema.arena, variable.init)); } + const gpa = sema.gpa; try sema.requireRuntimeBlock(block, src); - const inst = try sema.arena.create(Inst.VarPtr); - inst.* = .{ - .base = .{ - .tag = .varptr, - .ty = ty, - .src = src, - }, - .variable = variable, - }; - try block.instructions.append(sema.gpa, &inst.base); - return &inst.base; + try sema.air_variables.append(gpa, variable); + const result_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + try sema.air_instructions.append(gpa, .{ + .tag = .varptr, + .data = .{ .ty_pl = .{ + .ty = try sema.addType(ty), + .payload = @intCast(u32, sema.air_variables.items.len - 1), + } }, + }); + try block.instructions.append(gpa, result_inst); + return indexToRef(result_inst); } fn analyzeRef( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - operand: Air.Inst.Index, -) InnerError!Air.Inst.Index { + operand: Air.Inst.Ref, +) InnerError!Air.Inst.Ref { const ptr_type = try sema.mod.simplePtrType(sema.arena, operand.ty, false, .One); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |val| { @@ -7182,34 +7228,32 @@ fn analyzeLoad( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - ptr: Air.Inst.Index, + ptr: Air.Inst.Ref, ptr_src: LazySrcLoc, -) InnerError!Air.Inst.Index { - const elem_ty = switch (ptr.ty.zigTypeTag()) { - .Pointer => ptr.ty.elemType(), - else => return 
sema.mod.fail(&block.base, ptr_src, "expected pointer, found '{}'", .{ptr.ty}), +) InnerError!Air.Inst.Ref { + const ptr_ty = sema.getTypeOfAirRef(ptr); + const elem_ty = switch (ptr_ty.zigTypeTag()) { + .Pointer => ptr_ty.elemType(), + else => return sema.mod.fail(&block.base, ptr_src, "expected pointer, found '{}'", .{ptr_ty}), }; if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| blk: { if (ptr_val.tag() == .int_u64) break :blk; // do it at runtime - return sema.mod.constInst(sema.arena, src, .{ - .ty = elem_ty, - .val = try ptr_val.pointerDeref(sema.arena), - }); + return sema.addConstant(elem_ty, try ptr_val.pointerDeref(sema.arena)); } try sema.requireRuntimeBlock(block, src); - return block.addUnOp(src, elem_ty, .load, ptr); + return block.addTyOp(.load, elem_ty, ptr); } fn analyzeIsNull( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - operand: Air.Inst.Index, + operand: Air.Inst.Ref, invert_logic: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const result_ty = Type.initTag(.bool); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |opt_val| { if (opt_val.isUndef()) { @@ -7228,8 +7272,8 @@ fn analyzeIsNonErr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - operand: Air.Inst.Index, -) InnerError!Air.Inst.Index { + operand: Air.Inst.Ref, +) InnerError!Air.Inst.Ref { const ot = operand.ty.zigTypeTag(); if (ot != .ErrorSet and ot != .ErrorUnion) return sema.mod.constBool(sema.arena, src, true); if (ot == .ErrorSet) return sema.mod.constBool(sema.arena, src, false); @@ -7249,12 +7293,12 @@ fn analyzeSlice( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - array_ptr: Air.Inst.Index, - start: Air.Inst.Index, + array_ptr: Air.Inst.Ref, + start: Air.Inst.Ref, end_opt: ?Air.Inst.Index, sentinel_opt: ?Air.Inst.Index, sentinel_src: LazySrcLoc, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const ptr_child = switch (array_ptr.ty.zigTypeTag()) { .Pointer => array_ptr.ty.elemType(), else => return sema.mod.fail(&block.base, src, "expected pointer, found '{}'", .{array_ptr.ty}), @@ -7325,10 +7369,10 @@ fn cmpNumeric( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - lhs: Air.Inst.Index, - rhs: Air.Inst.Index, + lhs: Air.Inst.Ref, + rhs: Air.Inst.Ref, op: std.math.CompareOperator, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { assert(lhs.ty.isNumeric()); assert(rhs.ty.isNumeric()); @@ -7494,7 +7538,7 @@ fn cmpNumeric( return block.addBinOp(src, Type.initTag(.bool), Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs); } -fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { +fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) !Air.Inst.Index { if (inst.value()) |val| { return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); } @@ -7503,9 +7547,15 @@ fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Ins return block.addUnOp(inst.src, dest_type, .wrap_optional, inst); } -fn wrapErrorUnion(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { +fn wrapErrorUnion( + sema: *Sema, + block: *Scope.Block, + dest_type: Type, + inst: Air.Inst.Ref, + inst_src: LazySrcLoc, +) !Air.Inst.Index { const err_union = dest_type.castTag(.error_union).?; - if (inst.value()) |val| { + if (try sema.resolvePossiblyUndefinedValue(block, inst_src, inst)) |val| { if (inst.ty.zigTypeTag() != .ErrorSet) { _ = try sema.coerce(block, err_union.data.payload, inst, 
inst.src); } else switch (err_union.data.error_set.tag()) { @@ -7710,7 +7760,7 @@ fn getBuiltin( block: *Scope.Block, src: LazySrcLoc, name: []const u8, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const mod = sema.mod; const std_pkg = mod.root_pkg.table.get("std").?; const std_file = (mod.importPkg(std_pkg) catch unreachable).file; @@ -7938,6 +7988,68 @@ fn enumFieldSrcLoc( } else unreachable; } +/// Returns the type of the AIR instruction. +fn getTypeOfAirRef(sema: *Sema, air_ref: Air.Inst.Ref) Type { + switch (air_ref) { + .none => unreachable, + .u8_type => return Type.initTag(.u8), + .i8_type => return Type.initTag(.i8), + .u16_type => return Type.initTag(.u16), + .i16_type => return Type.initTag(.i16), + .u32_type => return Type.initTag(.u32), + .i32_type => return Type.initTag(.i32), + .u64_type => return Type.initTag(.u64), + .i64_type => return Type.initTag(.i64), + .u128_type => return Type.initTag(.u128), + .i128_type => return Type.initTag(.i128), + .usize_type => return Type.initTag(.usize), + .isize_type => return Type.initTag(.isize), + .c_short_type => return Type.initTag(.c_short), + .c_ushort_type => return Type.initTag(.c_ushort), + .c_int_type => return Type.initTag(.c_int), + .c_uint_type => return Type.initTag(.c_uint), + .c_long_type => return Type.initTag(.c_long), + .c_ulong_type => return Type.initTag(.c_ulong), + .c_longlong_type => return Type.initTag(.c_longlong), + .c_ulonglong_type => return Type.initTag(.c_ulonglong), + .c_longdouble_type => return Type.initTag(.c_longdouble), + .f16_type => return Type.initTag(.f16), + .f32_type => return Type.initTag(.f32), + .f64_type => return Type.initTag(.f64), + .f128_type => return Type.initTag(.f128), + .c_void_type => return Type.initTag(.c_void), + .bool_type => return Type.initTag(.bool), + .void_type => return Type.initTag(.void), + .type_type => return Type.initTag(.type), + .anyerror_type => return Type.initTag(.anyerror), + .comptime_int_type => return Type.initTag(.comptime_int), + .comptime_float_type => return Type.initTag(.comptime_float), + .noreturn_type => return Type.initTag(.noreturn), + .anyframe_type => return Type.initTag(.@"anyframe"), + .null_type => return Type.initTag(.@"null"), + .undefined_type => return Type.initTag(.@"undefined"), + .enum_literal_type => return Type.initTag(.enum_literal), + .atomic_ordering_type => return Type.initTag(.atomic_ordering), + .atomic_rmw_op_type => return Type.initTag(.atomic_rmw_op), + .calling_convention_type => return Type.initTag(.calling_convention), + .float_mode_type => return Type.initTag(.float_mode), + .reduce_op_type => return Type.initTag(.reduce_op), + .call_options_type => return Type.initTag(.call_options), + .export_options_type => return Type.initTag(.export_options), + .extern_options_type => return Type.initTag(.extern_options), + .manyptr_u8_type => return Type.initTag(.manyptr_u8), + .manyptr_const_u8_type => return Type.initTag(.manyptr_const_u8), + .fn_noreturn_no_args_type => return Type.initTag(.fn_noreturn_no_args), + .fn_void_no_args_type => return Type.initTag(.fn_void_no_args), + .fn_naked_noreturn_no_args_type => return Type.initTag(.fn_naked_noreturn_no_args), + .fn_ccc_void_no_args_type => return Type.initTag(.fn_ccc_void_no_args), + .single_const_pointer_to_comptime_int_type => return Type.initTag(.single_const_pointer_to_comptime_int), + .const_slice_u8_type => return Type.initTag(.const_slice_u8), + else => return sema.getAirType(air_ref), + } +} + +/// Asserts the AIR instruction is a `const_ty` and returns 
the type. fn getAirType(sema: *Sema, air_ref: Air.Inst.Ref) Type { var i: usize = @enumToInt(air_ref); if (i < Air.Inst.Ref.typed_value_map.len) { @@ -8014,13 +8126,27 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { return indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } +pub fn addConstant(sema: *Sema, ty: Type, val: Value) InnerError!Air.Inst.Ref { + const gpa = sema.gpa; + const ty_inst = try sema.addType(ty); + try sema.air_values.append(gpa, val); + try sema.air_instructions.append(gpa, .{ + .tag = .constant, + .data = .{ .ty_pl = .{ + .ty = ty_inst, + .payload = @intCast(u32, sema.air_values.items.len - 1), + } }, + }); + return indexToRef(@intCast(u32, sema.air_instructions.len - 1)); +} + const ref_start_index: u32 = Air.Inst.Ref.typed_value_map.len; -fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref { +pub fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref { return @intToEnum(Air.Inst.Ref, ref_start_index + inst); } -fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index { +pub fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index { const ref_int = @enumToInt(inst); if (ref_int >= ref_start_index) { return ref_int - ref_start_index; diff --git a/src/codegen.zig b/src/codegen.zig index a6c4b5ad3c..c27a1444ef 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -494,7 +494,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { defer function.blocks.deinit(bin_file.allocator); defer function.exitlude_jump_relocs.deinit(bin_file.allocator); - var call_info = function.resolveCallingConventionValues(src_loc.lazy, fn_type) catch |err| switch (err) { + var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) { error.CodegenFail => return Result{ .fail = function.err_msg.? }, else => |e| return e, }; @@ -537,7 +537,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { self.code.items.len += 4; try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.body); + try self.genBody(self.air.getMainBody()); const stack_end = self.max_end_stack; if (stack_end > math.maxInt(i32)) @@ -578,7 +578,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }); } else { try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.body); + try self.genBody(self.air.getMainBody()); try self.dbgSetEpilogueBegin(); } }, @@ -758,11 +758,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } // TODO inline this logic into every instruction - var i: ir.Inst.DeathsBitIndex = 0; - while (inst.getOperand(i)) |operand| : (i += 1) { - if (inst.operandDies(i)) - self.processDeath(operand); - } + @panic("TODO rework AIR memory layout codegen for processing deaths"); + //var i: ir.Inst.DeathsBitIndex = 0; + //while (inst.getOperand(i)) |operand| : (i += 1) { + // if (inst.operandDies(i)) + // self.processDeath(operand); + //} } } @@ -858,74 +859,76 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const air_tags = self.air.instructions.items(.tag); switch (air_tags[inst]) { // zig fmt: off - .add => return self.genAdd(inst.castTag(.add).?), - .addwrap => return self.genAddWrap(inst.castTag(.addwrap).?), - .sub => return self.genSub(inst.castTag(.sub).?), - .subwrap => return self.genSubWrap(inst.castTag(.subwrap).?), - .mul => return self.genMul(inst.castTag(.mul).?), - .mulwrap => return self.genMulWrap(inst.castTag(.mulwrap).?), - .div => return self.genDiv(inst.castTag(.div).?), - - .cmp_lt => return self.genCmp(inst.castTag(.cmp_lt).?, .lt), - .cmp_lte => return self.genCmp(inst.castTag(.cmp_lte).?, .lte), - .cmp_eq => return 
self.genCmp(inst.castTag(.cmp_eq).?, .eq), - .cmp_gte => return self.genCmp(inst.castTag(.cmp_gte).?, .gte), - .cmp_gt => return self.genCmp(inst.castTag(.cmp_gt).?, .gt), - .cmp_neq => return self.genCmp(inst.castTag(.cmp_neq).?, .neq), - - .bool_and => return self.genBoolOp(inst.castTag(.bool_and).?), - .bool_or => return self.genBoolOp(inst.castTag(.bool_or).?), - .bit_and => return self.genBitAnd(inst.castTag(.bit_and).?), - .bit_or => return self.genBitOr(inst.castTag(.bit_or).?), - .xor => return self.genXor(inst.castTag(.xor).?), - - .alloc => return self.genAlloc(inst.castTag(.alloc).?), - .arg => return self.genArg(inst.castTag(.arg).?), - .assembly => return self.genAsm(inst.castTag(.assembly).?), - .bitcast => return self.genBitCast(inst.castTag(.bitcast).?), - .block => return self.genBlock(inst.castTag(.block).?), - .br => return self.genBr(inst.castTag(.br).?), - .br_block_flat => return self.genBrBlockFlat(inst.castTag(.br_block_flat).?), - .breakpoint => return self.genBreakpoint(inst.src), - .call => return self.genCall(inst.castTag(.call).?), - .cond_br => return self.genCondBr(inst.castTag(.condbr).?), - .dbg_stmt => return self.genDbgStmt(inst.castTag(.dbg_stmt).?), - .floatcast => return self.genFloatCast(inst.castTag(.floatcast).?), - .intcast => return self.genIntCast(inst.castTag(.intcast).?), - .is_non_null => return self.genIsNonNull(inst.castTag(.is_non_null).?), - .is_non_null_ptr => return self.genIsNonNullPtr(inst.castTag(.is_non_null_ptr).?), - .is_null => return self.genIsNull(inst.castTag(.is_null).?), - .is_null_ptr => return self.genIsNullPtr(inst.castTag(.is_null_ptr).?), - .is_non_err => return self.genIsNonErr(inst.castTag(.is_non_err).?), - .is_non_err_ptr => return self.genIsNonErrPtr(inst.castTag(.is_non_err_ptr).?), - .is_err => return self.genIsErr(inst.castTag(.is_err).?), - .is_err_ptr => return self.genIsErrPtr(inst.castTag(.is_err_ptr).?), - .load => return self.genLoad(inst.castTag(.load).?), - .loop => return self.genLoop(inst.castTag(.loop).?), - .not => return self.genNot(inst.castTag(.not).?), - .ptrtoint => return self.genPtrToInt(inst.castTag(.ptrtoint).?), - .ref => return self.genRef(inst.castTag(.ref).?), - .ret => return self.genRet(inst.castTag(.ret).?), - .store => return self.genStore(inst.castTag(.store).?), - .struct_field_ptr=> return self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?), - .switchbr => return self.genSwitch(inst.castTag(.switchbr).?), - .varptr => return self.genVarPtr(inst.castTag(.varptr).?), - - .constant => unreachable, // excluded from function bodies - .unreach => return MCValue{ .unreach = {} }, - - .optional_payload => return self.genOptionalPayload(inst.castTag(.optional_payload).?), - .optional_payload_ptr => return self.genOptionalPayloadPtr(inst.castTag(.optional_payload_ptr).?), - .unwrap_errunion_err => return self.genUnwrapErrErr(inst.castTag(.unwrap_errunion_err).?), - .unwrap_errunion_payload => return self.genUnwrapErrPayload(inst.castTag(.unwrap_errunion_payload).?), - .unwrap_errunion_err_ptr => return self.genUnwrapErrErrPtr(inst.castTag(.unwrap_errunion_err_ptr).?), - .unwrap_errunion_payload_ptr=> return self.genUnwrapErrPayloadPtr(inst.castTag(.unwrap_errunion_payload_ptr).?), - - .wrap_optional => return self.genWrapOptional(inst.castTag(.wrap_optional).?), - .wrap_errunion_payload => return self.genWrapErrUnionPayload(inst.castTag(.wrap_errunion_payload).?), - .wrap_errunion_err => return self.genWrapErrUnionErr(inst.castTag(.wrap_errunion_err).?), + //.add => return 
self.genAdd(inst.castTag(.add).?), + //.addwrap => return self.genAddWrap(inst.castTag(.addwrap).?), + //.sub => return self.genSub(inst.castTag(.sub).?), + //.subwrap => return self.genSubWrap(inst.castTag(.subwrap).?), + //.mul => return self.genMul(inst.castTag(.mul).?), + //.mulwrap => return self.genMulWrap(inst.castTag(.mulwrap).?), + //.div => return self.genDiv(inst.castTag(.div).?), + + //.cmp_lt => return self.genCmp(inst.castTag(.cmp_lt).?, .lt), + //.cmp_lte => return self.genCmp(inst.castTag(.cmp_lte).?, .lte), + //.cmp_eq => return self.genCmp(inst.castTag(.cmp_eq).?, .eq), + //.cmp_gte => return self.genCmp(inst.castTag(.cmp_gte).?, .gte), + //.cmp_gt => return self.genCmp(inst.castTag(.cmp_gt).?, .gt), + //.cmp_neq => return self.genCmp(inst.castTag(.cmp_neq).?, .neq), + + //.bool_and => return self.genBoolOp(inst.castTag(.bool_and).?), + //.bool_or => return self.genBoolOp(inst.castTag(.bool_or).?), + //.bit_and => return self.genBitAnd(inst.castTag(.bit_and).?), + //.bit_or => return self.genBitOr(inst.castTag(.bit_or).?), + //.xor => return self.genXor(inst.castTag(.xor).?), + + //.alloc => return self.genAlloc(inst.castTag(.alloc).?), + //.arg => return self.genArg(inst.castTag(.arg).?), + //.assembly => return self.genAsm(inst.castTag(.assembly).?), + //.bitcast => return self.genBitCast(inst.castTag(.bitcast).?), + //.block => return self.genBlock(inst.castTag(.block).?), + //.br => return self.genBr(inst.castTag(.br).?), + //.br_block_flat => return self.genBrBlockFlat(inst.castTag(.br_block_flat).?), + //.breakpoint => return self.genBreakpoint(inst.src), + //.call => return self.genCall(inst.castTag(.call).?), + //.cond_br => return self.genCondBr(inst.castTag(.condbr).?), + //.dbg_stmt => return self.genDbgStmt(inst.castTag(.dbg_stmt).?), + //.floatcast => return self.genFloatCast(inst.castTag(.floatcast).?), + //.intcast => return self.genIntCast(inst.castTag(.intcast).?), + //.is_non_null => return self.genIsNonNull(inst.castTag(.is_non_null).?), + //.is_non_null_ptr => return self.genIsNonNullPtr(inst.castTag(.is_non_null_ptr).?), + //.is_null => return self.genIsNull(inst.castTag(.is_null).?), + //.is_null_ptr => return self.genIsNullPtr(inst.castTag(.is_null_ptr).?), + //.is_non_err => return self.genIsNonErr(inst.castTag(.is_non_err).?), + //.is_non_err_ptr => return self.genIsNonErrPtr(inst.castTag(.is_non_err_ptr).?), + //.is_err => return self.genIsErr(inst.castTag(.is_err).?), + //.is_err_ptr => return self.genIsErrPtr(inst.castTag(.is_err_ptr).?), + //.load => return self.genLoad(inst.castTag(.load).?), + //.loop => return self.genLoop(inst.castTag(.loop).?), + //.not => return self.genNot(inst.castTag(.not).?), + //.ptrtoint => return self.genPtrToInt(inst.castTag(.ptrtoint).?), + //.ref => return self.genRef(inst.castTag(.ref).?), + //.ret => return self.genRet(inst.castTag(.ret).?), + //.store => return self.genStore(inst.castTag(.store).?), + //.struct_field_ptr=> return self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?), + //.switch_br => return self.genSwitch(inst.castTag(.switchbr).?), + //.varptr => return self.genVarPtr(inst.castTag(.varptr).?), + + //.constant => unreachable, // excluded from function bodies + //.unreach => return MCValue{ .unreach = {} }, + + //.optional_payload => return self.genOptionalPayload(inst.castTag(.optional_payload).?), + //.optional_payload_ptr => return self.genOptionalPayloadPtr(inst.castTag(.optional_payload_ptr).?), + //.unwrap_errunion_err => return 
self.genUnwrapErrErr(inst.castTag(.unwrap_errunion_err).?), + //.unwrap_errunion_payload => return self.genUnwrapErrPayload(inst.castTag(.unwrap_errunion_payload).?), + //.unwrap_errunion_err_ptr => return self.genUnwrapErrErrPtr(inst.castTag(.unwrap_errunion_err_ptr).?), + //.unwrap_errunion_payload_ptr=> return self.genUnwrapErrPayloadPtr(inst.castTag(.unwrap_errunion_payload_ptr).?), + + //.wrap_optional => return self.genWrapOptional(inst.castTag(.wrap_optional).?), + //.wrap_errunion_payload => return self.genWrapErrUnionPayload(inst.castTag(.wrap_errunion_payload).?), + //.wrap_errunion_err => return self.genWrapErrUnionErr(inst.castTag(.wrap_errunion_err).?), // zig fmt: on + + else => @panic("TODO finish air memory layout branch, more codegen.zig instructions"), } } @@ -4785,14 +4788,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }; } - fn fail(self: *Self, src: LazySrcLoc, comptime format: []const u8, args: anytype) InnerError { + fn fail(self: *Self, comptime format: []const u8, args: anytype) InnerError { @setCold(true); assert(self.err_msg == null); - const src_loc = if (src != .unneeded) - src.toSrcLocWithDecl(self.mod_fn.owner_decl) - else - self.src_loc; - self.err_msg = try ErrorMsg.create(self.bin_file.allocator, src_loc, format, args); + self.err_msg = try ErrorMsg.create(self.bin_file.allocator, self.src_loc, format, args); return error.CodegenFail; } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 4743494f35..0ee6972654 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -25,7 +25,7 @@ pub const CValue = union(enum) { /// Index into local_names, but take the address. local_ref: usize, /// A constant instruction, to be rendered inline. - constant: *Inst, + constant: Air.Inst.Index, /// Index into the parameters arg: usize, /// By-value @@ -99,7 +99,7 @@ pub const Object = struct { gpa: *mem.Allocator, code: std.ArrayList(u8), value_map: CValueMap, - blocks: std.AutoHashMapUnmanaged(*ir.Inst.Block, BlockData) = .{}, + blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{}, next_arg_index: usize = 0, next_local_index: usize = 0, next_block_index: usize = 0, @@ -133,7 +133,12 @@ pub const Object = struct { .none => unreachable, .local => |i| return w.print("t{d}", .{i}), .local_ref => |i| return w.print("&t{d}", .{i}), - .constant => |inst| return o.dg.renderValue(w, inst.ty, inst.value().?), + .constant => |inst| { + const ty_pl = o.air.instructions.items(.data)[inst].ty_pl; + const ty = o.air.getRefType(ty_pl.ty); + const val = o.air.values[ty_pl.payload]; + return o.dg.renderValue(w, ty, val); + }, .arg => |i| return w.print("a{d}", .{i}), .decl => |decl| return w.writeAll(mem.span(decl.name)), .decl_ref => |decl| return w.print("&{s}", .{decl.name}), @@ -213,8 +218,9 @@ pub const DeclGen = struct { error_msg: ?*Module.ErrorMsg, typedefs: TypedefMap, - fn fail(dg: *DeclGen, src: LazySrcLoc, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } { + fn fail(dg: *DeclGen, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } { @setCold(true); + const src: LazySrcLoc = .{ .node_offset = 0 }; const src_loc = src.toSrcLocWithDecl(dg.decl); dg.error_msg = try Module.ErrorMsg.create(dg.module.gpa, src_loc, format, args); return error.AnalysisFail; @@ -230,7 +236,7 @@ pub const DeclGen = struct { // This should lower to 0xaa bytes in safe modes, and for unsafe modes should // lower to leaving variables uninitialized (that might need to be implemented // outside of this function). 
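For readers tracking the encoding that `getRefType` and `refToIndex` rely on above: `Air.Inst.Ref` values below `typed_value_map.len` name interned types and values, and everything at or above that threshold encodes `typed_value_map.len + instruction_index`. A self-contained model of that arithmetic follows; the 4-entry map size is hypothetical (the real table is much larger), and the function names mirror the ones in the hunk above rather than any public API.

const std = @import("std");

// Model of the Ref <-> Index scheme: the first `map_len` Ref values are
// reserved for interned constants; higher values encode an instruction
// index offset by `map_len` (mirroring `ref_start_index` above).
const map_len: u32 = 4; // hypothetical stand-in for typed_value_map.len

fn indexToRef(inst: u32) u32 {
    return map_len + inst;
}

fn refToIndex(ref: u32) ?u32 {
    return if (ref >= map_len) ref - map_len else null;
}

pub fn main() void {
    // Ref 2 names an interned entry, not an instruction.
    std.debug.assert(refToIndex(2) == null);
    // Instruction 7 round-trips through the Ref encoding.
    std.debug.assert(refToIndex(indexToRef(7)).? == 7);
}

The payoff of the scheme is that common types and values cost no instructions at all, while plain u32 arithmetic recovers the index for everything else.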
- return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement renderValue undef", .{}); + return dg.fail("TODO: C backend: implement renderValue undef", .{}); } switch (t.zigTypeTag()) { .Int => { @@ -440,7 +446,7 @@ pub const DeclGen = struct { }, else => unreachable, }, - else => |e| return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement value {s}", .{ + else => |e| return dg.fail("TODO: C backend: implement value {s}", .{ @tagName(e), }), } @@ -519,14 +525,14 @@ pub const DeclGen = struct { break; } } else { - return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement integer types larger than 128 bits", .{}); + return dg.fail("TODO: C backend: implement integer types larger than 128 bits", .{}); } }, else => unreachable, } }, - .Float => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Float", .{}), + .Float => return dg.fail("TODO: C backend: implement type Float", .{}), .Pointer => { if (t.isSlice()) { @@ -681,7 +687,7 @@ pub const DeclGen = struct { try dg.renderType(w, int_tag_ty); }, - .Union => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Union", .{}), + .Union => return dg.fail("TODO: C backend: implement type Union", .{}), .Fn => { try dg.renderType(w, t.fnReturnType()); try w.writeAll(" (*)("); @@ -704,10 +710,10 @@ pub const DeclGen = struct { } try w.writeByte(')'); }, - .Opaque => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Opaque", .{}), - .Frame => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Frame", .{}), - .AnyFrame => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type AnyFrame", .{}), - .Vector => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Vector", .{}), + .Opaque => return dg.fail("TODO: C backend: implement type Opaque", .{}), + .Frame => return dg.fail("TODO: C backend: implement type Frame", .{}), + .AnyFrame => return dg.fail("TODO: C backend: implement type AnyFrame", .{}), + .Vector => return dg.fail("TODO: C backend: implement type Vector", .{}), .Null, .Undefined, @@ -760,7 +766,8 @@ pub fn genDecl(o: *Object) !void { try o.dg.renderFunctionSignature(o.writer(), is_global); try o.writer().writeByte(' '); - try genBody(o, func.body); + const main_body = o.air.getMainBody(); + try genBody(o, main_body); try o.indent_writer.insertNewline(); return; @@ -833,9 +840,9 @@ pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void { } } -pub fn genBody(o: *Object, body: ir.Body) error{ AnalysisFail, OutOfMemory }!void { +fn genBody(o: *Object, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfMemory }!void { const writer = o.writer(); - if (body.instructions.len == 0) { + if (body.len == 0) { try writer.writeAll("{}"); return; } @@ -843,82 +850,85 @@ pub fn genBody(o: *Object, body: ir.Body) error{ AnalysisFail, OutOfMemory }!voi try writer.writeAll("{\n"); o.indent_writer.pushIndent(); - for (body.instructions) |inst| { - const result_value = switch (inst.tag) { - // TODO use a different strategy for add that communicates to the optimizer - // that wrapping is UB. - .add => try genBinOp(o, inst.castTag(.add).?, " + "), - .addwrap => try genWrapOp(o, inst.castTag(.addwrap).?, " + ", "addw_"), - // TODO use a different strategy for sub that communicates to the optimizer - // that wrapping is UB. 
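The `fail` cleanup running through this file repeats one mechanical pattern: every call site was passing `.{ .node_offset = 0 }`, so the location parameter is dropped and the helper synthesizes it. A reduced sketch of the resulting shape, with a stand-in `LazySrcLoc` and a debug print where the real code builds an `ErrorMsg`:

const std = @import("std");

const LazySrcLoc = union(enum) { node_offset: i32 };

// After the refactor: callers pass only the message. The helper supplies
// the source location that every caller was passing anyway.
fn fail(comptime format: []const u8, args: anytype) error{AnalysisFail} {
    const src: LazySrcLoc = .{ .node_offset = 0 };
    std.debug.print("error at node_offset {d}: ", .{src.node_offset});
    std.debug.print(format ++ "\n", args);
    return error.AnalysisFail;
}

pub fn main() void {
    const err = fail("TODO: C backend: implement type {s}", .{"Float"});
    std.debug.assert(err == error.AnalysisFail);
}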
- .sub => try genBinOp(o, inst.castTag(.sub).?, " - "), - .subwrap => try genWrapOp(o, inst.castTag(.subwrap).?, " - ", "subw_"), - // TODO use a different strategy for mul that communicates to the optimizer - // that wrapping is UB. - .mul => try genBinOp(o, inst.castTag(.sub).?, " * "), - .mulwrap => try genWrapOp(o, inst.castTag(.mulwrap).?, " * ", "mulw_"), - // TODO use a different strategy for div that communicates to the optimizer - // that wrapping is UB. - .div => try genBinOp(o, inst.castTag(.div).?, " / "), - - .constant => unreachable, // excluded from function bodies - .alloc => try genAlloc(o, inst.castTag(.alloc).?), - .arg => genArg(o), - .assembly => try genAsm(o, inst.castTag(.assembly).?), - .block => try genBlock(o, inst.castTag(.block).?), - .bitcast => try genBitcast(o, inst.castTag(.bitcast).?), - .breakpoint => try genBreakpoint(o, inst.castTag(.breakpoint).?), - .call => try genCall(o, inst.castTag(.call).?), - .cmp_eq => try genBinOp(o, inst.castTag(.cmp_eq).?, " == "), - .cmp_gt => try genBinOp(o, inst.castTag(.cmp_gt).?, " > "), - .cmp_gte => try genBinOp(o, inst.castTag(.cmp_gte).?, " >= "), - .cmp_lt => try genBinOp(o, inst.castTag(.cmp_lt).?, " < "), - .cmp_lte => try genBinOp(o, inst.castTag(.cmp_lte).?, " <= "), - .cmp_neq => try genBinOp(o, inst.castTag(.cmp_neq).?, " != "), - .dbg_stmt => try genDbgStmt(o, inst.castTag(.dbg_stmt).?), - .intcast => try genIntCast(o, inst.castTag(.intcast).?), - .load => try genLoad(o, inst.castTag(.load).?), - .ret => try genRet(o, inst.castTag(.ret).?), - .retvoid => try genRetVoid(o), - .store => try genStore(o, inst.castTag(.store).?), - .unreach => try genUnreach(o, inst.castTag(.unreach).?), - .loop => try genLoop(o, inst.castTag(.loop).?), - .condbr => try genCondBr(o, inst.castTag(.condbr).?), - .br => try genBr(o, inst.castTag(.br).?), - .br_void => try genBrVoid(o, inst.castTag(.br_void).?.block), - .switchbr => try genSwitchBr(o, inst.castTag(.switchbr).?), - // bool_and and bool_or are non-short-circuit operations - .bool_and => try genBinOp(o, inst.castTag(.bool_and).?, " & "), - .bool_or => try genBinOp(o, inst.castTag(.bool_or).?, " | "), - .bit_and => try genBinOp(o, inst.castTag(.bit_and).?, " & "), - .bit_or => try genBinOp(o, inst.castTag(.bit_or).?, " | "), - .xor => try genBinOp(o, inst.castTag(.xor).?, " ^ "), - .not => try genUnOp(o, inst.castTag(.not).?, "!"), - .is_null => try genIsNull(o, inst.castTag(.is_null).?), - .is_non_null => try genIsNull(o, inst.castTag(.is_non_null).?), - .is_null_ptr => try genIsNull(o, inst.castTag(.is_null_ptr).?), - .is_non_null_ptr => try genIsNull(o, inst.castTag(.is_non_null_ptr).?), - .wrap_optional => try genWrapOptional(o, inst.castTag(.wrap_optional).?), - .optional_payload => try genOptionalPayload(o, inst.castTag(.optional_payload).?), - .optional_payload_ptr => try genOptionalPayload(o, inst.castTag(.optional_payload_ptr).?), - .ref => try genRef(o, inst.castTag(.ref).?), - .struct_field_ptr => try genStructFieldPtr(o, inst.castTag(.struct_field_ptr).?), - - .is_err => try genIsErr(o, inst.castTag(.is_err).?, "", ".", "!="), - .is_non_err => try genIsErr(o, inst.castTag(.is_non_err).?, "", ".", "=="), - .is_err_ptr => try genIsErr(o, inst.castTag(.is_err_ptr).?, "*", "->", "!="), - .is_non_err_ptr => try genIsErr(o, inst.castTag(.is_non_err_ptr).?, "*", "->", "=="), - - .unwrap_errunion_payload => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload).?), - .unwrap_errunion_err => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err).?), 
- .unwrap_errunion_payload_ptr => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload_ptr).?), - .unwrap_errunion_err_ptr => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err_ptr).?), - .wrap_errunion_payload => try genWrapErrUnionPay(o, inst.castTag(.wrap_errunion_payload).?), - .wrap_errunion_err => try genWrapErrUnionErr(o, inst.castTag(.wrap_errunion_err).?), - .br_block_flat => return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement codegen for br_block_flat", .{}), - .ptrtoint => return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement codegen for ptrtoint", .{}), - .varptr => try genVarPtr(o, inst.castTag(.varptr).?), - .floatcast => return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement codegen for floatcast", .{}), + const air_tags = o.air.instructions.items(.tag); + + for (body) |inst| { + const result_value = switch (air_tags[inst]) { + //// TODO use a different strategy for add that communicates to the optimizer + //// that wrapping is UB. + //.add => try genBinOp(o, inst.castTag(.add).?, " + "), + //.addwrap => try genWrapOp(o, inst.castTag(.addwrap).?, " + ", "addw_"), + //// TODO use a different strategy for sub that communicates to the optimizer + //// that wrapping is UB. + //.sub => try genBinOp(o, inst.castTag(.sub).?, " - "), + //.subwrap => try genWrapOp(o, inst.castTag(.subwrap).?, " - ", "subw_"), + //// TODO use a different strategy for mul that communicates to the optimizer + //// that wrapping is UB. + //.mul => try genBinOp(o, inst.castTag(.sub).?, " * "), + //.mulwrap => try genWrapOp(o, inst.castTag(.mulwrap).?, " * ", "mulw_"), + //// TODO use a different strategy for div that communicates to the optimizer + //// that wrapping is UB. + //.div => try genBinOp(o, inst.castTag(.div).?, " / "), + + //.constant => unreachable, // excluded from function bodies + //.alloc => try genAlloc(o, inst.castTag(.alloc).?), + //.arg => genArg(o), + //.assembly => try genAsm(o, inst.castTag(.assembly).?), + //.block => try genBlock(o, inst.castTag(.block).?), + //.bitcast => try genBitcast(o, inst.castTag(.bitcast).?), + //.breakpoint => try genBreakpoint(o, inst.castTag(.breakpoint).?), + //.call => try genCall(o, inst.castTag(.call).?), + //.cmp_eq => try genBinOp(o, inst.castTag(.cmp_eq).?, " == "), + //.cmp_gt => try genBinOp(o, inst.castTag(.cmp_gt).?, " > "), + //.cmp_gte => try genBinOp(o, inst.castTag(.cmp_gte).?, " >= "), + //.cmp_lt => try genBinOp(o, inst.castTag(.cmp_lt).?, " < "), + //.cmp_lte => try genBinOp(o, inst.castTag(.cmp_lte).?, " <= "), + //.cmp_neq => try genBinOp(o, inst.castTag(.cmp_neq).?, " != "), + //.dbg_stmt => try genDbgStmt(o, inst.castTag(.dbg_stmt).?), + //.intcast => try genIntCast(o, inst.castTag(.intcast).?), + //.load => try genLoad(o, inst.castTag(.load).?), + //.ret => try genRet(o, inst.castTag(.ret).?), + //.retvoid => try genRetVoid(o), + //.store => try genStore(o, inst.castTag(.store).?), + //.unreach => try genUnreach(o, inst.castTag(.unreach).?), + //.loop => try genLoop(o, inst.castTag(.loop).?), + //.condbr => try genCondBr(o, inst.castTag(.condbr).?), + //.br => try genBr(o, inst.castTag(.br).?), + //.br_void => try genBrVoid(o, inst.castTag(.br_void).?.block), + //.switchbr => try genSwitchBr(o, inst.castTag(.switchbr).?), + //// bool_and and bool_or are non-short-circuit operations + //.bool_and => try genBinOp(o, inst.castTag(.bool_and).?, " & "), + //.bool_or => try genBinOp(o, inst.castTag(.bool_or).?, " | "), + //.bit_and => try genBinOp(o, 
inst.castTag(.bit_and).?, " & "), + //.bit_or => try genBinOp(o, inst.castTag(.bit_or).?, " | "), + //.xor => try genBinOp(o, inst.castTag(.xor).?, " ^ "), + //.not => try genUnOp(o, inst.castTag(.not).?, "!"), + //.is_null => try genIsNull(o, inst.castTag(.is_null).?), + //.is_non_null => try genIsNull(o, inst.castTag(.is_non_null).?), + //.is_null_ptr => try genIsNull(o, inst.castTag(.is_null_ptr).?), + //.is_non_null_ptr => try genIsNull(o, inst.castTag(.is_non_null_ptr).?), + //.wrap_optional => try genWrapOptional(o, inst.castTag(.wrap_optional).?), + //.optional_payload => try genOptionalPayload(o, inst.castTag(.optional_payload).?), + //.optional_payload_ptr => try genOptionalPayload(o, inst.castTag(.optional_payload_ptr).?), + //.ref => try genRef(o, inst.castTag(.ref).?), + //.struct_field_ptr => try genStructFieldPtr(o, inst.castTag(.struct_field_ptr).?), + + //.is_err => try genIsErr(o, inst.castTag(.is_err).?, "", ".", "!="), + //.is_non_err => try genIsErr(o, inst.castTag(.is_non_err).?, "", ".", "=="), + //.is_err_ptr => try genIsErr(o, inst.castTag(.is_err_ptr).?, "*", "->", "!="), + //.is_non_err_ptr => try genIsErr(o, inst.castTag(.is_non_err_ptr).?, "*", "->", "=="), + + //.unwrap_errunion_payload => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload).?), + //.unwrap_errunion_err => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err).?), + //.unwrap_errunion_payload_ptr => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload_ptr).?), + //.unwrap_errunion_err_ptr => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err_ptr).?), + //.wrap_errunion_payload => try genWrapErrUnionPay(o, inst.castTag(.wrap_errunion_payload).?), + //.wrap_errunion_err => try genWrapErrUnionErr(o, inst.castTag(.wrap_errunion_err).?), + //.br_block_flat => return o.dg.fail("TODO: C backend: implement codegen for br_block_flat", .{}), + //.ptrtoint => return o.dg.fail("TODO: C backend: implement codegen for ptrtoint", .{}), + //.varptr => try genVarPtr(o, inst.castTag(.varptr).?), + //.floatcast => return o.dg.fail("TODO: C backend: implement codegen for floatcast", .{}), + else => return o.dg.fail("TODO: C backend: rework AIR memory layout", .{}), }; switch (result_value) { .none => {}, @@ -1060,7 +1070,7 @@ fn genWrapOp(o: *Object, inst: *Inst.BinOp, str_op: [*:0]const u8, fn_op: [*:0]c } if (bits > 64) { - return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: genWrapOp for large integers", .{}); + return o.dg.fail("TODO: C backend: genWrapOp for large integers", .{}); } var min_buf: [80]u8 = undefined; @@ -1227,7 +1237,7 @@ fn genCall(o: *Object, inst: *Inst.Call) !CValue { try writer.writeAll(");\n"); return result_local; } else { - return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement function pointers", .{}); + return o.dg.fail("TODO: C backend: implement function pointers", .{}); } } @@ -1390,13 +1400,13 @@ fn genAsm(o: *Object, as: *Inst.Assembly) !CValue { try o.writeCValue(writer, arg_c_value); try writer.writeAll(";\n"); } else { - return o.dg.fail(.{ .node_offset = 0 }, "TODO non-explicit inline asm regs", .{}); + return o.dg.fail("TODO non-explicit inline asm regs", .{}); } } const volatile_string: []const u8 = if (as.is_volatile) "volatile " else ""; try writer.print("__asm {s}(\"{s}\"", .{ volatile_string, as.asm_source }); if (as.output_constraint) |_| { - return o.dg.fail(.{ .node_offset = 0 }, "TODO: CBE inline asm output", .{}); + return o.dg.fail("TODO: CBE inline asm output", .{}); } if (as.inputs.len > 0) { if 
(as.output_constraint == null) { @@ -1421,7 +1431,7 @@ fn genAsm(o: *Object, as: *Inst.Assembly) !CValue { if (as.base.isUnused()) return CValue.none; - return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: inline asm expression result used", .{}); + return o.dg.fail("TODO: C backend: inline asm expression result used", .{}); } fn genIsNull(o: *Object, inst: *Inst.UnOp) !CValue { diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 0d05b97846..c93f04f618 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2519,6 +2519,9 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); + var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator); + defer dbg_line_buffer.deinit(); + var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator); defer dbg_info_buffer.deinit(); diff --git a/src/value.zig b/src/value.zig index 48cd6fffc4..0f7194d8c1 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1700,7 +1700,7 @@ pub const Value = extern union { /// peer type resolution. This is stored in a separate list so that /// the items are contiguous in memory and thus can be passed to /// `Module.resolvePeerTypes`. - stored_inst_list: std.ArrayListUnmanaged(*ir.Inst) = .{}, + stored_inst_list: std.ArrayListUnmanaged(Air.Inst.Index) = .{}, }, }; -- cgit v1.2.3 From 3a41e4430eae16e5aa739b7a71b1fded1f1029e3 Mon Sep 17 00:00:00 2001 From: Jacob G-W Date: Tue, 13 Jul 2021 20:38:55 -0400 Subject: codegen: add FnResult type which is a Result that removes externally_managed --- src/codegen.zig | 17 +++++++++++------ src/link/Elf.zig | 1 - 2 files changed, 11 insertions(+), 7 deletions(-) (limited to 'src/link') diff --git a/src/codegen.zig b/src/codegen.zig index c27a1444ef..1495b19673 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -23,6 +23,11 @@ const RegisterManager = @import("register_manager.zig").RegisterManager; const X8664Encoder = @import("codegen/x86_64.zig").Encoder; +pub const FnResult = union(enum) { + /// The `code` parameter passed to `generateSymbol` has the value appended. + appended: void, + fail: *ErrorMsg, +}; pub const Result = union(enum) { /// The `code` parameter passed to `generateSymbol` has the value appended. appended: void, @@ -54,7 +59,7 @@ pub fn generateFunction( liveness: Liveness, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, -) GenerateSymbolError!Result { +) GenerateSymbolError!FnResult { switch (bin_file.options.target.cpu.arch) { .wasm32 => unreachable, // has its own code path .wasm64 => unreachable, // has its own code path @@ -451,7 +456,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { liveness: Liveness, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, - ) GenerateSymbolError!Result { + ) GenerateSymbolError!FnResult { if (build_options.skip_non_native and std.Target.current.cpu.arch != arch) { @panic("Attempted to compile for architecture that was disabled by build configuration"); } @@ -495,7 +500,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { defer function.exitlude_jump_relocs.deinit(bin_file.allocator); var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) { - error.CodegenFail => return Result{ .fail = function.err_msg.? }, + error.CodegenFail => return FnResult{ .fail = function.err_msg.? 
}, else => |e| return e, }; defer call_info.deinit(&function); @@ -506,14 +511,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { function.max_end_stack = call_info.stack_byte_count; function.gen() catch |err| switch (err) { - error.CodegenFail => return Result{ .fail = function.err_msg.? }, + error.CodegenFail => return FnResult{ .fail = function.err_msg.? }, else => |e| return e, }; if (function.err_msg) |em| { - return Result{ .fail = em }; + return FnResult{ .fail = em }; } else { - return Result{ .appended = {} }; + return FnResult{ .appended = {} }; } } diff --git a/src/link/Elf.zig b/src/link/Elf.zig index c93f04f618..815c0c9f23 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2363,7 +2363,6 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven }, }); const code = switch (res) { - .externally_managed => |x| x, .appended => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; -- cgit v1.2.3 From 91b1896184cc89e21d12dd246ce7d658b6d3f365 Mon Sep 17 00:00:00 2001 From: Jacob G-W Date: Tue, 13 Jul 2021 20:40:29 -0400 Subject: plan9 linker: make more incremental The incrementalness is now roughly the same as the c backend rather than the spirv backend before. --- src/link/Plan9.zig | 321 +++++++++++++++++++++++++++-------------------------- 1 file changed, 166 insertions(+), 155 deletions(-) (limited to 'src/link') diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index bc044ce414..9b123f56aa 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -25,20 +25,22 @@ sixtyfour_bit: bool, error_flags: File.ErrorFlags = File.ErrorFlags{}, bases: Bases, -decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, void) = .{}, -/// is just casted down when 32 bit +/// A symbol's value is just casted down when compiling +/// for a 32 bit target. syms: std.ArrayListUnmanaged(aout.Sym) = .{}, -text_buf: std.ArrayListUnmanaged(u8) = .{}, -data_buf: std.ArrayListUnmanaged(u8) = .{}, + +fn_decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, []const u8) = .{}, +data_decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, []const u8) = .{}, hdr: aout.ExecHdr = undefined, entry_decl: ?*Module.Decl = null, -got: std.ArrayListUnmanaged(u64) = .{}, +got_len: u64 = 0, + const Bases = struct { text: u64, - /// the addr of the got + /// the Global Offset Table starts at the beginning of the data section data: u64, }; @@ -49,14 +51,6 @@ fn getAddr(self: Plan9, addr: u64, t: aout.Sym.Type) u64 { else => unreachable, }; } -/// opposite of getAddr -fn takeAddr(self: Plan9, addr: u64, t: aout.Sym.Type) u64 { - return addr - switch (t) { - .T, .t, .l, .L => self.bases.text, - .D, .d, .B, .b => self.bases.data, - else => unreachable, - }; -} fn getSymAddr(self: Plan9, s: aout.Sym) u64 { return self.getAddr(s.value, s.type); @@ -127,18 +121,80 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv if (build_options.skip_non_native and builtin.object_format != .plan9) { @panic("Attempted to compile for object format that was disabled by build configuration"); } - _ = module; - // Keep track of all decls so we can iterate over them on flush(). 
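The two new tables give each decl an owned copy of its generated machine code, freed again in `deinit`. A toy version of that ownership discipline, using the same unmanaged map type and the `*Allocator` style of this branch; the decl key is reduced to a plain integer id for the example:

const std = @import("std");

// Toy fn_decl_table: maps a decl id to an owned slice of generated code.
const DeclTable = std.AutoArrayHashMapUnmanaged(u32, []const u8);

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = &gpa_state.allocator;

    var table: DeclTable = .{};
    defer {
        // Mirrors Plan9.deinit: free every owned slice, then the table.
        var it = table.iterator();
        while (it.next()) |entry| gpa.free(entry.value_ptr.*);
        table.deinit(gpa);
    }

    // Mirrors updateFunc/updateDecl: dupe the codegen buffer, store it.
    const code = try std.mem.dupe(gpa, u8, &[_]u8{ 0x55, 0xc3 });
    try table.put(gpa, 1, code);
    std.debug.assert(table.count() == 1);
}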
- _ = try self.decl_table.getOrPut(self.base.allocator, func.owner_decl); - _ = air; - _ = liveness; - @panic("TODO Plan9 needs to keep track of Air and Liveness so it can use them later"); + const decl = func.owner_decl; + log.debug("codegen decl {*} ({s})", .{ decl, decl.name }); + + var code_buffer = std.ArrayList(u8).init(self.base.allocator); + defer code_buffer.deinit(); + const res = try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{ .none = .{} }); + const code = switch (res) { + .appended => code_buffer.toOwnedSlice(), + .fail => |em| { + decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, decl, em); + return; + }, + }; + try self.fn_decl_table.put(self.base.allocator, decl, code); + return self.updateFinish(decl); } pub fn updateDecl(self: *Plan9, module: *Module, decl: *Module.Decl) !void { - _ = module; - _ = try self.decl_table.getOrPut(self.base.allocator, decl); + if (decl.val.tag() == .extern_fn) { + return; // TODO Should we do more when front-end analyzed extern decl? + } + if (decl.val.castTag(.variable)) |payload| { + const variable = payload.data; + if (variable.is_extern) { + return; // TODO Should we do more when front-end analyzed extern decl? + } + } + + log.debug("codegen decl {*} ({s})", .{ decl, decl.name }); + + var code_buffer = std.ArrayList(u8).init(self.base.allocator); + defer code_buffer.deinit(); + const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; + const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ + .ty = decl.ty, + .val = decl_val, + }, &code_buffer, .{ .none = .{} }); + const code = switch (res) { + .externally_managed => |x| x, + .appended => code_buffer.items, + .fail => |em| { + decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, decl, em); + return; + }, + }; + var duped_code = try std.mem.dupe(self.base.allocator, u8, code); + errdefer self.base.allocator.free(duped_code); + try self.data_decl_table.put(self.base.allocator, decl, duped_code); + return self.updateFinish(decl); +} +/// called at the end of update{Decl,Func} +fn updateFinish(self: *Plan9, decl: *Module.Decl) !void { + const is_fn = (decl.ty.zigTypeTag() == .Fn); + log.debug("update the symbol table and got for decl {*} ({s})", .{ decl, decl.name }); + const sym_t: aout.Sym.Type = if (is_fn) .t else .d; + // write the internal linker metadata + decl.link.plan9.type = sym_t; + // write the symbol + // we already have the got index because that got allocated in allocateDeclIndexes + const sym: aout.Sym = .{ + .value = undefined, // the value of stuff gets filled in in flushModule + .type = decl.link.plan9.type, + .name = mem.span(decl.name), + }; + + if (decl.link.plan9.sym_index) |s| { + self.syms.items[s] = sym; + } else { + try self.syms.append(self.base.allocator, sym); + decl.link.plan9.sym_index = self.syms.items.len - 1; + } } pub fn flush(self: *Plan9, comp: *Compilation) !void { @@ -165,160 +221,107 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void { defer assert(self.hdr.entry != 0x0); - const module = self.base.options.module orelse return error.LinkingWithoutZigSourceUnimplemented; + _ = self.base.options.module orelse return error.LinkingWithoutZigSourceUnimplemented; - self.text_buf.items.len = 0; - self.data_buf.items.len = 0; - // ensure space to write the got later - assert(self.got.items.len == self.decl_table.count()); - try self.data_buf.appendNTimes(self.base.allocator, 0x69, self.got.items.len * if 
(!self.sixtyfour_bit) @as(u32, 4) else 8); - // temporary buffer - var code_buffer = std.ArrayList(u8).init(self.base.allocator); - defer code_buffer.deinit(); - { - for (self.decl_table.keys()) |decl| { - if (!decl.has_tv) continue; - const is_fn = (decl.ty.zigTypeTag() == .Fn); - - log.debug("update the symbol table and got for decl {*} ({s})", .{ decl, decl.name }); - decl.link.plan9 = if (is_fn) .{ - .offset = self.getAddr(self.text_buf.items.len, .t), - .type = .t, - .sym_index = decl.link.plan9.sym_index, - .got_index = decl.link.plan9.got_index, - } else .{ - .offset = self.getAddr(self.data_buf.items.len, .d), - .type = .d, - .sym_index = decl.link.plan9.sym_index, - .got_index = decl.link.plan9.got_index, - }; - self.got.items[decl.link.plan9.got_index.?] = decl.link.plan9.offset.?; - if (decl.link.plan9.sym_index) |s| { - self.syms.items[s] = .{ - .value = decl.link.plan9.offset.?, - .type = decl.link.plan9.type, - .name = mem.span(decl.name), - }; - } else { - try self.syms.append(self.base.allocator, .{ - .value = decl.link.plan9.offset.?, - .type = decl.link.plan9.type, - .name = mem.span(decl.name), - }); - decl.link.plan9.sym_index = self.syms.items.len - 1; - } + assert(self.got_len == self.fn_decl_table.count() + self.data_decl_table.count()); + const got_size = self.got_len * if (!self.sixtyfour_bit) @as(u32, 4) else 8; + var got_table = try self.base.allocator.alloc(u8, got_size); + defer self.base.allocator.free(got_table); - if (module.decl_exports.get(decl)) |exports| { - for (exports) |exp| { - // plan9 does not support custom sections - if (exp.options.section) |section_name| { - if (!mem.eql(u8, section_name, ".text") or !mem.eql(u8, section_name, ".data")) { - try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "plan9 does not support extra sections", .{})); - break; - } - } - if (std.mem.eql(u8, exp.options.name, "_start")) { - assert(decl.link.plan9.type == .t); // we tried to link a non-function as the entry - self.entry_decl = decl; - } - if (exp.link.plan9) |i| { - self.syms.items[i] = .{ - .value = decl.link.plan9.offset.?, - .type = decl.link.plan9.type.toGlobal(), - .name = exp.options.name, - }; - } else { - try self.syms.append(self.base.allocator, .{ - .value = decl.link.plan9.offset.?, - .type = decl.link.plan9.type.toGlobal(), - .name = exp.options.name, - }); - exp.link.plan9 = self.syms.items.len - 1; - } - } - } + // + 2 for header, got, symbols + var iovecs = try self.base.allocator.alloc(std.os.iovec_const, self.fn_decl_table.count() + self.data_decl_table.count() + 3); + + const file = self.base.file.?; - log.debug("codegen decl {*} ({s})", .{ decl, decl.name }); - const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ - .ty = decl.ty, - .val = decl.val, - }, &code_buffer, .{ .none = {} }); - const code = switch (res) { - .externally_managed => |x| x, - .appended => code_buffer.items, - .fail => |em| { - decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl, em); - // TODO try to do more decls - return; - }, - }; - if (is_fn) { - try self.text_buf.appendSlice(self.base.allocator, code); - code_buffer.items.len = 0; + var hdr_buf: [40]u8 = undefined; + // account for the fat header + const hdr_size = if (self.sixtyfour_bit) @as(usize, 40) else 32; + const hdr_slice: []u8 = hdr_buf[0..hdr_size]; + var foff = hdr_size; + iovecs[0] = .{ .iov_base = hdr_slice.ptr, .iov_len = hdr_slice.len }; + var iovecs_i: u64 = 1; + var text_i: u64 = 0; + // text + { 
+ var it = self.fn_decl_table.iterator(); + while (it.next()) |entry| { + const decl = entry.key_ptr.*; + const code = entry.value_ptr.*; + foff += code.len; + text_i += code.len; + iovecs[iovecs_i] = .{ .iov_base = code.ptr, .iov_len = code.len }; + iovecs_i += 1; + const off = self.getAddr(text_i, .t); + decl.link.plan9.offset = off; + if (!self.sixtyfour_bit) { + mem.writeIntNative(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off)); + mem.writeInt(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian()); } else { - try self.data_buf.appendSlice(self.base.allocator, code); - code_buffer.items.len = 0; + mem.writeInt(u64, got_table[decl.link.plan9.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian()); } + self.syms.items[decl.link.plan9.sym_index.?].value = off; } + // etext symbol + self.syms.items[2].value = self.getAddr(text_i, .t); } - - // write the got - if (!self.sixtyfour_bit) { - for (self.got.items) |p, i| { - mem.writeInt(u32, self.data_buf.items[i * 4 ..][0..4], @intCast(u32, p), self.base.options.target.cpu.arch.endian()); - } - } else { - for (self.got.items) |p, i| { - mem.writeInt(u64, self.data_buf.items[i * 8 ..][0..8], p, self.base.options.target.cpu.arch.endian()); + // data + var data_i: u64 = got_size; + { + var it = self.data_decl_table.iterator(); + while (it.next()) |entry| { + const decl = entry.key_ptr.*; + const code = entry.value_ptr.*; + foff += code.len; + data_i += code.len; + iovecs[iovecs_i] = .{ .iov_base = code.ptr, .iov_len = code.len }; + iovecs_i += 1; + const off = self.getAddr(data_i, .d); + decl.link.plan9.offset = off; + if (!self.sixtyfour_bit) { + mem.writeInt(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian()); + } else { + mem.writeInt(u64, got_table[decl.link.plan9.got_index.? 
* 8 ..][0..8], off, self.base.options.target.cpu.arch.endian()); + } + self.syms.items[decl.link.plan9.sym_index.?].value = off; } + // edata symbol + self.syms.items[0].value = self.getAddr(data_i, .b); } - - self.hdr.entry = @truncate(u32, self.entry_decl.?.link.plan9.offset.?); - - // edata, end, etext - self.syms.items[0].value = self.getAddr(0x0, .b); + // edata self.syms.items[1].value = self.getAddr(0x0, .b); - self.syms.items[2].value = self.getAddr(self.text_buf.items.len, .t); - var sym_buf = std.ArrayList(u8).init(self.base.allocator); defer sym_buf.deinit(); try self.writeSyms(&sym_buf); - + iovecs[iovecs_i] = .{ .iov_base = got_table.ptr, .iov_len = got_table.len }; + iovecs_i += 1; + assert(2 + self.fn_decl_table.count() + self.data_decl_table.count() == iovecs_i); // we didn't write all the decls + iovecs[iovecs_i] = .{ .iov_base = sym_buf.items.ptr, .iov_len = sym_buf.items.len }; + iovecs_i += 1; // generate the header self.hdr = .{ .magic = try aout.magicFromArch(self.base.options.target.cpu.arch), - .text = @intCast(u32, self.text_buf.items.len), - .data = @intCast(u32, self.data_buf.items.len), + .text = @intCast(u32, text_i), + .data = @intCast(u32, data_i), .syms = @intCast(u32, sym_buf.items.len), .bss = 0, .pcsz = 0, .spsz = 0, - .entry = self.hdr.entry, + .entry = @intCast(u32, self.entry_decl.?.link.plan9.offset.?), }; - - const file = self.base.file.?; - - var hdr_buf = self.hdr.toU8s(); - const hdr_slice: []const u8 = &hdr_buf; - // account for the fat header - const hdr_size: u8 = if (!self.sixtyfour_bit) 32 else 40; + std.mem.copy(u8, hdr_slice, self.hdr.toU8s()[0..hdr_size]); // write the fat header for 64 bit entry points if (self.sixtyfour_bit) { - mem.writeIntSliceBig(u64, hdr_buf[32..40], self.hdr.entry); + mem.writeIntSliceBig(u64, hdr_buf[32..40], self.entry_decl.?.link.plan9.offset.?); } // write it all! 
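The flush path here collapses into a single vectored write: the header, each decl's code, the GOT, and the symbol table each contribute one iovec, and `pwritevAll` lays them out back to back at offset 0. A minimal standalone demonstration of that std API; the payload bytes and the temp-dir setup are made up for the example:

const std = @import("std");

pub fn main() !void {
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();

    const file = try tmp.dir.createFile("a.out", .{});
    defer file.close();

    const hdr = [_]u8{ 0x00, 0x00, 0x01, 0x0b }; // fake header bytes
    const text: []const u8 = "fake text segment";
    const syms: []const u8 = "fake symbol table";

    // One iovec per logical section, written contiguously in one call,
    // the same way flushModule assembles header/code/got/symbols.
    var iovecs = [_]std.os.iovec_const{
        .{ .iov_base = &hdr, .iov_len = hdr.len },
        .{ .iov_base = text.ptr, .iov_len = text.len },
        .{ .iov_base = syms.ptr, .iov_len = syms.len },
    };
    try file.pwritevAll(&iovecs, 0);
}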
- var vectors: [4]std.os.iovec_const = .{ - .{ .iov_base = hdr_slice.ptr, .iov_len = hdr_size }, - .{ .iov_base = self.text_buf.items.ptr, .iov_len = self.text_buf.items.len }, - .{ .iov_base = self.data_buf.items.ptr, .iov_len = self.data_buf.items.len }, - .{ .iov_base = sym_buf.items.ptr, .iov_len = sym_buf.items.len }, - // TODO spsz, pcsz - }; - try file.pwritevAll(&vectors, 0); + try file.pwritevAll(iovecs, 0); } pub fn freeDecl(self: *Plan9, decl: *Module.Decl) void { - assert(self.decl_table.swapRemove(decl)); + const is_fn = (decl.ty.zigTypeTag() == .Fn); + if (is_fn) + assert(self.fn_decl_table.swapRemove(decl)) + else + assert(self.data_decl_table.swapRemove(decl)); } pub fn updateDeclExports( @@ -334,11 +337,17 @@ pub fn updateDeclExports( _ = exports; } pub fn deinit(self: *Plan9) void { - self.decl_table.deinit(self.base.allocator); + var itf = self.fn_decl_table.iterator(); + while (itf.next()) |entry| { + self.base.allocator.free(entry.value_ptr.*); + } + self.fn_decl_table.deinit(self.base.allocator); + var itd = self.data_decl_table.iterator(); + while (itd.next()) |entry| { + self.base.allocator.free(entry.value_ptr.*); + } + self.data_decl_table.deinit(self.base.allocator); self.syms.deinit(self.base.allocator); - self.text_buf.deinit(self.base.allocator); - self.data_buf.deinit(self.base.allocator); - self.got.deinit(self.base.allocator); } pub const Export = ?usize; @@ -397,6 +406,8 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { } pub fn allocateDeclIndexes(self: *Plan9, decl: *Module.Decl) !void { - try self.got.append(self.base.allocator, 0xdeadbeef); - decl.link.plan9.got_index = self.got.items.len - 1; + if (decl.link.plan9.got_index != null) { + self.got_len += 1; + decl.link.plan9.got_index = self.got_len - 1; + } } -- cgit v1.2.3 From 424f260f850cb22637888bbfdf5bfaf9c08a4dae Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Fri, 16 Jul 2021 14:48:51 +0200 Subject: Fix wasm-related compile errors: - Update `fail()` to not require a `srcLoc`. This brings it in line with other backends, and we were always passing 'node_offset = 0', anyway. - Fix unused local due to change of architecture wrt function/decl generation. - Replace all old instructions to indexes within the function signatures. 
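On the third point: tables that used to be keyed on `*Inst` pointers are now keyed on plain `Air.Inst.Index` integers, so they stay meaningful with the new array-based AIR storage. A reduced model of the `ValueTable` change; the two-variant `WValue` and the `InstIndex` alias are stand-ins for the real types:

const std = @import("std");

const InstIndex = u32; // stand-in for Air.Inst.Index

const WValue = union(enum) {
    none,
    local: u32,
};

// WValues are looked up by instruction index rather than by pointer.
const ValueTable = std.AutoHashMapUnmanaged(InstIndex, WValue);

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = &gpa_state.allocator;

    var values: ValueTable = .{};
    defer values.deinit(gpa);

    try values.putNoClobber(gpa, 0, .{ .local = 1 });
    std.debug.assert(values.get(0).?.local == 1);
}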
--- src/codegen/wasm.zig | 221 +++++++++++++++++++++++++-------------------------- src/link/Wasm.zig | 17 ++-- 2 files changed, 118 insertions(+), 120 deletions(-) (limited to 'src/link') diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig index 912577a358..33ab07faf3 100644 --- a/src/codegen/wasm.zig +++ b/src/codegen/wasm.zig @@ -25,7 +25,7 @@ const WValue = union(enum) { /// Index of the local variable local: u32, /// Instruction holding a constant `Value` - constant: *Inst, + constant: Air.Inst.Index, /// Offset position in the list of bytecode instructions code_offset: usize, /// Used for variables that create multiple locals on the stack when allocated @@ -484,7 +484,7 @@ pub const Result = union(enum) { }; /// Hashmap to store generated `WValue` for each `Inst` -pub const ValueTable = std.AutoHashMapUnmanaged(*Inst, WValue); +pub const ValueTable = std.AutoHashMapUnmanaged(Air.Inst.Index, WValue); /// Code represents the `Code` section of wasm that /// belongs to a function @@ -497,8 +497,8 @@ pub const Context = struct { gpa: *mem.Allocator, /// Table to save `WValue`'s generated by an `Inst` values: ValueTable, - /// Mapping from *Inst.Block to block ids - blocks: std.AutoArrayHashMapUnmanaged(*Inst.Block, u32) = .{}, + /// Mapping from Air.Inst.Index to block ids + blocks: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, u32) = .{}, /// `bytes` contains the wasm bytecode belonging to the 'code' section. code: ArrayList(u8), /// Contains the generated function type bytecode for the current function @@ -538,7 +538,8 @@ pub const Context = struct { } /// Sets `err_msg` on `Context` and returns `error.CodegemFail` which is caught in link/Wasm.zig - fn fail(self: *Context, src: LazySrcLoc, comptime fmt: []const u8, args: anytype) InnerError { + fn fail(self: *Context, comptime fmt: []const u8, args: anytype) InnerError { + const src: LazySrcLoc = .{ .node_offset = 0 }; const src_loc = src.toSrcLocWithDecl(self.decl); self.err_msg = try Module.ErrorMsg.create(self.gpa, src_loc, fmt, args); return error.CodegenFail; @@ -546,7 +547,7 @@ pub const Context = struct { /// Resolves the `WValue` for the given instruction `inst` /// When the given instruction has a `Value`, it returns a constant instead - fn resolveInst(self: Context, inst: *Inst) WValue { + fn resolveInst(self: Context, inst: Air.Inst) Index { if (!inst.ty.hasCodeGenBits()) return .none; if (inst.value()) |_| { @@ -557,48 +558,45 @@ pub const Context = struct { } /// Using a given `Type`, returns the corresponding wasm Valtype - fn typeToValtype(self: *Context, src: LazySrcLoc, ty: Type) InnerError!wasm.Valtype { + fn typeToValtype(self: *Context, ty: Type) InnerError!wasm.Valtype { return switch (ty.zigTypeTag()) { .Float => blk: { const bits = ty.floatBits(self.target); if (bits == 16 or bits == 32) break :blk wasm.Valtype.f32; if (bits == 64) break :blk wasm.Valtype.f64; - return self.fail(src, "Float bit size not supported by wasm: '{d}'", .{bits}); + return self.fail("Float bit size not supported by wasm: '{d}'", .{bits}); }, .Int => blk: { const info = ty.intInfo(self.target); if (info.bits <= 32) break :blk wasm.Valtype.i32; if (info.bits > 32 and info.bits <= 64) break :blk wasm.Valtype.i64; - return self.fail(src, "Integer bit size not supported by wasm: '{d}'", .{info.bits}); + return self.fail("Integer bit size not supported by wasm: '{d}'", .{info.bits}); }, .Enum => switch (ty.tag()) { .enum_simple => wasm.Valtype.i32, - else => self.typeToValtype( - src, - ty.cast(Type.Payload.EnumFull).?.data.tag_ty, - ), + 
else => self.typeToValtype(ty.cast(Type.Payload.EnumFull).?.data.tag_ty), }, .Bool, .Pointer, .ErrorSet, => wasm.Valtype.i32, .Struct, .ErrorUnion => unreachable, // Multi typed, must be handled individually. - else => self.fail(src, "TODO - Wasm valtype for type '{s}'", .{ty.zigTypeTag()}), + else => self.fail("TODO - Wasm valtype for type '{s}'", .{ty.zigTypeTag()}), }; } /// Using a given `Type`, returns the byte representation of its wasm value type - fn genValtype(self: *Context, src: LazySrcLoc, ty: Type) InnerError!u8 { - return wasm.valtype(try self.typeToValtype(src, ty)); + fn genValtype(self: *Context, ty: Type) InnerError!u8 { + return wasm.valtype(try self.typeToValtype(ty)); } /// Using a given `Type`, returns the corresponding wasm value type /// Differently from `genValtype` this also allows `void` to create a block /// with no return type - fn genBlockType(self: *Context, src: LazySrcLoc, ty: Type) InnerError!u8 { + fn genBlockType(self: *Context, ty: Type) InnerError!u8 { return switch (ty.tag()) { .void, .noreturn => wasm.block_empty, - else => self.genValtype(src, ty), + else => self.genValtype(ty), }; } @@ -612,7 +610,7 @@ pub const Context = struct { try writer.writeByte(wasm.opcode(.local_get)); try leb.writeULEB128(writer, idx); }, - .constant => |inst| try self.emitConstant(inst.src, inst.value().?, inst.ty), // creates a new constant onto the stack + .constant => |inst| try self.emitConstant(inst.value().?, inst.ty), // creates a new constant onto the stack } } @@ -682,7 +680,7 @@ pub const Context = struct { ty.fnParamTypes(params); for (params) |param_type| { // Can we maybe get the source index of each param? - const val_type = try self.genValtype(.{ .node_offset = 0 }, param_type); + const val_type = try self.genValtype(param_type); try writer.writeByte(val_type); } } @@ -691,13 +689,10 @@ pub const Context = struct { const return_type = ty.fnReturnType(); switch (return_type.zigTypeTag()) { .Void, .NoReturn => try leb.writeULEB128(writer, @as(u32, 0)), - .Struct => return self.fail(.{ .node_offset = 0 }, "TODO: Implement struct as return type for wasm", .{}), - .Optional => return self.fail(.{ .node_offset = 0 }, "TODO: Implement optionals as return type for wasm", .{}), + .Struct => return self.fail("TODO: Implement struct as return type for wasm", .{}), + .Optional => return self.fail("TODO: Implement optionals as return type for wasm", .{}), .ErrorUnion => { - const val_type = try self.genValtype( - .{ .node_offset = 0 }, - return_type.errorUnionChild(), - ); + const val_type = try self.genValtype(return_type.errorUnionChild()); // write down the amount of return values try leb.writeULEB128(writer, @as(u32, 2)); @@ -707,22 +702,21 @@ pub const Context = struct { else => { try leb.writeULEB128(writer, @as(u32, 1)); // Can we maybe get the source index of the return type? 
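The valtype mapping above is small enough to state on its own: floats of 16 or 32 bits lower to f32 and 64-bit floats to f64, while integers up to 32 bits lower to i32 and up to 64 bits to i64; anything wider is rejected. A self-contained version of just that decision table, using the same `std.wasm.Valtype` enum (the error name is illustrative):

const std = @import("std");
const wasm = std.wasm;

fn intValtype(bits: u16) !wasm.Valtype {
    if (bits <= 32) return .i32;
    if (bits <= 64) return .i64;
    return error.UnsupportedBits; // wasm has no wider scalar integers
}

fn floatValtype(bits: u16) !wasm.Valtype {
    return switch (bits) {
        16, 32 => .f32, // f16 is widened to f32
        64 => .f64,
        else => error.UnsupportedBits,
    };
}

pub fn main() !void {
    std.debug.assert((try intValtype(8)) == .i32);
    std.debug.assert((try intValtype(33)) == .i64);
    std.debug.assert((try floatValtype(16)) == .f32);
}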
- const val_type = try self.genValtype(.{ .node_offset = 0 }, return_type); + const val_type = try self.genValtype(return_type); try writer.writeByte(val_type); }, } } pub fn genFunc(self: *Context, func: *Module.Fn) InnerError!Result { + _ = func; try self.genFunctype(); - - // Write instructions // TODO: check for and handle death of instructions // Reserve space to write the size after generating the code as well as space for locals count try self.code.resize(10); - try self.genBody(func.body); + try self.genBody(self.air.getMainBody()); // finally, write our local types at the 'offset' position { @@ -753,7 +747,7 @@ pub const Context = struct { return Result.appended; } - /// Generates the wasm bytecode for the function declaration belonging to `Context` + /// Generates the wasm bytecode for the declaration belonging to `Context` pub fn gen(self: *Context, typed_value: TypedValue) InnerError!Result { switch (typed_value.ty.zigTypeTag()) { .Fn => { @@ -793,58 +787,59 @@ pub const Context = struct { } } - fn genInst(self: *Context, inst: *Inst) InnerError!WValue { - return switch (inst.tag) { - .add => self.genBinOp(inst.castTag(.add).?, .add), - .alloc => self.genAlloc(inst.castTag(.alloc).?), - .arg => self.genArg(inst.castTag(.arg).?), - .bit_and => self.genBinOp(inst.castTag(.bit_and).?, .@"and"), - .bitcast => self.genBitcast(inst.castTag(.bitcast).?), - .bit_or => self.genBinOp(inst.castTag(.bit_or).?, .@"or"), - .block => self.genBlock(inst.castTag(.block).?), - .bool_and => self.genBinOp(inst.castTag(.bool_and).?, .@"and"), - .bool_or => self.genBinOp(inst.castTag(.bool_or).?, .@"or"), - .breakpoint => self.genBreakpoint(inst.castTag(.breakpoint).?), - .br => self.genBr(inst.castTag(.br).?), - .call => self.genCall(inst.castTag(.call).?), - .cmp_eq => self.genCmp(inst.castTag(.cmp_eq).?, .eq), - .cmp_gte => self.genCmp(inst.castTag(.cmp_gte).?, .gte), - .cmp_gt => self.genCmp(inst.castTag(.cmp_gt).?, .gt), - .cmp_lte => self.genCmp(inst.castTag(.cmp_lte).?, .lte), - .cmp_lt => self.genCmp(inst.castTag(.cmp_lt).?, .lt), - .cmp_neq => self.genCmp(inst.castTag(.cmp_neq).?, .neq), - .condbr => self.genCondBr(inst.castTag(.condbr).?), - .constant => unreachable, - .dbg_stmt => WValue.none, - .div => self.genBinOp(inst.castTag(.div).?, .div), - .is_err => self.genIsErr(inst.castTag(.is_err).?, .i32_ne), - .is_non_err => self.genIsErr(inst.castTag(.is_non_err).?, .i32_eq), - .load => self.genLoad(inst.castTag(.load).?), - .loop => self.genLoop(inst.castTag(.loop).?), - .mul => self.genBinOp(inst.castTag(.mul).?, .mul), - .not => self.genNot(inst.castTag(.not).?), - .ret => self.genRet(inst.castTag(.ret).?), - .retvoid => WValue.none, - .store => self.genStore(inst.castTag(.store).?), - .struct_field_ptr => self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?), - .sub => self.genBinOp(inst.castTag(.sub).?, .sub), - .switchbr => self.genSwitchBr(inst.castTag(.switchbr).?), - .unreach => self.genUnreachable(inst.castTag(.unreach).?), - .unwrap_errunion_payload => self.genUnwrapErrUnionPayload(inst.castTag(.unwrap_errunion_payload).?), - .wrap_errunion_payload => self.genWrapErrUnionPayload(inst.castTag(.wrap_errunion_payload).?), - .xor => self.genBinOp(inst.castTag(.xor).?, .xor), - else => self.fail(.{ .node_offset = 0 }, "TODO: Implement wasm inst: {s}", .{inst.tag}), + fn genInst(self: *Context, inst: Air.Inst.Index) !WValue { + const air_tags = self.air.instructions.items(.tag); + return switch (air_tags[inst]) { + // .add => self.genBinOp(inst.castTag(.add).?, .add), + // .alloc 
=> self.genAlloc(inst.castTag(.alloc).?), + // .arg => self.genArg(inst.castTag(.arg).?), + // .bit_and => self.genBinOp(inst.castTag(.bit_and).?, .@"and"), + // .bitcast => self.genBitcast(inst.castTag(.bitcast).?), + // .bit_or => self.genBinOp(inst.castTag(.bit_or).?, .@"or"), + // .block => self.genBlock(inst.castTag(.block).?), + // .bool_and => self.genBinOp(inst.castTag(.bool_and).?, .@"and"), + // .bool_or => self.genBinOp(inst.castTag(.bool_or).?, .@"or"), + // .breakpoint => self.genBreakpoint(inst.castTag(.breakpoint).?), + // .br => self.genBr(inst.castTag(.br).?), + // .call => self.genCall(inst.castTag(.call).?), + // .cmp_eq => self.genCmp(inst.castTag(.cmp_eq).?, .eq), + // .cmp_gte => self.genCmp(inst.castTag(.cmp_gte).?, .gte), + // .cmp_gt => self.genCmp(inst.castTag(.cmp_gt).?, .gt), + // .cmp_lte => self.genCmp(inst.castTag(.cmp_lte).?, .lte), + // .cmp_lt => self.genCmp(inst.castTag(.cmp_lt).?, .lt), + // .cmp_neq => self.genCmp(inst.castTag(.cmp_neq).?, .neq), + // .condbr => self.genCondBr(inst.castTag(.condbr).?), + // .constant => unreachable, + // .dbg_stmt => WValue.none, + // .div => self.genBinOp(inst.castTag(.div).?, .div), + // .is_err => self.genIsErr(inst.castTag(.is_err).?, .i32_ne), + // .is_non_err => self.genIsErr(inst.castTag(.is_non_err).?, .i32_eq), + // .load => self.genLoad(inst.castTag(.load).?), + // .loop => self.genLoop(inst.castTag(.loop).?), + // .mul => self.genBinOp(inst.castTag(.mul).?, .mul), + // .not => self.genNot(inst.castTag(.not).?), + // .ret => self.genRet(inst.castTag(.ret).?), + // .retvoid => WValue.none, + // .store => self.genStore(inst.castTag(.store).?), + // .struct_field_ptr => self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?), + // .sub => self.genBinOp(inst.castTag(.sub).?, .sub), + // .switchbr => self.genSwitchBr(inst.castTag(.switchbr).?), + // .unreach => self.genUnreachable(inst.castTag(.unreach).?), + // .unwrap_errunion_payload => self.genUnwrapErrUnionPayload(inst.castTag(.unwrap_errunion_payload).?), + // .wrap_errunion_payload => self.genWrapErrUnionPayload(inst.castTag(.wrap_errunion_payload).?), + // .xor => self.genBinOp(inst.castTag(.xor).?, .xor), + else => |tag| self.fail("TODO: Implement wasm inst: {s}", .{@tagName(tag)}), }; } - fn genBody(self: *Context, body: ir.Body) InnerError!void { - for (body.instructions) |inst| { + fn genBody(self: *Context, body: []const Air.Inst.Index) InnerError!void { + for (body) |inst| { const result = try self.genInst(inst); try self.values.putNoClobber(self.gpa, inst, result); } } - fn genRet(self: *Context, inst: *Inst.UnOp) InnerError!WValue { + fn genRet(self: *Context, inst: Air.Inst.Index) InnerError!WValue { // TODO: Implement tail calls const operand = self.resolveInst(inst.operand); try self.emitWValue(operand); @@ -852,7 +847,7 @@ pub const Context = struct { return .none; } - fn genCall(self: *Context, inst: *Inst.Call) InnerError!WValue { + fn genCall(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const func_val = inst.func.value().?; const target: *Decl = blk: { @@ -861,7 +856,7 @@ pub const Context = struct { } else if (func_val.castTag(.extern_fn)) |ext_fn| { break :blk ext_fn.data; } - return self.fail(inst.base.src, "Expected a function, but instead found type '{s}'", .{func_val.tag()}); + return self.fail("Expected a function, but instead found type '{s}'", .{func_val.tag()}); }; for (inst.args) |arg| { @@ -881,12 +876,12 @@ pub const Context = struct { return .none; } - fn genAlloc(self: *Context, inst: *Inst.NoOp) 
InnerError!WValue { + fn genAlloc(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const elem_type = inst.base.ty.elemType(); return self.allocLocal(elem_type); } - fn genStore(self: *Context, inst: *Inst.BinOp) InnerError!WValue { + fn genStore(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const writer = self.code.writer(); const lhs = self.resolveInst(inst.lhs); @@ -924,18 +919,18 @@ pub const Context = struct { return .none; } - fn genLoad(self: *Context, inst: *Inst.UnOp) InnerError!WValue { + fn genLoad(self: *Context, inst: Air.Inst.Index) InnerError!WValue { return self.resolveInst(inst.operand); } - fn genArg(self: *Context, inst: *Inst.Arg) InnerError!WValue { + fn genArg(self: *Context, inst: Air.Inst.Index) InnerError!WValue { _ = inst; // arguments share the index with locals defer self.local_index += 1; return WValue{ .local = self.local_index }; } - fn genBinOp(self: *Context, inst: *Inst.BinOp, op: Op) InnerError!WValue { + fn genBinOp(self: *Context, inst: Air.Inst.Index, op: Op) InnerError!WValue { const lhs = self.resolveInst(inst.lhs); const rhs = self.resolveInst(inst.rhs); @@ -952,21 +947,21 @@ pub const Context = struct { const opcode: wasm.Opcode = buildOpcode(.{ .op = op, - .valtype1 = try self.typeToValtype(inst.base.src, inst.base.ty), + .valtype1 = try self.typeToValtype(inst.base.ty), .signedness = if (inst.base.ty.isSignedInt()) .signed else .unsigned, }); try self.code.append(wasm.opcode(opcode)); return WValue{ .code_offset = offset }; } - fn emitConstant(self: *Context, src: LazySrcLoc, value: Value, ty: Type) InnerError!void { + fn emitConstant(self: *Context, value: Value, ty: Type) InnerError!void { const writer = self.code.writer(); switch (ty.zigTypeTag()) { .Int => { // write opcode const opcode: wasm.Opcode = buildOpcode(.{ .op = .@"const", - .valtype1 = try self.typeToValtype(src, ty), + .valtype1 = try self.typeToValtype(ty), }); try writer.writeByte(wasm.opcode(opcode)); // write constant @@ -985,14 +980,14 @@ pub const Context = struct { // write opcode const opcode: wasm.Opcode = buildOpcode(.{ .op = .@"const", - .valtype1 = try self.typeToValtype(src, ty), + .valtype1 = try self.typeToValtype(ty), }); try writer.writeByte(wasm.opcode(opcode)); // write constant switch (ty.floatBits(self.target)) { 0...32 => try writer.writeIntLittle(u32, @bitCast(u32, value.toFloat(f32))), 64 => try writer.writeIntLittle(u64, @bitCast(u64, value.toFloat(f64))), - else => |bits| return self.fail(src, "Wasm TODO: emitConstant for float with {d} bits", .{bits}), + else => |bits| return self.fail("Wasm TODO: emitConstant for float with {d} bits", .{bits}), } }, .Pointer => { @@ -1009,7 +1004,7 @@ pub const Context = struct { try writer.writeByte(wasm.opcode(.i32_load)); try leb.writeULEB128(writer, @as(u32, 0)); try leb.writeULEB128(writer, @as(u32, 0)); - } else return self.fail(src, "Wasm TODO: emitConstant for other const pointer tag {s}", .{value.tag()}); + } else return self.fail("Wasm TODO: emitConstant for other const pointer tag {s}", .{value.tag()}); }, .Void => {}, .Enum => { @@ -1023,7 +1018,7 @@ pub const Context = struct { const enum_full = ty.cast(Type.Payload.EnumFull).?.data; if (enum_full.values.count() != 0) { const tag_val = enum_full.values.keys()[field_index.data]; - try self.emitConstant(src, tag_val, enum_full.tag_ty); + try self.emitConstant(tag_val, enum_full.tag_ty); } else { try writer.writeByte(wasm.opcode(.i32_const)); try leb.writeULEB128(writer, field_index.data); @@ -1034,7 +1029,7 @@ pub const Context = struct { 
} else { var int_tag_buffer: Type.Payload.Bits = undefined; const int_tag_ty = ty.intTagType(&int_tag_buffer); - try self.emitConstant(src, value, int_tag_ty); + try self.emitConstant(value, int_tag_ty); } }, .ErrorSet => { @@ -1048,12 +1043,12 @@ pub const Context = struct { const payload_type = ty.errorUnionChild(); if (value.getError()) |_| { // write the error value - try self.emitConstant(src, data, error_type); + try self.emitConstant(data, error_type); // no payload, so write a '0' const const opcode: wasm.Opcode = buildOpcode(.{ .op = .@"const", - .valtype1 = try self.typeToValtype(src, payload_type), + .valtype1 = try self.typeToValtype(payload_type), }); try writer.writeByte(wasm.opcode(opcode)); try leb.writeULEB128(writer, @as(u32, 0)); @@ -1062,15 +1057,15 @@ pub const Context = struct { try writer.writeByte(wasm.opcode(.i32_const)); try leb.writeULEB128(writer, @as(u32, 0)); // after the error code, we emit the payload - try self.emitConstant(src, data, payload_type); + try self.emitConstant(data, payload_type); } }, - else => |zig_type| return self.fail(src, "Wasm TODO: emitConstant for zigTypeTag {s}", .{zig_type}), + else => |zig_type| return self.fail("Wasm TODO: emitConstant for zigTypeTag {s}", .{zig_type}), } } - fn genBlock(self: *Context, block: *Inst.Block) InnerError!WValue { - const block_ty = try self.genBlockType(block.base.src, block.base.ty); + fn genBlock(self: *Context, block: Air.Inst.Index) InnerError!WValue { + const block_ty = try self.genBlockType(block.base.ty); try self.startBlock(.block, block_ty, null); // Here we set the current block idx, so breaks know the depth to jump @@ -1100,8 +1095,8 @@ pub const Context = struct { self.block_depth -= 1; } - fn genLoop(self: *Context, loop: *Inst.Loop) InnerError!WValue { - const loop_ty = try self.genBlockType(loop.base.src, loop.base.ty); + fn genLoop(self: *Context, loop: Air.Inst.Index) InnerError!WValue { + const loop_ty = try self.genBlockType(loop.base.ty); try self.startBlock(.loop, loop_ty, null); try self.genBody(loop.body); @@ -1115,7 +1110,7 @@ pub const Context = struct { return .none; } - fn genCondBr(self: *Context, condbr: *Inst.CondBr) InnerError!WValue { + fn genCondBr(self: *Context, condbr: Air.Inst.Index) InnerError!WValue { const condition = self.resolveInst(condbr.condition); const writer = self.code.writer(); @@ -1131,7 +1126,7 @@ pub const Context = struct { break :blk offset; }, }; - const block_ty = try self.genBlockType(condbr.base.src, condbr.base.ty); + const block_ty = try self.genBlockType(condbr.base.ty); try self.startBlock(.block, block_ty, offset); // we inserted the block in front of the condition @@ -1149,7 +1144,7 @@ pub const Context = struct { return .none; } - fn genCmp(self: *Context, inst: *Inst.BinOp, op: std.math.CompareOperator) InnerError!WValue { + fn genCmp(self: *Context, inst: Air.Inst.Index, op: std.math.CompareOperator) InnerError!WValue { // save offset, so potential conditions can insert blocks in front of // the comparison that we can later jump back to const offset = self.code.items.len; @@ -1168,7 +1163,7 @@ pub const Context = struct { break :blk inst.lhs.ty.intInfo(self.target).signedness; }; const opcode: wasm.Opcode = buildOpcode(.{ - .valtype1 = try self.typeToValtype(inst.base.src, inst.lhs.ty), + .valtype1 = try self.typeToValtype(inst.lhs.ty), .op = switch (op) { .lt => .lt, .lte => .le, @@ -1183,7 +1178,7 @@ pub const Context = struct { return WValue{ .code_offset = offset }; } - fn genBr(self: *Context, br: *Inst.Br) InnerError!WValue { + 
fn genBr(self: *Context, br: Air.Inst.Index) InnerError!WValue { // if operand has codegen bits we should break with a value if (br.operand.ty.hasCodeGenBits()) { const operand = self.resolveInst(br.operand); @@ -1200,7 +1195,7 @@ pub const Context = struct { return .none; } - fn genNot(self: *Context, not: *Inst.UnOp) InnerError!WValue { + fn genNot(self: *Context, not: Air.Inst.Index) InnerError!WValue { const offset = self.code.items.len; const operand = self.resolveInst(not.operand); @@ -1217,7 +1212,7 @@ pub const Context = struct { return WValue{ .code_offset = offset }; } - fn genBreakpoint(self: *Context, breakpoint: *Inst.NoOp) InnerError!WValue { + fn genBreakpoint(self: *Context, breakpoint: Air.Inst.Index) InnerError!WValue { _ = self; _ = breakpoint; // unsupported by wasm itself. Can be implemented once we support DWARF @@ -1225,27 +1220,27 @@ pub const Context = struct { return .none; } - fn genUnreachable(self: *Context, unreach: *Inst.NoOp) InnerError!WValue { + fn genUnreachable(self: *Context, unreach: Air.Inst.Index) InnerError!WValue { _ = unreach; try self.code.append(wasm.opcode(.@"unreachable")); return .none; } - fn genBitcast(self: *Context, bitcast: *Inst.UnOp) InnerError!WValue { + fn genBitcast(self: *Context, bitcast: Air.Inst.Index) InnerError!WValue { return self.resolveInst(bitcast.operand); } - fn genStructFieldPtr(self: *Context, inst: *Inst.StructFieldPtr) InnerError!WValue { + fn genStructFieldPtr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const struct_ptr = self.resolveInst(inst.struct_ptr); return WValue{ .local = struct_ptr.multi_value.index + @intCast(u32, inst.field_index) }; } - fn genSwitchBr(self: *Context, inst: *Inst.SwitchBr) InnerError!WValue { + fn genSwitchBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const target = self.resolveInst(inst.target); const target_ty = inst.target.ty; const valtype = try self.typeToValtype(.{ .node_offset = 0 }, target_ty); - const blocktype = try self.genBlockType(inst.base.src, inst.base.ty); + const blocktype = try self.genBlockType(inst.base.ty); const signedness: std.builtin.Signedness = blk: { // by default we tell the operand type is unsigned (i.e. bools and enum values) @@ -1282,7 +1277,7 @@ pub const Context = struct { return .none; } - fn genIsErr(self: *Context, inst: *Inst.UnOp, opcode: wasm.Opcode) InnerError!WValue { + fn genIsErr(self: *Context, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!WValue { const operand = self.resolveInst(inst.operand); const offset = self.code.items.len; const writer = self.code.writer(); @@ -1298,7 +1293,7 @@ pub const Context = struct { return WValue{ .code_offset = offset }; } - fn genUnwrapErrUnionPayload(self: *Context, inst: *Inst.UnOp) InnerError!WValue { + fn genUnwrapErrUnionPayload(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const operand = self.resolveInst(inst.operand); // The index of multi_value contains the error code. To get the initial index of the payload we get // the following index. 
Next, convert it to a `WValue.local` @@ -1307,7 +1302,7 @@ pub const Context = struct { return WValue{ .local = operand.multi_value.index + 1 }; } - fn genWrapErrUnionPayload(self: *Context, inst: *Inst.UnOp) InnerError!WValue { + fn genWrapErrUnionPayload(self: *Context, inst: Air.Inst.Index) InnerError!WValue { return self.resolveInst(inst.operand); } }; diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index be6ad78701..1387615d15 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -228,7 +228,7 @@ pub fn updateFunc(self: *Wasm, module: *Module, func: *Module.Fn, air: Air, live }, else => |e| return e, }; - return self.finishUpdateDecl(decl, result); + return self.finishUpdateDecl(decl, result, &context); } // Generate code for the Decl, storing it in memory to be later written to @@ -270,18 +270,21 @@ pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void { }, else => |e| return e, }; - return self.finishUpdateDecl(decl, result); + + return self.finishUpdateDecl(decl, result, &context); } -fn finishUpdateDecl(self: *Wasm, decl: *Module.Decl, result: codegen.Result) !void { - const code: []const u8 = switch (result) { - .appended => @as([]const u8, context.code.items), - .externally_managed => |payload| payload, - }; +fn finishUpdateDecl(self: *Wasm, decl: *Module.Decl, result: codegen.Result, context: *codegen.Context) !void { + const fn_data: *FnData = &decl.fn_link.wasm; fn_data.code = context.code.toUnmanaged(); fn_data.functype = context.func_type_data.toUnmanaged(); + const code: []const u8 = switch (result) { + .appended => @as([]const u8, fn_data.code.items), + .externally_managed => |payload| payload, + }; + const block = &decl.link.wasm; if (decl.ty.zigTypeTag() == .Fn) { // as locals are patched afterwards, the offsets of funcidx's are off, -- cgit v1.2.3 From 2438f61f1c37aefa16852130370df44b3fabf785 Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Fri, 16 Jul 2021 22:43:06 +0200 Subject: Refactor entire wasm-backend to use new AIR memory layout --- src/codegen/wasm.zig | 275 ++++++++++++++++++++++++++++++--------------------- src/link/Wasm.zig | 2 +- 2 files changed, 161 insertions(+), 116 deletions(-) (limited to 'src/link') diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig index 33ab07faf3..5cf3fb15fd 100644 --- a/src/codegen/wasm.zig +++ b/src/codegen/wasm.zig @@ -483,8 +483,8 @@ pub const Result = union(enum) { externally_managed: []const u8, }; -/// Hashmap to store generated `WValue` for each `Inst` -pub const ValueTable = std.AutoHashMapUnmanaged(Air.Inst.Index, WValue); +/// Hashmap to store generated `WValue` for each `Air.Inst.Ref` +pub const ValueTable = std.AutoHashMapUnmanaged(Air.Inst.Ref, WValue); /// Code represents the `Code` section of wasm that /// belongs to a function @@ -495,7 +495,7 @@ pub const Context = struct { air: Air, liveness: Liveness, gpa: *mem.Allocator, - /// Table to save `WValue`'s generated by an `Inst` + /// Table to save `WValue`'s generated by an `Air.Inst` values: ValueTable, /// Mapping from Air.Inst.Index to block ids blocks: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, u32) = .{}, @@ -547,14 +547,15 @@ pub const Context = struct { /// Resolves the `WValue` for the given instruction `inst` /// When the given instruction has a `Value`, it returns a constant instead - fn resolveInst(self: Context, inst: Air.Inst) Index { - if (!inst.ty.hasCodeGenBits()) return .none; + fn resolveInst(self: Context, ref: Air.Inst.Ref) WValue { + const ref_type = self.air.getRefType(ref); + if 
(!ref_type.hasCodeGenBits()) return .none;

-        if (inst.value()) |_| {
-            return WValue{ .constant = inst };
+        if (self.air.instructions.items(.tag)[@enumToInt(ref)] == .constant) {
+            return WValue{ .constant = @enumToInt(ref) };
         }

-        return self.values.get(inst).?; // Instruction does not dominate all uses!
+        return self.values.get(ref).?; // Instruction does not dominate all uses!
     }

     /// Using a given `Type`, returns the corresponding wasm Valtype
@@ -610,7 +611,12 @@
             try writer.writeByte(wasm.opcode(.local_get));
             try leb.writeULEB128(writer, idx);
         },
-        .constant => |inst| try self.emitConstant(inst.value().?, inst.ty), // creates a new constant onto the stack
+        .constant => |index| {
+            const ty_pl = self.air.instructions.items(.data)[index].ty_pl;
+            const value = self.air.values[ty_pl.payload];
+            // create a new constant onto the stack
+            try self.emitConstant(value, self.air.getRefType(ty_pl.ty));
+        },
     }
 }
@@ -626,10 +632,7 @@
         const fields_len = @intCast(u32, struct_data.fields.count());
         try self.locals.ensureCapacity(self.gpa, self.locals.items.len + fields_len);
         for (struct_data.fields.values()) |*value| {
-            const val_type = try self.genValtype(
-                .{ .node_offset = struct_data.node_offset },
-                value.ty,
-            );
+            const val_type = try self.genValtype(value.ty);
             self.locals.appendAssumeCapacity(val_type);
             self.local_index += 1;
         }
@@ -640,7 +643,7 @@
         },
         .ErrorUnion => {
             const payload_type = ty.errorUnionChild();
-            const val_type = try self.genValtype(.{ .node_offset = 0 }, payload_type);
+            const val_type = try self.genValtype(payload_type);
             // we emit the error value as the first local, and the payload as the following.
             // The first local is also used to find the index of the error and payload.
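For orientation while reading these hunks: the refactored backends juggle two integer-like handles, `Air.Inst.Ref` (which may also denote a comptime-known value) and `Air.Inst.Index` (always an instruction). The following is a minimal, self-contained sketch of that mapping; `typed_value_map`, `refToIndex`, and `indexToRef` mirror the helpers in Air.zig, while the toy `TypedValue` and the three map entries are placeholders, not the real tables.

const std = @import("std");

// Toy stand-ins; only the index arithmetic matters for this sketch.
const TypedValue = struct { name: []const u8 };
const typed_value_map = [_]TypedValue{
    .{ .name = "u8_type" },
    .{ .name = "void_value" },
    .{ .name = "bool_true" },
};

// The first typed_value_map.len tags of Ref denote comptime-known values;
// instruction `i` is encoded as `i + typed_value_map.len`, so the two
// ranges never collide.
const Ref = enum(u32) { u8_type, void_value, bool_true, _ };
const Index = u32;

fn refToIndex(ref: Ref) ?Index {
    const int = @enumToInt(ref);
    const start = @intCast(u32, typed_value_map.len);
    if (int < start) return null; // a comptime constant, not an instruction
    return int - start;
}

fn indexToRef(inst: Index) Ref {
    return @intToEnum(Ref, inst + @intCast(u32, typed_value_map.len));
}

comptime {
    std.debug.assert(refToIndex(.void_value) == null);
    std.debug.assert(refToIndex(indexToRef(5)).? == 5);
}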
@@ -657,7 +660,7 @@ pub const Context = struct { } }; }, else => { - const valtype = try self.genValtype(.{ .node_offset = 0 }, ty); + const valtype = try self.genValtype(ty); try self.locals.append(self.gpa, valtype); self.local_index += 1; return WValue{ .local = initial_index }; @@ -708,8 +711,7 @@ pub const Context = struct { } } - pub fn genFunc(self: *Context, func: *Module.Fn) InnerError!Result { - _ = func; + pub fn genFunc(self: *Context) InnerError!Result { try self.genFunctype(); // TODO: check for and handle death of instructions @@ -790,44 +792,43 @@ pub const Context = struct { fn genInst(self: *Context, inst: Air.Inst.Index) !WValue { const air_tags = self.air.instructions.items(.tag); return switch (air_tags[inst]) { - // .add => self.genBinOp(inst.castTag(.add).?, .add), - // .alloc => self.genAlloc(inst.castTag(.alloc).?), - // .arg => self.genArg(inst.castTag(.arg).?), - // .bit_and => self.genBinOp(inst.castTag(.bit_and).?, .@"and"), - // .bitcast => self.genBitcast(inst.castTag(.bitcast).?), - // .bit_or => self.genBinOp(inst.castTag(.bit_or).?, .@"or"), - // .block => self.genBlock(inst.castTag(.block).?), - // .bool_and => self.genBinOp(inst.castTag(.bool_and).?, .@"and"), - // .bool_or => self.genBinOp(inst.castTag(.bool_or).?, .@"or"), - // .breakpoint => self.genBreakpoint(inst.castTag(.breakpoint).?), - // .br => self.genBr(inst.castTag(.br).?), - // .call => self.genCall(inst.castTag(.call).?), - // .cmp_eq => self.genCmp(inst.castTag(.cmp_eq).?, .eq), - // .cmp_gte => self.genCmp(inst.castTag(.cmp_gte).?, .gte), - // .cmp_gt => self.genCmp(inst.castTag(.cmp_gt).?, .gt), - // .cmp_lte => self.genCmp(inst.castTag(.cmp_lte).?, .lte), - // .cmp_lt => self.genCmp(inst.castTag(.cmp_lt).?, .lt), - // .cmp_neq => self.genCmp(inst.castTag(.cmp_neq).?, .neq), - // .condbr => self.genCondBr(inst.castTag(.condbr).?), - // .constant => unreachable, - // .dbg_stmt => WValue.none, - // .div => self.genBinOp(inst.castTag(.div).?, .div), - // .is_err => self.genIsErr(inst.castTag(.is_err).?, .i32_ne), - // .is_non_err => self.genIsErr(inst.castTag(.is_non_err).?, .i32_eq), - // .load => self.genLoad(inst.castTag(.load).?), - // .loop => self.genLoop(inst.castTag(.loop).?), - // .mul => self.genBinOp(inst.castTag(.mul).?, .mul), - // .not => self.genNot(inst.castTag(.not).?), - // .ret => self.genRet(inst.castTag(.ret).?), - // .retvoid => WValue.none, - // .store => self.genStore(inst.castTag(.store).?), - // .struct_field_ptr => self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?), - // .sub => self.genBinOp(inst.castTag(.sub).?, .sub), - // .switchbr => self.genSwitchBr(inst.castTag(.switchbr).?), - // .unreach => self.genUnreachable(inst.castTag(.unreach).?), - // .unwrap_errunion_payload => self.genUnwrapErrUnionPayload(inst.castTag(.unwrap_errunion_payload).?), - // .wrap_errunion_payload => self.genWrapErrUnionPayload(inst.castTag(.wrap_errunion_payload).?), - // .xor => self.genBinOp(inst.castTag(.xor).?, .xor), + .add => self.genBinOp(inst, .add), + .alloc => self.genAlloc(inst), + .arg => self.genArg(inst), + .bit_and => self.genBinOp(inst, .@"and"), + .bitcast => self.genBitcast(inst), + .bit_or => self.genBinOp(inst, .@"or"), + .block => self.genBlock(inst), + .bool_and => self.genBinOp(inst, .@"and"), + .bool_or => self.genBinOp(inst, .@"or"), + .breakpoint => self.genBreakpoint(inst), + .br => self.genBr(inst), + .call => self.genCall(inst), + .cmp_eq => self.genCmp(inst, .eq), + .cmp_gte => self.genCmp(inst, .gte), + .cmp_gt => self.genCmp(inst, .gt), + 
.cmp_lte => self.genCmp(inst, .lte), + .cmp_lt => self.genCmp(inst, .lt), + .cmp_neq => self.genCmp(inst, .neq), + .cond_br => self.genCondBr(inst), + .constant => unreachable, + .dbg_stmt => WValue.none, + .div => self.genBinOp(inst, .div), + .is_err => self.genIsErr(inst, .i32_ne), + .is_non_err => self.genIsErr(inst, .i32_eq), + .load => self.genLoad(inst), + .loop => self.genLoop(inst), + .mul => self.genBinOp(inst, .mul), + .not => self.genNot(inst), + .ret => self.genRet(inst), + .store => self.genStore(inst), + .struct_field_ptr => self.genStructFieldPtr(inst), + .sub => self.genBinOp(inst, .sub), + .switch_br => self.genSwitchBr(inst), + .unreach => self.genUnreachable(inst), + .unwrap_errunion_payload => self.genUnwrapErrUnionPayload(inst), + .wrap_errunion_payload => self.genWrapErrUnionPayload(inst), + .xor => self.genBinOp(inst, .xor), else => |tag| self.fail("TODO: Implement wasm inst: {s}", .{@tagName(tag)}), }; } @@ -835,22 +836,27 @@ pub const Context = struct { fn genBody(self: *Context, body: []const Air.Inst.Index) InnerError!void { for (body) |inst| { const result = try self.genInst(inst); - try self.values.putNoClobber(self.gpa, inst, result); + try self.values.putNoClobber(self.gpa, @intToEnum(Air.Inst.Ref, inst), result); } } fn genRet(self: *Context, inst: Air.Inst.Index) InnerError!WValue { - // TODO: Implement tail calls - const operand = self.resolveInst(inst.operand); + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand = self.resolveInst(un_op); try self.emitWValue(operand); try self.code.append(wasm.opcode(.@"return")); return .none; } fn genCall(self: *Context, inst: Air.Inst.Index) InnerError!WValue { - const func_val = inst.func.value().?; + const pl_op = self.air.instructions.items(.data)[inst].pl_op; + const extra = self.air.extraData(Air.Call, pl_op.payload); + const args = self.air.extra[extra.end..][0..extra.data.args_len]; const target: *Decl = blk: { + const ty_pl = self.air.instructions.items(.data)[@enumToInt(pl_op.operand)].ty_pl; + const func_val = self.air.values[ty_pl.payload]; + if (func_val.castTag(.function)) |func| { break :blk func.data.owner_decl; } else if (func_val.castTag(.extern_fn)) |ext_fn| { @@ -859,8 +865,8 @@ pub const Context = struct { return self.fail("Expected a function, but instead found type '{s}'", .{func_val.tag()}); }; - for (inst.args) |arg| { - const arg_val = self.resolveInst(arg); + for (args) |arg| { + const arg_val = self.resolveInst(@intToEnum(Air.Inst.Ref, arg)); try self.emitWValue(arg_val); } @@ -877,15 +883,16 @@ pub const Context = struct { } fn genAlloc(self: *Context, inst: Air.Inst.Index) InnerError!WValue { - const elem_type = inst.base.ty.elemType(); + const elem_type = self.air.getType(inst).elemType(); return self.allocLocal(elem_type); } fn genStore(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; const writer = self.code.writer(); - const lhs = self.resolveInst(inst.lhs); - const rhs = self.resolveInst(inst.rhs); + const lhs = self.resolveInst(bin_op.lhs); + const rhs = self.resolveInst(bin_op.rhs); switch (lhs) { .multi_value => |multi_value| switch (rhs) { @@ -893,7 +900,7 @@ pub const Context = struct { // we simply assign the local_index to the rhs one. 
                // This allows us to update struct fields without having to individually
                // set each local as each field's index will be calculated off the struct's base index
-                .multi_value => self.values.put(self.gpa, inst.lhs, rhs) catch unreachable, // Instruction does not dominate all uses!
+                .multi_value => self.values.put(self.gpa, bin_op.lhs, rhs) catch unreachable, // Instruction does not dominate all uses!
                 .constant, .none => {
                     // emit all values onto the stack if constant
                     try self.emitWValue(rhs);
@@ -920,7 +927,8 @@
     }

     fn genLoad(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
-        return self.resolveInst(inst.operand);
+        const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+        return self.resolveInst(ty_op.operand);
     }

     fn genArg(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
@@ -931,8 +939,9 @@
     }

     fn genBinOp(self: *Context, inst: Air.Inst.Index, op: Op) InnerError!WValue {
-        const lhs = self.resolveInst(inst.lhs);
-        const rhs = self.resolveInst(inst.rhs);
+        const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+        const lhs = self.resolveInst(bin_op.lhs);
+        const rhs = self.resolveInst(bin_op.rhs);

         // it's possible for both lhs and/or rhs to return an offset as well,
         // in which case we return the first offset occurrence we find.
@@ -945,10 +954,11 @@
         try self.emitWValue(lhs);
         try self.emitWValue(rhs);

+        const bin_ty = self.air.getRefType(bin_op.lhs);
         const opcode: wasm.Opcode = buildOpcode(.{
             .op = op,
-            .valtype1 = try self.typeToValtype(inst.base.ty),
-            .signedness = if (inst.base.ty.isSignedInt()) .signed else .unsigned,
+            .valtype1 = try self.typeToValtype(bin_ty),
+            .signedness = if (bin_ty.isSignedInt()) .signed else .unsigned,
         });
         try self.code.append(wasm.opcode(opcode));
         return WValue{ .code_offset = offset };
@@ -1064,14 +1074,17 @@
         }
     }

-    fn genBlock(self: *Context, block: Air.Inst.Index) InnerError!WValue {
-        const block_ty = try self.genBlockType(block.base.ty);
+    fn genBlock(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
+        const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+        const block_ty = try self.genBlockType(self.air.getRefType(ty_pl.ty));
+        const extra = self.air.extraData(Air.Block, ty_pl.payload);
+        const body = self.air.extra[extra.end..][0..extra.data.body_len];
         try self.startBlock(.block, block_ty, null);

         // Here we set the current block idx, so breaks know the depth to jump
         // to when breaking out.
-        try self.blocks.putNoClobber(self.gpa, block, self.block_depth);
-        try self.genBody(block.body);
+        try self.blocks.putNoClobber(self.gpa, inst, self.block_depth);
+        try self.genBody(body);
         try self.endBlock();

         return .none;
@@ -1095,11 +1108,15 @@
         self.block_depth -= 1;
     }

-    fn genLoop(self: *Context, loop: Air.Inst.Index) InnerError!WValue {
-        const loop_ty = try self.genBlockType(loop.base.ty);
+    fn genLoop(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
+        const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+        const loop = self.air.extraData(Air.Block, ty_pl.payload);
+        const body = self.air.extra[loop.end..][0..loop.data.body_len];

-        try self.startBlock(.loop, loop_ty, null);
-        try self.genBody(loop.body);
+        // result type of loop is always 'noreturn', meaning we can always
+        // emit the wasm type 'block_empty'.
+        try self.startBlock(.loop, wasm.block_empty, null);
+        try self.genBody(body);

         // breaking to the index of a loop block will continue the loop instead
         try self.code.append(wasm.opcode(.br));
@@ -1110,8 +1127,12 @@
         return .none;
     }

-    fn genCondBr(self: *Context, condbr: Air.Inst.Index) InnerError!WValue {
-        const condition = self.resolveInst(condbr.condition);
+    fn genCondBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
+        const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+        const condition = self.resolveInst(pl_op.operand);
+        const extra = self.air.extraData(Air.CondBr, pl_op.payload);
+        const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len];
+        const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
         const writer = self.code.writer();

         // TODO: Handle death instructions for then and else body
@@ -1126,8 +1147,9 @@
                 break :blk offset;
             },
         };
-        const block_ty = try self.genBlockType(condbr.base.ty);
-        try self.startBlock(.block, block_ty, offset);
+
+        // result type is always noreturn, so use `block_empty` as type.
+        try self.startBlock(.block, wasm.block_empty, offset);

         // we inserted the block in front of the condition
         // so now check if condition matches. If not, break outside this block
@@ -1135,11 +1157,11 @@
         try writer.writeByte(wasm.opcode(.br_if));
         try leb.writeULEB128(writer, @as(u32, 0));

-        try self.genBody(condbr.else_body);
+        try self.genBody(else_body);
         try self.endBlock();

         // Outer block that matches the condition
-        try self.genBody(condbr.then_body);
+        try self.genBody(then_body);

         return .none;
     }
@@ -1149,21 +1171,23 @@
         // the comparison that we can later jump back to
         const offset = self.code.items.len;

-        const lhs = self.resolveInst(inst.lhs);
-        const rhs = self.resolveInst(inst.rhs);
+        const data: Air.Inst.Data = self.air.instructions.items(.data)[inst];
+        const lhs = self.resolveInst(data.bin_op.lhs);
+        const rhs = self.resolveInst(data.bin_op.rhs);
+        const lhs_ty = self.air.getRefType(data.bin_op.lhs);

         try self.emitWValue(lhs);
         try self.emitWValue(rhs);

         const signedness: std.builtin.Signedness = blk: {
             // by default we tell the operand type is unsigned (i.e. bools and enum values)
-            if (inst.lhs.ty.zigTypeTag() != .Int) break :blk .unsigned;
+            if (lhs_ty.zigTypeTag() != .Int) break :blk .unsigned;

             // in case of an actual integer, we emit the correct signedness
-            break :blk inst.lhs.ty.intInfo(self.target).signedness;
+            break :blk lhs_ty.intInfo(self.target).signedness;
         };
         const opcode: wasm.Opcode = buildOpcode(.{
-            .valtype1 = try self.typeToValtype(inst.lhs.ty),
+            .valtype1 = try self.typeToValtype(lhs_ty),
             .op = switch (op) {
                 .lt => .lt,
                 .lte => .le,
@@ -1178,16 +1202,17 @@
         return WValue{ .code_offset = offset };
     }

-    fn genBr(self: *Context, br: Air.Inst.Index) InnerError!WValue {
+    fn genBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
+        const br = self.air.instructions.items(.data)[inst].br;
+
         // if operand has codegen bits we should break with a value
-        if (br.operand.ty.hasCodeGenBits()) {
-            const operand = self.resolveInst(br.operand);
-            try self.emitWValue(operand);
+        if (self.air.getRefType(br.operand).hasCodeGenBits()) {
+            try self.emitWValue(self.resolveInst(br.operand));
         }

         // We map every block to its block index.
        // We then determine how far we have to jump to it by subtracting it from current block depth
-        const idx: u32 = self.block_depth - self.blocks.get(br.block).?;
+        const idx: u32 = self.block_depth - self.blocks.get(br.block_inst).?;
         const writer = self.code.writer();
         try writer.writeByte(wasm.opcode(.br));
         try leb.writeULEB128(writer, idx);
@@ -1195,10 +1220,11 @@
         return .none;
     }

-    fn genNot(self: *Context, not: Air.Inst.Index) InnerError!WValue {
+    fn genNot(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
+        const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const offset = self.code.items.len;

-        const operand = self.resolveInst(not.operand);
+        const operand = self.resolveInst(ty_op.operand);
         try self.emitWValue(operand);

         // wasm does not have booleans nor the `not` instruction, therefore compare with 0
@@ -1212,35 +1238,44 @@
         return WValue{ .code_offset = offset };
     }

-    fn genBreakpoint(self: *Context, breakpoint: Air.Inst.Index) InnerError!WValue {
+    fn genBreakpoint(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
         _ = self;
-        _ = breakpoint;
+        _ = inst;
         // unsupported by wasm itself. Can be implemented once we support DWARF
         // for wasm
         return .none;
     }

-    fn genUnreachable(self: *Context, unreach: Air.Inst.Index) InnerError!WValue {
-        _ = unreach;
+    fn genUnreachable(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
+        _ = inst;
         try self.code.append(wasm.opcode(.@"unreachable"));
         return .none;
     }

-    fn genBitcast(self: *Context, bitcast: Air.Inst.Index) InnerError!WValue {
-        return self.resolveInst(bitcast.operand);
+    fn genBitcast(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
+        const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+        return self.resolveInst(ty_op.operand);
     }

     fn genStructFieldPtr(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
-        const struct_ptr = self.resolveInst(inst.struct_ptr);
+        const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+        const extra = self.air.extraData(Air.StructField, ty_pl.payload);
+        const struct_ptr = self.resolveInst(extra.data.struct_ptr);

-        return WValue{ .local = struct_ptr.multi_value.index + @intCast(u32, inst.field_index) };
+        return WValue{ .local = struct_ptr.multi_value.index + @intCast(u32, extra.data.field_index) };
     }

     fn genSwitchBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
-        const target = self.resolveInst(inst.target);
-        const target_ty = inst.target.ty;
-        const valtype = try self.typeToValtype(.{ .node_offset = 0 }, target_ty);
-        const blocktype = try self.genBlockType(inst.base.ty);
+        const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+        const extra = self.air.extraData(Air.SwitchBr, pl_op.payload);
+        const cases = self.air.extra[extra.end..][0..extra.data.cases_len];
+        const else_body = self.air.extra[extra.end + cases.len ..][0..extra.data.else_body_len];
+
+        const target = self.resolveInst(pl_op.operand);
+        const target_ty = self.air.getRefType(pl_op.operand);
+        const valtype = try self.typeToValtype(target_ty);
+        // result type is always 'noreturn'
+        const blocktype = wasm.block_empty;

         const signedness: std.builtin.Signedness = blk: {
             // by default we tell the operand type is unsigned (i.e. bools and enum values)
@@ -1249,11 +1284,18 @@
             // in case of an actual integer, we emit the correct signedness
             break :blk target_ty.intInfo(self.target).signedness;
         };
-        for (inst.cases) |case| {
+        for (cases) |case_idx| {
+            const case = self.air.extraData(Air.SwitchBr.Case, case_idx);
+            const case_body = self.air.extra[case.end..][0..case.data.body_len];
+
             // create a block for each case, when the condition does not match we break out of it
             try self.startBlock(.block, blocktype, null);
             try self.emitWValue(target);
-            try self.emitConstant(.{ .node_offset = 0 }, case.item, target_ty);
+
+            // cases must represent a constant whose typed value is stored in the `typed_value_map`.
+            // Therefore we can simply retrieve it.
+            const ty_val = Air.Inst.Ref.typed_value_map[@enumToInt(case.data.item)];
+            try self.emitConstant(ty_val.val, target_ty);
             const opcode = buildOpcode(.{
                 .valtype1 = valtype,
                 .op = .ne, // not equal because we jump out the block if it does not match the condition
             });
             try self.code.append(wasm.opcode(opcode));
             try leb.writeULEB128(self.code.writer(), @as(u32, 0));

             // emit our block code
-            try self.genBody(case.body);
+            try self.genBody(case_body);

             // end the block we created earlier
             try self.endBlock();
@@ -1272,13 +1314,14 @@

         // finally, emit the else case if it exists. Here we will not have to
         // check for a condition, so also no need to emit a block.
-        try self.genBody(inst.else_body);
+        try self.genBody(else_body);

         return .none;
     }

     fn genIsErr(self: *Context, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!WValue {
-        const operand = self.resolveInst(inst.operand);
+        const un_op = self.air.instructions.items(.data)[inst].un_op;
+        const operand = self.resolveInst(un_op);
         const offset = self.code.items.len;
         const writer = self.code.writer();
@@ -1294,7 +1337,8 @@
     }

     fn genUnwrapErrUnionPayload(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
-        const operand = self.resolveInst(inst.operand);
+        const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+        const operand = self.resolveInst(ty_op.operand);

         // The index of multi_value contains the error code. To get the initial index of the payload we get
        // the following index.
Next, convert it to a `WValue.local` // @@ -1303,6 +1347,7 @@ pub const Context = struct { } fn genWrapErrUnionPayload(self: *Context, inst: Air.Inst.Index) InnerError!WValue { - return self.resolveInst(inst.operand); + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + return self.resolveInst(ty_op.operand); } }; diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 1387615d15..81e50c46b6 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -220,7 +220,7 @@ pub fn updateFunc(self: *Wasm, module: *Module, func: *Module.Fn, air: Air, live defer context.deinit(); // generate the 'code' section for the function declaration - const result = context.genFunc(func) catch |err| switch (err) { + const result = context.genFunc() catch |err| switch (err) { error.CodegenFail => { decl.analysis = .codegen_failure; try module.failed_decls.put(module.gpa, decl, context.err_msg); -- cgit v1.2.3 From a804de13c8e2d7a6a99c55355f964f658a5a76bc Mon Sep 17 00:00:00 2001 From: Jacob G-W Date: Sat, 17 Jul 2021 10:39:20 -0400 Subject: plan9 linker: fix after testing * exports get rendered properly in symbol table * global offset table is at the start of data section instead of after symtab * various null use fixes --- src/link/Plan9.zig | 65 +++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 55 insertions(+), 10 deletions(-) (limited to 'src/link') diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 9b123f56aa..135b59f82b 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -34,7 +34,7 @@ data_decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, []const u8) = .{}, hdr: aout.ExecHdr = undefined, -entry_decl: ?*Module.Decl = null, +entry_val: ?u64 = null, got_len: u64 = 0, @@ -213,6 +213,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void { if (build_options.skip_non_native and builtin.object_format != .plan9) { @panic("Attempted to compile for object format that was disabled by build configuration"); } + _ = comp; const tracy = trace(@src()); defer tracy.end(); @@ -221,7 +222,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void { defer assert(self.hdr.entry != 0x0); - _ = self.base.options.module orelse return error.LinkingWithoutZigSourceUnimplemented; + const mod = self.base.options.module orelse return error.LinkingWithoutZigSourceUnimplemented; assert(self.got_len == self.fn_decl_table.count() + self.data_decl_table.count()); const got_size = self.got_len * if (!self.sixtyfour_bit) @as(u32, 4) else 8; @@ -230,6 +231,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void { // + 2 for header, got, symbols var iovecs = try self.base.allocator.alloc(std.os.iovec_const, self.fn_decl_table.count() + self.data_decl_table.count() + 3); + defer self.base.allocator.free(iovecs); const file = self.base.file.?; @@ -247,11 +249,12 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void { while (it.next()) |entry| { const decl = entry.key_ptr.*; const code = entry.value_ptr.*; + log.debug("write text decl {*} ({s})", .{ decl, decl.name }); foff += code.len; - text_i += code.len; iovecs[iovecs_i] = .{ .iov_base = code.ptr, .iov_len = code.len }; iovecs_i += 1; const off = self.getAddr(text_i, .t); + text_i += code.len; decl.link.plan9.offset = off; if (!self.sixtyfour_bit) { mem.writeIntNative(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off)); @@ -260,10 +263,16 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void { mem.writeInt(u64, got_table[decl.link.plan9.got_index.? 
* 8 ..][0..8], off, self.base.options.target.cpu.arch.endian()); } self.syms.items[decl.link.plan9.sym_index.?].value = off; + if (mod.decl_exports.get(decl)) |exports| { + try self.addDeclExports(mod, decl, exports); + } } // etext symbol self.syms.items[2].value = self.getAddr(text_i, .t); } + // global offset table is in data + iovecs[iovecs_i] = .{ .iov_base = got_table.ptr, .iov_len = got_table.len }; + iovecs_i += 1; // data var data_i: u64 = got_size; { @@ -271,11 +280,13 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void { while (it.next()) |entry| { const decl = entry.key_ptr.*; const code = entry.value_ptr.*; + log.debug("write data decl {*} ({s})", .{ decl, decl.name }); + foff += code.len; - data_i += code.len; iovecs[iovecs_i] = .{ .iov_base = code.ptr, .iov_len = code.len }; iovecs_i += 1; const off = self.getAddr(data_i, .d); + data_i += code.len; decl.link.plan9.offset = off; if (!self.sixtyfour_bit) { mem.writeInt(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian()); @@ -283,6 +294,9 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void { mem.writeInt(u64, got_table[decl.link.plan9.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian()); } self.syms.items[decl.link.plan9.sym_index.?].value = off; + if (mod.decl_exports.get(decl)) |exports| { + try self.addDeclExports(mod, decl, exports); + } } // edata symbol self.syms.items[0].value = self.getAddr(data_i, .b); @@ -292,8 +306,6 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void { var sym_buf = std.ArrayList(u8).init(self.base.allocator); defer sym_buf.deinit(); try self.writeSyms(&sym_buf); - iovecs[iovecs_i] = .{ .iov_base = got_table.ptr, .iov_len = got_table.len }; - iovecs_i += 1; assert(2 + self.fn_decl_table.count() + self.data_decl_table.count() == iovecs_i); // we didn't write all the decls iovecs[iovecs_i] = .{ .iov_base = sym_buf.items.ptr, .iov_len = sym_buf.items.len }; iovecs_i += 1; @@ -306,16 +318,45 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void { .bss = 0, .pcsz = 0, .spsz = 0, - .entry = @intCast(u32, self.entry_decl.?.link.plan9.offset.?), + .entry = @intCast(u32, self.entry_val.?), }; std.mem.copy(u8, hdr_slice, self.hdr.toU8s()[0..hdr_size]); // write the fat header for 64 bit entry points if (self.sixtyfour_bit) { - mem.writeIntSliceBig(u64, hdr_buf[32..40], self.entry_decl.?.link.plan9.offset.?); + mem.writeIntSliceBig(u64, hdr_buf[32..40], self.entry_val.?); } // write it all! 
try file.pwritevAll(iovecs, 0); } +fn addDeclExports( + self: *Plan9, + module: *Module, + decl: *Module.Decl, + exports: []const *Module.Export, +) !void { + for (exports) |exp| { + // plan9 does not support custom sections + if (exp.options.section) |section_name| { + if (!mem.eql(u8, section_name, ".text") or !mem.eql(u8, section_name, ".data")) { + try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "plan9 does not support extra sections", .{})); + break; + } + } + const sym = .{ + .value = decl.link.plan9.offset.?, + .type = decl.link.plan9.type.toGlobal(), + .name = exp.options.name, + }; + + if (exp.link.plan9) |i| { + self.syms.items[i] = sym; + } else { + try self.syms.append(self.base.allocator, sym); + exp.link.plan9 = self.syms.items.len - 1; + } + } +} + pub fn freeDecl(self: *Plan9, decl: *Module.Decl) void { const is_fn = (decl.ty.zigTypeTag() == .Fn); if (is_fn) @@ -394,19 +435,23 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { const writer = buf.writer(); for (self.syms.items) |sym| { + log.debug("sym.name: {s}", .{sym.name}); + log.debug("sym.value: {x}", .{sym.value}); + if (mem.eql(u8, sym.name, "_start")) + self.entry_val = sym.value; if (!self.sixtyfour_bit) { try writer.writeIntBig(u32, @intCast(u32, sym.value)); } else { try writer.writeIntBig(u64, sym.value); } try writer.writeByte(@enumToInt(sym.type)); - try writer.writeAll(std.mem.span(sym.name)); + try writer.writeAll(sym.name); try writer.writeByte(0); } } pub fn allocateDeclIndexes(self: *Plan9, decl: *Module.Decl) !void { - if (decl.link.plan9.got_index != null) { + if (decl.link.plan9.got_index == null) { self.got_len += 1; decl.link.plan9.got_index = self.got_len - 1; } -- cgit v1.2.3 From 33aab2c1bbe55cdd3d2d08dc429260d06898d36d Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 17 Jul 2021 12:42:05 -0700 Subject: stage2: ELF linking: avoid crashing for stupidly large functions --- src/link/Elf.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/link') diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 815c0c9f23..315dfb563b 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -3080,7 +3080,7 @@ fn pwriteDbgLineNops( const page_of_nops = [1]u8{DW.LNS_negate_stmt} ** 4096; const three_byte_nop = [3]u8{ DW.LNS_advance_pc, 0b1000_0000, 0 }; - var vecs: [256]std.os.iovec_const = undefined; + var vecs: [512]std.os.iovec_const = undefined; var vec_index: usize = 0; { var padding_left = prev_padding_size; -- cgit v1.2.3 From 4a0f38bb7671750be1590e815def72e3b4a34ccf Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 18 Jul 2021 22:26:36 -0700 Subject: stage2: update LLVM backend to new AIR memory layout Also fix compile errors when not using -Dskip-non-native --- src/codegen.zig | 54 ++++---- src/codegen/c.zig | 3 + src/codegen/llvm.zig | 359 +++++++++++++++++++++++++++++--------------------- src/codegen/spirv.zig | 152 ++++++++++----------- src/codegen/wasm.zig | 8 +- src/link/Coff.zig | 15 ++- src/link/MachO.zig | 245 +++++++++++++++++++--------------- src/link/SpirV.zig | 16 ++- src/link/Wasm.zig | 2 + 9 files changed, 482 insertions(+), 372 deletions(-) (limited to 'src/link') diff --git a/src/codegen.zig b/src/codegen.zig index 11a2603aac..20d7035822 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -642,7 +642,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try self.dbgSetPrologueEnd(); - try 
self.genBody(self.mod_fn.body); + try self.genBody(self.air.getMainBody()); // Backpatch push callee saved regs var saved_regs = Instruction.RegisterList{ @@ -703,7 +703,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { writeInt(u32, try self.code.addManyAsArray(4), Instruction.ldm(.al, .sp, true, saved_regs).toU32()); } else { try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.body); + try self.genBody(self.air.getMainBody()); try self.dbgSetEpilogueBegin(); } }, @@ -727,7 +727,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.body); + try self.genBody(self.air.getMainBody()); // Backpatch stack offset const stack_end = self.max_end_stack; @@ -779,13 +779,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { writeInt(u32, try self.code.addManyAsArray(4), Instruction.ret(null).toU32()); } else { try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.body); + try self.genBody(self.air.getMainBody()); try self.dbgSetEpilogueBegin(); } }, else => { try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.body); + try self.genBody(self.air.getMainBody()); try self.dbgSetEpilogueBegin(); }, } @@ -1492,7 +1492,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }; } - fn genArmBinOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref, op: ir.Inst.Tag) !MCValue { + fn genArmBinOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref, op: Air.Inst.Tag) !MCValue { const lhs = try self.resolveInst(op_lhs); const rhs = try self.resolveInst(op_rhs); @@ -1514,14 +1514,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (reuse_lhs) { // Allocate 0 or 1 registers if (!rhs_is_register and rhs_should_be_register) { - rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_rhs, &.{lhs.register}) }; + rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_rhs).?, &.{lhs.register}) }; branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv); } dst_mcv = lhs; } else if (reuse_rhs) { // Allocate 0 or 1 registers if (!lhs_is_register and lhs_should_be_register) { - lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_lhs, &.{rhs.register}) }; + lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_lhs).?, &.{rhs.register}) }; branch.inst_table.putAssumeCapacity(Air.refToIndex(op_lhs).?, lhs_mcv); } dst_mcv = rhs; @@ -1542,7 +1542,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { lhs_mcv = dst_mcv; } else { // Move LHS and RHS to register - const regs = try self.register_manager.allocRegs(2, .{ inst, op_rhs }, &.{}); + const regs = try self.register_manager.allocRegs(2, .{ inst, Air.refToIndex(op_rhs).? 
}, &.{}); lhs_mcv = MCValue{ .register = regs[0] }; rhs_mcv = MCValue{ .register = regs[1] }; dst_mcv = lhs_mcv; @@ -1572,10 +1572,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // Move the operands to the newly allocated registers if (lhs_mcv == .register and !lhs_is_register) { - try self.genSetReg(op_lhs.ty, lhs_mcv.register, lhs); + try self.genSetReg(self.air.typeOf(op_lhs), lhs_mcv.register, lhs); } if (rhs_mcv == .register and !rhs_is_register) { - try self.genSetReg(op_rhs.ty, rhs_mcv.register, rhs); + try self.genSetReg(self.air.typeOf(op_rhs), rhs_mcv.register, rhs); } try self.genArmBinOpCode( @@ -1594,7 +1594,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { lhs_mcv: MCValue, rhs_mcv: MCValue, swap_lhs_and_rhs: bool, - op: ir.Inst.Tag, + op: Air.Inst.Tag, ) !void { assert(lhs_mcv == .register or rhs_mcv == .register); @@ -1665,14 +1665,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (reuse_lhs) { // Allocate 0 or 1 registers if (!rhs_is_register) { - rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_rhs, &.{lhs.register}) }; + rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_rhs).?, &.{lhs.register}) }; branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv); } dst_mcv = lhs; } else if (reuse_rhs) { // Allocate 0 or 1 registers if (!lhs_is_register) { - lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_lhs, &.{rhs.register}) }; + lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_lhs).?, &.{rhs.register}) }; branch.inst_table.putAssumeCapacity(Air.refToIndex(op_lhs).?, lhs_mcv); } dst_mcv = rhs; @@ -1690,7 +1690,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { lhs_mcv = dst_mcv; } else { // Move LHS and RHS to register - const regs = try self.register_manager.allocRegs(2, .{ inst, op_rhs }, &.{}); + const regs = try self.register_manager.allocRegs(2, .{ inst, Air.refToIndex(op_rhs).? 
}, &.{}); lhs_mcv = MCValue{ .register = regs[0] }; rhs_mcv = MCValue{ .register = regs[1] }; dst_mcv = lhs_mcv; @@ -1701,10 +1701,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // Move the operands to the newly allocated registers if (!lhs_is_register) { - try self.genSetReg(op_lhs.ty, lhs_mcv.register, lhs); + try self.genSetReg(self.air.typeOf(op_lhs), lhs_mcv.register, lhs); } if (!rhs_is_register) { - try self.genSetReg(op_rhs.ty, rhs_mcv.register, rhs); + try self.genSetReg(self.air.typeOf(op_rhs), rhs_mcv.register, rhs); } writeInt(u32, try self.code.addManyAsArray(4), Instruction.mul(.al, dst_mcv.register, lhs_mcv.register, rhs_mcv.register).toU32()); @@ -2704,9 +2704,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }, .aarch64 => { for (info.args) |mc_arg, arg_i| { - const arg = inst.args[arg_i]; + const arg = args[arg_i]; const arg_ty = self.air.typeOf(arg); - const arg_mcv = try self.resolveInst(inst.args[arg_i]); + const arg_mcv = try self.resolveInst(args[arg_i]); switch (mc_arg) { .none => continue, @@ -2733,7 +2733,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }, } } - if (inst.func.value()) |func_value| { + if (self.air.value(callee)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); @@ -2899,15 +2899,17 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // Allocate registers if (rhs_should_be_register) { if (!lhs_is_register and !rhs_is_register) { - const regs = try self.register_manager.allocRegs(2, .{ bin_op.rhs, bin_op.lhs }, &.{}); + const regs = try self.register_manager.allocRegs(2, .{ + Air.refToIndex(bin_op.rhs).?, Air.refToIndex(bin_op.lhs).?, + }, &.{}); lhs_mcv = MCValue{ .register = regs[0] }; rhs_mcv = MCValue{ .register = regs[1] }; } else if (!rhs_is_register) { - rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(bin_op.rhs, &.{}) }; + rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(bin_op.rhs).?, &.{}) }; } } if (!lhs_is_register) { - lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(bin_op.lhs, &.{}) }; + lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(bin_op.lhs).?, &.{}) }; } // Move the operands to the newly allocated registers @@ -3538,7 +3540,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { break :result MCValue{ .register = reg }; } else { - break :result MCValue.none; + break :result MCValue{ .none = {} }; } }, .aarch64 => result: { @@ -3576,7 +3578,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.fail("unrecognized register: '{s}'", .{reg_name}); break :result MCValue{ .register = reg }; } else { - break :result MCValue.none; + break :result MCValue{ .none = {} }; } }, .riscv64 => result: { @@ -3612,7 +3614,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.fail("unrecognized register: '{s}'", .{reg_name}); break :result MCValue{ .register = reg }; } else { - break :result MCValue.none; + break :result MCValue{ .none = {} }; } }, .x86_64, .i386 => result: { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 1fe330a894..7137116037 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -974,6 +974,9 @@ fn airArg(o: *Object) CValue { fn airLoad(o: *Object, inst: Air.Inst.Index) !CValue { const ty_op = o.air.instructions.items(.data)[inst].ty_op; + const is_volatile = o.air.typeOf(ty_op.operand).isVolatilePtr(); + if (!is_volatile 
and o.liveness.isUnused(inst))
+        return CValue.none;
     const inst_ty = o.air.typeOfIndex(inst);
     const operand = try o.resolveInst(ty_op.operand);
     const writer = o.writer();
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index ddf2883259..d9090c9f2c 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -10,7 +10,7 @@ const math = std.math;
 const Module = @import("../Module.zig");
 const TypedValue = @import("../TypedValue.zig");
 const Air = @import("../Air.zig");
-const Inst = ir.Inst;
+const Liveness = @import("../Liveness.zig");
 const Value = @import("../value.zig").Value;
 const Type = @import("../type.zig").Type;
@@ -355,6 +355,7 @@
         builder.positionBuilderAtEnd(entry_block);

         var fg: FuncGen = .{
+            .gpa = self.gpa,
             .dg = self,
             .builder = builder,
             .args = args,
@@ -593,29 +594,29 @@
 };

 pub const FuncGen = struct {
+    gpa: *Allocator,
     dg: *DeclGen,
     builder: *const llvm.Builder,

-    /// This stores the LLVM values used in a function, such that they can be
-    /// referred to in other instructions. This table is cleared before every function is generated.
-    /// TODO: Change this to a stack of Branch. Currently we store all the values from all the blocks
-    /// in here, however if a block ends, the instructions can be thrown away.
-    func_inst_table: std.AutoHashMapUnmanaged(*Inst, *const llvm.Value),
+    /// This stores the LLVM values used in a function, such that they can be referred to
+    /// in other instructions. This table is cleared before every function is generated.
+    func_inst_table: std.AutoHashMapUnmanaged(Air.Inst.Index, *const llvm.Value),

-    /// These fields are used to refer to the LLVM value of the function parameters in an Arg instruction.
+    /// These fields are used to refer to the LLVM value of the function parameters
+    /// in an Arg instruction.
     args: []*const llvm.Value,
     arg_index: usize,

     entry_block: *const llvm.BasicBlock,
-    /// This field stores the last alloca instruction, such that we can append more alloca instructions
-    /// to the top of the function.
+    /// This field stores the last alloca instruction, such that we can append
+    /// more alloca instructions to the top of the function.
     latest_alloca_inst: ?*const llvm.Value,

     llvm_func: *const llvm.Value,

     /// This data structure is used to implement breaking to blocks.
- blocks: std.AutoHashMapUnmanaged(*Inst.Block, struct { + blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, struct { parent_bb: *const llvm.BasicBlock, break_bbs: *BreakBasicBlocks, break_vals: *BreakValues, @@ -626,9 +627,9 @@ pub const FuncGen = struct { fn deinit(self: *FuncGen) void { self.builder.dispose(); - self.func_inst_table.deinit(self.gpa()); - self.gpa().free(self.args); - self.blocks.deinit(self.gpa()); + self.func_inst_table.deinit(self.gpa); + self.gpa.free(self.args); + self.blocks.deinit(self.gpa); } fn todo(self: *FuncGen, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } { @@ -644,13 +645,9 @@ pub const FuncGen = struct { return self.dg.object.context; } - fn gpa(self: *FuncGen) *Allocator { - return self.dg.gpa; - } - - fn resolveInst(self: *FuncGen, inst: *ir.Inst) !*const llvm.Value { - if (inst.value()) |val| { - return self.dg.genTypedValue(.{ .ty = inst.ty, .val = val }, self); + fn resolveInst(self: *FuncGen, inst: Air.Inst.Ref) !*const llvm.Value { + if (self.air.value(inst)) |val| { + return self.dg.genTypedValue(.{ .ty = self.air.typeOf(inst), .val = val }, self); } if (self.func_inst_table.get(inst)) |value| return value; @@ -658,51 +655,57 @@ pub const FuncGen = struct { } fn genBody(self: *FuncGen, body: ir.Body) error{ OutOfMemory, CodegenFail }!void { + const air_tags = self.air.instructions.items(.tag); for (body.instructions) |inst| { - const opt_value = switch (inst.tag) { - .add => try self.genAdd(inst.castTag(.add).?), - .alloc => try self.genAlloc(inst.castTag(.alloc).?), - .arg => try self.genArg(inst.castTag(.arg).?), - .bitcast => try self.genBitCast(inst.castTag(.bitcast).?), - .block => try self.genBlock(inst.castTag(.block).?), - .br => try self.genBr(inst.castTag(.br).?), - .breakpoint => try self.genBreakpoint(inst.castTag(.breakpoint).?), - .br_void => try self.genBrVoid(inst.castTag(.br_void).?), - .call => try self.genCall(inst.castTag(.call).?), - .cmp_eq => try self.genCmp(inst.castTag(.cmp_eq).?, .eq), - .cmp_gt => try self.genCmp(inst.castTag(.cmp_gt).?, .gt), - .cmp_gte => try self.genCmp(inst.castTag(.cmp_gte).?, .gte), - .cmp_lt => try self.genCmp(inst.castTag(.cmp_lt).?, .lt), - .cmp_lte => try self.genCmp(inst.castTag(.cmp_lte).?, .lte), - .cmp_neq => try self.genCmp(inst.castTag(.cmp_neq).?, .neq), - .condbr => try self.genCondBr(inst.castTag(.condbr).?), - .intcast => try self.genIntCast(inst.castTag(.intcast).?), - .is_non_null => try self.genIsNonNull(inst.castTag(.is_non_null).?, false), - .is_non_null_ptr => try self.genIsNonNull(inst.castTag(.is_non_null_ptr).?, true), - .is_null => try self.genIsNull(inst.castTag(.is_null).?, false), - .is_null_ptr => try self.genIsNull(inst.castTag(.is_null_ptr).?, true), - .load => try self.genLoad(inst.castTag(.load).?), - .loop => try self.genLoop(inst.castTag(.loop).?), - .not => try self.genNot(inst.castTag(.not).?), - .ret => try self.genRet(inst.castTag(.ret).?), - .retvoid => self.genRetVoid(inst.castTag(.retvoid).?), - .store => try self.genStore(inst.castTag(.store).?), - .sub => try self.genSub(inst.castTag(.sub).?), - .unreach => self.genUnreach(inst.castTag(.unreach).?), - .optional_payload => try self.genOptionalPayload(inst.castTag(.optional_payload).?, false), - .optional_payload_ptr => try self.genOptionalPayload(inst.castTag(.optional_payload_ptr).?, true), + const opt_value = switch (air_tags[inst]) { + .add => try self.airAdd(inst), + .sub => try self.airSub(inst), + + .cmp_eq => try self.airCmp(inst, .eq), + .cmp_gt => try 
self.airCmp(inst, .gt), + .cmp_gte => try self.airCmp(inst, .gte), + .cmp_lt => try self.airCmp(inst, .lt), + .cmp_lte => try self.airCmp(inst, .lte), + .cmp_neq => try self.airCmp(inst, .neq), + + .is_non_null => try self.airIsNonNull(inst, false), + .is_non_null_ptr => try self.airIsNonNull(inst, true), + .is_null => try self.airIsNull(inst, false), + .is_null_ptr => try self.airIsNull(inst, true), + + .alloc => try self.airAlloc(inst), + .arg => try self.airArg(inst), + .bitcast => try self.airBitCast(inst), + .block => try self.airBlock(inst), + .br => try self.airBr(inst), + .breakpoint => try self.airBreakpoint(inst), + .call => try self.airCall(inst), + .cond_br => try self.airCondBr(inst), + .intcast => try self.airIntCast(inst), + .load => try self.airLoad(inst), + .loop => try self.airLoop(inst), + .not => try self.airNot(inst), + .ret => try self.airRet(inst), + .store => try self.airStore(inst), + .unreach => self.airUnreach(inst), + .optional_payload => try self.airOptionalPayload(inst, false), + .optional_payload_ptr => try self.airOptionalPayload(inst, true), .dbg_stmt => blk: { // TODO: implement debug info break :blk null; }, - else => |tag| return self.todo("implement TZIR instruction: {}", .{tag}), + else => |tag| return self.todo("implement AIR instruction: {}", .{tag}), }; - if (opt_value) |val| try self.func_inst_table.putNoClobber(self.gpa(), inst, val); + if (opt_value) |val| try self.func_inst_table.putNoClobber(self.gpa, inst, val); } } - fn genCall(self: *FuncGen, inst: *Inst.Call) !?*const llvm.Value { - if (inst.func.value()) |func_value| { + fn airCall(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + const pl_op = self.air.instructions.items(.data)[inst].pl_op; + const extra = self.air.extraData(Air.Call, pl_op.payload); + const args = @bitCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); + + if (self.air.value(pl_op.operand)) |func_value| { const fn_decl = if (func_value.castTag(.extern_fn)) |extern_fn| extern_fn.data else if (func_value.castTag(.function)) |func_payload| @@ -714,12 +717,10 @@ pub const FuncGen = struct { const zig_fn_type = fn_decl.ty; const llvm_fn = try self.dg.resolveLLVMFunction(fn_decl); - const num_args = inst.args.len; - - const llvm_param_vals = try self.gpa().alloc(*const llvm.Value, num_args); - defer self.gpa().free(llvm_param_vals); + const llvm_param_vals = try self.gpa.alloc(*const llvm.Value, args.len); + defer self.gpa.free(llvm_param_vals); - for (inst.args) |arg, i| { + for (args) |arg, i| { llvm_param_vals[i] = try self.resolveInst(arg); } @@ -727,8 +728,8 @@ pub const FuncGen = struct { // Do we need that? const call = self.builder.buildCall( llvm_fn, - if (num_args == 0) null else llvm_param_vals.ptr, - @intCast(c_uint, num_args), + if (args.len == 0) null else llvm_param_vals.ptr, + @intCast(c_uint, args.len), "", ); @@ -746,31 +747,31 @@ pub const FuncGen = struct { } } - fn genRetVoid(self: *FuncGen, inst: *Inst.NoOp) ?*const llvm.Value { - _ = inst; - _ = self.builder.buildRetVoid(); - return null; - } - - fn genRet(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value { - if (!inst.operand.ty.hasCodeGenBits()) { - // TODO: in astgen these instructions should turn into `retvoid` instructions. 
+ fn airRet(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + const un_op = self.air.instructions.items(.data)[inst].un_op; + if (!self.air.typeOf(un_op).hasCodeGenBits()) { _ = self.builder.buildRetVoid(); return null; } - _ = self.builder.buildRet(try self.resolveInst(inst.operand)); + const operand = try self.resolveInst(un_op); + _ = self.builder.buildRet(operand); return null; } - fn genCmp(self: *FuncGen, inst: *Inst.BinOp, op: math.CompareOperator) !?*const llvm.Value { - const lhs = try self.resolveInst(inst.lhs); - const rhs = try self.resolveInst(inst.rhs); + fn airCmp(self: *FuncGen, inst: Air.Inst.Index, op: math.CompareOperator) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + const inst_ty = self.air.typeOfIndex(inst); - if (!inst.base.ty.isInt()) - if (inst.base.ty.tag() != .bool) - return self.todo("implement 'genCmp' for type {}", .{inst.base.ty}); + if (!inst_ty.isInt()) + if (inst_ty.tag() != .bool) + return self.todo("implement 'airCmp' for type {}", .{inst_ty}); - const is_signed = inst.base.ty.isSignedInt(); + const is_signed = inst_ty.isSignedInt(); const operation = switch (op) { .eq => .EQ, .neq => .NE, @@ -783,32 +784,36 @@ pub const FuncGen = struct { return self.builder.buildICmp(operation, lhs, rhs, ""); } - fn genBlock(self: *FuncGen, inst: *Inst.Block) !?*const llvm.Value { + fn airBlock(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const extra = self.air.extraData(Air.Block, ty_pl.payload); + const body = self.air.extra[extra.end..][0..extra.data.body_len]; const parent_bb = self.context().createBasicBlock("Block"); // 5 breaks to a block seems like a reasonable default. - var break_bbs = try BreakBasicBlocks.initCapacity(self.gpa(), 5); - var break_vals = try BreakValues.initCapacity(self.gpa(), 5); - try self.blocks.putNoClobber(self.gpa(), inst, .{ + var break_bbs = try BreakBasicBlocks.initCapacity(self.gpa, 5); + var break_vals = try BreakValues.initCapacity(self.gpa, 5); + try self.blocks.putNoClobber(self.gpa, inst, .{ .parent_bb = parent_bb, .break_bbs = &break_bbs, .break_vals = &break_vals, }); defer { assert(self.blocks.remove(inst)); - break_bbs.deinit(self.gpa()); - break_vals.deinit(self.gpa()); + break_bbs.deinit(self.gpa); + break_vals.deinit(self.gpa); } - try self.genBody(inst.body); + try self.genBody(body); self.llvm_func.appendExistingBasicBlock(parent_bb); self.builder.positionBuilderAtEnd(parent_bb); // If the block does not return a value, we dont have to create a phi node. 
- if (!inst.base.ty.hasCodeGenBits()) return null; + const inst_ty = self.air.typeOfIndex(inst); + if (!inst_ty.hasCodeGenBits()) return null; - const phi_node = self.builder.buildPhi(try self.dg.getLLVMType(inst.base.ty), ""); + const phi_node = self.builder.buildPhi(try self.dg.getLLVMType(inst_ty), ""); phi_node.addIncoming( break_vals.items.ptr, break_bbs.items.ptr, @@ -817,35 +822,30 @@ pub const FuncGen = struct { return phi_node; } - fn genBr(self: *FuncGen, inst: *Inst.Br) !?*const llvm.Value { - var block = self.blocks.get(inst.block).?; + fn airBr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + const branch = self.air.instructions.items(.data)[inst].br; + const block = self.blocks.get(branch.block_inst).?; // If the break doesn't break a value, then we don't have to add // the values to the lists. - if (!inst.operand.ty.hasCodeGenBits()) { - // TODO: in astgen these instructions should turn into `br_void` instructions. - _ = self.builder.buildBr(block.parent_bb); - } else { - const val = try self.resolveInst(inst.operand); + if (self.air.typeOf(branch.result).hasCodeGenBits()) { + const val = try self.resolveInst(branch.result); // For the phi node, we need the basic blocks and the values of the // break instructions. - try block.break_bbs.append(self.gpa(), self.builder.getInsertBlock()); - try block.break_vals.append(self.gpa(), val); - - _ = self.builder.buildBr(block.parent_bb); + try block.break_bbs.append(self.gpa, self.builder.getInsertBlock()); + try block.break_vals.append(self.gpa, val); } - return null; - } - - fn genBrVoid(self: *FuncGen, inst: *Inst.BrVoid) !?*const llvm.Value { - var block = self.blocks.get(inst.block).?; _ = self.builder.buildBr(block.parent_bb); return null; } - fn genCondBr(self: *FuncGen, inst: *Inst.CondBr) !?*const llvm.Value { - const condition_value = try self.resolveInst(inst.condition); + fn airCondBr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + const pl_op = self.air.instructions.items(.data)[inst].pl_op; + const cond = try self.resolveInst(pl_op.operand); + const extra = self.air.extraData(Air.CondBr, pl_op.payload); + const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len]; + const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; const then_block = self.context().appendBasicBlock(self.llvm_func, "Then"); const else_block = self.context().appendBasicBlock(self.llvm_func, "Else"); @@ -854,38 +854,51 @@ pub const FuncGen = struct { defer self.builder.positionBuilderAtEnd(prev_block); self.builder.positionBuilderAtEnd(then_block); - try self.genBody(inst.then_body); + try self.genBody(then_body); self.builder.positionBuilderAtEnd(else_block); - try self.genBody(inst.else_body); + try self.genBody(else_body); } - _ = self.builder.buildCondBr(condition_value, then_block, else_block); + _ = self.builder.buildCondBr(cond, then_block, else_block); return null; } - fn genLoop(self: *FuncGen, inst: *Inst.Loop) !?*const llvm.Value { + fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const loop = self.air.extraData(Air.Block, ty_pl.payload); + const body = self.air.extra[loop.end..][0..loop.data.body_len]; const loop_block = self.context().appendBasicBlock(self.llvm_func, "Loop"); _ = self.builder.buildBr(loop_block); self.builder.positionBuilderAtEnd(loop_block); - try self.genBody(inst.body); + try self.genBody(body); _ = self.builder.buildBr(loop_block); return null; } - fn 
genNot(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value { - return self.builder.buildNot(try self.resolveInst(inst.operand), ""); + fn airNot(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand = try self.resolveInst(un_op); + + return self.builder.buildNot(operand, ""); } - fn genUnreach(self: *FuncGen, inst: *Inst.NoOp) ?*const llvm.Value { + fn airUnreach(self: *FuncGen, inst: Air.Inst.Index) ?*const llvm.Value { _ = inst; _ = self.builder.buildUnreachable(); return null; } - fn genIsNonNull(self: *FuncGen, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value { - const operand = try self.resolveInst(inst.operand); + fn airIsNonNull(self: *FuncGen, inst: Air.Inst.Index, operand_is_ptr: bool) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand = try self.resolveInst(un_op); if (operand_is_ptr) { const index_type = self.context().intType(32); @@ -901,12 +914,23 @@ pub const FuncGen = struct { } } - fn genIsNull(self: *FuncGen, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value { - return self.builder.buildNot((try self.genIsNonNull(inst, operand_is_ptr)).?, ""); + fn airIsNull(self: *FuncGen, inst: Air.Inst.Index, operand_is_ptr: bool) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + + return self.builder.buildNot((try self.airIsNonNull(inst, operand_is_ptr)).?, ""); } - fn genOptionalPayload(self: *FuncGen, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value { - const operand = try self.resolveInst(inst.operand); + fn airOptionalPayload( + self: *FuncGen, + inst: Air.Inst.Index, + operand_is_ptr: bool, + ) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const operand = try self.resolveInst(ty_op.operand); if (operand_is_ptr) { const index_type = self.context().intType(32); @@ -922,61 +946,83 @@ pub const FuncGen = struct { } } - fn genAdd(self: *FuncGen, inst: *Inst.BinOp) !?*const llvm.Value { - const lhs = try self.resolveInst(inst.lhs); - const rhs = try self.resolveInst(inst.rhs); + fn airAdd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + const inst_ty = self.air.typeOfIndex(inst); - if (!inst.base.ty.isInt()) - return self.todo("implement 'genAdd' for type {}", .{inst.base.ty}); + if (!inst_ty.isInt()) + return self.todo("implement 'airAdd' for type {}", .{inst_ty}); - return if (inst.base.ty.isSignedInt()) + return if (inst_ty.isSignedInt()) self.builder.buildNSWAdd(lhs, rhs, "") else self.builder.buildNUWAdd(lhs, rhs, ""); } - fn genSub(self: *FuncGen, inst: *Inst.BinOp) !?*const llvm.Value { - const lhs = try self.resolveInst(inst.lhs); - const rhs = try self.resolveInst(inst.rhs); + fn airSub(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + const inst_ty = self.air.typeOfIndex(inst); - if (!inst.base.ty.isInt()) - return self.todo("implement 'genSub' 
for type {}", .{inst.base.ty}); + if (!inst_ty.isInt()) + return self.todo("implement 'airSub' for type {}", .{inst_ty}); - return if (inst.base.ty.isSignedInt()) + return if (inst_ty.isSignedInt()) self.builder.buildNSWSub(lhs, rhs, "") else self.builder.buildNUWSub(lhs, rhs, ""); } - fn genIntCast(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value { - const val = try self.resolveInst(inst.operand); + fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const operand = try self.resolveInst(ty_op.operand); + const inst_ty = self.air.typeOfIndex(inst); - const signed = inst.base.ty.isSignedInt(); + const signed = inst_ty.isSignedInt(); // TODO: Should we use intcast here or just a simple bitcast? // LLVM does truncation vs bitcast (+signed extension) in the intcast depending on the sizes - return self.builder.buildIntCast2(val, try self.dg.getLLVMType(inst.base.ty), llvm.Bool.fromBool(signed), ""); + return self.builder.buildIntCast2(operand, try self.dg.getLLVMType(inst_ty), llvm.Bool.fromBool(signed), ""); } - fn genBitCast(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value { - const val = try self.resolveInst(inst.operand); - const dest_type = try self.dg.getLLVMType(inst.base.ty); + fn airBitCast(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const operand = try self.resolveInst(ty_op.operand); + const inst_ty = self.air.typeOfIndex(inst); + const dest_type = try self.dg.getLLVMType(inst_ty); - return self.builder.buildBitCast(val, dest_type, ""); + return self.builder.buildBitCast(operand, dest_type, ""); } - fn genArg(self: *FuncGen, inst: *Inst.Arg) !?*const llvm.Value { + fn airArg(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { const arg_val = self.args[self.arg_index]; self.arg_index += 1; - const ptr_val = self.buildAlloca(try self.dg.getLLVMType(inst.base.ty)); + const inst_ty = self.air.typeOfIndex(inst); + const ptr_val = self.buildAlloca(try self.dg.getLLVMType(inst_ty)); _ = self.builder.buildStore(arg_val, ptr_val); return self.builder.buildLoad(ptr_val, ""); } - fn genAlloc(self: *FuncGen, inst: *Inst.NoOp) !?*const llvm.Value { + fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; // buildAlloca expects the pointee type, not the pointer type, so assert that // a Payload.PointerSimple is passed to the alloc instruction. - const pointee_type = inst.base.ty.castPointer().?.data; + const inst_ty = self.air.typeOfIndex(inst); + const pointee_type = inst_ty.castPointer().?.data; // TODO: figure out a way to get the name of the var decl. 
// TODO: set alignment and volatile @@ -1007,19 +1053,26 @@ pub const FuncGen = struct { return val; } - fn genStore(self: *FuncGen, inst: *Inst.BinOp) !?*const llvm.Value { - const val = try self.resolveInst(inst.rhs); - const ptr = try self.resolveInst(inst.lhs); - _ = self.builder.buildStore(val, ptr); + fn airStore(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const dest_ptr = try self.resolveInst(bin_op.lhs); + const src_operand = try self.resolveInst(bin_op.rhs); + // TODO set volatile on this store properly + _ = self.builder.buildStore(src_operand, dest_ptr); return null; } - fn genLoad(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value { - const ptr_val = try self.resolveInst(inst.operand); - return self.builder.buildLoad(ptr_val, ""); + fn airLoad(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr(); + if (!is_volatile and self.liveness.isUnused(inst)) + return null; + const ptr = try self.resolveInst(ty_op.operand); + // TODO set volatile on this load properly + return self.builder.buildLoad(ptr, ""); } - fn genBreakpoint(self: *FuncGen, inst: *Inst.NoOp) !?*const llvm.Value { + fn airBreakpoint(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { _ = inst; const llvn_fn = self.getIntrinsic("llvm.debugtrap"); _ = self.builder.buildCall(llvn_fn, null, 0, ""); diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 4da320b087..7429e3c3b0 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -13,6 +13,7 @@ const Type = @import("../type.zig").Type; const Value = @import("../value.zig").Value; const LazySrcLoc = Module.LazySrcLoc; const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); pub const Word = u32; pub const ResultId = u32; @@ -247,6 +248,7 @@ pub const DeclGen = struct { return .{ .spv = spv, .air = undefined, + .liveness = undefined, .args = std.ArrayList(ResultId).init(spv.gpa), .next_arg_index = undefined, .inst_results = InstMap.init(spv.gpa), @@ -259,11 +261,12 @@ pub const DeclGen = struct { } /// Generate the code for `decl`. If a reportable error occured during code generation, - /// a message is returned by this function. Callee owns the memory. If this function returns such - /// a reportable error, it is valid to be called again for a different decl. - pub fn gen(self: *DeclGen, decl: *Decl, air: Air) !?*Module.ErrorMsg { + /// a message is returned by this function. Callee owns the memory. If this function + /// returns such a reportable error, it is valid to be called again for a different decl. + pub fn gen(self: *DeclGen, decl: *Decl, air: Air, liveness: Liveness) !?*Module.ErrorMsg { // Reset internal resources, we don't want to re-allocate these. - self.air = &air; + self.air = air; + self.liveness = liveness; self.args.items.len = 0; self.next_arg_index = 0; self.inst_results.clearRetainingCapacity(); @@ -297,12 +300,12 @@ pub const DeclGen = struct { return error.AnalysisFail; } - fn resolve(self: *DeclGen, inst: Air.Inst.Index) !ResultId { - if (inst.value()) |val| { - return self.genConstant(inst.ty, val); + fn resolve(self: *DeclGen, inst: Air.Inst.Ref) !ResultId { + if (self.air.value(inst)) |val| { + return self.genConstant(self.air.typeOf(inst), val); } - - return self.inst_results.get(inst).?; // Instruction does not dominate all uses! 
+ const index = Air.refToIndex(inst).?; + return self.inst_results.get(index).?; // Assertion means instruction does not dominate usage. } fn beginSPIRVBlock(self: *DeclGen, label_id: ResultId) !void { @@ -663,40 +666,40 @@ pub const DeclGen = struct { const air_tags = self.air.instructions.items(.tag); const result_id = switch (air_tags[inst]) { // zig fmt: off - .add, .addwrap => try self.genArithOp(inst, .{.OpFAdd, .OpIAdd, .OpIAdd}), - .sub, .subwrap => try self.genArithOp(inst, .{.OpFSub, .OpISub, .OpISub}), - .mul, .mulwrap => try self.genArithOp(inst, .{.OpFMul, .OpIMul, .OpIMul}), - .div => try self.genArithOp(inst, .{.OpFDiv, .OpSDiv, .OpUDiv}), - - .bit_and => try self.genBinOpSimple(inst, .OpBitwiseAnd), - .bit_or => try self.genBinOpSimple(inst, .OpBitwiseOr), - .xor => try self.genBinOpSimple(inst, .OpBitwiseXor), - .bool_and => try self.genBinOpSimple(inst, .OpLogicalAnd), - .bool_or => try self.genBinOpSimple(inst, .OpLogicalOr), - - .not => try self.genNot(inst), - - .cmp_eq => try self.genCmp(inst, .{.OpFOrdEqual, .OpLogicalEqual, .OpIEqual}), - .cmp_neq => try self.genCmp(inst, .{.OpFOrdNotEqual, .OpLogicalNotEqual, .OpINotEqual}), - .cmp_gt => try self.genCmp(inst, .{.OpFOrdGreaterThan, .OpSGreaterThan, .OpUGreaterThan}), - .cmp_gte => try self.genCmp(inst, .{.OpFOrdGreaterThanEqual, .OpSGreaterThanEqual, .OpUGreaterThanEqual}), - .cmp_lt => try self.genCmp(inst, .{.OpFOrdLessThan, .OpSLessThan, .OpULessThan}), - .cmp_lte => try self.genCmp(inst, .{.OpFOrdLessThanEqual, .OpSLessThanEqual, .OpULessThanEqual}), - - .arg => self.genArg(), - .alloc => try self.genAlloc(inst), - .block => (try self.genBlock(inst)) orelse return, - .load => try self.genLoad(inst), - - .br => return self.genBr(inst), + .add, .addwrap => try self.airArithOp(inst, .{.OpFAdd, .OpIAdd, .OpIAdd}), + .sub, .subwrap => try self.airArithOp(inst, .{.OpFSub, .OpISub, .OpISub}), + .mul, .mulwrap => try self.airArithOp(inst, .{.OpFMul, .OpIMul, .OpIMul}), + .div => try self.airArithOp(inst, .{.OpFDiv, .OpSDiv, .OpUDiv}), + + .bit_and => try self.airBinOpSimple(inst, .OpBitwiseAnd), + .bit_or => try self.airBinOpSimple(inst, .OpBitwiseOr), + .xor => try self.airBinOpSimple(inst, .OpBitwiseXor), + .bool_and => try self.airBinOpSimple(inst, .OpLogicalAnd), + .bool_or => try self.airBinOpSimple(inst, .OpLogicalOr), + + .not => try self.airNot(inst), + + .cmp_eq => try self.airCmp(inst, .{.OpFOrdEqual, .OpLogicalEqual, .OpIEqual}), + .cmp_neq => try self.airCmp(inst, .{.OpFOrdNotEqual, .OpLogicalNotEqual, .OpINotEqual}), + .cmp_gt => try self.airCmp(inst, .{.OpFOrdGreaterThan, .OpSGreaterThan, .OpUGreaterThan}), + .cmp_gte => try self.airCmp(inst, .{.OpFOrdGreaterThanEqual, .OpSGreaterThanEqual, .OpUGreaterThanEqual}), + .cmp_lt => try self.airCmp(inst, .{.OpFOrdLessThan, .OpSLessThan, .OpULessThan}), + .cmp_lte => try self.airCmp(inst, .{.OpFOrdLessThanEqual, .OpSLessThanEqual, .OpULessThanEqual}), + + .arg => self.airArg(), + .alloc => try self.airAlloc(inst), + .block => (try self.airBlock(inst)) orelse return, + .load => try self.airLoad(inst), + + .br => return self.airBr(inst), .breakpoint => return, - .cond_br => return self.genCondBr(inst), + .cond_br => return self.airCondBr(inst), .constant => unreachable, - .dbg_stmt => return self.genDbgStmt(inst), - .loop => return self.genLoop(inst), - .ret => return self.genRet(inst), - .store => return self.genStore(inst), - .unreach => return self.genUnreach(), + .dbg_stmt => return self.airDbgStmt(inst), + .loop => return self.airLoop(inst), + .ret => return 
self.airRet(inst), + .store => return self.airStore(inst), + .unreach => return self.airUnreach(), // zig fmt: on else => |tag| return self.fail("TODO: SPIR-V backend: implement AIR tag {s}", .{ @@ -707,21 +710,22 @@ pub const DeclGen = struct { try self.inst_results.putNoClobber(inst, result_id); } - fn genBinOpSimple(self: *DeclGen, inst: Air.Inst.Index, opcode: Opcode) !ResultId { + fn airBinOpSimple(self: *DeclGen, inst: Air.Inst.Index, opcode: Opcode) !ResultId { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs_id = try self.resolve(bin_op.lhs); const rhs_id = try self.resolve(bin_op.rhs); const result_id = self.spv.allocResultId(); + const result_type_id = try self.genType(self.air.typeOfIndex(inst)); try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, lhs_id, rhs_id, }); return result_id; } - fn genArithOp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId { + fn airArithOp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId { // LHS and RHS are guaranteed to have the same type, and AIR guarantees // the result to be the same as the LHS and RHS, which matches SPIR-V. - const ty = self.air.getType(inst); + const ty = self.air.typeOfIndex(inst); const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs_id = try self.resolve(bin_op.lhs); const rhs_id = try self.resolve(bin_op.rhs); @@ -729,8 +733,8 @@ pub const DeclGen = struct { const result_id = self.spv.allocResultId(); const result_type_id = try self.genType(ty); - assert(self.air.getType(bin_op.lhs).eql(ty)); - assert(self.air.getType(bin_op.rhs).eql(ty)); + assert(self.air.typeOf(bin_op.lhs).eql(ty)); + assert(self.air.typeOf(bin_op.rhs).eql(ty)); // Binary operations are generally applicable to both scalar and vector operations // in SPIR-V, but int and float versions of operations require different opcodes. @@ -744,8 +748,8 @@ pub const DeclGen = struct { return self.fail("TODO: SPIR-V backend: binary operations for strange integers", .{}); }, .integer => switch (info.signedness) { - .signed => 1, - .unsigned => 2, + .signed => @as(usize, 1), + .unsigned => @as(usize, 2), }, .float => 0, else => unreachable, @@ -759,14 +763,14 @@ pub const DeclGen = struct { return result_id; } - fn genCmp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId { + fn airCmp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs_id = try self.resolve(bin_op.lhs); const rhs_id = try self.resolve(bin_op.rhs); const result_id = self.spv.allocResultId(); const result_type_id = try self.genType(Type.initTag(.bool)); - const op_ty = self.air.getType(bin_op.lhs); - assert(op_ty.eql(self.air.getType(bin_op.rhs))); + const op_ty = self.air.typeOf(bin_op.lhs); + assert(op_ty.eql(self.air.typeOf(bin_op.rhs))); // Comparisons are generally applicable to both scalar and vector operations in SPIR-V, // but int and float versions of operations require different opcodes. 
@@ -782,10 +786,9 @@ pub const DeclGen = struct { .float => 0, .bool => 1, .integer => switch (info.signedness) { - .signed => 1, - .unsigned => 2, + .signed => @as(usize, 1), + .unsigned => @as(usize, 2), }, - else => unreachable, }; const opcode = ops[opcode_index]; @@ -793,7 +796,7 @@ pub const DeclGen = struct { return result_id; } - fn genNot(self: *DeclGen, inst: Air.Inst.Index) !ResultId { + fn airNot(self: *DeclGen, inst: Air.Inst.Index) !ResultId { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_id = try self.resolve(ty_op.operand); const result_id = self.spv.allocResultId(); @@ -803,8 +806,8 @@ pub const DeclGen = struct { return result_id; } - fn genAlloc(self: *DeclGen, inst: Air.Inst.Index) !ResultId { - const ty = self.air.getType(inst); + fn airAlloc(self: *DeclGen, inst: Air.Inst.Index) !ResultId { + const ty = self.air.typeOfIndex(inst); const storage_class = spec.StorageClass.Function; const result_type_id = try self.genPointerType(ty, storage_class); const result_id = self.spv.allocResultId(); @@ -816,12 +819,12 @@ pub const DeclGen = struct { return result_id; } - fn genArg(self: *DeclGen) ResultId { + fn airArg(self: *DeclGen) ResultId { defer self.next_arg_index += 1; return self.args.items[self.next_arg_index]; } - fn genBlock(self: *DeclGen, inst: Air.Inst.Index) !?ResultId { + fn airBlock(self: *DeclGen, inst: Air.Inst.Index) !?ResultId { // In IR, a block doesn't really define an entry point like a block, but more like a scope that breaks can jump out of and // "return" a value from. This cannot be directly modelled in SPIR-V, so in a block instruction, we're going to split up // the current block by first generating the code of the block, then a label, and then generate the rest of the current @@ -841,7 +844,7 @@ pub const DeclGen = struct { incoming_blocks.deinit(self.spv.gpa); } - const ty = self.air.getType(inst); + const ty = self.air.typeOfIndex(inst); const inst_datas = self.air.instructions.items(.data); const extra = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; @@ -872,10 +875,10 @@ pub const DeclGen = struct { return result_id; } - fn genBr(self: *DeclGen, inst: Air.Inst.Index) !void { + fn airBr(self: *DeclGen, inst: Air.Inst.Index) !void { const br = self.air.instructions.items(.data)[inst].br; const block = self.blocks.get(br.block_inst).?; - const operand_ty = self.air.getType(br.operand); + const operand_ty = self.air.typeOf(br.operand); if (operand_ty.hasCodeGenBits()) { const operand_id = try self.resolve(br.operand); @@ -886,7 +889,7 @@ pub const DeclGen = struct { try writeInstruction(&self.code, .OpBranch, &[_]Word{block.label_id}); } - fn genCondBr(self: *DeclGen, inst: *Inst.CondBr) !void { + fn airCondBr(self: *DeclGen, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const cond_br = self.air.extraData(Air.CondBr, pl_op.payload); const then_body = self.air.extra[cond_br.end..][0..cond_br.data.then_body_len]; @@ -912,16 +915,16 @@ pub const DeclGen = struct { try self.genBody(else_body); } - fn genDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void { + fn airDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void { const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt; const src_fname_id = try self.spv.resolveSourceFileName(self.decl); try writeInstruction(&self.code, .OpLine, &[_]Word{ src_fname_id, dbg_stmt.line, dbg_stmt.column }); } - fn genLoad(self: *DeclGen, inst: 
Air.Inst.Index) !ResultId { + fn airLoad(self: *DeclGen, inst: Air.Inst.Index) !ResultId { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_id = try self.resolve(ty_op.operand); - const ty = self.air.getType(inst); + const ty = self.air.typeOfIndex(inst); const result_type_id = try self.genType(ty); const result_id = self.spv.allocResultId(); @@ -936,8 +939,9 @@ pub const DeclGen = struct { return result_id; } - fn genLoop(self: *DeclGen, inst: Air.Inst.Index) !void { - const loop = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); + fn airLoop(self: *DeclGen, inst: Air.Inst.Index) !void { + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const loop = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[loop.end..][0..loop.data.body_len]; const loop_label_id = self.spv.allocResultId(); @@ -952,9 +956,9 @@ pub const DeclGen = struct { try writeInstruction(&self.code, .OpBranch, &[_]Word{loop_label_id}); } - fn genRet(self: *DeclGen, inst: Air.Inst.Index) !void { - const operand = inst_datas[inst].un_op; - const operand_ty = self.air.getType(operand); + fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void { + const operand = self.air.instructions.items(.data)[inst].un_op; + const operand_ty = self.air.typeOf(operand); if (operand_ty.hasCodeGenBits()) { const operand_id = try self.resolve(operand); try writeInstruction(&self.code, .OpReturnValue, &[_]Word{operand_id}); @@ -963,11 +967,11 @@ pub const DeclGen = struct { } } - fn genStore(self: *DeclGen, inst: Air.Inst.Index) !void { + fn airStore(self: *DeclGen, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const dst_ptr_id = try self.resolve(bin_op.lhs); const src_val_id = try self.resolve(bin_op.rhs); - const lhs_ty = self.air.getType(bin_op.lhs); + const lhs_ty = self.air.typeOf(bin_op.lhs); const operands = if (lhs_ty.isVolatilePtr()) &[_]Word{ dst_ptr_id, src_val_id, @bitCast(u32, spec.MemoryAccess{ .Volatile = true }) } @@ -977,7 +981,7 @@ pub const DeclGen = struct { try writeInstruction(&self.code, .OpStore, operands); } - fn genUnreach(self: *DeclGen) !void { + fn airUnreach(self: *DeclGen) !void { try writeInstruction(&self.code, .OpUnreachable, &[_]Word{}); } }; diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig index 5cf3fb15fd..dbca818297 100644 --- a/src/codegen/wasm.zig +++ b/src/codegen/wasm.zig @@ -774,7 +774,7 @@ pub const Context = struct { } } return Result{ .externally_managed = payload.data }; - } else return self.fail(.{ .node_offset = 0 }, "TODO implement gen for more kinds of arrays", .{}); + } else return self.fail("TODO implement gen for more kinds of arrays", .{}); }, .Int => { const info = typed_value.ty.intInfo(self.target); @@ -783,9 +783,9 @@ pub const Context = struct { try self.code.append(@intCast(u8, int_byte)); return Result.appended; } - return self.fail(.{ .node_offset = 0 }, "TODO: Implement codegen for int type: '{}'", .{typed_value.ty}); + return self.fail("TODO: Implement codegen for int type: '{}'", .{typed_value.ty}); }, - else => |tag| return self.fail(.{ .node_offset = 0 }, "TODO: Implement zig type codegen for type: '{s}'", .{tag}), + else => |tag| return self.fail("TODO: Implement zig type codegen for type: '{s}'", .{tag}), } } @@ -883,7 +883,7 @@ pub const Context = struct { } fn genAlloc(self: *Context, inst: Air.Inst.Index) InnerError!WValue { - const elem_type = self.air.getType(inst).elemType(); + const elem_type = self.air.typeOfIndex(inst).elemType(); return 
self.allocLocal(elem_type); } diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 44442b73a3..50ad6bc1a0 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -657,11 +657,16 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void { } pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { - if (build_options.skip_non_native and builtin.object_format != .coff and builtin.object_format != .pe) { + if (build_options.skip_non_native and + builtin.object_format != .coff and + builtin.object_format != .pe) + { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + if (self.llvm_object) |llvm_object| { + return llvm_object.updateFunc(module, func, air, liveness); + } } const tracy = trace(@src()); defer tracy.end(); @@ -669,6 +674,7 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); + const decl = func.owner_decl; const res = try codegen.generateFunction( &self.base, decl.srcLoc(), @@ -679,7 +685,6 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live .none, ); const code = switch (res) { - .externally_managed => |x| x, .appended => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; @@ -725,10 +730,10 @@ pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void { }, }; - return self.finishUpdateDecl(module, func.owner_decl, code); + return self.finishUpdateDecl(module, decl, code); } -fn finishUpdateDecl(self: *Coff, decl: *Module.Decl, code: []const u8) !void { +fn finishUpdateDecl(self: *Coff, module: *Module, decl: *Module.Decl, code: []const u8) !void { const required_alignment = decl.ty.abiAlignment(self.base.options.target); const curr_size = decl.link.coff.size; if (curr_size != 0) { diff --git a/src/link/MachO.zig b/src/link/MachO.zig index cd020c1b27..4107607924 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1150,9 +1150,13 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - var debug_buffers = if (self.d_sym) |*ds| try ds.initDeclDebugBuffers(self.base.allocator, module, decl) else null; + var debug_buffers_buf: DebugSymbols.DeclDebugBuffers = undefined; + const debug_buffers = if (self.d_sym) |*ds| blk: { + debug_buffers_buf = try ds.initDeclDebugBuffers(self.base.allocator, module, decl); + break :blk &debug_buffers_buf; + } else null; defer { - if (debug_buffers) |*dbg| { + if (debug_buffers) |dbg| { dbg.dbg_line_buffer.deinit(); dbg.dbg_info_buffer.deinit(); var it = dbg.dbg_info_type_relocs.valueIterator(); @@ -1163,7 +1167,7 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv } } - const res = if (debug_buffers) |*dbg| + const res = if (debug_buffers) |dbg| try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{ .dwarf = .{ .dbg_line = &dbg.dbg_line_buffer, @@ -1172,9 +1176,109 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv }, }) else - try codegen.generateSymbol(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none); + try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, 
.none); + switch (res) { + .appended => {}, + .fail => |em| { + // Clear any PIE fixups for this decl. + self.pie_fixups.shrinkRetainingCapacity(0); + // Clear any stub fixups for this decl. + self.stub_fixups.shrinkRetainingCapacity(0); + decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, decl, em); + return; + }, + } + const symbol = try self.placeDecl(decl, code_buffer.items.len); + + // Calculate displacements to target addr (if any). + while (self.pie_fixups.popOrNull()) |fixup| { + assert(fixup.size == 4); + const this_addr = symbol.n_value + fixup.offset; + const target_addr = fixup.target_addr; + + switch (self.base.options.target.cpu.arch) { + .x86_64 => { + const displacement = try math.cast(u32, target_addr - this_addr - 4); + mem.writeIntLittle(u32, code_buffer.items[fixup.offset..][0..4], displacement); + }, + .aarch64 => { + // TODO optimize instruction based on jump length (use ldr(literal) + nop if possible). + { + const inst = code_buffer.items[fixup.offset..][0..4]; + const parsed = mem.bytesAsValue(meta.TagPayload( + aarch64.Instruction, + aarch64.Instruction.pc_relative_address, + ), inst); + const this_page = @intCast(i32, this_addr >> 12); + const target_page = @intCast(i32, target_addr >> 12); + const pages = @bitCast(u21, @intCast(i21, target_page - this_page)); + parsed.immhi = @truncate(u19, pages >> 2); + parsed.immlo = @truncate(u2, pages); + } + { + const inst = code_buffer.items[fixup.offset + 4 ..][0..4]; + const parsed = mem.bytesAsValue(meta.TagPayload( + aarch64.Instruction, + aarch64.Instruction.load_store_register, + ), inst); + const narrowed = @truncate(u12, target_addr); + const offset = try math.divExact(u12, narrowed, 8); + parsed.offset = offset; + } + }, + else => unreachable, // unsupported target architecture + } + } + + // Resolve stubs (if any) + const text_segment = self.load_commands.items[self.text_segment_cmd_index.?].Segment; + const stubs = text_segment.sections.items[self.stubs_section_index.?]; + for (self.stub_fixups.items) |fixup| { + const stub_addr = stubs.addr + fixup.symbol * stubs.reserved2; + const text_addr = symbol.n_value + fixup.start; + switch (self.base.options.target.cpu.arch) { + .x86_64 => { + assert(stub_addr >= text_addr + fixup.len); + const displacement = try math.cast(u32, stub_addr - text_addr - fixup.len); + const placeholder = code_buffer.items[fixup.start + fixup.len - @sizeOf(u32) ..][0..@sizeOf(u32)]; + mem.writeIntSliceLittle(u32, placeholder, displacement); + }, + .aarch64 => { + assert(stub_addr >= text_addr); + const displacement = try math.cast(i28, stub_addr - text_addr); + const placeholder = code_buffer.items[fixup.start..][0..fixup.len]; + mem.writeIntSliceLittle(u32, placeholder, aarch64.Instruction.bl(displacement).toU32()); + }, + else => unreachable, // unsupported target architecture + } + if (!fixup.already_defined) { + try self.writeStub(fixup.symbol); + try self.writeStubInStubHelper(fixup.symbol); + try self.writeLazySymbolPointer(fixup.symbol); + + self.rebase_info_dirty = true; + self.lazy_binding_info_dirty = true; + } + } + self.stub_fixups.shrinkRetainingCapacity(0); + + try self.writeCode(symbol, code_buffer.items); - return self.finishUpdateDecl(module, decl, res); + if (debug_buffers) |db| { + try self.d_sym.?.commitDeclDebugInfo( + self.base.allocator, + module, + decl, + db, + self.base.options.target, + ); + } + + // Since we updated the vaddr and the size, each corresponding export symbol also + // needs to be updated. 
+ const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{}; + try self.updateDeclExports(module, decl, decl_exports); } pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { @@ -1194,9 +1298,13 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - var debug_buffers = if (self.d_sym) |*ds| try ds.initDeclDebugBuffers(self.base.allocator, module, decl) else null; + var debug_buffers_buf: DebugSymbols.DeclDebugBuffers = undefined; + const debug_buffers = if (self.d_sym) |*ds| blk: { + debug_buffers_buf = try ds.initDeclDebugBuffers(self.base.allocator, module, decl); + break :blk &debug_buffers_buf; + } else null; defer { - if (debug_buffers) |*dbg| { + if (debug_buffers) |dbg| { dbg.dbg_line_buffer.deinit(); dbg.dbg_info_buffer.deinit(); var it = dbg.dbg_info_type_relocs.valueIterator(); @@ -1207,7 +1315,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { } } - const res = if (debug_buffers) |*dbg| + const res = if (debug_buffers) |dbg| try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ .ty = decl.ty, .val = decl.val, @@ -1224,33 +1332,37 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { .val = decl.val, }, &code_buffer, .none); - return self.finishUpdateDecl(module, decl, res); -} - -fn finishUpdateDecl(self: *MachO, module: *Module, decl: *Module.Decl, res: codegen.Result) !void { const code = switch (res) { .externally_managed => |x| x, .appended => code_buffer.items, .fail => |em| { - // Clear any PIE fixups for this decl. - self.pie_fixups.shrinkRetainingCapacity(0); - // Clear any stub fixups for this decl. - self.stub_fixups.shrinkRetainingCapacity(0); decl.analysis = .codegen_failure; try module.failed_decls.put(module.gpa, decl, em); return; }, }; + const symbol = try self.placeDecl(decl, code.len); + assert(self.pie_fixups.items.len == 0); + assert(self.stub_fixups.items.len == 0); + try self.writeCode(symbol, code); + + // Since we updated the vaddr and the size, each corresponding export symbol also + // needs to be updated. 
+ const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{}; + try self.updateDeclExports(module, decl, decl_exports); +} + +fn placeDecl(self: *MachO, decl: *Module.Decl, code_len: usize) !*macho.nlist_64 { const required_alignment = decl.ty.abiAlignment(self.base.options.target); assert(decl.link.macho.local_sym_index != 0); // Caller forgot to call allocateDeclIndexes() const symbol = &self.locals.items[decl.link.macho.local_sym_index]; if (decl.link.macho.size != 0) { const capacity = decl.link.macho.capacity(self.*); - const need_realloc = code.len > capacity or !mem.isAlignedGeneric(u64, symbol.n_value, required_alignment); + const need_realloc = code_len > capacity or !mem.isAlignedGeneric(u64, symbol.n_value, required_alignment); if (need_realloc) { - const vaddr = try self.growTextBlock(&decl.link.macho, code.len, required_alignment); + const vaddr = try self.growTextBlock(&decl.link.macho, code_len, required_alignment); log.debug("growing {s} and moving from 0x{x} to 0x{x}", .{ decl.name, symbol.n_value, vaddr }); @@ -1265,10 +1377,10 @@ fn finishUpdateDecl(self: *MachO, module: *Module, decl: *Module.Decl, res: code } symbol.n_value = vaddr; - } else if (code.len < decl.link.macho.size) { - self.shrinkTextBlock(&decl.link.macho, code.len); + } else if (code_len < decl.link.macho.size) { + self.shrinkTextBlock(&decl.link.macho, code_len); } - decl.link.macho.size = code.len; + decl.link.macho.size = code_len; const new_name = try std.fmt.allocPrint(self.base.allocator, "_{s}", .{mem.spanZ(decl.name)}); defer self.base.allocator.free(new_name); @@ -1286,7 +1398,7 @@ fn finishUpdateDecl(self: *MachO, module: *Module, decl: *Module.Decl, res: code defer self.base.allocator.free(decl_name); const name_str_index = try self.makeString(decl_name); - const addr = try self.allocateTextBlock(&decl.link.macho, code.len, required_alignment); + const addr = try self.allocateTextBlock(&decl.link.macho, code_len, required_alignment); log.debug("allocated text block for {s} at 0x{x}", .{ decl_name, addr }); @@ -1311,96 +1423,15 @@ fn finishUpdateDecl(self: *MachO, module: *Module, decl: *Module.Decl, res: code try self.writeOffsetTableEntry(decl.link.macho.offset_table_index); } - // Calculate displacements to target addr (if any). - while (self.pie_fixups.popOrNull()) |fixup| { - assert(fixup.size == 4); - const this_addr = symbol.n_value + fixup.offset; - const target_addr = fixup.target_addr; - - switch (self.base.options.target.cpu.arch) { - .x86_64 => { - const displacement = try math.cast(u32, target_addr - this_addr - 4); - mem.writeIntLittle(u32, code_buffer.items[fixup.offset..][0..4], displacement); - }, - .aarch64 => { - // TODO optimize instruction based on jump length (use ldr(literal) + nop if possible). 
- { - const inst = code_buffer.items[fixup.offset..][0..4]; - var parsed = mem.bytesAsValue(meta.TagPayload( - aarch64.Instruction, - aarch64.Instruction.pc_relative_address, - ), inst); - const this_page = @intCast(i32, this_addr >> 12); - const target_page = @intCast(i32, target_addr >> 12); - const pages = @bitCast(u21, @intCast(i21, target_page - this_page)); - parsed.immhi = @truncate(u19, pages >> 2); - parsed.immlo = @truncate(u2, pages); - } - { - const inst = code_buffer.items[fixup.offset + 4 ..][0..4]; - var parsed = mem.bytesAsValue(meta.TagPayload( - aarch64.Instruction, - aarch64.Instruction.load_store_register, - ), inst); - const narrowed = @truncate(u12, target_addr); - const offset = try math.divExact(u12, narrowed, 8); - parsed.offset = offset; - } - }, - else => unreachable, // unsupported target architecture - } - } - - // Resolve stubs (if any) - const text_segment = self.load_commands.items[self.text_segment_cmd_index.?].Segment; - const stubs = text_segment.sections.items[self.stubs_section_index.?]; - for (self.stub_fixups.items) |fixup| { - const stub_addr = stubs.addr + fixup.symbol * stubs.reserved2; - const text_addr = symbol.n_value + fixup.start; - switch (self.base.options.target.cpu.arch) { - .x86_64 => { - assert(stub_addr >= text_addr + fixup.len); - const displacement = try math.cast(u32, stub_addr - text_addr - fixup.len); - var placeholder = code_buffer.items[fixup.start + fixup.len - @sizeOf(u32) ..][0..@sizeOf(u32)]; - mem.writeIntSliceLittle(u32, placeholder, displacement); - }, - .aarch64 => { - assert(stub_addr >= text_addr); - const displacement = try math.cast(i28, stub_addr - text_addr); - var placeholder = code_buffer.items[fixup.start..][0..fixup.len]; - mem.writeIntSliceLittle(u32, placeholder, aarch64.Instruction.bl(displacement).toU32()); - }, - else => unreachable, // unsupported target architecture - } - if (!fixup.already_defined) { - try self.writeStub(fixup.symbol); - try self.writeStubInStubHelper(fixup.symbol); - try self.writeLazySymbolPointer(fixup.symbol); - - self.rebase_info_dirty = true; - self.lazy_binding_info_dirty = true; - } - } - self.stub_fixups.shrinkRetainingCapacity(0); + return symbol; +} +fn writeCode(self: *MachO, symbol: *macho.nlist_64, code: []const u8) !void { + const text_segment = &self.load_commands.items[self.text_segment_cmd_index.?].Segment; const text_section = text_segment.sections.items[self.text_section_index.?]; const section_offset = symbol.n_value - text_section.addr; const file_offset = text_section.offset + section_offset; try self.base.file.?.pwriteAll(code, file_offset); - - if (debug_buffers) |*db| { - try self.d_sym.?.commitDeclDebugInfo( - self.base.allocator, - module, - decl, - db, - self.base.options.target, - ); - } - - // Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated. - const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{}; - try self.updateDeclExports(module, decl, decl_exports); } pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl: *const Module.Decl) !void { diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index bc9e560582..17b656a06c 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -51,7 +51,12 @@ base: link.File, /// This linker backend does not try to incrementally link output SPIR-V code. /// Instead, it tracks all declarations in this table, and iterates over it /// in the flush function. 
-decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, void) = .{}, +decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, DeclGenContext) = .{}, + +const DeclGenContext = struct { + air: Air, + liveness: Liveness, +}; pub fn createEmpty(gpa: *Allocator, options: link.Options) !*SpirV { const spirv = try gpa.create(SpirV); @@ -181,10 +186,15 @@ pub fn flushModule(self: *SpirV, comp: *Compilation) !void { var decl_gen = codegen.DeclGen.init(&spv); defer decl_gen.deinit(); - for (self.decl_table.keys()) |decl| { + var it = self.decl_table.iterator(); + while (it.next()) |entry| { + const decl = entry.key_ptr.*; if (!decl.has_tv) continue; - if (try decl_gen.gen(decl)) |msg| { + const air = entry.value_ptr.air; + const liveness = entry.value_ptr.liveness; + + if (try decl_gen.gen(decl, air, liveness)) |msg| { try module.failed_decls.put(module.gpa, decl, msg); return; // TODO: Attempt to generate more decls? } diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 81e50c46b6..d9139a178c 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -250,6 +250,8 @@ pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void { var context = codegen.Context{ .gpa = self.base.allocator, + .air = undefined, + .liveness = undefined, .values = .{}, .code = fn_data.code.toManaged(self.base.allocator), .func_type_data = fn_data.functype.toManaged(self.base.allocator), -- cgit v1.2.3 From 95756299af77a83564c2dbae09884be20ffe0c5c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 18 Jul 2021 22:49:46 -0700 Subject: stage2: fix compile errors in LLVM backend --- src/codegen/llvm.zig | 121 +++++++++++++++++++++++++++++++-------------------- src/link/MachO.zig | 13 +++++- src/link/Wasm.zig | 12 ++++- 3 files changed, 95 insertions(+), 51 deletions(-) (limited to 'src/link') diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 511c3fabf2..81484e93db 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -276,10 +276,71 @@ pub const Object = struct { } } - pub fn updateDecl(self: *Object, module: *Module, decl: *Module.Decl) !void { - const tracy = trace(@src()); - defer tracy.end(); + pub fn updateFunc( + self: *Object, + module: *Module, + func: *Module.Fn, + air: Air, + liveness: Liveness, + ) !void { + var dg: DeclGen = .{ + .object = self, + .module = module, + .decl = func.owner_decl, + .err_msg = null, + .gpa = module.gpa, + }; + + const llvm_func = try dg.resolveLLVMFunction(func.owner_decl); + + // This gets the LLVM values from the function and stores them in `dg.args`. + const fn_param_len = func.owner_decl.ty.fnParamLen(); + var args = try dg.gpa.alloc(*const llvm.Value, fn_param_len); + for (args) |*arg, i| { + arg.* = llvm.getParam(llvm_func, @intCast(c_uint, i)); + } + + // We remove all the basic blocks of a function to support incremental + // compilation! 
+ // TODO: remove all basic blocks if functions can have more than one + if (llvm_func.getFirstBasicBlock()) |bb| { + bb.deleteBasicBlock(); + } + + const builder = dg.context().createBuilder(); + + const entry_block = dg.context().appendBasicBlock(llvm_func, "Entry"); + builder.positionBuilderAtEnd(entry_block); + + var fg: FuncGen = .{ + .gpa = dg.gpa, + .air = air, + .liveness = liveness, + .dg = &dg, + .builder = builder, + .args = args, + .arg_index = 0, + .func_inst_table = .{}, + .entry_block = entry_block, + .latest_alloca_inst = null, + .llvm_func = llvm_func, + .blocks = .{}, + }; + defer fg.deinit(); + + fg.genBody(air.getMainBody()) catch |err| switch (err) { + error.CodegenFail => { + func.owner_decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, func.owner_decl, dg.err_msg.?); + dg.err_msg = null; + return; + }, + else => |e| return e, + }; + } + + pub fn updateDecl(self: *Object, module: *Module, decl: *Module.Decl) !void { var dg: DeclGen = .{ .object = self, .module = module, @@ -330,45 +391,8 @@ pub const DeclGen = struct { log.debug("gen: {s} type: {}, value: {}", .{ decl.name, decl.ty, decl.val }); if (decl.val.castTag(.function)) |func_payload| { - const func = func_payload.data; - - const llvm_func = try self.resolveLLVMFunction(func.owner_decl); - - // This gets the LLVM values from the function and stores them in `self.args`. - const fn_param_len = func.owner_decl.ty.fnParamLen(); - var args = try self.gpa.alloc(*const llvm.Value, fn_param_len); - - for (args) |*arg, i| { - arg.* = llvm.getParam(llvm_func, @intCast(c_uint, i)); - } - - // We remove all the basic blocks of a function to support incremental - // compilation! - // TODO: remove all basic blocks if functions can have more than one - if (llvm_func.getFirstBasicBlock()) |bb| { - bb.deleteBasicBlock(); - } - - const builder = self.context().createBuilder(); - - const entry_block = self.context().appendBasicBlock(llvm_func, "Entry"); - builder.positionBuilderAtEnd(entry_block); - - var fg: FuncGen = .{ - .gpa = self.gpa, - .dg = self, - .builder = builder, - .args = args, - .arg_index = 0, - .func_inst_table = .{}, - .entry_block = entry_block, - .latest_alloca_inst = null, - .llvm_func = llvm_func, - .blocks = .{}, - }; - defer fg.deinit(); - - try fg.genBody(func.body); + _ = func_payload; + @panic("TODO llvm backend genDecl function pointer"); } else if (decl.val.castTag(.extern_fn)) |extern_fn| { _ = try self.resolveLLVMFunction(extern_fn.data); } else { @@ -596,6 +620,8 @@ pub const DeclGen = struct { pub const FuncGen = struct { gpa: *Allocator, dg: *DeclGen, + air: Air, + liveness: Liveness, builder: *const llvm.Builder, @@ -649,14 +675,15 @@ pub const FuncGen = struct { if (self.air.value(inst)) |val| { return self.dg.genTypedValue(.{ .ty = self.air.typeOf(inst), .val = val }, self); } - if (self.func_inst_table.get(inst)) |value| return value; + const inst_index = Air.refToIndex(inst).?; + if (self.func_inst_table.get(inst_index)) |value| return value; return self.todo("implement global llvm values (or the value is not in the func_inst_table table)", .{}); } - fn genBody(self: *FuncGen, body: ir.Body) error{ OutOfMemory, CodegenFail }!void { + fn genBody(self: *FuncGen, body: []const Air.Inst.Index) error{ OutOfMemory, CodegenFail }!void { const air_tags = self.air.instructions.items(.tag); - for (body.instructions) |inst| { + for (body) |inst| { const opt_value = switch (air_tags[inst]) { .add => try self.airAdd(inst), .sub => try self.airSub(inst), @@ -828,8 +855,8 @@ pub 
const FuncGen = struct { // If the break doesn't break a value, then we don't have to add // the values to the lists. - if (self.air.typeOf(branch.result).hasCodeGenBits()) { - const val = try self.resolveInst(branch.result); + if (self.air.typeOf(branch.operand).hasCodeGenBits()) { + const val = try self.resolveInst(branch.operand); // For the phi node, we need the basic blocks and the values of the // break instructions. diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 4107607924..02ea5856f4 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -30,6 +30,7 @@ const DebugSymbols = @import("MachO/DebugSymbols.zig"); const Trie = @import("MachO/Trie.zig"); const CodeSignature = @import("MachO/CodeSignature.zig"); const Zld = @import("MachO/Zld.zig"); +const llvm_backend = @import("../codegen/llvm.zig"); usingnamespace @import("MachO/commands.zig"); @@ -37,6 +38,9 @@ pub const base_tag: File.Tag = File.Tag.macho; base: File, +/// If this is not null, an object file is created by LLVM and linked with LLD afterwards. +llvm_object: ?*llvm_backend.Object = null, + /// Debug symbols bundle (or dSym). d_sym: ?DebugSymbols = null, @@ -347,8 +351,13 @@ pub const SrcFn = struct { pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*MachO { assert(options.object_format == .macho); - if (options.use_llvm) return error.LLVM_BackendIsTODO_ForMachO; // TODO - if (options.use_lld) return error.LLD_LinkingIsTODO_ForMachO; // TODO + if (build_options.have_llvm and options.use_llvm) { + const self = try createEmpty(allocator, options); + errdefer self.base.destroy(); + + self.llvm_object = try llvm_backend.Object.create(allocator, sub_path, options); + return self; + } const file = try options.emit.?.directory.handle.createFile(sub_path, .{ .truncate = false, diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index d9139a178c..f478d2ee47 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -19,12 +19,15 @@ const build_options = @import("build_options"); const wasi_libc = @import("../wasi_libc.zig"); const Cache = @import("../Cache.zig"); const TypedValue = @import("../TypedValue.zig"); +const llvm_backend = @import("../codegen/llvm.zig"); const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); pub const base_tag = link.File.Tag.wasm; base: link.File, +/// If this is not null, an object file is created by LLVM and linked with LLD afterwards. +llvm_object: ?*llvm_backend.Object = null, /// List of all function Decls to be written to the output file. The index of /// each Decl in this list at the time of writing the binary is used as the /// function index. In the event where ext_funcs' size is not 0, the index of @@ -114,8 +117,13 @@ pub const DeclBlock = struct { pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*Wasm { assert(options.object_format == .wasm); - if (options.use_llvm) return error.LLVM_BackendIsTODO_ForWasm; // TODO - if (options.use_lld) return error.LLD_LinkingIsTODO_ForWasm; // TODO + if (build_options.have_llvm and options.use_llvm) { + const self = try createEmpty(allocator, options); + errdefer self.base.destroy(); + + self.llvm_object = try llvm_backend.Object.create(allocator, sub_path, options); + return self; + } // TODO: read the file and keep valid parts instead of truncating const file = try options.emit.?.directory.handle.createFile(sub_path, .{ .truncate = true, .read = true }); -- cgit v1.2.3
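The rewrites across these commits all converge on one handler shape: take an Air.Inst.Index, bail out early via Liveness when the result is unused, pull the payload out of the instruction MultiArrayList, and resolve each operand Ref either to a comptime-known value or to a previously stored result. Below is a condensed sketch of that shape, assuming a backend context with `air`, `liveness`, and an `inst_results` map (mirroring the SPIR-V backend above); the `Self`, `Result`, `emitAdd`, and `lowerConstant` names are illustrative placeholders, not actual compiler API, and the import paths are likewise only indicative:

    const Air = @import("Air.zig");
    const Liveness = @import("Liveness.zig");

    fn airAdd(self: *Self, inst: Air.Inst.Index) !?Result {
        // Dead results are skipped up front rather than cleaned up later.
        if (self.liveness.isUnused(inst)) return null;

        // Payloads live in the `data` column of the instruction MultiArrayList.
        const bin_op = self.air.instructions.items(.data)[inst].bin_op;

        // Operands are Refs; resolve each to a backend value.
        const lhs = try self.resolveInst(bin_op.lhs);
        const rhs = try self.resolveInst(bin_op.rhs);

        // typeOf() answers for an operand Ref; typeOfIndex() gives the
        // instruction's own result type.
        const ty = self.air.typeOfIndex(inst);
        return try self.emitAdd(ty, lhs, rhs);
    }

    fn resolveInst(self: *Self, ref: Air.Inst.Ref) !Result {
        // Comptime-known operands never get table entries; lower the value
        // directly at the use site.
        if (self.air.value(ref)) |val| {
            return self.lowerConstant(self.air.typeOf(ref), val);
        }
        // Otherwise the Ref maps back to an instruction index, which keys
        // the per-function result table.
        const index = Air.refToIndex(ref).?;
        return self.inst_results.get(index).?;
    }

This split is also why the SPIR-V genBody switch above marks `.constant` as unreachable: comptime-known operands are folded at resolve time instead of being visited as body instructions.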