From 5d6f7b44c19b064a543b0c1eecb6ef5c671b612e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 8 Jul 2021 20:42:47 -0700 Subject: stage2: rework AIR memory layout This commit changes the AIR file and the documentation of the memory layout. The actual work of modifying the surrounding code (in Sema and codegen) is not yet done. --- src/codegen/wasm.zig | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'src/codegen/wasm.zig') diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig index 3476ab2ce6..45b00ddfad 100644 --- a/src/codegen/wasm.zig +++ b/src/codegen/wasm.zig @@ -9,8 +9,7 @@ const wasm = std.wasm; const Module = @import("../Module.zig"); const Decl = Module.Decl; -const ir = @import("../air.zig"); -const Inst = ir.Inst; +const Air = @import("../Air.zig"); const Type = @import("../type.zig").Type; const Value = @import("../value.zig").Value; const Compilation = @import("../Compilation.zig"); -- cgit v1.2.3 From 0f38f686964664f68e013ec3c63cfe655001f165 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 12 Jul 2021 19:51:31 -0700 Subject: stage2: Air and Liveness are passed ephemerally to the link infrastructure, instead of being stored with Module.Fn. This moves towards a strategy to make more efficient use of memory by not storing Air or Liveness data in the Fn struct, but computing it on demand, immediately sending it to the backend, and then immediately freeing it. Backends which want to defer codegen until flush() such as SPIR-V must move the Air/Liveness data upon `updateFunc` being called and keep track of that data in the backend implementation itself. --- BRANCH_TODO | 5 + src/Compilation.zig | 2 +- src/Liveness.zig | 9 +- src/Module.zig | 5 - src/Sema.zig | 762 +++++++++++++++++++++++++------------------------- src/codegen.zig | 7 +- src/codegen/c.zig | 9 +- src/codegen/llvm.zig | 3 + src/codegen/spirv.zig | 3 +- src/codegen/wasm.zig | 88 +++--- src/link.zig | 34 ++- src/link/C.zig | 28 +- src/link/Coff.zig | 56 +++- src/link/Elf.zig | 558 +++++++++++++++++++----------------- src/link/MachO.zig | 55 ++++ src/link/Plan9.zig | 29 +- src/link/SpirV.zig | 24 +- src/link/Wasm.zig | 59 +++- 18 files changed, 1023 insertions(+), 713 deletions(-) (limited to 'src/codegen/wasm.zig') diff --git a/BRANCH_TODO b/BRANCH_TODO index 585c8adf44..c7f3923559 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -690,3 +690,8 @@ pub fn dumpInst(mod: *Module, scope: *Scope, inst: *ir.Inst) void { } } + /// For debugging purposes. 
+ pub fn dump(func: *Fn, mod: Module) void { + ir.dumpFn(mod, func); + } + diff --git a/src/Compilation.zig b/src/Compilation.zig index 74ad7b2aae..90224a77d1 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2027,7 +2027,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor defer liveness.deinit(gpa); if (std.builtin.mode == .Debug and self.verbose_air) { - func.dump(module.*); + @panic("TODO implement dumping AIR and liveness"); } assert(decl.ty.hasCodeGenBits()); diff --git a/src/Liveness.zig b/src/Liveness.zig index 0cbac61118..1402a5997b 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -50,7 +50,7 @@ pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness { var a: Analysis = .{ .gpa = gpa, - .air = &air, + .air = air, .table = .{}, .tomb_bits = try gpa.alloc( usize, @@ -65,7 +65,7 @@ pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness { defer a.table.deinit(gpa); const main_body = air.getMainBody(); - try a.table.ensureTotalCapacity(main_body.len); + try a.table.ensureTotalCapacity(gpa, @intCast(u32, main_body.len)); try analyzeWithContext(&a, null, main_body); return Liveness{ .tomb_bits = a.tomb_bits, @@ -108,9 +108,10 @@ const OperandInt = std.math.Log2Int(Bpi); /// In-progress data; on successful analysis converted into `Liveness`. const Analysis = struct { gpa: *Allocator, - air: *const Air, + air: Air, table: std.AutoHashMapUnmanaged(Air.Inst.Index, void), tomb_bits: []usize, + special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32), extra: std.ArrayListUnmanaged(u32), fn storeTombBits(a: *Analysis, inst: Air.Inst.Index, tomb_bits: Bpi) void { @@ -165,7 +166,7 @@ fn analyzeWithContext( fn analyzeInst( a: *Analysis, - new_set: ?*std.AutoHashMap(Air.Inst.Index, void), + new_set: ?*std.AutoHashMapUnmanaged(Air.Inst.Index, void), inst: Air.Inst.Index, ) Allocator.Error!void { const gpa = a.gpa; diff --git a/src/Module.zig b/src/Module.zig index 8971a57487..5972c2bdcf 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -769,11 +769,6 @@ pub const Fn = struct { success, }; - /// For debugging purposes. 
- pub fn dump(func: *Fn, mod: Module) void { - ir.dumpFn(mod, func); - } - pub fn deinit(func: *Fn, gpa: *Allocator) void { if (func.getInferredErrorSet()) |map| { map.deinit(gpa); diff --git a/src/Sema.zig b/src/Sema.zig index d7ec01696f..54c42a482d 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -69,7 +69,7 @@ const LazySrcLoc = Module.LazySrcLoc; const RangeSet = @import("RangeSet.zig"); const target_util = @import("target.zig"); -pub const InstMap = std.AutoHashMapUnmanaged(Zir.Inst.Index, Air.Inst.Index); +pub const InstMap = std.AutoHashMapUnmanaged(Zir.Inst.Index, Air.Inst.Ref); pub fn deinit(sema: *Sema) void { const gpa = sema.gpa; @@ -158,344 +158,344 @@ pub fn analyzeBody( var i: usize = 0; while (true) { const inst = body[i]; - const air_inst = switch (tags[inst]) { + const air_inst: Air.Inst.Ref = switch (tags[inst]) { // zig fmt: off .arg => try sema.zirArg(block, inst), - .alloc => try sema.zirAlloc(block, inst), - .alloc_inferred => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_const)), - .alloc_inferred_mut => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_mut)), - .alloc_inferred_comptime => try sema.zirAllocInferredComptime(block, inst), - .alloc_mut => try sema.zirAllocMut(block, inst), - .alloc_comptime => try sema.zirAllocComptime(block, inst), - .anyframe_type => try sema.zirAnyframeType(block, inst), - .array_cat => try sema.zirArrayCat(block, inst), - .array_mul => try sema.zirArrayMul(block, inst), - .array_type => try sema.zirArrayType(block, inst), - .array_type_sentinel => try sema.zirArrayTypeSentinel(block, inst), - .vector_type => try sema.zirVectorType(block, inst), - .as => try sema.zirAs(block, inst), - .as_node => try sema.zirAsNode(block, inst), - .bit_and => try sema.zirBitwise(block, inst, .bit_and), - .bit_not => try sema.zirBitNot(block, inst), - .bit_or => try sema.zirBitwise(block, inst, .bit_or), - .bitcast => try sema.zirBitcast(block, inst), - .bitcast_result_ptr => try sema.zirBitcastResultPtr(block, inst), - .block => try sema.zirBlock(block, inst), - .suspend_block => try sema.zirSuspendBlock(block, inst), - .bool_not => try sema.zirBoolNot(block, inst), - .bool_and => try sema.zirBoolOp(block, inst, false), - .bool_or => try sema.zirBoolOp(block, inst, true), - .bool_br_and => try sema.zirBoolBr(block, inst, false), - .bool_br_or => try sema.zirBoolBr(block, inst, true), - .c_import => try sema.zirCImport(block, inst), - .call => try sema.zirCall(block, inst, .auto, false), - .call_chkused => try sema.zirCall(block, inst, .auto, true), - .call_compile_time => try sema.zirCall(block, inst, .compile_time, false), - .call_nosuspend => try sema.zirCall(block, inst, .no_async, false), - .call_async => try sema.zirCall(block, inst, .async_kw, false), - .cmp_eq => try sema.zirCmp(block, inst, .eq), - .cmp_gt => try sema.zirCmp(block, inst, .gt), - .cmp_gte => try sema.zirCmp(block, inst, .gte), - .cmp_lt => try sema.zirCmp(block, inst, .lt), - .cmp_lte => try sema.zirCmp(block, inst, .lte), - .cmp_neq => try sema.zirCmp(block, inst, .neq), - .coerce_result_ptr => try sema.zirCoerceResultPtr(block, inst), - .decl_ref => try sema.zirDeclRef(block, inst), - .decl_val => try sema.zirDeclVal(block, inst), - .load => try sema.zirLoad(block, inst), - .elem_ptr => try sema.zirElemPtr(block, inst), - .elem_ptr_node => try sema.zirElemPtrNode(block, inst), - .elem_val => try sema.zirElemVal(block, inst), - .elem_val_node => try sema.zirElemValNode(block, inst), - .elem_type => try sema.zirElemType(block, inst), - 
.enum_literal => try sema.zirEnumLiteral(block, inst), - .enum_to_int => try sema.zirEnumToInt(block, inst), - .int_to_enum => try sema.zirIntToEnum(block, inst), - .err_union_code => try sema.zirErrUnionCode(block, inst), - .err_union_code_ptr => try sema.zirErrUnionCodePtr(block, inst), - .err_union_payload_safe => try sema.zirErrUnionPayload(block, inst, true), - .err_union_payload_safe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, true), - .err_union_payload_unsafe => try sema.zirErrUnionPayload(block, inst, false), - .err_union_payload_unsafe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, false), - .error_union_type => try sema.zirErrorUnionType(block, inst), - .error_value => try sema.zirErrorValue(block, inst), - .error_to_int => try sema.zirErrorToInt(block, inst), - .int_to_error => try sema.zirIntToError(block, inst), - .field_ptr => try sema.zirFieldPtr(block, inst), - .field_ptr_named => try sema.zirFieldPtrNamed(block, inst), - .field_val => try sema.zirFieldVal(block, inst), - .field_val_named => try sema.zirFieldValNamed(block, inst), - .func => try sema.zirFunc(block, inst, false), - .func_inferred => try sema.zirFunc(block, inst, true), - .import => try sema.zirImport(block, inst), - .indexable_ptr_len => try sema.zirIndexablePtrLen(block, inst), - .int => try sema.zirInt(block, inst), - .int_big => try sema.zirIntBig(block, inst), - .float => try sema.zirFloat(block, inst), - .float128 => try sema.zirFloat128(block, inst), - .int_type => try sema.zirIntType(block, inst), - .is_non_err => try sema.zirIsNonErr(block, inst), - .is_non_err_ptr => try sema.zirIsNonErrPtr(block, inst), - .is_non_null => try sema.zirIsNonNull(block, inst), - .is_non_null_ptr => try sema.zirIsNonNullPtr(block, inst), - .loop => try sema.zirLoop(block, inst), - .merge_error_sets => try sema.zirMergeErrorSets(block, inst), - .negate => try sema.zirNegate(block, inst, .sub), - .negate_wrap => try sema.zirNegate(block, inst, .subwrap), - .optional_payload_safe => try sema.zirOptionalPayload(block, inst, true), - .optional_payload_safe_ptr => try sema.zirOptionalPayloadPtr(block, inst, true), - .optional_payload_unsafe => try sema.zirOptionalPayload(block, inst, false), - .optional_payload_unsafe_ptr => try sema.zirOptionalPayloadPtr(block, inst, false), - .optional_type => try sema.zirOptionalType(block, inst), - .param_type => try sema.zirParamType(block, inst), - .ptr_type => try sema.zirPtrType(block, inst), - .ptr_type_simple => try sema.zirPtrTypeSimple(block, inst), - .ref => try sema.zirRef(block, inst), - .ret_err_value_code => try sema.zirRetErrValueCode(block, inst), - .shl => try sema.zirShl(block, inst), - .shr => try sema.zirShr(block, inst), - .slice_end => try sema.zirSliceEnd(block, inst), - .slice_sentinel => try sema.zirSliceSentinel(block, inst), - .slice_start => try sema.zirSliceStart(block, inst), - .str => try sema.zirStr(block, inst), - .switch_block => try sema.zirSwitchBlock(block, inst, false, .none), - .switch_block_multi => try sema.zirSwitchBlockMulti(block, inst, false, .none), - .switch_block_else => try sema.zirSwitchBlock(block, inst, false, .@"else"), - .switch_block_else_multi => try sema.zirSwitchBlockMulti(block, inst, false, .@"else"), - .switch_block_under => try sema.zirSwitchBlock(block, inst, false, .under), - .switch_block_under_multi => try sema.zirSwitchBlockMulti(block, inst, false, .under), - .switch_block_ref => try sema.zirSwitchBlock(block, inst, true, .none), - .switch_block_ref_multi => try sema.zirSwitchBlockMulti(block, inst, true, 
.none), - .switch_block_ref_else => try sema.zirSwitchBlock(block, inst, true, .@"else"), - .switch_block_ref_else_multi => try sema.zirSwitchBlockMulti(block, inst, true, .@"else"), - .switch_block_ref_under => try sema.zirSwitchBlock(block, inst, true, .under), - .switch_block_ref_under_multi => try sema.zirSwitchBlockMulti(block, inst, true, .under), - .switch_capture => try sema.zirSwitchCapture(block, inst, false, false), - .switch_capture_ref => try sema.zirSwitchCapture(block, inst, false, true), - .switch_capture_multi => try sema.zirSwitchCapture(block, inst, true, false), - .switch_capture_multi_ref => try sema.zirSwitchCapture(block, inst, true, true), - .switch_capture_else => try sema.zirSwitchCaptureElse(block, inst, false), - .switch_capture_else_ref => try sema.zirSwitchCaptureElse(block, inst, true), - .type_info => try sema.zirTypeInfo(block, inst), - .size_of => try sema.zirSizeOf(block, inst), - .bit_size_of => try sema.zirBitSizeOf(block, inst), - .typeof => try sema.zirTypeof(block, inst), - .typeof_elem => try sema.zirTypeofElem(block, inst), - .log2_int_type => try sema.zirLog2IntType(block, inst), - .typeof_log2_int_type => try sema.zirTypeofLog2IntType(block, inst), - .xor => try sema.zirBitwise(block, inst, .xor), - .struct_init_empty => try sema.zirStructInitEmpty(block, inst), - .struct_init => try sema.zirStructInit(block, inst, false), - .struct_init_ref => try sema.zirStructInit(block, inst, true), - .struct_init_anon => try sema.zirStructInitAnon(block, inst, false), - .struct_init_anon_ref => try sema.zirStructInitAnon(block, inst, true), - .array_init => try sema.zirArrayInit(block, inst, false), - .array_init_ref => try sema.zirArrayInit(block, inst, true), - .array_init_anon => try sema.zirArrayInitAnon(block, inst, false), - .array_init_anon_ref => try sema.zirArrayInitAnon(block, inst, true), - .union_init_ptr => try sema.zirUnionInitPtr(block, inst), - .field_type => try sema.zirFieldType(block, inst), - .field_type_ref => try sema.zirFieldTypeRef(block, inst), - .ptr_to_int => try sema.zirPtrToInt(block, inst), - .align_of => try sema.zirAlignOf(block, inst), - .bool_to_int => try sema.zirBoolToInt(block, inst), - .embed_file => try sema.zirEmbedFile(block, inst), - .error_name => try sema.zirErrorName(block, inst), - .tag_name => try sema.zirTagName(block, inst), - .reify => try sema.zirReify(block, inst), - .type_name => try sema.zirTypeName(block, inst), - .frame_type => try sema.zirFrameType(block, inst), - .frame_size => try sema.zirFrameSize(block, inst), - .float_to_int => try sema.zirFloatToInt(block, inst), - .int_to_float => try sema.zirIntToFloat(block, inst), - .int_to_ptr => try sema.zirIntToPtr(block, inst), - .float_cast => try sema.zirFloatCast(block, inst), - .int_cast => try sema.zirIntCast(block, inst), - .err_set_cast => try sema.zirErrSetCast(block, inst), - .ptr_cast => try sema.zirPtrCast(block, inst), - .truncate => try sema.zirTruncate(block, inst), - .align_cast => try sema.zirAlignCast(block, inst), - .has_decl => try sema.zirHasDecl(block, inst), - .has_field => try sema.zirHasField(block, inst), - .clz => try sema.zirClz(block, inst), - .ctz => try sema.zirCtz(block, inst), - .pop_count => try sema.zirPopCount(block, inst), - .byte_swap => try sema.zirByteSwap(block, inst), - .bit_reverse => try sema.zirBitReverse(block, inst), - .div_exact => try sema.zirDivExact(block, inst), - .div_floor => try sema.zirDivFloor(block, inst), - .div_trunc => try sema.zirDivTrunc(block, inst), - .mod => try sema.zirMod(block, inst), - 
.rem => try sema.zirRem(block, inst), - .shl_exact => try sema.zirShlExact(block, inst), - .shr_exact => try sema.zirShrExact(block, inst), - .bit_offset_of => try sema.zirBitOffsetOf(block, inst), - .offset_of => try sema.zirOffsetOf(block, inst), - .cmpxchg_strong => try sema.zirCmpxchg(block, inst), - .cmpxchg_weak => try sema.zirCmpxchg(block, inst), - .splat => try sema.zirSplat(block, inst), - .reduce => try sema.zirReduce(block, inst), - .shuffle => try sema.zirShuffle(block, inst), - .atomic_load => try sema.zirAtomicLoad(block, inst), - .atomic_rmw => try sema.zirAtomicRmw(block, inst), - .atomic_store => try sema.zirAtomicStore(block, inst), - .mul_add => try sema.zirMulAdd(block, inst), - .builtin_call => try sema.zirBuiltinCall(block, inst), - .field_ptr_type => try sema.zirFieldPtrType(block, inst), - .field_parent_ptr => try sema.zirFieldParentPtr(block, inst), - .memcpy => try sema.zirMemcpy(block, inst), - .memset => try sema.zirMemset(block, inst), - .builtin_async_call => try sema.zirBuiltinAsyncCall(block, inst), - .@"resume" => try sema.zirResume(block, inst), - .@"await" => try sema.zirAwait(block, inst, false), - .await_nosuspend => try sema.zirAwait(block, inst, true), - .extended => try sema.zirExtended(block, inst), - - .sqrt => try sema.zirUnaryMath(block, inst), - .sin => try sema.zirUnaryMath(block, inst), - .cos => try sema.zirUnaryMath(block, inst), - .exp => try sema.zirUnaryMath(block, inst), - .exp2 => try sema.zirUnaryMath(block, inst), - .log => try sema.zirUnaryMath(block, inst), - .log2 => try sema.zirUnaryMath(block, inst), - .log10 => try sema.zirUnaryMath(block, inst), - .fabs => try sema.zirUnaryMath(block, inst), - .floor => try sema.zirUnaryMath(block, inst), - .ceil => try sema.zirUnaryMath(block, inst), - .trunc => try sema.zirUnaryMath(block, inst), - .round => try sema.zirUnaryMath(block, inst), - - .opaque_decl => try sema.zirOpaqueDecl(block, inst, .parent), - .opaque_decl_anon => try sema.zirOpaqueDecl(block, inst, .anon), - .opaque_decl_func => try sema.zirOpaqueDecl(block, inst, .func), - .error_set_decl => try sema.zirErrorSetDecl(block, inst, .parent), - .error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon), - .error_set_decl_func => try sema.zirErrorSetDecl(block, inst, .func), - - .add => try sema.zirArithmetic(block, inst), - .addwrap => try sema.zirArithmetic(block, inst), - .div => try sema.zirArithmetic(block, inst), - .mod_rem => try sema.zirArithmetic(block, inst), - .mul => try sema.zirArithmetic(block, inst), - .mulwrap => try sema.zirArithmetic(block, inst), - .sub => try sema.zirArithmetic(block, inst), - .subwrap => try sema.zirArithmetic(block, inst), - - // Instructions that we know to *always* be noreturn based solely on their tag. - // These functions match the return type of analyzeBody so that we can - // tail call them here. - .break_inline => return inst, - .condbr => return sema.zirCondbr(block, inst), - .@"break" => return sema.zirBreak(block, inst), - .compile_error => return sema.zirCompileError(block, inst), - .ret_coerce => return sema.zirRetCoerce(block, inst, true), - .ret_node => return sema.zirRetNode(block, inst), - .ret_err_value => return sema.zirRetErrValue(block, inst), - .@"unreachable" => return sema.zirUnreachable(block, inst), - .repeat => return sema.zirRepeat(block, inst), - .panic => return sema.zirPanic(block, inst), - // zig fmt: on - - // Instructions that we know can *never* be noreturn based solely on - // their tag. 
We avoid needlessly checking if they are noreturn and - // continue the loop. - // We also know that they cannot be referenced later, so we avoid - // putting them into the map. - .breakpoint => { - try sema.zirBreakpoint(block, inst); - i += 1; - continue; - }, - .fence => { - try sema.zirFence(block, inst); - i += 1; - continue; - }, - .dbg_stmt => { - try sema.zirDbgStmt(block, inst); - i += 1; - continue; - }, - .ensure_err_payload_void => { - try sema.zirEnsureErrPayloadVoid(block, inst); - i += 1; - continue; - }, - .ensure_result_non_error => { - try sema.zirEnsureResultNonError(block, inst); - i += 1; - continue; - }, - .ensure_result_used => { - try sema.zirEnsureResultUsed(block, inst); - i += 1; - continue; - }, - .set_eval_branch_quota => { - try sema.zirSetEvalBranchQuota(block, inst); - i += 1; - continue; - }, - .store => { - try sema.zirStore(block, inst); - i += 1; - continue; - }, - .store_node => { - try sema.zirStoreNode(block, inst); - i += 1; - continue; - }, - .store_to_block_ptr => { - try sema.zirStoreToBlockPtr(block, inst); - i += 1; - continue; - }, - .store_to_inferred_ptr => { - try sema.zirStoreToInferredPtr(block, inst); - i += 1; - continue; - }, - .resolve_inferred_alloc => { - try sema.zirResolveInferredAlloc(block, inst); - i += 1; - continue; - }, - .validate_struct_init_ptr => { - try sema.zirValidateStructInitPtr(block, inst); - i += 1; - continue; - }, - .validate_array_init_ptr => { - try sema.zirValidateArrayInitPtr(block, inst); - i += 1; - continue; - }, - .@"export" => { - try sema.zirExport(block, inst); - i += 1; - continue; - }, - .set_align_stack => { - try sema.zirSetAlignStack(block, inst); - i += 1; - continue; - }, - .set_cold => { - try sema.zirSetCold(block, inst); - i += 1; - continue; - }, - .set_float_mode => { - try sema.zirSetFloatMode(block, inst); - i += 1; - continue; - }, - .set_runtime_safety => { - try sema.zirSetRuntimeSafety(block, inst); - i += 1; - continue; - }, + //.alloc => try sema.zirAlloc(block, inst), + //.alloc_inferred => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_const)), + //.alloc_inferred_mut => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_mut)), + //.alloc_inferred_comptime => try sema.zirAllocInferredComptime(block, inst), + //.alloc_mut => try sema.zirAllocMut(block, inst), + //.alloc_comptime => try sema.zirAllocComptime(block, inst), + //.anyframe_type => try sema.zirAnyframeType(block, inst), + //.array_cat => try sema.zirArrayCat(block, inst), + //.array_mul => try sema.zirArrayMul(block, inst), + //.array_type => try sema.zirArrayType(block, inst), + //.array_type_sentinel => try sema.zirArrayTypeSentinel(block, inst), + //.vector_type => try sema.zirVectorType(block, inst), + //.as => try sema.zirAs(block, inst), + //.as_node => try sema.zirAsNode(block, inst), + //.bit_and => try sema.zirBitwise(block, inst, .bit_and), + //.bit_not => try sema.zirBitNot(block, inst), + //.bit_or => try sema.zirBitwise(block, inst, .bit_or), + //.bitcast => try sema.zirBitcast(block, inst), + //.bitcast_result_ptr => try sema.zirBitcastResultPtr(block, inst), + //.block => try sema.zirBlock(block, inst), + //.suspend_block => try sema.zirSuspendBlock(block, inst), + //.bool_not => try sema.zirBoolNot(block, inst), + //.bool_and => try sema.zirBoolOp(block, inst, false), + //.bool_or => try sema.zirBoolOp(block, inst, true), + //.bool_br_and => try sema.zirBoolBr(block, inst, false), + //.bool_br_or => try sema.zirBoolBr(block, inst, true), + //.c_import => try 
sema.zirCImport(block, inst), + //.call => try sema.zirCall(block, inst, .auto, false), + //.call_chkused => try sema.zirCall(block, inst, .auto, true), + //.call_compile_time => try sema.zirCall(block, inst, .compile_time, false), + //.call_nosuspend => try sema.zirCall(block, inst, .no_async, false), + //.call_async => try sema.zirCall(block, inst, .async_kw, false), + //.cmp_eq => try sema.zirCmp(block, inst, .eq), + //.cmp_gt => try sema.zirCmp(block, inst, .gt), + //.cmp_gte => try sema.zirCmp(block, inst, .gte), + //.cmp_lt => try sema.zirCmp(block, inst, .lt), + //.cmp_lte => try sema.zirCmp(block, inst, .lte), + //.cmp_neq => try sema.zirCmp(block, inst, .neq), + //.coerce_result_ptr => try sema.zirCoerceResultPtr(block, inst), + //.decl_ref => try sema.zirDeclRef(block, inst), + //.decl_val => try sema.zirDeclVal(block, inst), + //.load => try sema.zirLoad(block, inst), + //.elem_ptr => try sema.zirElemPtr(block, inst), + //.elem_ptr_node => try sema.zirElemPtrNode(block, inst), + //.elem_val => try sema.zirElemVal(block, inst), + //.elem_val_node => try sema.zirElemValNode(block, inst), + //.elem_type => try sema.zirElemType(block, inst), + //.enum_literal => try sema.zirEnumLiteral(block, inst), + //.enum_to_int => try sema.zirEnumToInt(block, inst), + //.int_to_enum => try sema.zirIntToEnum(block, inst), + //.err_union_code => try sema.zirErrUnionCode(block, inst), + //.err_union_code_ptr => try sema.zirErrUnionCodePtr(block, inst), + //.err_union_payload_safe => try sema.zirErrUnionPayload(block, inst, true), + //.err_union_payload_safe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, true), + //.err_union_payload_unsafe => try sema.zirErrUnionPayload(block, inst, false), + //.err_union_payload_unsafe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, false), + //.error_union_type => try sema.zirErrorUnionType(block, inst), + //.error_value => try sema.zirErrorValue(block, inst), + //.error_to_int => try sema.zirErrorToInt(block, inst), + //.int_to_error => try sema.zirIntToError(block, inst), + //.field_ptr => try sema.zirFieldPtr(block, inst), + //.field_ptr_named => try sema.zirFieldPtrNamed(block, inst), + //.field_val => try sema.zirFieldVal(block, inst), + //.field_val_named => try sema.zirFieldValNamed(block, inst), + //.func => try sema.zirFunc(block, inst, false), + //.func_inferred => try sema.zirFunc(block, inst, true), + //.import => try sema.zirImport(block, inst), + //.indexable_ptr_len => try sema.zirIndexablePtrLen(block, inst), + //.int => try sema.zirInt(block, inst), + //.int_big => try sema.zirIntBig(block, inst), + //.float => try sema.zirFloat(block, inst), + //.float128 => try sema.zirFloat128(block, inst), + //.int_type => try sema.zirIntType(block, inst), + //.is_non_err => try sema.zirIsNonErr(block, inst), + //.is_non_err_ptr => try sema.zirIsNonErrPtr(block, inst), + //.is_non_null => try sema.zirIsNonNull(block, inst), + //.is_non_null_ptr => try sema.zirIsNonNullPtr(block, inst), + //.loop => try sema.zirLoop(block, inst), + //.merge_error_sets => try sema.zirMergeErrorSets(block, inst), + //.negate => try sema.zirNegate(block, inst, .sub), + //.negate_wrap => try sema.zirNegate(block, inst, .subwrap), + //.optional_payload_safe => try sema.zirOptionalPayload(block, inst, true), + //.optional_payload_safe_ptr => try sema.zirOptionalPayloadPtr(block, inst, true), + //.optional_payload_unsafe => try sema.zirOptionalPayload(block, inst, false), + //.optional_payload_unsafe_ptr => try sema.zirOptionalPayloadPtr(block, inst, false), + 
//.optional_type => try sema.zirOptionalType(block, inst), + //.param_type => try sema.zirParamType(block, inst), + //.ptr_type => try sema.zirPtrType(block, inst), + //.ptr_type_simple => try sema.zirPtrTypeSimple(block, inst), + //.ref => try sema.zirRef(block, inst), + //.ret_err_value_code => try sema.zirRetErrValueCode(block, inst), + //.shl => try sema.zirShl(block, inst), + //.shr => try sema.zirShr(block, inst), + //.slice_end => try sema.zirSliceEnd(block, inst), + //.slice_sentinel => try sema.zirSliceSentinel(block, inst), + //.slice_start => try sema.zirSliceStart(block, inst), + //.str => try sema.zirStr(block, inst), + //.switch_block => try sema.zirSwitchBlock(block, inst, false, .none), + //.switch_block_multi => try sema.zirSwitchBlockMulti(block, inst, false, .none), + //.switch_block_else => try sema.zirSwitchBlock(block, inst, false, .@"else"), + //.switch_block_else_multi => try sema.zirSwitchBlockMulti(block, inst, false, .@"else"), + //.switch_block_under => try sema.zirSwitchBlock(block, inst, false, .under), + //.switch_block_under_multi => try sema.zirSwitchBlockMulti(block, inst, false, .under), + //.switch_block_ref => try sema.zirSwitchBlock(block, inst, true, .none), + //.switch_block_ref_multi => try sema.zirSwitchBlockMulti(block, inst, true, .none), + //.switch_block_ref_else => try sema.zirSwitchBlock(block, inst, true, .@"else"), + //.switch_block_ref_else_multi => try sema.zirSwitchBlockMulti(block, inst, true, .@"else"), + //.switch_block_ref_under => try sema.zirSwitchBlock(block, inst, true, .under), + //.switch_block_ref_under_multi => try sema.zirSwitchBlockMulti(block, inst, true, .under), + //.switch_capture => try sema.zirSwitchCapture(block, inst, false, false), + //.switch_capture_ref => try sema.zirSwitchCapture(block, inst, false, true), + //.switch_capture_multi => try sema.zirSwitchCapture(block, inst, true, false), + //.switch_capture_multi_ref => try sema.zirSwitchCapture(block, inst, true, true), + //.switch_capture_else => try sema.zirSwitchCaptureElse(block, inst, false), + //.switch_capture_else_ref => try sema.zirSwitchCaptureElse(block, inst, true), + //.type_info => try sema.zirTypeInfo(block, inst), + //.size_of => try sema.zirSizeOf(block, inst), + //.bit_size_of => try sema.zirBitSizeOf(block, inst), + //.typeof => try sema.zirTypeof(block, inst), + //.typeof_elem => try sema.zirTypeofElem(block, inst), + //.log2_int_type => try sema.zirLog2IntType(block, inst), + //.typeof_log2_int_type => try sema.zirTypeofLog2IntType(block, inst), + //.xor => try sema.zirBitwise(block, inst, .xor), + //.struct_init_empty => try sema.zirStructInitEmpty(block, inst), + //.struct_init => try sema.zirStructInit(block, inst, false), + //.struct_init_ref => try sema.zirStructInit(block, inst, true), + //.struct_init_anon => try sema.zirStructInitAnon(block, inst, false), + //.struct_init_anon_ref => try sema.zirStructInitAnon(block, inst, true), + //.array_init => try sema.zirArrayInit(block, inst, false), + //.array_init_ref => try sema.zirArrayInit(block, inst, true), + //.array_init_anon => try sema.zirArrayInitAnon(block, inst, false), + //.array_init_anon_ref => try sema.zirArrayInitAnon(block, inst, true), + //.union_init_ptr => try sema.zirUnionInitPtr(block, inst), + //.field_type => try sema.zirFieldType(block, inst), + //.field_type_ref => try sema.zirFieldTypeRef(block, inst), + //.ptr_to_int => try sema.zirPtrToInt(block, inst), + //.align_of => try sema.zirAlignOf(block, inst), + //.bool_to_int => try sema.zirBoolToInt(block, inst), + 
//.embed_file => try sema.zirEmbedFile(block, inst), + //.error_name => try sema.zirErrorName(block, inst), + //.tag_name => try sema.zirTagName(block, inst), + //.reify => try sema.zirReify(block, inst), + //.type_name => try sema.zirTypeName(block, inst), + //.frame_type => try sema.zirFrameType(block, inst), + //.frame_size => try sema.zirFrameSize(block, inst), + //.float_to_int => try sema.zirFloatToInt(block, inst), + //.int_to_float => try sema.zirIntToFloat(block, inst), + //.int_to_ptr => try sema.zirIntToPtr(block, inst), + //.float_cast => try sema.zirFloatCast(block, inst), + //.int_cast => try sema.zirIntCast(block, inst), + //.err_set_cast => try sema.zirErrSetCast(block, inst), + //.ptr_cast => try sema.zirPtrCast(block, inst), + //.truncate => try sema.zirTruncate(block, inst), + //.align_cast => try sema.zirAlignCast(block, inst), + //.has_decl => try sema.zirHasDecl(block, inst), + //.has_field => try sema.zirHasField(block, inst), + //.clz => try sema.zirClz(block, inst), + //.ctz => try sema.zirCtz(block, inst), + //.pop_count => try sema.zirPopCount(block, inst), + //.byte_swap => try sema.zirByteSwap(block, inst), + //.bit_reverse => try sema.zirBitReverse(block, inst), + //.div_exact => try sema.zirDivExact(block, inst), + //.div_floor => try sema.zirDivFloor(block, inst), + //.div_trunc => try sema.zirDivTrunc(block, inst), + //.mod => try sema.zirMod(block, inst), + //.rem => try sema.zirRem(block, inst), + //.shl_exact => try sema.zirShlExact(block, inst), + //.shr_exact => try sema.zirShrExact(block, inst), + //.bit_offset_of => try sema.zirBitOffsetOf(block, inst), + //.offset_of => try sema.zirOffsetOf(block, inst), + //.cmpxchg_strong => try sema.zirCmpxchg(block, inst), + //.cmpxchg_weak => try sema.zirCmpxchg(block, inst), + //.splat => try sema.zirSplat(block, inst), + //.reduce => try sema.zirReduce(block, inst), + //.shuffle => try sema.zirShuffle(block, inst), + //.atomic_load => try sema.zirAtomicLoad(block, inst), + //.atomic_rmw => try sema.zirAtomicRmw(block, inst), + //.atomic_store => try sema.zirAtomicStore(block, inst), + //.mul_add => try sema.zirMulAdd(block, inst), + //.builtin_call => try sema.zirBuiltinCall(block, inst), + //.field_ptr_type => try sema.zirFieldPtrType(block, inst), + //.field_parent_ptr => try sema.zirFieldParentPtr(block, inst), + //.memcpy => try sema.zirMemcpy(block, inst), + //.memset => try sema.zirMemset(block, inst), + //.builtin_async_call => try sema.zirBuiltinAsyncCall(block, inst), + //.@"resume" => try sema.zirResume(block, inst), + //.@"await" => try sema.zirAwait(block, inst, false), + //.await_nosuspend => try sema.zirAwait(block, inst, true), + //.extended => try sema.zirExtended(block, inst), + + //.sqrt => try sema.zirUnaryMath(block, inst), + //.sin => try sema.zirUnaryMath(block, inst), + //.cos => try sema.zirUnaryMath(block, inst), + //.exp => try sema.zirUnaryMath(block, inst), + //.exp2 => try sema.zirUnaryMath(block, inst), + //.log => try sema.zirUnaryMath(block, inst), + //.log2 => try sema.zirUnaryMath(block, inst), + //.log10 => try sema.zirUnaryMath(block, inst), + //.fabs => try sema.zirUnaryMath(block, inst), + //.floor => try sema.zirUnaryMath(block, inst), + //.ceil => try sema.zirUnaryMath(block, inst), + //.trunc => try sema.zirUnaryMath(block, inst), + //.round => try sema.zirUnaryMath(block, inst), + + //.opaque_decl => try sema.zirOpaqueDecl(block, inst, .parent), + //.opaque_decl_anon => try sema.zirOpaqueDecl(block, inst, .anon), + //.opaque_decl_func => try sema.zirOpaqueDecl(block, 
inst, .func), + //.error_set_decl => try sema.zirErrorSetDecl(block, inst, .parent), + //.error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon), + //.error_set_decl_func => try sema.zirErrorSetDecl(block, inst, .func), + + //.add => try sema.zirArithmetic(block, inst), + //.addwrap => try sema.zirArithmetic(block, inst), + //.div => try sema.zirArithmetic(block, inst), + //.mod_rem => try sema.zirArithmetic(block, inst), + //.mul => try sema.zirArithmetic(block, inst), + //.mulwrap => try sema.zirArithmetic(block, inst), + //.sub => try sema.zirArithmetic(block, inst), + //.subwrap => try sema.zirArithmetic(block, inst), + + //// Instructions that we know to *always* be noreturn based solely on their tag. + //// These functions match the return type of analyzeBody so that we can + //// tail call them here. + //.break_inline => return inst, + //.condbr => return sema.zirCondbr(block, inst), + //.@"break" => return sema.zirBreak(block, inst), + //.compile_error => return sema.zirCompileError(block, inst), + //.ret_coerce => return sema.zirRetCoerce(block, inst, true), + //.ret_node => return sema.zirRetNode(block, inst), + //.ret_err_value => return sema.zirRetErrValue(block, inst), + //.@"unreachable" => return sema.zirUnreachable(block, inst), + //.repeat => return sema.zirRepeat(block, inst), + //.panic => return sema.zirPanic(block, inst), + //// zig fmt: on + + //// Instructions that we know can *never* be noreturn based solely on + //// their tag. We avoid needlessly checking if they are noreturn and + //// continue the loop. + //// We also know that they cannot be referenced later, so we avoid + //// putting them into the map. + //.breakpoint => { + // try sema.zirBreakpoint(block, inst); + // i += 1; + // continue; + //}, + //.fence => { + // try sema.zirFence(block, inst); + // i += 1; + // continue; + //}, + //.dbg_stmt => { + // try sema.zirDbgStmt(block, inst); + // i += 1; + // continue; + //}, + //.ensure_err_payload_void => { + // try sema.zirEnsureErrPayloadVoid(block, inst); + // i += 1; + // continue; + //}, + //.ensure_result_non_error => { + // try sema.zirEnsureResultNonError(block, inst); + // i += 1; + // continue; + //}, + //.ensure_result_used => { + // try sema.zirEnsureResultUsed(block, inst); + // i += 1; + // continue; + //}, + //.set_eval_branch_quota => { + // try sema.zirSetEvalBranchQuota(block, inst); + // i += 1; + // continue; + //}, + //.store => { + // try sema.zirStore(block, inst); + // i += 1; + // continue; + //}, + //.store_node => { + // try sema.zirStoreNode(block, inst); + // i += 1; + // continue; + //}, + //.store_to_block_ptr => { + // try sema.zirStoreToBlockPtr(block, inst); + // i += 1; + // continue; + //}, + //.store_to_inferred_ptr => { + // try sema.zirStoreToInferredPtr(block, inst); + // i += 1; + // continue; + //}, + //.resolve_inferred_alloc => { + // try sema.zirResolveInferredAlloc(block, inst); + // i += 1; + // continue; + //}, + //.validate_struct_init_ptr => { + // try sema.zirValidateStructInitPtr(block, inst); + // i += 1; + // continue; + //}, + //.validate_array_init_ptr => { + // try sema.zirValidateArrayInitPtr(block, inst); + // i += 1; + // continue; + //}, + //.@"export" => { + // try sema.zirExport(block, inst); + // i += 1; + // continue; + //}, + //.set_align_stack => { + // try sema.zirSetAlignStack(block, inst); + // i += 1; + // continue; + //}, + //.set_cold => { + // try sema.zirSetCold(block, inst); + // i += 1; + // continue; + //}, + //.set_float_mode => { + // try sema.zirSetFloatMode(block, 
inst); + // i += 1; + // continue; + //}, + //.set_runtime_safety => { + // try sema.zirSetRuntimeSafety(block, inst); + // i += 1; + // continue; + //}, // Special case instructions to handle comptime control flow. .repeat_inline => { @@ -505,37 +505,38 @@ pub fn analyzeBody( i = 0; continue; }, - .block_inline => blk: { - // Directly analyze the block body without introducing a new block. - const inst_data = datas[inst].pl_node; - const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); - const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; - const break_inst = try sema.analyzeBody(block, inline_body); - const break_data = datas[break_inst].@"break"; - if (inst == break_data.block_inst) { - break :blk try sema.resolveInst(break_data.operand); - } else { - return break_inst; - } - }, - .condbr_inline => blk: { - const inst_data = datas[inst].pl_node; - const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; - const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); - const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len]; - const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; - const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition); - const inline_body = if (cond.val.toBool()) then_body else else_body; - const break_inst = try sema.analyzeBody(block, inline_body); - const break_data = datas[break_inst].@"break"; - if (inst == break_data.block_inst) { - break :blk try sema.resolveInst(break_data.operand); - } else { - return break_inst; - } - }, + //.block_inline => blk: { + // // Directly analyze the block body without introducing a new block. + // const inst_data = datas[inst].pl_node; + // const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); + // const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; + // const break_inst = try sema.analyzeBody(block, inline_body); + // const break_data = datas[break_inst].@"break"; + // if (inst == break_data.block_inst) { + // break :blk try sema.resolveInst(break_data.operand); + // } else { + // return break_inst; + // } + //}, + //.condbr_inline => blk: { + // const inst_data = datas[inst].pl_node; + // const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; + // const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); + // const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len]; + // const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; + // const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition); + // const inline_body = if (cond.val.toBool()) then_body else else_body; + // const break_inst = try sema.analyzeBody(block, inline_body); + // const break_data = datas[break_inst].@"break"; + // if (inst == break_data.block_inst) { + // break :blk try sema.resolveInst(break_data.operand); + // } else { + // return break_inst; + // } + //}, + else => @panic("TODO remove else prong"), }; - if (air_inst.ty.isNoReturn()) + if (sema.getAirType(air_inst).isNoReturn()) return always_noreturn; try map.put(sema.gpa, inst, air_inst); i += 1; @@ -577,18 +578,13 @@ fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro } } -/// TODO when we rework AIR memory layout, this function will no longer have a possible error. 
-pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) error{OutOfMemory}!Air.Inst.Index { +pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) Air.Inst.Ref { var i: usize = @enumToInt(zir_ref); // First section of indexes correspond to a set number of constant values. if (i < Zir.Inst.Ref.typed_value_map.len) { - // TODO when we rework AIR memory layout, this function can be as simple as: - // if (zir_ref < Zir.const_inst_list.len + sema.param_count) - // return zir_ref; - // Until then we allocate memory for a new, mutable `ir.Inst` to match what - // AIR expects. - return sema.mod.constInst(sema.arena, .unneeded, Zir.Inst.Ref.typed_value_map[i]); + // We intentionally map the same indexes to the same values between ZIR and AIR. + return zir_ref; } i -= Zir.Inst.Ref.typed_value_map.len; @@ -1256,7 +1252,7 @@ fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In return sema.analyzeLoad(block, src, result_ptr, result_ptr.src); } -fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const arg_name = inst_data.get(sema.code); const arg_index = sema.next_arg_index; @@ -1271,7 +1267,7 @@ fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air // Set the name of the Air.Arg instruction for use by codegen debug info. const air_arg = sema.param_inst_list[arg_index]; - sema.air.instructions.items(.data)[air_arg].ty_str.str = inst_data.start; + sema.air_instructions.items(.data)[air_arg].ty_str.str = inst_data.start; return air_arg; } @@ -7942,6 +7938,18 @@ fn enumFieldSrcLoc( } else unreachable; } +fn getAirType(sema: *Sema, air_ref: Air.Inst.Ref) Type { + var i: usize = @enumToInt(air_ref); + if (i < Air.Inst.Ref.typed_value_map.len) { + return Air.Inst.Ref.typed_value_map[i].val.toType(undefined) catch unreachable; + } + i -= Air.Inst.Ref.typed_value_map.len; + const air_tags = sema.air_instructions.items(.tag); + const air_datas = sema.air_instructions.items(.data); + assert(air_tags[i] == .const_ty); + return air_datas[i].ty; +} + pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { switch (ty.tag()) { .u8 => return .u8_type, diff --git a/src/codegen.zig b/src/codegen.zig index eaf910977e..a6c4b5ad3c 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -282,7 +282,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return struct { gpa: *Allocator, - air: *const Air, + air: Air, + liveness: Liveness, bin_file: *link.File, target: *const std.Target, mod_fn: *const Module.Fn, @@ -468,8 +469,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { var function = Self{ .gpa = bin_file.allocator, - .air = &air, - .liveness = &liveness, + .air = air, + .liveness = liveness, .target = &bin_file.options.target, .bin_file = bin_file, .mod_fn = module_fn, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index e3f2423746..4743494f35 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -6,7 +6,6 @@ const log = std.log.scoped(.c); const link = @import("../link.zig"); const Module = @import("../Module.zig"); const Compilation = @import("../Compilation.zig"); -const Air = @import("../Air.zig"); const Value = @import("../value.zig").Value; const Type = @import("../type.zig").Type; const TypedValue = @import("../TypedValue.zig"); @@ -14,6 +13,8 @@ const C = link.File.C; const Decl = Module.Decl; const trace = 
@import("../tracy.zig").trace; const LazySrcLoc = Module.LazySrcLoc; +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); const Mutability = enum { Const, Mut }; @@ -37,7 +38,7 @@ const BlockData = struct { result: CValue, }; -pub const CValueMap = std.AutoHashMap(*Inst, CValue); +pub const CValueMap = std.AutoHashMap(Air.Inst.Index, CValue); pub const TypedefMap = std.ArrayHashMap( Type, struct { name: []const u8, rendered: []u8 }, @@ -93,6 +94,8 @@ pub fn fmtIdent(ident: []const u8) std.fmt.Formatter(formatIdent) { /// It is not available when generating .h file. pub const Object = struct { dg: DeclGen, + air: Air, + liveness: Liveness, gpa: *mem.Allocator, code: std.ArrayList(u8), value_map: CValueMap, @@ -102,7 +105,7 @@ pub const Object = struct { next_block_index: usize = 0, indent_writer: IndentWriter(std.ArrayList(u8).Writer), - fn resolveInst(o: *Object, inst: *Inst) !CValue { + fn resolveInst(o: *Object, inst: Air.Inst.Index) !CValue { if (inst.value()) |_| { return CValue{ .constant = inst }; } diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 45ee2d9bb8..ddf2883259 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -277,6 +277,9 @@ pub const Object = struct { } pub fn updateDecl(self: *Object, module: *Module, decl: *Module.Decl) !void { + const tracy = trace(@src()); + defer tracy.end(); + var dg: DeclGen = .{ .object = self, .module = module, diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 3d704a8dc5..4da320b087 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -159,7 +159,8 @@ pub const DeclGen = struct { /// The SPIR-V module code should be put in. spv: *SPIRVModule, - air: *const Air, + air: Air, + liveness: Liveness, /// An array of function argument result-ids. Each index corresponds with the /// function argument of the same index. 
diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig index 45b00ddfad..912577a358 100644 --- a/src/codegen/wasm.zig +++ b/src/codegen/wasm.zig @@ -9,13 +9,14 @@ const wasm = std.wasm; const Module = @import("../Module.zig"); const Decl = Module.Decl; -const Air = @import("../Air.zig"); const Type = @import("../type.zig").Type; const Value = @import("../value.zig").Value; const Compilation = @import("../Compilation.zig"); const LazySrcLoc = Module.LazySrcLoc; const link = @import("../link.zig"); const TypedValue = @import("../TypedValue.zig"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); /// Wasm Value, created when generating an instruction const WValue = union(enum) { @@ -491,6 +492,8 @@ pub const Context = struct { /// Reference to the function declaration the code /// section belongs to decl: *Decl, + air: Air, + liveness: Liveness, gpa: *mem.Allocator, /// Table to save `WValue`'s generated by an `Inst` values: ValueTable, @@ -710,52 +713,53 @@ pub const Context = struct { } } + pub fn genFunc(self: *Context, func: *Module.Fn) InnerError!Result { + try self.genFunctype(); + + // Write instructions + // TODO: check for and handle death of instructions + + // Reserve space to write the size after generating the code as well as space for locals count + try self.code.resize(10); + + try self.genBody(func.body); + + // finally, write our local types at the 'offset' position + { + leb.writeUnsignedFixed(5, self.code.items[5..10], @intCast(u32, self.locals.items.len)); + + // offset into 'code' section where we will put our locals types + var local_offset: usize = 10; + + // emit the actual locals amount + for (self.locals.items) |local| { + var buf: [6]u8 = undefined; + leb.writeUnsignedFixed(5, buf[0..5], @as(u32, 1)); + buf[5] = local; + try self.code.insertSlice(local_offset, &buf); + local_offset += 6; + } + } + + const writer = self.code.writer(); + try writer.writeByte(wasm.opcode(.end)); + + // Fill in the size of the generated code to the reserved space at the + // beginning of the buffer. 
+ const size = self.code.items.len - 5 + self.decl.fn_link.wasm.idx_refs.items.len * 5; + leb.writeUnsignedFixed(5, self.code.items[0..5], @intCast(u32, size)); + + // codegen data has been appended to `code` + return Result.appended; + } + /// Generates the wasm bytecode for the function declaration belonging to `Context` pub fn gen(self: *Context, typed_value: TypedValue) InnerError!Result { switch (typed_value.ty.zigTypeTag()) { .Fn => { try self.genFunctype(); - - // Write instructions - // TODO: check for and handle death of instructions - const mod_fn = blk: { - if (typed_value.val.castTag(.function)) |func| break :blk func.data; - if (typed_value.val.castTag(.extern_fn)) |_| return Result.appended; // don't need code body for extern functions - unreachable; - }; - - // Reserve space to write the size after generating the code as well as space for locals count - try self.code.resize(10); - - try self.genBody(mod_fn.body); - - // finally, write our local types at the 'offset' position - { - leb.writeUnsignedFixed(5, self.code.items[5..10], @intCast(u32, self.locals.items.len)); - - // offset into 'code' section where we will put our locals types - var local_offset: usize = 10; - - // emit the actual locals amount - for (self.locals.items) |local| { - var buf: [6]u8 = undefined; - leb.writeUnsignedFixed(5, buf[0..5], @as(u32, 1)); - buf[5] = local; - try self.code.insertSlice(local_offset, &buf); - local_offset += 6; - } - } - - const writer = self.code.writer(); - try writer.writeByte(wasm.opcode(.end)); - - // Fill in the size of the generated code to the reserved space at the - // beginning of the buffer. - const size = self.code.items.len - 5 + self.decl.fn_link.wasm.idx_refs.items.len * 5; - leb.writeUnsignedFixed(5, self.code.items[0..5], @intCast(u32, size)); - - // codegen data has been appended to `code` - return Result.appended; + if (typed_value.val.castTag(.extern_fn)) |_| return Result.appended; // don't need code body for extern functions + return self.fail("TODO implement wasm codegen for function pointers", .{}); }, .Array => { if (typed_value.val.castTag(.bytes)) |payload| { diff --git a/src/link.zig b/src/link.zig index 02d9afaf07..2403180ec8 100644 --- a/src/link.zig +++ b/src/link.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const builtin = @import("builtin"); const mem = std.mem; const Allocator = std.mem.Allocator; const fs = std.fs; @@ -14,8 +15,10 @@ const Cache = @import("Cache.zig"); const build_options = @import("build_options"); const LibCInstallation = @import("libc_installation.zig").LibCInstallation; const wasi_libc = @import("wasi_libc.zig"); +const Air = @import("Air.zig"); +const Liveness = @import("Liveness.zig"); -pub const producer_string = if (std.builtin.is_test) "zig test" else "zig " ++ build_options.version; +pub const producer_string = if (builtin.is_test) "zig test" else "zig " ++ build_options.version; pub const Emit = struct { /// Where the output will go. 
@@ -313,13 +316,34 @@ pub const File = struct { log.debug("updateDecl {*} ({s}), type={}", .{ decl, decl.name, decl.ty }); assert(decl.has_tv); switch (base.tag) { - .coff => return @fieldParentPtr(Coff, "base", base).updateDecl(module, decl), - .elf => return @fieldParentPtr(Elf, "base", base).updateDecl(module, decl), + // zig fmt: off + .coff => return @fieldParentPtr(Coff, "base", base).updateDecl(module, decl), + .elf => return @fieldParentPtr(Elf, "base", base).updateDecl(module, decl), .macho => return @fieldParentPtr(MachO, "base", base).updateDecl(module, decl), - .c => return @fieldParentPtr(C, "base", base).updateDecl(module, decl), - .wasm => return @fieldParentPtr(Wasm, "base", base).updateDecl(module, decl), + .c => return @fieldParentPtr(C, "base", base).updateDecl(module, decl), + .wasm => return @fieldParentPtr(Wasm, "base", base).updateDecl(module, decl), .spirv => return @fieldParentPtr(SpirV, "base", base).updateDecl(module, decl), .plan9 => return @fieldParentPtr(Plan9, "base", base).updateDecl(module, decl), + // zig fmt: on + } + } + + /// May be called before or after updateDeclExports but must be called + /// after allocateDeclIndexes for any given Decl. + pub fn updateFunc(base: *File, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + log.debug("updateFunc {*} ({s}), type={}", .{ + func.owner_decl, func.owner_decl.name, func.owner_decl.ty, + }); + switch (base.tag) { + // zig fmt: off + .coff => return @fieldParentPtr(Coff, "base", base).updateFunc(module, func, air, liveness), + .elf => return @fieldParentPtr(Elf, "base", base).updateFunc(module, func, air, liveness), + .macho => return @fieldParentPtr(MachO, "base", base).updateFunc(module, func, air, liveness), + .c => return @fieldParentPtr(C, "base", base).updateFunc(module, func, air, liveness), + .wasm => return @fieldParentPtr(Wasm, "base", base).updateFunc(module, func, air, liveness), + .spirv => return @fieldParentPtr(SpirV, "base", base).updateFunc(module, func, air, liveness), + .plan9 => return @fieldParentPtr(Plan9, "base", base).updateFunc(module, func, air, liveness), + // zig fmt: on } } diff --git a/src/link/C.zig b/src/link/C.zig index 53561d16cd..09f789f7d1 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -2,14 +2,17 @@ const std = @import("std"); const mem = std.mem; const assert = std.debug.assert; const Allocator = std.mem.Allocator; +const fs = std.fs; + +const C = @This(); const Module = @import("../Module.zig"); const Compilation = @import("../Compilation.zig"); -const fs = std.fs; const codegen = @import("../codegen/c.zig"); const link = @import("../link.zig"); const trace = @import("../tracy.zig").trace; -const C = @This(); const Type = @import("../type.zig").Type; +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); pub const base_tag: link.File.Tag = .c; pub const zig_h = @embedFile("C/zig.h"); @@ -95,10 +98,7 @@ fn deinitDecl(gpa: *Allocator, decl: *Module.Decl) void { decl.fn_link.c.typedefs.deinit(gpa); } -pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void { - const tracy = trace(@src()); - defer tracy.end(); - +pub fn finishUpdateDecl(self: *C, module: *Module, decl: *Module.Decl, air: Air, liveness: Liveness) !void { // Keep track of all decls so we can iterate over them on flush(). 
_ = try self.decl_table.getOrPut(self.base.allocator, decl); @@ -126,6 +126,8 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void { .code = code.toManaged(module.gpa), .value_map = codegen.CValueMap.init(module.gpa), .indent_writer = undefined, // set later so we can get a pointer to object.code + .air = air, + .liveness = liveness, }; object.indent_writer = .{ .underlying_writer = object.code.writer() }; defer { @@ -157,6 +159,20 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void { code.shrinkAndFree(module.gpa, code.items.len); } +pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + const tracy = trace(@src()); + defer tracy.end(); + + return self.finishUpdateDecl(module, func.owner_decl, air, liveness); +} + +pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void { + const tracy = trace(@src()); + defer tracy.end(); + + return self.finishUpdateDecl(module, decl, undefined, undefined); +} + pub fn updateDeclLineNumber(self: *C, module: *Module, decl: *Module.Decl) !void { // The C backend does not have the ability to fix line numbers without re-generating // the entire Decl. diff --git a/src/link/Coff.zig b/src/link/Coff.zig index b466cf9136..44442b73a3 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1,6 +1,7 @@ const Coff = @This(); const std = @import("std"); +const builtin = @import("builtin"); const log = std.log.scoped(.link); const Allocator = std.mem.Allocator; const assert = std.debug.assert; @@ -17,6 +18,8 @@ const build_options = @import("build_options"); const Cache = @import("../Cache.zig"); const mingw = @import("../mingw.zig"); const llvm_backend = @import("../codegen/llvm.zig"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); const allocation_padding = 4 / 3; const minimum_text_block_size = 64 * allocation_padding; @@ -653,19 +656,58 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void { } } -pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void { - // TODO COFF/PE debug information - // TODO Implement exports +pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native and builtin.object_format != .coff and builtin.object_format != .pe) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + } const tracy = trace(@src()); defer tracy.end(); - if (build_options.have_llvm) - if (self.llvm_object) |llvm_object| return try llvm_object.updateDecl(module, decl); + var code_buffer = std.ArrayList(u8).init(self.base.allocator); + defer code_buffer.deinit(); + + const res = try codegen.generateFunction( + &self.base, + decl.srcLoc(), + func, + air, + liveness, + &code_buffer, + .none, + ); + const code = switch (res) { + .externally_managed => |x| x, + .appended => code_buffer.items, + .fail => |em| { + decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, decl, em); + return; + }, + }; + + return self.finishUpdateDecl(module, func.owner_decl, code); +} + +pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void { + if (build_options.skip_non_native and builtin.object_format != .coff and builtin.object_format != .pe) { + @panic("Attempted to compile for object format that was disabled by build configuration"); 
+    }
+    if (build_options.have_llvm) {
+        if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl);
+    }
+    const tracy = trace(@src());
+    defer tracy.end();
 
     if (decl.val.tag() == .extern_fn) {
         return; // TODO Should we do more when front-end analyzed extern decl?
     }
 
+    // TODO COFF/PE debug information
+    // TODO Implement exports
+
     var code_buffer = std.ArrayList(u8).init(self.base.allocator);
     defer code_buffer.deinit();
 
@@ -683,6 +725,10 @@ pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void {
         },
     };
 
+    return self.finishUpdateDecl(module, decl, code);
+}
+
+fn finishUpdateDecl(self: *Coff, module: *Module, decl: *Module.Decl, code: []const u8) !void {
     const required_alignment = decl.ty.abiAlignment(self.base.options.target);
     const curr_size = decl.link.coff.size;
     if (curr_size != 0) {
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 90224866ba..0d05b97846 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -1,6 +1,7 @@
 const Elf = @This();
 
 const std = @import("std");
+const builtin = @import("builtin");
 const mem = std.mem;
 const assert = std.debug.assert;
 const Allocator = std.mem.Allocator;
@@ -10,7 +11,6 @@ const log = std.log.scoped(.link);
 const DW = std.dwarf;
 const leb128 = std.leb;
 
-const Air = @import("../Air.zig");
 const Module = @import("../Module.zig");
 const Compilation = @import("../Compilation.zig");
 const codegen = @import("../codegen.zig");
@@ -26,6 +26,8 @@ const glibc = @import("../glibc.zig");
 const musl = @import("../musl.zig");
 const Cache = @import("../Cache.zig");
 const llvm_backend = @import("../codegen/llvm.zig");
+const Air = @import("../Air.zig");
+const Liveness = @import("../Liveness.zig");
 
 const default_entry_addr = 0x8000000;
 
@@ -2155,138 +2157,17 @@ pub fn freeDecl(self: *Elf, decl: *Module.Decl) void {
     }
 }
 
-pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
-    const tracy = trace(@src());
-    defer tracy.end();
-
-    if (build_options.have_llvm)
-        if (self.llvm_object) |llvm_object| return try llvm_object.updateDecl(module, decl);
-
-    if (decl.val.tag() == .extern_fn) {
-        return; // TODO Should we do more when front-end analyzed extern decl?
-    }
-    if (decl.val.castTag(.variable)) |payload| {
-        const variable = payload.data;
-        if (variable.is_extern) {
-            return; // TODO Should we do more when front-end analyzed extern decl?
-        }
-    }
-
-    var code_buffer = std.ArrayList(u8).init(self.base.allocator);
-    defer code_buffer.deinit();
-
-    var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator);
-    defer dbg_line_buffer.deinit();
-
-    var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator);
-    defer dbg_info_buffer.deinit();
-
-    var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{};
-    defer {
-        var it = dbg_info_type_relocs.valueIterator();
-        while (it.next()) |value| {
-            value.relocs.deinit(self.base.allocator);
-        }
-        dbg_info_type_relocs.deinit(self.base.allocator);
-    }
-
-    const is_fn: bool = switch (decl.ty.zigTypeTag()) {
-        .Fn => true,
-        else => false,
-    };
-    if (is_fn) {
-        // For functions we need to add a prologue to the debug line program.
-        try dbg_line_buffer.ensureCapacity(26);
-
-        const func = decl.val.castTag(.function).?.data;
-        const line_off = @intCast(u28, decl.src_line + func.lbrace_line);
-
-        const ptr_width_bytes = self.ptrWidthBytes();
-        dbg_line_buffer.appendSliceAssumeCapacity(&[_]u8{
-            DW.LNS_extended_op,
-            ptr_width_bytes + 1,
-            DW.LNE_set_address,
-        });
-        // This is the "relocatable" vaddr, corresponding to `code_buffer` index `0`.
- assert(dbg_line_vaddr_reloc_index == dbg_line_buffer.items.len); - dbg_line_buffer.items.len += ptr_width_bytes; - - dbg_line_buffer.appendAssumeCapacity(DW.LNS_advance_line); - // This is the "relocatable" relative line offset from the previous function's end curly - // to this function's begin curly. - assert(self.getRelocDbgLineOff() == dbg_line_buffer.items.len); - // Here we use a ULEB128-fixed-4 to make sure this field can be overwritten later. - leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), line_off); - - dbg_line_buffer.appendAssumeCapacity(DW.LNS_set_file); - assert(self.getRelocDbgFileIndex() == dbg_line_buffer.items.len); - // Once we support more than one source file, this will have the ability to be more - // than one possible value. - const file_index = 1; - leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), file_index); - - // Emit a line for the begin curly with prologue_end=false. The codegen will - // do the work of setting prologue_end=true and epilogue_begin=true. - dbg_line_buffer.appendAssumeCapacity(DW.LNS_copy); - - // .debug_info subprogram - const decl_name_with_null = decl.name[0 .. mem.lenZ(decl.name) + 1]; - try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 25 + decl_name_with_null.len); - - const fn_ret_type = decl.ty.fnReturnType(); - const fn_ret_has_bits = fn_ret_type.hasCodeGenBits(); - if (fn_ret_has_bits) { - dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram); - } else { - dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram_retvoid); - } - // These get overwritten after generating the machine code. These values are - // "relocations" and have to be in this fixed place so that functions can be - // moved in virtual address space. - assert(dbg_info_low_pc_reloc_index == dbg_info_buffer.items.len); - dbg_info_buffer.items.len += ptr_width_bytes; // DW.AT_low_pc, DW.FORM_addr - assert(self.getRelocDbgInfoSubprogramHighPC() == dbg_info_buffer.items.len); - dbg_info_buffer.items.len += 4; // DW.AT_high_pc, DW.FORM_data4 - if (fn_ret_has_bits) { - const gop = try dbg_info_type_relocs.getOrPut(self.base.allocator, fn_ret_type); - if (!gop.found_existing) { - gop.value_ptr.* = .{ - .off = undefined, - .relocs = .{}, - }; - } - try gop.value_ptr.relocs.append(self.base.allocator, @intCast(u32, dbg_info_buffer.items.len)); - dbg_info_buffer.items.len += 4; // DW.AT_type, DW.FORM_ref4 - } - dbg_info_buffer.appendSliceAssumeCapacity(decl_name_with_null); // DW.AT_name, DW.FORM_string - } else { - // TODO implement .debug_info for global variables +fn deinitRelocs(gpa: *Allocator, table: *File.DbgInfoTypeRelocsTable) void { + var it = table.valueIterator(); + while (it.next()) |value| { + value.relocs.deinit(gpa); } - const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; - const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ - .ty = decl.ty, - .val = decl_val, - }, &code_buffer, .{ - .dwarf = .{ - .dbg_line = &dbg_line_buffer, - .dbg_info = &dbg_info_buffer, - .dbg_info_type_relocs = &dbg_info_type_relocs, - }, - }); - const code = switch (res) { - .externally_managed => |x| x, - .appended => code_buffer.items, - .fail => |em| { - decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl, em); - return; - }, - }; + table.deinit(gpa); +} +fn updateDeclCode(self: *Elf, decl: *Module.Decl, code: []const u8, stt_bits: u8) !*elf.Elf64_Sym { const required_alignment = decl.ty.abiAlignment(self.base.options.target); - 
const stt_bits: u8 = if (is_fn) elf.STT_FUNC else elf.STT_OBJECT; - assert(decl.link.elf.local_sym_index != 0); // Caller forgot to allocateDeclIndexes() const local_sym = &self.local_symbols.items[decl.link.elf.local_sym_index]; if (local_sym.st_size != 0) { @@ -2338,128 +2219,16 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { const file_offset = self.sections.items[self.text_section_index.?].sh_offset + section_offset; try self.base.file.?.pwriteAll(code, file_offset); - const target_endian = self.base.options.target.cpu.arch.endian(); - - const text_block = &decl.link.elf; - - // If the Decl is a function, we need to update the .debug_line program. - if (is_fn) { - // Perform the relocations based on vaddr. - switch (self.ptr_width) { - .p32 => { - { - const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..4]; - mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian); - } - { - const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..4]; - mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian); - } - }, - .p64 => { - { - const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..8]; - mem.writeInt(u64, ptr, local_sym.st_value, target_endian); - } - { - const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..8]; - mem.writeInt(u64, ptr, local_sym.st_value, target_endian); - } - }, - } - { - const ptr = dbg_info_buffer.items[self.getRelocDbgInfoSubprogramHighPC()..][0..4]; - mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_size), target_endian); - } - - try dbg_line_buffer.appendSlice(&[_]u8{ DW.LNS_extended_op, 1, DW.LNE_end_sequence }); - - // Now we have the full contents and may allocate a region to store it. - - // This logic is nearly identical to the logic below in `updateDeclDebugInfoAllocation` for - // `TextBlock` and the .debug_info. If you are editing this logic, you - // probably need to edit that logic too. - - const debug_line_sect = &self.sections.items[self.debug_line_section_index.?]; - const src_fn = &decl.fn_link.elf; - src_fn.len = @intCast(u32, dbg_line_buffer.items.len); - if (self.dbg_line_fn_last) |last| not_first: { - if (src_fn.next) |next| { - // Update existing function - non-last item. - if (src_fn.off + src_fn.len + min_nop_size > next.off) { - // It grew too big, so we move it to a new location. - if (src_fn.prev) |prev| { - self.dbg_line_fn_free_list.put(self.base.allocator, prev, {}) catch {}; - prev.next = src_fn.next; - } - assert(src_fn.prev != next); - next.prev = src_fn.prev; - src_fn.next = null; - // Populate where it used to be with NOPs. - const file_pos = debug_line_sect.sh_offset + src_fn.off; - try self.pwriteDbgLineNops(0, &[0]u8{}, src_fn.len, file_pos); - // TODO Look at the free list before appending at the end. - src_fn.prev = last; - last.next = src_fn; - self.dbg_line_fn_last = src_fn; - - src_fn.off = last.off + padToIdeal(last.len); - } - } else if (src_fn.prev == null) { - if (src_fn == last) { - // Special case: there is only 1 function and it is being updated. - // In this case there is nothing to do. The function's length has - // already been updated, and the logic below takes care of - // resizing the .debug_line section. - break :not_first; - } - // Append new function. - // TODO Look at the free list before appending at the end. 
- src_fn.prev = last; - last.next = src_fn; - self.dbg_line_fn_last = src_fn; - - src_fn.off = last.off + padToIdeal(last.len); - } - } else { - // This is the first function of the Line Number Program. - self.dbg_line_fn_first = src_fn; - self.dbg_line_fn_last = src_fn; - - src_fn.off = padToIdeal(self.dbgLineNeededHeaderBytes()); - } - - const last_src_fn = self.dbg_line_fn_last.?; - const needed_size = last_src_fn.off + last_src_fn.len; - if (needed_size != debug_line_sect.sh_size) { - if (needed_size > self.allocatedSize(debug_line_sect.sh_offset)) { - const new_offset = self.findFreeSpace(needed_size, 1); - const existing_size = last_src_fn.off; - log.debug("moving .debug_line section: {d} bytes from 0x{x} to 0x{x}", .{ - existing_size, - debug_line_sect.sh_offset, - new_offset, - }); - const amt = try self.base.file.?.copyRangeAll(debug_line_sect.sh_offset, self.base.file.?, new_offset, existing_size); - if (amt != existing_size) return error.InputOutput; - debug_line_sect.sh_offset = new_offset; - } - debug_line_sect.sh_size = needed_size; - self.shdr_table_dirty = true; // TODO look into making only the one section dirty - self.debug_line_header_dirty = true; - } - const prev_padding_size: u32 = if (src_fn.prev) |prev| src_fn.off - (prev.off + prev.len) else 0; - const next_padding_size: u32 = if (src_fn.next) |next| next.off - (src_fn.off + src_fn.len) else 0; - - // We only have support for one compilation unit so far, so the offsets are directly - // from the .debug_line section. - const file_pos = debug_line_sect.sh_offset + src_fn.off; - try self.pwriteDbgLineNops(prev_padding_size, dbg_line_buffer.items, next_padding_size, file_pos); - - // .debug_info - End the TAG_subprogram children. - try dbg_info_buffer.append(0); - } + return local_sym; +} +fn finishUpdateDecl( + self: *Elf, + module: *Module, + decl: *Module.Decl, + dbg_info_type_relocs: *File.DbgInfoTypeRelocsTable, + dbg_info_buffer: *std.ArrayList(u8), +) !void { // Now we emit the .debug_info types of the Decl. These will count towards the size of // the buffer, so we have to do it before computing the offset, and we can't perform the actual // relocations yet. @@ -2467,12 +2236,15 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { var it = dbg_info_type_relocs.iterator(); while (it.next()) |entry| { entry.value_ptr.off = @intCast(u32, dbg_info_buffer.items.len); - try self.addDbgInfoType(entry.key_ptr.*, &dbg_info_buffer); + try self.addDbgInfoType(entry.key_ptr.*, dbg_info_buffer); } } + const text_block = &decl.link.elf; try self.updateDeclDebugInfoAllocation(text_block, @intCast(u32, dbg_info_buffer.items.len)); + const target_endian = self.base.options.target.cpu.arch.endian(); + { // Now that we have the offset assigned we can finally perform type relocations. 
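        // (Each entry's `off` records where that type's DIE was just emitted;
        // the loop below patches every recorded reloc site to point at that offset.)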
var it = dbg_info_type_relocs.valueIterator(); @@ -2495,6 +2267,290 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { return self.updateDeclExports(module, decl, decl_exports); } +pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native and builtin.object_format != .elf) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + } + + const tracy = trace(@src()); + defer tracy.end(); + + var code_buffer = std.ArrayList(u8).init(self.base.allocator); + defer code_buffer.deinit(); + + var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator); + defer dbg_line_buffer.deinit(); + + var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator); + defer dbg_info_buffer.deinit(); + + var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{}; + defer deinitRelocs(self.base.allocator, &dbg_info_type_relocs); + + // For functions we need to add a prologue to the debug line program. + try dbg_line_buffer.ensureCapacity(26); + + const decl = func.owner_decl; + const line_off = @intCast(u28, decl.src_line + func.lbrace_line); + + const ptr_width_bytes = self.ptrWidthBytes(); + dbg_line_buffer.appendSliceAssumeCapacity(&[_]u8{ + DW.LNS_extended_op, + ptr_width_bytes + 1, + DW.LNE_set_address, + }); + // This is the "relocatable" vaddr, corresponding to `code_buffer` index `0`. + assert(dbg_line_vaddr_reloc_index == dbg_line_buffer.items.len); + dbg_line_buffer.items.len += ptr_width_bytes; + + dbg_line_buffer.appendAssumeCapacity(DW.LNS_advance_line); + // This is the "relocatable" relative line offset from the previous function's end curly + // to this function's begin curly. + assert(self.getRelocDbgLineOff() == dbg_line_buffer.items.len); + // Here we use a ULEB128-fixed-4 to make sure this field can be overwritten later. + leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), line_off); + + dbg_line_buffer.appendAssumeCapacity(DW.LNS_set_file); + assert(self.getRelocDbgFileIndex() == dbg_line_buffer.items.len); + // Once we support more than one source file, this will have the ability to be more + // than one possible value. + const file_index = 1; + leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), file_index); + + // Emit a line for the begin curly with prologue_end=false. The codegen will + // do the work of setting prologue_end=true and epilogue_begin=true. + dbg_line_buffer.appendAssumeCapacity(DW.LNS_copy); + + // .debug_info subprogram + const decl_name_with_null = decl.name[0 .. mem.lenZ(decl.name) + 1]; + try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 25 + decl_name_with_null.len); + + const fn_ret_type = decl.ty.fnReturnType(); + const fn_ret_has_bits = fn_ret_type.hasCodeGenBits(); + if (fn_ret_has_bits) { + dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram); + } else { + dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram_retvoid); + } + // These get overwritten after generating the machine code. These values are + // "relocations" and have to be in this fixed place so that functions can be + // moved in virtual address space. 
+ assert(dbg_info_low_pc_reloc_index == dbg_info_buffer.items.len); + dbg_info_buffer.items.len += ptr_width_bytes; // DW.AT_low_pc, DW.FORM_addr + assert(self.getRelocDbgInfoSubprogramHighPC() == dbg_info_buffer.items.len); + dbg_info_buffer.items.len += 4; // DW.AT_high_pc, DW.FORM_data4 + if (fn_ret_has_bits) { + const gop = try dbg_info_type_relocs.getOrPut(self.base.allocator, fn_ret_type); + if (!gop.found_existing) { + gop.value_ptr.* = .{ + .off = undefined, + .relocs = .{}, + }; + } + try gop.value_ptr.relocs.append(self.base.allocator, @intCast(u32, dbg_info_buffer.items.len)); + dbg_info_buffer.items.len += 4; // DW.AT_type, DW.FORM_ref4 + } + dbg_info_buffer.appendSliceAssumeCapacity(decl_name_with_null); // DW.AT_name, DW.FORM_string + + const res = try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{ + .dwarf = .{ + .dbg_line = &dbg_line_buffer, + .dbg_info = &dbg_info_buffer, + .dbg_info_type_relocs = &dbg_info_type_relocs, + }, + }); + const code = switch (res) { + .externally_managed => |x| x, + .appended => code_buffer.items, + .fail => |em| { + decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, decl, em); + return; + }, + }; + + const local_sym = try self.updateDeclCode(decl, code, elf.STT_FUNC); + + const target_endian = self.base.options.target.cpu.arch.endian(); + + // Since the Decl is a function, we need to update the .debug_line program. + // Perform the relocations based on vaddr. + switch (self.ptr_width) { + .p32 => { + { + const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..4]; + mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian); + } + { + const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..4]; + mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian); + } + }, + .p64 => { + { + const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..8]; + mem.writeInt(u64, ptr, local_sym.st_value, target_endian); + } + { + const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..8]; + mem.writeInt(u64, ptr, local_sym.st_value, target_endian); + } + }, + } + { + const ptr = dbg_info_buffer.items[self.getRelocDbgInfoSubprogramHighPC()..][0..4]; + mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_size), target_endian); + } + + try dbg_line_buffer.appendSlice(&[_]u8{ DW.LNS_extended_op, 1, DW.LNE_end_sequence }); + + // Now we have the full contents and may allocate a region to store it. + + // This logic is nearly identical to the logic below in `updateDeclDebugInfoAllocation` for + // `TextBlock` and the .debug_info. If you are editing this logic, you + // probably need to edit that logic too. + + const debug_line_sect = &self.sections.items[self.debug_line_section_index.?]; + const src_fn = &decl.fn_link.elf; + src_fn.len = @intCast(u32, dbg_line_buffer.items.len); + if (self.dbg_line_fn_last) |last| not_first: { + if (src_fn.next) |next| { + // Update existing function - non-last item. + if (src_fn.off + src_fn.len + min_nop_size > next.off) { + // It grew too big, so we move it to a new location. + if (src_fn.prev) |prev| { + self.dbg_line_fn_free_list.put(self.base.allocator, prev, {}) catch {}; + prev.next = src_fn.next; + } + assert(src_fn.prev != next); + next.prev = src_fn.prev; + src_fn.next = null; + // Populate where it used to be with NOPs. 
+ const file_pos = debug_line_sect.sh_offset + src_fn.off; + try self.pwriteDbgLineNops(0, &[0]u8{}, src_fn.len, file_pos); + // TODO Look at the free list before appending at the end. + src_fn.prev = last; + last.next = src_fn; + self.dbg_line_fn_last = src_fn; + + src_fn.off = last.off + padToIdeal(last.len); + } + } else if (src_fn.prev == null) { + if (src_fn == last) { + // Special case: there is only 1 function and it is being updated. + // In this case there is nothing to do. The function's length has + // already been updated, and the logic below takes care of + // resizing the .debug_line section. + break :not_first; + } + // Append new function. + // TODO Look at the free list before appending at the end. + src_fn.prev = last; + last.next = src_fn; + self.dbg_line_fn_last = src_fn; + + src_fn.off = last.off + padToIdeal(last.len); + } + } else { + // This is the first function of the Line Number Program. + self.dbg_line_fn_first = src_fn; + self.dbg_line_fn_last = src_fn; + + src_fn.off = padToIdeal(self.dbgLineNeededHeaderBytes()); + } + + const last_src_fn = self.dbg_line_fn_last.?; + const needed_size = last_src_fn.off + last_src_fn.len; + if (needed_size != debug_line_sect.sh_size) { + if (needed_size > self.allocatedSize(debug_line_sect.sh_offset)) { + const new_offset = self.findFreeSpace(needed_size, 1); + const existing_size = last_src_fn.off; + log.debug("moving .debug_line section: {d} bytes from 0x{x} to 0x{x}", .{ + existing_size, + debug_line_sect.sh_offset, + new_offset, + }); + const amt = try self.base.file.?.copyRangeAll(debug_line_sect.sh_offset, self.base.file.?, new_offset, existing_size); + if (amt != existing_size) return error.InputOutput; + debug_line_sect.sh_offset = new_offset; + } + debug_line_sect.sh_size = needed_size; + self.shdr_table_dirty = true; // TODO look into making only the one section dirty + self.debug_line_header_dirty = true; + } + const prev_padding_size: u32 = if (src_fn.prev) |prev| src_fn.off - (prev.off + prev.len) else 0; + const next_padding_size: u32 = if (src_fn.next) |next| next.off - (src_fn.off + src_fn.len) else 0; + + // We only have support for one compilation unit so far, so the offsets are directly + // from the .debug_line section. + const file_pos = debug_line_sect.sh_offset + src_fn.off; + try self.pwriteDbgLineNops(prev_padding_size, dbg_line_buffer.items, next_padding_size, file_pos); + + // .debug_info - End the TAG_subprogram children. + try dbg_info_buffer.append(0); + + return self.finishUpdateDecl(module, decl, &dbg_info_type_relocs, &dbg_info_buffer); +} + +pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { + if (build_options.skip_non_native and builtin.object_format != .elf) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl); + } + + const tracy = trace(@src()); + defer tracy.end(); + + if (decl.val.tag() == .extern_fn) { + return; // TODO Should we do more when front-end analyzed extern decl? + } + if (decl.val.castTag(.variable)) |payload| { + const variable = payload.data; + if (variable.is_extern) { + return; // TODO Should we do more when front-end analyzed extern decl? 
+        }
+    }
+
+    var code_buffer = std.ArrayList(u8).init(self.base.allocator);
+    defer code_buffer.deinit();
+
+    var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator);
+    defer dbg_line_buffer.deinit();
+
+    var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator);
+    defer dbg_info_buffer.deinit();
+
+    var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{};
+    defer deinitRelocs(self.base.allocator, &dbg_info_type_relocs);
+
+    // TODO implement .debug_info for global variables
+    const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
+    const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
+        .ty = decl.ty,
+        .val = decl_val,
+    }, &code_buffer, .{
+        .dwarf = .{
+            .dbg_line = &dbg_line_buffer,
+            .dbg_info = &dbg_info_buffer,
+            .dbg_info_type_relocs = &dbg_info_type_relocs,
+        },
+    });
+    const code = switch (res) {
+        .externally_managed => |x| x,
+        .appended => code_buffer.items,
+        .fail => |em| {
+            decl.analysis = .codegen_failure;
+            try module.failed_decls.put(module.gpa, decl, em);
+            return;
+        },
+    };
+
+    _ = try self.updateDeclCode(decl, code, elf.STT_OBJECT);
+    return self.finishUpdateDecl(module, decl, &dbg_info_type_relocs, &dbg_info_buffer);
+}
+
 /// Asserts the type has codegen bits.
 fn addDbgInfoType(self: *Elf, ty: Type, dbg_info_buffer: *std.ArrayList(u8)) !void {
     switch (ty.zigTypeTag()) {
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index df2e0134e4..cd020c1b27 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -1,6 +1,7 @@
 const MachO = @This();
 
 const std = @import("std");
+const builtin = @import("builtin");
 const Allocator = std.mem.Allocator;
 const assert = std.debug.assert;
 const fmt = std.fmt;
@@ -22,6 +23,8 @@ const link = @import("../link.zig");
 const File = link.File;
 const Cache = @import("../Cache.zig");
 const target_util = @import("../target.zig");
+const Air = @import("../Air.zig");
+const Liveness = @import("../Liveness.zig");
 
 const DebugSymbols = @import("MachO/DebugSymbols.zig");
 const Trie = @import("MachO/Trie.zig");
@@ -1132,7 +1135,55 @@ pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void {
     };
 }
 
+pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
+    if (build_options.skip_non_native and builtin.object_format != .macho) {
+        @panic("Attempted to compile for object format that was disabled by build configuration");
+    }
+    if (build_options.have_llvm) {
+        if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness);
+    }
+    const tracy = trace(@src());
+    defer tracy.end();
+
+    const decl = func.owner_decl;
+
+    var code_buffer = std.ArrayList(u8).init(self.base.allocator);
+    defer code_buffer.deinit();
+
+    var debug_buffers = if (self.d_sym) |*ds| try ds.initDeclDebugBuffers(self.base.allocator, module, decl) else null;
+    defer {
+        if (debug_buffers) |*dbg| {
+            dbg.dbg_line_buffer.deinit();
+            dbg.dbg_info_buffer.deinit();
+            var it = dbg.dbg_info_type_relocs.valueIterator();
+            while (it.next()) |value| {
+                value.relocs.deinit(self.base.allocator);
+            }
+            dbg.dbg_info_type_relocs.deinit(self.base.allocator);
+        }
+    }
+
+    const res = if (debug_buffers) |*dbg|
+        try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{
+            .dwarf = .{
+                .dbg_line = &dbg.dbg_line_buffer,
+                .dbg_info = &dbg.dbg_info_buffer,
+                .dbg_info_type_relocs = &dbg.dbg_info_type_relocs,
+            },
+        })
+    else
+        try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none);
+
+    return self.finishUpdateDecl(module, decl, res);
+}
+
 pub fn updateDecl(self:
*MachO, module: *Module, decl: *Module.Decl) !void { + if (build_options.skip_non_native and builtin.object_format != .macho) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl); + } const tracy = trace(@src()); defer tracy.end(); @@ -1173,6 +1224,10 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { .val = decl.val, }, &code_buffer, .none); + return self.finishUpdateDecl(module, decl, res); +} + +fn finishUpdateDecl(self: *MachO, module: *Module, decl: *Module.Decl, res: codegen.Result) !void { const code = switch (res) { .externally_managed => |x| x, .appended => code_buffer.items, diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 80a92f9cdb..bc044ce414 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -2,18 +2,21 @@ //! would be to add incremental linking in a similar way as ELF does. const Plan9 = @This(); - -const std = @import("std"); const link = @import("../link.zig"); const Module = @import("../Module.zig"); const Compilation = @import("../Compilation.zig"); const aout = @import("Plan9/aout.zig"); const codegen = @import("../codegen.zig"); const trace = @import("../tracy.zig").trace; -const mem = std.mem; const File = link.File; -const Allocator = std.mem.Allocator; +const build_options = @import("build_options"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); +const std = @import("std"); +const builtin = @import("builtin"); +const mem = std.mem; +const Allocator = std.mem.Allocator; const log = std.log.scoped(.link); const assert = std.debug.assert; @@ -120,6 +123,19 @@ pub fn createEmpty(gpa: *Allocator, options: link.Options) !*Plan9 { return self; } +pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native and builtin.object_format != .plan9) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + _ = module; + // Keep track of all decls so we can iterate over them on flush(). 
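+    // (The Plan9 backend does all of its codegen in flushModule(), so for now
+    // this hook only records the decl; the panic below marks the missing piece.)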
+ _ = try self.decl_table.getOrPut(self.base.allocator, func.owner_decl); + + _ = air; + _ = liveness; + @panic("TODO Plan9 needs to keep track of Air and Liveness so it can use them later"); +} + pub fn updateDecl(self: *Plan9, module: *Module, decl: *Module.Decl) !void { _ = module; _ = try self.decl_table.getOrPut(self.base.allocator, decl); @@ -138,6 +154,9 @@ pub fn flush(self: *Plan9, comp: *Compilation) !void { } pub fn flushModule(self: *Plan9, comp: *Compilation) !void { + if (build_options.skip_non_native and builtin.object_format != .plan9) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } _ = comp; const tracy = trace(@src()); defer tracy.end(); @@ -199,7 +218,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void { } } if (std.mem.eql(u8, exp.options.name, "_start")) { - std.debug.assert(decl.link.plan9.type == .t); // we tried to link a non-function as the entry + assert(decl.link.plan9.type == .t); // we tried to link a non-function as the entry self.entry_decl = decl; } if (exp.link.plan9) |i| { diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index 8a2e877d42..bc9e560582 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -36,6 +36,8 @@ const ResultId = codegen.ResultId; const trace = @import("../tracy.zig").trace; const build_options = @import("build_options"); const spec = @import("../codegen/spirv/spec.zig"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); // TODO: Should this struct be used at all rather than just a hashmap of aux data for every decl? pub const FnData = struct { @@ -101,7 +103,23 @@ pub fn deinit(self: *SpirV) void { self.decl_table.deinit(self.base.allocator); } +pub fn updateFunc(self: *SpirV, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native) { + @panic("Attempted to compile for architecture that was disabled by build configuration"); + } + _ = module; + // Keep track of all decls so we can iterate over them on flush(). + _ = try self.decl_table.getOrPut(self.base.allocator, func.owner_decl); + + _ = air; + _ = liveness; + @panic("TODO SPIR-V needs to keep track of Air and Liveness so it can use them later"); +} + pub fn updateDecl(self: *SpirV, module: *Module, decl: *Module.Decl) !void { + if (build_options.skip_non_native) { + @panic("Attempted to compile for architecture that was disabled by build configuration"); + } _ = module; // Keep track of all decls so we can iterate over them on flush(). 
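    // (As with Plan9, all lowering happens in flushModule(), so a decl update
    // is pure bookkeeping until then.)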
_ = try self.decl_table.getOrPut(self.base.allocator, decl); @@ -132,13 +150,13 @@ pub fn flush(self: *SpirV, comp: *Compilation) !void { } pub fn flushModule(self: *SpirV, comp: *Compilation) !void { - const tracy = trace(@src()); - defer tracy.end(); - if (build_options.skip_non_native) { @panic("Attempted to compile for architecture that was disabled by build configuration"); } + const tracy = trace(@src()); + defer tracy.end(); + const module = self.base.options.module.?; const target = comp.getTarget(); diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 15a36a4bcc..be6ad78701 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -1,6 +1,7 @@ const Wasm = @This(); const std = @import("std"); +const builtin = @import("builtin"); const mem = std.mem; const Allocator = std.mem.Allocator; const assert = std.debug.assert; @@ -18,6 +19,8 @@ const build_options = @import("build_options"); const wasi_libc = @import("../wasi_libc.zig"); const Cache = @import("../Cache.zig"); const TypedValue = @import("../TypedValue.zig"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); pub const base_tag = link.File.Tag.wasm; @@ -186,11 +189,60 @@ pub fn allocateDeclIndexes(self: *Wasm, decl: *Module.Decl) !void { } } +pub fn updateFunc(self: *Wasm, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native and builtin.object_format != .wasm) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + } + const decl = func.owner_decl; + assert(decl.link.wasm.init); // Must call allocateDeclIndexes() + + const fn_data = &decl.fn_link.wasm; + fn_data.functype.items.len = 0; + fn_data.code.items.len = 0; + fn_data.idx_refs.items.len = 0; + + var context = codegen.Context{ + .gpa = self.base.allocator, + .air = air, + .liveness = liveness, + .values = .{}, + .code = fn_data.code.toManaged(self.base.allocator), + .func_type_data = fn_data.functype.toManaged(self.base.allocator), + .decl = decl, + .err_msg = undefined, + .locals = .{}, + .target = self.base.options.target, + .global_error_set = self.base.options.module.?.global_error_set, + }; + defer context.deinit(); + + // generate the 'code' section for the function declaration + const result = context.genFunc(func) catch |err| switch (err) { + error.CodegenFail => { + decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, decl, context.err_msg); + return; + }, + else => |e| return e, + }; + return self.finishUpdateDecl(decl, result); +} + // Generate code for the Decl, storing it in memory to be later written to // the file on flush(). 
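 // Functions are now handled by updateFunc() above; this path is kept for the
 // remaining decl kinds.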
 pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void {
-    std.debug.assert(decl.link.wasm.init); // Must call allocateDeclIndexes()
+    if (build_options.skip_non_native and builtin.object_format != .wasm) {
+        @panic("Attempted to compile for object format that was disabled by build configuration");
+    }
+    if (build_options.have_llvm) {
+        if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl);
+    }
+    assert(decl.link.wasm.init); // Must call allocateDeclIndexes()
+
+    // TODO don't use this for non-functions
     const fn_data = &decl.fn_link.wasm;
     fn_data.functype.items.len = 0;
     fn_data.code.items.len = 0;
@@ -218,7 +270,10 @@ pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void {
         },
         else => |e| return e,
     };
+    return self.finishUpdateDecl(decl, result);
+}
 
+fn finishUpdateDecl(self: *Wasm, decl: *Module.Decl, result: codegen.Result) !void {
     const code: []const u8 = switch (result) {
         .appended => @as([]const u8, context.code.items),
         .externally_managed => |payload| payload,
@@ -521,7 +576,7 @@ pub fn flushModule(self: *Wasm, comp: *Compilation) !void {
     var data_offset = offset_table_size;
     while (cur) |cur_block| : (cur = cur_block.next) {
         if (cur_block.size == 0) continue;
-        std.debug.assert(cur_block.init);
+        assert(cur_block.init);
         const offset = (cur_block.offset_index) * ptr_width;
         var buf: [4]u8 = undefined;
--
cgit v1.2.3

From 424f260f850cb22637888bbfdf5bfaf9c08a4dae Mon Sep 17 00:00:00 2001
From: Luuk de Gram
Date: Fri, 16 Jul 2021 14:48:51 +0200
Subject: Fix wasm-related compile errors:

- Update `fail()` to not require a `srcLoc`. This brings it in line with
  other backends, and we were always passing 'node_offset = 0' anyway.
- Fix an unused local resulting from the architectural split between
  function and decl generation.
- Replace the old instruction pointers with instruction indexes in the
  function signatures.
---
 src/codegen/wasm.zig | 221 +++++++++++++++++++++++++--------------------------
 src/link/Wasm.zig    |  17 ++--
 2 files changed, 118 insertions(+), 120 deletions(-)

(limited to 'src/codegen/wasm.zig')

diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig
index 912577a358..33ab07faf3 100644
--- a/src/codegen/wasm.zig
+++ b/src/codegen/wasm.zig
@@ -25,7 +25,7 @@ const WValue = union(enum) {
     /// Index of the local variable
     local: u32,
     /// Instruction holding a constant `Value`
-    constant: *Inst,
+    constant: Air.Inst.Index,
     /// Offset position in the list of bytecode instructions
     code_offset: usize,
     /// Used for variables that create multiple locals on the stack when allocated
@@ -484,7 +484,7 @@ pub const Result = union(enum) {
 };
 
 /// Hashmap to store generated `WValue` for each `Inst`
-pub const ValueTable = std.AutoHashMapUnmanaged(*Inst, WValue);
+pub const ValueTable = std.AutoHashMapUnmanaged(Air.Inst.Index, WValue);
 
 /// Code represents the `Code` section of wasm that
 /// belongs to a function
@@ -497,8 +497,8 @@ pub const Context = struct {
     gpa: *mem.Allocator,
     /// Table to save `WValue`'s generated by an `Inst`
     values: ValueTable,
-    /// Mapping from *Inst.Block to block ids
-    blocks: std.AutoArrayHashMapUnmanaged(*Inst.Block, u32) = .{},
+    /// Mapping from Air.Inst.Index to block ids
+    blocks: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, u32) = .{},
     /// `code` contains the wasm bytecode belonging to the 'code' section.
     code: ArrayList(u8),
     /// Contains the generated function type bytecode for the current function
@@ -538,7 +538,8 @@
     }
 
     /// Sets `err_msg` on `Context` and returns `error.CodegenFail` which is caught in link/Wasm.zig
-    fn fail(self: *Context, src: LazySrcLoc, comptime fmt: []const u8, args: anytype) InnerError {
+    fn fail(self: *Context, comptime fmt: []const u8, args: anytype) InnerError {
+        const src: LazySrcLoc = .{ .node_offset = 0 };
         const src_loc = src.toSrcLocWithDecl(self.decl);
         self.err_msg = try Module.ErrorMsg.create(self.gpa, src_loc, fmt, args);
         return error.CodegenFail;
@@ -546,7 +547,7 @@
     }
 
     /// Resolves the `WValue` for the given instruction `inst`
     /// When the given instruction has a `Value`, it returns a constant instead
-    fn resolveInst(self: Context, inst: *Inst) WValue {
+    fn resolveInst(self: Context, inst: Air.Inst.Index) WValue {
         if (!inst.ty.hasCodeGenBits()) return .none;
 
         if (inst.value()) |_| {
@@ -557,48 +558,45 @@
     }
 
     /// Using a given `Type`, returns the corresponding wasm Valtype
-    fn typeToValtype(self: *Context, src: LazySrcLoc, ty: Type) InnerError!wasm.Valtype {
+    fn typeToValtype(self: *Context, ty: Type) InnerError!wasm.Valtype {
         return switch (ty.zigTypeTag()) {
             .Float => blk: {
                 const bits = ty.floatBits(self.target);
                 if (bits == 16 or bits == 32) break :blk wasm.Valtype.f32;
                 if (bits == 64) break :blk wasm.Valtype.f64;
-                return self.fail(src, "Float bit size not supported by wasm: '{d}'", .{bits});
+                return self.fail("Float bit size not supported by wasm: '{d}'", .{bits});
             },
             .Int => blk: {
                 const info = ty.intInfo(self.target);
                 if (info.bits <= 32) break :blk wasm.Valtype.i32;
                 if (info.bits > 32 and info.bits <= 64) break :blk wasm.Valtype.i64;
-                return self.fail(src, "Integer bit size not supported by wasm: '{d}'", .{info.bits});
+                return self.fail("Integer bit size not supported by wasm: '{d}'", .{info.bits});
             },
             .Enum => switch (ty.tag()) {
                 .enum_simple => wasm.Valtype.i32,
-                else => self.typeToValtype(
-                    src,
-                    ty.cast(Type.Payload.EnumFull).?.data.tag_ty,
-                ),
+                else => self.typeToValtype(ty.cast(Type.Payload.EnumFull).?.data.tag_ty),
             },
             .Bool,
             .Pointer,
             .ErrorSet,
             => wasm.Valtype.i32,
             .Struct, .ErrorUnion => unreachable, // Multi typed, must be handled individually.
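            // (Wasm has only four value types -- i32, i64, f32 and f64 -- which
            // is why bools, pointers and error sets all lower to i32 above.)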
-            else => self.fail(src, "TODO - Wasm valtype for type '{s}'", .{ty.zigTypeTag()}),
+            else => self.fail("TODO - Wasm valtype for type '{s}'", .{ty.zigTypeTag()}),
         };
     }
 
     /// Using a given `Type`, returns the byte representation of its wasm value type
-    fn genValtype(self: *Context, src: LazySrcLoc, ty: Type) InnerError!u8 {
-        return wasm.valtype(try self.typeToValtype(src, ty));
+    fn genValtype(self: *Context, ty: Type) InnerError!u8 {
+        return wasm.valtype(try self.typeToValtype(ty));
     }
 
     /// Using a given `Type`, returns the corresponding wasm value type
     /// Unlike `genValtype`, this also allows `void`, creating a block
     /// with no return type
-    fn genBlockType(self: *Context, src: LazySrcLoc, ty: Type) InnerError!u8 {
+    fn genBlockType(self: *Context, ty: Type) InnerError!u8 {
         return switch (ty.tag()) {
             .void, .noreturn => wasm.block_empty,
-            else => self.genValtype(src, ty),
+            else => self.genValtype(ty),
         };
     }
 
@@ -612,7 +610,7 @@
                 try writer.writeByte(wasm.opcode(.local_get));
                 try leb.writeULEB128(writer, idx);
             },
-            .constant => |inst| try self.emitConstant(inst.src, inst.value().?, inst.ty), // creates a new constant onto the stack
+            .constant => |inst| try self.emitConstant(inst.value().?, inst.ty), // creates a new constant onto the stack
         }
     }
 
@@ -682,7 +680,7 @@
         ty.fnParamTypes(params);
         for (params) |param_type| {
             // Can we maybe get the source index of each param?
-            const val_type = try self.genValtype(.{ .node_offset = 0 }, param_type);
+            const val_type = try self.genValtype(param_type);
             try writer.writeByte(val_type);
         }
     }
 
@@ -691,13 +689,10 @@
         const return_type = ty.fnReturnType();
         switch (return_type.zigTypeTag()) {
             .Void, .NoReturn => try leb.writeULEB128(writer, @as(u32, 0)),
-            .Struct => return self.fail(.{ .node_offset = 0 }, "TODO: Implement struct as return type for wasm", .{}),
-            .Optional => return self.fail(.{ .node_offset = 0 }, "TODO: Implement optionals as return type for wasm", .{}),
+            .Struct => return self.fail("TODO: Implement struct as return type for wasm", .{}),
+            .Optional => return self.fail("TODO: Implement optionals as return type for wasm", .{}),
             .ErrorUnion => {
-                const val_type = try self.genValtype(
-                    .{ .node_offset = 0 },
-                    return_type.errorUnionChild(),
-                );
+                const val_type = try self.genValtype(return_type.errorUnionChild());
 
                 // write down the amount of return values
                 try leb.writeULEB128(writer, @as(u32, 2));
@@ -707,22 +702,21 @@
             else => {
                 try leb.writeULEB128(writer, @as(u32, 1));
                 // Can we maybe get the source index of the return type?
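                // (A functype result list is encoded as a count followed by that
                // many valtypes; the error-union case above emits two results,
                // which presumably relies on wasm's multi-value feature.)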
- const val_type = try self.genValtype(.{ .node_offset = 0 }, return_type); + const val_type = try self.genValtype(return_type); try writer.writeByte(val_type); }, } } pub fn genFunc(self: *Context, func: *Module.Fn) InnerError!Result { + _ = func; try self.genFunctype(); - - // Write instructions // TODO: check for and handle death of instructions // Reserve space to write the size after generating the code as well as space for locals count try self.code.resize(10); - try self.genBody(func.body); + try self.genBody(self.air.getMainBody()); // finally, write our local types at the 'offset' position { @@ -753,7 +747,7 @@ pub const Context = struct { return Result.appended; } - /// Generates the wasm bytecode for the function declaration belonging to `Context` + /// Generates the wasm bytecode for the declaration belonging to `Context` pub fn gen(self: *Context, typed_value: TypedValue) InnerError!Result { switch (typed_value.ty.zigTypeTag()) { .Fn => { @@ -793,58 +787,59 @@ pub const Context = struct { } } - fn genInst(self: *Context, inst: *Inst) InnerError!WValue { - return switch (inst.tag) { - .add => self.genBinOp(inst.castTag(.add).?, .add), - .alloc => self.genAlloc(inst.castTag(.alloc).?), - .arg => self.genArg(inst.castTag(.arg).?), - .bit_and => self.genBinOp(inst.castTag(.bit_and).?, .@"and"), - .bitcast => self.genBitcast(inst.castTag(.bitcast).?), - .bit_or => self.genBinOp(inst.castTag(.bit_or).?, .@"or"), - .block => self.genBlock(inst.castTag(.block).?), - .bool_and => self.genBinOp(inst.castTag(.bool_and).?, .@"and"), - .bool_or => self.genBinOp(inst.castTag(.bool_or).?, .@"or"), - .breakpoint => self.genBreakpoint(inst.castTag(.breakpoint).?), - .br => self.genBr(inst.castTag(.br).?), - .call => self.genCall(inst.castTag(.call).?), - .cmp_eq => self.genCmp(inst.castTag(.cmp_eq).?, .eq), - .cmp_gte => self.genCmp(inst.castTag(.cmp_gte).?, .gte), - .cmp_gt => self.genCmp(inst.castTag(.cmp_gt).?, .gt), - .cmp_lte => self.genCmp(inst.castTag(.cmp_lte).?, .lte), - .cmp_lt => self.genCmp(inst.castTag(.cmp_lt).?, .lt), - .cmp_neq => self.genCmp(inst.castTag(.cmp_neq).?, .neq), - .condbr => self.genCondBr(inst.castTag(.condbr).?), - .constant => unreachable, - .dbg_stmt => WValue.none, - .div => self.genBinOp(inst.castTag(.div).?, .div), - .is_err => self.genIsErr(inst.castTag(.is_err).?, .i32_ne), - .is_non_err => self.genIsErr(inst.castTag(.is_non_err).?, .i32_eq), - .load => self.genLoad(inst.castTag(.load).?), - .loop => self.genLoop(inst.castTag(.loop).?), - .mul => self.genBinOp(inst.castTag(.mul).?, .mul), - .not => self.genNot(inst.castTag(.not).?), - .ret => self.genRet(inst.castTag(.ret).?), - .retvoid => WValue.none, - .store => self.genStore(inst.castTag(.store).?), - .struct_field_ptr => self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?), - .sub => self.genBinOp(inst.castTag(.sub).?, .sub), - .switchbr => self.genSwitchBr(inst.castTag(.switchbr).?), - .unreach => self.genUnreachable(inst.castTag(.unreach).?), - .unwrap_errunion_payload => self.genUnwrapErrUnionPayload(inst.castTag(.unwrap_errunion_payload).?), - .wrap_errunion_payload => self.genWrapErrUnionPayload(inst.castTag(.wrap_errunion_payload).?), - .xor => self.genBinOp(inst.castTag(.xor).?, .xor), - else => self.fail(.{ .node_offset = 0 }, "TODO: Implement wasm inst: {s}", .{inst.tag}), + fn genInst(self: *Context, inst: Air.Inst.Index) !WValue { + const air_tags = self.air.instructions.items(.tag); + return switch (air_tags[inst]) { + // .add => self.genBinOp(inst.castTag(.add).?, .add), + // .alloc 
=> self.genAlloc(inst.castTag(.alloc).?), + // .arg => self.genArg(inst.castTag(.arg).?), + // .bit_and => self.genBinOp(inst.castTag(.bit_and).?, .@"and"), + // .bitcast => self.genBitcast(inst.castTag(.bitcast).?), + // .bit_or => self.genBinOp(inst.castTag(.bit_or).?, .@"or"), + // .block => self.genBlock(inst.castTag(.block).?), + // .bool_and => self.genBinOp(inst.castTag(.bool_and).?, .@"and"), + // .bool_or => self.genBinOp(inst.castTag(.bool_or).?, .@"or"), + // .breakpoint => self.genBreakpoint(inst.castTag(.breakpoint).?), + // .br => self.genBr(inst.castTag(.br).?), + // .call => self.genCall(inst.castTag(.call).?), + // .cmp_eq => self.genCmp(inst.castTag(.cmp_eq).?, .eq), + // .cmp_gte => self.genCmp(inst.castTag(.cmp_gte).?, .gte), + // .cmp_gt => self.genCmp(inst.castTag(.cmp_gt).?, .gt), + // .cmp_lte => self.genCmp(inst.castTag(.cmp_lte).?, .lte), + // .cmp_lt => self.genCmp(inst.castTag(.cmp_lt).?, .lt), + // .cmp_neq => self.genCmp(inst.castTag(.cmp_neq).?, .neq), + // .condbr => self.genCondBr(inst.castTag(.condbr).?), + // .constant => unreachable, + // .dbg_stmt => WValue.none, + // .div => self.genBinOp(inst.castTag(.div).?, .div), + // .is_err => self.genIsErr(inst.castTag(.is_err).?, .i32_ne), + // .is_non_err => self.genIsErr(inst.castTag(.is_non_err).?, .i32_eq), + // .load => self.genLoad(inst.castTag(.load).?), + // .loop => self.genLoop(inst.castTag(.loop).?), + // .mul => self.genBinOp(inst.castTag(.mul).?, .mul), + // .not => self.genNot(inst.castTag(.not).?), + // .ret => self.genRet(inst.castTag(.ret).?), + // .retvoid => WValue.none, + // .store => self.genStore(inst.castTag(.store).?), + // .struct_field_ptr => self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?), + // .sub => self.genBinOp(inst.castTag(.sub).?, .sub), + // .switchbr => self.genSwitchBr(inst.castTag(.switchbr).?), + // .unreach => self.genUnreachable(inst.castTag(.unreach).?), + // .unwrap_errunion_payload => self.genUnwrapErrUnionPayload(inst.castTag(.unwrap_errunion_payload).?), + // .wrap_errunion_payload => self.genWrapErrUnionPayload(inst.castTag(.wrap_errunion_payload).?), + // .xor => self.genBinOp(inst.castTag(.xor).?, .xor), + else => |tag| self.fail("TODO: Implement wasm inst: {s}", .{@tagName(tag)}), }; } - fn genBody(self: *Context, body: ir.Body) InnerError!void { - for (body.instructions) |inst| { + fn genBody(self: *Context, body: []const Air.Inst.Index) InnerError!void { + for (body) |inst| { const result = try self.genInst(inst); try self.values.putNoClobber(self.gpa, inst, result); } } - fn genRet(self: *Context, inst: *Inst.UnOp) InnerError!WValue { + fn genRet(self: *Context, inst: Air.Inst.Index) InnerError!WValue { // TODO: Implement tail calls const operand = self.resolveInst(inst.operand); try self.emitWValue(operand); @@ -852,7 +847,7 @@ pub const Context = struct { return .none; } - fn genCall(self: *Context, inst: *Inst.Call) InnerError!WValue { + fn genCall(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const func_val = inst.func.value().?; const target: *Decl = blk: { @@ -861,7 +856,7 @@ pub const Context = struct { } else if (func_val.castTag(.extern_fn)) |ext_fn| { break :blk ext_fn.data; } - return self.fail(inst.base.src, "Expected a function, but instead found type '{s}'", .{func_val.tag()}); + return self.fail("Expected a function, but instead found type '{s}'", .{func_val.tag()}); }; for (inst.args) |arg| { @@ -881,12 +876,12 @@ pub const Context = struct { return .none; } - fn genAlloc(self: *Context, inst: *Inst.NoOp) 
InnerError!WValue { + fn genAlloc(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const elem_type = inst.base.ty.elemType(); return self.allocLocal(elem_type); } - fn genStore(self: *Context, inst: *Inst.BinOp) InnerError!WValue { + fn genStore(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const writer = self.code.writer(); const lhs = self.resolveInst(inst.lhs); @@ -924,18 +919,18 @@ pub const Context = struct { return .none; } - fn genLoad(self: *Context, inst: *Inst.UnOp) InnerError!WValue { + fn genLoad(self: *Context, inst: Air.Inst.Index) InnerError!WValue { return self.resolveInst(inst.operand); } - fn genArg(self: *Context, inst: *Inst.Arg) InnerError!WValue { + fn genArg(self: *Context, inst: Air.Inst.Index) InnerError!WValue { _ = inst; // arguments share the index with locals defer self.local_index += 1; return WValue{ .local = self.local_index }; } - fn genBinOp(self: *Context, inst: *Inst.BinOp, op: Op) InnerError!WValue { + fn genBinOp(self: *Context, inst: Air.Inst.Index, op: Op) InnerError!WValue { const lhs = self.resolveInst(inst.lhs); const rhs = self.resolveInst(inst.rhs); @@ -952,21 +947,21 @@ pub const Context = struct { const opcode: wasm.Opcode = buildOpcode(.{ .op = op, - .valtype1 = try self.typeToValtype(inst.base.src, inst.base.ty), + .valtype1 = try self.typeToValtype(inst.base.ty), .signedness = if (inst.base.ty.isSignedInt()) .signed else .unsigned, }); try self.code.append(wasm.opcode(opcode)); return WValue{ .code_offset = offset }; } - fn emitConstant(self: *Context, src: LazySrcLoc, value: Value, ty: Type) InnerError!void { + fn emitConstant(self: *Context, value: Value, ty: Type) InnerError!void { const writer = self.code.writer(); switch (ty.zigTypeTag()) { .Int => { // write opcode const opcode: wasm.Opcode = buildOpcode(.{ .op = .@"const", - .valtype1 = try self.typeToValtype(src, ty), + .valtype1 = try self.typeToValtype(ty), }); try writer.writeByte(wasm.opcode(opcode)); // write constant @@ -985,14 +980,14 @@ pub const Context = struct { // write opcode const opcode: wasm.Opcode = buildOpcode(.{ .op = .@"const", - .valtype1 = try self.typeToValtype(src, ty), + .valtype1 = try self.typeToValtype(ty), }); try writer.writeByte(wasm.opcode(opcode)); // write constant switch (ty.floatBits(self.target)) { 0...32 => try writer.writeIntLittle(u32, @bitCast(u32, value.toFloat(f32))), 64 => try writer.writeIntLittle(u64, @bitCast(u64, value.toFloat(f64))), - else => |bits| return self.fail(src, "Wasm TODO: emitConstant for float with {d} bits", .{bits}), + else => |bits| return self.fail("Wasm TODO: emitConstant for float with {d} bits", .{bits}), } }, .Pointer => { @@ -1009,7 +1004,7 @@ pub const Context = struct { try writer.writeByte(wasm.opcode(.i32_load)); try leb.writeULEB128(writer, @as(u32, 0)); try leb.writeULEB128(writer, @as(u32, 0)); - } else return self.fail(src, "Wasm TODO: emitConstant for other const pointer tag {s}", .{value.tag()}); + } else return self.fail("Wasm TODO: emitConstant for other const pointer tag {s}", .{value.tag()}); }, .Void => {}, .Enum => { @@ -1023,7 +1018,7 @@ pub const Context = struct { const enum_full = ty.cast(Type.Payload.EnumFull).?.data; if (enum_full.values.count() != 0) { const tag_val = enum_full.values.keys()[field_index.data]; - try self.emitConstant(src, tag_val, enum_full.tag_ty); + try self.emitConstant(tag_val, enum_full.tag_ty); } else { try writer.writeByte(wasm.opcode(.i32_const)); try leb.writeULEB128(writer, field_index.data); @@ -1034,7 +1029,7 @@ pub const Context = struct { 
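                    // (No explicit tag values were declared, so the field index
                    // itself is the enum tag's integer value.)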
} else { var int_tag_buffer: Type.Payload.Bits = undefined; const int_tag_ty = ty.intTagType(&int_tag_buffer); - try self.emitConstant(src, value, int_tag_ty); + try self.emitConstant(value, int_tag_ty); } }, .ErrorSet => { @@ -1048,12 +1043,12 @@ pub const Context = struct { const payload_type = ty.errorUnionChild(); if (value.getError()) |_| { // write the error value - try self.emitConstant(src, data, error_type); + try self.emitConstant(data, error_type); // no payload, so write a '0' const const opcode: wasm.Opcode = buildOpcode(.{ .op = .@"const", - .valtype1 = try self.typeToValtype(src, payload_type), + .valtype1 = try self.typeToValtype(payload_type), }); try writer.writeByte(wasm.opcode(opcode)); try leb.writeULEB128(writer, @as(u32, 0)); @@ -1062,15 +1057,15 @@ pub const Context = struct { try writer.writeByte(wasm.opcode(.i32_const)); try leb.writeULEB128(writer, @as(u32, 0)); // after the error code, we emit the payload - try self.emitConstant(src, data, payload_type); + try self.emitConstant(data, payload_type); } }, - else => |zig_type| return self.fail(src, "Wasm TODO: emitConstant for zigTypeTag {s}", .{zig_type}), + else => |zig_type| return self.fail("Wasm TODO: emitConstant for zigTypeTag {s}", .{zig_type}), } } - fn genBlock(self: *Context, block: *Inst.Block) InnerError!WValue { - const block_ty = try self.genBlockType(block.base.src, block.base.ty); + fn genBlock(self: *Context, block: Air.Inst.Index) InnerError!WValue { + const block_ty = try self.genBlockType(block.base.ty); try self.startBlock(.block, block_ty, null); // Here we set the current block idx, so breaks know the depth to jump @@ -1100,8 +1095,8 @@ pub const Context = struct { self.block_depth -= 1; } - fn genLoop(self: *Context, loop: *Inst.Loop) InnerError!WValue { - const loop_ty = try self.genBlockType(loop.base.src, loop.base.ty); + fn genLoop(self: *Context, loop: Air.Inst.Index) InnerError!WValue { + const loop_ty = try self.genBlockType(loop.base.ty); try self.startBlock(.loop, loop_ty, null); try self.genBody(loop.body); @@ -1115,7 +1110,7 @@ pub const Context = struct { return .none; } - fn genCondBr(self: *Context, condbr: *Inst.CondBr) InnerError!WValue { + fn genCondBr(self: *Context, condbr: Air.Inst.Index) InnerError!WValue { const condition = self.resolveInst(condbr.condition); const writer = self.code.writer(); @@ -1131,7 +1126,7 @@ pub const Context = struct { break :blk offset; }, }; - const block_ty = try self.genBlockType(condbr.base.src, condbr.base.ty); + const block_ty = try self.genBlockType(condbr.base.ty); try self.startBlock(.block, block_ty, offset); // we inserted the block in front of the condition @@ -1149,7 +1144,7 @@ pub const Context = struct { return .none; } - fn genCmp(self: *Context, inst: *Inst.BinOp, op: std.math.CompareOperator) InnerError!WValue { + fn genCmp(self: *Context, inst: Air.Inst.Index, op: std.math.CompareOperator) InnerError!WValue { // save offset, so potential conditions can insert blocks in front of // the comparison that we can later jump back to const offset = self.code.items.len; @@ -1168,7 +1163,7 @@ pub const Context = struct { break :blk inst.lhs.ty.intInfo(self.target).signedness; }; const opcode: wasm.Opcode = buildOpcode(.{ - .valtype1 = try self.typeToValtype(inst.base.src, inst.lhs.ty), + .valtype1 = try self.typeToValtype(inst.lhs.ty), .op = switch (op) { .lt => .lt, .lte => .le, @@ -1183,7 +1178,7 @@ pub const Context = struct { return WValue{ .code_offset = offset }; } - fn genBr(self: *Context, br: *Inst.Br) InnerError!WValue { + 
fn genBr(self: *Context, br: Air.Inst.Index) InnerError!WValue { // if operand has codegen bits we should break with a value if (br.operand.ty.hasCodeGenBits()) { const operand = self.resolveInst(br.operand); @@ -1200,7 +1195,7 @@ pub const Context = struct { return .none; } - fn genNot(self: *Context, not: *Inst.UnOp) InnerError!WValue { + fn genNot(self: *Context, not: Air.Inst.Index) InnerError!WValue { const offset = self.code.items.len; const operand = self.resolveInst(not.operand); @@ -1217,7 +1212,7 @@ pub const Context = struct { return WValue{ .code_offset = offset }; } - fn genBreakpoint(self: *Context, breakpoint: *Inst.NoOp) InnerError!WValue { + fn genBreakpoint(self: *Context, breakpoint: Air.Inst.Index) InnerError!WValue { _ = self; _ = breakpoint; // unsupported by wasm itself. Can be implemented once we support DWARF @@ -1225,27 +1220,27 @@ pub const Context = struct { return .none; } - fn genUnreachable(self: *Context, unreach: *Inst.NoOp) InnerError!WValue { + fn genUnreachable(self: *Context, unreach: Air.Inst.Index) InnerError!WValue { _ = unreach; try self.code.append(wasm.opcode(.@"unreachable")); return .none; } - fn genBitcast(self: *Context, bitcast: *Inst.UnOp) InnerError!WValue { + fn genBitcast(self: *Context, bitcast: Air.Inst.Index) InnerError!WValue { return self.resolveInst(bitcast.operand); } - fn genStructFieldPtr(self: *Context, inst: *Inst.StructFieldPtr) InnerError!WValue { + fn genStructFieldPtr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const struct_ptr = self.resolveInst(inst.struct_ptr); return WValue{ .local = struct_ptr.multi_value.index + @intCast(u32, inst.field_index) }; } - fn genSwitchBr(self: *Context, inst: *Inst.SwitchBr) InnerError!WValue { + fn genSwitchBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const target = self.resolveInst(inst.target); const target_ty = inst.target.ty; const valtype = try self.typeToValtype(.{ .node_offset = 0 }, target_ty); - const blocktype = try self.genBlockType(inst.base.src, inst.base.ty); + const blocktype = try self.genBlockType(inst.base.ty); const signedness: std.builtin.Signedness = blk: { // by default we tell the operand type is unsigned (i.e. bools and enum values) @@ -1282,7 +1277,7 @@ pub const Context = struct { return .none; } - fn genIsErr(self: *Context, inst: *Inst.UnOp, opcode: wasm.Opcode) InnerError!WValue { + fn genIsErr(self: *Context, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!WValue { const operand = self.resolveInst(inst.operand); const offset = self.code.items.len; const writer = self.code.writer(); @@ -1298,7 +1293,7 @@ pub const Context = struct { return WValue{ .code_offset = offset }; } - fn genUnwrapErrUnionPayload(self: *Context, inst: *Inst.UnOp) InnerError!WValue { + fn genUnwrapErrUnionPayload(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const operand = self.resolveInst(inst.operand); // The index of multi_value contains the error code. To get the initial index of the payload we get // the following index. 
Next, convert it to a `WValue.local` @@ -1307,7 +1302,7 @@ pub const Context = struct { return WValue{ .local = operand.multi_value.index + 1 }; } - fn genWrapErrUnionPayload(self: *Context, inst: *Inst.UnOp) InnerError!WValue { + fn genWrapErrUnionPayload(self: *Context, inst: Air.Inst.Index) InnerError!WValue { return self.resolveInst(inst.operand); } }; diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index be6ad78701..1387615d15 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -228,7 +228,7 @@ pub fn updateFunc(self: *Wasm, module: *Module, func: *Module.Fn, air: Air, live }, else => |e| return e, }; - return self.finishUpdateDecl(decl, result); + return self.finishUpdateDecl(decl, result, &context); } // Generate code for the Decl, storing it in memory to be later written to @@ -270,18 +270,21 @@ pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void { }, else => |e| return e, }; - return self.finishUpdateDecl(decl, result); + + return self.finishUpdateDecl(decl, result, &context); } -fn finishUpdateDecl(self: *Wasm, decl: *Module.Decl, result: codegen.Result) !void { - const code: []const u8 = switch (result) { - .appended => @as([]const u8, context.code.items), - .externally_managed => |payload| payload, - }; +fn finishUpdateDecl(self: *Wasm, decl: *Module.Decl, result: codegen.Result, context: *codegen.Context) !void { + const fn_data: *FnData = &decl.fn_link.wasm; fn_data.code = context.code.toUnmanaged(); fn_data.functype = context.func_type_data.toUnmanaged(); + const code: []const u8 = switch (result) { + .appended => @as([]const u8, fn_data.code.items), + .externally_managed => |payload| payload, + }; + const block = &decl.link.wasm; if (decl.ty.zigTypeTag() == .Fn) { // as locals are patched afterwards, the offsets of funcidx's are off, -- cgit v1.2.3 From 2438f61f1c37aefa16852130370df44b3fabf785 Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Fri, 16 Jul 2021 22:43:06 +0200 Subject: Refactor entire wasm-backend to use new AIR memory layout --- src/codegen/wasm.zig | 275 ++++++++++++++++++++++++++++++--------------------- src/link/Wasm.zig | 2 +- 2 files changed, 161 insertions(+), 116 deletions(-) (limited to 'src/codegen/wasm.zig') diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig index 33ab07faf3..5cf3fb15fd 100644 --- a/src/codegen/wasm.zig +++ b/src/codegen/wasm.zig @@ -483,8 +483,8 @@ pub const Result = union(enum) { externally_managed: []const u8, }; -/// Hashmap to store generated `WValue` for each `Inst` -pub const ValueTable = std.AutoHashMapUnmanaged(Air.Inst.Index, WValue); +/// Hashmap to store generated `WValue` for each `Air.Inst.Ref` +pub const ValueTable = std.AutoHashMapUnmanaged(Air.Inst.Ref, WValue); /// Code represents the `Code` section of wasm that /// belongs to a function @@ -495,7 +495,7 @@ pub const Context = struct { air: Air, liveness: Liveness, gpa: *mem.Allocator, - /// Table to save `WValue`'s generated by an `Inst` + /// Table to save `WValue`'s generated by an `Air.Inst` values: ValueTable, /// Mapping from Air.Inst.Index to block ids blocks: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, u32) = .{}, @@ -547,14 +547,15 @@ pub const Context = struct { /// Resolves the `WValue` for the given instruction `inst` /// When the given instruction has a `Value`, it returns a constant instead - fn resolveInst(self: Context, inst: Air.Inst) Index { - if (!inst.ty.hasCodeGenBits()) return .none; + fn resolveInst(self: Context, ref: Air.Inst.Ref) WValue { + const ref_type = self.air.getRefType(ref); + if 
(!ref_type.hasCodeGenBits()) return .none; - if (inst.value()) |_| { - return WValue{ .constant = inst }; + if (self.air.instructions.items(.tag)[@enumToInt(ref)] == .constant) { + return WValue{ .constant = @enumToInt(ref) }; } - return self.values.get(inst).?; // Instruction does not dominate all uses! + return self.values.get(ref).?; // Instruction does not dominate all uses! } /// Using a given `Type`, returns the corresponding wasm Valtype @@ -610,7 +611,12 @@ try writer.writeByte(wasm.opcode(.local_get)); try leb.writeULEB128(writer, idx); }, - .constant => |inst| try self.emitConstant(inst.value().?, inst.ty), // creates a new constant onto the stack + .constant => |index| { + const ty_pl = self.air.instructions.items(.data)[index].ty_pl; + const value = self.air.values[ty_pl.payload]; + // create a new constant onto the stack + try self.emitConstant(value, self.air.getRefType(ty_pl.ty)); + }, } } @@ -626,10 +632,7 @@ const fields_len = @intCast(u32, struct_data.fields.count()); try self.locals.ensureCapacity(self.gpa, self.locals.items.len + fields_len); for (struct_data.fields.values()) |*value| { - const val_type = try self.genValtype( - .{ .node_offset = struct_data.node_offset }, - value.ty, - ); + const val_type = try self.genValtype(value.ty); self.locals.appendAssumeCapacity(val_type); self.local_index += 1; } @@ -640,7 +643,7 @@ }, .ErrorUnion => { const payload_type = ty.errorUnionChild(); - const val_type = try self.genValtype(.{ .node_offset = 0 }, payload_type); + const val_type = try self.genValtype(payload_type); // we emit the error value as the first local, and the payload as the following. // The first local is also used to find the index of the error and payload.
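For reference, the `Air.Inst.Ref` handling above hinges on one convention: a `Ref` either names a well-known interned value or, past a reserved prefix, an instruction index. A minimal sketch of that mapping, not part of the patch, assuming the first `typed_value_map.len` tags are the reserved ones (the `refToIndex` and `@enumToInt` usage in these diffs suggests this layout, but `Air.zig` itself is not shown here):

    // Sketch: turn a Ref into an instruction index; Refs below the
    // reserved prefix denote interned constants instead of instructions.
    pub fn refToIndex(ref: Air.Inst.Ref) ?Air.Inst.Index {
        const ref_int = @enumToInt(ref);
        if (ref_int >= Air.Inst.Ref.typed_value_map.len) {
            return ref_int - @intCast(u32, Air.Inst.Ref.typed_value_map.len);
        }
        return null; // an interned value; look it up in typed_value_map
    }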
@@ -657,7 +660,7 @@ pub const Context = struct { } }; }, else => { - const valtype = try self.genValtype(.{ .node_offset = 0 }, ty); + const valtype = try self.genValtype(ty); try self.locals.append(self.gpa, valtype); self.local_index += 1; return WValue{ .local = initial_index }; @@ -708,8 +711,7 @@ pub const Context = struct { } } - pub fn genFunc(self: *Context, func: *Module.Fn) InnerError!Result { - _ = func; + pub fn genFunc(self: *Context) InnerError!Result { try self.genFunctype(); // TODO: check for and handle death of instructions @@ -790,44 +792,43 @@ pub const Context = struct { fn genInst(self: *Context, inst: Air.Inst.Index) !WValue { const air_tags = self.air.instructions.items(.tag); return switch (air_tags[inst]) { - // .add => self.genBinOp(inst.castTag(.add).?, .add), - // .alloc => self.genAlloc(inst.castTag(.alloc).?), - // .arg => self.genArg(inst.castTag(.arg).?), - // .bit_and => self.genBinOp(inst.castTag(.bit_and).?, .@"and"), - // .bitcast => self.genBitcast(inst.castTag(.bitcast).?), - // .bit_or => self.genBinOp(inst.castTag(.bit_or).?, .@"or"), - // .block => self.genBlock(inst.castTag(.block).?), - // .bool_and => self.genBinOp(inst.castTag(.bool_and).?, .@"and"), - // .bool_or => self.genBinOp(inst.castTag(.bool_or).?, .@"or"), - // .breakpoint => self.genBreakpoint(inst.castTag(.breakpoint).?), - // .br => self.genBr(inst.castTag(.br).?), - // .call => self.genCall(inst.castTag(.call).?), - // .cmp_eq => self.genCmp(inst.castTag(.cmp_eq).?, .eq), - // .cmp_gte => self.genCmp(inst.castTag(.cmp_gte).?, .gte), - // .cmp_gt => self.genCmp(inst.castTag(.cmp_gt).?, .gt), - // .cmp_lte => self.genCmp(inst.castTag(.cmp_lte).?, .lte), - // .cmp_lt => self.genCmp(inst.castTag(.cmp_lt).?, .lt), - // .cmp_neq => self.genCmp(inst.castTag(.cmp_neq).?, .neq), - // .condbr => self.genCondBr(inst.castTag(.condbr).?), - // .constant => unreachable, - // .dbg_stmt => WValue.none, - // .div => self.genBinOp(inst.castTag(.div).?, .div), - // .is_err => self.genIsErr(inst.castTag(.is_err).?, .i32_ne), - // .is_non_err => self.genIsErr(inst.castTag(.is_non_err).?, .i32_eq), - // .load => self.genLoad(inst.castTag(.load).?), - // .loop => self.genLoop(inst.castTag(.loop).?), - // .mul => self.genBinOp(inst.castTag(.mul).?, .mul), - // .not => self.genNot(inst.castTag(.not).?), - // .ret => self.genRet(inst.castTag(.ret).?), - // .retvoid => WValue.none, - // .store => self.genStore(inst.castTag(.store).?), - // .struct_field_ptr => self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?), - // .sub => self.genBinOp(inst.castTag(.sub).?, .sub), - // .switchbr => self.genSwitchBr(inst.castTag(.switchbr).?), - // .unreach => self.genUnreachable(inst.castTag(.unreach).?), - // .unwrap_errunion_payload => self.genUnwrapErrUnionPayload(inst.castTag(.unwrap_errunion_payload).?), - // .wrap_errunion_payload => self.genWrapErrUnionPayload(inst.castTag(.wrap_errunion_payload).?), - // .xor => self.genBinOp(inst.castTag(.xor).?, .xor), + .add => self.genBinOp(inst, .add), + .alloc => self.genAlloc(inst), + .arg => self.genArg(inst), + .bit_and => self.genBinOp(inst, .@"and"), + .bitcast => self.genBitcast(inst), + .bit_or => self.genBinOp(inst, .@"or"), + .block => self.genBlock(inst), + .bool_and => self.genBinOp(inst, .@"and"), + .bool_or => self.genBinOp(inst, .@"or"), + .breakpoint => self.genBreakpoint(inst), + .br => self.genBr(inst), + .call => self.genCall(inst), + .cmp_eq => self.genCmp(inst, .eq), + .cmp_gte => self.genCmp(inst, .gte), + .cmp_gt => self.genCmp(inst, .gt), + 
.cmp_lte => self.genCmp(inst, .lte), + .cmp_lt => self.genCmp(inst, .lt), + .cmp_neq => self.genCmp(inst, .neq), + .cond_br => self.genCondBr(inst), + .constant => unreachable, + .dbg_stmt => WValue.none, + .div => self.genBinOp(inst, .div), + .is_err => self.genIsErr(inst, .i32_ne), + .is_non_err => self.genIsErr(inst, .i32_eq), + .load => self.genLoad(inst), + .loop => self.genLoop(inst), + .mul => self.genBinOp(inst, .mul), + .not => self.genNot(inst), + .ret => self.genRet(inst), + .store => self.genStore(inst), + .struct_field_ptr => self.genStructFieldPtr(inst), + .sub => self.genBinOp(inst, .sub), + .switch_br => self.genSwitchBr(inst), + .unreach => self.genUnreachable(inst), + .unwrap_errunion_payload => self.genUnwrapErrUnionPayload(inst), + .wrap_errunion_payload => self.genWrapErrUnionPayload(inst), + .xor => self.genBinOp(inst, .xor), else => |tag| self.fail("TODO: Implement wasm inst: {s}", .{@tagName(tag)}), }; } @@ -835,22 +836,27 @@ pub const Context = struct { fn genBody(self: *Context, body: []const Air.Inst.Index) InnerError!void { for (body) |inst| { const result = try self.genInst(inst); - try self.values.putNoClobber(self.gpa, inst, result); + try self.values.putNoClobber(self.gpa, @intToEnum(Air.Inst.Ref, inst), result); } } fn genRet(self: *Context, inst: Air.Inst.Index) InnerError!WValue { - // TODO: Implement tail calls - const operand = self.resolveInst(inst.operand); + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand = self.resolveInst(un_op); try self.emitWValue(operand); try self.code.append(wasm.opcode(.@"return")); return .none; } fn genCall(self: *Context, inst: Air.Inst.Index) InnerError!WValue { - const func_val = inst.func.value().?; + const pl_op = self.air.instructions.items(.data)[inst].pl_op; + const extra = self.air.extraData(Air.Call, pl_op.payload); + const args = self.air.extra[extra.end..][0..extra.data.args_len]; const target: *Decl = blk: { + const ty_pl = self.air.instructions.items(.data)[@enumToInt(pl_op.operand)].ty_pl; + const func_val = self.air.values[ty_pl.payload]; + if (func_val.castTag(.function)) |func| { break :blk func.data.owner_decl; } else if (func_val.castTag(.extern_fn)) |ext_fn| { @@ -859,8 +865,8 @@ pub const Context = struct { return self.fail("Expected a function, but instead found type '{s}'", .{func_val.tag()}); }; - for (inst.args) |arg| { - const arg_val = self.resolveInst(arg); + for (args) |arg| { + const arg_val = self.resolveInst(@intToEnum(Air.Inst.Ref, arg)); try self.emitWValue(arg_val); } @@ -877,15 +883,16 @@ pub const Context = struct { } fn genAlloc(self: *Context, inst: Air.Inst.Index) InnerError!WValue { - const elem_type = inst.base.ty.elemType(); + const elem_type = self.air.getType(inst).elemType(); return self.allocLocal(elem_type); } fn genStore(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; const writer = self.code.writer(); - const lhs = self.resolveInst(inst.lhs); - const rhs = self.resolveInst(inst.rhs); + const lhs = self.resolveInst(bin_op.lhs); + const rhs = self.resolveInst(bin_op.rhs); switch (lhs) { .multi_value => |multi_value| switch (rhs) { @@ -893,7 +900,7 @@ pub const Context = struct { // we simply assign the local_index to the rhs one. 
// This allows us to update struct fields without having to individually // set each local as each field's index will be calculated off the struct's base index - .multi_value => self.values.put(self.gpa, inst.lhs, rhs) catch unreachable, // Instruction does not dominate all uses! + .multi_value => self.values.put(self.gpa, bin_op.lhs, rhs) catch unreachable, // Instruction does not dominate all uses! .constant, .none => { // emit all values onto the stack if constant try self.emitWValue(rhs); @@ -920,7 +927,8 @@ } fn genLoad(self: *Context, inst: Air.Inst.Index) InnerError!WValue { - return self.resolveInst(inst.operand); + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + return self.resolveInst(ty_op.operand); } fn genArg(self: *Context, inst: Air.Inst.Index) InnerError!WValue { @@ -931,8 +939,9 @@ } fn genBinOp(self: *Context, inst: Air.Inst.Index, op: Op) InnerError!WValue { - const lhs = self.resolveInst(inst.lhs); - const rhs = self.resolveInst(inst.rhs); + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs = self.resolveInst(bin_op.lhs); + const rhs = self.resolveInst(bin_op.rhs); // it's possible for lhs and/or rhs to return an offset as well, // in which case we return the first offset occurrence we find. @@ -945,10 +954,11 @@ try self.emitWValue(lhs); try self.emitWValue(rhs); + const bin_ty = self.air.getRefType(bin_op.lhs); const opcode: wasm.Opcode = buildOpcode(.{ .op = op, - .valtype1 = try self.typeToValtype(inst.base.ty), - .signedness = if (inst.base.ty.isSignedInt()) .signed else .unsigned, + .valtype1 = try self.typeToValtype(bin_ty), + .signedness = if (bin_ty.isSignedInt()) .signed else .unsigned, }); try self.code.append(wasm.opcode(opcode)); return WValue{ .code_offset = offset }; @@ -1064,14 +1074,17 @@ } } - fn genBlock(self: *Context, block: Air.Inst.Index) InnerError!WValue { - const block_ty = try self.genBlockType(block.base.ty); + fn genBlock(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const block_ty = try self.genBlockType(self.air.getRefType(ty_pl.ty)); + const extra = self.air.extraData(Air.Block, ty_pl.payload); + const body = self.air.extra[extra.end..][0..extra.data.body_len]; try self.startBlock(.block, block_ty, null); // Here we set the current block idx, so breaks know the depth to jump // to when breaking out. - try self.blocks.putNoClobber(self.gpa, block, self.block_depth); - try self.genBody(block.body); + try self.blocks.putNoClobber(self.gpa, inst, self.block_depth); + try self.genBody(body); try self.endBlock(); return .none; @@ -1095,11 +1108,15 @@ self.block_depth -= 1; } - fn genLoop(self: *Context, loop: Air.Inst.Index) InnerError!WValue { - const loop_ty = try self.genBlockType(loop.base.ty); + fn genLoop(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const loop = self.air.extraData(Air.Block, ty_pl.payload); + const body = self.air.extra[loop.end..][0..loop.data.body_len]; - try self.startBlock(.loop, loop_ty, null); - try self.genBody(loop.body); + // result type of loop is always 'noreturn', meaning we can always + // emit the wasm type 'block_empty'.
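// Aside (sketch, not part of the patch): the lowering below relies on wasm's
// structured control flow, where a branch that targets a loop label re-enters
// the loop rather than exiting it. The emitted code section bytes are roughly:
//
//   loop block_empty    ; opened by startBlock(.loop, wasm.block_empty, null)
//     ... loop body ...
//     br 0              ; depth 0 targets this loop's label, i.e. continue
//   end                 ; written by endBlock()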
+ try self.startBlock(.loop, wasm.block_empty, null); + try self.genBody(body); // breaking to the index of a loop block will continue the loop instead try self.code.append(wasm.opcode(.br)); @@ -1110,8 +1127,12 @@ return .none; } - fn genCondBr(self: *Context, condbr: Air.Inst.Index) InnerError!WValue { - const condition = self.resolveInst(condbr.condition); + fn genCondBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + const pl_op = self.air.instructions.items(.data)[inst].pl_op; + const condition = self.resolveInst(pl_op.operand); + const extra = self.air.extraData(Air.CondBr, pl_op.payload); + const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len]; + const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; const writer = self.code.writer(); // TODO: Handle death instructions for then and else body @@ -1126,8 +1147,9 @@ break :blk offset; }, }; - const block_ty = try self.genBlockType(condbr.base.ty); - try self.startBlock(.block, block_ty, offset); + + // result type is always noreturn, so use `block_empty` as type. + try self.startBlock(.block, wasm.block_empty, offset); // we inserted the block in front of the condition // so now check if condition matches. If not, break outside this block @@ -1135,11 +1157,11 @@ try writer.writeByte(wasm.opcode(.br_if)); try leb.writeULEB128(writer, @as(u32, 0)); - try self.genBody(condbr.else_body); + try self.genBody(else_body); try self.endBlock(); // Outer block that matches the condition - try self.genBody(condbr.then_body); + try self.genBody(then_body); return .none; } @@ -1149,21 +1171,23 @@ // the comparison that we can later jump back to const offset = self.code.items.len; - const lhs = self.resolveInst(inst.lhs); - const rhs = self.resolveInst(inst.rhs); + const data: Air.Inst.Data = self.air.instructions.items(.data)[inst]; + const lhs = self.resolveInst(data.bin_op.lhs); + const rhs = self.resolveInst(data.bin_op.rhs); + const lhs_ty = self.air.getRefType(data.bin_op.lhs); try self.emitWValue(lhs); try self.emitWValue(rhs); const signedness: std.builtin.Signedness = blk: { // by default we tell the operand type is unsigned (i.e. bools and enum values) - if (inst.lhs.ty.zigTypeTag() != .Int) break :blk .unsigned; + if (lhs_ty.zigTypeTag() != .Int) break :blk .unsigned; // in case of an actual integer, we emit the correct signedness - break :blk inst.lhs.ty.intInfo(self.target).signedness; + break :blk lhs_ty.intInfo(self.target).signedness; }; const opcode: wasm.Opcode = buildOpcode(.{ - .valtype1 = try self.typeToValtype(inst.lhs.ty), + .valtype1 = try self.typeToValtype(lhs_ty), .op = switch (op) { .lt => .lt, .lte => .le, @@ -1178,16 +1202,17 @@ return WValue{ .code_offset = offset }; } - fn genBr(self: *Context, br: Air.Inst.Index) InnerError!WValue { + fn genBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + const br = self.air.instructions.items(.data)[inst].br; + // if operand has codegen bits we should break with a value - if (br.operand.ty.hasCodeGenBits()) { - const operand = self.resolveInst(br.operand); - try self.emitWValue(operand); + if (self.air.getRefType(br.operand).hasCodeGenBits()) { + try self.emitWValue(self.resolveInst(br.operand)); } // We map every block to its block index.
// We then determine how far we have to jump to it by subtracting it from the current block depth - const idx: u32 = self.block_depth - self.blocks.get(br.block).?; + const idx: u32 = self.block_depth - self.blocks.get(br.block_inst).?; const writer = self.code.writer(); try writer.writeByte(wasm.opcode(.br)); try leb.writeULEB128(writer, idx); @@ -1195,10 +1220,11 @@ return .none; } - fn genNot(self: *Context, not: Air.Inst.Index) InnerError!WValue { + fn genNot(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; const offset = self.code.items.len; - const operand = self.resolveInst(not.operand); + const operand = self.resolveInst(ty_op.operand); try self.emitWValue(operand); // wasm does not have booleans nor the `not` instruction, therefore compare with 0 @@ -1212,35 +1238,44 @@ return WValue{ .code_offset = offset }; } - fn genBreakpoint(self: *Context, breakpoint: Air.Inst.Index) InnerError!WValue { + fn genBreakpoint(self: *Context, inst: Air.Inst.Index) InnerError!WValue { _ = self; - _ = breakpoint; + _ = inst; // unsupported by wasm itself. Can be implemented once we support DWARF // for wasm return .none; } - fn genUnreachable(self: *Context, unreach: Air.Inst.Index) InnerError!WValue { - _ = unreach; + fn genUnreachable(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + _ = inst; try self.code.append(wasm.opcode(.@"unreachable")); return .none; } - fn genBitcast(self: *Context, bitcast: Air.Inst.Index) InnerError!WValue { - return self.resolveInst(bitcast.operand); + fn genBitcast(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + return self.resolveInst(ty_op.operand); } fn genStructFieldPtr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { - const struct_ptr = self.resolveInst(inst.struct_ptr); + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const extra = self.air.extraData(Air.StructField, ty_pl.payload); + const struct_ptr = self.resolveInst(extra.data.struct_ptr); - return WValue{ .local = struct_ptr.multi_value.index + @intCast(u32, inst.field_index) }; + return WValue{ .local = struct_ptr.multi_value.index + @intCast(u32, extra.data.field_index) }; } fn genSwitchBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { - const target = self.resolveInst(inst.target); - const target_ty = inst.target.ty; - const valtype = try self.typeToValtype(.{ .node_offset = 0 }, target_ty); - const blocktype = try self.genBlockType(inst.base.ty); + const pl_op = self.air.instructions.items(.data)[inst].pl_op; + const extra = self.air.extraData(Air.SwitchBr, pl_op.payload); + const cases = self.air.extra[extra.end..][0..extra.data.cases_len]; + const else_body = self.air.extra[extra.end + cases.len ..][0..extra.data.else_body_len]; + + const target = self.resolveInst(pl_op.operand); + const target_ty = self.air.getRefType(pl_op.operand); + const valtype = try self.typeToValtype(target_ty); + // result type is always 'noreturn' + const blocktype = wasm.block_empty; const signedness: std.builtin.Signedness = blk: { // by default we tell the operand type is unsigned (i.e.
bools and enum values) @@ -1249,11 +1284,18 @@ // in case of an actual integer, we emit the correct signedness break :blk target_ty.intInfo(self.target).signedness; }; - for (inst.cases) |case| { + for (cases) |case_idx| { + const case = self.air.extraData(Air.SwitchBr.Case, case_idx); + const case_body = self.air.extra[case.end..][0..case.data.body_len]; + // create a block for each case; when the condition does not match we break out of it try self.startBlock(.block, blocktype, null); try self.emitWValue(target); + + // cases must represent a constant whose type is in the `typed_value_map`, + // so we can simply retrieve it. + const ty_val = Air.Inst.Ref.typed_value_map[@enumToInt(case.data.item)]; + try self.emitConstant(ty_val.val, target_ty); const opcode = buildOpcode(.{ .valtype1 = valtype, .op = .ne, // not equal because we jump out of the block if it does not match the condition }); try self.code.append(wasm.opcode(opcode)); try leb.writeULEB128(self.code.writer(), @as(u32, 0)); // emit our block code - try self.genBody(case.body); + try self.genBody(case_body); // end the block we created earlier try self.endBlock(); @@ -1272,13 +1314,14 @@ // finally, emit the else case if it exists. Here we will not have to // check for a condition, so there is also no need to emit a block. - try self.genBody(inst.else_body); + try self.genBody(else_body); return .none; } fn genIsErr(self: *Context, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!WValue { - const operand = self.resolveInst(inst.operand); + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand = self.resolveInst(un_op); const offset = self.code.items.len; const writer = self.code.writer(); @@ -1294,7 +1337,8 @@ } fn genUnwrapErrUnionPayload(self: *Context, inst: Air.Inst.Index) InnerError!WValue { - const operand = self.resolveInst(inst.operand); + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const operand = self.resolveInst(ty_op.operand); // The index of multi_value contains the error code. To get the initial index of the payload we get // the following index.
Next, convert it to a `WValue.local` // @@ -1303,6 +1347,7 @@ pub const Context = struct { } fn genWrapErrUnionPayload(self: *Context, inst: Air.Inst.Index) InnerError!WValue { - return self.resolveInst(inst.operand); + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + return self.resolveInst(ty_op.operand); } }; diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 1387615d15..81e50c46b6 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -220,7 +220,7 @@ pub fn updateFunc(self: *Wasm, module: *Module, func: *Module.Fn, air: Air, live defer context.deinit(); // generate the 'code' section for the function declaration - const result = context.genFunc(func) catch |err| switch (err) { + const result = context.genFunc() catch |err| switch (err) { error.CodegenFail => { decl.analysis = .codegen_failure; try module.failed_decls.put(module.gpa, decl, context.err_msg); -- cgit v1.2.3 From 4a0f38bb7671750be1590e815def72e3b4a34ccf Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 18 Jul 2021 22:26:36 -0700 Subject: stage2: update LLVM backend to new AIR memory layout Also fix compile errors when not using -Dskip-non-native --- src/codegen.zig | 54 ++++---- src/codegen/c.zig | 3 + src/codegen/llvm.zig | 359 +++++++++++++++++++++++++++++--------------------- src/codegen/spirv.zig | 152 ++++++++++----------- src/codegen/wasm.zig | 8 +- src/link/Coff.zig | 15 ++- src/link/MachO.zig | 245 +++++++++++++++++++--------------- src/link/SpirV.zig | 16 ++- src/link/Wasm.zig | 2 + 9 files changed, 482 insertions(+), 372 deletions(-) (limited to 'src/codegen/wasm.zig') diff --git a/src/codegen.zig b/src/codegen.zig index 11a2603aac..20d7035822 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -642,7 +642,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.body); + try self.genBody(self.air.getMainBody()); // Backpatch push callee saved regs var saved_regs = Instruction.RegisterList{ @@ -703,7 +703,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { writeInt(u32, try self.code.addManyAsArray(4), Instruction.ldm(.al, .sp, true, saved_regs).toU32()); } else { try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.body); + try self.genBody(self.air.getMainBody()); try self.dbgSetEpilogueBegin(); } }, @@ -727,7 +727,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.body); + try self.genBody(self.air.getMainBody()); // Backpatch stack offset const stack_end = self.max_end_stack; @@ -779,13 +779,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { writeInt(u32, try self.code.addManyAsArray(4), Instruction.ret(null).toU32()); } else { try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.body); + try self.genBody(self.air.getMainBody()); try self.dbgSetEpilogueBegin(); } }, else => { try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.body); + try self.genBody(self.air.getMainBody()); try self.dbgSetEpilogueBegin(); }, } @@ -1492,7 +1492,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }; } - fn genArmBinOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref, op: ir.Inst.Tag) !MCValue { + fn genArmBinOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref, op: Air.Inst.Tag) !MCValue { const lhs = try self.resolveInst(op_lhs); const rhs = try self.resolveInst(op_rhs); @@ -1514,14 +1514,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if 
(reuse_lhs) { // Allocate 0 or 1 registers if (!rhs_is_register and rhs_should_be_register) { - rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_rhs, &.{lhs.register}) }; + rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_rhs).?, &.{lhs.register}) }; branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv); } dst_mcv = lhs; } else if (reuse_rhs) { // Allocate 0 or 1 registers if (!lhs_is_register and lhs_should_be_register) { - lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_lhs, &.{rhs.register}) }; + lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_lhs).?, &.{rhs.register}) }; branch.inst_table.putAssumeCapacity(Air.refToIndex(op_lhs).?, lhs_mcv); } dst_mcv = rhs; @@ -1542,7 +1542,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { lhs_mcv = dst_mcv; } else { // Move LHS and RHS to register - const regs = try self.register_manager.allocRegs(2, .{ inst, op_rhs }, &.{}); + const regs = try self.register_manager.allocRegs(2, .{ inst, Air.refToIndex(op_rhs).? }, &.{}); lhs_mcv = MCValue{ .register = regs[0] }; rhs_mcv = MCValue{ .register = regs[1] }; dst_mcv = lhs_mcv; @@ -1572,10 +1572,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // Move the operands to the newly allocated registers if (lhs_mcv == .register and !lhs_is_register) { - try self.genSetReg(op_lhs.ty, lhs_mcv.register, lhs); + try self.genSetReg(self.air.typeOf(op_lhs), lhs_mcv.register, lhs); } if (rhs_mcv == .register and !rhs_is_register) { - try self.genSetReg(op_rhs.ty, rhs_mcv.register, rhs); + try self.genSetReg(self.air.typeOf(op_rhs), rhs_mcv.register, rhs); } try self.genArmBinOpCode( @@ -1594,7 +1594,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { lhs_mcv: MCValue, rhs_mcv: MCValue, swap_lhs_and_rhs: bool, - op: ir.Inst.Tag, + op: Air.Inst.Tag, ) !void { assert(lhs_mcv == .register or rhs_mcv == .register); @@ -1665,14 +1665,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (reuse_lhs) { // Allocate 0 or 1 registers if (!rhs_is_register) { - rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_rhs, &.{lhs.register}) }; + rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_rhs).?, &.{lhs.register}) }; branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv); } dst_mcv = lhs; } else if (reuse_rhs) { // Allocate 0 or 1 registers if (!lhs_is_register) { - lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_lhs, &.{rhs.register}) }; + lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_lhs).?, &.{rhs.register}) }; branch.inst_table.putAssumeCapacity(Air.refToIndex(op_lhs).?, lhs_mcv); } dst_mcv = rhs; @@ -1690,7 +1690,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { lhs_mcv = dst_mcv; } else { // Move LHS and RHS to register - const regs = try self.register_manager.allocRegs(2, .{ inst, op_rhs }, &.{}); + const regs = try self.register_manager.allocRegs(2, .{ inst, Air.refToIndex(op_rhs).? 
}, &.{}); lhs_mcv = MCValue{ .register = regs[0] }; rhs_mcv = MCValue{ .register = regs[1] }; dst_mcv = lhs_mcv; @@ -1701,10 +1701,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // Move the operands to the newly allocated registers if (!lhs_is_register) { - try self.genSetReg(op_lhs.ty, lhs_mcv.register, lhs); + try self.genSetReg(self.air.typeOf(op_lhs), lhs_mcv.register, lhs); } if (!rhs_is_register) { - try self.genSetReg(op_rhs.ty, rhs_mcv.register, rhs); + try self.genSetReg(self.air.typeOf(op_rhs), rhs_mcv.register, rhs); } writeInt(u32, try self.code.addManyAsArray(4), Instruction.mul(.al, dst_mcv.register, lhs_mcv.register, rhs_mcv.register).toU32()); @@ -2704,9 +2704,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }, .aarch64 => { for (info.args) |mc_arg, arg_i| { - const arg = inst.args[arg_i]; + const arg = args[arg_i]; const arg_ty = self.air.typeOf(arg); - const arg_mcv = try self.resolveInst(inst.args[arg_i]); + const arg_mcv = try self.resolveInst(args[arg_i]); switch (mc_arg) { .none => continue, @@ -2733,7 +2733,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }, } } - if (inst.func.value()) |func_value| { + if (self.air.value(callee)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); @@ -2899,15 +2899,17 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // Allocate registers if (rhs_should_be_register) { if (!lhs_is_register and !rhs_is_register) { - const regs = try self.register_manager.allocRegs(2, .{ bin_op.rhs, bin_op.lhs }, &.{}); + const regs = try self.register_manager.allocRegs(2, .{ + Air.refToIndex(bin_op.rhs).?, Air.refToIndex(bin_op.lhs).?, + }, &.{}); lhs_mcv = MCValue{ .register = regs[0] }; rhs_mcv = MCValue{ .register = regs[1] }; } else if (!rhs_is_register) { - rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(bin_op.rhs, &.{}) }; + rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(bin_op.rhs).?, &.{}) }; } } if (!lhs_is_register) { - lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(bin_op.lhs, &.{}) }; + lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(bin_op.lhs).?, &.{}) }; } // Move the operands to the newly allocated registers @@ -3538,7 +3540,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { break :result MCValue{ .register = reg }; } else { - break :result MCValue.none; + break :result MCValue{ .none = {} }; } }, .aarch64 => result: { @@ -3576,7 +3578,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.fail("unrecognized register: '{s}'", .{reg_name}); break :result MCValue{ .register = reg }; } else { - break :result MCValue.none; + break :result MCValue{ .none = {} }; } }, .riscv64 => result: { @@ -3612,7 +3614,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.fail("unrecognized register: '{s}'", .{reg_name}); break :result MCValue{ .register = reg }; } else { - break :result MCValue.none; + break :result MCValue{ .none = {} }; } }, .x86_64, .i386 => result: { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 1fe330a894..7137116037 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -974,6 +974,9 @@ fn airArg(o: *Object) CValue { fn airLoad(o: *Object, inst: Air.Inst.Index) !CValue { const ty_op = o.air.instructions.items(.data)[inst].ty_op; + const is_volatile = o.air.typeOf(ty_op.operand).isVolatilePtr(); + if (!is_volatile 
and o.liveness.isUnused(inst)) + return CValue.none; const inst_ty = o.air.typeOfIndex(inst); const operand = try o.resolveInst(ty_op.operand); const writer = o.writer(); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index ddf2883259..d9090c9f2c 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -10,7 +10,7 @@ const math = std.math; const Module = @import("../Module.zig"); const TypedValue = @import("../TypedValue.zig"); const Air = @import("../Air.zig"); -const Inst = ir.Inst; +const Liveness = @import("../Liveness.zig"); const Value = @import("../value.zig").Value; const Type = @import("../type.zig").Type; @@ -355,6 +355,7 @@ pub const DeclGen = struct { builder.positionBuilderAtEnd(entry_block); var fg: FuncGen = .{ + .gpa = self.gpa, .dg = self, .builder = builder, .args = args, @@ -593,29 +594,29 @@ }; pub const FuncGen = struct { + gpa: *Allocator, dg: *DeclGen, builder: *const llvm.Builder, - /// This stores the LLVM values used in a function, such that they can be - /// referred to in other instructions. This table is cleared before every function is generated. - /// TODO: Change this to a stack of Branch. Currently we store all the values from all the blocks - /// in here, however if a block ends, the instructions can be thrown away. - func_inst_table: std.AutoHashMapUnmanaged(*Inst, *const llvm.Value), + /// This stores the LLVM values used in a function, such that they can be referred to + /// in other instructions. This table is cleared before every function is generated. + func_inst_table: std.AutoHashMapUnmanaged(Air.Inst.Index, *const llvm.Value), - /// These fields are used to refer to the LLVM value of the function parameters in an Arg instruction. + /// These fields are used to refer to the LLVM value of the function parameters + /// in an Arg instruction. args: []*const llvm.Value, arg_index: usize, entry_block: *const llvm.BasicBlock, - /// This field stores the last alloca instruction, such that we can append more alloca instructions - /// to the top of the function. + /// This field stores the last alloca instruction, such that we can append + /// more alloca instructions to the top of the function. latest_alloca_inst: ?*const llvm.Value, llvm_func: *const llvm.Value, /// This data structure is used to implement breaking to blocks.
- blocks: std.AutoHashMapUnmanaged(*Inst.Block, struct { + blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, struct { parent_bb: *const llvm.BasicBlock, break_bbs: *BreakBasicBlocks, break_vals: *BreakValues, @@ -626,9 +627,9 @@ pub const FuncGen = struct { fn deinit(self: *FuncGen) void { self.builder.dispose(); - self.func_inst_table.deinit(self.gpa()); - self.gpa().free(self.args); - self.blocks.deinit(self.gpa()); + self.func_inst_table.deinit(self.gpa); + self.gpa.free(self.args); + self.blocks.deinit(self.gpa); } fn todo(self: *FuncGen, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } { @@ -644,13 +645,9 @@ pub const FuncGen = struct { return self.dg.object.context; } - fn gpa(self: *FuncGen) *Allocator { - return self.dg.gpa; - } - - fn resolveInst(self: *FuncGen, inst: *ir.Inst) !*const llvm.Value { - if (inst.value()) |val| { - return self.dg.genTypedValue(.{ .ty = inst.ty, .val = val }, self); + fn resolveInst(self: *FuncGen, inst: Air.Inst.Ref) !*const llvm.Value { + if (self.air.value(inst)) |val| { + return self.dg.genTypedValue(.{ .ty = self.air.typeOf(inst), .val = val }, self); } if (self.func_inst_table.get(inst)) |value| return value; @@ -658,51 +655,57 @@ pub const FuncGen = struct { } fn genBody(self: *FuncGen, body: ir.Body) error{ OutOfMemory, CodegenFail }!void { + const air_tags = self.air.instructions.items(.tag); for (body.instructions) |inst| { - const opt_value = switch (inst.tag) { - .add => try self.genAdd(inst.castTag(.add).?), - .alloc => try self.genAlloc(inst.castTag(.alloc).?), - .arg => try self.genArg(inst.castTag(.arg).?), - .bitcast => try self.genBitCast(inst.castTag(.bitcast).?), - .block => try self.genBlock(inst.castTag(.block).?), - .br => try self.genBr(inst.castTag(.br).?), - .breakpoint => try self.genBreakpoint(inst.castTag(.breakpoint).?), - .br_void => try self.genBrVoid(inst.castTag(.br_void).?), - .call => try self.genCall(inst.castTag(.call).?), - .cmp_eq => try self.genCmp(inst.castTag(.cmp_eq).?, .eq), - .cmp_gt => try self.genCmp(inst.castTag(.cmp_gt).?, .gt), - .cmp_gte => try self.genCmp(inst.castTag(.cmp_gte).?, .gte), - .cmp_lt => try self.genCmp(inst.castTag(.cmp_lt).?, .lt), - .cmp_lte => try self.genCmp(inst.castTag(.cmp_lte).?, .lte), - .cmp_neq => try self.genCmp(inst.castTag(.cmp_neq).?, .neq), - .condbr => try self.genCondBr(inst.castTag(.condbr).?), - .intcast => try self.genIntCast(inst.castTag(.intcast).?), - .is_non_null => try self.genIsNonNull(inst.castTag(.is_non_null).?, false), - .is_non_null_ptr => try self.genIsNonNull(inst.castTag(.is_non_null_ptr).?, true), - .is_null => try self.genIsNull(inst.castTag(.is_null).?, false), - .is_null_ptr => try self.genIsNull(inst.castTag(.is_null_ptr).?, true), - .load => try self.genLoad(inst.castTag(.load).?), - .loop => try self.genLoop(inst.castTag(.loop).?), - .not => try self.genNot(inst.castTag(.not).?), - .ret => try self.genRet(inst.castTag(.ret).?), - .retvoid => self.genRetVoid(inst.castTag(.retvoid).?), - .store => try self.genStore(inst.castTag(.store).?), - .sub => try self.genSub(inst.castTag(.sub).?), - .unreach => self.genUnreach(inst.castTag(.unreach).?), - .optional_payload => try self.genOptionalPayload(inst.castTag(.optional_payload).?, false), - .optional_payload_ptr => try self.genOptionalPayload(inst.castTag(.optional_payload_ptr).?, true), + const opt_value = switch (air_tags[inst]) { + .add => try self.airAdd(inst), + .sub => try self.airSub(inst), + + .cmp_eq => try self.airCmp(inst, .eq), + .cmp_gt => try 
self.airCmp(inst, .gt), + .cmp_gte => try self.airCmp(inst, .gte), + .cmp_lt => try self.airCmp(inst, .lt), + .cmp_lte => try self.airCmp(inst, .lte), + .cmp_neq => try self.airCmp(inst, .neq), + + .is_non_null => try self.airIsNonNull(inst, false), + .is_non_null_ptr => try self.airIsNonNull(inst, true), + .is_null => try self.airIsNull(inst, false), + .is_null_ptr => try self.airIsNull(inst, true), + + .alloc => try self.airAlloc(inst), + .arg => try self.airArg(inst), + .bitcast => try self.airBitCast(inst), + .block => try self.airBlock(inst), + .br => try self.airBr(inst), + .breakpoint => try self.airBreakpoint(inst), + .call => try self.airCall(inst), + .cond_br => try self.airCondBr(inst), + .intcast => try self.airIntCast(inst), + .load => try self.airLoad(inst), + .loop => try self.airLoop(inst), + .not => try self.airNot(inst), + .ret => try self.airRet(inst), + .store => try self.airStore(inst), + .unreach => self.airUnreach(inst), + .optional_payload => try self.airOptionalPayload(inst, false), + .optional_payload_ptr => try self.airOptionalPayload(inst, true), .dbg_stmt => blk: { // TODO: implement debug info break :blk null; }, - else => |tag| return self.todo("implement TZIR instruction: {}", .{tag}), + else => |tag| return self.todo("implement AIR instruction: {}", .{tag}), }; - if (opt_value) |val| try self.func_inst_table.putNoClobber(self.gpa(), inst, val); + if (opt_value) |val| try self.func_inst_table.putNoClobber(self.gpa, inst, val); } } - fn genCall(self: *FuncGen, inst: *Inst.Call) !?*const llvm.Value { - if (inst.func.value()) |func_value| { + fn airCall(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + const pl_op = self.air.instructions.items(.data)[inst].pl_op; + const extra = self.air.extraData(Air.Call, pl_op.payload); + const args = @bitCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); + + if (self.air.value(pl_op.operand)) |func_value| { const fn_decl = if (func_value.castTag(.extern_fn)) |extern_fn| extern_fn.data else if (func_value.castTag(.function)) |func_payload| @@ -714,12 +717,10 @@ pub const FuncGen = struct { const zig_fn_type = fn_decl.ty; const llvm_fn = try self.dg.resolveLLVMFunction(fn_decl); - const num_args = inst.args.len; - - const llvm_param_vals = try self.gpa().alloc(*const llvm.Value, num_args); - defer self.gpa().free(llvm_param_vals); + const llvm_param_vals = try self.gpa.alloc(*const llvm.Value, args.len); + defer self.gpa.free(llvm_param_vals); - for (inst.args) |arg, i| { + for (args) |arg, i| { llvm_param_vals[i] = try self.resolveInst(arg); } @@ -727,8 +728,8 @@ pub const FuncGen = struct { // Do we need that? const call = self.builder.buildCall( llvm_fn, - if (num_args == 0) null else llvm_param_vals.ptr, - @intCast(c_uint, num_args), + if (args.len == 0) null else llvm_param_vals.ptr, + @intCast(c_uint, args.len), "", ); @@ -746,31 +747,31 @@ pub const FuncGen = struct { } } - fn genRetVoid(self: *FuncGen, inst: *Inst.NoOp) ?*const llvm.Value { - _ = inst; - _ = self.builder.buildRetVoid(); - return null; - } - - fn genRet(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value { - if (!inst.operand.ty.hasCodeGenBits()) { - // TODO: in astgen these instructions should turn into `retvoid` instructions. 
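One recurring pattern above and below deserves a note: `extraData` decodes a fixed struct (such as `Air.Call`, `Air.Block` or `Air.CondBr`) out of the flat `air.extra` array and reports where its trailing data begins. A sketch of its assumed shape, since `Air.zig` is not part of this diff (details may differ):

    // Sketch: parse T's u32/Ref fields from air.extra starting at `index`;
    // `end` marks where trailing data such as a body or argument list begins.
    pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end: usize } {
        var i: usize = index;
        var result: T = undefined;
        inline for (std.meta.fields(T)) |field| {
            @field(result, field.name) = switch (field.field_type) {
                u32 => air.extra[i],
                Air.Inst.Ref => @intToEnum(Air.Inst.Ref, air.extra[i]),
                else => @compileError("unsupported field type"),
            };
            i += 1;
        }
        return .{ .data = result, .end = i };
    }

This is why every handler slices `air.extra[extra.end..]` to obtain its body: the trailing items live immediately after the fixed fields.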
+ fn airRet(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + const un_op = self.air.instructions.items(.data)[inst].un_op; + if (!self.air.typeOf(un_op).hasCodeGenBits()) { _ = self.builder.buildRetVoid(); return null; } - _ = self.builder.buildRet(try self.resolveInst(inst.operand)); + const operand = try self.resolveInst(un_op); + _ = self.builder.buildRet(operand); return null; } - fn genCmp(self: *FuncGen, inst: *Inst.BinOp, op: math.CompareOperator) !?*const llvm.Value { - const lhs = try self.resolveInst(inst.lhs); - const rhs = try self.resolveInst(inst.rhs); + fn airCmp(self: *FuncGen, inst: Air.Inst.Index, op: math.CompareOperator) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + const inst_ty = self.air.typeOfIndex(inst); - if (!inst.base.ty.isInt()) - if (inst.base.ty.tag() != .bool) - return self.todo("implement 'genCmp' for type {}", .{inst.base.ty}); + if (!inst_ty.isInt()) + if (inst_ty.tag() != .bool) + return self.todo("implement 'airCmp' for type {}", .{inst_ty}); - const is_signed = inst.base.ty.isSignedInt(); + const is_signed = inst_ty.isSignedInt(); const operation = switch (op) { .eq => .EQ, .neq => .NE, @@ -783,32 +784,36 @@ return self.builder.buildICmp(operation, lhs, rhs, ""); } - fn genBlock(self: *FuncGen, inst: *Inst.Block) !?*const llvm.Value { + fn airBlock(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const extra = self.air.extraData(Air.Block, ty_pl.payload); + const body = self.air.extra[extra.end..][0..extra.data.body_len]; const parent_bb = self.context().createBasicBlock("Block"); // 5 breaks to a block seems like a reasonable default. - var break_bbs = try BreakBasicBlocks.initCapacity(self.gpa(), 5); - var break_vals = try BreakValues.initCapacity(self.gpa(), 5); - try self.blocks.putNoClobber(self.gpa(), inst, .{ + var break_bbs = try BreakBasicBlocks.initCapacity(self.gpa, 5); + var break_vals = try BreakValues.initCapacity(self.gpa, 5); + try self.blocks.putNoClobber(self.gpa, inst, .{ .parent_bb = parent_bb, .break_bbs = &break_bbs, .break_vals = &break_vals, }); defer { assert(self.blocks.remove(inst)); - break_bbs.deinit(self.gpa()); - break_vals.deinit(self.gpa()); + break_bbs.deinit(self.gpa); + break_vals.deinit(self.gpa); } - try self.genBody(inst.body); + try self.genBody(body); self.llvm_func.appendExistingBasicBlock(parent_bb); self.builder.positionBuilderAtEnd(parent_bb); // If the block does not return a value, we don't have to create a phi node.
- if (!inst.base.ty.hasCodeGenBits()) return null; + const inst_ty = self.air.typeOfIndex(inst); + if (!inst_ty.hasCodeGenBits()) return null; - const phi_node = self.builder.buildPhi(try self.dg.getLLVMType(inst.base.ty), ""); + const phi_node = self.builder.buildPhi(try self.dg.getLLVMType(inst_ty), ""); phi_node.addIncoming( break_vals.items.ptr, break_bbs.items.ptr, @@ -817,35 +822,30 @@ pub const FuncGen = struct { return phi_node; } - fn genBr(self: *FuncGen, inst: *Inst.Br) !?*const llvm.Value { - var block = self.blocks.get(inst.block).?; + fn airBr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + const branch = self.air.instructions.items(.data)[inst].br; + const block = self.blocks.get(branch.block_inst).?; // If the break doesn't break a value, then we don't have to add // the values to the lists. - if (!inst.operand.ty.hasCodeGenBits()) { - // TODO: in astgen these instructions should turn into `br_void` instructions. - _ = self.builder.buildBr(block.parent_bb); - } else { - const val = try self.resolveInst(inst.operand); + if (self.air.typeOf(branch.result).hasCodeGenBits()) { + const val = try self.resolveInst(branch.result); // For the phi node, we need the basic blocks and the values of the // break instructions. - try block.break_bbs.append(self.gpa(), self.builder.getInsertBlock()); - try block.break_vals.append(self.gpa(), val); - - _ = self.builder.buildBr(block.parent_bb); + try block.break_bbs.append(self.gpa, self.builder.getInsertBlock()); + try block.break_vals.append(self.gpa, val); } - return null; - } - - fn genBrVoid(self: *FuncGen, inst: *Inst.BrVoid) !?*const llvm.Value { - var block = self.blocks.get(inst.block).?; _ = self.builder.buildBr(block.parent_bb); return null; } - fn genCondBr(self: *FuncGen, inst: *Inst.CondBr) !?*const llvm.Value { - const condition_value = try self.resolveInst(inst.condition); + fn airCondBr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + const pl_op = self.air.instructions.items(.data)[inst].pl_op; + const cond = try self.resolveInst(pl_op.operand); + const extra = self.air.extraData(Air.CondBr, pl_op.payload); + const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len]; + const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; const then_block = self.context().appendBasicBlock(self.llvm_func, "Then"); const else_block = self.context().appendBasicBlock(self.llvm_func, "Else"); @@ -854,38 +854,51 @@ pub const FuncGen = struct { defer self.builder.positionBuilderAtEnd(prev_block); self.builder.positionBuilderAtEnd(then_block); - try self.genBody(inst.then_body); + try self.genBody(then_body); self.builder.positionBuilderAtEnd(else_block); - try self.genBody(inst.else_body); + try self.genBody(else_body); } - _ = self.builder.buildCondBr(condition_value, then_block, else_block); + _ = self.builder.buildCondBr(cond, then_block, else_block); return null; } - fn genLoop(self: *FuncGen, inst: *Inst.Loop) !?*const llvm.Value { + fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const loop = self.air.extraData(Air.Block, ty_pl.payload); + const body = self.air.extra[loop.end..][0..loop.data.body_len]; const loop_block = self.context().appendBasicBlock(self.llvm_func, "Loop"); _ = self.builder.buildBr(loop_block); self.builder.positionBuilderAtEnd(loop_block); - try self.genBody(inst.body); + try self.genBody(body); _ = self.builder.buildBr(loop_block); return null; } - fn 
genNot(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value { - return self.builder.buildNot(try self.resolveInst(inst.operand), ""); + fn airNot(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand = try self.resolveInst(un_op); + + return self.builder.buildNot(operand, ""); } - fn genUnreach(self: *FuncGen, inst: *Inst.NoOp) ?*const llvm.Value { + fn airUnreach(self: *FuncGen, inst: Air.Inst.Index) ?*const llvm.Value { _ = inst; _ = self.builder.buildUnreachable(); return null; } - fn genIsNonNull(self: *FuncGen, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value { - const operand = try self.resolveInst(inst.operand); + fn airIsNonNull(self: *FuncGen, inst: Air.Inst.Index, operand_is_ptr: bool) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand = try self.resolveInst(un_op); if (operand_is_ptr) { const index_type = self.context().intType(32); @@ -901,12 +914,23 @@ pub const FuncGen = struct { } } - fn genIsNull(self: *FuncGen, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value { - return self.builder.buildNot((try self.genIsNonNull(inst, operand_is_ptr)).?, ""); + fn airIsNull(self: *FuncGen, inst: Air.Inst.Index, operand_is_ptr: bool) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + + return self.builder.buildNot((try self.airIsNonNull(inst, operand_is_ptr)).?, ""); } - fn genOptionalPayload(self: *FuncGen, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value { - const operand = try self.resolveInst(inst.operand); + fn airOptionalPayload( + self: *FuncGen, + inst: Air.Inst.Index, + operand_is_ptr: bool, + ) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const operand = try self.resolveInst(ty_op.operand); if (operand_is_ptr) { const index_type = self.context().intType(32); @@ -922,61 +946,83 @@ pub const FuncGen = struct { } } - fn genAdd(self: *FuncGen, inst: *Inst.BinOp) !?*const llvm.Value { - const lhs = try self.resolveInst(inst.lhs); - const rhs = try self.resolveInst(inst.rhs); + fn airAdd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + const inst_ty = self.air.typeOfIndex(inst); - if (!inst.base.ty.isInt()) - return self.todo("implement 'genAdd' for type {}", .{inst.base.ty}); + if (!inst_ty.isInt()) + return self.todo("implement 'airAdd' for type {}", .{inst_ty}); - return if (inst.base.ty.isSignedInt()) + return if (inst_ty.isSignedInt()) self.builder.buildNSWAdd(lhs, rhs, "") else self.builder.buildNUWAdd(lhs, rhs, ""); } - fn genSub(self: *FuncGen, inst: *Inst.BinOp) !?*const llvm.Value { - const lhs = try self.resolveInst(inst.lhs); - const rhs = try self.resolveInst(inst.rhs); + fn airSub(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + const inst_ty = self.air.typeOfIndex(inst); - if (!inst.base.ty.isInt()) - return self.todo("implement 'genSub' 
for type {}", .{inst.base.ty}); + if (!inst_ty.isInt()) + return self.todo("implement 'airSub' for type {}", .{inst_ty}); - return if (inst.base.ty.isSignedInt()) + return if (inst_ty.isSignedInt()) self.builder.buildNSWSub(lhs, rhs, "") else self.builder.buildNUWSub(lhs, rhs, ""); } - fn genIntCast(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value { - const val = try self.resolveInst(inst.operand); + fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const operand = try self.resolveInst(ty_op.operand); + const inst_ty = self.air.typeOfIndex(inst); - const signed = inst.base.ty.isSignedInt(); + const signed = inst_ty.isSignedInt(); // TODO: Should we use intcast here or just a simple bitcast? // LLVM does truncation vs bitcast (+signed extension) in the intcast depending on the sizes - return self.builder.buildIntCast2(val, try self.dg.getLLVMType(inst.base.ty), llvm.Bool.fromBool(signed), ""); + return self.builder.buildIntCast2(operand, try self.dg.getLLVMType(inst_ty), llvm.Bool.fromBool(signed), ""); } - fn genBitCast(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value { - const val = try self.resolveInst(inst.operand); - const dest_type = try self.dg.getLLVMType(inst.base.ty); + fn airBitCast(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const operand = try self.resolveInst(ty_op.operand); + const inst_ty = self.air.typeOfIndex(inst); + const dest_type = try self.dg.getLLVMType(inst_ty); - return self.builder.buildBitCast(val, dest_type, ""); + return self.builder.buildBitCast(operand, dest_type, ""); } - fn genArg(self: *FuncGen, inst: *Inst.Arg) !?*const llvm.Value { + fn airArg(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { const arg_val = self.args[self.arg_index]; self.arg_index += 1; - const ptr_val = self.buildAlloca(try self.dg.getLLVMType(inst.base.ty)); + const inst_ty = self.air.typeOfIndex(inst); + const ptr_val = self.buildAlloca(try self.dg.getLLVMType(inst_ty)); _ = self.builder.buildStore(arg_val, ptr_val); return self.builder.buildLoad(ptr_val, ""); } - fn genAlloc(self: *FuncGen, inst: *Inst.NoOp) !?*const llvm.Value { + fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; // buildAlloca expects the pointee type, not the pointer type, so assert that // a Payload.PointerSimple is passed to the alloc instruction. - const pointee_type = inst.base.ty.castPointer().?.data; + const inst_ty = self.air.typeOfIndex(inst); + const pointee_type = inst_ty.castPointer().?.data; // TODO: figure out a way to get the name of the var decl. 
// TODO: set alignment and volatile @@ -1007,19 +1053,26 @@ pub const FuncGen = struct { return val; } - fn genStore(self: *FuncGen, inst: *Inst.BinOp) !?*const llvm.Value { - const val = try self.resolveInst(inst.rhs); - const ptr = try self.resolveInst(inst.lhs); - _ = self.builder.buildStore(val, ptr); + fn airStore(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const dest_ptr = try self.resolveInst(bin_op.lhs); + const src_operand = try self.resolveInst(bin_op.rhs); + // TODO set volatile on this store properly + _ = self.builder.buildStore(src_operand, dest_ptr); return null; } - fn genLoad(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value { - const ptr_val = try self.resolveInst(inst.operand); - return self.builder.buildLoad(ptr_val, ""); + fn airLoad(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr(); + if (!is_volatile and self.liveness.isUnused(inst)) + return null; + const ptr = try self.resolveInst(ty_op.operand); + // TODO set volatile on this load properly + return self.builder.buildLoad(ptr, ""); } - fn genBreakpoint(self: *FuncGen, inst: *Inst.NoOp) !?*const llvm.Value { + fn airBreakpoint(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { _ = inst; const llvn_fn = self.getIntrinsic("llvm.debugtrap"); _ = self.builder.buildCall(llvn_fn, null, 0, ""); diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 4da320b087..7429e3c3b0 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -13,6 +13,7 @@ const Type = @import("../type.zig").Type; const Value = @import("../value.zig").Value; const LazySrcLoc = Module.LazySrcLoc; const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); pub const Word = u32; pub const ResultId = u32; @@ -247,6 +248,7 @@ pub const DeclGen = struct { return .{ .spv = spv, .air = undefined, + .liveness = undefined, .args = std.ArrayList(ResultId).init(spv.gpa), .next_arg_index = undefined, .inst_results = InstMap.init(spv.gpa), @@ -259,11 +261,12 @@ pub const DeclGen = struct { } /// Generate the code for `decl`. If a reportable error occured during code generation, - /// a message is returned by this function. Callee owns the memory. If this function returns such - /// a reportable error, it is valid to be called again for a different decl. - pub fn gen(self: *DeclGen, decl: *Decl, air: Air) !?*Module.ErrorMsg { + /// a message is returned by this function. Callee owns the memory. If this function + /// returns such a reportable error, it is valid to be called again for a different decl. + pub fn gen(self: *DeclGen, decl: *Decl, air: Air, liveness: Liveness) !?*Module.ErrorMsg { // Reset internal resources, we don't want to re-allocate these. - self.air = &air; + self.air = air; + self.liveness = liveness; self.args.items.len = 0; self.next_arg_index = 0; self.inst_results.clearRetainingCapacity(); @@ -297,12 +300,12 @@ pub const DeclGen = struct { return error.AnalysisFail; } - fn resolve(self: *DeclGen, inst: Air.Inst.Index) !ResultId { - if (inst.value()) |val| { - return self.genConstant(inst.ty, val); + fn resolve(self: *DeclGen, inst: Air.Inst.Ref) !ResultId { + if (self.air.value(inst)) |val| { + return self.genConstant(self.air.typeOf(inst), val); } - - return self.inst_results.get(inst).?; // Instruction does not dominate all uses! 
+ const index = Air.refToIndex(inst).?; + return self.inst_results.get(index).?; // Assertion means instruction does not dominate usage. } fn beginSPIRVBlock(self: *DeclGen, label_id: ResultId) !void { @@ -663,40 +666,40 @@ pub const DeclGen = struct { const air_tags = self.air.instructions.items(.tag); const result_id = switch (air_tags[inst]) { // zig fmt: off - .add, .addwrap => try self.genArithOp(inst, .{.OpFAdd, .OpIAdd, .OpIAdd}), - .sub, .subwrap => try self.genArithOp(inst, .{.OpFSub, .OpISub, .OpISub}), - .mul, .mulwrap => try self.genArithOp(inst, .{.OpFMul, .OpIMul, .OpIMul}), - .div => try self.genArithOp(inst, .{.OpFDiv, .OpSDiv, .OpUDiv}), - - .bit_and => try self.genBinOpSimple(inst, .OpBitwiseAnd), - .bit_or => try self.genBinOpSimple(inst, .OpBitwiseOr), - .xor => try self.genBinOpSimple(inst, .OpBitwiseXor), - .bool_and => try self.genBinOpSimple(inst, .OpLogicalAnd), - .bool_or => try self.genBinOpSimple(inst, .OpLogicalOr), - - .not => try self.genNot(inst), - - .cmp_eq => try self.genCmp(inst, .{.OpFOrdEqual, .OpLogicalEqual, .OpIEqual}), - .cmp_neq => try self.genCmp(inst, .{.OpFOrdNotEqual, .OpLogicalNotEqual, .OpINotEqual}), - .cmp_gt => try self.genCmp(inst, .{.OpFOrdGreaterThan, .OpSGreaterThan, .OpUGreaterThan}), - .cmp_gte => try self.genCmp(inst, .{.OpFOrdGreaterThanEqual, .OpSGreaterThanEqual, .OpUGreaterThanEqual}), - .cmp_lt => try self.genCmp(inst, .{.OpFOrdLessThan, .OpSLessThan, .OpULessThan}), - .cmp_lte => try self.genCmp(inst, .{.OpFOrdLessThanEqual, .OpSLessThanEqual, .OpULessThanEqual}), - - .arg => self.genArg(), - .alloc => try self.genAlloc(inst), - .block => (try self.genBlock(inst)) orelse return, - .load => try self.genLoad(inst), - - .br => return self.genBr(inst), + .add, .addwrap => try self.airArithOp(inst, .{.OpFAdd, .OpIAdd, .OpIAdd}), + .sub, .subwrap => try self.airArithOp(inst, .{.OpFSub, .OpISub, .OpISub}), + .mul, .mulwrap => try self.airArithOp(inst, .{.OpFMul, .OpIMul, .OpIMul}), + .div => try self.airArithOp(inst, .{.OpFDiv, .OpSDiv, .OpUDiv}), + + .bit_and => try self.airBinOpSimple(inst, .OpBitwiseAnd), + .bit_or => try self.airBinOpSimple(inst, .OpBitwiseOr), + .xor => try self.airBinOpSimple(inst, .OpBitwiseXor), + .bool_and => try self.airBinOpSimple(inst, .OpLogicalAnd), + .bool_or => try self.airBinOpSimple(inst, .OpLogicalOr), + + .not => try self.airNot(inst), + + .cmp_eq => try self.airCmp(inst, .{.OpFOrdEqual, .OpLogicalEqual, .OpIEqual}), + .cmp_neq => try self.airCmp(inst, .{.OpFOrdNotEqual, .OpLogicalNotEqual, .OpINotEqual}), + .cmp_gt => try self.airCmp(inst, .{.OpFOrdGreaterThan, .OpSGreaterThan, .OpUGreaterThan}), + .cmp_gte => try self.airCmp(inst, .{.OpFOrdGreaterThanEqual, .OpSGreaterThanEqual, .OpUGreaterThanEqual}), + .cmp_lt => try self.airCmp(inst, .{.OpFOrdLessThan, .OpSLessThan, .OpULessThan}), + .cmp_lte => try self.airCmp(inst, .{.OpFOrdLessThanEqual, .OpSLessThanEqual, .OpULessThanEqual}), + + .arg => self.airArg(), + .alloc => try self.airAlloc(inst), + .block => (try self.airBlock(inst)) orelse return, + .load => try self.airLoad(inst), + + .br => return self.airBr(inst), .breakpoint => return, - .cond_br => return self.genCondBr(inst), + .cond_br => return self.airCondBr(inst), .constant => unreachable, - .dbg_stmt => return self.genDbgStmt(inst), - .loop => return self.genLoop(inst), - .ret => return self.genRet(inst), - .store => return self.genStore(inst), - .unreach => return self.genUnreach(), + .dbg_stmt => return self.airDbgStmt(inst), + .loop => return self.airLoop(inst), + .ret => return 
self.airRet(inst), + .store => return self.airStore(inst), + .unreach => return self.airUnreach(), // zig fmt: on else => |tag| return self.fail("TODO: SPIR-V backend: implement AIR tag {s}", .{ @@ -707,21 +710,22 @@ pub const DeclGen = struct { try self.inst_results.putNoClobber(inst, result_id); } - fn genBinOpSimple(self: *DeclGen, inst: Air.Inst.Index, opcode: Opcode) !ResultId { + fn airBinOpSimple(self: *DeclGen, inst: Air.Inst.Index, opcode: Opcode) !ResultId { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs_id = try self.resolve(bin_op.lhs); const rhs_id = try self.resolve(bin_op.rhs); const result_id = self.spv.allocResultId(); + const result_type_id = try self.genType(self.air.typeOfIndex(inst)); try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, lhs_id, rhs_id, }); return result_id; } - fn genArithOp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId { + fn airArithOp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId { // LHS and RHS are guaranteed to have the same type, and AIR guarantees // the result to be the same as the LHS and RHS, which matches SPIR-V. - const ty = self.air.getType(inst); + const ty = self.air.typeOfIndex(inst); const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs_id = try self.resolve(bin_op.lhs); const rhs_id = try self.resolve(bin_op.rhs); @@ -729,8 +733,8 @@ pub const DeclGen = struct { const result_id = self.spv.allocResultId(); const result_type_id = try self.genType(ty); - assert(self.air.getType(bin_op.lhs).eql(ty)); - assert(self.air.getType(bin_op.rhs).eql(ty)); + assert(self.air.typeOf(bin_op.lhs).eql(ty)); + assert(self.air.typeOf(bin_op.rhs).eql(ty)); // Binary operations are generally applicable to both scalar and vector operations // in SPIR-V, but int and float versions of operations require different opcodes. @@ -744,8 +748,8 @@ pub const DeclGen = struct { return self.fail("TODO: SPIR-V backend: binary operations for strange integers", .{}); }, .integer => switch (info.signedness) { - .signed => 1, - .unsigned => 2, + .signed => @as(usize, 1), + .unsigned => @as(usize, 2), }, .float => 0, else => unreachable, @@ -759,14 +763,14 @@ pub const DeclGen = struct { return result_id; } - fn genCmp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId { + fn airCmp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs_id = try self.resolve(bin_op.lhs); const rhs_id = try self.resolve(bin_op.rhs); const result_id = self.spv.allocResultId(); const result_type_id = try self.genType(Type.initTag(.bool)); - const op_ty = self.air.getType(bin_op.lhs); - assert(op_ty.eql(self.air.getType(bin_op.rhs))); + const op_ty = self.air.typeOf(bin_op.lhs); + assert(op_ty.eql(self.air.typeOf(bin_op.rhs))); // Comparisons are generally applicable to both scalar and vector operations in SPIR-V, // but int and float versions of operations require different opcodes. 
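
To make the opcode-tuple convention concrete: each ops argument in the dispatch table above is ordered {float, signed-or-bool, unsigned}, and the index computed in the next hunk selects among them. For example (an illustrative sketch, not code from this patch):

    // .cmp_gt => ops = .{ .OpFOrdGreaterThan, .OpSGreaterThan, .OpUGreaterThan }
    //   float operand        => ops[0] == .OpFOrdGreaterThan
    //   signed int operand   => ops[1] == .OpSGreaterThan
    //   unsigned int operand => ops[2] == .OpUGreaterThan
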
@@ -782,10 +786,9 @@ pub const DeclGen = struct { .float => 0, .bool => 1, .integer => switch (info.signedness) { - .signed => 1, - .unsigned => 2, + .signed => @as(usize, 1), + .unsigned => @as(usize, 2), }, - else => unreachable, }; const opcode = ops[opcode_index]; @@ -793,7 +796,7 @@ pub const DeclGen = struct { return result_id; } - fn genNot(self: *DeclGen, inst: Air.Inst.Index) !ResultId { + fn airNot(self: *DeclGen, inst: Air.Inst.Index) !ResultId { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_id = try self.resolve(ty_op.operand); const result_id = self.spv.allocResultId(); @@ -803,8 +806,8 @@ pub const DeclGen = struct { return result_id; } - fn genAlloc(self: *DeclGen, inst: Air.Inst.Index) !ResultId { - const ty = self.air.getType(inst); + fn airAlloc(self: *DeclGen, inst: Air.Inst.Index) !ResultId { + const ty = self.air.typeOfIndex(inst); const storage_class = spec.StorageClass.Function; const result_type_id = try self.genPointerType(ty, storage_class); const result_id = self.spv.allocResultId(); @@ -816,12 +819,12 @@ pub const DeclGen = struct { return result_id; } - fn genArg(self: *DeclGen) ResultId { + fn airArg(self: *DeclGen) ResultId { defer self.next_arg_index += 1; return self.args.items[self.next_arg_index]; } - fn genBlock(self: *DeclGen, inst: Air.Inst.Index) !?ResultId { + fn airBlock(self: *DeclGen, inst: Air.Inst.Index) !?ResultId { // In IR, a block doesn't really define an entry point like a block, but more like a scope that breaks can jump out of and // "return" a value from. This cannot be directly modelled in SPIR-V, so in a block instruction, we're going to split up // the current block by first generating the code of the block, then a label, and then generate the rest of the current @@ -841,7 +844,7 @@ pub const DeclGen = struct { incoming_blocks.deinit(self.spv.gpa); } - const ty = self.air.getType(inst); + const ty = self.air.typeOfIndex(inst); const inst_datas = self.air.instructions.items(.data); const extra = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; @@ -872,10 +875,10 @@ pub const DeclGen = struct { return result_id; } - fn genBr(self: *DeclGen, inst: Air.Inst.Index) !void { + fn airBr(self: *DeclGen, inst: Air.Inst.Index) !void { const br = self.air.instructions.items(.data)[inst].br; const block = self.blocks.get(br.block_inst).?; - const operand_ty = self.air.getType(br.operand); + const operand_ty = self.air.typeOf(br.operand); if (operand_ty.hasCodeGenBits()) { const operand_id = try self.resolve(br.operand); @@ -886,7 +889,7 @@ pub const DeclGen = struct { try writeInstruction(&self.code, .OpBranch, &[_]Word{block.label_id}); } - fn genCondBr(self: *DeclGen, inst: *Inst.CondBr) !void { + fn airCondBr(self: *DeclGen, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const cond_br = self.air.extraData(Air.CondBr, pl_op.payload); const then_body = self.air.extra[cond_br.end..][0..cond_br.data.then_body_len]; @@ -912,16 +915,16 @@ pub const DeclGen = struct { try self.genBody(else_body); } - fn genDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void { + fn airDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void { const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt; const src_fname_id = try self.spv.resolveSourceFileName(self.decl); try writeInstruction(&self.code, .OpLine, &[_]Word{ src_fname_id, dbg_stmt.line, dbg_stmt.column }); } - fn genLoad(self: *DeclGen, inst: 
Air.Inst.Index) !ResultId { + fn airLoad(self: *DeclGen, inst: Air.Inst.Index) !ResultId { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_id = try self.resolve(ty_op.operand); - const ty = self.air.getType(inst); + const ty = self.air.typeOfIndex(inst); const result_type_id = try self.genType(ty); const result_id = self.spv.allocResultId(); @@ -936,8 +939,9 @@ pub const DeclGen = struct { return result_id; } - fn genLoop(self: *DeclGen, inst: Air.Inst.Index) !void { - const loop = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); + fn airLoop(self: *DeclGen, inst: Air.Inst.Index) !void { + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const loop = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[loop.end..][0..loop.data.body_len]; const loop_label_id = self.spv.allocResultId(); @@ -952,9 +956,9 @@ pub const DeclGen = struct { try writeInstruction(&self.code, .OpBranch, &[_]Word{loop_label_id}); } - fn genRet(self: *DeclGen, inst: Air.Inst.Index) !void { - const operand = inst_datas[inst].un_op; - const operand_ty = self.air.getType(operand); + fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void { + const operand = self.air.instructions.items(.data)[inst].un_op; + const operand_ty = self.air.typeOf(operand); if (operand_ty.hasCodeGenBits()) { const operand_id = try self.resolve(operand); try writeInstruction(&self.code, .OpReturnValue, &[_]Word{operand_id}); @@ -963,11 +967,11 @@ pub const DeclGen = struct { } } - fn genStore(self: *DeclGen, inst: Air.Inst.Index) !void { + fn airStore(self: *DeclGen, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const dst_ptr_id = try self.resolve(bin_op.lhs); const src_val_id = try self.resolve(bin_op.rhs); - const lhs_ty = self.air.getType(bin_op.lhs); + const lhs_ty = self.air.typeOf(bin_op.lhs); const operands = if (lhs_ty.isVolatilePtr()) &[_]Word{ dst_ptr_id, src_val_id, @bitCast(u32, spec.MemoryAccess{ .Volatile = true }) } @@ -977,7 +981,7 @@ pub const DeclGen = struct { try writeInstruction(&self.code, .OpStore, operands); } - fn genUnreach(self: *DeclGen) !void { + fn airUnreach(self: *DeclGen) !void { try writeInstruction(&self.code, .OpUnreachable, &[_]Word{}); } }; diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig index 5cf3fb15fd..dbca818297 100644 --- a/src/codegen/wasm.zig +++ b/src/codegen/wasm.zig @@ -774,7 +774,7 @@ pub const Context = struct { } } return Result{ .externally_managed = payload.data }; - } else return self.fail(.{ .node_offset = 0 }, "TODO implement gen for more kinds of arrays", .{}); + } else return self.fail("TODO implement gen for more kinds of arrays", .{}); }, .Int => { const info = typed_value.ty.intInfo(self.target); @@ -783,9 +783,9 @@ pub const Context = struct { try self.code.append(@intCast(u8, int_byte)); return Result.appended; } - return self.fail(.{ .node_offset = 0 }, "TODO: Implement codegen for int type: '{}'", .{typed_value.ty}); + return self.fail("TODO: Implement codegen for int type: '{}'", .{typed_value.ty}); }, - else => |tag| return self.fail(.{ .node_offset = 0 }, "TODO: Implement zig type codegen for type: '{s}'", .{tag}), + else => |tag| return self.fail("TODO: Implement zig type codegen for type: '{s}'", .{tag}), } } @@ -883,7 +883,7 @@ pub const Context = struct { } fn genAlloc(self: *Context, inst: Air.Inst.Index) InnerError!WValue { - const elem_type = self.air.getType(inst).elemType(); + const elem_type = self.air.typeOfIndex(inst).elemType(); return 
self.allocLocal(elem_type); } diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 44442b73a3..50ad6bc1a0 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -657,11 +657,16 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void { } pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { - if (build_options.skip_non_native and builtin.object_format != .coff and builtin.object_format != .pe) { + if (build_options.skip_non_native and + builtin.object_format != .coff and + builtin.object_format != .pe) + { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + if (self.llvm_object) |llvm_object| { + return llvm_object.updateFunc(module, func, air, liveness); + } } const tracy = trace(@src()); defer tracy.end(); @@ -669,6 +674,7 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); + const decl = func.owner_decl; const res = try codegen.generateFunction( &self.base, decl.srcLoc(), @@ -679,7 +685,6 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live .none, ); const code = switch (res) { - .externally_managed => |x| x, .appended => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; @@ -725,10 +730,10 @@ pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void { }, }; - return self.finishUpdateDecl(module, func.owner_decl, code); + return self.finishUpdateDecl(module, decl, code); } -fn finishUpdateDecl(self: *Coff, decl: *Module.Decl, code: []const u8) !void { +fn finishUpdateDecl(self: *Coff, module: *Module, decl: *Module.Decl, code: []const u8) !void { const required_alignment = decl.ty.abiAlignment(self.base.options.target); const curr_size = decl.link.coff.size; if (curr_size != 0) { diff --git a/src/link/MachO.zig b/src/link/MachO.zig index cd020c1b27..4107607924 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1150,9 +1150,13 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - var debug_buffers = if (self.d_sym) |*ds| try ds.initDeclDebugBuffers(self.base.allocator, module, decl) else null; + var debug_buffers_buf: DebugSymbols.DeclDebugBuffers = undefined; + const debug_buffers = if (self.d_sym) |*ds| blk: { + debug_buffers_buf = try ds.initDeclDebugBuffers(self.base.allocator, module, decl); + break :blk &debug_buffers_buf; + } else null; defer { - if (debug_buffers) |*dbg| { + if (debug_buffers) |dbg| { dbg.dbg_line_buffer.deinit(); dbg.dbg_info_buffer.deinit(); var it = dbg.dbg_info_type_relocs.valueIterator(); @@ -1163,7 +1167,7 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv } } - const res = if (debug_buffers) |*dbg| + const res = if (debug_buffers) |dbg| try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{ .dwarf = .{ .dbg_line = &dbg.dbg_line_buffer, @@ -1172,9 +1176,109 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv }, }) else - try codegen.generateSymbol(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none); + try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, 
.none); + switch (res) { + .appended => {}, + .fail => |em| { + // Clear any PIE fixups for this decl. + self.pie_fixups.shrinkRetainingCapacity(0); + // Clear any stub fixups for this decl. + self.stub_fixups.shrinkRetainingCapacity(0); + decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, decl, em); + return; + }, + } + const symbol = try self.placeDecl(decl, code_buffer.items.len); + + // Calculate displacements to target addr (if any). + while (self.pie_fixups.popOrNull()) |fixup| { + assert(fixup.size == 4); + const this_addr = symbol.n_value + fixup.offset; + const target_addr = fixup.target_addr; + + switch (self.base.options.target.cpu.arch) { + .x86_64 => { + const displacement = try math.cast(u32, target_addr - this_addr - 4); + mem.writeIntLittle(u32, code_buffer.items[fixup.offset..][0..4], displacement); + }, + .aarch64 => { + // TODO optimize instruction based on jump length (use ldr(literal) + nop if possible). + { + const inst = code_buffer.items[fixup.offset..][0..4]; + const parsed = mem.bytesAsValue(meta.TagPayload( + aarch64.Instruction, + aarch64.Instruction.pc_relative_address, + ), inst); + const this_page = @intCast(i32, this_addr >> 12); + const target_page = @intCast(i32, target_addr >> 12); + const pages = @bitCast(u21, @intCast(i21, target_page - this_page)); + parsed.immhi = @truncate(u19, pages >> 2); + parsed.immlo = @truncate(u2, pages); + } + { + const inst = code_buffer.items[fixup.offset + 4 ..][0..4]; + const parsed = mem.bytesAsValue(meta.TagPayload( + aarch64.Instruction, + aarch64.Instruction.load_store_register, + ), inst); + const narrowed = @truncate(u12, target_addr); + const offset = try math.divExact(u12, narrowed, 8); + parsed.offset = offset; + } + }, + else => unreachable, // unsupported target architecture + } + } + + // Resolve stubs (if any) + const text_segment = self.load_commands.items[self.text_segment_cmd_index.?].Segment; + const stubs = text_segment.sections.items[self.stubs_section_index.?]; + for (self.stub_fixups.items) |fixup| { + const stub_addr = stubs.addr + fixup.symbol * stubs.reserved2; + const text_addr = symbol.n_value + fixup.start; + switch (self.base.options.target.cpu.arch) { + .x86_64 => { + assert(stub_addr >= text_addr + fixup.len); + const displacement = try math.cast(u32, stub_addr - text_addr - fixup.len); + const placeholder = code_buffer.items[fixup.start + fixup.len - @sizeOf(u32) ..][0..@sizeOf(u32)]; + mem.writeIntSliceLittle(u32, placeholder, displacement); + }, + .aarch64 => { + assert(stub_addr >= text_addr); + const displacement = try math.cast(i28, stub_addr - text_addr); + const placeholder = code_buffer.items[fixup.start..][0..fixup.len]; + mem.writeIntSliceLittle(u32, placeholder, aarch64.Instruction.bl(displacement).toU32()); + }, + else => unreachable, // unsupported target architecture + } + if (!fixup.already_defined) { + try self.writeStub(fixup.symbol); + try self.writeStubInStubHelper(fixup.symbol); + try self.writeLazySymbolPointer(fixup.symbol); + + self.rebase_info_dirty = true; + self.lazy_binding_info_dirty = true; + } + } + self.stub_fixups.shrinkRetainingCapacity(0); + + try self.writeCode(symbol, code_buffer.items); - return self.finishUpdateDecl(module, decl, res); + if (debug_buffers) |db| { + try self.d_sym.?.commitDeclDebugInfo( + self.base.allocator, + module, + decl, + db, + self.base.options.target, + ); + } + + // Since we updated the vaddr and the size, each corresponding export symbol also + // needs to be updated. 
+ const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{}; + try self.updateDeclExports(module, decl, decl_exports); } pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { @@ -1194,9 +1298,13 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - var debug_buffers = if (self.d_sym) |*ds| try ds.initDeclDebugBuffers(self.base.allocator, module, decl) else null; + var debug_buffers_buf: DebugSymbols.DeclDebugBuffers = undefined; + const debug_buffers = if (self.d_sym) |*ds| blk: { + debug_buffers_buf = try ds.initDeclDebugBuffers(self.base.allocator, module, decl); + break :blk &debug_buffers_buf; + } else null; defer { - if (debug_buffers) |*dbg| { + if (debug_buffers) |dbg| { dbg.dbg_line_buffer.deinit(); dbg.dbg_info_buffer.deinit(); var it = dbg.dbg_info_type_relocs.valueIterator(); @@ -1207,7 +1315,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { } } - const res = if (debug_buffers) |*dbg| + const res = if (debug_buffers) |dbg| try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ .ty = decl.ty, .val = decl.val, @@ -1224,33 +1332,37 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { .val = decl.val, }, &code_buffer, .none); - return self.finishUpdateDecl(module, decl, res); -} - -fn finishUpdateDecl(self: *MachO, module: *Module, decl: *Module.Decl, res: codegen.Result) !void { const code = switch (res) { .externally_managed => |x| x, .appended => code_buffer.items, .fail => |em| { - // Clear any PIE fixups for this decl. - self.pie_fixups.shrinkRetainingCapacity(0); - // Clear any stub fixups for this decl. - self.stub_fixups.shrinkRetainingCapacity(0); decl.analysis = .codegen_failure; try module.failed_decls.put(module.gpa, decl, em); return; }, }; + const symbol = try self.placeDecl(decl, code.len); + assert(self.pie_fixups.items.len == 0); + assert(self.stub_fixups.items.len == 0); + try self.writeCode(symbol, code); + + // Since we updated the vaddr and the size, each corresponding export symbol also + // needs to be updated. 
+ const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{}; + try self.updateDeclExports(module, decl, decl_exports); +} + +fn placeDecl(self: *MachO, decl: *Module.Decl, code_len: usize) !*macho.nlist_64 { const required_alignment = decl.ty.abiAlignment(self.base.options.target); assert(decl.link.macho.local_sym_index != 0); // Caller forgot to call allocateDeclIndexes() const symbol = &self.locals.items[decl.link.macho.local_sym_index]; if (decl.link.macho.size != 0) { const capacity = decl.link.macho.capacity(self.*); - const need_realloc = code.len > capacity or !mem.isAlignedGeneric(u64, symbol.n_value, required_alignment); + const need_realloc = code_len > capacity or !mem.isAlignedGeneric(u64, symbol.n_value, required_alignment); if (need_realloc) { - const vaddr = try self.growTextBlock(&decl.link.macho, code.len, required_alignment); + const vaddr = try self.growTextBlock(&decl.link.macho, code_len, required_alignment); log.debug("growing {s} and moving from 0x{x} to 0x{x}", .{ decl.name, symbol.n_value, vaddr }); @@ -1265,10 +1377,10 @@ fn finishUpdateDecl(self: *MachO, module: *Module, decl: *Module.Decl, res: code } symbol.n_value = vaddr; - } else if (code.len < decl.link.macho.size) { - self.shrinkTextBlock(&decl.link.macho, code.len); + } else if (code_len < decl.link.macho.size) { + self.shrinkTextBlock(&decl.link.macho, code_len); } - decl.link.macho.size = code.len; + decl.link.macho.size = code_len; const new_name = try std.fmt.allocPrint(self.base.allocator, "_{s}", .{mem.spanZ(decl.name)}); defer self.base.allocator.free(new_name); @@ -1286,7 +1398,7 @@ fn finishUpdateDecl(self: *MachO, module: *Module, decl: *Module.Decl, res: code defer self.base.allocator.free(decl_name); const name_str_index = try self.makeString(decl_name); - const addr = try self.allocateTextBlock(&decl.link.macho, code.len, required_alignment); + const addr = try self.allocateTextBlock(&decl.link.macho, code_len, required_alignment); log.debug("allocated text block for {s} at 0x{x}", .{ decl_name, addr }); @@ -1311,96 +1423,15 @@ fn finishUpdateDecl(self: *MachO, module: *Module, decl: *Module.Decl, res: code try self.writeOffsetTableEntry(decl.link.macho.offset_table_index); } - // Calculate displacements to target addr (if any). - while (self.pie_fixups.popOrNull()) |fixup| { - assert(fixup.size == 4); - const this_addr = symbol.n_value + fixup.offset; - const target_addr = fixup.target_addr; - - switch (self.base.options.target.cpu.arch) { - .x86_64 => { - const displacement = try math.cast(u32, target_addr - this_addr - 4); - mem.writeIntLittle(u32, code_buffer.items[fixup.offset..][0..4], displacement); - }, - .aarch64 => { - // TODO optimize instruction based on jump length (use ldr(literal) + nop if possible). 
- { - const inst = code_buffer.items[fixup.offset..][0..4]; - var parsed = mem.bytesAsValue(meta.TagPayload( - aarch64.Instruction, - aarch64.Instruction.pc_relative_address, - ), inst); - const this_page = @intCast(i32, this_addr >> 12); - const target_page = @intCast(i32, target_addr >> 12); - const pages = @bitCast(u21, @intCast(i21, target_page - this_page)); - parsed.immhi = @truncate(u19, pages >> 2); - parsed.immlo = @truncate(u2, pages); - } - { - const inst = code_buffer.items[fixup.offset + 4 ..][0..4]; - var parsed = mem.bytesAsValue(meta.TagPayload( - aarch64.Instruction, - aarch64.Instruction.load_store_register, - ), inst); - const narrowed = @truncate(u12, target_addr); - const offset = try math.divExact(u12, narrowed, 8); - parsed.offset = offset; - } - }, - else => unreachable, // unsupported target architecture - } - } - - // Resolve stubs (if any) - const text_segment = self.load_commands.items[self.text_segment_cmd_index.?].Segment; - const stubs = text_segment.sections.items[self.stubs_section_index.?]; - for (self.stub_fixups.items) |fixup| { - const stub_addr = stubs.addr + fixup.symbol * stubs.reserved2; - const text_addr = symbol.n_value + fixup.start; - switch (self.base.options.target.cpu.arch) { - .x86_64 => { - assert(stub_addr >= text_addr + fixup.len); - const displacement = try math.cast(u32, stub_addr - text_addr - fixup.len); - var placeholder = code_buffer.items[fixup.start + fixup.len - @sizeOf(u32) ..][0..@sizeOf(u32)]; - mem.writeIntSliceLittle(u32, placeholder, displacement); - }, - .aarch64 => { - assert(stub_addr >= text_addr); - const displacement = try math.cast(i28, stub_addr - text_addr); - var placeholder = code_buffer.items[fixup.start..][0..fixup.len]; - mem.writeIntSliceLittle(u32, placeholder, aarch64.Instruction.bl(displacement).toU32()); - }, - else => unreachable, // unsupported target architecture - } - if (!fixup.already_defined) { - try self.writeStub(fixup.symbol); - try self.writeStubInStubHelper(fixup.symbol); - try self.writeLazySymbolPointer(fixup.symbol); - - self.rebase_info_dirty = true; - self.lazy_binding_info_dirty = true; - } - } - self.stub_fixups.shrinkRetainingCapacity(0); + return symbol; +} +fn writeCode(self: *MachO, symbol: *macho.nlist_64, code: []const u8) !void { + const text_segment = &self.load_commands.items[self.text_segment_cmd_index.?].Segment; const text_section = text_segment.sections.items[self.text_section_index.?]; const section_offset = symbol.n_value - text_section.addr; const file_offset = text_section.offset + section_offset; try self.base.file.?.pwriteAll(code, file_offset); - - if (debug_buffers) |*db| { - try self.d_sym.?.commitDeclDebugInfo( - self.base.allocator, - module, - decl, - db, - self.base.options.target, - ); - } - - // Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated. - const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{}; - try self.updateDeclExports(module, decl, decl_exports); } pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl: *const Module.Decl) !void { diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index bc9e560582..17b656a06c 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -51,7 +51,12 @@ base: link.File, /// This linker backend does not try to incrementally link output SPIR-V code. /// Instead, it tracks all declarations in this table, and iterates over it /// in the flush function. 
-decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, void) = .{}, +decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, DeclGenContext) = .{}, + +const DeclGenContext = struct { + air: Air, + liveness: Liveness, +}; pub fn createEmpty(gpa: *Allocator, options: link.Options) !*SpirV { const spirv = try gpa.create(SpirV); @@ -181,10 +186,15 @@ pub fn flushModule(self: *SpirV, comp: *Compilation) !void { var decl_gen = codegen.DeclGen.init(&spv); defer decl_gen.deinit(); - for (self.decl_table.keys()) |decl| { + var it = self.decl_table.iterator(); + while (it.next()) |entry| { + const decl = entry.key_ptr.*; if (!decl.has_tv) continue; - if (try decl_gen.gen(decl)) |msg| { + const air = entry.value_ptr.air; + const liveness = entry.value_ptr.liveness; + + if (try decl_gen.gen(decl, air, liveness)) |msg| { try module.failed_decls.put(module.gpa, decl, msg); return; // TODO: Attempt to generate more decls? } diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 81e50c46b6..d9139a178c 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -250,6 +250,8 @@ pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void { var context = codegen.Context{ .gpa = self.base.allocator, + .air = undefined, + .liveness = undefined, .values = .{}, .code = fn_data.code.toManaged(self.base.allocator), .func_type_data = fn_data.functype.toManaged(self.base.allocator), -- cgit v1.2.3 From 44fe9c52e15deb1a1ea82760c5f4204d50fb0e9f Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 18 Jul 2021 22:32:07 -0700 Subject: stage2: wasm backend: update to latest naming convention --- src/codegen/wasm.zig | 114 ++++++++++++++++++++++++++------------------------- 1 file changed, 58 insertions(+), 56 deletions(-) (limited to 'src/codegen/wasm.zig') diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig index dbca818297..e7e498ef40 100644 --- a/src/codegen/wasm.zig +++ b/src/codegen/wasm.zig @@ -792,43 +792,45 @@ pub const Context = struct { fn genInst(self: *Context, inst: Air.Inst.Index) !WValue { const air_tags = self.air.instructions.items(.tag); return switch (air_tags[inst]) { - .add => self.genBinOp(inst, .add), - .alloc => self.genAlloc(inst), - .arg => self.genArg(inst), - .bit_and => self.genBinOp(inst, .@"and"), - .bitcast => self.genBitcast(inst), - .bit_or => self.genBinOp(inst, .@"or"), - .block => self.genBlock(inst), - .bool_and => self.genBinOp(inst, .@"and"), - .bool_or => self.genBinOp(inst, .@"or"), - .breakpoint => self.genBreakpoint(inst), - .br => self.genBr(inst), - .call => self.genCall(inst), - .cmp_eq => self.genCmp(inst, .eq), - .cmp_gte => self.genCmp(inst, .gte), - .cmp_gt => self.genCmp(inst, .gt), - .cmp_lte => self.genCmp(inst, .lte), - .cmp_lt => self.genCmp(inst, .lt), - .cmp_neq => self.genCmp(inst, .neq), - .cond_br => self.genCondBr(inst), + .add => self.airBinOp(inst, .add), + .sub => self.airBinOp(inst, .sub), + .mul => self.airBinOp(inst, .mul), + .div => self.airBinOp(inst, .div), + .bit_and => self.airBinOp(inst, .@"and"), + .bit_or => self.airBinOp(inst, .@"or"), + .bool_and => self.airBinOp(inst, .@"and"), + .bool_or => self.airBinOp(inst, .@"or"), + .xor => self.airBinOp(inst, .xor), + + .cmp_eq => self.airCmp(inst, .eq), + .cmp_gte => self.airCmp(inst, .gte), + .cmp_gt => self.airCmp(inst, .gt), + .cmp_lte => self.airCmp(inst, .lte), + .cmp_lt => self.airCmp(inst, .lt), + .cmp_neq => self.airCmp(inst, .neq), + + .alloc => self.airAlloc(inst), + .arg => self.airArg(inst), + .bitcast => self.airBitcast(inst), + .block => self.airBlock(inst), + 
.breakpoint => self.airBreakpoint(inst), + .br => self.airBr(inst), + .call => self.airCall(inst), + .cond_br => self.airCondBr(inst), .constant => unreachable, .dbg_stmt => WValue.none, - .div => self.genBinOp(inst, .div), - .is_err => self.genIsErr(inst, .i32_ne), - .is_non_err => self.genIsErr(inst, .i32_eq), - .load => self.genLoad(inst), - .loop => self.genLoop(inst), - .mul => self.genBinOp(inst, .mul), - .not => self.genNot(inst), - .ret => self.genRet(inst), - .store => self.genStore(inst), - .struct_field_ptr => self.genStructFieldPtr(inst), - .sub => self.genBinOp(inst, .sub), - .switch_br => self.genSwitchBr(inst), - .unreach => self.genUnreachable(inst), - .unwrap_errunion_payload => self.genUnwrapErrUnionPayload(inst), - .wrap_errunion_payload => self.genWrapErrUnionPayload(inst), - .xor => self.genBinOp(inst, .xor), + .is_err => self.airIsErr(inst, .i32_ne), + .is_non_err => self.airIsErr(inst, .i32_eq), + .load => self.airLoad(inst), + .loop => self.airLoop(inst), + .not => self.airNot(inst), + .ret => self.airRet(inst), + .store => self.airStore(inst), + .struct_field_ptr => self.airStructFieldPtr(inst), + .switch_br => self.airSwitchBr(inst), + .unreach => self.airUnreachable(inst), + .unwrap_errunion_payload => self.airUnwrapErrUnionPayload(inst), + .wrap_errunion_payload => self.airWrapErrUnionPayload(inst), else => |tag| self.fail("TODO: Implement wasm inst: {s}", .{@tagName(tag)}), }; } @@ -840,7 +842,7 @@ pub const Context = struct { } } - fn genRet(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airRet(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = self.resolveInst(un_op); try self.emitWValue(operand); @@ -848,7 +850,7 @@ pub const Context = struct { return .none; } - fn genCall(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airCall(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Call, pl_op.payload); const args = self.air.extra[extra.end..][0..extra.data.args_len]; @@ -882,12 +884,12 @@ pub const Context = struct { return .none; } - fn genAlloc(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airAlloc(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const elem_type = self.air.typeOfIndex(inst).elemType(); return self.allocLocal(elem_type); } - fn genStore(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airStore(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const writer = self.code.writer(); @@ -926,19 +928,19 @@ pub const Context = struct { return .none; } - fn genLoad(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airLoad(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const ty_op = self.air.instructions.items(.data)[inst].ty_op; return self.resolveInst(ty_op.operand); } - fn genArg(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airArg(self: *Context, inst: Air.Inst.Index) InnerError!WValue { _ = inst; // arguments share the index with locals defer self.local_index += 1; return WValue{ .local = self.local_index }; } - fn genBinOp(self: *Context, inst: Air.Inst.Index, op: Op) InnerError!WValue { + fn airBinOp(self: *Context, inst: Air.Inst.Index, op: Op) InnerError!WValue { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = self.resolveInst(bin_op.lhs); const rhs 
= self.resolveInst(bin_op.rhs); @@ -1074,7 +1076,7 @@ pub const Context = struct { } } - fn genBlock(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airBlock(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const block_ty = try self.genBlockType(self.air.getRefType(ty_pl.ty)); const extra = self.air.extraData(Air.Block, ty_pl.payload); @@ -1108,7 +1110,7 @@ pub const Context = struct { self.block_depth -= 1; } - fn genLoop(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airLoop(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const loop = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[loop.end..][0..loop.data.body_len]; @@ -1127,7 +1129,7 @@ pub const Context = struct { return .none; } - fn genCondBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airCondBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const condition = self.resolveInst(pl_op.operand); const extra = self.air.extraData(Air.CondBr, pl_op.payload); @@ -1166,7 +1168,7 @@ pub const Context = struct { return .none; } - fn genCmp(self: *Context, inst: Air.Inst.Index, op: std.math.CompareOperator) InnerError!WValue { + fn airCmp(self: *Context, inst: Air.Inst.Index, op: std.math.CompareOperator) InnerError!WValue { // save offset, so potential conditions can insert blocks in front of // the comparison that we can later jump back to const offset = self.code.items.len; @@ -1202,7 +1204,7 @@ pub const Context = struct { return WValue{ .code_offset = offset }; } - fn genBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const br = self.air.instructions.items(.data)[inst].br; // if operand has codegen bits we should break with a value @@ -1220,7 +1222,7 @@ pub const Context = struct { return .none; } - fn genNot(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airNot(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const offset = self.code.items.len; @@ -1238,7 +1240,7 @@ pub const Context = struct { return WValue{ .code_offset = offset }; } - fn genBreakpoint(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airBreakpoint(self: *Context, inst: Air.Inst.Index) InnerError!WValue { _ = self; _ = inst; // unsupported by wasm itself. 
Can be implemented once we support DWARF @@ -1246,18 +1248,18 @@ pub const Context = struct { return .none; } - fn genUnreachable(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airUnreachable(self: *Context, inst: Air.Inst.Index) InnerError!WValue { _ = inst; try self.code.append(wasm.opcode(.@"unreachable")); return .none; } - fn genBitcast(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airBitcast(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const ty_op = self.air.instructions.items(.data)[inst].ty_op; return self.resolveInst(ty_op.operand); } - fn genStructFieldPtr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airStructFieldPtr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.StructField, ty_pl.payload); const struct_ptr = self.resolveInst(extra.data.struct_ptr); @@ -1265,7 +1267,7 @@ pub const Context = struct { return WValue{ .local = struct_ptr.multi_value.index + @intCast(u32, extra.data.field_index) }; } - fn genSwitchBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airSwitchBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.SwitchBr, pl_op.payload); const cases = self.air.extra[extra.end..][0..extra.data.cases_len]; @@ -1319,7 +1321,7 @@ pub const Context = struct { return .none; } - fn genIsErr(self: *Context, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!WValue { + fn airIsErr(self: *Context, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!WValue { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = self.resolveInst(un_op); const offset = self.code.items.len; @@ -1336,7 +1338,7 @@ pub const Context = struct { return WValue{ .code_offset = offset }; } - fn genUnwrapErrUnionPayload(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airUnwrapErrUnionPayload(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = self.resolveInst(ty_op.operand); // The index of multi_value contains the error code. 
To get the initial index of the payload we get @@ -1346,7 +1348,7 @@ pub const Context = struct { return WValue{ .local = operand.multi_value.index + 1 }; } - fn genWrapErrUnionPayload(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airWrapErrUnionPayload(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const ty_op = self.air.instructions.items(.data)[inst].ty_op; return self.resolveInst(ty_op.operand); } -- cgit v1.2.3 From 1150fc13dc779c91d54538304466cc068ccbf8ed Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Mon, 19 Jul 2021 15:56:02 +0200 Subject: wasm: Resolve regressions, add intcast support --- src/codegen/wasm.zig | 47 ++++++++++++++++++++++++----------------------- 1 file changed, 24 insertions(+), 23 deletions(-) (limited to 'src/codegen/wasm.zig') diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig index e7e498ef40..b6edfb7b20 100644 --- a/src/codegen/wasm.zig +++ b/src/codegen/wasm.zig @@ -24,8 +24,8 @@ const WValue = union(enum) { none: void, /// Index of the local variable local: u32, - /// Instruction holding a constant `Value` - constant: Air.Inst.Index, + /// Holds a memoized typed value + constant: TypedValue, /// Offset position in the list of bytecode instructions code_offset: usize, /// Used for variables that create multiple locals on the stack when allocated @@ -484,7 +484,7 @@ pub const Result = union(enum) { }; /// Hashmap to store generated `WValue` for each `Air.Inst.Ref` -pub const ValueTable = std.AutoHashMapUnmanaged(Air.Inst.Ref, WValue); +pub const ValueTable = std.AutoHashMapUnmanaged(Air.Inst.Index, WValue); /// Code represents the `Code` section of wasm that /// belongs to a function @@ -548,14 +548,23 @@ pub const Context = struct { /// Resolves the `WValue` for the given instruction `inst` /// When the given instruction has a `Value`, it returns a constant instead fn resolveInst(self: Context, ref: Air.Inst.Ref) WValue { - const ref_type = self.air.getRefType(ref); - if (ref_type.hasCodeGenBits()) return .none; + const inst_index = Air.refToIndex(ref) orelse { + const tv = Air.Inst.Ref.typed_value_map[@enumToInt(ref)]; + if (!tv.ty.hasCodeGenBits()) { + return WValue.none; + } + return WValue{ .constant = tv }; + }; - if (self.air.instructions.items(.tag)[@enumToInt(ref)] == .constant) { - return WValue{ .constant = @enumToInt(ref) }; + const inst_type = self.air.typeOfIndex(inst_index); + if (!inst_type.hasCodeGenBits()) return .none; + + if (self.air.instructions.items(.tag)[inst_index] == .constant) { + const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl; + return WValue{ .constant = .{ .ty = inst_type, .val = self.air.values[ty_pl.payload] } }; } - return self.values.get(ref).?; // Instruction does not dominate all uses! + return self.values.get(inst_index).?; // Instruction does not dominate all uses! 
} /// Using a given `Type`, returns the corresponding wasm Valtype @@ -611,12 +620,7 @@ pub const Context = struct { try writer.writeByte(wasm.opcode(.local_get)); try leb.writeULEB128(writer, idx); }, - .constant => |index| { - const ty_pl = self.air.instructions.items(.data)[index].ty_pl; - const value = self.air.values[ty_pl.payload]; - // create a new constant onto the stack - try self.emitConstant(value, self.air.getRefType(ty_pl.ty)); - }, + .constant => |tv| try self.emitConstant(tv.val, tv.ty), // Creates a new constant on the stack } } @@ -838,7 +842,7 @@ pub const Context = struct { fn genBody(self: *Context, body: []const Air.Inst.Index) InnerError!void { for (body) |inst| { const result = try self.genInst(inst); - try self.values.putNoClobber(self.gpa, @intToEnum(Air.Inst.Ref, inst), result); + try self.values.putNoClobber(self.gpa, inst, result); } } @@ -856,8 +860,7 @@ pub const Context = struct { const args = self.air.extra[extra.end..][0..extra.data.args_len]; const target: *Decl = blk: { - const ty_pl = self.air.instructions.items(.data)[@enumToInt(pl_op.operand)].ty_pl; - const func_val = self.air.values[ty_pl.payload]; + const func_val = self.air.value(pl_op.operand).?; if (func_val.castTag(.function)) |func| { break :blk func.data.owner_decl; @@ -868,7 +871,7 @@ pub const Context = struct { }; for (args) |arg| { - const arg_val = self.resolveInst(@intToEnum(Air.Inst.Ref, arg)); + const arg_val = self.resolveInst(Air.indexToRef(arg)); try self.emitWValue(arg_val); } @@ -902,7 +905,7 @@ pub const Context = struct { // we simply assign the local_index to the rhs one. // This allows us to update struct fields without having to individually // set each local as each field's index will be calculated off the struct's base index - .multi_value => self.values.put(self.gpa, bin_op.lhs, rhs) catch unreachable, // Instruction does not dominate all uses! + .multi_value => self.values.put(self.gpa, Air.refToIndex(bin_op.lhs).?, rhs) catch unreachable, // Instruction does not dominate all uses! .constant, .none => { // emit all values onto the stack if constant try self.emitWValue(rhs); @@ -1294,10 +1297,8 @@ pub const Context = struct { try self.startBlock(.block, blocktype, null); try self.emitWValue(target); - // cases must represent a constant of which its type is in the `typed_value_map` - // Therefore we can simply retrieve it. - const ty_val = Air.Inst.Ref.typed_value_map[@enumToInt(case.data.item)]; - try self.emitConstant(ty_val.val, target_ty); + const val = self.air.value(case.data.item).?; + try self.emitConstant(val, target_ty); const opcode = buildOpcode(.{ .valtype1 = valtype, .op = .ne, // not equal because we jump out the block if it does not match the condition -- cgit v1.2.3 From caa0de545e2f45a96ac3136178f478dab1c89ebd Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Mon, 19 Jul 2021 21:50:15 +0200 Subject: Resolve regressions - Get correct types in wasm backend. - `arg` is already a `Ref`, therefore simply use `@intToEnum`. - Fix regression in `zirBoolBr, where the order of insertion was incorrect. 
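
For reference, the Ref <-> Index relationship these fixes rely on can be sketched as follows. This is illustrative only; it mirrors the Air.refToIndex/Air.indexToRef helpers and the typed_value_map prefix used throughout this series, not necessarily the exact implementation:

    // The first typed_value_map.len Ref values name well-known interned
    // constants; every Ref past that prefix is an instruction index, shifted.
    fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref {
        const ref_start = @intCast(u32, Air.Inst.Ref.typed_value_map.len);
        return @intToEnum(Air.Inst.Ref, ref_start + inst);
    }

    fn refToIndex(ref: Air.Inst.Ref) ?Air.Inst.Index {
        const ref_int = @enumToInt(ref);
        const ref_start = @intCast(u32, Air.Inst.Ref.typed_value_map.len);
        if (ref_int >= ref_start) return ref_int - ref_start;
        return null; // a constant, resolved via typed_value_map instead
    }

This is why an arg value read out of air.extra can be converted with a plain @intToEnum, and why resolveInst falls back to typed_value_map when refToIndex returns null.
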
--- src/Sema.zig | 10 +++++----- src/codegen/wasm.zig | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) (limited to 'src/codegen/wasm.zig') diff --git a/src/Sema.zig b/src/Sema.zig index d796ae2a5a..b3feeb8b1c 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -5295,11 +5295,6 @@ fn zirBoolBr( then_block.instructions.items.len + else_block.instructions.items.len + @typeInfo(Air.Block).Struct.fields.len + child_block.instructions.items.len); - sema.air_instructions.items(.data)[block_inst].ty_pl.payload = sema.addExtraAssumeCapacity( - Air.Block{ .body_len = @intCast(u32, child_block.instructions.items.len) }, - ); - sema.air_extra.appendSliceAssumeCapacity(child_block.instructions.items); - const cond_br_payload = sema.addExtraAssumeCapacity(Air.CondBr{ .then_body_len = @intCast(u32, then_block.instructions.items.len), .else_body_len = @intCast(u32, else_block.instructions.items.len), @@ -5312,6 +5307,11 @@ fn zirBoolBr( .payload = cond_br_payload, } } }); + sema.air_instructions.items(.data)[block_inst].ty_pl.payload = sema.addExtraAssumeCapacity( + Air.Block{ .body_len = @intCast(u32, child_block.instructions.items.len) }, + ); + sema.air_extra.appendSliceAssumeCapacity(child_block.instructions.items); + try parent_block.instructions.append(gpa, block_inst); return Air.indexToRef(block_inst); } diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig index b6edfb7b20..e72140d826 100644 --- a/src/codegen/wasm.zig +++ b/src/codegen/wasm.zig @@ -871,7 +871,7 @@ pub const Context = struct { }; for (args) |arg| { - const arg_val = self.resolveInst(Air.indexToRef(arg)); + const arg_val = self.resolveInst(@intToEnum(Air.Inst.Ref, arg)); try self.emitWValue(arg_val); } @@ -959,7 +959,7 @@ pub const Context = struct { try self.emitWValue(lhs); try self.emitWValue(rhs); - const bin_ty = self.air.getRefType(bin_op.lhs); + const bin_ty = self.air.typeOf(bin_op.lhs); const opcode: wasm.Opcode = buildOpcode(.{ .op = op, .valtype1 = try self.typeToValtype(bin_ty), @@ -1179,7 +1179,7 @@ pub const Context = struct { const data: Air.Inst.Data = self.air.instructions.items(.data)[inst]; const lhs = self.resolveInst(data.bin_op.lhs); const rhs = self.resolveInst(data.bin_op.rhs); - const lhs_ty = self.air.getRefType(data.bin_op.lhs); + const lhs_ty = self.air.typeOf(data.bin_op.lhs); try self.emitWValue(lhs); try self.emitWValue(rhs); @@ -1211,7 +1211,7 @@ pub const Context = struct { const br = self.air.instructions.items(.data)[inst].br; // if operand has codegen bits we should break with a value - if (self.air.getRefType(br.operand).hasCodeGenBits()) { + if (self.air.typeOf(br.operand).hasCodeGenBits()) { try self.emitWValue(self.resolveInst(br.operand)); } @@ -1277,7 +1277,7 @@ pub const Context = struct { const else_body = self.air.extra[extra.end + cases.len ..][0..extra.data.else_body_len]; const target = self.resolveInst(pl_op.operand); - const target_ty = self.air.getRefType(pl_op.operand); + const target_ty = self.air.typeOf(pl_op.operand); const valtype = try self.typeToValtype(target_ty); // result type is always 'noreturn' const blocktype = wasm.block_empty; -- cgit v1.2.3 From ea902ffe8f5f337b04f25b4efc69599db74d99ce Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 19 Jul 2021 17:35:14 -0700 Subject: Sema: reimplement runtime switch Now supports multiple items pointing to the same body. This is a common pattern even when using a jump table, with multiple cases pointing to the same block of code. 
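
Concretely, a backend now walks the cases by reading a fixed Case header followed by its trailing items and body. A sketch of the decode loop (assuming a cases_len field alongside else_body_len, with the trailing layout from the Air.zig hunk below; inst: Air.Inst.Index and air: Air are the enclosing function's parameters, and per-item conversion uses @intToEnum as elsewhere in this commit):

    const pl_op = air.instructions.items(.data)[inst].pl_op;
    const switch_br = air.extraData(Air.SwitchBr, pl_op.payload);
    var extra_index: usize = switch_br.end;
    var case_i: u32 = 0;
    while (case_i < switch_br.data.cases_len) : (case_i += 1) {
        const case = air.extraData(Air.SwitchBr.Case, extra_index);
        const items = air.extra[case.end..][0..case.data.items_len];
        const body = air.extra[case.end + items.len ..][0..case.data.body_len];
        extra_index = case.end + items.len + body.len;
        for (items) |item| {
            // Each item is an Air.Inst.Ref stored as a u32.
            const item_ref = @intToEnum(Air.Inst.Ref, item);
            _ = item_ref; // compare the switch operand against this item ...
        }
        // ... then lower `body` once, shared by all of the case's items.
    }
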
In the case of a range specified, the items are moved to branches in the else body. A future improvement may make it possible to have jump table items as well as ranges pointing to the same block of code. --- src/Air.zig | 3 +- src/Module.zig | 6 +- src/Sema.zig | 338 +++++++++++++++++++++++++++++---------------------- src/codegen/wasm.zig | 81 ++++++------ 4 files changed, 240 insertions(+), 188 deletions(-) (limited to 'src/codegen/wasm.zig') diff --git a/src/Air.zig b/src/Air.zig index 0e19202244..718123818b 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -352,9 +352,10 @@ pub const SwitchBr = struct { else_body_len: u32, /// Trailing: + /// * item: Inst.Ref // for each `items_len`. /// * instruction index for each `body_len`. pub const Case = struct { - item: Inst.Ref, + items_len: u32, body_len: u32, }; }; diff --git a/src/Module.zig b/src/Module.zig index c101221f2e..9fadf67c6f 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1300,6 +1300,10 @@ pub const Scope = struct { } pub fn addInst(block: *Block, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Ref { + return Air.indexToRef(try block.addInstAsIndex(inst)); + } + + pub fn addInstAsIndex(block: *Block, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Index { const sema = block.sema; const gpa = sema.gpa; @@ -1309,7 +1313,7 @@ pub const Scope = struct { const result_index = @intCast(Air.Inst.Index, sema.air_instructions.len); sema.air_instructions.appendAssumeCapacity(inst); block.instructions.appendAssumeCapacity(result_index); - return Air.indexToRef(result_index); + return result_index; } }; }; diff --git a/src/Sema.zig b/src/Sema.zig index b3feeb8b1c..b9449157e2 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -4170,159 +4170,201 @@ fn analyzeSwitch( try sema.requireRuntimeBlock(block, src); - // TODO when reworking AIR memory layout make multi cases get generated as cases, - // not as part of the "else" block. - return mod.fail(&block.base, src, "TODO rework runtime switch Sema", .{}); - //const cases = try sema.arena.alloc(Inst.SwitchBr.Case, scalar_cases_len); - - //var case_block = child_block.makeSubBlock(); - //case_block.runtime_loop = null; - //case_block.runtime_cond = operand.src; - //case_block.runtime_index += 1; - //defer case_block.instructions.deinit(gpa); - - //var extra_index: usize = special.end; - - //var scalar_i: usize = 0; - //while (scalar_i < scalar_cases_len) : (scalar_i += 1) { - // const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); - // extra_index += 1; - // const body_len = sema.code.extra[extra_index]; - // extra_index += 1; - // const body = sema.code.extra[extra_index..][0..body_len]; - // extra_index += body_len; + var cases_extra: std.ArrayListUnmanaged(u32) = .{}; + defer cases_extra.deinit(gpa); - // case_block.instructions.shrinkRetainingCapacity(0); - // const item = sema.resolveInst(item_ref); - // // We validate these above; these two calls are guaranteed to succeed. 
-    //    const item_val = sema.resolveConstValue(&case_block, .unneeded, item) catch unreachable;
+    try cases_extra.ensureTotalCapacity(gpa, (scalar_cases_len + multi_cases_len) *
+        @typeInfo(Air.SwitchBr.Case).Struct.fields.len + 2);
 
-    //    _ = try sema.analyzeBody(&case_block, body);
+    var case_block = child_block.makeSubBlock();
+    case_block.runtime_loop = null;
+    case_block.runtime_cond = operand_src;
+    case_block.runtime_index += 1;
+    defer case_block.instructions.deinit(gpa);
 
-    //    cases[scalar_i] = .{
-    //        .item = item_val,
-    //        .body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items) },
-    //    };
-    //}
+    var extra_index: usize = special.end;
 
-    //var first_else_body: Body = undefined;
-    //var prev_condbr: ?*Inst.CondBr = null;
+    var scalar_i: usize = 0;
+    while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
+        const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
+        extra_index += 1;
+        const body_len = sema.code.extra[extra_index];
+        extra_index += 1;
+        const body = sema.code.extra[extra_index..][0..body_len];
+        extra_index += body_len;
 
-    //var multi_i: usize = 0;
-    //while (multi_i < multi_cases_len) : (multi_i += 1) {
-    //    const items_len = sema.code.extra[extra_index];
-    //    extra_index += 1;
-    //    const ranges_len = sema.code.extra[extra_index];
-    //    extra_index += 1;
-    //    const body_len = sema.code.extra[extra_index];
-    //    extra_index += 1;
-    //    const items = sema.code.refSlice(extra_index, items_len);
-    //    extra_index += items_len;
-
-    //    case_block.instructions.shrinkRetainingCapacity(0);
-
-    //    var any_ok: ?Air.Inst.Index = null;
-
-    //    for (items) |item_ref| {
-    //        const item = sema.resolveInst(item_ref);
-    //        _ = try sema.resolveConstValue(&child_block, item.src, item);
-
-    //        const cmp_ok = try case_block.addBinOp(.cmp_eq, operand, item);
-    //        if (any_ok) |some| {
-    //            any_ok = try case_block.addBinOp(.bool_or, some, cmp_ok);
-    //        } else {
-    //            any_ok = cmp_ok;
-    //        }
-    //    }
-
-    //    var range_i: usize = 0;
-    //    while (range_i < ranges_len) : (range_i += 1) {
-    //        const first_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
-    //        extra_index += 1;
-    //        const last_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
-    //        extra_index += 1;
-
-    //        const item_first = sema.resolveInst(first_ref);
-    //        const item_last = sema.resolveInst(last_ref);
-
-    //        _ = try sema.resolveConstValue(&child_block, item_first.src, item_first);
-    //        _ = try sema.resolveConstValue(&child_block, item_last.src, item_last);
-
-    //        // operand >= first and operand <= last
-    //        const range_first_ok = try case_block.addBinOp(
-    //            .cmp_gte,
-    //            operand,
-    //            item_first,
-    //        );
-    //        const range_last_ok = try case_block.addBinOp(
-    //            .cmp_lte,
-    //            operand,
-    //            item_last,
-    //        );
-    //        const range_ok = try case_block.addBinOp(
-    //            .bool_and,
-    //            range_first_ok,
-    //            range_last_ok,
-    //        );
-    //        if (any_ok) |some| {
-    //            any_ok = try case_block.addBinOp(.bool_or, some, range_ok);
-    //        } else {
-    //            any_ok = range_ok;
-    //        }
-    //    }
-
-    //    const new_condbr = try sema.arena.create(Inst.CondBr);
-    //    new_condbr.* = .{
-    //        .base = .{
-    //            .tag = .condbr,
-    //            .ty = Type.initTag(.noreturn),
-    //            .src = src,
-    //        },
-    //        .condition = any_ok.?,
-    //        .then_body = undefined,
-    //        .else_body = undefined,
-    //    };
-    //    try case_block.instructions.append(gpa, &new_condbr.base);
-
-    //    const cond_body: Body = .{
-    //        .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items),
-    //    };
-
-    //    case_block.instructions.shrinkRetainingCapacity(0);
-    //    const body = sema.code.extra[extra_index..][0..body_len];
-    //    extra_index += body_len;
-    //    _ = try sema.analyzeBody(&case_block, body);
-    //    new_condbr.then_body = .{
-    //        .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items),
-    //    };
-    //    if (prev_condbr) |condbr| {
-    //        condbr.else_body = cond_body;
-    //    } else {
-    //        first_else_body = cond_body;
-    //    }
-    //    prev_condbr = new_condbr;
-    //}
-
-    //const final_else_body: Body = blk: {
-    //    if (special.body.len != 0) {
-    //        case_block.instructions.shrinkRetainingCapacity(0);
-    //        _ = try sema.analyzeBody(&case_block, special.body);
-    //        const else_body: Body = .{
-    //            .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items),
-    //        };
-    //        if (prev_condbr) |condbr| {
-    //            condbr.else_body = else_body;
-    //            break :blk first_else_body;
-    //        } else {
-    //            break :blk else_body;
-    //        }
-    //    } else {
-    //        break :blk .{ .instructions = &.{} };
-    //    }
-    //};
-
-    //_ = try child_block.addSwitchBr(src, operand, cases, final_else_body);
-    //return sema.analyzeBlockBody(block, src, &child_block, merges);
+        case_block.instructions.shrinkRetainingCapacity(0);
+        const item = sema.resolveInst(item_ref);
+        // `item` is already guaranteed to be constant known.
+
+        _ = try sema.analyzeBody(&case_block, body);
+
+        try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
+        cases_extra.appendAssumeCapacity(1); // items_len
+        cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len));
+        cases_extra.appendAssumeCapacity(@enumToInt(item));
+        cases_extra.appendSliceAssumeCapacity(case_block.instructions.items);
+    }
+
+    var is_first = true;
+    var prev_cond_br: Air.Inst.Index = undefined;
+    var first_else_body: []const Air.Inst.Index = &.{};
+    defer gpa.free(first_else_body);
+    var prev_then_body: []const Air.Inst.Index = &.{};
+    defer gpa.free(prev_then_body);
+
+    var multi_i: usize = 0;
+    while (multi_i < multi_cases_len) : (multi_i += 1) {
+        const items_len = sema.code.extra[extra_index];
+        extra_index += 1;
+        const ranges_len = sema.code.extra[extra_index];
+        extra_index += 1;
+        const body_len = sema.code.extra[extra_index];
+        extra_index += 1;
+        const items = sema.code.refSlice(extra_index, items_len);
+        extra_index += items_len;
+
+        case_block.instructions.shrinkRetainingCapacity(0);
+
+        var any_ok: Air.Inst.Ref = .none;
+
+        // If there are any ranges, we have to put all the items into the
+        // else prong. Otherwise, we can take advantage of multiple items
+        // mapping to the same body.
+        if (ranges_len == 0) {
+            const body = sema.code.extra[extra_index..][0..body_len];
+            extra_index += body_len;
+            _ = try sema.analyzeBody(&case_block, body);
+
+            try cases_extra.ensureUnusedCapacity(gpa, 2 + items.len +
+                case_block.instructions.items.len);
+
+            cases_extra.appendAssumeCapacity(@intCast(u32, items.len)); // items_len
+            cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len));
+
+            for (items) |item_ref| {
+                const item = sema.resolveInst(item_ref);
+                cases_extra.appendAssumeCapacity(@enumToInt(item));
+            }
+
+            cases_extra.appendSliceAssumeCapacity(case_block.instructions.items);
+        } else {
+            for (items) |item_ref| {
+                const item = sema.resolveInst(item_ref);
+                const cmp_ok = try case_block.addBinOp(.cmp_eq, operand, item);
+                if (any_ok != .none) {
+                    any_ok = try case_block.addBinOp(.bool_or, any_ok, cmp_ok);
+                } else {
+                    any_ok = cmp_ok;
+                }
+            }
+
+            var range_i: usize = 0;
+            while (range_i < ranges_len) : (range_i += 1) {
+                const first_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
+                extra_index += 1;
+                const last_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
+                extra_index += 1;
+
+                const item_first = sema.resolveInst(first_ref);
+                const item_last = sema.resolveInst(last_ref);
+
+                // operand >= first and operand <= last
+                const range_first_ok = try case_block.addBinOp(
+                    .cmp_gte,
+                    operand,
+                    item_first,
+                );
+                const range_last_ok = try case_block.addBinOp(
+                    .cmp_lte,
+                    operand,
+                    item_last,
+                );
+                const range_ok = try case_block.addBinOp(
+                    .bool_and,
+                    range_first_ok,
+                    range_last_ok,
+                );
+                if (any_ok != .none) {
+                    any_ok = try case_block.addBinOp(.bool_or, any_ok, range_ok);
+                } else {
+                    any_ok = range_ok;
+                }
+            }
+
+            const new_cond_br = try case_block.addInstAsIndex(.{ .tag = .cond_br, .data = .{
+                .pl_op = .{
+                    .operand = any_ok,
+                    .payload = undefined,
+                },
+            } });
+            var cond_body = case_block.instructions.toOwnedSlice(gpa);
+            defer gpa.free(cond_body);
+
+            case_block.instructions.shrinkRetainingCapacity(0);
+            const body = sema.code.extra[extra_index..][0..body_len];
+            extra_index += body_len;
+            _ = try sema.analyzeBody(&case_block, body);
+
+            if (is_first) {
+                is_first = false;
+                first_else_body = cond_body;
+                cond_body = &.{};
+            } else {
+                try sema.air_extra.ensureUnusedCapacity(
+                    gpa,
+                    @typeInfo(Air.CondBr).Struct.fields.len + prev_then_body.len + cond_body.len,
+                );
+
+                sema.air_instructions.items(.data)[prev_cond_br].pl_op.payload =
+                    sema.addExtraAssumeCapacity(Air.CondBr{
+                        .then_body_len = @intCast(u32, prev_then_body.len),
+                        .else_body_len = @intCast(u32, cond_body.len),
+                    });
+                sema.air_extra.appendSliceAssumeCapacity(prev_then_body);
+                sema.air_extra.appendSliceAssumeCapacity(cond_body);
+            }
+            prev_then_body = case_block.instructions.toOwnedSlice(gpa);
+            prev_cond_br = new_cond_br;
+        }
+    }
+
+    var final_else_body: []const Air.Inst.Index = &.{};
+    if (special.body.len != 0) {
+        case_block.instructions.shrinkRetainingCapacity(0);
+        _ = try sema.analyzeBody(&case_block, special.body);
+
+        if (is_first) {
+            final_else_body = case_block.instructions.items;
+        } else {
+            try sema.air_extra.ensureUnusedCapacity(gpa, prev_then_body.len +
+                @typeInfo(Air.CondBr).Struct.fields.len + case_block.instructions.items.len);
+
+            sema.air_instructions.items(.data)[prev_cond_br].pl_op.payload =
+                sema.addExtraAssumeCapacity(Air.CondBr{
+                    .then_body_len = @intCast(u32, prev_then_body.len),
+                    .else_body_len = @intCast(u32, case_block.instructions.items.len),
+                });
+            sema.air_extra.appendSliceAssumeCapacity(prev_then_body);
+            sema.air_extra.appendSliceAssumeCapacity(case_block.instructions.items);
+            final_else_body = first_else_body;
+        }
+    }
+
+    try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr).Struct.fields.len +
+        cases_extra.items.len + final_else_body.len);
+
+    _ = try child_block.addInst(.{ .tag = .switch_br, .data = .{ .pl_op = .{
+        .operand = operand,
+        .payload = sema.addExtraAssumeCapacity(Air.SwitchBr{
+            .cases_len = @intCast(u32, scalar_cases_len + multi_cases_len),
+            .else_body_len = @intCast(u32, final_else_body.len),
+        }),
+    } } });
+    sema.air_extra.appendSliceAssumeCapacity(cases_extra.items);
+    sema.air_extra.appendSliceAssumeCapacity(final_else_body);
+
+    return sema.analyzeBlockBody(block, src, &child_block, merges);
 }
 
 fn resolveSwitchItemVal(
diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig
index e72140d826..41397f55f4 100644
--- a/src/codegen/wasm.zig
+++ b/src/codegen/wasm.zig
@@ -1282,44 +1282,49 @@ pub const Context = struct {
         // result type is always 'noreturn'
         const blocktype = wasm.block_empty;
 
-        const signedness: std.builtin.Signedness = blk: {
-            // by default we tell the operand type is unsigned (i.e. bools and enum values)
-            if (target_ty.zigTypeTag() != .Int) break :blk .unsigned;
-
-            // incase of an actual integer, we emit the correct signedness
-            break :blk target_ty.intInfo(self.target).signedness;
-        };
-        for (cases) |case_idx| {
-            const case = self.air.extraData(Air.SwitchBr.Case, case_idx);
-            const case_body = self.air.extra[case.end..][0..case.data.body_len];
-
-            // create a block for each case, when the condition does not match we break out of it
-            try self.startBlock(.block, blocktype, null);
-            try self.emitWValue(target);
-
-            const val = self.air.value(case.data.item).?;
-            try self.emitConstant(val, target_ty);
-            const opcode = buildOpcode(.{
-                .valtype1 = valtype,
-                .op = .ne, // not equal because we jump out the block if it does not match the condition
-                .signedness = signedness,
-            });
-            try self.code.append(wasm.opcode(opcode));
-            try self.code.append(wasm.opcode(.br_if));
-            try leb.writeULEB128(self.code.writer(), @as(u32, 0));
-
-            // emit our block code
-            try self.genBody(case_body);
-
-            // end the block we created earlier
-            try self.endBlock();
-        }
-
-        // finally, emit the else case if it exists. Here we will not have to
-        // check for a condition, so also no need to emit a block.
-        try self.genBody(else_body);
-
-        return .none;
+        _ = valtype;
+        _ = blocktype;
+        _ = target;
+        _ = else_body;
+        return self.fail("TODO implement wasm codegen for switch", .{});
+        //const signedness: std.builtin.Signedness = blk: {
+        //    // by default we tell the operand type is unsigned (i.e. bools and enum values)
+        //    if (target_ty.zigTypeTag() != .Int) break :blk .unsigned;
+
+        //    // incase of an actual integer, we emit the correct signedness
+        //    break :blk target_ty.intInfo(self.target).signedness;
+        //};
+        //for (cases) |case_idx| {
+        //    const case = self.air.extraData(Air.SwitchBr.Case, case_idx);
+        //    const case_body = self.air.extra[case.end..][0..case.data.body_len];
+
+        //    // create a block for each case, when the condition does not match we break out of it
+        //    try self.startBlock(.block, blocktype, null);
+        //    try self.emitWValue(target);
+
+        //    const val = self.air.value(case.data.item).?;
+        //    try self.emitConstant(val, target_ty);
+        //    const opcode = buildOpcode(.{
+        //        .valtype1 = valtype,
+        //        .op = .ne, // not equal because we jump out the block if it does not match the condition
+        //        .signedness = signedness,
+        //    });
+        //    try self.code.append(wasm.opcode(opcode));
+        //    try self.code.append(wasm.opcode(.br_if));
+        //    try leb.writeULEB128(self.code.writer(), @as(u32, 0));
+
+        //    // emit our block code
+        //    try self.genBody(case_body);
+
+        //    // end the block we created earlier
+        //    try self.endBlock();
+        //}
+
+        //// finally, emit the else case if it exists. Here we will not have to
+        //// check for a condition, so also no need to emit a block.
+        //try self.genBody(else_body);
+
+        //return .none;
     }
 
     fn airIsErr(self: *Context, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!WValue {
-- cgit v1.2.3
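The commented-out lowering above still assumes the old one-item-per-case
layout. A possible shape for the TODO under the new layout, reusing the
locals of the surrounding function (`target`, `target_ty`, `valtype`,
`signedness`, `blocktype`; `switch_br` stands for its `Air.SwitchBr`
payload). Folding the per-item `eq` comparisons with `i32_or`/`i32_eqz`
is an assumption of this sketch, not code from these patches:

    var extra_index: usize = switch_br.end;
    var case_i: u32 = 0;
    while (case_i < switch_br.data.cases_len) : (case_i += 1) {
        const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
        const items = self.air.extra[case.end..][0..case.data.items_len];
        const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
        extra_index = case.end + items.len + case_body.len;

        try self.startBlock(.block, blocktype, null);
        // fold `target == item` over every item belonging to this case
        for (items) |item, i| {
            try self.emitWValue(target);
            try self.emitConstant(self.air.value(@intToEnum(Air.Inst.Ref, item)).?, target_ty);
            const opcode = buildOpcode(.{ .valtype1 = valtype, .op = .eq, .signedness = signedness });
            try self.code.append(wasm.opcode(opcode));
            if (i != 0) try self.code.append(wasm.opcode(.i32_or));
        }
        // no item matched: branch past this case's body
        try self.code.append(wasm.opcode(.i32_eqz));
        try self.code.append(wasm.opcode(.br_if));
        try leb.writeULEB128(self.code.writer(), @as(u32, 0));

        try self.genBody(case_body);
        try self.endBlock();
    }
    const else_body = self.air.extra[extra_index..][0..switch_br.data.else_body_len];
    try self.genBody(else_body);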