diff options
| author | Luuk de Gram <luuk@degram.dev> | 2022-12-08 21:18:11 +0100 |
|---|---|---|
| committer | Luuk de Gram <luuk@degram.dev> | 2022-12-12 17:42:00 +0100 |
| commit | c6d654f73bbe80ed3653be6a31ddcaa4772a4fe2 (patch) | |
| tree | 04c166bceaea1df0044582297f0332291e23119e /src/arch/wasm/CodeGen.zig | |
| parent | db06eed7a3e741a319182b2e4edc889b83787962 (diff) | |
| download | zig-c6d654f73bbe80ed3653be6a31ddcaa4772a4fe2.tar.gz zig-c6d654f73bbe80ed3653be6a31ddcaa4772a4fe2.zip | |
wasm: implement the 'splat' instruction part 1
This implements `airSplat` for the native WebAssembly backend when
the features 'simd128' or 'relaxed-simd' are enabled. The commit
supports splat where the value lives in the linear memory segment,
as well as on the stack. This saves a lot of instruction cost.
When it detects the element type is not 8, 16, 32 or 64 bits,
the backend will instead use the same strategy as if the features
were disabled.
Diffstat (limited to 'src/arch/wasm/CodeGen.zig')
| -rw-r--r-- | src/arch/wasm/CodeGen.zig | 51 |
1 file changed, 49 insertions, 2 deletions
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 0164b17c0f..5e69860fbc 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -4430,9 +4430,56 @@ fn airIntToFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); + const ty = func.air.typeOfIndex(inst); + const elem_ty = ty.childType(); - _ = operand; - return func.fail("TODO: Implement wasm airSplat", .{}); + const result = try func.allocLocal(ty); + if (determineSimdStoreStrategy(ty, func.target) == .direct) blk: { + switch (operand) { + // when the operand lives in the linear memory section, we can directly + // load and splat the value at once. Meaning we do not first have to load + // the scalar value onto the stack. + .stack_offset, .memory, .memory_offset => { + const opcode = switch (elem_ty.bitSize(func.target)) { + 8 => std.wasm.simdOpcode(.v128_load8_splat), + 16 => std.wasm.simdOpcode(.v128_load16_splat), + 32 => std.wasm.simdOpcode(.v128_load32_splat), + 64 => std.wasm.simdOpcode(.v128_load64_splat), + else => break :blk, // Cannot make use of simd-instructions + }; + try func.emitWValue(operand); + // TODO: Add helper functions for simd opcodes + const extra_index = @intCast(u32, func.mir_extra.items.len); + // stores as := opcode, offset, alignment (opcode::memarg) + try func.mir_extra.appendSlice(func.gpa, &[_]u32{ + opcode, + operand.offset(), + elem_ty.abiAlignment(func.target), + }); + try func.addInst(.{ .tag = .simd, .data = .{ .payload = extra_index } }); + try func.addLabel(.local_set, result.local.value); + return func.finishAir(inst, result, &.{ty_op.operand}); + }, + .local => { + const opcode = switch (elem_ty.bitSize(func.target)) { + 8 => std.wasm.simdOpcode(.i8x16_splat), + 16 => std.wasm.simdOpcode(.i16x8_splat), + 32 => if (elem_ty.isInt()) 
std.wasm.simdOpcode(.i32x4_splat) else std.wasm.simdOpcode(.f32x4_splat), + 64 => if (elem_ty.isInt()) std.wasm.simdOpcode(.i64x2_splat) else std.wasm.simdOpcode(.f64x2_splat), + else => break :blk, // Cannot make use of simd-instructions + }; + try func.emitWValue(operand); + const extra_index = @intCast(u32, func.mir_extra.items.len); + try func.mir_extra.append(func.gpa, opcode); + try func.addInst(.{ .tag = .simd, .data = .{ .payload = extra_index } }); + try func.addLabel(.local_set, result.local.value); + return func.finishAir(inst, result, &.{ty_op.operand}); + }, + else => unreachable, + } + } + + return func.fail("TODO: Implement wasm airSplat unrolled", .{}); } fn airSelect(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { |
