path: root/src/codegen/spirv.zig
author    Jakub Konka <kubkon@jakubkonka.com>  2021-07-22 09:34:44 +0200
committer Jakub Konka <kubkon@jakubkonka.com>  2021-07-22 09:34:44 +0200
commit    def135918740846d9b206c3188563cb77333f3a9 (patch)
tree      2d054e8bca8b70ab2d38e384ff4eaac665b86422 /src/codegen/spirv.zig
parent    d0edd37f690c3e6cf3f8a7fc7a27016ba9b010ce (diff)
parent    8d0671157cdf8bc8b89d047138b42227420a5388 (diff)
Merge remote-tracking branch 'origin/master' into zld-incremental-2
Diffstat (limited to 'src/codegen/spirv.zig')
-rw-r--r--  src/codegen/spirv.zig | 492
1 file changed, 247 insertions(+), 245 deletions(-)
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 7fa813e565..7429e3c3b0 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -12,21 +12,21 @@ const Decl = Module.Decl;
const Type = @import("../type.zig").Type;
const Value = @import("../value.zig").Value;
const LazySrcLoc = Module.LazySrcLoc;
-const ir = @import("../air.zig");
-const Inst = ir.Inst;
+const Air = @import("../Air.zig");
+const Liveness = @import("../Liveness.zig");
pub const Word = u32;
pub const ResultId = u32;
pub const TypeMap = std.HashMap(Type, u32, Type.HashContext64, std.hash_map.default_max_load_percentage);
-pub const InstMap = std.AutoHashMap(*Inst, ResultId);
+pub const InstMap = std.AutoHashMap(Air.Inst.Index, ResultId);
const IncomingBlock = struct {
src_label_id: ResultId,
break_value_id: ResultId,
};
-pub const BlockMap = std.AutoHashMap(*Inst.Block, struct {
+pub const BlockMap = std.AutoHashMap(Air.Inst.Index, struct {
label_id: ResultId,
incoming_blocks: *std.ArrayListUnmanaged(IncomingBlock),
});
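These two maps are the heart of the migration: the old IR identified instructions by heap pointer (`*Inst`), while AIR stores instructions in a flat array, so a plain integer index (`Air.Inst.Index`) becomes the key. A minimal sketch of the lookup discipline, using only names visible in this diff (illustrative, not part of the change):

    var inst_results = InstMap.init(gpa);
    defer inst_results.deinit();
    // After generating code for instruction `inst: Air.Inst.Index`:
    try inst_results.putNoClobber(inst, result_id);
    // When a later instruction consumes it as an operand:
    const operand_id = inst_results.get(inst).?;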
@@ -160,7 +160,11 @@ pub const DeclGen = struct {
 /// The SPIR-V module that code should be put in.
spv: *SPIRVModule,
- /// An array of function argument result-ids. Each index corresponds with the function argument of the same index.
+ air: Air,
+ liveness: Liveness,
+
+ /// An array of function argument result-ids. Each index corresponds with the
+ /// function argument of the same index.
args: std.ArrayList(ResultId),
 /// A counter to keep track of how many `arg` instructions we've seen so far.
@@ -169,33 +173,35 @@ pub const DeclGen = struct {
/// A map keeping track of which instruction generated which result-id.
inst_results: InstMap,
- /// We need to keep track of result ids for block labels, as well as the 'incoming' blocks for a block.
+ /// We need to keep track of result ids for block labels, as well as the 'incoming'
+ /// blocks for a block.
blocks: BlockMap,
/// The label of the SPIR-V block we are currently generating.
current_block_label_id: ResultId,
- /// The actual instructions for this function. We need to declare all locals in the first block, and because we don't
- /// know which locals there are going to be, we're just going to generate everything after the locals-section in this array.
- /// Note: It will not contain OpFunction, OpFunctionParameter, OpVariable and the initial OpLabel. These will be generated
- /// into spv.binary.fn_decls directly.
+ /// The actual instructions for this function. We need to declare all locals in
+ /// the first block, and because we don't know which locals there are going to be,
+ /// we're just going to generate everything after the locals-section in this array.
+ /// Note: It will not contain OpFunction, OpFunctionParameter, OpVariable and the
+ /// initial OpLabel. These will be generated into spv.binary.fn_decls directly.
code: std.ArrayList(Word),
/// The decl we are currently generating code for.
decl: *Decl,
- /// If `gen` returned `Error.AnalysisFail`, this contains an explanatory message. Memory is owned by
- /// `module.gpa`.
+ /// If `gen` returned `Error.AnalysisFail`, this contains an explanatory message.
+ /// Memory is owned by `module.gpa`.
error_msg: ?*Module.ErrorMsg,
/// Possible errors the `gen` function may return.
const Error = error{ AnalysisFail, OutOfMemory };
- /// This structure is used to return information about a type typically used for arithmetic operations.
- /// These types may either be integers, floats, or a vector of these. Most scalar operations also work on vectors,
- /// so we can easily represent those as arithmetic types.
- /// If the type is a scalar, 'inner type' refers to the scalar type. Otherwise, if its a vector, it refers
- /// to the vector's element type.
+ /// This structure is used to return information about a type typically used for
+ /// arithmetic operations. These types may either be integers, floats, or a vector
+ /// of these. Most scalar operations also work on vectors, so we can easily represent
+ /// those as arithmetic types. If the type is a scalar, 'inner type' refers to the
+ /// scalar type. Otherwise, if it's a vector, it refers to the vector's element type.
const ArithmeticTypeInfo = struct {
/// A classification of the inner type.
const Class = enum {
@@ -207,13 +213,14 @@ pub const DeclGen = struct {
/// the relevant capability is enabled).
integer,
- /// A regular float. These are all required to be natively supported. Floating points for
- /// which the relevant capability is not enabled are not emulated.
+ /// A regular float. These are all required to be natively supported. Floating points
+ /// for which the relevant capability is not enabled are not emulated.
float,
- /// An integer of a 'strange' size (which' bit size is not the same as its backing type. **Note**: this
- /// may **also** include power-of-2 integers for which the relevant capability is not enabled), but still
- /// within the limits of the largest natively supported integer type.
+ /// An integer of a 'strange' size, i.e. one whose bit size is not the same as
+ /// that of its backing type (**Note**: this may **also** include power-of-2
+ /// integers for which the relevant capability is not enabled), but still within
+ /// the limits of the largest natively supported integer type.
strange_integer,
/// An integer with more bits than the largest natively supported integer type.
@@ -221,7 +228,7 @@ pub const DeclGen = struct {
};
/// The number of bits in the inner type.
- /// Note: this is the actual number of bits of the type, not the size of the backing integer.
+ /// This is the actual number of bits of the type, not the size of the backing integer.
bits: u16,
/// Whether the type is a vector.
@@ -235,10 +242,13 @@ pub const DeclGen = struct {
class: Class,
};
- /// Initialize the common resources of a DeclGen. Some fields are left uninitialized, only set when `gen` is called.
+ /// Initialize the common resources of a DeclGen. Some fields are left uninitialized,
+ /// only set when `gen` is called.
pub fn init(spv: *SPIRVModule) DeclGen {
return .{
.spv = spv,
+ .air = undefined,
+ .liveness = undefined,
.args = std.ArrayList(ResultId).init(spv.gpa),
.next_arg_index = undefined,
.inst_results = InstMap.init(spv.gpa),
@@ -251,10 +261,12 @@ pub const DeclGen = struct {
}
 /// Generate the code for `decl`. If a reportable error occurred during code generation,
- /// a message is returned by this function. Callee owns the memory. If this function returns such
- /// a reportable error, it is valid to be called again for a different decl.
- pub fn gen(self: *DeclGen, decl: *Decl) !?*Module.ErrorMsg {
+ /// a message is returned by this function. Callee owns the memory. If this function
+ /// returns such a reportable error, it is valid to call it again for a different decl.
+ pub fn gen(self: *DeclGen, decl: *Decl, air: Air, liveness: Liveness) !?*Module.ErrorMsg {
// Reset internal resources, we don't want to re-allocate these.
+ self.air = air;
+ self.liveness = liveness;
self.args.items.len = 0;
self.next_arg_index = 0;
self.inst_results.clearRetainingCapacity();
@@ -280,19 +292,20 @@ pub const DeclGen = struct {
return self.spv.module.getTarget();
}
- fn fail(self: *DeclGen, src: LazySrcLoc, comptime format: []const u8, args: anytype) Error {
+ fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
@setCold(true);
+ const src: LazySrcLoc = .{ .node_offset = 0 };
const src_loc = src.toSrcLocWithDecl(self.decl);
self.error_msg = try Module.ErrorMsg.create(self.spv.module.gpa, src_loc, format, args);
return error.AnalysisFail;
}
- fn resolve(self: *DeclGen, inst: *Inst) !ResultId {
- if (inst.value()) |val| {
- return self.genConstant(inst.src, inst.ty, val);
+ fn resolve(self: *DeclGen, inst: Air.Inst.Ref) !ResultId {
+ if (self.air.value(inst)) |val| {
+ return self.genConstant(self.air.typeOf(inst), val);
}
-
- return self.inst_results.get(inst).?; // Instruction does not dominate all uses!
+ const index = Air.refToIndex(inst).?;
+ return self.inst_results.get(index).?; // If this fails, the instruction does not dominate all uses.
}
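`resolve` now takes an `Air.Inst.Ref` rather than an instruction pointer. A Ref either denotes a comptime-known value (in which case `air.value` succeeds) or refers to a body instruction, recoverable with `Air.refToIndex`. A hedged sketch of that split, mirroring the function above:

    if (self.air.value(ref)) |val| {
        // Comptime-known operand: materialize it as a SPIR-V constant.
        _ = val;
    } else {
        // Runtime operand: refToIndex is non-null for non-constant Refs, and
        // the index was recorded in inst_results when it was generated.
        const index = Air.refToIndex(ref).?;
        _ = index;
    }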
fn beginSPIRVBlock(self: *DeclGen, label_id: ResultId) !void {
@@ -314,7 +327,7 @@ pub const DeclGen = struct {
const target = self.getTarget();
 // The backend will never be asked to compile a 0-bit integer, so we won't have to handle those in this function.
- std.debug.assert(bits != 0);
+ assert(bits != 0);
 // 8, 16 and 64-bit integers require the Int8, Int16 and Int64 capabilities respectively.
// 32-bit integers are always supported (see spec, 2.16.1, Data rules).
@@ -388,19 +401,19 @@ pub const DeclGen = struct {
.composite_integer };
},
// As of yet, there is no vector support in the self-hosted compiler.
- .Vector => self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: implement arithmeticTypeInfo for Vector", .{}),
+ .Vector => self.fail("TODO: SPIR-V backend: implement arithmeticTypeInfo for Vector", .{}),
// TODO: For which types is this the case?
- else => self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: implement arithmeticTypeInfo for {}", .{ty}),
+ else => self.fail("TODO: SPIR-V backend: implement arithmeticTypeInfo for {}", .{ty}),
};
}
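For orientation, a sketch of how `arithmeticTypeInfo` would classify a few common types on a target with the Int8/Int16/Int64 capabilities enabled (expected values inferred from the surrounding code, not verified output):

    const u32_info = try self.arithmeticTypeInfo(Type.initTag(.u32));
    assert(u32_info.class == .integer and u32_info.bits == 32);
    // A u3 (built through an int payload type rather than a simple tag) would
    // classify as .strange_integer, since it is backed by the native 8-bit
    // integer type; an f32 would classify as .float.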
/// Generate a constant representing `val`.
/// TODO: Deduplication?
- fn genConstant(self: *DeclGen, src: LazySrcLoc, ty: Type, val: Value) Error!ResultId {
+ fn genConstant(self: *DeclGen, ty: Type, val: Value) Error!ResultId {
const target = self.getTarget();
const code = &self.spv.binary.types_globals_constants;
const result_id = self.spv.allocResultId();
- const result_type_id = try self.genType(src, ty);
+ const result_type_id = try self.genType(ty);
if (val.isUndef()) {
try writeInstruction(code, .OpUndef, &[_]Word{ result_type_id, result_id });
@@ -412,13 +425,13 @@ pub const DeclGen = struct {
const int_info = ty.intInfo(target);
const backing_bits = self.backingIntBits(int_info.bits) orelse {
// Integers too big for any native type are represented as "composite integers": An array of largestSupportedIntBits.
- return self.fail(src, "TODO: SPIR-V backend: implement composite int constants for {}", .{ty});
+ return self.fail("TODO: SPIR-V backend: implement composite int constants for {}", .{ty});
};
 // We can just use toSignedInt/toUnsignedInt here as they return u64 - a type large enough to hold any
// SPIR-V native type (up to i/u64 with Int64). If SPIR-V ever supports native ints of a larger size, this
// might need to be updated.
- std.debug.assert(self.largestSupportedIntBits() <= std.meta.bitCount(u64));
+ assert(self.largestSupportedIntBits() <= std.meta.bitCount(u64));
var int_bits = if (ty.isSignedInt()) @bitCast(u64, val.toSignedInt()) else val.toUnsignedInt();
// Mask the low bits which make up the actual integer. This is to make sure that negative values
@@ -470,13 +483,13 @@ pub const DeclGen = struct {
}
},
.Void => unreachable,
- else => return self.fail(src, "TODO: SPIR-V backend: constant generation of type {}", .{ty}),
+ else => return self.fail("TODO: SPIR-V backend: constant generation of type {}", .{ty}),
}
return result_id;
}
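The masking step mentioned above is elided by this hunk. A plausible shape for it, kept consistent with the two-argument builtin syntax used elsewhere in this file (a sketch, not the committed code):

    // Keep only the low `int_info.bits` bits, so that negative values of
    // narrow signed types don't leak sign-extension into the backing word.
    // The bits == 64 case is special to avoid an overflowing shift.
    const mask: u64 = if (int_info.bits == 64)
        std.math.maxInt(u64)
    else
        (@as(u64, 1) << @intCast(u6, int_info.bits)) - 1;
    int_bits &= mask;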
- fn genType(self: *DeclGen, src: LazySrcLoc, ty: Type) Error!ResultId {
+ fn genType(self: *DeclGen, ty: Type) Error!ResultId {
// We can't use getOrPut here so we can recursively generate types.
if (self.spv.types.get(ty)) |already_generated| {
return already_generated;
@@ -493,7 +506,7 @@ pub const DeclGen = struct {
const int_info = ty.intInfo(target);
const backing_bits = self.backingIntBits(int_info.bits) orelse {
// Integers too big for any native type are represented as "composite integers": An array of largestSupportedIntBits.
- return self.fail(src, "TODO: SPIR-V backend: implement composite int {}", .{ty});
+ return self.fail("TODO: SPIR-V backend: implement composite int {}", .{ty});
};
// TODO: If backing_bits != int_info.bits, a duplicate type might be generated here.
@@ -519,7 +532,7 @@ pub const DeclGen = struct {
};
if (!supported) {
- return self.fail(src, "Floating point width of {} bits is not supported for the current SPIR-V feature set", .{bits});
+ return self.fail("Floating point width of {} bits is not supported for the current SPIR-V feature set", .{bits});
}
try writeInstruction(code, .OpTypeFloat, &[_]Word{ result_id, bits });
@@ -527,19 +540,19 @@ pub const DeclGen = struct {
.Fn => {
// We only support zig-calling-convention functions, no varargs.
if (ty.fnCallingConvention() != .Unspecified)
- return self.fail(src, "Unsupported calling convention for SPIR-V", .{});
+ return self.fail("Unsupported calling convention for SPIR-V", .{});
if (ty.fnIsVarArgs())
- return self.fail(src, "VarArgs unsupported for SPIR-V", .{});
+ return self.fail("VarArgs unsupported for SPIR-V", .{});
// In order to avoid a temporary here, first generate all the required types and then simply look them up
// when generating the function type.
const params = ty.fnParamLen();
var i: usize = 0;
while (i < params) : (i += 1) {
- _ = try self.genType(src, ty.fnParamType(i));
+ _ = try self.genType(ty.fnParamType(i));
}
- const return_type_id = try self.genType(src, ty.fnReturnType());
+ const return_type_id = try self.genType(ty.fnReturnType());
// result id + result type id + parameter type ids.
try writeOpcode(code, .OpTypeFunction, 2 + @intCast(u16, ty.fnParamLen()));
@@ -552,7 +565,7 @@ pub const DeclGen = struct {
}
},
// When recursively generating a type, we cannot infer the pointer's storage class. See genPointerType.
- .Pointer => return self.fail(src, "Cannot create pointer with unkown storage class", .{}),
+ .Pointer => return self.fail("Cannot create pointer with unknown storage class", .{}),
.Vector => {
// Although not 100% the same, Zig vectors map quite neatly to SPIR-V vectors (including many integer and float operations
// which work on them), so simply use those.
@@ -562,7 +575,7 @@ pub const DeclGen = struct {
// is adequate at all for this.
// TODO: Vectors are not yet supported by the self-hosted compiler itself it seems.
- return self.fail(src, "TODO: SPIR-V backend: implement type Vector", .{});
+ return self.fail("TODO: SPIR-V backend: implement type Vector", .{});
},
.Null,
.Undefined,
@@ -574,7 +587,7 @@ pub const DeclGen = struct {
.BoundFn => unreachable, // this type will be deleted from the language.
- else => |tag| return self.fail(src, "TODO: SPIR-V backend: implement type {}s", .{tag}),
+ else => |tag| return self.fail("TODO: SPIR-V backend: implement type {}s", .{tag}),
}
try self.spv.types.putNoClobber(ty, result_id);
@@ -583,8 +596,8 @@ pub const DeclGen = struct {
/// SPIR-V requires pointers to have a storage class (address space), and so we have a special function for that.
/// TODO: The result of this needs to be cached.
- fn genPointerType(self: *DeclGen, src: LazySrcLoc, ty: Type, storage_class: spec.StorageClass) !ResultId {
- std.debug.assert(ty.zigTypeTag() == .Pointer);
+ fn genPointerType(self: *DeclGen, ty: Type, storage_class: spec.StorageClass) !ResultId {
+ assert(ty.zigTypeTag() == .Pointer);
const code = &self.spv.binary.types_globals_constants;
const result_id = self.spv.allocResultId();
@@ -592,7 +605,7 @@ pub const DeclGen = struct {
// TODO: There are many constraints which are ignored for now: We may only create pointers to certain types, and to other types
// if more capabilities are enabled. For example, we may only create pointers to f16 if Float16Buffer is enabled.
 // These also relate to the pointer's address space.
- const child_id = try self.genType(src, ty.elemType());
+ const child_id = try self.genType(ty.elemType());
try writeInstruction(code, .OpTypePointer, &[_]Word{ result_id, @enumToInt(storage_class), child_id });
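In SPIR-V assembly terms, the instruction emitted above declares a pointer type carrying an explicit storage class. For example, a `*u32` in Function storage would read (standard SPIR-V shown for orientation; the %-names are arbitrary):

    %ptr_u32 = OpTypePointer Function %u32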
@@ -603,9 +616,9 @@ pub const DeclGen = struct {
const decl = self.decl;
const result_id = decl.fn_link.spirv.id;
- if (decl.val.castTag(.function)) |func_payload| {
- std.debug.assert(decl.ty.zigTypeTag() == .Fn);
- const prototype_id = try self.genType(.{ .node_offset = 0 }, decl.ty);
+ if (decl.val.castTag(.function)) |_| {
+ assert(decl.ty.zigTypeTag() == .Fn);
+ const prototype_id = try self.genType(decl.ty);
try writeInstruction(&self.spv.binary.fn_decls, .OpFunction, &[_]Word{
self.spv.types.get(decl.ty.fnReturnType()).?, // This type should be generated along with the prototype.
result_id,
@@ -632,189 +645,171 @@ pub const DeclGen = struct {
try writeInstruction(&self.spv.binary.fn_decls, .OpLabel, &[_]Word{root_block_id});
self.current_block_label_id = root_block_id;
- try self.genBody(func_payload.data.body);
+ const main_body = self.air.getMainBody();
+ try self.genBody(main_body);
// Append the actual code into the fn_decls section.
try self.spv.binary.fn_decls.appendSlice(self.code.items);
try writeInstruction(&self.spv.binary.fn_decls, .OpFunctionEnd, &[_]Word{});
} else {
- return self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: generate decl type {}", .{decl.ty.zigTypeTag()});
+ return self.fail("TODO: SPIR-V backend: generate decl type {}", .{decl.ty.zigTypeTag()});
}
}
- fn genBody(self: *DeclGen, body: ir.Body) Error!void {
- for (body.instructions) |inst| {
+ fn genBody(self: *DeclGen, body: []const Air.Inst.Index) Error!void {
+ for (body) |inst| {
try self.genInst(inst);
}
}
- fn genInst(self: *DeclGen, inst: *Inst) !void {
- const result_id = switch (inst.tag) {
- .add, .addwrap => try self.genBinOp(inst.castTag(.add).?),
- .sub, .subwrap => try self.genBinOp(inst.castTag(.sub).?),
- .mul, .mulwrap => try self.genBinOp(inst.castTag(.mul).?),
- .div => try self.genBinOp(inst.castTag(.div).?),
- .bit_and => try self.genBinOp(inst.castTag(.bit_and).?),
- .bit_or => try self.genBinOp(inst.castTag(.bit_or).?),
- .xor => try self.genBinOp(inst.castTag(.xor).?),
- .cmp_eq => try self.genCmp(inst.castTag(.cmp_eq).?),
- .cmp_neq => try self.genCmp(inst.castTag(.cmp_neq).?),
- .cmp_gt => try self.genCmp(inst.castTag(.cmp_gt).?),
- .cmp_gte => try self.genCmp(inst.castTag(.cmp_gte).?),
- .cmp_lt => try self.genCmp(inst.castTag(.cmp_lt).?),
- .cmp_lte => try self.genCmp(inst.castTag(.cmp_lte).?),
- .bool_and => try self.genBinOp(inst.castTag(.bool_and).?),
- .bool_or => try self.genBinOp(inst.castTag(.bool_or).?),
- .not => try self.genUnOp(inst.castTag(.not).?),
- .alloc => try self.genAlloc(inst.castTag(.alloc).?),
- .arg => self.genArg(),
- .block => (try self.genBlock(inst.castTag(.block).?)) orelse return,
- .br => return try self.genBr(inst.castTag(.br).?),
- .br_void => return try self.genBrVoid(inst.castTag(.br_void).?),
- // TODO: Breakpoints won't be supported in SPIR-V, but the compiler seems to insert them
- // throughout the IR.
+ fn genInst(self: *DeclGen, inst: Air.Inst.Index) !void {
+ const air_tags = self.air.instructions.items(.tag);
+ const result_id = switch (air_tags[inst]) {
+ // zig fmt: off
+ .add, .addwrap => try self.airArithOp(inst, .{.OpFAdd, .OpIAdd, .OpIAdd}),
+ .sub, .subwrap => try self.airArithOp(inst, .{.OpFSub, .OpISub, .OpISub}),
+ .mul, .mulwrap => try self.airArithOp(inst, .{.OpFMul, .OpIMul, .OpIMul}),
+ .div => try self.airArithOp(inst, .{.OpFDiv, .OpSDiv, .OpUDiv}),
+
+ .bit_and => try self.airBinOpSimple(inst, .OpBitwiseAnd),
+ .bit_or => try self.airBinOpSimple(inst, .OpBitwiseOr),
+ .xor => try self.airBinOpSimple(inst, .OpBitwiseXor),
+ .bool_and => try self.airBinOpSimple(inst, .OpLogicalAnd),
+ .bool_or => try self.airBinOpSimple(inst, .OpLogicalOr),
+
+ .not => try self.airNot(inst),
+
+ .cmp_eq => try self.airCmp(inst, .{.OpFOrdEqual, .OpLogicalEqual, .OpIEqual}),
+ .cmp_neq => try self.airCmp(inst, .{.OpFOrdNotEqual, .OpLogicalNotEqual, .OpINotEqual}),
+ .cmp_gt => try self.airCmp(inst, .{.OpFOrdGreaterThan, .OpSGreaterThan, .OpUGreaterThan}),
+ .cmp_gte => try self.airCmp(inst, .{.OpFOrdGreaterThanEqual, .OpSGreaterThanEqual, .OpUGreaterThanEqual}),
+ .cmp_lt => try self.airCmp(inst, .{.OpFOrdLessThan, .OpSLessThan, .OpULessThan}),
+ .cmp_lte => try self.airCmp(inst, .{.OpFOrdLessThanEqual, .OpSLessThanEqual, .OpULessThanEqual}),
+
+ .arg => self.airArg(),
+ .alloc => try self.airAlloc(inst),
+ .block => (try self.airBlock(inst)) orelse return,
+ .load => try self.airLoad(inst),
+
+ .br => return self.airBr(inst),
.breakpoint => return,
- .condbr => return try self.genCondBr(inst.castTag(.condbr).?),
- .constant => unreachable,
- .dbg_stmt => return try self.genDbgStmt(inst.castTag(.dbg_stmt).?),
- .load => try self.genLoad(inst.castTag(.load).?),
- .loop => return try self.genLoop(inst.castTag(.loop).?),
- .ret => return try self.genRet(inst.castTag(.ret).?),
- .retvoid => return try self.genRetVoid(),
- .store => return try self.genStore(inst.castTag(.store).?),
- .unreach => return try self.genUnreach(),
- else => return self.fail(inst.src, "TODO: SPIR-V backend: implement inst {s}", .{@tagName(inst.tag)}),
+ .cond_br => return self.airCondBr(inst),
+ .constant => unreachable,
+ .dbg_stmt => return self.airDbgStmt(inst),
+ .loop => return self.airLoop(inst),
+ .ret => return self.airRet(inst),
+ .store => return self.airStore(inst),
+ .unreach => return self.airUnreach(),
+ // zig fmt: on
+
+ else => |tag| return self.fail("TODO: SPIR-V backend: implement AIR tag {s}", .{
+ @tagName(tag),
+ }),
};
try self.inst_results.putNoClobber(inst, result_id);
}
- fn genBinOp(self: *DeclGen, inst: *Inst.BinOp) !ResultId {
- // TODO: Will lhs and rhs have the same type?
- const lhs_id = try self.resolve(inst.lhs);
- const rhs_id = try self.resolve(inst.rhs);
+ fn airBinOpSimple(self: *DeclGen, inst: Air.Inst.Index, opcode: Opcode) !ResultId {
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs_id = try self.resolve(bin_op.lhs);
+ const rhs_id = try self.resolve(bin_op.rhs);
+ const result_id = self.spv.allocResultId();
+ const result_type_id = try self.genType(self.air.typeOfIndex(inst));
+ try writeInstruction(&self.code, opcode, &[_]Word{
+ result_type_id, result_id, lhs_id, rhs_id,
+ });
+ return result_id;
+ }
+
+ fn airArithOp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId {
+ // LHS and RHS are guaranteed to have the same type, and AIR guarantees
+ // the result type to match theirs, which is also what SPIR-V requires.
+ const ty = self.air.typeOfIndex(inst);
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs_id = try self.resolve(bin_op.lhs);
+ const rhs_id = try self.resolve(bin_op.rhs);
const result_id = self.spv.allocResultId();
- const result_type_id = try self.genType(inst.base.src, inst.base.ty);
-
- // TODO: Is the result the same as the argument types?
- // This is supposed to be the case for SPIR-V.
- std.debug.assert(inst.rhs.ty.eql(inst.lhs.ty));
- std.debug.assert(inst.base.ty.tag() == .bool or inst.base.ty.eql(inst.lhs.ty));
-
- // Binary operations are generally applicable to both scalar and vector operations in SPIR-V, but int and float
- // versions of operations require different opcodes.
- // For operations which produce bools, the information of inst.base.ty is not useful, so just pick either operand
- // instead.
- const info = try self.arithmeticTypeInfo(inst.lhs.ty);
-
- if (info.class == .composite_integer) {
- return self.fail(inst.base.src, "TODO: SPIR-V backend: binary operations for composite integers", .{});
- } else if (info.class == .strange_integer) {
- return self.fail(inst.base.src, "TODO: SPIR-V backend: binary operations for strange integers", .{});
- }
+ const result_type_id = try self.genType(ty);
+
+ assert(self.air.typeOf(bin_op.lhs).eql(ty));
+ assert(self.air.typeOf(bin_op.rhs).eql(ty));
+
+ // Binary operations are generally applicable to both scalar and vector operations
+ // in SPIR-V, but int and float versions of operations require different opcodes.
+ const info = try self.arithmeticTypeInfo(ty);
- const is_float = info.class == .float;
- const is_signed = info.signedness == .signed;
- // **Note**: All these operations must be valid for vectors as well!
- const opcode = switch (inst.base.tag) {
- // The regular integer operations are all defined for wrapping. Since theyre only relevant for integers,
- // we can just switch on both cases here.
- .add, .addwrap => if (is_float) Opcode.OpFAdd else Opcode.OpIAdd,
- .sub, .subwrap => if (is_float) Opcode.OpFSub else Opcode.OpISub,
- .mul, .mulwrap => if (is_float) Opcode.OpFMul else Opcode.OpIMul,
- // TODO: Trap if divisor is 0?
- // TODO: Figure out of OpSDiv for unsigned/OpUDiv for signed does anything useful.
- // => Those are probably for divTrunc and divFloor, though the compiler does not yet generate those.
- // => TODO: Figure out how those work on the SPIR-V side.
- // => TODO: Test these.
- .div => if (is_float) Opcode.OpFDiv else if (is_signed) Opcode.OpSDiv else Opcode.OpUDiv,
- // Only integer versions for these.
- .bit_and => Opcode.OpBitwiseAnd,
- .bit_or => Opcode.OpBitwiseOr,
- .xor => Opcode.OpBitwiseXor,
- // Bool -> bool operations.
- .bool_and => Opcode.OpLogicalAnd,
- .bool_or => Opcode.OpLogicalOr,
+ const opcode_index: usize = switch (info.class) {
+ .composite_integer => {
+ return self.fail("TODO: SPIR-V backend: binary operations for composite integers", .{});
+ },
+ .strange_integer => {
+ return self.fail("TODO: SPIR-V backend: binary operations for strange integers", .{});
+ },
+ .integer => switch (info.signedness) {
+ .signed => @as(usize, 1),
+ .unsigned => @as(usize, 2),
+ },
+ .float => 0,
else => unreachable,
};
-
+ const opcode = ops[opcode_index];
try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, lhs_id, rhs_id });
// TODO: Trap on overflow? Probably going to be annoying.
// TODO: Look into SPV_KHR_no_integer_wrap_decoration which provides NoSignedWrap/NoUnsignedWrap.
- if (info.class != .strange_integer)
- return result_id;
-
- return self.fail(inst.base.src, "TODO: SPIR-V backend: strange integer operation mask", .{});
+ return result_id;
}
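The `[3]Opcode` triples passed in from `genInst` are ordered {float, signed integer, unsigned integer}, matching the `opcode_index` values computed above. For `.div`, for example:

    // ops = .{ .OpFDiv, .OpSDiv, .OpUDiv }
    // f32 / f32 -> .float                  -> ops[0] == .OpFDiv
    // i32 / i32 -> .integer, .signed       -> ops[1] == .OpSDiv
    // u32 / u32 -> .integer, .unsigned     -> ops[2] == .OpUDiv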
- fn genCmp(self: *DeclGen, inst: *Inst.BinOp) !ResultId {
- const lhs_id = try self.resolve(inst.lhs);
- const rhs_id = try self.resolve(inst.rhs);
-
+ fn airCmp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId {
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs_id = try self.resolve(bin_op.lhs);
+ const rhs_id = try self.resolve(bin_op.rhs);
const result_id = self.spv.allocResultId();
- const result_type_id = try self.genType(inst.base.src, inst.base.ty);
-
- // All of these operations should be 2 equal types -> bool
- std.debug.assert(inst.rhs.ty.eql(inst.lhs.ty));
- std.debug.assert(inst.base.ty.tag() == .bool);
-
- // Comparisons are generally applicable to both scalar and vector operations in SPIR-V, but int and float
- // versions of operations require different opcodes.
- // Since inst.base.ty is always bool and so not very useful, and because both arguments must be the same, just get the info
- // from either of the operands.
- const info = try self.arithmeticTypeInfo(inst.lhs.ty);
-
- if (info.class == .composite_integer) {
- return self.fail(inst.base.src, "TODO: SPIR-V backend: binary operations for composite integers", .{});
- } else if (info.class == .strange_integer) {
- return self.fail(inst.base.src, "TODO: SPIR-V backend: comparison for strange integers", .{});
- }
+ const result_type_id = try self.genType(Type.initTag(.bool));
+ const op_ty = self.air.typeOf(bin_op.lhs);
+ assert(op_ty.eql(self.air.typeOf(bin_op.rhs)));
- const is_bool = info.class == .bool;
- const is_float = info.class == .float;
- const is_signed = info.signedness == .signed;
-
- // **Note**: All these operations must be valid for vectors as well!
- // For floating points, we generally want ordered operations (which return false if either operand is nan).
- const opcode = switch (inst.base.tag) {
- .cmp_eq => if (is_float) Opcode.OpFOrdEqual else if (is_bool) Opcode.OpLogicalEqual else Opcode.OpIEqual,
- .cmp_neq => if (is_float) Opcode.OpFOrdNotEqual else if (is_bool) Opcode.OpLogicalNotEqual else Opcode.OpINotEqual,
- // TODO: Verify that these OpFOrd type operations produce the right value.
- // TODO: Is there a more fundamental difference between OpU and OpS operations here than just the type?
- .cmp_gt => if (is_float) Opcode.OpFOrdGreaterThan else if (is_signed) Opcode.OpSGreaterThan else Opcode.OpUGreaterThan,
- .cmp_gte => if (is_float) Opcode.OpFOrdGreaterThanEqual else if (is_signed) Opcode.OpSGreaterThanEqual else Opcode.OpUGreaterThanEqual,
- .cmp_lt => if (is_float) Opcode.OpFOrdLessThan else if (is_signed) Opcode.OpSLessThan else Opcode.OpULessThan,
- .cmp_lte => if (is_float) Opcode.OpFOrdLessThanEqual else if (is_signed) Opcode.OpSLessThanEqual else Opcode.OpULessThanEqual,
- else => unreachable,
+ // Comparisons are generally applicable to both scalar and vector operations in SPIR-V,
+ // but int and float versions of operations require different opcodes.
+ const info = try self.arithmeticTypeInfo(op_ty);
+
+ const opcode_index: usize = switch (info.class) {
+ .composite_integer => {
+ return self.fail("TODO: SPIR-V backend: binary operations for composite integers", .{});
+ },
+ .strange_integer => {
+ return self.fail("TODO: SPIR-V backend: comparison for strange integers", .{});
+ },
+ .float => 0,
+ .bool => 1,
+ .integer => switch (info.signedness) {
+ .signed => @as(usize, 1),
+ .unsigned => @as(usize, 2),
+ },
};
+ const opcode = ops[opcode_index];
try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, lhs_id, rhs_id });
return result_id;
}
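`airCmp` uses the same triple scheme, with slot 1 doing double duty: in the ordering triples it holds the signed-integer opcode, while in the equality triples it holds the OpLogical* form used for bools. The float forms are the OpFOrd* variants, which return false if either operand is NaN (the behavior the deleted comment above called out). For `.cmp_gt`:

    // ops = .{ .OpFOrdGreaterThan, .OpSGreaterThan, .OpUGreaterThan }
    // f64 > f64 -> ops[0]   (ordered: false when an operand is NaN)
    // i32 > i32 -> ops[1]
    // u32 > u32 -> ops[2]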
- fn genUnOp(self: *DeclGen, inst: *Inst.UnOp) !ResultId {
- const operand_id = try self.resolve(inst.operand);
-
+ fn airNot(self: *DeclGen, inst: Air.Inst.Index) !ResultId {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const operand_id = try self.resolve(ty_op.operand);
const result_id = self.spv.allocResultId();
- const result_type_id = try self.genType(inst.base.src, inst.base.ty);
-
- const opcode = switch (inst.base.tag) {
- // Bool -> bool
- .not => Opcode.OpLogicalNot,
- else => unreachable,
- };
-
+ const result_type_id = try self.genType(Type.initTag(.bool));
+ const opcode: Opcode = .OpLogicalNot;
try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, operand_id });
-
return result_id;
}
- fn genAlloc(self: *DeclGen, inst: *Inst.NoOp) !ResultId {
+ fn airAlloc(self: *DeclGen, inst: Air.Inst.Index) !ResultId {
+ const ty = self.air.typeOfIndex(inst);
const storage_class = spec.StorageClass.Function;
- const result_type_id = try self.genPointerType(inst.base.src, inst.base.ty, storage_class);
+ const result_type_id = try self.genPointerType(ty, storage_class);
const result_id = self.spv.allocResultId();
// Rather than generating into code here, we're just going to generate directly into the fn_decls section so that
@@ -824,12 +819,12 @@ pub const DeclGen = struct {
return result_id;
}
- fn genArg(self: *DeclGen) ResultId {
+ fn airArg(self: *DeclGen) ResultId {
defer self.next_arg_index += 1;
return self.args.items[self.next_arg_index];
}
- fn genBlock(self: *DeclGen, inst: *Inst.Block) !?ResultId {
+ fn airBlock(self: *DeclGen, inst: Air.Inst.Index) !?ResultId {
 // In IR, a block doesn't define an entry point the way a SPIR-V block does; it's more like a scope that breaks can
 // jump out of and "return" a value from. This cannot be directly modelled in SPIR-V, so for a block instruction we
 // split up the current block: first generate the block's code, then a label, and then generate the rest of the current
@@ -849,11 +844,16 @@ pub const DeclGen = struct {
incoming_blocks.deinit(self.spv.gpa);
}
- try self.genBody(inst.body);
+ const ty = self.air.typeOfIndex(inst);
+ const inst_datas = self.air.instructions.items(.data);
+ const extra = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload);
+ const body = self.air.extra[extra.end..][0..extra.data.body_len];
+
+ try self.genBody(body);
try self.beginSPIRVBlock(label_id);
// If this block didn't produce a value, simply return here.
- if (!inst.base.ty.hasCodeGenBits())
+ if (!ty.hasCodeGenBits())
return null;
// Combine the result from the blocks using the Phi instruction.
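The body extraction above is the recurring AIR pattern for payload-carrying instructions: `ty_pl.payload` indexes into `air.extra`, `extraData` decodes the fixed-size header, and the words that follow are the body. `airLoop` below reuses the same `Air.Block` payload shape. Schematically, using the names from this diff:

    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const extra = self.air.extraData(Air.Block, ty_pl.payload);
    // air.extra layout: [ ...header words... | body_len instruction indices ]
    const body = self.air.extra[extra.end..][0..extra.data.body_len];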
@@ -863,7 +863,7 @@ pub const DeclGen = struct {
// TODO: OpPhi is limited in the types that it may produce, such as pointers. Figure out which other types
// are not allowed to be created from a phi node, and throw an error for those. For now, genType already throws
// an error for pointers.
- const result_type_id = try self.genType(inst.base.src, inst.base.ty);
+ const result_type_id = try self.genType(ty);
_ = result_type_id;
try writeOpcode(&self.code, .OpPhi, 2 + @intCast(u16, incoming_blocks.items.len * 2)); // result type + result + variable/parent...
@@ -875,30 +875,26 @@ pub const DeclGen = struct {
return result_id;
}
- fn genBr(self: *DeclGen, inst: *Inst.Br) !void {
- // TODO: This instruction needs to be the last in a block. Is that guaranteed?
- const target = self.blocks.get(inst.block).?;
+ fn airBr(self: *DeclGen, inst: Air.Inst.Index) !void {
+ const br = self.air.instructions.items(.data)[inst].br;
+ const block = self.blocks.get(br.block_inst).?;
+ const operand_ty = self.air.typeOf(br.operand);
- // TODO: For some reason, br is emitted with void parameters.
- if (inst.operand.ty.hasCodeGenBits()) {
- const operand_id = try self.resolve(inst.operand);
+ if (operand_ty.hasCodeGenBits()) {
+ const operand_id = try self.resolve(br.operand);
 // current_block_label_id should not be undefined here, unless there is a br or br_void in the function's body.
- try target.incoming_blocks.append(self.spv.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id });
+ try block.incoming_blocks.append(self.spv.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id });
}
- try writeInstruction(&self.code, .OpBranch, &[_]Word{target.label_id});
- }
-
- fn genBrVoid(self: *DeclGen, inst: *Inst.BrVoid) !void {
- // TODO: This instruction needs to be the last in a block. Is that guaranteed?
- const target = self.blocks.get(inst.block).?;
- // Don't need to add this to the incoming block list, as there is no value to insert in the phi node anyway.
- try writeInstruction(&self.code, .OpBranch, &[_]Word{target.label_id});
+ try writeInstruction(&self.code, .OpBranch, &[_]Word{block.label_id});
}
- fn genCondBr(self: *DeclGen, inst: *Inst.CondBr) !void {
- // TODO: This instruction needs to be the last in a block. Is that guaranteed?
- const condition_id = try self.resolve(inst.condition);
+ fn airCondBr(self: *DeclGen, inst: Air.Inst.Index) !void {
+ const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+ const cond_br = self.air.extraData(Air.CondBr, pl_op.payload);
+ const then_body = self.air.extra[cond_br.end..][0..cond_br.data.then_body_len];
+ const else_body = self.air.extra[cond_br.end + then_body.len ..][0..cond_br.data.else_body_len];
+ const condition_id = try self.resolve(pl_op.operand);
// These will always generate a new SPIR-V block, since they are ir.Body and not ir.Block.
const then_label_id = self.spv.allocResultId();
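`Air.CondBr` packs both branch bodies back-to-back after its header in `air.extra`, which is why the else body above is sliced starting at `cond_br.end + then_body.len`. Schematically:

    // air.extra: [ CondBr header | then_body (then_body_len words) | else_body (else_body_len words) ]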
@@ -914,23 +910,26 @@ pub const DeclGen = struct {
});
try self.beginSPIRVBlock(then_label_id);
- try self.genBody(inst.then_body);
+ try self.genBody(then_body);
try self.beginSPIRVBlock(else_label_id);
- try self.genBody(inst.else_body);
+ try self.genBody(else_body);
}
- fn genDbgStmt(self: *DeclGen, inst: *Inst.DbgStmt) !void {
+ fn airDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void {
+ const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt;
const src_fname_id = try self.spv.resolveSourceFileName(self.decl);
- try writeInstruction(&self.code, .OpLine, &[_]Word{ src_fname_id, inst.line, inst.column });
+ try writeInstruction(&self.code, .OpLine, &[_]Word{ src_fname_id, dbg_stmt.line, dbg_stmt.column });
}
- fn genLoad(self: *DeclGen, inst: *Inst.UnOp) !ResultId {
- const operand_id = try self.resolve(inst.operand);
+ fn airLoad(self: *DeclGen, inst: Air.Inst.Index) !ResultId {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const operand_id = try self.resolve(ty_op.operand);
+ const ty = self.air.typeOfIndex(inst);
- const result_type_id = try self.genType(inst.base.src, inst.base.ty);
+ const result_type_id = try self.genType(ty);
const result_id = self.spv.allocResultId();
- const operands = if (inst.base.ty.isVolatilePtr())
+ const operands = if (ty.isVolatilePtr())
&[_]Word{ result_type_id, result_id, operand_id, @bitCast(u32, spec.MemoryAccess{ .Volatile = true }) }
else
&[_]Word{ result_type_id, result_id, operand_id };
@@ -940,8 +939,10 @@ pub const DeclGen = struct {
return result_id;
}
- fn genLoop(self: *DeclGen, inst: *Inst.Loop) !void {
- // TODO: This instruction needs to be the last in a block. Is that guaranteed?
+ fn airLoop(self: *DeclGen, inst: Air.Inst.Index) !void {
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const loop = self.air.extraData(Air.Block, ty_pl.payload);
+ const body = self.air.extra[loop.end..][0..loop.data.body_len];
const loop_label_id = self.spv.allocResultId();
// Jump to the loop entry point
@@ -950,27 +951,29 @@ pub const DeclGen = struct {
// TODO: Look into OpLoopMerge.
try self.beginSPIRVBlock(loop_label_id);
- try self.genBody(inst.body);
+ try self.genBody(body);
try writeInstruction(&self.code, .OpBranch, &[_]Word{loop_label_id});
}
- fn genRet(self: *DeclGen, inst: *Inst.UnOp) !void {
- const operand_id = try self.resolve(inst.operand);
- // TODO: This instruction needs to be the last in a block. Is that guaranteed?
- try writeInstruction(&self.code, .OpReturnValue, &[_]Word{operand_id});
- }
-
- fn genRetVoid(self: *DeclGen) !void {
- // TODO: This instruction needs to be the last in a block. Is that guaranteed?
- try writeInstruction(&self.code, .OpReturn, &[_]Word{});
+ fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void {
+ const operand = self.air.instructions.items(.data)[inst].un_op;
+ const operand_ty = self.air.typeOf(operand);
+ if (operand_ty.hasCodeGenBits()) {
+ const operand_id = try self.resolve(operand);
+ try writeInstruction(&self.code, .OpReturnValue, &[_]Word{operand_id});
+ } else {
+ try writeInstruction(&self.code, .OpReturn, &[_]Word{});
+ }
}
- fn genStore(self: *DeclGen, inst: *Inst.BinOp) !void {
- const dst_ptr_id = try self.resolve(inst.lhs);
- const src_val_id = try self.resolve(inst.rhs);
+ fn airStore(self: *DeclGen, inst: Air.Inst.Index) !void {
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const dst_ptr_id = try self.resolve(bin_op.lhs);
+ const src_val_id = try self.resolve(bin_op.rhs);
+ const lhs_ty = self.air.typeOf(bin_op.lhs);
- const operands = if (inst.lhs.ty.isVolatilePtr())
+ const operands = if (lhs_ty.isVolatilePtr())
&[_]Word{ dst_ptr_id, src_val_id, @bitCast(u32, spec.MemoryAccess{ .Volatile = true }) }
else
&[_]Word{ dst_ptr_id, src_val_id };
@@ -978,8 +981,7 @@ pub const DeclGen = struct {
try writeInstruction(&self.code, .OpStore, operands);
}
- fn genUnreach(self: *DeclGen) !void {
- // TODO: This instruction needs to be the last in a block. Is that guaranteed?
+ fn airUnreach(self: *DeclGen) !void {
try writeInstruction(&self.code, .OpUnreachable, &[_]Word{});
}
};
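Taken together, the new entry point threads `Air` and `Liveness` through the generator instead of walking pointer-based IR. A hedged sketch of a caller (variable names hypothetical; any DeclGen cleanup routine is not shown in this diff):

    var dg = DeclGen.init(&spv_module);
    if (try dg.gen(decl, air, liveness)) |error_msg| {
        // A reportable codegen error; per the doc comment on `gen`, the
        // caller owns `error_msg` and may call `gen` again for another decl.
        _ = error_msg;
    }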