Diffstat (limited to 'src/codegen')
-rw-r--r--  src/codegen/c.zig      712
-rw-r--r--  src/codegen/llvm.zig   473
-rw-r--r--  src/codegen/spirv.zig  492
-rw-r--r--  src/codegen/wasm.zig   535
4 files changed, 1254 insertions, 958 deletions
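
The theme of this diff: both backends stop chasing heap-allocated `*ir.Inst` pointers and instead index into a flat AIR instruction list, where an `Air.Inst.Ref` either names an interned constant or encodes an `Air.Inst.Index` past a reserved window (see `Air.refToIndex` used in `resolveInst` below). A minimal sketch of that encoding in the Zig of this era; the reserved constants and window size are illustrative assumptions, only the `refToIndex`/`indexToRef` round-trip mirrors the real scheme:

const std = @import("std");

const Ref = enum(u32) { none, zero_usize, one_usize, _ };
const Index = u32;

const ref_start_index: u32 = 3; // size of the reserved constant window (illustrative)

fn indexToRef(index: Index) Ref {
    return @intToEnum(Ref, ref_start_index + index);
}

fn refToIndex(ref: Ref) ?Index {
    const int = @enumToInt(ref);
    if (int >= ref_start_index) return int - ref_start_index;
    return null; // one of the reserved constants, not a body instruction
}

test "refs past the reserved window round-trip to indices" {
    std.debug.assert(refToIndex(indexToRef(42)).? == 42);
    std.debug.assert(refToIndex(.zero_usize) == null);
}
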
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 391375c709..71714cc1b8 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -6,8 +6,6 @@ const log = std.log.scoped(.c);
const link = @import("../link.zig");
const Module = @import("../Module.zig");
const Compilation = @import("../Compilation.zig");
-const ir = @import("../air.zig");
-const Inst = ir.Inst;
const Value = @import("../value.zig").Value;
const Type = @import("../type.zig").Type;
const TypedValue = @import("../TypedValue.zig");
@@ -15,6 +13,9 @@ const C = link.File.C;
const Decl = Module.Decl;
const trace = @import("../tracy.zig").trace;
const LazySrcLoc = Module.LazySrcLoc;
+const Air = @import("../Air.zig");
+const Zir = @import("../Zir.zig");
+const Liveness = @import("../Liveness.zig");
const Mutability = enum { Const, Mut };
@@ -25,7 +26,7 @@ pub const CValue = union(enum) {
/// Index into local_names, but take the address.
local_ref: usize,
/// A constant instruction, to be rendered inline.
- constant: *Inst,
+ constant: Air.Inst.Ref,
/// Index into the parameters
arg: usize,
/// By-value
@@ -38,7 +39,7 @@ const BlockData = struct {
result: CValue,
};
-pub const CValueMap = std.AutoHashMap(*Inst, CValue);
+pub const CValueMap = std.AutoHashMap(Air.Inst.Index, CValue);
pub const TypedefMap = std.ArrayHashMap(
Type,
struct { name: []const u8, rendered: []u8 },
@@ -94,20 +95,23 @@ pub fn fmtIdent(ident: []const u8) std.fmt.Formatter(formatIdent) {
/// It is not available when generating .h file.
pub const Object = struct {
dg: DeclGen,
+ air: Air,
+ liveness: Liveness,
gpa: *mem.Allocator,
code: std.ArrayList(u8),
value_map: CValueMap,
- blocks: std.AutoHashMapUnmanaged(*ir.Inst.Block, BlockData) = .{},
+ blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{},
next_arg_index: usize = 0,
next_local_index: usize = 0,
next_block_index: usize = 0,
indent_writer: IndentWriter(std.ArrayList(u8).Writer),
- fn resolveInst(o: *Object, inst: *Inst) !CValue {
- if (inst.value()) |_| {
+ fn resolveInst(o: *Object, inst: Air.Inst.Ref) !CValue {
+ if (o.air.value(inst)) |_| {
return CValue{ .constant = inst };
}
- return o.value_map.get(inst).?; // Instruction does not dominate all uses!
+ const index = Air.refToIndex(inst).?;
+ return o.value_map.get(index).?; // Assertion failure here means the instruction does not dominate all of its uses.
}
fn allocLocalValue(o: *Object) CValue {
@@ -131,7 +135,11 @@ pub const Object = struct {
.none => unreachable,
.local => |i| return w.print("t{d}", .{i}),
.local_ref => |i| return w.print("&t{d}", .{i}),
- .constant => |inst| return o.dg.renderValue(w, inst.ty, inst.value().?),
+ .constant => |inst| {
+ const ty = o.air.typeOf(inst);
+ const val = o.air.value(inst).?;
+ return o.dg.renderValue(w, ty, val);
+ },
.arg => |i| return w.print("a{d}", .{i}),
.decl => |decl| return w.writeAll(mem.span(decl.name)),
.decl_ref => |decl| return w.print("&{s}", .{decl.name}),
@@ -211,8 +219,9 @@ pub const DeclGen = struct {
error_msg: ?*Module.ErrorMsg,
typedefs: TypedefMap,
- fn fail(dg: *DeclGen, src: LazySrcLoc, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } {
+ fn fail(dg: *DeclGen, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } {
@setCold(true);
+ const src: LazySrcLoc = .{ .node_offset = 0 };
const src_loc = src.toSrcLocWithDecl(dg.decl);
dg.error_msg = try Module.ErrorMsg.create(dg.module.gpa, src_loc, format, args);
return error.AnalysisFail;
@@ -228,7 +237,7 @@ pub const DeclGen = struct {
// This should lower to 0xaa bytes in safe modes, and for unsafe modes should
// lower to leaving variables uninitialized (that might need to be implemented
// outside of this function).
- return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement renderValue undef", .{});
+ return dg.fail("TODO: C backend: implement renderValue undef", .{});
}
switch (t.zigTypeTag()) {
.Int => {
@@ -438,7 +447,7 @@ pub const DeclGen = struct {
},
else => unreachable,
},
- else => |e| return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement value {s}", .{
+ else => |e| return dg.fail("TODO: C backend: implement value {s}", .{
@tagName(e),
}),
}
@@ -517,14 +526,14 @@ pub const DeclGen = struct {
break;
}
} else {
- return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement integer types larger than 128 bits", .{});
+ return dg.fail("TODO: C backend: implement integer types larger than 128 bits", .{});
}
},
else => unreachable,
}
},
- .Float => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Float", .{}),
+ .Float => return dg.fail("TODO: C backend: implement type Float", .{}),
.Pointer => {
if (t.isSlice()) {
@@ -679,7 +688,7 @@ pub const DeclGen = struct {
try dg.renderType(w, int_tag_ty);
},
- .Union => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Union", .{}),
+ .Union => return dg.fail("TODO: C backend: implement type Union", .{}),
.Fn => {
try dg.renderType(w, t.fnReturnType());
try w.writeAll(" (*)(");
@@ -702,10 +711,10 @@ pub const DeclGen = struct {
}
try w.writeByte(')');
},
- .Opaque => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Opaque", .{}),
- .Frame => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Frame", .{}),
- .AnyFrame => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type AnyFrame", .{}),
- .Vector => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Vector", .{}),
+ .Opaque => return dg.fail("TODO: C backend: implement type Opaque", .{}),
+ .Frame => return dg.fail("TODO: C backend: implement type Frame", .{}),
+ .AnyFrame => return dg.fail("TODO: C backend: implement type AnyFrame", .{}),
+ .Vector => return dg.fail("TODO: C backend: implement type Vector", .{}),
.Null,
.Undefined,
@@ -758,7 +767,8 @@ pub fn genDecl(o: *Object) !void {
try o.dg.renderFunctionSignature(o.writer(), is_global);
try o.writer().writeByte(' ');
- try genBody(o, func.body);
+ const main_body = o.air.getMainBody();
+ try genBody(o, main_body);
try o.indent_writer.insertNewline();
return;
@@ -831,9 +841,9 @@ pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void {
}
}
-pub fn genBody(o: *Object, body: ir.Body) error{ AnalysisFail, OutOfMemory }!void {
+fn genBody(o: *Object, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfMemory }!void {
const writer = o.writer();
- if (body.instructions.len == 0) {
+ if (body.len == 0) {
try writer.writeAll("{}");
return;
}
@@ -841,82 +851,92 @@ pub fn genBody(o: *Object, body: ir.Body) error{ AnalysisFail, OutOfMemory }!voi
try writer.writeAll("{\n");
o.indent_writer.pushIndent();
- for (body.instructions) |inst| {
- const result_value = switch (inst.tag) {
+ const air_tags = o.air.instructions.items(.tag);
+
+ for (body) |inst| {
+ const result_value = switch (air_tags[inst]) {
+ // zig fmt: off
+ .constant => unreachable, // excluded from function bodies
+ .const_ty => unreachable, // excluded from function bodies
+ .arg => airArg(o),
+
+ .breakpoint => try airBreakpoint(o),
+ .unreach => try airUnreach(o),
+
// TODO use a different strategy for add that communicates to the optimizer
// that wrapping is UB.
- .add => try genBinOp(o, inst.castTag(.add).?, " + "),
- .addwrap => try genWrapOp(o, inst.castTag(.addwrap).?, " + ", "addw_"),
+ .add => try airBinOp( o, inst, " + "),
+ .addwrap => try airWrapOp(o, inst, " + ", "addw_"),
// TODO use a different strategy for sub that communicates to the optimizer
// that wrapping is UB.
- .sub => try genBinOp(o, inst.castTag(.sub).?, " - "),
- .subwrap => try genWrapOp(o, inst.castTag(.subwrap).?, " - ", "subw_"),
+ .sub => try airBinOp( o, inst, " - "),
+ .subwrap => try airWrapOp(o, inst, " - ", "subw_"),
// TODO use a different strategy for mul that communicates to the optimizer
// that wrapping is UB.
- .mul => try genBinOp(o, inst.castTag(.sub).?, " * "),
- .mulwrap => try genWrapOp(o, inst.castTag(.mulwrap).?, " * ", "mulw_"),
+ .mul => try airBinOp( o, inst, " * "),
+ .mulwrap => try airWrapOp(o, inst, " * ", "mulw_"),
// TODO use a different strategy for div that communicates to the optimizer
// that wrapping is UB.
- .div => try genBinOp(o, inst.castTag(.div).?, " / "),
+ .div => try airBinOp( o, inst, " / "),
+
+ .cmp_eq => try airBinOp(o, inst, " == "),
+ .cmp_gt => try airBinOp(o, inst, " > "),
+ .cmp_gte => try airBinOp(o, inst, " >= "),
+ .cmp_lt => try airBinOp(o, inst, " < "),
+ .cmp_lte => try airBinOp(o, inst, " <= "),
+ .cmp_neq => try airBinOp(o, inst, " != "),
- .constant => unreachable, // excluded from function bodies
- .alloc => try genAlloc(o, inst.castTag(.alloc).?),
- .arg => genArg(o),
- .assembly => try genAsm(o, inst.castTag(.assembly).?),
- .block => try genBlock(o, inst.castTag(.block).?),
- .bitcast => try genBitcast(o, inst.castTag(.bitcast).?),
- .breakpoint => try genBreakpoint(o, inst.castTag(.breakpoint).?),
- .call => try genCall(o, inst.castTag(.call).?),
- .cmp_eq => try genBinOp(o, inst.castTag(.cmp_eq).?, " == "),
- .cmp_gt => try genBinOp(o, inst.castTag(.cmp_gt).?, " > "),
- .cmp_gte => try genBinOp(o, inst.castTag(.cmp_gte).?, " >= "),
- .cmp_lt => try genBinOp(o, inst.castTag(.cmp_lt).?, " < "),
- .cmp_lte => try genBinOp(o, inst.castTag(.cmp_lte).?, " <= "),
- .cmp_neq => try genBinOp(o, inst.castTag(.cmp_neq).?, " != "),
- .dbg_stmt => try genDbgStmt(o, inst.castTag(.dbg_stmt).?),
- .intcast => try genIntCast(o, inst.castTag(.intcast).?),
- .load => try genLoad(o, inst.castTag(.load).?),
- .ret => try genRet(o, inst.castTag(.ret).?),
- .retvoid => try genRetVoid(o),
- .store => try genStore(o, inst.castTag(.store).?),
- .unreach => try genUnreach(o, inst.castTag(.unreach).?),
- .loop => try genLoop(o, inst.castTag(.loop).?),
- .condbr => try genCondBr(o, inst.castTag(.condbr).?),
- .br => try genBr(o, inst.castTag(.br).?),
- .br_void => try genBrVoid(o, inst.castTag(.br_void).?.block),
- .switchbr => try genSwitchBr(o, inst.castTag(.switchbr).?),
// bool_and and bool_or are non-short-circuit operations
- .bool_and => try genBinOp(o, inst.castTag(.bool_and).?, " & "),
- .bool_or => try genBinOp(o, inst.castTag(.bool_or).?, " | "),
- .bit_and => try genBinOp(o, inst.castTag(.bit_and).?, " & "),
- .bit_or => try genBinOp(o, inst.castTag(.bit_or).?, " | "),
- .xor => try genBinOp(o, inst.castTag(.xor).?, " ^ "),
- .not => try genUnOp(o, inst.castTag(.not).?, "!"),
- .is_null => try genIsNull(o, inst.castTag(.is_null).?),
- .is_non_null => try genIsNull(o, inst.castTag(.is_non_null).?),
- .is_null_ptr => try genIsNull(o, inst.castTag(.is_null_ptr).?),
- .is_non_null_ptr => try genIsNull(o, inst.castTag(.is_non_null_ptr).?),
- .wrap_optional => try genWrapOptional(o, inst.castTag(.wrap_optional).?),
- .optional_payload => try genOptionalPayload(o, inst.castTag(.optional_payload).?),
- .optional_payload_ptr => try genOptionalPayload(o, inst.castTag(.optional_payload_ptr).?),
- .ref => try genRef(o, inst.castTag(.ref).?),
- .struct_field_ptr => try genStructFieldPtr(o, inst.castTag(.struct_field_ptr).?),
-
- .is_err => try genIsErr(o, inst.castTag(.is_err).?, "", ".", "!="),
- .is_non_err => try genIsErr(o, inst.castTag(.is_non_err).?, "", ".", "=="),
- .is_err_ptr => try genIsErr(o, inst.castTag(.is_err_ptr).?, "*", "->", "!="),
- .is_non_err_ptr => try genIsErr(o, inst.castTag(.is_non_err_ptr).?, "*", "->", "=="),
-
- .unwrap_errunion_payload => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload).?),
- .unwrap_errunion_err => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err).?),
- .unwrap_errunion_payload_ptr => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload_ptr).?),
- .unwrap_errunion_err_ptr => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err_ptr).?),
- .wrap_errunion_payload => try genWrapErrUnionPay(o, inst.castTag(.wrap_errunion_payload).?),
- .wrap_errunion_err => try genWrapErrUnionErr(o, inst.castTag(.wrap_errunion_err).?),
- .br_block_flat => return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement codegen for br_block_flat", .{}),
- .ptrtoint => return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement codegen for ptrtoint", .{}),
- .varptr => try genVarPtr(o, inst.castTag(.varptr).?),
- .floatcast => return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement codegen for floatcast", .{}),
+ .bool_and => try airBinOp(o, inst, " & "),
+ .bool_or => try airBinOp(o, inst, " | "),
+ .bit_and => try airBinOp(o, inst, " & "),
+ .bit_or => try airBinOp(o, inst, " | "),
+ .xor => try airBinOp(o, inst, " ^ "),
+
+ .not => try airNot( o, inst),
+
+ .optional_payload => try airOptionalPayload(o, inst),
+ .optional_payload_ptr => try airOptionalPayload(o, inst),
+
+ .is_err => try airIsErr(o, inst, "", ".", "!="),
+ .is_non_err => try airIsErr(o, inst, "", ".", "=="),
+ .is_err_ptr => try airIsErr(o, inst, "*", "->", "!="),
+ .is_non_err_ptr => try airIsErr(o, inst, "*", "->", "=="),
+
+ .is_null => try airIsNull(o, inst, "==", ""),
+ .is_non_null => try airIsNull(o, inst, "!=", ""),
+ .is_null_ptr => try airIsNull(o, inst, "==", "[0]"),
+ .is_non_null_ptr => try airIsNull(o, inst, "!=", "[0]"),
+
+ .alloc => try airAlloc(o, inst),
+ .assembly => try airAsm(o, inst),
+ .block => try airBlock(o, inst),
+ .bitcast => try airBitcast(o, inst),
+ .call => try airCall(o, inst),
+ .dbg_stmt => try airDbgStmt(o, inst),
+ .intcast => try airIntCast(o, inst),
+ .load => try airLoad(o, inst),
+ .ret => try airRet(o, inst),
+ .store => try airStore(o, inst),
+ .loop => try airLoop(o, inst),
+ .cond_br => try airCondBr(o, inst),
+ .br => try airBr(o, inst),
+ .switch_br => try airSwitchBr(o, inst),
+ .wrap_optional => try airWrapOptional(o, inst),
+ .ref => try airRef(o, inst),
+ .struct_field_ptr => try airStructFieldPtr(o, inst),
+ .varptr => try airVarPtr(o, inst),
+
+ .unwrap_errunion_payload => try airUnwrapErrUnionPay(o, inst),
+ .unwrap_errunion_err => try airUnwrapErrUnionErr(o, inst),
+ .unwrap_errunion_payload_ptr => try airUnwrapErrUnionPay(o, inst),
+ .unwrap_errunion_err_ptr => try airUnwrapErrUnionErr(o, inst),
+ .wrap_errunion_payload => try airWrapErrUnionPay(o, inst),
+ .wrap_errunion_err => try airWrapErrUnionErr(o, inst),
+
+ .ptrtoint => return o.dg.fail("TODO: C backend: implement codegen for ptrtoint", .{}),
+ .floatcast => return o.dg.fail("TODO: C backend: implement codegen for floatcast", .{}),
+ // zig fmt: on
};
switch (result_value) {
.none => {},
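
The rewritten genBody dispatches on a dense tag slice instead of walking instruction pointers: `o.air.instructions` is a struct-of-arrays list, so `.items(.tag)` yields one contiguous tag array and a body is just `[]const Air.Inst.Index`. A self-contained sketch of the same pattern with a stand-in instruction type, using the allocator idiom of this era:

const std = @import("std");

const Inst = struct {
    tag: Tag,
    data: u32,

    const Tag = enum { add, sub, ret };
};

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = &gpa.allocator; // Zig ~0.8 allocator access

    var instructions: std.MultiArrayList(Inst) = .{};
    defer instructions.deinit(allocator);
    try instructions.append(allocator, .{ .tag = .add, .data = 1 });
    try instructions.append(allocator, .{ .tag = .ret, .data = 0 });

    // One dense tag array for the whole function, as in genBody above.
    const tags = instructions.items(.tag);
    const body = [_]u32{ 0, 1 }; // a body is just a slice of instruction indices
    for (body) |inst| switch (tags[inst]) {
        .add => std.debug.print("lower add\n", .{}),
        .sub => std.debug.print("lower sub\n", .{}),
        .ret => std.debug.print("lower ret\n", .{}),
    };
}
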
@@ -928,38 +948,40 @@ pub fn genBody(o: *Object, body: ir.Body) error{ AnalysisFail, OutOfMemory }!voi
try writer.writeAll("}");
}
-fn genVarPtr(o: *Object, inst: *Inst.VarPtr) !CValue {
- _ = o;
- return CValue{ .decl_ref = inst.variable.owner_decl };
+fn airVarPtr(o: *Object, inst: Air.Inst.Index) !CValue {
+ const ty_pl = o.air.instructions.items(.data)[inst].ty_pl;
+ const variable = o.air.variables[ty_pl.payload];
+ return CValue{ .decl_ref = variable.owner_decl };
}
-fn genAlloc(o: *Object, alloc: *Inst.NoOp) !CValue {
+fn airAlloc(o: *Object, inst: Air.Inst.Index) !CValue {
const writer = o.writer();
+ const inst_ty = o.air.typeOfIndex(inst);
// First line: the variable used as data storage.
- const elem_type = alloc.base.ty.elemType();
- const mutability: Mutability = if (alloc.base.ty.isConstPtr()) .Const else .Mut;
+ const elem_type = inst_ty.elemType();
+ const mutability: Mutability = if (inst_ty.isConstPtr()) .Const else .Mut;
const local = try o.allocLocal(elem_type, mutability);
try writer.writeAll(";\n");
return CValue{ .local_ref = local.local };
}
-fn genArg(o: *Object) CValue {
+fn airArg(o: *Object) CValue {
const i = o.next_arg_index;
o.next_arg_index += 1;
return .{ .arg = i };
}
-fn genRetVoid(o: *Object) !CValue {
- try o.writer().print("return;\n", .{});
- return CValue.none;
-}
-
-fn genLoad(o: *Object, inst: *Inst.UnOp) !CValue {
- const operand = try o.resolveInst(inst.operand);
+fn airLoad(o: *Object, inst: Air.Inst.Index) !CValue {
+ const ty_op = o.air.instructions.items(.data)[inst].ty_op;
+ const is_volatile = o.air.typeOf(ty_op.operand).isVolatilePtr();
+ if (!is_volatile and o.liveness.isUnused(inst))
+ return CValue.none;
+ const inst_ty = o.air.typeOfIndex(inst);
+ const operand = try o.resolveInst(ty_op.operand);
const writer = o.writer();
- const local = try o.allocLocal(inst.base.ty, .Const);
+ const local = try o.allocLocal(inst_ty, .Const);
switch (operand) {
.local_ref => |i| {
const wrapped: CValue = .{ .local = i };
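
Most of the new `air*` handlers begin with `o.liveness.isUnused(inst)` and emit nothing when the result is dead (airLoad above still lowers volatile loads regardless). A toy model of the query, assuming one unused-result bit per instruction index; the real Liveness packs additional operand-death bits next to this flag:

const std = @import("std");

const Liveness = struct {
    bits: []const u64,

    fn isUnused(l: Liveness, inst: u32) bool {
        return ((l.bits[inst / 64] >> @intCast(u6, inst % 64)) & 1) != 0;
    }
};

test "skip unused instructions" {
    const l = Liveness{ .bits = &[_]u64{0b10} }; // instruction 1 is unused
    std.debug.assert(!l.isUnused(0));
    std.debug.assert(l.isUnused(1));
}
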
@@ -982,35 +1004,43 @@ fn genLoad(o: *Object, inst: *Inst.UnOp) !CValue {
return local;
}
-fn genRet(o: *Object, inst: *Inst.UnOp) !CValue {
- const operand = try o.resolveInst(inst.operand);
+fn airRet(o: *Object, inst: Air.Inst.Index) !CValue {
+ const un_op = o.air.instructions.items(.data)[inst].un_op;
const writer = o.writer();
- try writer.writeAll("return ");
- try o.writeCValue(writer, operand);
- try writer.writeAll(";\n");
+ if (o.air.typeOf(un_op).hasCodeGenBits()) {
+ const operand = try o.resolveInst(un_op);
+ try writer.writeAll("return ");
+ try o.writeCValue(writer, operand);
+ try writer.writeAll(";\n");
+ } else {
+ try writer.writeAll("return;\n");
+ }
return CValue.none;
}
-fn genIntCast(o: *Object, inst: *Inst.UnOp) !CValue {
- if (inst.base.isUnused())
+fn airIntCast(o: *Object, inst: Air.Inst.Index) !CValue {
+ if (o.liveness.isUnused(inst))
return CValue.none;
- const from = try o.resolveInst(inst.operand);
+ const ty_op = o.air.instructions.items(.data)[inst].ty_op;
+ const from = try o.resolveInst(ty_op.operand);
const writer = o.writer();
- const local = try o.allocLocal(inst.base.ty, .Const);
+ const inst_ty = o.air.typeOfIndex(inst);
+ const local = try o.allocLocal(inst_ty, .Const);
try writer.writeAll(" = (");
- try o.dg.renderType(writer, inst.base.ty);
+ try o.dg.renderType(writer, inst_ty);
try writer.writeAll(")");
try o.writeCValue(writer, from);
try writer.writeAll(";\n");
return local;
}
-fn genStore(o: *Object, inst: *Inst.BinOp) !CValue {
+fn airStore(o: *Object, inst: Air.Inst.Index) !CValue {
// *a = b;
- const dest_ptr = try o.resolveInst(inst.lhs);
- const src_val = try o.resolveInst(inst.rhs);
+ const bin_op = o.air.instructions.items(.data)[inst].bin_op;
+ const dest_ptr = try o.resolveInst(bin_op.lhs);
+ const src_val = try o.resolveInst(bin_op.rhs);
const writer = o.writer();
switch (dest_ptr) {
@@ -1039,11 +1069,18 @@ fn genStore(o: *Object, inst: *Inst.BinOp) !CValue {
return CValue.none;
}
-fn genWrapOp(o: *Object, inst: *Inst.BinOp, str_op: [*:0]const u8, fn_op: [*:0]const u8) !CValue {
- if (inst.base.isUnused())
+fn airWrapOp(
+ o: *Object,
+ inst: Air.Inst.Index,
+ str_op: [*:0]const u8,
+ fn_op: [*:0]const u8,
+) !CValue {
+ if (o.liveness.isUnused(inst))
return CValue.none;
- const int_info = inst.base.ty.intInfo(o.dg.module.getTarget());
+ const bin_op = o.air.instructions.items(.data)[inst].bin_op;
+ const inst_ty = o.air.typeOfIndex(inst);
+ const int_info = inst_ty.intInfo(o.dg.module.getTarget());
const bits = int_info.bits;
// if it's an unsigned int with non-arbitrary bit size then we can just add
@@ -1052,19 +1089,19 @@ fn genWrapOp(o: *Object, inst: *Inst.BinOp, str_op: [*:0]const u8, fn_op: [*:0]c
8, 16, 32, 64, 128 => true,
else => false,
};
- if (ok_bits or inst.base.ty.tag() != .int_unsigned) {
- return try genBinOp(o, inst, str_op);
+ if (ok_bits or inst_ty.tag() != .int_unsigned) {
+ return try airBinOp(o, inst, str_op);
}
}
if (bits > 64) {
- return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: genWrapOp for large integers", .{});
+ return o.dg.fail("TODO: C backend: airWrapOp for large integers", .{});
}
var min_buf: [80]u8 = undefined;
const min = switch (int_info.signedness) {
.unsigned => "0",
- else => switch (inst.base.ty.tag()) {
+ else => switch (inst_ty.tag()) {
.c_short => "SHRT_MIN",
.c_int => "INT_MIN",
.c_long => "LONG_MIN",
@@ -1081,7 +1118,7 @@ fn genWrapOp(o: *Object, inst: *Inst.BinOp, str_op: [*:0]const u8, fn_op: [*:0]c
};
var max_buf: [80]u8 = undefined;
- const max = switch (inst.base.ty.tag()) {
+ const max = switch (inst_ty.tag()) {
.c_short => "SHRT_MAX",
.c_ushort => "USHRT_MAX",
.c_int => "INT_MAX",
@@ -1105,14 +1142,14 @@ fn genWrapOp(o: *Object, inst: *Inst.BinOp, str_op: [*:0]const u8, fn_op: [*:0]c
},
};
- const lhs = try o.resolveInst(inst.lhs);
- const rhs = try o.resolveInst(inst.rhs);
+ const lhs = try o.resolveInst(bin_op.lhs);
+ const rhs = try o.resolveInst(bin_op.rhs);
const w = o.writer();
- const ret = try o.allocLocal(inst.base.ty, .Mut);
+ const ret = try o.allocLocal(inst_ty, .Mut);
try w.print(" = zig_{s}", .{fn_op});
- switch (inst.base.ty.tag()) {
+ switch (inst_ty.tag()) {
.isize => try w.writeAll("isize"),
.c_short => try w.writeAll("short"),
.c_int => try w.writeAll("int"),
@@ -1149,53 +1186,65 @@ fn genWrapOp(o: *Object, inst: *Inst.BinOp, str_op: [*:0]const u8, fn_op: [*:0]c
return ret;
}
-fn genBinOp(o: *Object, inst: *Inst.BinOp, operator: [*:0]const u8) !CValue {
- if (inst.base.isUnused())
+fn airNot(o: *Object, inst: Air.Inst.Index) !CValue {
+ if (o.liveness.isUnused(inst))
return CValue.none;
- const lhs = try o.resolveInst(inst.lhs);
- const rhs = try o.resolveInst(inst.rhs);
+ const ty_op = o.air.instructions.items(.data)[inst].ty_op;
+ const op = try o.resolveInst(ty_op.operand);
const writer = o.writer();
- const local = try o.allocLocal(inst.base.ty, .Const);
+ const inst_ty = o.air.typeOfIndex(inst);
+ const local = try o.allocLocal(inst_ty, .Const);
try writer.writeAll(" = ");
- try o.writeCValue(writer, lhs);
- try writer.print("{s}", .{operator});
- try o.writeCValue(writer, rhs);
+ if (inst_ty.zigTypeTag() == .Bool)
+ try writer.writeAll("!")
+ else
+ try writer.writeAll("~");
+ try o.writeCValue(writer, op);
try writer.writeAll(";\n");
return local;
}
-fn genUnOp(o: *Object, inst: *Inst.UnOp, operator: []const u8) !CValue {
- if (inst.base.isUnused())
+fn airBinOp(o: *Object, inst: Air.Inst.Index, operator: [*:0]const u8) !CValue {
+ if (o.liveness.isUnused(inst))
return CValue.none;
- const operand = try o.resolveInst(inst.operand);
+ const bin_op = o.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try o.resolveInst(bin_op.lhs);
+ const rhs = try o.resolveInst(bin_op.rhs);
const writer = o.writer();
- const local = try o.allocLocal(inst.base.ty, .Const);
+ const inst_ty = o.air.typeOfIndex(inst);
+ const local = try o.allocLocal(inst_ty, .Const);
- try writer.print(" = {s}", .{operator});
- try o.writeCValue(writer, operand);
+ try writer.writeAll(" = ");
+ try o.writeCValue(writer, lhs);
+ try writer.print("{s}", .{operator});
+ try o.writeCValue(writer, rhs);
try writer.writeAll(";\n");
return local;
}
-fn genCall(o: *Object, inst: *Inst.Call) !CValue {
- if (inst.func.castTag(.constant)) |func_inst| {
- const fn_decl = if (func_inst.val.castTag(.extern_fn)) |extern_fn|
+fn airCall(o: *Object, inst: Air.Inst.Index) !CValue {
+ const pl_op = o.air.instructions.items(.data)[inst].pl_op;
+ const extra = o.air.extraData(Air.Call, pl_op.payload);
+ const args = @bitCast([]const Air.Inst.Ref, o.air.extra[extra.end..][0..extra.data.args_len]);
+
+ if (o.air.value(pl_op.operand)) |func_val| {
+ const fn_decl = if (func_val.castTag(.extern_fn)) |extern_fn|
extern_fn.data
- else if (func_inst.val.castTag(.function)) |func_payload|
+ else if (func_val.castTag(.function)) |func_payload|
func_payload.data.owner_decl
else
unreachable;
const fn_ty = fn_decl.ty;
const ret_ty = fn_ty.fnReturnType();
- const unused_result = inst.base.isUnused();
+ const unused_result = o.liveness.isUnused(inst);
var result_local: CValue = .none;
const writer = o.writer();
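
airCall above shows the decoding convention used throughout the new code: a fixed-size header struct is read out of the flat `extra` array of u32s, and `extra.end` marks where the variable-length trailer (here, the argument refs) begins. A simplified, self-contained version of such an `extraData` helper, modeled on the pattern visible in the diff:

const std = @import("std");

fn ExtraResult(comptime T: type) type {
    return struct { data: T, end: usize };
}

// Reads each u32 field of T in declaration order and reports where the
// header ends, so callers can slice the trailer that follows it.
fn extraData(extra: []const u32, comptime T: type, index: usize) ExtraResult(T) {
    var result: T = undefined;
    var i = index;
    inline for (std.meta.fields(T)) |field| {
        @field(result, field.name) = extra[i];
        i += 1;
    }
    return .{ .data = result, .end = i };
}

const Call = struct { args_len: u32 };

test "decode a call payload" {
    // Header { args_len = 2 } followed by two argument refs.
    const extra = [_]u32{ 2, 7, 9 };
    const call = extraData(&extra, Call, 0);
    const args = extra[call.end..][0..call.data.args_len];
    std.debug.assert(args.len == 2 and args[0] == 7 and args[1] == 9);
}
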
@@ -1209,41 +1258,44 @@ fn genCall(o: *Object, inst: *Inst.Call) !CValue {
}
const fn_name = mem.spanZ(fn_decl.name);
try writer.print("{s}(", .{fn_name});
- if (inst.args.len != 0) {
- for (inst.args) |arg, i| {
- if (i > 0) {
- try writer.writeAll(", ");
- }
- if (arg.value()) |val| {
- try o.dg.renderValue(writer, arg.ty, val);
- } else {
- const val = try o.resolveInst(arg);
- try o.writeCValue(writer, val);
- }
+ for (args) |arg, i| {
+ if (i != 0) {
+ try writer.writeAll(", ");
+ }
+ if (o.air.value(arg)) |val| {
+ try o.dg.renderValue(writer, o.air.typeOf(arg), val);
+ } else {
+ const val = try o.resolveInst(arg);
+ try o.writeCValue(writer, val);
}
}
try writer.writeAll(");\n");
return result_local;
} else {
- return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement function pointers", .{});
+ return o.dg.fail("TODO: C backend: implement function pointers", .{});
}
}
-fn genDbgStmt(o: *Object, inst: *Inst.DbgStmt) !CValue {
- _ = o;
- _ = inst;
- // TODO emit #line directive here with line number and filename
+fn airDbgStmt(o: *Object, inst: Air.Inst.Index) !CValue {
+ const dbg_stmt = o.air.instructions.items(.data)[inst].dbg_stmt;
+ const writer = o.writer();
+ try writer.print("#line {d}\n", .{dbg_stmt.line + 1});
return CValue.none;
}
-fn genBlock(o: *Object, inst: *Inst.Block) !CValue {
+fn airBlock(o: *Object, inst: Air.Inst.Index) !CValue {
+ const ty_pl = o.air.instructions.items(.data)[inst].ty_pl;
+ const extra = o.air.extraData(Air.Block, ty_pl.payload);
+ const body = o.air.extra[extra.end..][0..extra.data.body_len];
+
const block_id: usize = o.next_block_index;
o.next_block_index += 1;
const writer = o.writer();
- const result = if (inst.base.ty.tag() != .void and !inst.base.isUnused()) blk: {
+ const inst_ty = o.air.typeOfIndex(inst);
+ const result = if (inst_ty.tag() != .void and !o.liveness.isUnused(inst)) blk: {
// allocate a location for the result
- const local = try o.allocLocal(inst.base.ty, .Mut);
+ const local = try o.allocLocal(inst_ty, .Mut);
try writer.writeAll(";\n");
break :blk local;
} else CValue{ .none = {} };
@@ -1253,42 +1305,44 @@ fn genBlock(o: *Object, inst: *Inst.Block) !CValue {
.result = result,
});
- try genBody(o, inst.body);
+ try genBody(o, body);
try o.indent_writer.insertNewline();
// label must be followed by an expression, add an empty one.
try writer.print("zig_block_{d}:;\n", .{block_id});
return result;
}
-fn genBr(o: *Object, inst: *Inst.Br) !CValue {
- const result = o.blocks.get(inst.block).?.result;
+fn airBr(o: *Object, inst: Air.Inst.Index) !CValue {
+ const branch = o.air.instructions.items(.data)[inst].br;
+ const block = o.blocks.get(branch.block_inst).?;
+ const result = block.result;
const writer = o.writer();
// If result is .none then the value of the block is unused.
- if (inst.operand.ty.tag() != .void and result != .none) {
- const operand = try o.resolveInst(inst.operand);
+ if (result != .none) {
+ const operand = try o.resolveInst(branch.operand);
try o.writeCValue(writer, result);
try writer.writeAll(" = ");
try o.writeCValue(writer, operand);
try writer.writeAll(";\n");
}
- return genBrVoid(o, inst.block);
-}
-
-fn genBrVoid(o: *Object, block: *Inst.Block) !CValue {
- try o.writer().print("goto zig_block_{d};\n", .{o.blocks.get(block).?.block_id});
+ try o.writer().print("goto zig_block_{d};\n", .{block.block_id});
return CValue.none;
}
-fn genBitcast(o: *Object, inst: *Inst.UnOp) !CValue {
- const operand = try o.resolveInst(inst.operand);
+fn airBitcast(o: *Object, inst: Air.Inst.Index) !CValue {
+ const ty_op = o.air.instructions.items(.data)[inst].ty_op;
+ const operand = try o.resolveInst(ty_op.operand);
const writer = o.writer();
- if (inst.base.ty.zigTypeTag() == .Pointer and inst.operand.ty.zigTypeTag() == .Pointer) {
- const local = try o.allocLocal(inst.base.ty, .Const);
+ const inst_ty = o.air.typeOfIndex(inst);
+ if (inst_ty.zigTypeTag() == .Pointer and
+ o.air.typeOf(ty_op.operand).zigTypeTag() == .Pointer)
+ {
+ const local = try o.allocLocal(inst_ty, .Const);
try writer.writeAll(" = (");
- try o.dg.renderType(writer, inst.base.ty);
+ try o.dg.renderType(writer, inst_ty);
try writer.writeAll(")");
try o.writeCValue(writer, operand);
@@ -1296,7 +1350,7 @@ fn genBitcast(o: *Object, inst: *Inst.UnOp) !CValue {
return local;
}
- const local = try o.allocLocal(inst.base.ty, .Mut);
+ const local = try o.allocLocal(inst_ty, .Mut);
try writer.writeAll(";\n");
try writer.writeAll("memcpy(&");
@@ -1310,60 +1364,79 @@ fn genBitcast(o: *Object, inst: *Inst.UnOp) !CValue {
return local;
}
-fn genBreakpoint(o: *Object, inst: *Inst.NoOp) !CValue {
- _ = inst;
+fn airBreakpoint(o: *Object) !CValue {
try o.writer().writeAll("zig_breakpoint();\n");
return CValue.none;
}
-fn genUnreach(o: *Object, inst: *Inst.NoOp) !CValue {
- _ = inst;
+fn airUnreach(o: *Object) !CValue {
try o.writer().writeAll("zig_unreachable();\n");
return CValue.none;
}
-fn genLoop(o: *Object, inst: *Inst.Loop) !CValue {
+fn airLoop(o: *Object, inst: Air.Inst.Index) !CValue {
+ const ty_pl = o.air.instructions.items(.data)[inst].ty_pl;
+ const loop = o.air.extraData(Air.Block, ty_pl.payload);
+ const body = o.air.extra[loop.end..][0..loop.data.body_len];
try o.writer().writeAll("while (true) ");
- try genBody(o, inst.body);
+ try genBody(o, body);
try o.indent_writer.insertNewline();
return CValue.none;
}
-fn genCondBr(o: *Object, inst: *Inst.CondBr) !CValue {
- const cond = try o.resolveInst(inst.condition);
+fn airCondBr(o: *Object, inst: Air.Inst.Index) !CValue {
+ const pl_op = o.air.instructions.items(.data)[inst].pl_op;
+ const cond = try o.resolveInst(pl_op.operand);
+ const extra = o.air.extraData(Air.CondBr, pl_op.payload);
+ const then_body = o.air.extra[extra.end..][0..extra.data.then_body_len];
+ const else_body = o.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
const writer = o.writer();
try writer.writeAll("if (");
try o.writeCValue(writer, cond);
try writer.writeAll(") ");
- try genBody(o, inst.then_body);
+ try genBody(o, then_body);
try writer.writeAll(" else ");
- try genBody(o, inst.else_body);
+ try genBody(o, else_body);
try o.indent_writer.insertNewline();
return CValue.none;
}
-fn genSwitchBr(o: *Object, inst: *Inst.SwitchBr) !CValue {
- const target = try o.resolveInst(inst.target);
+fn airSwitchBr(o: *Object, inst: Air.Inst.Index) !CValue {
+ const pl_op = o.air.instructions.items(.data)[inst].pl_op;
+ const condition = try o.resolveInst(pl_op.operand);
+ const condition_ty = o.air.typeOf(pl_op.operand);
+ const switch_br = o.air.extraData(Air.SwitchBr, pl_op.payload);
const writer = o.writer();
try writer.writeAll("switch (");
- try o.writeCValue(writer, target);
- try writer.writeAll(") {\n");
+ try o.writeCValue(writer, condition);
+ try writer.writeAll(") {");
o.indent_writer.pushIndent();
- for (inst.cases) |case| {
- try writer.writeAll("case ");
- try o.dg.renderValue(writer, inst.target.ty, case.item);
- try writer.writeAll(": ");
- // the case body must be noreturn so we don't need to insert a break
- try genBody(o, case.body);
- try o.indent_writer.insertNewline();
+ var extra_index: usize = switch_br.end;
+ var case_i: u32 = 0;
+ while (case_i < switch_br.data.cases_len) : (case_i += 1) {
+ const case = o.air.extraData(Air.SwitchBr.Case, extra_index);
+ const items = @bitCast([]const Air.Inst.Ref, o.air.extra[case.end..][0..case.data.items_len]);
+ const case_body = o.air.extra[case.end + items.len ..][0..case.data.body_len];
+ extra_index = case.end + case.data.items_len + case_body.len;
+
+ for (items) |item| {
+ try o.indent_writer.insertNewline();
+ try writer.writeAll("case ");
+ try o.dg.renderValue(writer, condition_ty, o.air.value(item).?);
+ try writer.writeAll(": ");
+ }
+ // The case body must be noreturn so we don't need to insert a break.
+ try genBody(o, case_body);
}
+ const else_body = o.air.extra[extra_index..][0..switch_br.data.else_body_len];
+ try o.indent_writer.insertNewline();
try writer.writeAll("default: ");
- try genBody(o, inst.else_body);
+ try genBody(o, else_body);
try o.indent_writer.insertNewline();
o.indent_writer.popIndent();
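
airSwitchBr cannot use a single `extraData` call because each case carries its own variable-length items and body, so it advances one running `extra_index` across all cases before slicing the trailing else body. The same cursor walk, reduced to plain u32 reads over a hypothetical `{ items_len, body_len }` case header layout:

const std = @import("std");

fn walkCases(extra: []const u32, cases_len: u32, else_body_len: u32) void {
    var extra_index: usize = 0;
    var case_i: u32 = 0;
    while (case_i < cases_len) : (case_i += 1) {
        const items_len = extra[extra_index];
        const body_len = extra[extra_index + 1];
        const items = extra[extra_index + 2 ..][0..items_len];
        const case_body = extra[extra_index + 2 + items_len ..][0..body_len];
        extra_index += 2 + items_len + body_len;
        // A real backend emits one `case <item>:` label per entry in `items`,
        // then lowers `case_body`; the body is noreturn, so no `break` is needed.
        _ = items;
        _ = case_body;
    }
    const else_body = extra[extra_index..][0..else_body_len];
    _ = else_body; // lowered under the `default:` label
}

test "walk two cases and an else body without overlapping slices" {
    // case 0: 1 item, 1 body inst; case 1: 2 items, 1 body inst; else: 1 inst.
    const extra = [_]u32{ 1, 1, 10, 100, 2, 1, 20, 21, 101, 102 };
    walkCases(&extra, 2, 1);
}
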
@@ -1371,39 +1444,75 @@ fn genSwitchBr(o: *Object, inst: *Inst.SwitchBr) !CValue {
return CValue.none;
}
-fn genAsm(o: *Object, as: *Inst.Assembly) !CValue {
- if (as.base.isUnused() and !as.is_volatile)
+fn airAsm(o: *Object, inst: Air.Inst.Index) !CValue {
+ const air_datas = o.air.instructions.items(.data);
+ const air_extra = o.air.extraData(Air.Asm, air_datas[inst].ty_pl.payload);
+ const zir = o.dg.decl.namespace.file_scope.zir;
+ const extended = zir.instructions.items(.data)[air_extra.data.zir_index].extended;
+ const zir_extra = zir.extraData(Zir.Inst.Asm, extended.operand);
+ const asm_source = zir.nullTerminatedString(zir_extra.data.asm_source);
+ const outputs_len = @truncate(u5, extended.small);
+ const args_len = @truncate(u5, extended.small >> 5);
+ const clobbers_len = @truncate(u5, extended.small >> 10);
+ _ = clobbers_len; // TODO honor these
+ const is_volatile = @truncate(u1, extended.small >> 15) != 0;
+ const outputs = @bitCast([]const Air.Inst.Ref, o.air.extra[air_extra.end..][0..outputs_len]);
+ const args = @bitCast([]const Air.Inst.Ref, o.air.extra[air_extra.end + outputs.len ..][0..args_len]);
+
+ if (outputs_len > 1) {
+ return o.dg.fail("TODO implement codegen for asm with more than 1 output", .{});
+ }
+
+ if (o.liveness.isUnused(inst) and !is_volatile)
return CValue.none;
+ var extra_i: usize = zir_extra.end;
+ const output_constraint: ?[]const u8 = out: {
+ var i: usize = 0;
+ while (i < outputs_len) : (i += 1) {
+ const output = zir.extraData(Zir.Inst.Asm.Output, extra_i);
+ extra_i = output.end;
+ break :out zir.nullTerminatedString(output.data.constraint);
+ }
+ break :out null;
+ };
+ const args_extra_begin = extra_i;
+
const writer = o.writer();
- for (as.inputs) |i, index| {
- if (i[0] == '{' and i[i.len - 1] == '}') {
- const reg = i[1 .. i.len - 1];
- const arg = as.args[index];
+ for (args) |arg| {
+ const input = zir.extraData(Zir.Inst.Asm.Input, extra_i);
+ extra_i = input.end;
+ const constraint = zir.nullTerminatedString(input.data.constraint);
+ if (constraint[0] == '{' and constraint[constraint.len - 1] == '}') {
+ const reg = constraint[1 .. constraint.len - 1];
const arg_c_value = try o.resolveInst(arg);
try writer.writeAll("register ");
- try o.dg.renderType(writer, arg.ty);
+ try o.dg.renderType(writer, o.air.typeOf(arg));
try writer.print(" {s}_constant __asm__(\"{s}\") = ", .{ reg, reg });
try o.writeCValue(writer, arg_c_value);
try writer.writeAll(";\n");
} else {
- return o.dg.fail(.{ .node_offset = 0 }, "TODO non-explicit inline asm regs", .{});
+ return o.dg.fail("TODO non-explicit inline asm regs", .{});
}
}
- const volatile_string: []const u8 = if (as.is_volatile) "volatile " else "";
- try writer.print("__asm {s}(\"{s}\"", .{ volatile_string, as.asm_source });
- if (as.output_constraint) |_| {
- return o.dg.fail(.{ .node_offset = 0 }, "TODO: CBE inline asm output", .{});
+ const volatile_string: []const u8 = if (is_volatile) "volatile " else "";
+ try writer.print("__asm {s}(\"{s}\"", .{ volatile_string, asm_source });
+ if (output_constraint) |_| {
+ return o.dg.fail("TODO: CBE inline asm output", .{});
}
- if (as.inputs.len > 0) {
- if (as.output_constraint == null) {
+ if (args.len > 0) {
+ if (output_constraint == null) {
try writer.writeAll(" :");
}
try writer.writeAll(": ");
- for (as.inputs) |i, index| {
- if (i[0] == '{' and i[i.len - 1] == '}') {
- const reg = i[1 .. i.len - 1];
+ extra_i = args_extra_begin;
+ for (args) |_, index| {
+ const input = zir.extraData(Zir.Inst.Asm.Input, extra_i);
+ extra_i = input.end;
+ const constraint = zir.nullTerminatedString(input.data.constraint);
+ if (constraint[0] == '{' and constraint[constraint.len - 1] == '}') {
+ const reg = constraint[1 .. constraint.len - 1];
if (index > 0) {
try writer.writeAll(", ");
}
@@ -1416,40 +1525,51 @@ fn genAsm(o: *Object, as: *Inst.Assembly) !CValue {
}
try writer.writeAll(");\n");
- if (as.base.isUnused())
+ if (o.liveness.isUnused(inst))
return CValue.none;
- return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: inline asm expression result used", .{});
+ return o.dg.fail("TODO: C backend: inline asm expression result used", .{});
}
-fn genIsNull(o: *Object, inst: *Inst.UnOp) !CValue {
+fn airIsNull(
+ o: *Object,
+ inst: Air.Inst.Index,
+ operator: [*:0]const u8,
+ deref_suffix: [*:0]const u8,
+) !CValue {
+ if (o.liveness.isUnused(inst))
+ return CValue.none;
+
+ const un_op = o.air.instructions.items(.data)[inst].un_op;
const writer = o.writer();
- const invert_logic = inst.base.tag == .is_non_null or inst.base.tag == .is_non_null_ptr;
- const operator = if (invert_logic) "!=" else "==";
- const maybe_deref = if (inst.base.tag == .is_null_ptr or inst.base.tag == .is_non_null_ptr) "[0]" else "";
- const operand = try o.resolveInst(inst.operand);
+ const operand = try o.resolveInst(un_op);
const local = try o.allocLocal(Type.initTag(.bool), .Const);
try writer.writeAll(" = (");
try o.writeCValue(writer, operand);
- if (inst.operand.ty.isPtrLikeOptional()) {
+ if (o.air.typeOf(un_op).isPtrLikeOptional()) {
// operand is a regular pointer, test `operand !=/== NULL`
- try writer.print("){s} {s} NULL;\n", .{ maybe_deref, operator });
+ try writer.print("){s} {s} NULL;\n", .{ deref_suffix, operator });
} else {
- try writer.print("){s}.is_null {s} true;\n", .{ maybe_deref, operator });
+ try writer.print("){s}.is_null {s} true;\n", .{ deref_suffix, operator });
}
return local;
}
-fn genOptionalPayload(o: *Object, inst: *Inst.UnOp) !CValue {
+fn airOptionalPayload(o: *Object, inst: Air.Inst.Index) !CValue {
+ if (o.liveness.isUnused(inst))
+ return CValue.none;
+
+ const ty_op = o.air.instructions.items(.data)[inst].ty_op;
const writer = o.writer();
- const operand = try o.resolveInst(inst.operand);
+ const operand = try o.resolveInst(ty_op.operand);
+ const operand_ty = o.air.typeOf(ty_op.operand);
- const opt_ty = if (inst.operand.ty.zigTypeTag() == .Pointer)
- inst.operand.ty.elemType()
+ const opt_ty = if (operand_ty.zigTypeTag() == .Pointer)
+ operand_ty.elemType()
else
- inst.operand.ty;
+ operand_ty;
if (opt_ty.isPtrLikeOptional()) {
// the operand is just a regular pointer, no need to do anything special.
@@ -1457,10 +1577,11 @@ fn genOptionalPayload(o: *Object, inst: *Inst.UnOp) !CValue {
return operand;
}
- const maybe_deref = if (inst.operand.ty.zigTypeTag() == .Pointer) "->" else ".";
- const maybe_addrof = if (inst.base.ty.zigTypeTag() == .Pointer) "&" else "";
+ const inst_ty = o.air.typeOfIndex(inst);
+ const maybe_deref = if (operand_ty.zigTypeTag() == .Pointer) "->" else ".";
+ const maybe_addrof = if (inst_ty.zigTypeTag() == .Pointer) "&" else "";
- const local = try o.allocLocal(inst.base.ty, .Const);
+ const local = try o.allocLocal(inst_ty, .Const);
try writer.print(" = {s}(", .{maybe_addrof});
try o.writeCValue(writer, operand);
@@ -1468,24 +1589,36 @@ fn genOptionalPayload(o: *Object, inst: *Inst.UnOp) !CValue {
return local;
}
-fn genRef(o: *Object, inst: *Inst.UnOp) !CValue {
+fn airRef(o: *Object, inst: Air.Inst.Index) !CValue {
+ if (o.liveness.isUnused(inst))
+ return CValue.none;
+
+ const ty_op = o.air.instructions.items(.data)[inst].ty_op;
const writer = o.writer();
- const operand = try o.resolveInst(inst.operand);
+ const operand = try o.resolveInst(ty_op.operand);
- const local = try o.allocLocal(inst.base.ty, .Const);
+ const inst_ty = o.air.typeOfIndex(inst);
+ const local = try o.allocLocal(inst_ty, .Const);
try writer.writeAll(" = ");
try o.writeCValue(writer, operand);
try writer.writeAll(";\n");
return local;
}
-fn genStructFieldPtr(o: *Object, inst: *Inst.StructFieldPtr) !CValue {
+fn airStructFieldPtr(o: *Object, inst: Air.Inst.Index) !CValue {
+ if (o.liveness.isUnused(inst))
+ return CValue.none;
+
+ const ty_pl = o.air.instructions.items(.data)[inst].ty_pl;
+ const extra = o.air.extraData(Air.StructField, ty_pl.payload).data;
const writer = o.writer();
- const struct_ptr = try o.resolveInst(inst.struct_ptr);
- const struct_obj = inst.struct_ptr.ty.elemType().castTag(.@"struct").?.data;
- const field_name = struct_obj.fields.keys()[inst.field_index];
+ const struct_ptr = try o.resolveInst(extra.struct_ptr);
+ const struct_ptr_ty = o.air.typeOf(extra.struct_ptr);
+ const struct_obj = struct_ptr_ty.elemType().castTag(.@"struct").?.data;
+ const field_name = struct_obj.fields.keys()[extra.field_index];
- const local = try o.allocLocal(inst.base.ty, .Const);
+ const inst_ty = o.air.typeOfIndex(inst);
+ const local = try o.allocLocal(inst_ty, .Const);
switch (struct_ptr) {
.local_ref => |i| {
try writer.print(" = &t{d}.{};\n", .{ i, fmtIdent(field_name) });
@@ -1500,17 +1633,20 @@ fn genStructFieldPtr(o: *Object, inst: *Inst.StructFieldPtr) !CValue {
}
// *(E!T) -> E NOT *E
-fn genUnwrapErrUnionErr(o: *Object, inst: *Inst.UnOp) !CValue {
- if (inst.base.isUnused())
+fn airUnwrapErrUnionErr(o: *Object, inst: Air.Inst.Index) !CValue {
+ if (o.liveness.isUnused(inst))
return CValue.none;
+ const ty_op = o.air.instructions.items(.data)[inst].ty_op;
+ const inst_ty = o.air.typeOfIndex(inst);
const writer = o.writer();
- const operand = try o.resolveInst(inst.operand);
+ const operand = try o.resolveInst(ty_op.operand);
+ const operand_ty = o.air.typeOf(ty_op.operand);
- const payload_ty = inst.operand.ty.errorUnionChild();
+ const payload_ty = operand_ty.errorUnionChild();
if (!payload_ty.hasCodeGenBits()) {
- if (inst.operand.ty.zigTypeTag() == .Pointer) {
- const local = try o.allocLocal(inst.base.ty, .Const);
+ if (operand_ty.zigTypeTag() == .Pointer) {
+ const local = try o.allocLocal(inst_ty, .Const);
try writer.writeAll(" = *");
try o.writeCValue(writer, operand);
try writer.writeAll(";\n");
@@ -1520,9 +1656,9 @@ fn genUnwrapErrUnionErr(o: *Object, inst: *Inst.UnOp) !CValue {
}
}
- const maybe_deref = if (inst.operand.ty.zigTypeTag() == .Pointer) "->" else ".";
+ const maybe_deref = if (operand_ty.zigTypeTag() == .Pointer) "->" else ".";
- const local = try o.allocLocal(inst.base.ty, .Const);
+ const local = try o.allocLocal(inst_ty, .Const);
try writer.writeAll(" = (");
try o.writeCValue(writer, operand);
@@ -1530,22 +1666,25 @@ fn genUnwrapErrUnionErr(o: *Object, inst: *Inst.UnOp) !CValue {
return local;
}
-fn genUnwrapErrUnionPay(o: *Object, inst: *Inst.UnOp) !CValue {
- if (inst.base.isUnused())
+fn airUnwrapErrUnionPay(o: *Object, inst: Air.Inst.Index) !CValue {
+ if (o.liveness.isUnused(inst))
return CValue.none;
+ const ty_op = o.air.instructions.items(.data)[inst].ty_op;
const writer = o.writer();
- const operand = try o.resolveInst(inst.operand);
+ const operand = try o.resolveInst(ty_op.operand);
+ const operand_ty = o.air.typeOf(ty_op.operand);
- const payload_ty = inst.operand.ty.errorUnionChild();
+ const payload_ty = operand_ty.errorUnionChild();
if (!payload_ty.hasCodeGenBits()) {
return CValue.none;
}
- const maybe_deref = if (inst.operand.ty.zigTypeTag() == .Pointer) "->" else ".";
- const maybe_addrof = if (inst.base.ty.zigTypeTag() == .Pointer) "&" else "";
+ const inst_ty = o.air.typeOfIndex(inst);
+ const maybe_deref = if (operand_ty.zigTypeTag() == .Pointer) "->" else ".";
+ const maybe_addrof = if (inst_ty.zigTypeTag() == .Pointer) "&" else "";
- const local = try o.allocLocal(inst.base.ty, .Const);
+ const local = try o.allocLocal(inst_ty, .Const);
try writer.print(" = {s}(", .{maybe_addrof});
try o.writeCValue(writer, operand);
@@ -1553,54 +1692,75 @@ fn genUnwrapErrUnionPay(o: *Object, inst: *Inst.UnOp) !CValue {
return local;
}
-fn genWrapOptional(o: *Object, inst: *Inst.UnOp) !CValue {
+fn airWrapOptional(o: *Object, inst: Air.Inst.Index) !CValue {
+ if (o.liveness.isUnused(inst))
+ return CValue.none;
+
+ const ty_op = o.air.instructions.items(.data)[inst].ty_op;
const writer = o.writer();
- const operand = try o.resolveInst(inst.operand);
+ const operand = try o.resolveInst(ty_op.operand);
- if (inst.base.ty.isPtrLikeOptional()) {
+ const inst_ty = o.air.typeOfIndex(inst);
+ if (inst_ty.isPtrLikeOptional()) {
// the operand is just a regular pointer, no need to do anything special.
return operand;
}
// .wrap_optional is used to convert non-optionals into optionals so it can never be null.
- const local = try o.allocLocal(inst.base.ty, .Const);
+ const local = try o.allocLocal(inst_ty, .Const);
try writer.writeAll(" = { .is_null = false, .payload =");
try o.writeCValue(writer, operand);
try writer.writeAll("};\n");
return local;
}
-fn genWrapErrUnionErr(o: *Object, inst: *Inst.UnOp) !CValue {
+fn airWrapErrUnionErr(o: *Object, inst: Air.Inst.Index) !CValue {
+ if (o.liveness.isUnused(inst))
+ return CValue.none;
+
const writer = o.writer();
- const operand = try o.resolveInst(inst.operand);
+ const ty_op = o.air.instructions.items(.data)[inst].ty_op;
+ const operand = try o.resolveInst(ty_op.operand);
- const local = try o.allocLocal(inst.base.ty, .Const);
+ const inst_ty = o.air.typeOfIndex(inst);
+ const local = try o.allocLocal(inst_ty, .Const);
try writer.writeAll(" = { .error = ");
try o.writeCValue(writer, operand);
try writer.writeAll(" };\n");
return local;
}
-fn genWrapErrUnionPay(o: *Object, inst: *Inst.UnOp) !CValue {
+
+fn airWrapErrUnionPay(o: *Object, inst: Air.Inst.Index) !CValue {
+ if (o.liveness.isUnused(inst))
+ return CValue.none;
+
+ const ty_op = o.air.instructions.items(.data)[inst].ty_op;
const writer = o.writer();
- const operand = try o.resolveInst(inst.operand);
+ const operand = try o.resolveInst(ty_op.operand);
- const local = try o.allocLocal(inst.base.ty, .Const);
+ const inst_ty = o.air.typeOfIndex(inst);
+ const local = try o.allocLocal(inst_ty, .Const);
try writer.writeAll(" = { .error = 0, .payload = ");
try o.writeCValue(writer, operand);
try writer.writeAll(" };\n");
return local;
}
-fn genIsErr(
+fn airIsErr(
o: *Object,
- inst: *Inst.UnOp,
+ inst: Air.Inst.Index,
deref_prefix: [*:0]const u8,
deref_suffix: [*:0]const u8,
op_str: [*:0]const u8,
) !CValue {
+ if (o.liveness.isUnused(inst))
+ return CValue.none;
+
+ const un_op = o.air.instructions.items(.data)[inst].un_op;
const writer = o.writer();
- const operand = try o.resolveInst(inst.operand);
+ const operand = try o.resolveInst(un_op);
+ const operand_ty = o.air.typeOf(un_op);
const local = try o.allocLocal(Type.initTag(.bool), .Const);
- const payload_ty = inst.operand.ty.errorUnionChild();
+ const payload_ty = operand_ty.errorUnionChild();
if (!payload_ty.hasCodeGenBits()) {
try writer.print(" = {s}", .{deref_prefix});
try o.writeCValue(writer, operand);
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index b8f96891f4..81484e93db 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -9,8 +9,8 @@ const math = std.math;
const Module = @import("../Module.zig");
const TypedValue = @import("../TypedValue.zig");
-const ir = @import("../air.zig");
-const Inst = ir.Inst;
+const Air = @import("../Air.zig");
+const Liveness = @import("../Liveness.zig");
const Value = @import("../value.zig").Value;
const Type = @import("../type.zig").Type;
@@ -276,6 +276,70 @@ pub const Object = struct {
}
}
+ pub fn updateFunc(
+ self: *Object,
+ module: *Module,
+ func: *Module.Fn,
+ air: Air,
+ liveness: Liveness,
+ ) !void {
+ var dg: DeclGen = .{
+ .object = self,
+ .module = module,
+ .decl = func.owner_decl,
+ .err_msg = null,
+ .gpa = module.gpa,
+ };
+
+ const llvm_func = try dg.resolveLLVMFunction(func.owner_decl);
+
+ // This gets the LLVM values from the function and stores them in `dg.args`.
+ const fn_param_len = func.owner_decl.ty.fnParamLen();
+ var args = try dg.gpa.alloc(*const llvm.Value, fn_param_len);
+
+ for (args) |*arg, i| {
+ arg.* = llvm.getParam(llvm_func, @intCast(c_uint, i));
+ }
+
+ // We remove all the basic blocks of a function to support incremental
+ // compilation!
+ // TODO: remove all basic blocks if functions can have more than one
+ if (llvm_func.getFirstBasicBlock()) |bb| {
+ bb.deleteBasicBlock();
+ }
+
+ const builder = dg.context().createBuilder();
+
+ const entry_block = dg.context().appendBasicBlock(llvm_func, "Entry");
+ builder.positionBuilderAtEnd(entry_block);
+
+ var fg: FuncGen = .{
+ .gpa = dg.gpa,
+ .air = air,
+ .liveness = liveness,
+ .dg = &dg,
+ .builder = builder,
+ .args = args,
+ .arg_index = 0,
+ .func_inst_table = .{},
+ .entry_block = entry_block,
+ .latest_alloca_inst = null,
+ .llvm_func = llvm_func,
+ .blocks = .{},
+ };
+ defer fg.deinit();
+
+ fg.genBody(air.getMainBody()) catch |err| switch (err) {
+ error.CodegenFail => {
+ func.owner_decl.analysis = .codegen_failure;
+ try module.failed_decls.put(module.gpa, func.owner_decl, dg.err_msg.?);
+ dg.err_msg = null;
+ return;
+ },
+ else => |e| return e,
+ };
+ }
+
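
updateFunc converts a recoverable `error.CodegenFail` into state on the declaration (marking it `codegen_failure` and moving the message into `failed_decls`) instead of propagating it, so one bad function no longer aborts the whole update. The idiom, reduced to a toy generator; names here are illustrative:

const std = @import("std");

fn lowerOrRecord(gen: anytype) !void {
    gen.genBody() catch |err| switch (err) {
        error.CodegenFail => {
            // Real code: set decl analysis to .codegen_failure and move the
            // pending error message into module.failed_decls.
            gen.markFailed();
            return;
        },
        else => |e| return e, // e.g. OutOfMemory still propagates
    };
}

const ToyGen = struct {
    failed: bool = false,

    fn genBody(g: *ToyGen) error{ CodegenFail, OutOfMemory }!void {
        _ = g;
        return error.CodegenFail;
    }

    fn markFailed(g: *ToyGen) void {
        g.failed = true;
    }
};

test "CodegenFail is recorded on the decl, not propagated" {
    var gen = ToyGen{};
    try lowerOrRecord(&gen);
    std.debug.assert(gen.failed);
}
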
pub fn updateDecl(self: *Object, module: *Module, decl: *Module.Decl) !void {
var dg: DeclGen = .{
.object = self,
@@ -327,44 +391,8 @@ pub const DeclGen = struct {
log.debug("gen: {s} type: {}, value: {}", .{ decl.name, decl.ty, decl.val });
if (decl.val.castTag(.function)) |func_payload| {
- const func = func_payload.data;
-
- const llvm_func = try self.resolveLLVMFunction(func.owner_decl);
-
- // This gets the LLVM values from the function and stores them in `self.args`.
- const fn_param_len = func.owner_decl.ty.fnParamLen();
- var args = try self.gpa.alloc(*const llvm.Value, fn_param_len);
-
- for (args) |*arg, i| {
- arg.* = llvm.getParam(llvm_func, @intCast(c_uint, i));
- }
-
- // We remove all the basic blocks of a function to support incremental
- // compilation!
- // TODO: remove all basic blocks if functions can have more than one
- if (llvm_func.getFirstBasicBlock()) |bb| {
- bb.deleteBasicBlock();
- }
-
- const builder = self.context().createBuilder();
-
- const entry_block = self.context().appendBasicBlock(llvm_func, "Entry");
- builder.positionBuilderAtEnd(entry_block);
-
- var fg: FuncGen = .{
- .dg = self,
- .builder = builder,
- .args = args,
- .arg_index = 0,
- .func_inst_table = .{},
- .entry_block = entry_block,
- .latest_alloca_inst = null,
- .llvm_func = llvm_func,
- .blocks = .{},
- };
- defer fg.deinit();
-
- try fg.genBody(func.body);
+ _ = func_payload;
+ @panic("TODO llvm backend genDecl function pointer");
} else if (decl.val.castTag(.extern_fn)) |extern_fn| {
_ = try self.resolveLLVMFunction(extern_fn.data);
} else {
@@ -590,29 +618,31 @@ pub const DeclGen = struct {
};
pub const FuncGen = struct {
+ gpa: *Allocator,
dg: *DeclGen,
+ air: Air,
+ liveness: Liveness,
builder: *const llvm.Builder,
- /// This stores the LLVM values used in a function, such that they can be
- /// referred to in other instructions. This table is cleared before every function is generated.
- /// TODO: Change this to a stack of Branch. Currently we store all the values from all the blocks
- /// in here, however if a block ends, the instructions can be thrown away.
- func_inst_table: std.AutoHashMapUnmanaged(*Inst, *const llvm.Value),
+ /// This stores the LLVM values used in a function, such that they can be referred to
+ /// in other instructions. This table is cleared before every function is generated.
+ func_inst_table: std.AutoHashMapUnmanaged(Air.Inst.Index, *const llvm.Value),
- /// These fields are used to refer to the LLVM value of the function paramaters in an Arg instruction.
+ /// These fields are used to refer to the LLVM value of the function parameters
+ /// in an Arg instruction.
args: []*const llvm.Value,
arg_index: usize,
entry_block: *const llvm.BasicBlock,
- /// This fields stores the last alloca instruction, such that we can append more alloca instructions
- /// to the top of the function.
+ /// This field stores the last alloca instruction, such that we can append
+ /// more alloca instructions to the top of the function.
latest_alloca_inst: ?*const llvm.Value,
llvm_func: *const llvm.Value,
/// This data structure is used to implement breaking to blocks.
- blocks: std.AutoHashMapUnmanaged(*Inst.Block, struct {
+ blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, struct {
parent_bb: *const llvm.BasicBlock,
break_bbs: *BreakBasicBlocks,
break_vals: *BreakValues,
@@ -623,9 +653,9 @@ pub const FuncGen = struct {
fn deinit(self: *FuncGen) void {
self.builder.dispose();
- self.func_inst_table.deinit(self.gpa());
- self.gpa().free(self.args);
- self.blocks.deinit(self.gpa());
+ self.func_inst_table.deinit(self.gpa);
+ self.gpa.free(self.args);
+ self.blocks.deinit(self.gpa);
}
fn todo(self: *FuncGen, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
@@ -641,65 +671,68 @@ pub const FuncGen = struct {
return self.dg.object.context;
}
- fn gpa(self: *FuncGen) *Allocator {
- return self.dg.gpa;
- }
-
- fn resolveInst(self: *FuncGen, inst: *ir.Inst) !*const llvm.Value {
- if (inst.value()) |val| {
- return self.dg.genTypedValue(.{ .ty = inst.ty, .val = val }, self);
+ fn resolveInst(self: *FuncGen, inst: Air.Inst.Ref) !*const llvm.Value {
+ if (self.air.value(inst)) |val| {
+ return self.dg.genTypedValue(.{ .ty = self.air.typeOf(inst), .val = val }, self);
}
- if (self.func_inst_table.get(inst)) |value| return value;
+ const inst_index = Air.refToIndex(inst).?;
+ if (self.func_inst_table.get(inst_index)) |value| return value;
return self.todo("implement global llvm values (or the value is not in the func_inst_table table)", .{});
}
- fn genBody(self: *FuncGen, body: ir.Body) error{ OutOfMemory, CodegenFail }!void {
- for (body.instructions) |inst| {
- const opt_value = switch (inst.tag) {
- .add => try self.genAdd(inst.castTag(.add).?),
- .alloc => try self.genAlloc(inst.castTag(.alloc).?),
- .arg => try self.genArg(inst.castTag(.arg).?),
- .bitcast => try self.genBitCast(inst.castTag(.bitcast).?),
- .block => try self.genBlock(inst.castTag(.block).?),
- .br => try self.genBr(inst.castTag(.br).?),
- .breakpoint => try self.genBreakpoint(inst.castTag(.breakpoint).?),
- .br_void => try self.genBrVoid(inst.castTag(.br_void).?),
- .call => try self.genCall(inst.castTag(.call).?),
- .cmp_eq => try self.genCmp(inst.castTag(.cmp_eq).?, .eq),
- .cmp_gt => try self.genCmp(inst.castTag(.cmp_gt).?, .gt),
- .cmp_gte => try self.genCmp(inst.castTag(.cmp_gte).?, .gte),
- .cmp_lt => try self.genCmp(inst.castTag(.cmp_lt).?, .lt),
- .cmp_lte => try self.genCmp(inst.castTag(.cmp_lte).?, .lte),
- .cmp_neq => try self.genCmp(inst.castTag(.cmp_neq).?, .neq),
- .condbr => try self.genCondBr(inst.castTag(.condbr).?),
- .intcast => try self.genIntCast(inst.castTag(.intcast).?),
- .is_non_null => try self.genIsNonNull(inst.castTag(.is_non_null).?, false),
- .is_non_null_ptr => try self.genIsNonNull(inst.castTag(.is_non_null_ptr).?, true),
- .is_null => try self.genIsNull(inst.castTag(.is_null).?, false),
- .is_null_ptr => try self.genIsNull(inst.castTag(.is_null_ptr).?, true),
- .load => try self.genLoad(inst.castTag(.load).?),
- .loop => try self.genLoop(inst.castTag(.loop).?),
- .not => try self.genNot(inst.castTag(.not).?),
- .ret => try self.genRet(inst.castTag(.ret).?),
- .retvoid => self.genRetVoid(inst.castTag(.retvoid).?),
- .store => try self.genStore(inst.castTag(.store).?),
- .sub => try self.genSub(inst.castTag(.sub).?),
- .unreach => self.genUnreach(inst.castTag(.unreach).?),
- .optional_payload => try self.genOptionalPayload(inst.castTag(.optional_payload).?, false),
- .optional_payload_ptr => try self.genOptionalPayload(inst.castTag(.optional_payload_ptr).?, true),
+ fn genBody(self: *FuncGen, body: []const Air.Inst.Index) error{ OutOfMemory, CodegenFail }!void {
+ const air_tags = self.air.instructions.items(.tag);
+ for (body) |inst| {
+ const opt_value = switch (air_tags[inst]) {
+ .add => try self.airAdd(inst),
+ .sub => try self.airSub(inst),
+
+ .cmp_eq => try self.airCmp(inst, .eq),
+ .cmp_gt => try self.airCmp(inst, .gt),
+ .cmp_gte => try self.airCmp(inst, .gte),
+ .cmp_lt => try self.airCmp(inst, .lt),
+ .cmp_lte => try self.airCmp(inst, .lte),
+ .cmp_neq => try self.airCmp(inst, .neq),
+
+ .is_non_null => try self.airIsNonNull(inst, false),
+ .is_non_null_ptr => try self.airIsNonNull(inst, true),
+ .is_null => try self.airIsNull(inst, false),
+ .is_null_ptr => try self.airIsNull(inst, true),
+
+ .alloc => try self.airAlloc(inst),
+ .arg => try self.airArg(inst),
+ .bitcast => try self.airBitCast(inst),
+ .block => try self.airBlock(inst),
+ .br => try self.airBr(inst),
+ .breakpoint => try self.airBreakpoint(inst),
+ .call => try self.airCall(inst),
+ .cond_br => try self.airCondBr(inst),
+ .intcast => try self.airIntCast(inst),
+ .load => try self.airLoad(inst),
+ .loop => try self.airLoop(inst),
+ .not => try self.airNot(inst),
+ .ret => try self.airRet(inst),
+ .store => try self.airStore(inst),
+ .unreach => self.airUnreach(inst),
+ .optional_payload => try self.airOptionalPayload(inst, false),
+ .optional_payload_ptr => try self.airOptionalPayload(inst, true),
.dbg_stmt => blk: {
// TODO: implement debug info
break :blk null;
},
- else => |tag| return self.todo("implement TZIR instruction: {}", .{tag}),
+ else => |tag| return self.todo("implement AIR instruction: {}", .{tag}),
};
- if (opt_value) |val| try self.func_inst_table.putNoClobber(self.gpa(), inst, val);
+ if (opt_value) |val| try self.func_inst_table.putNoClobber(self.gpa, inst, val);
}
}
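
// Illustrative sketch, not part of this diff: AIR stores instructions as a
// struct-of-arrays (std.MultiArrayList), so a body is just a slice of indices
// and dispatch reads the tag array, as genBody does above. The names below
// (Tag, data, body) are invented for this toy model.
const std = @import("std");

const Tag = enum { add, sub, ret };

test "struct-of-arrays dispatch over a body" {
    const tags = [_]Tag{ .add, .ret };
    const data = [_]u32{ 1, 0 };
    const body = [_]u32{ 0, 1 };
    var sum: u32 = 0;
    for (body) |inst| {
        switch (tags[inst]) {
            .add, .sub => sum += data[inst],
            .ret => {},
        }
    }
    try std.testing.expectEqual(@as(u32, 1), sum);
}
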
- fn genCall(self: *FuncGen, inst: *Inst.Call) !?*const llvm.Value {
- if (inst.func.value()) |func_value| {
+ fn airCall(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+ const extra = self.air.extraData(Air.Call, pl_op.payload);
+ const args = @bitCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
+
+ if (self.air.value(pl_op.operand)) |func_value| {
const fn_decl = if (func_value.castTag(.extern_fn)) |extern_fn|
extern_fn.data
else if (func_value.castTag(.function)) |func_payload|
@@ -711,12 +744,10 @@ pub const FuncGen = struct {
const zig_fn_type = fn_decl.ty;
const llvm_fn = try self.dg.resolveLLVMFunction(fn_decl);
- const num_args = inst.args.len;
+ const llvm_param_vals = try self.gpa.alloc(*const llvm.Value, args.len);
+ defer self.gpa.free(llvm_param_vals);
- const llvm_param_vals = try self.gpa().alloc(*const llvm.Value, num_args);
- defer self.gpa().free(llvm_param_vals);
-
- for (inst.args) |arg, i| {
+ for (args) |arg, i| {
llvm_param_vals[i] = try self.resolveInst(arg);
}
@@ -724,8 +755,8 @@ pub const FuncGen = struct {
// Do we need that?
const call = self.builder.buildCall(
llvm_fn,
- if (num_args == 0) null else llvm_param_vals.ptr,
- @intCast(c_uint, num_args),
+ if (args.len == 0) null else llvm_param_vals.ptr,
+ @intCast(c_uint, args.len),
"",
);
@@ -743,31 +774,31 @@ pub const FuncGen = struct {
}
}
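
// Illustrative sketch, not part of this diff: variable-length operand lists
// like the call arguments above live in a flat `extra` array of u32s, right
// after the fixed payload fields. `Call` and `extraData` below are simplified
// stand-ins for the real Air helpers.
const std = @import("std");

const Call = struct { args_len: u32 };

fn extraData(extra: []const u32, comptime T: type, index: usize) struct { data: T, end: usize } {
    var result: T = undefined;
    inline for (std.meta.fields(T)) |field, i| {
        @field(result, field.name) = extra[index + i];
    }
    return .{ .data = result, .end = index + std.meta.fields(T).len };
}

test "trailing operand list" {
    // Payload at index 0 (args_len = 2), followed by the two operands.
    const extra = [_]u32{ 2, 40, 41 };
    const call = extraData(&extra, Call, 0);
    const args = extra[call.end..][0..call.data.args_len];
    try std.testing.expectEqual(@as(usize, 2), args.len);
    try std.testing.expectEqual(@as(u32, 40), args[0]);
}
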
- fn genRetVoid(self: *FuncGen, inst: *Inst.NoOp) ?*const llvm.Value {
- _ = inst;
- _ = self.builder.buildRetVoid();
- return null;
- }
-
- fn genRet(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value {
- if (!inst.operand.ty.hasCodeGenBits()) {
- // TODO: in astgen these instructions should turn into `retvoid` instructions.
+ fn airRet(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ const un_op = self.air.instructions.items(.data)[inst].un_op;
+ if (!self.air.typeOf(un_op).hasCodeGenBits()) {
_ = self.builder.buildRetVoid();
return null;
}
- _ = self.builder.buildRet(try self.resolveInst(inst.operand));
+ const operand = try self.resolveInst(un_op);
+ _ = self.builder.buildRet(operand);
return null;
}
- fn genCmp(self: *FuncGen, inst: *Inst.BinOp, op: math.CompareOperator) !?*const llvm.Value {
- const lhs = try self.resolveInst(inst.lhs);
- const rhs = try self.resolveInst(inst.rhs);
+ fn airCmp(self: *FuncGen, inst: Air.Inst.Index, op: math.CompareOperator) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst))
+ return null;
- if (!inst.base.ty.isInt())
- if (inst.base.ty.tag() != .bool)
- return self.todo("implement 'genCmp' for type {}", .{inst.base.ty});
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+ const inst_ty = self.air.typeOfIndex(inst);
- const is_signed = inst.base.ty.isSignedInt();
+ if (!inst_ty.isInt())
+ if (inst_ty.tag() != .bool)
+ return self.todo("implement 'airCmp' for type {}", .{inst_ty});
+
+ const is_signed = inst_ty.isSignedInt();
const operation = switch (op) {
.eq => .EQ,
.neq => .NE,
@@ -780,32 +811,36 @@ pub const FuncGen = struct {
return self.builder.buildICmp(operation, lhs, rhs, "");
}
- fn genBlock(self: *FuncGen, inst: *Inst.Block) !?*const llvm.Value {
+ fn airBlock(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const extra = self.air.extraData(Air.Block, ty_pl.payload);
+ const body = self.air.extra[extra.end..][0..extra.data.body_len];
const parent_bb = self.context().createBasicBlock("Block");
// 5 breaks to a block seems like a reasonable default.
- var break_bbs = try BreakBasicBlocks.initCapacity(self.gpa(), 5);
- var break_vals = try BreakValues.initCapacity(self.gpa(), 5);
- try self.blocks.putNoClobber(self.gpa(), inst, .{
+ var break_bbs = try BreakBasicBlocks.initCapacity(self.gpa, 5);
+ var break_vals = try BreakValues.initCapacity(self.gpa, 5);
+ try self.blocks.putNoClobber(self.gpa, inst, .{
.parent_bb = parent_bb,
.break_bbs = &break_bbs,
.break_vals = &break_vals,
});
defer {
assert(self.blocks.remove(inst));
- break_bbs.deinit(self.gpa());
- break_vals.deinit(self.gpa());
+ break_bbs.deinit(self.gpa);
+ break_vals.deinit(self.gpa);
}
- try self.genBody(inst.body);
+ try self.genBody(body);
self.llvm_func.appendExistingBasicBlock(parent_bb);
self.builder.positionBuilderAtEnd(parent_bb);
        // If the block does not return a value, we don't have to create a phi node.

- if (!inst.base.ty.hasCodeGenBits()) return null;
+ const inst_ty = self.air.typeOfIndex(inst);
+ if (!inst_ty.hasCodeGenBits()) return null;
- const phi_node = self.builder.buildPhi(try self.dg.getLLVMType(inst.base.ty), "");
+ const phi_node = self.builder.buildPhi(try self.dg.getLLVMType(inst_ty), "");
phi_node.addIncoming(
break_vals.items.ptr,
break_bbs.items.ptr,
@@ -814,35 +849,30 @@ pub const FuncGen = struct {
return phi_node;
}
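
// Hedged illustration, not generated by this commit: for a block whose two
// breaks carry values, the phi wiring above produces LLVM IR shaped roughly
// like this (labels and values invented):
//
//   then:
//     br label %Block
//   else:
//     br label %Block
//   Block:
//     %result = phi i32 [ 1, %then ], [ 2, %else ]
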
- fn genBr(self: *FuncGen, inst: *Inst.Br) !?*const llvm.Value {
- var block = self.blocks.get(inst.block).?;
+ fn airBr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ const branch = self.air.instructions.items(.data)[inst].br;
+ const block = self.blocks.get(branch.block_inst).?;
// If the break doesn't break a value, then we don't have to add
// the values to the lists.
- if (!inst.operand.ty.hasCodeGenBits()) {
- // TODO: in astgen these instructions should turn into `br_void` instructions.
- _ = self.builder.buildBr(block.parent_bb);
- } else {
- const val = try self.resolveInst(inst.operand);
+ if (self.air.typeOf(branch.operand).hasCodeGenBits()) {
+ const val = try self.resolveInst(branch.operand);
// For the phi node, we need the basic blocks and the values of the
// break instructions.
- try block.break_bbs.append(self.gpa(), self.builder.getInsertBlock());
- try block.break_vals.append(self.gpa(), val);
-
- _ = self.builder.buildBr(block.parent_bb);
+ try block.break_bbs.append(self.gpa, self.builder.getInsertBlock());
+ try block.break_vals.append(self.gpa, val);
}
- return null;
- }
-
- fn genBrVoid(self: *FuncGen, inst: *Inst.BrVoid) !?*const llvm.Value {
- var block = self.blocks.get(inst.block).?;
_ = self.builder.buildBr(block.parent_bb);
return null;
}
- fn genCondBr(self: *FuncGen, inst: *Inst.CondBr) !?*const llvm.Value {
- const condition_value = try self.resolveInst(inst.condition);
+ fn airCondBr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+ const cond = try self.resolveInst(pl_op.operand);
+ const extra = self.air.extraData(Air.CondBr, pl_op.payload);
+ const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len];
+ const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
const then_block = self.context().appendBasicBlock(self.llvm_func, "Then");
const else_block = self.context().appendBasicBlock(self.llvm_func, "Else");
@@ -851,38 +881,51 @@ pub const FuncGen = struct {
defer self.builder.positionBuilderAtEnd(prev_block);
self.builder.positionBuilderAtEnd(then_block);
- try self.genBody(inst.then_body);
+ try self.genBody(then_body);
self.builder.positionBuilderAtEnd(else_block);
- try self.genBody(inst.else_body);
+ try self.genBody(else_body);
}
- _ = self.builder.buildCondBr(condition_value, then_block, else_block);
+ _ = self.builder.buildCondBr(cond, then_block, else_block);
return null;
}
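
// Illustrative sketch, not part of this diff: the CondBr payload is followed
// in `extra` by the then body and then immediately by the else body, which is
// exactly how the two slices above are computed. Lengths and indices invented.
const std = @import("std");

test "cond_br body layout in extra" {
    // Fixed payload: then_body_len = 2, else_body_len = 1, then three indices.
    const extra = [_]u32{ 2, 1, 10, 11, 12 };
    const end: usize = 2; // first index past the fixed payload fields
    const then_body = extra[end..][0..extra[0]];
    const else_body = extra[end + then_body.len ..][0..extra[1]];
    try std.testing.expectEqual(@as(usize, 2), then_body.len);
    try std.testing.expectEqual(@as(u32, 12), else_body[0]);
}
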
- fn genLoop(self: *FuncGen, inst: *Inst.Loop) !?*const llvm.Value {
+ fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const loop = self.air.extraData(Air.Block, ty_pl.payload);
+ const body = self.air.extra[loop.end..][0..loop.data.body_len];
const loop_block = self.context().appendBasicBlock(self.llvm_func, "Loop");
_ = self.builder.buildBr(loop_block);
self.builder.positionBuilderAtEnd(loop_block);
- try self.genBody(inst.body);
+ try self.genBody(body);
_ = self.builder.buildBr(loop_block);
return null;
}
- fn genNot(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value {
- return self.builder.buildNot(try self.resolveInst(inst.operand), "");
+ fn airNot(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst))
+ return null;
+
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const operand = try self.resolveInst(ty_op.operand);
+
+ return self.builder.buildNot(operand, "");
}
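
// Illustrative sketch, not part of this diff: the isUnused guards in this
// backend come from a Liveness pass that precomputes, per AIR instruction,
// whether its result is ever consumed, so pure instructions can be skipped
// instead of emitting dead LLVM values. Toy stand-in (the real Liveness
// tracks more than one bit per instruction):
const std = @import("std");

const ToyLiveness = struct {
    /// One bit per instruction; set when the result is never used.
    dead: std.bit_set.IntegerBitSet(64),

    fn isUnused(l: ToyLiveness, inst: u32) bool {
        return l.dead.isSet(inst);
    }
};

test "pure dead instructions are skipped" {
    var l = ToyLiveness{ .dead = std.bit_set.IntegerBitSet(64).initEmpty() };
    l.dead.set(3);
    try std.testing.expect(l.isUnused(3));
    try std.testing.expect(!l.isUnused(4));
}
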
- fn genUnreach(self: *FuncGen, inst: *Inst.NoOp) ?*const llvm.Value {
+ fn airUnreach(self: *FuncGen, inst: Air.Inst.Index) ?*const llvm.Value {
_ = inst;
_ = self.builder.buildUnreachable();
return null;
}
- fn genIsNonNull(self: *FuncGen, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value {
- const operand = try self.resolveInst(inst.operand);
+ fn airIsNonNull(self: *FuncGen, inst: Air.Inst.Index, operand_is_ptr: bool) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst))
+ return null;
+
+ const un_op = self.air.instructions.items(.data)[inst].un_op;
+ const operand = try self.resolveInst(un_op);
if (operand_is_ptr) {
const index_type = self.context().intType(32);
@@ -898,12 +941,23 @@ pub const FuncGen = struct {
}
}
- fn genIsNull(self: *FuncGen, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value {
- return self.builder.buildNot((try self.genIsNonNull(inst, operand_is_ptr)).?, "");
+ fn airIsNull(self: *FuncGen, inst: Air.Inst.Index, operand_is_ptr: bool) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst))
+ return null;
+
+ return self.builder.buildNot((try self.airIsNonNull(inst, operand_is_ptr)).?, "");
}
- fn genOptionalPayload(self: *FuncGen, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value {
- const operand = try self.resolveInst(inst.operand);
+ fn airOptionalPayload(
+ self: *FuncGen,
+ inst: Air.Inst.Index,
+ operand_is_ptr: bool,
+ ) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst))
+ return null;
+
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const operand = try self.resolveInst(ty_op.operand);
if (operand_is_ptr) {
const index_type = self.context().intType(32);
@@ -919,61 +973,83 @@ pub const FuncGen = struct {
}
}
- fn genAdd(self: *FuncGen, inst: *Inst.BinOp) !?*const llvm.Value {
- const lhs = try self.resolveInst(inst.lhs);
- const rhs = try self.resolveInst(inst.rhs);
+ fn airAdd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst))
+ return null;
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+ const inst_ty = self.air.typeOfIndex(inst);
- if (!inst.base.ty.isInt())
- return self.todo("implement 'genAdd' for type {}", .{inst.base.ty});
+ if (!inst_ty.isInt())
+ return self.todo("implement 'airAdd' for type {}", .{inst_ty});
- return if (inst.base.ty.isSignedInt())
+ return if (inst_ty.isSignedInt())
self.builder.buildNSWAdd(lhs, rhs, "")
else
self.builder.buildNUWAdd(lhs, rhs, "");
}
- fn genSub(self: *FuncGen, inst: *Inst.BinOp) !?*const llvm.Value {
- const lhs = try self.resolveInst(inst.lhs);
- const rhs = try self.resolveInst(inst.rhs);
+ fn airSub(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst))
+ return null;
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+ const inst_ty = self.air.typeOfIndex(inst);
- if (!inst.base.ty.isInt())
- return self.todo("implement 'genSub' for type {}", .{inst.base.ty});
+ if (!inst_ty.isInt())
+ return self.todo("implement 'airSub' for type {}", .{inst_ty});
- return if (inst.base.ty.isSignedInt())
+ return if (inst_ty.isSignedInt())
self.builder.buildNSWSub(lhs, rhs, "")
else
self.builder.buildNUWSub(lhs, rhs, "");
}
- fn genIntCast(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value {
- const val = try self.resolveInst(inst.operand);
+ fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst))
+ return null;
+
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const operand = try self.resolveInst(ty_op.operand);
+ const inst_ty = self.air.typeOfIndex(inst);
- const signed = inst.base.ty.isSignedInt();
+ const signed = inst_ty.isSignedInt();
// TODO: Should we use intcast here or just a simple bitcast?
// LLVM does truncation vs bitcast (+signed extension) in the intcast depending on the sizes
- return self.builder.buildIntCast2(val, try self.dg.getLLVMType(inst.base.ty), llvm.Bool.fromBool(signed), "");
+ return self.builder.buildIntCast2(operand, try self.dg.getLLVMType(inst_ty), llvm.Bool.fromBool(signed), "");
}
- fn genBitCast(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value {
- const val = try self.resolveInst(inst.operand);
- const dest_type = try self.dg.getLLVMType(inst.base.ty);
+ fn airBitCast(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst))
+ return null;
+
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const operand = try self.resolveInst(ty_op.operand);
+ const inst_ty = self.air.typeOfIndex(inst);
+ const dest_type = try self.dg.getLLVMType(inst_ty);
- return self.builder.buildBitCast(val, dest_type, "");
+ return self.builder.buildBitCast(operand, dest_type, "");
}
- fn genArg(self: *FuncGen, inst: *Inst.Arg) !?*const llvm.Value {
+ fn airArg(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
const arg_val = self.args[self.arg_index];
self.arg_index += 1;
- const ptr_val = self.buildAlloca(try self.dg.getLLVMType(inst.base.ty));
+ const inst_ty = self.air.typeOfIndex(inst);
+ const ptr_val = self.buildAlloca(try self.dg.getLLVMType(inst_ty));
_ = self.builder.buildStore(arg_val, ptr_val);
return self.builder.buildLoad(ptr_val, "");
}
- fn genAlloc(self: *FuncGen, inst: *Inst.NoOp) !?*const llvm.Value {
+ fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst))
+ return null;
// buildAlloca expects the pointee type, not the pointer type, so assert that
// a Payload.PointerSimple is passed to the alloc instruction.
- const pointee_type = inst.base.ty.castPointer().?.data;
+ const inst_ty = self.air.typeOfIndex(inst);
+ const pointee_type = inst_ty.castPointer().?.data;
// TODO: figure out a way to get the name of the var decl.
// TODO: set alignment and volatile
@@ -1004,19 +1080,26 @@ pub const FuncGen = struct {
return val;
}
- fn genStore(self: *FuncGen, inst: *Inst.BinOp) !?*const llvm.Value {
- const val = try self.resolveInst(inst.rhs);
- const ptr = try self.resolveInst(inst.lhs);
- _ = self.builder.buildStore(val, ptr);
+ fn airStore(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const dest_ptr = try self.resolveInst(bin_op.lhs);
+ const src_operand = try self.resolveInst(bin_op.rhs);
+ // TODO set volatile on this store properly
+ _ = self.builder.buildStore(src_operand, dest_ptr);
return null;
}
- fn genLoad(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value {
- const ptr_val = try self.resolveInst(inst.operand);
- return self.builder.buildLoad(ptr_val, "");
+ fn airLoad(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr();
+ if (!is_volatile and self.liveness.isUnused(inst))
+ return null;
+ const ptr = try self.resolveInst(ty_op.operand);
+ // TODO set volatile on this load properly
+ return self.builder.buildLoad(ptr, "");
}
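
// Why airLoad checks volatility before the liveness guard: a load through a
// volatile pointer is a side effect and must happen even if nothing reads the
// result. A minimal user-level example of code that relies on this:
test "unused volatile load is still performed" {
    var mmio: u32 = 0;
    const p: *volatile u32 = &mmio;
    _ = p.*; // result discarded, but the load may not be elided
}
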
- fn genBreakpoint(self: *FuncGen, inst: *Inst.NoOp) !?*const llvm.Value {
+ fn airBreakpoint(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
_ = inst;
        const llvm_fn = self.getIntrinsic("llvm.debugtrap");
        _ = self.builder.buildCall(llvm_fn, null, 0, "");
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 7fa813e565..7429e3c3b0 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -12,21 +12,21 @@ const Decl = Module.Decl;
const Type = @import("../type.zig").Type;
const Value = @import("../value.zig").Value;
const LazySrcLoc = Module.LazySrcLoc;
-const ir = @import("../air.zig");
-const Inst = ir.Inst;
+const Air = @import("../Air.zig");
+const Liveness = @import("../Liveness.zig");
pub const Word = u32;
pub const ResultId = u32;
pub const TypeMap = std.HashMap(Type, u32, Type.HashContext64, std.hash_map.default_max_load_percentage);
-pub const InstMap = std.AutoHashMap(*Inst, ResultId);
+pub const InstMap = std.AutoHashMap(Air.Inst.Index, ResultId);
const IncomingBlock = struct {
src_label_id: ResultId,
break_value_id: ResultId,
};
-pub const BlockMap = std.AutoHashMap(*Inst.Block, struct {
+pub const BlockMap = std.AutoHashMap(Air.Inst.Index, struct {
label_id: ResultId,
incoming_blocks: *std.ArrayListUnmanaged(IncomingBlock),
});
@@ -160,7 +160,11 @@ pub const DeclGen = struct {
    /// The SPIR-V module that the code should be put in.
spv: *SPIRVModule,
- /// An array of function argument result-ids. Each index corresponds with the function argument of the same index.
+ air: Air,
+ liveness: Liveness,
+
+ /// An array of function argument result-ids. Each index corresponds with the
+ /// function argument of the same index.
args: std.ArrayList(ResultId),
    /// A counter to keep track of how many `arg` instructions we've seen so far.
@@ -169,33 +173,35 @@ pub const DeclGen = struct {
/// A map keeping track of which instruction generated which result-id.
inst_results: InstMap,
- /// We need to keep track of result ids for block labels, as well as the 'incoming' blocks for a block.
+ /// We need to keep track of result ids for block labels, as well as the 'incoming'
+ /// blocks for a block.
blocks: BlockMap,
/// The label of the SPIR-V block we are currently generating.
current_block_label_id: ResultId,
- /// The actual instructions for this function. We need to declare all locals in the first block, and because we don't
- /// know which locals there are going to be, we're just going to generate everything after the locals-section in this array.
- /// Note: It will not contain OpFunction, OpFunctionParameter, OpVariable and the initial OpLabel. These will be generated
- /// into spv.binary.fn_decls directly.
+ /// The actual instructions for this function. We need to declare all locals in
+ /// the first block, and because we don't know which locals there are going to be,
+ /// we're just going to generate everything after the locals-section in this array.
+ /// Note: It will not contain OpFunction, OpFunctionParameter, OpVariable and the
+ /// initial OpLabel. These will be generated into spv.binary.fn_decls directly.
code: std.ArrayList(Word),
/// The decl we are currently generating code for.
decl: *Decl,
- /// If `gen` returned `Error.AnalysisFail`, this contains an explanatory message. Memory is owned by
- /// `module.gpa`.
+ /// If `gen` returned `Error.AnalysisFail`, this contains an explanatory message.
+ /// Memory is owned by `module.gpa`.
error_msg: ?*Module.ErrorMsg,
/// Possible errors the `gen` function may return.
const Error = error{ AnalysisFail, OutOfMemory };
- /// This structure is used to return information about a type typically used for arithmetic operations.
- /// These types may either be integers, floats, or a vector of these. Most scalar operations also work on vectors,
- /// so we can easily represent those as arithmetic types.
- /// If the type is a scalar, 'inner type' refers to the scalar type. Otherwise, if its a vector, it refers
- /// to the vector's element type.
+ /// This structure is used to return information about a type typically used for
+ /// arithmetic operations. These types may either be integers, floats, or a vector
+ /// of these. Most scalar operations also work on vectors, so we can easily represent
+ /// those as arithmetic types. If the type is a scalar, 'inner type' refers to the
+    /// scalar type. Otherwise, if it's a vector, it refers to the vector's element type.
const ArithmeticTypeInfo = struct {
/// A classification of the inner type.
const Class = enum {
@@ -207,13 +213,14 @@ pub const DeclGen = struct {
/// the relevant capability is enabled).
integer,
- /// A regular float. These are all required to be natively supported. Floating points for
- /// which the relevant capability is not enabled are not emulated.
+ /// A regular float. These are all required to be natively supported. Floating points
+ /// for which the relevant capability is not enabled are not emulated.
float,
- /// An integer of a 'strange' size (which' bit size is not the same as its backing type. **Note**: this
- /// may **also** include power-of-2 integers for which the relevant capability is not enabled), but still
- /// within the limits of the largest natively supported integer type.
+    /// An integer of a 'strange' size (whose bit size is not the same as its backing
+ /// type. **Note**: this may **also** include power-of-2 integers for which the
+ /// relevant capability is not enabled), but still within the limits of the largest
+ /// natively supported integer type.
strange_integer,
/// An integer with more bits than the largest natively supported integer type.
@@ -221,7 +228,7 @@ pub const DeclGen = struct {
};
/// The number of bits in the inner type.
- /// Note: this is the actual number of bits of the type, not the size of the backing integer.
+ /// This is the actual number of bits of the type, not the size of the backing integer.
bits: u16,
/// Whether the type is a vector.
@@ -235,10 +242,13 @@ pub const DeclGen = struct {
class: Class,
};
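
// Hedged sketch of how the Class buckets above could be assigned for a target
// whose largest natively supported integer is 64 bits; classifyInt and the
// native-width list are invented for illustration.
const std = @import("std");

const ToyClass = enum { integer, strange_integer, composite_integer };

fn classifyInt(bits: u16) ToyClass {
    const native = [_]u16{ 8, 16, 32, 64 };
    for (native) |n| {
        if (bits == n) return .integer;
    }
    return if (bits < 64) .strange_integer else .composite_integer;
}

test "integer classification" {
    try std.testing.expectEqual(ToyClass.integer, classifyInt(32));
    try std.testing.expectEqual(ToyClass.strange_integer, classifyInt(7));
    try std.testing.expectEqual(ToyClass.composite_integer, classifyInt(128));
}
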
- /// Initialize the common resources of a DeclGen. Some fields are left uninitialized, only set when `gen` is called.
+ /// Initialize the common resources of a DeclGen. Some fields are left uninitialized,
+ /// only set when `gen` is called.
pub fn init(spv: *SPIRVModule) DeclGen {
return .{
.spv = spv,
+ .air = undefined,
+ .liveness = undefined,
.args = std.ArrayList(ResultId).init(spv.gpa),
.next_arg_index = undefined,
.inst_results = InstMap.init(spv.gpa),
@@ -251,10 +261,12 @@ pub const DeclGen = struct {
}
    /// Generate the code for `decl`. If a reportable error occurred during code generation,
- /// a message is returned by this function. Callee owns the memory. If this function returns such
- /// a reportable error, it is valid to be called again for a different decl.
- pub fn gen(self: *DeclGen, decl: *Decl) !?*Module.ErrorMsg {
+ /// a message is returned by this function. Callee owns the memory. If this function
+ /// returns such a reportable error, it is valid to be called again for a different decl.
+ pub fn gen(self: *DeclGen, decl: *Decl, air: Air, liveness: Liveness) !?*Module.ErrorMsg {
// Reset internal resources, we don't want to re-allocate these.
+ self.air = air;
+ self.liveness = liveness;
self.args.items.len = 0;
self.next_arg_index = 0;
self.inst_results.clearRetainingCapacity();
@@ -280,19 +292,20 @@ pub const DeclGen = struct {
return self.spv.module.getTarget();
}
- fn fail(self: *DeclGen, src: LazySrcLoc, comptime format: []const u8, args: anytype) Error {
+ fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
@setCold(true);
+ const src: LazySrcLoc = .{ .node_offset = 0 };
const src_loc = src.toSrcLocWithDecl(self.decl);
self.error_msg = try Module.ErrorMsg.create(self.spv.module.gpa, src_loc, format, args);
return error.AnalysisFail;
}
- fn resolve(self: *DeclGen, inst: *Inst) !ResultId {
- if (inst.value()) |val| {
- return self.genConstant(inst.src, inst.ty, val);
+ fn resolve(self: *DeclGen, inst: Air.Inst.Ref) !ResultId {
+ if (self.air.value(inst)) |val| {
+ return self.genConstant(self.air.typeOf(inst), val);
}
-
- return self.inst_results.get(inst).?; // Instruction does not dominate all uses!
+ const index = Air.refToIndex(inst).?;
+ return self.inst_results.get(index).?; // Assertion means instruction does not dominate usage.
}
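
// Illustrative sketch, not part of this diff: an Air.Inst.Ref either names one
// of a small set of interned constants or, past that range, an instruction
// index, which is why both resolve functions try the value first and only then
// fall back to the instruction map. `interned_count` is invented here.
const std = @import("std");

const interned_count: u32 = 2;

fn refToIndex(ref: u32) ?u32 {
    return if (ref >= interned_count) ref - interned_count else null;
}

test "interned refs carry values, the rest map to instructions" {
    try std.testing.expectEqual(@as(?u32, null), refToIndex(1));
    try std.testing.expectEqual(@as(?u32, 5), refToIndex(7));
}
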
fn beginSPIRVBlock(self: *DeclGen, label_id: ResultId) !void {
@@ -314,7 +327,7 @@ pub const DeclGen = struct {
const target = self.getTarget();
    // The backend will never be asked to compile a 0-bit integer, so we won't have to handle those in this function.
- std.debug.assert(bits != 0);
+ assert(bits != 0);
    // 8, 16 and 64-bit integers require the Int8, Int16 and Int64 capabilities respectively.
// 32-bit integers are always supported (see spec, 2.16.1, Data rules).
@@ -388,19 +401,19 @@ pub const DeclGen = struct {
.composite_integer };
},
// As of yet, there is no vector support in the self-hosted compiler.
- .Vector => self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: implement arithmeticTypeInfo for Vector", .{}),
+ .Vector => self.fail("TODO: SPIR-V backend: implement arithmeticTypeInfo for Vector", .{}),
// TODO: For which types is this the case?
- else => self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: implement arithmeticTypeInfo for {}", .{ty}),
+ else => self.fail("TODO: SPIR-V backend: implement arithmeticTypeInfo for {}", .{ty}),
};
}
/// Generate a constant representing `val`.
/// TODO: Deduplication?
- fn genConstant(self: *DeclGen, src: LazySrcLoc, ty: Type, val: Value) Error!ResultId {
+ fn genConstant(self: *DeclGen, ty: Type, val: Value) Error!ResultId {
const target = self.getTarget();
const code = &self.spv.binary.types_globals_constants;
const result_id = self.spv.allocResultId();
- const result_type_id = try self.genType(src, ty);
+ const result_type_id = try self.genType(ty);
if (val.isUndef()) {
try writeInstruction(code, .OpUndef, &[_]Word{ result_type_id, result_id });
@@ -412,13 +425,13 @@ pub const DeclGen = struct {
const int_info = ty.intInfo(target);
const backing_bits = self.backingIntBits(int_info.bits) orelse {
// Integers too big for any native type are represented as "composite integers": An array of largestSupportedIntBits.
- return self.fail(src, "TODO: SPIR-V backend: implement composite int constants for {}", .{ty});
+ return self.fail("TODO: SPIR-V backend: implement composite int constants for {}", .{ty});
};
    // We can just use toSignedInt/toUnsignedInt here as they return u64 - a type large enough to hold any
// SPIR-V native type (up to i/u64 with Int64). If SPIR-V ever supports native ints of a larger size, this
// might need to be updated.
- std.debug.assert(self.largestSupportedIntBits() <= std.meta.bitCount(u64));
+ assert(self.largestSupportedIntBits() <= std.meta.bitCount(u64));
var int_bits = if (ty.isSignedInt()) @bitCast(u64, val.toSignedInt()) else val.toUnsignedInt();
// Mask the low bits which make up the actual integer. This is to make sure that negative values
@@ -470,13 +483,13 @@ pub const DeclGen = struct {
}
},
.Void => unreachable,
- else => return self.fail(src, "TODO: SPIR-V backend: constant generation of type {}", .{ty}),
+ else => return self.fail("TODO: SPIR-V backend: constant generation of type {}", .{ty}),
}
return result_id;
}
- fn genType(self: *DeclGen, src: LazySrcLoc, ty: Type) Error!ResultId {
+ fn genType(self: *DeclGen, ty: Type) Error!ResultId {
// We can't use getOrPut here so we can recursively generate types.
if (self.spv.types.get(ty)) |already_generated| {
return already_generated;
@@ -493,7 +506,7 @@ pub const DeclGen = struct {
const int_info = ty.intInfo(target);
const backing_bits = self.backingIntBits(int_info.bits) orelse {
// Integers too big for any native type are represented as "composite integers": An array of largestSupportedIntBits.
- return self.fail(src, "TODO: SPIR-V backend: implement composite int {}", .{ty});
+ return self.fail("TODO: SPIR-V backend: implement composite int {}", .{ty});
};
// TODO: If backing_bits != int_info.bits, a duplicate type might be generated here.
@@ -519,7 +532,7 @@ pub const DeclGen = struct {
};
if (!supported) {
- return self.fail(src, "Floating point width of {} bits is not supported for the current SPIR-V feature set", .{bits});
+ return self.fail("Floating point width of {} bits is not supported for the current SPIR-V feature set", .{bits});
}
try writeInstruction(code, .OpTypeFloat, &[_]Word{ result_id, bits });
@@ -527,19 +540,19 @@ pub const DeclGen = struct {
.Fn => {
// We only support zig-calling-convention functions, no varargs.
if (ty.fnCallingConvention() != .Unspecified)
- return self.fail(src, "Unsupported calling convention for SPIR-V", .{});
+ return self.fail("Unsupported calling convention for SPIR-V", .{});
if (ty.fnIsVarArgs())
- return self.fail(src, "VarArgs unsupported for SPIR-V", .{});
+ return self.fail("VarArgs unsupported for SPIR-V", .{});
// In order to avoid a temporary here, first generate all the required types and then simply look them up
// when generating the function type.
const params = ty.fnParamLen();
var i: usize = 0;
while (i < params) : (i += 1) {
- _ = try self.genType(src, ty.fnParamType(i));
+ _ = try self.genType(ty.fnParamType(i));
}
- const return_type_id = try self.genType(src, ty.fnReturnType());
+ const return_type_id = try self.genType(ty.fnReturnType());
// result id + result type id + parameter type ids.
try writeOpcode(code, .OpTypeFunction, 2 + @intCast(u16, ty.fnParamLen()));
@@ -552,7 +565,7 @@ pub const DeclGen = struct {
}
},
// When recursively generating a type, we cannot infer the pointer's storage class. See genPointerType.
- .Pointer => return self.fail(src, "Cannot create pointer with unkown storage class", .{}),
+        .Pointer => return self.fail("Cannot create pointer with unknown storage class", .{}),
.Vector => {
// Although not 100% the same, Zig vectors map quite neatly to SPIR-V vectors (including many integer and float operations
// which work on them), so simply use those.
@@ -562,7 +575,7 @@ pub const DeclGen = struct {
// is adequate at all for this.
// TODO: Vectors are not yet supported by the self-hosted compiler itself it seems.
- return self.fail(src, "TODO: SPIR-V backend: implement type Vector", .{});
+ return self.fail("TODO: SPIR-V backend: implement type Vector", .{});
},
.Null,
.Undefined,
@@ -574,7 +587,7 @@ pub const DeclGen = struct {
.BoundFn => unreachable, // this type will be deleted from the language.
- else => |tag| return self.fail(src, "TODO: SPIR-V backend: implement type {}s", .{tag}),
+ else => |tag| return self.fail("TODO: SPIR-V backend: implement type {}s", .{tag}),
}
try self.spv.types.putNoClobber(ty, result_id);
@@ -583,8 +596,8 @@ pub const DeclGen = struct {
/// SPIR-V requires pointers to have a storage class (address space), and so we have a special function for that.
/// TODO: The result of this needs to be cached.
- fn genPointerType(self: *DeclGen, src: LazySrcLoc, ty: Type, storage_class: spec.StorageClass) !ResultId {
- std.debug.assert(ty.zigTypeTag() == .Pointer);
+ fn genPointerType(self: *DeclGen, ty: Type, storage_class: spec.StorageClass) !ResultId {
+ assert(ty.zigTypeTag() == .Pointer);
const code = &self.spv.binary.types_globals_constants;
const result_id = self.spv.allocResultId();
@@ -592,7 +605,7 @@ pub const DeclGen = struct {
// TODO: There are many constraints which are ignored for now: We may only create pointers to certain types, and to other types
// if more capabilities are enabled. For example, we may only create pointers to f16 if Float16Buffer is enabled.
        // These also relate to the pointer's address space.
- const child_id = try self.genType(src, ty.elemType());
+ const child_id = try self.genType(ty.elemType());
try writeInstruction(code, .OpTypePointer, &[_]Word{ result_id, @enumToInt(storage_class), child_id });
@@ -603,9 +616,9 @@ pub const DeclGen = struct {
const decl = self.decl;
const result_id = decl.fn_link.spirv.id;
- if (decl.val.castTag(.function)) |func_payload| {
- std.debug.assert(decl.ty.zigTypeTag() == .Fn);
- const prototype_id = try self.genType(.{ .node_offset = 0 }, decl.ty);
+ if (decl.val.castTag(.function)) |_| {
+ assert(decl.ty.zigTypeTag() == .Fn);
+ const prototype_id = try self.genType(decl.ty);
try writeInstruction(&self.spv.binary.fn_decls, .OpFunction, &[_]Word{
self.spv.types.get(decl.ty.fnReturnType()).?, // This type should be generated along with the prototype.
result_id,
@@ -632,189 +645,171 @@ pub const DeclGen = struct {
try writeInstruction(&self.spv.binary.fn_decls, .OpLabel, &[_]Word{root_block_id});
self.current_block_label_id = root_block_id;
- try self.genBody(func_payload.data.body);
+ const main_body = self.air.getMainBody();
+ try self.genBody(main_body);
// Append the actual code into the fn_decls section.
try self.spv.binary.fn_decls.appendSlice(self.code.items);
try writeInstruction(&self.spv.binary.fn_decls, .OpFunctionEnd, &[_]Word{});
} else {
- return self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: generate decl type {}", .{decl.ty.zigTypeTag()});
+ return self.fail("TODO: SPIR-V backend: generate decl type {}", .{decl.ty.zigTypeTag()});
}
}
- fn genBody(self: *DeclGen, body: ir.Body) Error!void {
- for (body.instructions) |inst| {
+ fn genBody(self: *DeclGen, body: []const Air.Inst.Index) Error!void {
+ for (body) |inst| {
try self.genInst(inst);
}
}
- fn genInst(self: *DeclGen, inst: *Inst) !void {
- const result_id = switch (inst.tag) {
- .add, .addwrap => try self.genBinOp(inst.castTag(.add).?),
- .sub, .subwrap => try self.genBinOp(inst.castTag(.sub).?),
- .mul, .mulwrap => try self.genBinOp(inst.castTag(.mul).?),
- .div => try self.genBinOp(inst.castTag(.div).?),
- .bit_and => try self.genBinOp(inst.castTag(.bit_and).?),
- .bit_or => try self.genBinOp(inst.castTag(.bit_or).?),
- .xor => try self.genBinOp(inst.castTag(.xor).?),
- .cmp_eq => try self.genCmp(inst.castTag(.cmp_eq).?),
- .cmp_neq => try self.genCmp(inst.castTag(.cmp_neq).?),
- .cmp_gt => try self.genCmp(inst.castTag(.cmp_gt).?),
- .cmp_gte => try self.genCmp(inst.castTag(.cmp_gte).?),
- .cmp_lt => try self.genCmp(inst.castTag(.cmp_lt).?),
- .cmp_lte => try self.genCmp(inst.castTag(.cmp_lte).?),
- .bool_and => try self.genBinOp(inst.castTag(.bool_and).?),
- .bool_or => try self.genBinOp(inst.castTag(.bool_or).?),
- .not => try self.genUnOp(inst.castTag(.not).?),
- .alloc => try self.genAlloc(inst.castTag(.alloc).?),
- .arg => self.genArg(),
- .block => (try self.genBlock(inst.castTag(.block).?)) orelse return,
- .br => return try self.genBr(inst.castTag(.br).?),
- .br_void => return try self.genBrVoid(inst.castTag(.br_void).?),
- // TODO: Breakpoints won't be supported in SPIR-V, but the compiler seems to insert them
- // throughout the IR.
+ fn genInst(self: *DeclGen, inst: Air.Inst.Index) !void {
+ const air_tags = self.air.instructions.items(.tag);
+ const result_id = switch (air_tags[inst]) {
+ // zig fmt: off
+ .add, .addwrap => try self.airArithOp(inst, .{.OpFAdd, .OpIAdd, .OpIAdd}),
+ .sub, .subwrap => try self.airArithOp(inst, .{.OpFSub, .OpISub, .OpISub}),
+ .mul, .mulwrap => try self.airArithOp(inst, .{.OpFMul, .OpIMul, .OpIMul}),
+ .div => try self.airArithOp(inst, .{.OpFDiv, .OpSDiv, .OpUDiv}),
+
+ .bit_and => try self.airBinOpSimple(inst, .OpBitwiseAnd),
+ .bit_or => try self.airBinOpSimple(inst, .OpBitwiseOr),
+ .xor => try self.airBinOpSimple(inst, .OpBitwiseXor),
+ .bool_and => try self.airBinOpSimple(inst, .OpLogicalAnd),
+ .bool_or => try self.airBinOpSimple(inst, .OpLogicalOr),
+
+ .not => try self.airNot(inst),
+
+ .cmp_eq => try self.airCmp(inst, .{.OpFOrdEqual, .OpLogicalEqual, .OpIEqual}),
+ .cmp_neq => try self.airCmp(inst, .{.OpFOrdNotEqual, .OpLogicalNotEqual, .OpINotEqual}),
+ .cmp_gt => try self.airCmp(inst, .{.OpFOrdGreaterThan, .OpSGreaterThan, .OpUGreaterThan}),
+ .cmp_gte => try self.airCmp(inst, .{.OpFOrdGreaterThanEqual, .OpSGreaterThanEqual, .OpUGreaterThanEqual}),
+ .cmp_lt => try self.airCmp(inst, .{.OpFOrdLessThan, .OpSLessThan, .OpULessThan}),
+ .cmp_lte => try self.airCmp(inst, .{.OpFOrdLessThanEqual, .OpSLessThanEqual, .OpULessThanEqual}),
+
+ .arg => self.airArg(),
+ .alloc => try self.airAlloc(inst),
+ .block => (try self.airBlock(inst)) orelse return,
+ .load => try self.airLoad(inst),
+
+ .br => return self.airBr(inst),
.breakpoint => return,
- .condbr => return try self.genCondBr(inst.castTag(.condbr).?),
- .constant => unreachable,
- .dbg_stmt => return try self.genDbgStmt(inst.castTag(.dbg_stmt).?),
- .load => try self.genLoad(inst.castTag(.load).?),
- .loop => return try self.genLoop(inst.castTag(.loop).?),
- .ret => return try self.genRet(inst.castTag(.ret).?),
- .retvoid => return try self.genRetVoid(),
- .store => return try self.genStore(inst.castTag(.store).?),
- .unreach => return try self.genUnreach(),
- else => return self.fail(inst.src, "TODO: SPIR-V backend: implement inst {s}", .{@tagName(inst.tag)}),
+ .cond_br => return self.airCondBr(inst),
+ .constant => unreachable,
+ .dbg_stmt => return self.airDbgStmt(inst),
+ .loop => return self.airLoop(inst),
+ .ret => return self.airRet(inst),
+ .store => return self.airStore(inst),
+ .unreach => return self.airUnreach(),
+ // zig fmt: on
+
+ else => |tag| return self.fail("TODO: SPIR-V backend: implement AIR tag {s}", .{
+ @tagName(tag),
+ }),
};
try self.inst_results.putNoClobber(inst, result_id);
}
- fn genBinOp(self: *DeclGen, inst: *Inst.BinOp) !ResultId {
- // TODO: Will lhs and rhs have the same type?
- const lhs_id = try self.resolve(inst.lhs);
- const rhs_id = try self.resolve(inst.rhs);
+ fn airBinOpSimple(self: *DeclGen, inst: Air.Inst.Index, opcode: Opcode) !ResultId {
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs_id = try self.resolve(bin_op.lhs);
+ const rhs_id = try self.resolve(bin_op.rhs);
+ const result_id = self.spv.allocResultId();
+ const result_type_id = try self.genType(self.air.typeOfIndex(inst));
+ try writeInstruction(&self.code, opcode, &[_]Word{
+ result_type_id, result_id, lhs_id, rhs_id,
+ });
+ return result_id;
+ }
+
+ fn airArithOp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId {
+ // LHS and RHS are guaranteed to have the same type, and AIR guarantees
+ // the result to be the same as the LHS and RHS, which matches SPIR-V.
+ const ty = self.air.typeOfIndex(inst);
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs_id = try self.resolve(bin_op.lhs);
+ const rhs_id = try self.resolve(bin_op.rhs);
const result_id = self.spv.allocResultId();
- const result_type_id = try self.genType(inst.base.src, inst.base.ty);
-
- // TODO: Is the result the same as the argument types?
- // This is supposed to be the case for SPIR-V.
- std.debug.assert(inst.rhs.ty.eql(inst.lhs.ty));
- std.debug.assert(inst.base.ty.tag() == .bool or inst.base.ty.eql(inst.lhs.ty));
-
- // Binary operations are generally applicable to both scalar and vector operations in SPIR-V, but int and float
- // versions of operations require different opcodes.
- // For operations which produce bools, the information of inst.base.ty is not useful, so just pick either operand
- // instead.
- const info = try self.arithmeticTypeInfo(inst.lhs.ty);
-
- if (info.class == .composite_integer) {
- return self.fail(inst.base.src, "TODO: SPIR-V backend: binary operations for composite integers", .{});
- } else if (info.class == .strange_integer) {
- return self.fail(inst.base.src, "TODO: SPIR-V backend: binary operations for strange integers", .{});
- }
+ const result_type_id = try self.genType(ty);
+
+ assert(self.air.typeOf(bin_op.lhs).eql(ty));
+ assert(self.air.typeOf(bin_op.rhs).eql(ty));
+
+ // Binary operations are generally applicable to both scalar and vector operations
+ // in SPIR-V, but int and float versions of operations require different opcodes.
+ const info = try self.arithmeticTypeInfo(ty);
- const is_float = info.class == .float;
- const is_signed = info.signedness == .signed;
- // **Note**: All these operations must be valid for vectors as well!
- const opcode = switch (inst.base.tag) {
- // The regular integer operations are all defined for wrapping. Since theyre only relevant for integers,
- // we can just switch on both cases here.
- .add, .addwrap => if (is_float) Opcode.OpFAdd else Opcode.OpIAdd,
- .sub, .subwrap => if (is_float) Opcode.OpFSub else Opcode.OpISub,
- .mul, .mulwrap => if (is_float) Opcode.OpFMul else Opcode.OpIMul,
- // TODO: Trap if divisor is 0?
- // TODO: Figure out of OpSDiv for unsigned/OpUDiv for signed does anything useful.
- // => Those are probably for divTrunc and divFloor, though the compiler does not yet generate those.
- // => TODO: Figure out how those work on the SPIR-V side.
- // => TODO: Test these.
- .div => if (is_float) Opcode.OpFDiv else if (is_signed) Opcode.OpSDiv else Opcode.OpUDiv,
- // Only integer versions for these.
- .bit_and => Opcode.OpBitwiseAnd,
- .bit_or => Opcode.OpBitwiseOr,
- .xor => Opcode.OpBitwiseXor,
- // Bool -> bool operations.
- .bool_and => Opcode.OpLogicalAnd,
- .bool_or => Opcode.OpLogicalOr,
+ const opcode_index: usize = switch (info.class) {
+ .composite_integer => {
+ return self.fail("TODO: SPIR-V backend: binary operations for composite integers", .{});
+ },
+ .strange_integer => {
+ return self.fail("TODO: SPIR-V backend: binary operations for strange integers", .{});
+ },
+ .integer => switch (info.signedness) {
+ .signed => @as(usize, 1),
+ .unsigned => @as(usize, 2),
+ },
+ .float => 0,
else => unreachable,
};
-
+ const opcode = ops[opcode_index];
try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, lhs_id, rhs_id });
// TODO: Trap on overflow? Probably going to be annoying.
// TODO: Look into SPV_KHR_no_integer_wrap_decoration which provides NoSignedWrap/NoUnsignedWrap.
- if (info.class != .strange_integer)
- return result_id;
-
- return self.fail(inst.base.src, "TODO: SPIR-V backend: strange integer operation mask", .{});
+ return result_id;
}
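
// Illustrative sketch, not part of this diff: the [3]Opcode tables above index
// as {float, signed, unsigned}, and wrapping variants reuse the plain integer
// opcode. Reduced model of the lookup with invented enums:
const std = @import("std");

const ToyOp = enum { OpFAdd, OpIAdd };
const ToyClass = enum { float, integer };
const Signedness = enum { signed, unsigned };

fn pick(ops: [3]ToyOp, class: ToyClass, signedness: Signedness) ToyOp {
    const index: usize = switch (class) {
        .float => 0,
        .integer => switch (signedness) {
            .signed => @as(usize, 1),
            .unsigned => @as(usize, 2),
        },
    };
    return ops[index];
}

test "add opcode selection" {
    const add_ops = [3]ToyOp{ .OpFAdd, .OpIAdd, .OpIAdd };
    try std.testing.expectEqual(ToyOp.OpIAdd, pick(add_ops, .integer, .signed));
    try std.testing.expectEqual(ToyOp.OpFAdd, pick(add_ops, .float, .unsigned));
}
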
- fn genCmp(self: *DeclGen, inst: *Inst.BinOp) !ResultId {
- const lhs_id = try self.resolve(inst.lhs);
- const rhs_id = try self.resolve(inst.rhs);
-
+ fn airCmp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId {
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs_id = try self.resolve(bin_op.lhs);
+ const rhs_id = try self.resolve(bin_op.rhs);
const result_id = self.spv.allocResultId();
- const result_type_id = try self.genType(inst.base.src, inst.base.ty);
-
- // All of these operations should be 2 equal types -> bool
- std.debug.assert(inst.rhs.ty.eql(inst.lhs.ty));
- std.debug.assert(inst.base.ty.tag() == .bool);
-
- // Comparisons are generally applicable to both scalar and vector operations in SPIR-V, but int and float
- // versions of operations require different opcodes.
- // Since inst.base.ty is always bool and so not very useful, and because both arguments must be the same, just get the info
- // from either of the operands.
- const info = try self.arithmeticTypeInfo(inst.lhs.ty);
-
- if (info.class == .composite_integer) {
- return self.fail(inst.base.src, "TODO: SPIR-V backend: binary operations for composite integers", .{});
- } else if (info.class == .strange_integer) {
- return self.fail(inst.base.src, "TODO: SPIR-V backend: comparison for strange integers", .{});
- }
+ const result_type_id = try self.genType(Type.initTag(.bool));
+ const op_ty = self.air.typeOf(bin_op.lhs);
+ assert(op_ty.eql(self.air.typeOf(bin_op.rhs)));
- const is_bool = info.class == .bool;
- const is_float = info.class == .float;
- const is_signed = info.signedness == .signed;
-
- // **Note**: All these operations must be valid for vectors as well!
- // For floating points, we generally want ordered operations (which return false if either operand is nan).
- const opcode = switch (inst.base.tag) {
- .cmp_eq => if (is_float) Opcode.OpFOrdEqual else if (is_bool) Opcode.OpLogicalEqual else Opcode.OpIEqual,
- .cmp_neq => if (is_float) Opcode.OpFOrdNotEqual else if (is_bool) Opcode.OpLogicalNotEqual else Opcode.OpINotEqual,
- // TODO: Verify that these OpFOrd type operations produce the right value.
- // TODO: Is there a more fundamental difference between OpU and OpS operations here than just the type?
- .cmp_gt => if (is_float) Opcode.OpFOrdGreaterThan else if (is_signed) Opcode.OpSGreaterThan else Opcode.OpUGreaterThan,
- .cmp_gte => if (is_float) Opcode.OpFOrdGreaterThanEqual else if (is_signed) Opcode.OpSGreaterThanEqual else Opcode.OpUGreaterThanEqual,
- .cmp_lt => if (is_float) Opcode.OpFOrdLessThan else if (is_signed) Opcode.OpSLessThan else Opcode.OpULessThan,
- .cmp_lte => if (is_float) Opcode.OpFOrdLessThanEqual else if (is_signed) Opcode.OpSLessThanEqual else Opcode.OpULessThanEqual,
- else => unreachable,
+ // Comparisons are generally applicable to both scalar and vector operations in SPIR-V,
+ // but int and float versions of operations require different opcodes.
+ const info = try self.arithmeticTypeInfo(op_ty);
+
+ const opcode_index: usize = switch (info.class) {
+ .composite_integer => {
+ return self.fail("TODO: SPIR-V backend: binary operations for composite integers", .{});
+ },
+ .strange_integer => {
+ return self.fail("TODO: SPIR-V backend: comparison for strange integers", .{});
+ },
+ .float => 0,
+ .bool => 1,
+ .integer => switch (info.signedness) {
+ .signed => @as(usize, 1),
+ .unsigned => @as(usize, 2),
+ },
};
+ const opcode = ops[opcode_index];
try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, lhs_id, rhs_id });
return result_id;
}
- fn genUnOp(self: *DeclGen, inst: *Inst.UnOp) !ResultId {
- const operand_id = try self.resolve(inst.operand);
-
+ fn airNot(self: *DeclGen, inst: Air.Inst.Index) !ResultId {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const operand_id = try self.resolve(ty_op.operand);
const result_id = self.spv.allocResultId();
- const result_type_id = try self.genType(inst.base.src, inst.base.ty);
-
- const opcode = switch (inst.base.tag) {
- // Bool -> bool
- .not => Opcode.OpLogicalNot,
- else => unreachable,
- };
-
+ const result_type_id = try self.genType(Type.initTag(.bool));
+ const opcode: Opcode = .OpLogicalNot;
try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, operand_id });
-
return result_id;
}
- fn genAlloc(self: *DeclGen, inst: *Inst.NoOp) !ResultId {
+ fn airAlloc(self: *DeclGen, inst: Air.Inst.Index) !ResultId {
+ const ty = self.air.typeOfIndex(inst);
const storage_class = spec.StorageClass.Function;
- const result_type_id = try self.genPointerType(inst.base.src, inst.base.ty, storage_class);
+ const result_type_id = try self.genPointerType(ty, storage_class);
const result_id = self.spv.allocResultId();
// Rather than generating into code here, we're just going to generate directly into the fn_decls section so that
@@ -824,12 +819,12 @@ pub const DeclGen = struct {
return result_id;
}
- fn genArg(self: *DeclGen) ResultId {
+ fn airArg(self: *DeclGen) ResultId {
defer self.next_arg_index += 1;
return self.args.items[self.next_arg_index];
}
- fn genBlock(self: *DeclGen, inst: *Inst.Block) !?ResultId {
+ fn airBlock(self: *DeclGen, inst: Air.Inst.Index) !?ResultId {
        // In IR, a block doesn't really define an entry point; it acts more like a scope that breaks can jump out of and
// "return" a value from. This cannot be directly modelled in SPIR-V, so in a block instruction, we're going to split up
// the current block by first generating the code of the block, then a label, and then generate the rest of the current
@@ -849,11 +844,16 @@ pub const DeclGen = struct {
incoming_blocks.deinit(self.spv.gpa);
}
- try self.genBody(inst.body);
+ const ty = self.air.typeOfIndex(inst);
+ const inst_datas = self.air.instructions.items(.data);
+ const extra = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload);
+ const body = self.air.extra[extra.end..][0..extra.data.body_len];
+
+ try self.genBody(body);
try self.beginSPIRVBlock(label_id);
// If this block didn't produce a value, simply return here.
- if (!inst.base.ty.hasCodeGenBits())
+ if (!ty.hasCodeGenBits())
return null;
// Combine the result from the blocks using the Phi instruction.
@@ -863,7 +863,7 @@ pub const DeclGen = struct {
// TODO: OpPhi is limited in the types that it may produce, such as pointers. Figure out which other types
// are not allowed to be created from a phi node, and throw an error for those. For now, genType already throws
// an error for pointers.
- const result_type_id = try self.genType(inst.base.src, inst.base.ty);
+ const result_type_id = try self.genType(ty);
_ = result_type_id;
try writeOpcode(&self.code, .OpPhi, 2 + @intCast(u16, incoming_blocks.items.len * 2)); // result type + result + variable/parent...
@@ -875,30 +875,26 @@ pub const DeclGen = struct {
return result_id;
}
- fn genBr(self: *DeclGen, inst: *Inst.Br) !void {
- // TODO: This instruction needs to be the last in a block. Is that guaranteed?
- const target = self.blocks.get(inst.block).?;
+ fn airBr(self: *DeclGen, inst: Air.Inst.Index) !void {
+ const br = self.air.instructions.items(.data)[inst].br;
+ const block = self.blocks.get(br.block_inst).?;
+ const operand_ty = self.air.typeOf(br.operand);
- // TODO: For some reason, br is emitted with void parameters.
- if (inst.operand.ty.hasCodeGenBits()) {
- const operand_id = try self.resolve(inst.operand);
+ if (operand_ty.hasCodeGenBits()) {
+ const operand_id = try self.resolve(br.operand);
            // current_block_label_id is always defined here, since the function's entry label is emitted before any br.
- try target.incoming_blocks.append(self.spv.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id });
+ try block.incoming_blocks.append(self.spv.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id });
}
- try writeInstruction(&self.code, .OpBranch, &[_]Word{target.label_id});
- }
-
- fn genBrVoid(self: *DeclGen, inst: *Inst.BrVoid) !void {
- // TODO: This instruction needs to be the last in a block. Is that guaranteed?
- const target = self.blocks.get(inst.block).?;
- // Don't need to add this to the incoming block list, as there is no value to insert in the phi node anyway.
- try writeInstruction(&self.code, .OpBranch, &[_]Word{target.label_id});
+ try writeInstruction(&self.code, .OpBranch, &[_]Word{block.label_id});
}
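
// Illustrative sketch, not part of this diff: every value-carrying break
// records a (break value, source label) pair, and the block's OpPhi later
// consumes two words per incoming edge on top of the result type and result
// id, matching the `2 + incoming * 2` word count used when the phi is written.
const std = @import("std");

const Incoming = struct { break_value_id: u32, src_label_id: u32 };

test "one phi operand pair per break" {
    var incoming = std.ArrayList(Incoming).init(std.testing.allocator);
    defer incoming.deinit();
    try incoming.append(.{ .break_value_id = 7, .src_label_id = 3 });
    try incoming.append(.{ .break_value_id = 8, .src_label_id = 4 });
    const phi_word_count = 2 + incoming.items.len * 2;
    try std.testing.expectEqual(@as(usize, 6), phi_word_count);
}
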
- fn genCondBr(self: *DeclGen, inst: *Inst.CondBr) !void {
- // TODO: This instruction needs to be the last in a block. Is that guaranteed?
- const condition_id = try self.resolve(inst.condition);
+ fn airCondBr(self: *DeclGen, inst: Air.Inst.Index) !void {
+ const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+ const cond_br = self.air.extraData(Air.CondBr, pl_op.payload);
+ const then_body = self.air.extra[cond_br.end..][0..cond_br.data.then_body_len];
+ const else_body = self.air.extra[cond_br.end + then_body.len ..][0..cond_br.data.else_body_len];
+ const condition_id = try self.resolve(pl_op.operand);
// These will always generate a new SPIR-V block, since they are ir.Body and not ir.Block.
const then_label_id = self.spv.allocResultId();
@@ -914,23 +910,26 @@ pub const DeclGen = struct {
});
try self.beginSPIRVBlock(then_label_id);
- try self.genBody(inst.then_body);
+ try self.genBody(then_body);
try self.beginSPIRVBlock(else_label_id);
- try self.genBody(inst.else_body);
+ try self.genBody(else_body);
}
- fn genDbgStmt(self: *DeclGen, inst: *Inst.DbgStmt) !void {
+ fn airDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void {
+ const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt;
const src_fname_id = try self.spv.resolveSourceFileName(self.decl);
- try writeInstruction(&self.code, .OpLine, &[_]Word{ src_fname_id, inst.line, inst.column });
+ try writeInstruction(&self.code, .OpLine, &[_]Word{ src_fname_id, dbg_stmt.line, dbg_stmt.column });
}
- fn genLoad(self: *DeclGen, inst: *Inst.UnOp) !ResultId {
- const operand_id = try self.resolve(inst.operand);
+ fn airLoad(self: *DeclGen, inst: Air.Inst.Index) !ResultId {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const operand_id = try self.resolve(ty_op.operand);
+ const ty = self.air.typeOfIndex(inst);
- const result_type_id = try self.genType(inst.base.src, inst.base.ty);
+ const result_type_id = try self.genType(ty);
const result_id = self.spv.allocResultId();
- const operands = if (inst.base.ty.isVolatilePtr())
+ const operands = if (ty.isVolatilePtr())
&[_]Word{ result_type_id, result_id, operand_id, @bitCast(u32, spec.MemoryAccess{ .Volatile = true }) }
else
&[_]Word{ result_type_id, result_id, operand_id };
@@ -940,8 +939,10 @@ pub const DeclGen = struct {
return result_id;
}
- fn genLoop(self: *DeclGen, inst: *Inst.Loop) !void {
- // TODO: This instruction needs to be the last in a block. Is that guaranteed?
+ fn airLoop(self: *DeclGen, inst: Air.Inst.Index) !void {
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const loop = self.air.extraData(Air.Block, ty_pl.payload);
+ const body = self.air.extra[loop.end..][0..loop.data.body_len];
const loop_label_id = self.spv.allocResultId();
// Jump to the loop entry point
@@ -950,27 +951,29 @@ pub const DeclGen = struct {
// TODO: Look into OpLoopMerge.
try self.beginSPIRVBlock(loop_label_id);
- try self.genBody(inst.body);
+ try self.genBody(body);
try writeInstruction(&self.code, .OpBranch, &[_]Word{loop_label_id});
}
- fn genRet(self: *DeclGen, inst: *Inst.UnOp) !void {
- const operand_id = try self.resolve(inst.operand);
- // TODO: This instruction needs to be the last in a block. Is that guaranteed?
- try writeInstruction(&self.code, .OpReturnValue, &[_]Word{operand_id});
- }
-
- fn genRetVoid(self: *DeclGen) !void {
- // TODO: This instruction needs to be the last in a block. Is that guaranteed?
- try writeInstruction(&self.code, .OpReturn, &[_]Word{});
+ fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void {
+ const operand = self.air.instructions.items(.data)[inst].un_op;
+ const operand_ty = self.air.typeOf(operand);
+ if (operand_ty.hasCodeGenBits()) {
+ const operand_id = try self.resolve(operand);
+ try writeInstruction(&self.code, .OpReturnValue, &[_]Word{operand_id});
+ } else {
+ try writeInstruction(&self.code, .OpReturn, &[_]Word{});
+ }
}
- fn genStore(self: *DeclGen, inst: *Inst.BinOp) !void {
- const dst_ptr_id = try self.resolve(inst.lhs);
- const src_val_id = try self.resolve(inst.rhs);
+ fn airStore(self: *DeclGen, inst: Air.Inst.Index) !void {
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const dst_ptr_id = try self.resolve(bin_op.lhs);
+ const src_val_id = try self.resolve(bin_op.rhs);
+ const lhs_ty = self.air.typeOf(bin_op.lhs);
- const operands = if (inst.lhs.ty.isVolatilePtr())
+ const operands = if (lhs_ty.isVolatilePtr())
&[_]Word{ dst_ptr_id, src_val_id, @bitCast(u32, spec.MemoryAccess{ .Volatile = true }) }
else
&[_]Word{ dst_ptr_id, src_val_id };
@@ -978,8 +981,7 @@ pub const DeclGen = struct {
try writeInstruction(&self.code, .OpStore, operands);
}
- fn genUnreach(self: *DeclGen) !void {
- // TODO: This instruction needs to be the last in a block. Is that guaranteed?
+ fn airUnreach(self: *DeclGen) !void {
try writeInstruction(&self.code, .OpUnreachable, &[_]Word{});
}
};
diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig
index 3476ab2ce6..41397f55f4 100644
--- a/src/codegen/wasm.zig
+++ b/src/codegen/wasm.zig
@@ -9,14 +9,14 @@ const wasm = std.wasm;
const Module = @import("../Module.zig");
const Decl = Module.Decl;
-const ir = @import("../air.zig");
-const Inst = ir.Inst;
const Type = @import("../type.zig").Type;
const Value = @import("../value.zig").Value;
const Compilation = @import("../Compilation.zig");
const LazySrcLoc = Module.LazySrcLoc;
const link = @import("../link.zig");
const TypedValue = @import("../TypedValue.zig");
+const Air = @import("../Air.zig");
+const Liveness = @import("../Liveness.zig");
/// Wasm Value, created when generating an instruction
const WValue = union(enum) {
@@ -24,8 +24,8 @@ const WValue = union(enum) {
none: void,
/// Index of the local variable
local: u32,
- /// Instruction holding a constant `Value`
- constant: *Inst,
+ /// Holds a memoized typed value
+ constant: TypedValue,
/// Offset position in the list of bytecode instructions
code_offset: usize,
/// Used for variables that create multiple locals on the stack when allocated
@@ -483,8 +483,8 @@ pub const Result = union(enum) {
externally_managed: []const u8,
};
-/// Hashmap to store generated `WValue` for each `Inst`
-pub const ValueTable = std.AutoHashMapUnmanaged(*Inst, WValue);
+/// Hashmap to store generated `WValue` for each `Air.Inst.Index`
+pub const ValueTable = std.AutoHashMapUnmanaged(Air.Inst.Index, WValue);
/// Code represents the `Code` section of wasm that
/// belongs to a function
@@ -492,11 +492,13 @@ pub const Context = struct {
/// Reference to the function declaration the code
/// section belongs to
decl: *Decl,
+ air: Air,
+ liveness: Liveness,
gpa: *mem.Allocator,
- /// Table to save `WValue`'s generated by an `Inst`
+ /// Table to save `WValue`s generated by an `Air.Inst`
values: ValueTable,
- /// Mapping from *Inst.Block to block ids
- blocks: std.AutoArrayHashMapUnmanaged(*Inst.Block, u32) = .{},
+ /// Mapping from Air.Inst.Index to block ids
+ blocks: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, u32) = .{},
/// `bytes` contains the wasm bytecode belonging to the 'code' section.
code: ArrayList(u8),
/// Contains the generated function type bytecode for the current function
@@ -536,7 +538,8 @@ pub const Context = struct {
}
/// Sets `err_msg` on `Context` and returns `error.CodegenFail` which is caught in link/Wasm.zig
- fn fail(self: *Context, src: LazySrcLoc, comptime fmt: []const u8, args: anytype) InnerError {
+ fn fail(self: *Context, comptime fmt: []const u8, args: anytype) InnerError {
+ const src: LazySrcLoc = .{ .node_offset = 0 };
const src_loc = src.toSrcLocWithDecl(self.decl);
self.err_msg = try Module.ErrorMsg.create(self.gpa, src_loc, fmt, args);
return error.CodegenFail;
@@ -544,59 +547,66 @@ pub const Context = struct {
/// Resolves the `WValue` for the given instruction `ref`
/// When the given instruction has a `Value`, it returns a constant instead
- fn resolveInst(self: Context, inst: *Inst) WValue {
- if (!inst.ty.hasCodeGenBits()) return .none;
+ fn resolveInst(self: Context, ref: Air.Inst.Ref) WValue {
+ const inst_index = Air.refToIndex(ref) orelse {
+ const tv = Air.Inst.Ref.typed_value_map[@enumToInt(ref)];
+ if (!tv.ty.hasCodeGenBits()) {
+ return WValue.none;
+ }
+ return WValue{ .constant = tv };
+ };
+
+ const inst_type = self.air.typeOfIndex(inst_index);
+ if (!inst_type.hasCodeGenBits()) return .none;
- if (inst.value()) |_| {
- return WValue{ .constant = inst };
+ if (self.air.instructions.items(.tag)[inst_index] == .constant) {
+ const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl;
+ return WValue{ .constant = .{ .ty = inst_type, .val = self.air.values[ty_pl.payload] } };
}
- return self.values.get(inst).?; // Instruction does not dominate all uses!
+ return self.values.get(inst_index).?; // Instruction does not dominate all uses!
}
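The `Air.refToIndex` call above relies on the split inside `Air.Inst.Ref`: the first `typed_value_map.len` values name interned constants, and everything above that range maps one-to-one onto instruction indices. A hedged sketch of that mapping (the authoritative definition lives in src/Air.zig):

    fn refToIndexSketch(ref: Air.Inst.Ref) ?Air.Inst.Index {
        const ref_int = @enumToInt(ref);
        const map_len = Air.Inst.Ref.typed_value_map.len;
        if (ref_int >= map_len) {
            return @intCast(Air.Inst.Index, ref_int - map_len);
        }
        return null; // an interned constant, looked up via typed_value_map
    }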
/// Using a given `Type`, returns the corresponding wasm Valtype
- fn typeToValtype(self: *Context, src: LazySrcLoc, ty: Type) InnerError!wasm.Valtype {
+ fn typeToValtype(self: *Context, ty: Type) InnerError!wasm.Valtype {
return switch (ty.zigTypeTag()) {
.Float => blk: {
const bits = ty.floatBits(self.target);
if (bits == 16 or bits == 32) break :blk wasm.Valtype.f32;
if (bits == 64) break :blk wasm.Valtype.f64;
- return self.fail(src, "Float bit size not supported by wasm: '{d}'", .{bits});
+ return self.fail("Float bit size not supported by wasm: '{d}'", .{bits});
},
.Int => blk: {
const info = ty.intInfo(self.target);
if (info.bits <= 32) break :blk wasm.Valtype.i32;
if (info.bits > 32 and info.bits <= 64) break :blk wasm.Valtype.i64;
- return self.fail(src, "Integer bit size not supported by wasm: '{d}'", .{info.bits});
+ return self.fail("Integer bit size not supported by wasm: '{d}'", .{info.bits});
},
.Enum => switch (ty.tag()) {
.enum_simple => wasm.Valtype.i32,
- else => self.typeToValtype(
- src,
- ty.cast(Type.Payload.EnumFull).?.data.tag_ty,
- ),
+ else => self.typeToValtype(ty.cast(Type.Payload.EnumFull).?.data.tag_ty),
},
.Bool,
.Pointer,
.ErrorSet,
=> wasm.Valtype.i32,
.Struct, .ErrorUnion => unreachable, // Multi typed, must be handled individually.
- else => self.fail(src, "TODO - Wasm valtype for type '{s}'", .{ty.zigTypeTag()}),
+ else => self.fail("TODO - Wasm valtype for type '{s}'", .{ty.zigTypeTag()}),
};
}
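For reference, the branches above produce this mapping (a summary read off the code, not an exhaustive spec):

    ints up to 32 bits, bool, pointers, error sets, simple enums  =>  i32
    ints of 33 to 64 bits                                         =>  i64
    f16, f32                                                      =>  f32
    f64                                                           =>  f64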
/// Using a given `Type`, returns the byte representation of its wasm value type
- fn genValtype(self: *Context, src: LazySrcLoc, ty: Type) InnerError!u8 {
- return wasm.valtype(try self.typeToValtype(src, ty));
+ fn genValtype(self: *Context, ty: Type) InnerError!u8 {
+ return wasm.valtype(try self.typeToValtype(ty));
}
/// Using a given `Type`, returns the corresponding wasm value type
/// Unlike `genValtype`, this also allows `void` to create a block
/// with no return type
- fn genBlockType(self: *Context, src: LazySrcLoc, ty: Type) InnerError!u8 {
+ fn genBlockType(self: *Context, ty: Type) InnerError!u8 {
return switch (ty.tag()) {
.void, .noreturn => wasm.block_empty,
- else => self.genValtype(src, ty),
+ else => self.genValtype(ty),
};
}
@@ -610,7 +620,7 @@ pub const Context = struct {
try writer.writeByte(wasm.opcode(.local_get));
try leb.writeULEB128(writer, idx);
},
- .constant => |inst| try self.emitConstant(inst.src, inst.value().?, inst.ty), // creates a new constant onto the stack
+ .constant => |tv| try self.emitConstant(tv.val, tv.ty), // Creates a new constant on the stack
}
}
@@ -626,10 +636,7 @@ pub const Context = struct {
const fields_len = @intCast(u32, struct_data.fields.count());
try self.locals.ensureCapacity(self.gpa, self.locals.items.len + fields_len);
for (struct_data.fields.values()) |*value| {
- const val_type = try self.genValtype(
- .{ .node_offset = struct_data.node_offset },
- value.ty,
- );
+ const val_type = try self.genValtype(value.ty);
self.locals.appendAssumeCapacity(val_type);
self.local_index += 1;
}
@@ -640,7 +647,7 @@ pub const Context = struct {
},
.ErrorUnion => {
const payload_type = ty.errorUnionChild();
- const val_type = try self.genValtype(.{ .node_offset = 0 }, payload_type);
+ const val_type = try self.genValtype(payload_type);
// we emit the error value as the first local, and the payload as the one following it.
// The first local's index is also used to locate both the error and the payload.
@@ -657,7 +664,7 @@ pub const Context = struct {
} };
},
else => {
- const valtype = try self.genValtype(.{ .node_offset = 0 }, ty);
+ const valtype = try self.genValtype(ty);
try self.locals.append(self.gpa, valtype);
self.local_index += 1;
return WValue{ .local = initial_index };
@@ -680,7 +687,7 @@ pub const Context = struct {
ty.fnParamTypes(params);
for (params) |param_type| {
// Can we maybe get the source index of each param?
- const val_type = try self.genValtype(.{ .node_offset = 0 }, param_type);
+ const val_type = try self.genValtype(param_type);
try writer.writeByte(val_type);
}
}
@@ -689,13 +696,10 @@ pub const Context = struct {
const return_type = ty.fnReturnType();
switch (return_type.zigTypeTag()) {
.Void, .NoReturn => try leb.writeULEB128(writer, @as(u32, 0)),
- .Struct => return self.fail(.{ .node_offset = 0 }, "TODO: Implement struct as return type for wasm", .{}),
- .Optional => return self.fail(.{ .node_offset = 0 }, "TODO: Implement optionals as return type for wasm", .{}),
+ .Struct => return self.fail("TODO: Implement struct as return type for wasm", .{}),
+ .Optional => return self.fail("TODO: Implement optionals as return type for wasm", .{}),
.ErrorUnion => {
- const val_type = try self.genValtype(
- .{ .node_offset = 0 },
- return_type.errorUnionChild(),
- );
+ const val_type = try self.genValtype(return_type.errorUnionChild());
// write down the number of return values
try leb.writeULEB128(writer, @as(u32, 2));
@@ -705,58 +709,57 @@ pub const Context = struct {
else => {
try leb.writeULEB128(writer, @as(u32, 1));
// Can we maybe get the source index of the return type?
- const val_type = try self.genValtype(.{ .node_offset = 0 }, return_type);
+ const val_type = try self.genValtype(return_type);
try writer.writeByte(val_type);
},
}
}
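Taken together, these helpers produce a standard wasm functype entry. A sketch of the bytes for a hypothetical `fn (i32, i32) i64`, using the same `std.wasm` helpers the code above relies on (0x60 is the functype tag from the wasm spec):

    const std = @import("std");

    test "functype entry sketch" {
        const entry = [_]u8{
            0x60, // functype tag
            2, std.wasm.valtype(.i32), std.wasm.valtype(.i32), // param vector
            1, std.wasm.valtype(.i64), // result vector
        };
        try std.testing.expectEqualSlices(u8, &.{ 0x60, 2, 0x7F, 0x7F, 1, 0x7E }, &entry);
    }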
- /// Generates the wasm bytecode for the function declaration belonging to `Context`
- pub fn gen(self: *Context, typed_value: TypedValue) InnerError!Result {
- switch (typed_value.ty.zigTypeTag()) {
- .Fn => {
- try self.genFunctype();
+ pub fn genFunc(self: *Context) InnerError!Result {
+ try self.genFunctype();
+ // TODO: check for and handle death of instructions
- // Write instructions
- // TODO: check for and handle death of instructions
- const mod_fn = blk: {
- if (typed_value.val.castTag(.function)) |func| break :blk func.data;
- if (typed_value.val.castTag(.extern_fn)) |_| return Result.appended; // don't need code body for extern functions
- unreachable;
- };
-
- // Reserve space to write the size after generating the code as well as space for locals count
- try self.code.resize(10);
-
- try self.genBody(mod_fn.body);
-
- // finally, write our local types at the 'offset' position
- {
- leb.writeUnsignedFixed(5, self.code.items[5..10], @intCast(u32, self.locals.items.len));
-
- // offset into 'code' section where we will put our locals types
- var local_offset: usize = 10;
-
- // emit the actual locals amount
- for (self.locals.items) |local| {
- var buf: [6]u8 = undefined;
- leb.writeUnsignedFixed(5, buf[0..5], @as(u32, 1));
- buf[5] = local;
- try self.code.insertSlice(local_offset, &buf);
- local_offset += 6;
- }
- }
+ // Reserve space for the size of the generated code, as well as for the locals count; both are written after codegen
+ try self.code.resize(10);
+
+ try self.genBody(self.air.getMainBody());
- const writer = self.code.writer();
- try writer.writeByte(wasm.opcode(.end));
+ // finally, write our local types at the 'offset' position
+ {
+ leb.writeUnsignedFixed(5, self.code.items[5..10], @intCast(u32, self.locals.items.len));
- // Fill in the size of the generated code to the reserved space at the
- // beginning of the buffer.
- const size = self.code.items.len - 5 + self.decl.fn_link.wasm.idx_refs.items.len * 5;
- leb.writeUnsignedFixed(5, self.code.items[0..5], @intCast(u32, size));
+ // offset into 'code' section where we will put our locals types
+ var local_offset: usize = 10;
- // codegen data has been appended to `code`
- return Result.appended;
+ // emit each local: a count of 1 followed by its value type
+ for (self.locals.items) |local| {
+ var buf: [6]u8 = undefined;
+ leb.writeUnsignedFixed(5, buf[0..5], @as(u32, 1));
+ buf[5] = local;
+ try self.code.insertSlice(local_offset, &buf);
+ local_offset += 6;
+ }
+ }
+
+ const writer = self.code.writer();
+ try writer.writeByte(wasm.opcode(.end));
+
+ // Fill in the size of the generated code into the reserved space at the
+ // beginning of the buffer.
+ const size = self.code.items.len - 5 + self.decl.fn_link.wasm.idx_refs.items.len * 5;
+ leb.writeUnsignedFixed(5, self.code.items[0..5], @intCast(u32, size));
+
+ // codegen data has been appended to `code`
+ return Result.appended;
+ }
+
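The 5-byte reservations above work because `leb.writeUnsignedFixed` zero-pads a ULEB128 value to a fixed width, so the slot can be patched once the final size is known. A small sketch of that behavior:

    const std = @import("std");

    test "fixed-width ULEB128 sketch" {
        var buf: [5]u8 = undefined;
        std.leb.writeUnsignedFixed(5, &buf, 1);
        // Value bits LSB-first; continuation bit set on all but the last byte.
        try std.testing.expectEqualSlices(u8, &.{ 0x81, 0x80, 0x80, 0x80, 0x00 }, &buf);
    }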
+ /// Generates the wasm bytecode for the declaration belonging to `Context`
+ pub fn gen(self: *Context, typed_value: TypedValue) InnerError!Result {
+ switch (typed_value.ty.zigTypeTag()) {
+ .Fn => {
+ try self.genFunctype();
+ if (typed_value.val.castTag(.extern_fn)) |_| return Result.appended; // don't need code body for extern functions
+ return self.fail("TODO implement wasm codegen for function pointers", .{});
},
.Array => {
if (typed_value.val.castTag(.bytes)) |payload| {
@@ -775,7 +778,7 @@ pub const Context = struct {
}
}
return Result{ .externally_managed = payload.data };
- } else return self.fail(.{ .node_offset = 0 }, "TODO implement gen for more kinds of arrays", .{});
+ } else return self.fail("TODO implement gen for more kinds of arrays", .{});
},
.Int => {
const info = typed_value.ty.intInfo(self.target);
@@ -784,85 +787,91 @@ pub const Context = struct {
try self.code.append(@intCast(u8, int_byte));
return Result.appended;
}
- return self.fail(.{ .node_offset = 0 }, "TODO: Implement codegen for int type: '{}'", .{typed_value.ty});
+ return self.fail("TODO: Implement codegen for int type: '{}'", .{typed_value.ty});
},
- else => |tag| return self.fail(.{ .node_offset = 0 }, "TODO: Implement zig type codegen for type: '{s}'", .{tag}),
+ else => |tag| return self.fail("TODO: Implement zig type codegen for type: '{s}'", .{tag}),
}
}
- fn genInst(self: *Context, inst: *Inst) InnerError!WValue {
- return switch (inst.tag) {
- .add => self.genBinOp(inst.castTag(.add).?, .add),
- .alloc => self.genAlloc(inst.castTag(.alloc).?),
- .arg => self.genArg(inst.castTag(.arg).?),
- .bit_and => self.genBinOp(inst.castTag(.bit_and).?, .@"and"),
- .bitcast => self.genBitcast(inst.castTag(.bitcast).?),
- .bit_or => self.genBinOp(inst.castTag(.bit_or).?, .@"or"),
- .block => self.genBlock(inst.castTag(.block).?),
- .bool_and => self.genBinOp(inst.castTag(.bool_and).?, .@"and"),
- .bool_or => self.genBinOp(inst.castTag(.bool_or).?, .@"or"),
- .breakpoint => self.genBreakpoint(inst.castTag(.breakpoint).?),
- .br => self.genBr(inst.castTag(.br).?),
- .call => self.genCall(inst.castTag(.call).?),
- .cmp_eq => self.genCmp(inst.castTag(.cmp_eq).?, .eq),
- .cmp_gte => self.genCmp(inst.castTag(.cmp_gte).?, .gte),
- .cmp_gt => self.genCmp(inst.castTag(.cmp_gt).?, .gt),
- .cmp_lte => self.genCmp(inst.castTag(.cmp_lte).?, .lte),
- .cmp_lt => self.genCmp(inst.castTag(.cmp_lt).?, .lt),
- .cmp_neq => self.genCmp(inst.castTag(.cmp_neq).?, .neq),
- .condbr => self.genCondBr(inst.castTag(.condbr).?),
+ fn genInst(self: *Context, inst: Air.Inst.Index) !WValue {
+ const air_tags = self.air.instructions.items(.tag);
+ return switch (air_tags[inst]) {
+ .add => self.airBinOp(inst, .add),
+ .sub => self.airBinOp(inst, .sub),
+ .mul => self.airBinOp(inst, .mul),
+ .div => self.airBinOp(inst, .div),
+ .bit_and => self.airBinOp(inst, .@"and"),
+ .bit_or => self.airBinOp(inst, .@"or"),
+ .bool_and => self.airBinOp(inst, .@"and"),
+ .bool_or => self.airBinOp(inst, .@"or"),
+ .xor => self.airBinOp(inst, .xor),
+
+ .cmp_eq => self.airCmp(inst, .eq),
+ .cmp_gte => self.airCmp(inst, .gte),
+ .cmp_gt => self.airCmp(inst, .gt),
+ .cmp_lte => self.airCmp(inst, .lte),
+ .cmp_lt => self.airCmp(inst, .lt),
+ .cmp_neq => self.airCmp(inst, .neq),
+
+ .alloc => self.airAlloc(inst),
+ .arg => self.airArg(inst),
+ .bitcast => self.airBitcast(inst),
+ .block => self.airBlock(inst),
+ .breakpoint => self.airBreakpoint(inst),
+ .br => self.airBr(inst),
+ .call => self.airCall(inst),
+ .cond_br => self.airCondBr(inst),
.constant => unreachable,
.dbg_stmt => WValue.none,
- .div => self.genBinOp(inst.castTag(.div).?, .div),
- .is_err => self.genIsErr(inst.castTag(.is_err).?, .i32_ne),
- .is_non_err => self.genIsErr(inst.castTag(.is_non_err).?, .i32_eq),
- .load => self.genLoad(inst.castTag(.load).?),
- .loop => self.genLoop(inst.castTag(.loop).?),
- .mul => self.genBinOp(inst.castTag(.mul).?, .mul),
- .not => self.genNot(inst.castTag(.not).?),
- .ret => self.genRet(inst.castTag(.ret).?),
- .retvoid => WValue.none,
- .store => self.genStore(inst.castTag(.store).?),
- .struct_field_ptr => self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?),
- .sub => self.genBinOp(inst.castTag(.sub).?, .sub),
- .switchbr => self.genSwitchBr(inst.castTag(.switchbr).?),
- .unreach => self.genUnreachable(inst.castTag(.unreach).?),
- .unwrap_errunion_payload => self.genUnwrapErrUnionPayload(inst.castTag(.unwrap_errunion_payload).?),
- .wrap_errunion_payload => self.genWrapErrUnionPayload(inst.castTag(.wrap_errunion_payload).?),
- .xor => self.genBinOp(inst.castTag(.xor).?, .xor),
- else => self.fail(.{ .node_offset = 0 }, "TODO: Implement wasm inst: {s}", .{inst.tag}),
+ .is_err => self.airIsErr(inst, .i32_ne),
+ .is_non_err => self.airIsErr(inst, .i32_eq),
+ .load => self.airLoad(inst),
+ .loop => self.airLoop(inst),
+ .not => self.airNot(inst),
+ .ret => self.airRet(inst),
+ .store => self.airStore(inst),
+ .struct_field_ptr => self.airStructFieldPtr(inst),
+ .switch_br => self.airSwitchBr(inst),
+ .unreach => self.airUnreachable(inst),
+ .unwrap_errunion_payload => self.airUnwrapErrUnionPayload(inst),
+ .wrap_errunion_payload => self.airWrapErrUnionPayload(inst),
+ else => |tag| self.fail("TODO: Implement wasm inst: {s}", .{@tagName(tag)}),
};
}
- fn genBody(self: *Context, body: ir.Body) InnerError!void {
- for (body.instructions) |inst| {
+ fn genBody(self: *Context, body: []const Air.Inst.Index) InnerError!void {
+ for (body) |inst| {
const result = try self.genInst(inst);
try self.values.putNoClobber(self.gpa, inst, result);
}
}
- fn genRet(self: *Context, inst: *Inst.UnOp) InnerError!WValue {
- // TODO: Implement tail calls
- const operand = self.resolveInst(inst.operand);
+ fn airRet(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
+ const un_op = self.air.instructions.items(.data)[inst].un_op;
+ const operand = self.resolveInst(un_op);
try self.emitWValue(operand);
try self.code.append(wasm.opcode(.@"return"));
return .none;
}
- fn genCall(self: *Context, inst: *Inst.Call) InnerError!WValue {
- const func_val = inst.func.value().?;
+ fn airCall(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
+ const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+ const extra = self.air.extraData(Air.Call, pl_op.payload);
+ const args = self.air.extra[extra.end..][0..extra.data.args_len];
const target: *Decl = blk: {
+ const func_val = self.air.value(pl_op.operand).?;
+
if (func_val.castTag(.function)) |func| {
break :blk func.data.owner_decl;
} else if (func_val.castTag(.extern_fn)) |ext_fn| {
break :blk ext_fn.data;
}
- return self.fail(inst.base.src, "Expected a function, but instead found type '{s}'", .{func_val.tag()});
+ return self.fail("Expected a function, but instead found type '{s}'", .{func_val.tag()});
};
- for (inst.args) |arg| {
- const arg_val = self.resolveInst(arg);
+ for (args) |arg| {
+ const arg_val = self.resolveInst(@intToEnum(Air.Inst.Ref, arg));
try self.emitWValue(arg_val);
}
@@ -878,16 +887,17 @@ pub const Context = struct {
return .none;
}
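The `extraData` call plus a slice of `self.air.extra` is the access pattern for every payload-carrying instruction in this file (`Air.Call`, `Air.Block`, `Air.CondBr`, `Air.SwitchBr`). A hedged sketch of what `extraData` is assumed to do; the authoritative definition lives in src/Air.zig:

    // Decodes the fixed fields of T from air.extra starting at `index`;
    // `end` then points at the trailing variable-length data (e.g. the argument Refs).
    pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end: usize } {
        var i: usize = index;
        var result: T = undefined;
        inline for (std.meta.fields(T)) |field| {
            @field(result, field.name) = switch (field.field_type) {
                u32 => air.extra[i],
                Air.Inst.Ref => @intToEnum(Air.Inst.Ref, air.extra[i]),
                else => @compileError("unexpected field type"),
            };
            i += 1;
        }
        return .{ .data = result, .end = i };
    }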
- fn genAlloc(self: *Context, inst: *Inst.NoOp) InnerError!WValue {
- const elem_type = inst.base.ty.elemType();
+ fn airAlloc(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
+ const elem_type = self.air.typeOfIndex(inst).elemType();
return self.allocLocal(elem_type);
}
- fn genStore(self: *Context, inst: *Inst.BinOp) InnerError!WValue {
+ fn airStore(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const writer = self.code.writer();
- const lhs = self.resolveInst(inst.lhs);
- const rhs = self.resolveInst(inst.rhs);
+ const lhs = self.resolveInst(bin_op.lhs);
+ const rhs = self.resolveInst(bin_op.rhs);
switch (lhs) {
.multi_value => |multi_value| switch (rhs) {
@@ -895,7 +905,7 @@ pub const Context = struct {
// we simply assign the local_index to the rhs one.
// This allows us to update struct fields without having to individually
// set each local as each field's index will be calculated off the struct's base index
- .multi_value => self.values.put(self.gpa, inst.lhs, rhs) catch unreachable, // Instruction does not dominate all uses!
+ .multi_value => self.values.put(self.gpa, Air.refToIndex(bin_op.lhs).?, rhs) catch unreachable, // Instruction does not dominate all uses!
.constant, .none => {
// emit all values onto the stack if constant
try self.emitWValue(rhs);
@@ -921,20 +931,22 @@ pub const Context = struct {
return .none;
}
- fn genLoad(self: *Context, inst: *Inst.UnOp) InnerError!WValue {
- return self.resolveInst(inst.operand);
+ fn airLoad(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ return self.resolveInst(ty_op.operand);
}
- fn genArg(self: *Context, inst: *Inst.Arg) InnerError!WValue {
+ fn airArg(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
_ = inst;
// arguments share the index with locals
defer self.local_index += 1;
return WValue{ .local = self.local_index };
}
- fn genBinOp(self: *Context, inst: *Inst.BinOp, op: Op) InnerError!WValue {
- const lhs = self.resolveInst(inst.lhs);
- const rhs = self.resolveInst(inst.rhs);
+ fn airBinOp(self: *Context, inst: Air.Inst.Index, op: Op) InnerError!WValue {
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = self.resolveInst(bin_op.lhs);
+ const rhs = self.resolveInst(bin_op.rhs);
// it's possible for lhs and/or rhs to return an offset as well,
// in which case we return the first offset occurrence we find.
@@ -947,23 +959,24 @@ pub const Context = struct {
try self.emitWValue(lhs);
try self.emitWValue(rhs);
+ const bin_ty = self.air.typeOf(bin_op.lhs);
const opcode: wasm.Opcode = buildOpcode(.{
.op = op,
- .valtype1 = try self.typeToValtype(inst.base.src, inst.base.ty),
- .signedness = if (inst.base.ty.isSignedInt()) .signed else .unsigned,
+ .valtype1 = try self.typeToValtype(bin_ty),
+ .signedness = if (bin_ty.isSignedInt()) .signed else .unsigned,
});
try self.code.append(wasm.opcode(opcode));
return WValue{ .code_offset = offset };
}
- fn emitConstant(self: *Context, src: LazySrcLoc, value: Value, ty: Type) InnerError!void {
+ fn emitConstant(self: *Context, value: Value, ty: Type) InnerError!void {
const writer = self.code.writer();
switch (ty.zigTypeTag()) {
.Int => {
// write opcode
const opcode: wasm.Opcode = buildOpcode(.{
.op = .@"const",
- .valtype1 = try self.typeToValtype(src, ty),
+ .valtype1 = try self.typeToValtype(ty),
});
try writer.writeByte(wasm.opcode(opcode));
// write constant
@@ -982,14 +995,14 @@ pub const Context = struct {
// write opcode
const opcode: wasm.Opcode = buildOpcode(.{
.op = .@"const",
- .valtype1 = try self.typeToValtype(src, ty),
+ .valtype1 = try self.typeToValtype(ty),
});
try writer.writeByte(wasm.opcode(opcode));
// write constant
switch (ty.floatBits(self.target)) {
0...32 => try writer.writeIntLittle(u32, @bitCast(u32, value.toFloat(f32))),
64 => try writer.writeIntLittle(u64, @bitCast(u64, value.toFloat(f64))),
- else => |bits| return self.fail(src, "Wasm TODO: emitConstant for float with {d} bits", .{bits}),
+ else => |bits| return self.fail("Wasm TODO: emitConstant for float with {d} bits", .{bits}),
}
},
.Pointer => {
@@ -1006,7 +1019,7 @@ pub const Context = struct {
try writer.writeByte(wasm.opcode(.i32_load));
try leb.writeULEB128(writer, @as(u32, 0));
try leb.writeULEB128(writer, @as(u32, 0));
- } else return self.fail(src, "Wasm TODO: emitConstant for other const pointer tag {s}", .{value.tag()});
+ } else return self.fail("Wasm TODO: emitConstant for other const pointer tag {s}", .{value.tag()});
},
.Void => {},
.Enum => {
@@ -1020,7 +1033,7 @@ pub const Context = struct {
const enum_full = ty.cast(Type.Payload.EnumFull).?.data;
if (enum_full.values.count() != 0) {
const tag_val = enum_full.values.keys()[field_index.data];
- try self.emitConstant(src, tag_val, enum_full.tag_ty);
+ try self.emitConstant(tag_val, enum_full.tag_ty);
} else {
try writer.writeByte(wasm.opcode(.i32_const));
try leb.writeULEB128(writer, field_index.data);
@@ -1031,7 +1044,7 @@ pub const Context = struct {
} else {
var int_tag_buffer: Type.Payload.Bits = undefined;
const int_tag_ty = ty.intTagType(&int_tag_buffer);
- try self.emitConstant(src, value, int_tag_ty);
+ try self.emitConstant(value, int_tag_ty);
}
},
.ErrorSet => {
@@ -1045,12 +1058,12 @@ pub const Context = struct {
const payload_type = ty.errorUnionChild();
if (value.getError()) |_| {
// write the error value
- try self.emitConstant(src, data, error_type);
+ try self.emitConstant(data, error_type);
// no payload, so write a '0' const
const opcode: wasm.Opcode = buildOpcode(.{
.op = .@"const",
- .valtype1 = try self.typeToValtype(src, payload_type),
+ .valtype1 = try self.typeToValtype(payload_type),
});
try writer.writeByte(wasm.opcode(opcode));
try leb.writeULEB128(writer, @as(u32, 0));
@@ -1059,21 +1072,24 @@ pub const Context = struct {
try writer.writeByte(wasm.opcode(.i32_const));
try leb.writeULEB128(writer, @as(u32, 0));
// after the error code, we emit the payload
- try self.emitConstant(src, data, payload_type);
+ try self.emitConstant(data, payload_type);
}
},
- else => |zig_type| return self.fail(src, "Wasm TODO: emitConstant for zigTypeTag {s}", .{zig_type}),
+ else => |zig_type| return self.fail("Wasm TODO: emitConstant for zigTypeTag {s}", .{zig_type}),
}
}
- fn genBlock(self: *Context, block: *Inst.Block) InnerError!WValue {
- const block_ty = try self.genBlockType(block.base.src, block.base.ty);
+ fn airBlock(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const block_ty = try self.genBlockType(self.air.getRefType(ty_pl.ty));
+ const extra = self.air.extraData(Air.Block, ty_pl.payload);
+ const body = self.air.extra[extra.end..][0..extra.data.body_len];
try self.startBlock(.block, block_ty, null);
// Here we set the current block idx, so breaks know the depth to jump
// to when breaking out.
- try self.blocks.putNoClobber(self.gpa, block, self.block_depth);
- try self.genBody(block.body);
+ try self.blocks.putNoClobber(self.gpa, inst, self.block_depth);
+ try self.genBody(body);
try self.endBlock();
return .none;
@@ -1097,11 +1113,15 @@ pub const Context = struct {
self.block_depth -= 1;
}
- fn genLoop(self: *Context, loop: *Inst.Loop) InnerError!WValue {
- const loop_ty = try self.genBlockType(loop.base.src, loop.base.ty);
+ fn airLoop(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const loop = self.air.extraData(Air.Block, ty_pl.payload);
+ const body = self.air.extra[loop.end..][0..loop.data.body_len];
- try self.startBlock(.loop, loop_ty, null);
- try self.genBody(loop.body);
+ // result type of loop is always 'noreturn', meaning we can always
+ // emit the wasm type 'block_empty'.
+ try self.startBlock(.loop, wasm.block_empty, null);
+ try self.genBody(body);
// breaking to the index of a loop block will continue the loop instead
try self.code.append(wasm.opcode(.br));
@@ -1112,8 +1132,12 @@ pub const Context = struct {
return .none;
}
- fn genCondBr(self: *Context, condbr: *Inst.CondBr) InnerError!WValue {
- const condition = self.resolveInst(condbr.condition);
+ fn airCondBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
+ const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+ const condition = self.resolveInst(pl_op.operand);
+ const extra = self.air.extraData(Air.CondBr, pl_op.payload);
+ const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len];
+ const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
const writer = self.code.writer();
// TODO: Handle death instructions for then and else body
@@ -1128,8 +1152,9 @@ pub const Context = struct {
break :blk offset;
},
};
- const block_ty = try self.genBlockType(condbr.base.src, condbr.base.ty);
- try self.startBlock(.block, block_ty, offset);
+
+ // result type is always noreturn, so use `block_empty` as type.
+ try self.startBlock(.block, wasm.block_empty, offset);
// we inserted the block in front of the condition
// so now check the condition: when it is true we break out of this block, skipping the else body
@@ -1137,35 +1162,37 @@ pub const Context = struct {
try writer.writeByte(wasm.opcode(.br_if));
try leb.writeULEB128(writer, @as(u32, 0));
- try self.genBody(condbr.else_body);
+ try self.genBody(else_body);
try self.endBlock();
// Outer block that matches the condition
- try self.genBody(condbr.then_body);
+ try self.genBody(then_body);
return .none;
}
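Spelled out, the emitted shape is easier to see. Both AIR bodies are noreturn, so the else body can never fall through past the `end`; a sketch:

    block            ;; spliced in at `offset`, before the condition
      <condition>
      br_if 0        ;; condition true: exit the block, landing at then_body
      <else_body>    ;; noreturn, never falls through
    end
    <then_body>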
- fn genCmp(self: *Context, inst: *Inst.BinOp, op: std.math.CompareOperator) InnerError!WValue {
+ fn airCmp(self: *Context, inst: Air.Inst.Index, op: std.math.CompareOperator) InnerError!WValue {
// save offset, so potential conditions can insert blocks in front of
// the comparison that we can later jump back to
const offset = self.code.items.len;
- const lhs = self.resolveInst(inst.lhs);
- const rhs = self.resolveInst(inst.rhs);
+ const data: Air.Inst.Data = self.air.instructions.items(.data)[inst];
+ const lhs = self.resolveInst(data.bin_op.lhs);
+ const rhs = self.resolveInst(data.bin_op.rhs);
+ const lhs_ty = self.air.typeOf(data.bin_op.lhs);
try self.emitWValue(lhs);
try self.emitWValue(rhs);
const signedness: std.builtin.Signedness = blk: {
// by default we assume the operand type is unsigned (i.e. bools and enum values)
- if (inst.lhs.ty.zigTypeTag() != .Int) break :blk .unsigned;
+ if (lhs_ty.zigTypeTag() != .Int) break :blk .unsigned;
// in case of an actual integer, we emit the correct signedness
- break :blk inst.lhs.ty.intInfo(self.target).signedness;
+ break :blk lhs_ty.intInfo(self.target).signedness;
};
const opcode: wasm.Opcode = buildOpcode(.{
- .valtype1 = try self.typeToValtype(inst.base.src, inst.lhs.ty),
+ .valtype1 = try self.typeToValtype(lhs_ty),
.op = switch (op) {
.lt => .lt,
.lte => .le,
@@ -1180,16 +1207,17 @@ pub const Context = struct {
return WValue{ .code_offset = offset };
}
- fn genBr(self: *Context, br: *Inst.Br) InnerError!WValue {
+ fn airBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
+ const br = self.air.instructions.items(.data)[inst].br;
+
// if operand has codegen bits we should break with a value
- if (br.operand.ty.hasCodeGenBits()) {
- const operand = self.resolveInst(br.operand);
- try self.emitWValue(operand);
+ if (self.air.typeOf(br.operand).hasCodeGenBits()) {
+ try self.emitWValue(self.resolveInst(br.operand));
}
// We map every block to its block index.
// We then determine how far we have to jump to it by subtracting it from the current block depth
- const idx: u32 = self.block_depth - self.blocks.get(br.block).?;
+ const idx: u32 = self.block_depth - self.blocks.get(br.block_inst).?;
const writer = self.code.writer();
try writer.writeByte(wasm.opcode(.br));
try leb.writeULEB128(writer, idx);
@@ -1197,10 +1225,11 @@ pub const Context = struct {
return .none;
}
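For the depth arithmetic above: wasm's `br` takes a relative depth, where 0 is the innermost enclosing block. A worked example with depths recorded as each block starts:

    block A          ;; block_depth = 1, blocks[A] = 1
      block B        ;; block_depth = 2, blocks[B] = 2
        loop C       ;; block_depth = 3
          ;; br to A: idx = 3 - 1 = 2
          ;; br to B: idx = 3 - 2 = 1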
- fn genNot(self: *Context, not: *Inst.UnOp) InnerError!WValue {
+ fn airNot(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const offset = self.code.items.len;
- const operand = self.resolveInst(not.operand);
+ const operand = self.resolveInst(ty_op.operand);
try self.emitWValue(operand);
// wasm has neither booleans nor a `not` instruction, therefore compare with 0
@@ -1214,73 +1243,93 @@ pub const Context = struct {
return WValue{ .code_offset = offset };
}
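Since boolean `not` lowers to a comparison with zero, the bytes emitted for negating local 0 come out as follows (a sketch using the same `std.wasm` opcode helper; 0x20/0x41/0x46 are local.get, i32.const, and i32.eq):

    const std = @import("std");

    test "boolean not lowering sketch" {
        const bytes = [_]u8{
            std.wasm.opcode(.local_get), 0, // push the operand (local 0)
            std.wasm.opcode(.i32_const), 0, // push zero
            std.wasm.opcode(.i32_eq), // pushes 1 when the operand == 0, else 0
        };
        try std.testing.expectEqualSlices(u8, &.{ 0x20, 0, 0x41, 0, 0x46 }, &bytes);
    }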
- fn genBreakpoint(self: *Context, breakpoint: *Inst.NoOp) InnerError!WValue {
+ fn airBreakpoint(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
_ = self;
- _ = breakpoint;
+ _ = inst;
// unsupported by wasm itself. Can be implemented once we support DWARF
// for wasm
return .none;
}
- fn genUnreachable(self: *Context, unreach: *Inst.NoOp) InnerError!WValue {
- _ = unreach;
+ fn airUnreachable(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
+ _ = inst;
try self.code.append(wasm.opcode(.@"unreachable"));
return .none;
}
- fn genBitcast(self: *Context, bitcast: *Inst.UnOp) InnerError!WValue {
- return self.resolveInst(bitcast.operand);
+ fn airBitcast(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ return self.resolveInst(ty_op.operand);
}
- fn genStructFieldPtr(self: *Context, inst: *Inst.StructFieldPtr) InnerError!WValue {
- const struct_ptr = self.resolveInst(inst.struct_ptr);
+ fn airStructFieldPtr(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const extra = self.air.extraData(Air.StructField, ty_pl.payload);
+ const struct_ptr = self.resolveInst(extra.data.struct_ptr);
- return WValue{ .local = struct_ptr.multi_value.index + @intCast(u32, inst.field_index) };
+ return WValue{ .local = struct_ptr.multi_value.index + @intCast(u32, extra.data.field_index) };
}
- fn genSwitchBr(self: *Context, inst: *Inst.SwitchBr) InnerError!WValue {
- const target = self.resolveInst(inst.target);
- const target_ty = inst.target.ty;
- const valtype = try self.typeToValtype(.{ .node_offset = 0 }, target_ty);
- const blocktype = try self.genBlockType(inst.base.src, inst.base.ty);
-
- const signedness: std.builtin.Signedness = blk: {
- // by default we tell the operand type is unsigned (i.e. bools and enum values)
- if (target_ty.zigTypeTag() != .Int) break :blk .unsigned;
-
- // incase of an actual integer, we emit the correct signedness
- break :blk target_ty.intInfo(self.target).signedness;
- };
- for (inst.cases) |case| {
- // create a block for each case, when the condition does not match we break out of it
- try self.startBlock(.block, blocktype, null);
- try self.emitWValue(target);
- try self.emitConstant(.{ .node_offset = 0 }, case.item, target_ty);
- const opcode = buildOpcode(.{
- .valtype1 = valtype,
- .op = .ne, // not equal because we jump out the block if it does not match the condition
- .signedness = signedness,
- });
- try self.code.append(wasm.opcode(opcode));
- try self.code.append(wasm.opcode(.br_if));
- try leb.writeULEB128(self.code.writer(), @as(u32, 0));
-
- // emit our block code
- try self.genBody(case.body);
-
- // end the block we created earlier
- try self.endBlock();
- }
-
- // finally, emit the else case if it exists. Here we will not have to
- // check for a condition, so also no need to emit a block.
- try self.genBody(inst.else_body);
-
- return .none;
+ fn airSwitchBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
+ const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+ const extra = self.air.extraData(Air.SwitchBr, pl_op.payload);
+ const cases = self.air.extra[extra.end..][0..extra.data.cases_len];
+ const else_body = self.air.extra[extra.end + cases.len ..][0..extra.data.else_body_len];
+
+ const target = self.resolveInst(pl_op.operand);
+ const target_ty = self.air.typeOf(pl_op.operand);
+ const valtype = try self.typeToValtype(target_ty);
+ // result type is always 'noreturn'
+ const blocktype = wasm.block_empty;
+
+ _ = valtype;
+ _ = blocktype;
+ _ = target;
+ _ = else_body;
+ return self.fail("TODO implement wasm codegen for switch", .{});
+ //const signedness: std.builtin.Signedness = blk: {
+ // // by default we assume the operand type is unsigned (i.e. bools and enum values)
+ // if (target_ty.zigTypeTag() != .Int) break :blk .unsigned;
+
+ // // in case of an actual integer, we emit the correct signedness
+ // break :blk target_ty.intInfo(self.target).signedness;
+ //};
+ //for (cases) |case_idx| {
+ // const case = self.air.extraData(Air.SwitchBr.Case, case_idx);
+ // const case_body = self.air.extra[case.end..][0..case.data.body_len];
+
+ // // create a block for each case, when the condition does not match we break out of it
+ // try self.startBlock(.block, blocktype, null);
+ // try self.emitWValue(target);
+
+ // const val = self.air.value(case.data.item).?;
+ // try self.emitConstant(val, target_ty);
+ // const opcode = buildOpcode(.{
+ // .valtype1 = valtype,
+ // .op = .ne, // not equal because we jump out the block if it does not match the condition
+ // .signedness = signedness,
+ // });
+ // try self.code.append(wasm.opcode(opcode));
+ // try self.code.append(wasm.opcode(.br_if));
+ // try leb.writeULEB128(self.code.writer(), @as(u32, 0));
+
+ // // emit our block code
+ // try self.genBody(case_body);
+
+ // // end the block we created earlier
+ // try self.endBlock();
+ //}
+
+ //// finally, emit the else case if it exists. Here we will not have to
+ //// check for a condition, so also no need to emit a block.
+ //try self.genBody(else_body);
+
+ //return .none;
}
- fn genIsErr(self: *Context, inst: *Inst.UnOp, opcode: wasm.Opcode) InnerError!WValue {
- const operand = self.resolveInst(inst.operand);
+ fn airIsErr(self: *Context, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!WValue {
+ const un_op = self.air.instructions.items(.data)[inst].un_op;
+ const operand = self.resolveInst(un_op);
const offset = self.code.items.len;
const writer = self.code.writer();
@@ -1295,8 +1344,9 @@ pub const Context = struct {
return WValue{ .code_offset = offset };
}
- fn genUnwrapErrUnionPayload(self: *Context, inst: *Inst.UnOp) InnerError!WValue {
- const operand = self.resolveInst(inst.operand);
+ fn airUnwrapErrUnionPayload(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const operand = self.resolveInst(ty_op.operand);
// The index of multi_value contains the error code. The payload lives at the
// next index, so we convert that to a `WValue.local`
//
@@ -1304,7 +1354,8 @@ pub const Context = struct {
return WValue{ .local = operand.multi_value.index + 1 };
}
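The `multi_value.index + 1` arithmetic reflects how this backend spreads an error union across two consecutive locals. A hedged sketch of that layout (a hypothetical helper, not part of the codebase):

    // local N     => error code (0 means "no error"; what airIsErr compares)
    // local N + 1 => payload (what airUnwrapErrUnionPayload returns)
    const ErrorUnionLocals = struct {
        base: u32,

        fn errorLocal(self: @This()) u32 {
            return self.base;
        }
        fn payloadLocal(self: @This()) u32 {
            return self.base + 1;
        }
    };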
- fn genWrapErrUnionPayload(self: *Context, inst: *Inst.UnOp) InnerError!WValue {
- return self.resolveInst(inst.operand);
+ fn airWrapErrUnionPayload(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ return self.resolveInst(ty_op.operand);
}
};