path: root/src/codegen
author    Andrew Kelley <andrew@ziglang.org>    2021-02-25 21:04:23 -0700
committer Andrew Kelley <andrew@ziglang.org>    2021-02-25 21:04:23 -0700
commit    0b58b617998b79a765b54f88fbe90ca2798b3d3e (patch)
tree      ca6cc4b6bcc2b93166d196049ee49416afe781ad /src/codegen
parent    dc325669e360f7a9dfa24f85a62fa386529dade6 (diff)
parent    fd208d9d5913a0929e444deb97b91092c427bb14 (diff)
Merge remote-tracking branch 'origin/master' into llvm12
Conflicts:
* src/clang.zig
* src/llvm.zig - this file got moved to src/llvm/bindings.zig in master branch so I had to put the new LLVM arch/os enum tags into it.
* lib/std/target.zig, src/stage1/target.cpp - haiku had an inconsistency with its default target ABI, gnu vs eabi. In this commit we make it gnu in both places to match the latest changes by @hoanga.
* src/translate_c.zig
Diffstat (limited to 'src/codegen')
-rw-r--r--  src/codegen/aarch64.zig        291
-rw-r--r--  src/codegen/arm.zig            168
-rw-r--r--  src/codegen/c.zig              995
-rw-r--r--  src/codegen/llvm.zig           752
-rw-r--r--  src/codegen/llvm/bindings.zig  614
-rw-r--r--  src/codegen/spirv.zig           49
-rw-r--r--  src/codegen/spirv/spec.zig    1645
-rw-r--r--  src/codegen/wasm.zig           642
8 files changed, 4689 insertions, 467 deletions
diff --git a/src/codegen/aarch64.zig b/src/codegen/aarch64.zig
index 0e9ad61745..8abc616e2f 100644
--- a/src/codegen/aarch64.zig
+++ b/src/codegen/aarch64.zig
@@ -64,7 +64,7 @@ pub const callee_preserved_regs = [_]Register{
};
pub const c_abi_int_param_regs = [_]Register{ .x0, .x1, .x2, .x3, .x4, .x5, .x6, .x7 };
-pub const c_abi_int_return_regs = [_]Register{ .x0, .x1 };
+pub const c_abi_int_return_regs = [_]Register{ .x0, .x1, .x2, .x3, .x4, .x5, .x6, .x7 };
test "Register.id" {
testing.expectEqual(@as(u5, 0), Register.x0.id());
@@ -200,17 +200,6 @@ test "FloatingPointRegister.toX" {
/// Represents an instruction in the AArch64 instruction set
pub const Instruction = union(enum) {
- OrShiftedRegister: packed struct {
- rd: u5,
- rn: u5,
- imm6: u6,
- rm: u5,
- n: u1,
- shift: u2,
- fixed: u5 = 0b01010,
- opc: u2 = 0b01,
- sf: u1,
- },
MoveWideImmediate: packed struct {
rd: u5,
imm16: u16,
@@ -274,10 +263,47 @@ pub const Instruction = union(enum) {
NoOperation: packed struct {
fixed: u32 = 0b1101010100_0_00_011_0010_0000_000_11111,
},
+ LogicalShiftedRegister: packed struct {
+ rd: u5,
+ rn: u5,
+ imm6: u6,
+ rm: u5,
+ n: u1,
+ shift: u2,
+ fixed: u5 = 0b01010,
+ opc: u2,
+ sf: u1,
+ },
+ AddSubtractImmediate: packed struct {
+ rd: u5,
+ rn: u5,
+ imm12: u12,
+ sh: u1,
+ fixed: u6 = 0b100010,
+ s: u1,
+ op: u1,
+ sf: u1,
+ },
+
+ pub const Shift = struct {
+ shift: Type = .lsl,
+ amount: u6 = 0,
+
+ pub const Type = enum(u2) {
+ lsl,
+ lsr,
+ asr,
+ ror,
+ };
+
+ pub const none = Shift{
+ .shift = .lsl,
+ .amount = 0,
+ };
+ };
pub fn toU32(self: Instruction) u32 {
return switch (self) {
- .OrShiftedRegister => |v| @bitCast(u32, v),
.MoveWideImmediate => |v| @bitCast(u32, v),
.PCRelativeAddress => |v| @bitCast(u32, v),
.LoadStoreRegister => |v| @bitCast(u32, v),
@@ -287,68 +313,11 @@ pub const Instruction = union(enum) {
.UnconditionalBranchRegister => |v| @bitCast(u32, v),
.UnconditionalBranchImmediate => |v| @bitCast(u32, v),
.NoOperation => |v| @bitCast(u32, v),
+ .LogicalShiftedRegister => |v| @bitCast(u32, v),
+ .AddSubtractImmediate => |v| @bitCast(u32, v),
};
}
- pub const RegisterShift = struct {
- rn: u5,
- imm6: u6,
- shift: enum(u2) {
- Lsl = 0,
- Lsr = 1,
- Asr = 2,
- Ror = 3,
- },
-
- pub fn none() RegisterShift {
- return .{
- .rn = 0b11111,
- .imm6 = 0,
- .shift = .Lsl,
- };
- }
- };
-
- // Helper functions for assembly syntax functions
-
- fn orShiftedRegister(
- rd: Register,
- rm: Register,
- shift: RegisterShift,
- invert: bool,
- ) Instruction {
- const n: u1 = if (invert) 1 else 0;
- switch (rd.size()) {
- 32 => {
- return Instruction{
- .OrShiftedRegister = .{
- .rd = rd.id(),
- .rn = shift.rn,
- .imm6 = shift.imm6,
- .rm = rm.id(),
- .n = n,
- .shift = @enumToInt(shift.shift),
- .sf = 0,
- },
- };
- },
- 64 => {
- return Instruction{
- .OrShiftedRegister = .{
- .rd = rd.id(),
- .rn = shift.rn,
- .imm6 = shift.imm6,
- .rm = rm.id(),
- .n = n,
- .shift = @enumToInt(shift.shift),
- .sf = 1,
- },
- };
- },
- else => unreachable, // unexpected register size
- }
- }
-
fn moveWideImmediate(
opc: u2,
rd: Register,
@@ -671,16 +640,75 @@ pub const Instruction = union(enum) {
};
}
- // Bitwise (inclusive) OR of a register value
-
- pub fn orr(rd: Register, rm: Register, shift: RegisterShift) Instruction {
- return orShiftedRegister(rd, rm, shift, false);
+ fn logicalShiftedRegister(
+ opc: u2,
+ n: u1,
+ shift: Shift,
+ rd: Register,
+ rn: Register,
+ rm: Register,
+ ) Instruction {
+ switch (rd.size()) {
+ 32 => {
+ assert(shift.amount < 32);
+ return Instruction{
+ .LogicalShiftedRegister = .{
+ .rd = rd.id(),
+ .rn = rn.id(),
+ .imm6 = shift.amount,
+ .rm = rm.id(),
+ .n = n,
+ .shift = @enumToInt(shift.shift),
+ .opc = opc,
+ .sf = 0b0,
+ },
+ };
+ },
+ 64 => {
+ return Instruction{
+ .LogicalShiftedRegister = .{
+ .rd = rd.id(),
+ .rn = rn.id(),
+ .imm6 = shift.amount,
+ .rm = rm.id(),
+ .n = n,
+ .shift = @enumToInt(shift.shift),
+ .opc = opc,
+ .sf = 0b1,
+ },
+ };
+ },
+ else => unreachable, // unexpected register size
+ }
}
- pub fn orn(rd: Register, rm: Register, shift: RegisterShift) Instruction {
- return orShiftedRegister(rd, rm, shift, true);
+ fn addSubtractImmediate(
+ op: u1,
+ s: u1,
+ rd: Register,
+ rn: Register,
+ imm12: u12,
+ shift: bool,
+ ) Instruction {
+ return Instruction{
+ .AddSubtractImmediate = .{
+ .rd = rd.id(),
+ .rn = rn.id(),
+ .imm12 = imm12,
+ .sh = @boolToInt(shift),
+ .s = s,
+ .op = op,
+ .sf = switch (rd.size()) {
+ 32 => 0b0,
+ 64 => 0b1,
+ else => unreachable, // unexpected register size
+ },
+ },
+ };
}
+ // Helper functions for assembly syntax functions
+
// Move wide (immediate)
pub fn movn(rd: Register, imm16: u16, shift: u6) Instruction {
@@ -707,17 +735,18 @@ pub const Instruction = union(enum) {
// Load or store register
- pub const LdrArgs = struct {
- rn: ?Register = null,
- offset: LoadStoreOffset = LoadStoreOffset.none,
- literal: ?u19 = null,
+ pub const LdrArgs = union(enum) {
+ register: struct {
+ rn: Register,
+ offset: LoadStoreOffset = LoadStoreOffset.none,
+ },
+ literal: u19,
};
pub fn ldr(rt: Register, args: LdrArgs) Instruction {
- if (args.rn) |rn| {
- return loadStoreRegister(rt, rn, args.offset, true);
- } else {
- return loadLiteral(rt, args.literal.?);
+ switch (args) {
+ .register => |info| return loadStoreRegister(rt, info.rn, info.offset, true),
+ .literal => |literal| return loadLiteral(rt, literal),
}
}
@@ -821,11 +850,63 @@ pub const Instruction = union(enum) {
// Nop
pub fn nop() Instruction {
- return Instruction{ .NoOperation = {} };
+ return Instruction{ .NoOperation = .{} };
+ }
+
+ // Logical (shifted register)
+
+ pub fn @"and"(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
+ return logicalShiftedRegister(0b00, 0b0, shift, rd, rn, rm);
+ }
+
+ pub fn bic(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
+ return logicalShiftedRegister(0b00, 0b1, shift, rd, rn, rm);
+ }
+
+ pub fn orr(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
+ return logicalShiftedRegister(0b01, 0b0, shift, rd, rn, rm);
+ }
+
+ pub fn orn(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
+ return logicalShiftedRegister(0b01, 0b1, shift, rd, rn, rm);
+ }
+
+ pub fn eor(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
+ return logicalShiftedRegister(0b10, 0b0, shift, rd, rn, rm);
+ }
+
+ pub fn eon(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
+ return logicalShiftedRegister(0b10, 0b1, shift, rd, rn, rm);
+ }
+
+ pub fn ands(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
+ return logicalShiftedRegister(0b11, 0b0, shift, rd, rn, rm);
+ }
+
+ pub fn bics(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
+ return logicalShiftedRegister(0b11, 0b1, shift, rd, rn, rm);
+ }
+
+ // Add/subtract (immediate)
+
+ pub fn add(rd: Register, rn: Register, imm: u12, shift: bool) Instruction {
+ return addSubtractImmediate(0b0, 0b0, rd, rn, imm, shift);
+ }
+
+ pub fn adds(rd: Register, rn: Register, imm: u12, shift: bool) Instruction {
+ return addSubtractImmediate(0b0, 0b1, rd, rn, imm, shift);
+ }
+
+ pub fn sub(rd: Register, rn: Register, imm: u12, shift: bool) Instruction {
+ return addSubtractImmediate(0b1, 0b0, rd, rn, imm, shift);
+ }
+
+ pub fn subs(rd: Register, rn: Register, imm: u12, shift: bool) Instruction {
+ return addSubtractImmediate(0b1, 0b1, rd, rn, imm, shift);
}
};
-test "" {
+test {
testing.refAllDecls(@This());
}
@@ -836,15 +917,15 @@ test "serialize instructions" {
};
const testcases = [_]Testcase{
- .{ // orr x0 x1
- .inst = Instruction.orr(.x0, .x1, Instruction.RegisterShift.none()),
+ .{ // orr x0, xzr, x1
+ .inst = Instruction.orr(.x0, .xzr, .x1, Instruction.Shift.none),
.expected = 0b1_01_01010_00_0_00001_000000_11111_00000,
},
- .{ // orn x0 x1
- .inst = Instruction.orn(.x0, .x1, Instruction.RegisterShift.none()),
+ .{ // orn x0, xzr, x1
+ .inst = Instruction.orn(.x0, .xzr, .x1, Instruction.Shift.none),
.expected = 0b1_01_01010_00_1_00001_000000_11111_00000,
},
- .{ // movz x1 #4
+ .{ // movz x1, #4
.inst = Instruction.movz(.x1, 4, 0),
.expected = 0b1_10_100101_00_0000000000000100_00001,
},
@@ -885,19 +966,19 @@ test "serialize instructions" {
.expected = 0b1_00101_00_0000_0000_0000_0000_0000_0100,
},
.{ // ldr x2, [x1]
- .inst = Instruction.ldr(.x2, .{ .rn = .x1 }),
+ .inst = Instruction.ldr(.x2, .{ .register = .{ .rn = .x1 } }),
.expected = 0b11_111_0_01_01_000000000000_00001_00010,
},
.{ // ldr x2, [x1, #1]!
- .inst = Instruction.ldr(.x2, .{ .rn = .x1, .offset = Instruction.LoadStoreOffset.imm_pre_index(1) }),
+ .inst = Instruction.ldr(.x2, .{ .register = .{ .rn = .x1, .offset = Instruction.LoadStoreOffset.imm_pre_index(1) } }),
.expected = 0b11_111_0_00_01_0_000000001_11_00001_00010,
},
.{ // ldr x2, [x1], #-1
- .inst = Instruction.ldr(.x2, .{ .rn = .x1, .offset = Instruction.LoadStoreOffset.imm_post_index(-1) }),
+ .inst = Instruction.ldr(.x2, .{ .register = .{ .rn = .x1, .offset = Instruction.LoadStoreOffset.imm_post_index(-1) } }),
.expected = 0b11_111_0_00_01_0_111111111_01_00001_00010,
},
.{ // ldr x2, [x1], (x3)
- .inst = Instruction.ldr(.x2, .{ .rn = .x1, .offset = Instruction.LoadStoreOffset.reg(.x3) }),
+ .inst = Instruction.ldr(.x2, .{ .register = .{ .rn = .x1, .offset = Instruction.LoadStoreOffset.reg(.x3) } }),
.expected = 0b11_111_0_00_01_1_00011_011_0_10_00001_00010,
},
.{ // ldr x2, label
@@ -944,6 +1025,22 @@ test "serialize instructions" {
.inst = Instruction.ldp(.x1, .x2, Register.sp, Instruction.LoadStorePairOffset.post_index(16)),
.expected = 0b10_101_0_001_1_0000010_00010_11111_00001,
},
+ .{ // and x0, x4, x2
+ .inst = Instruction.@"and"(.x0, .x4, .x2, .{}),
+ .expected = 0b1_00_01010_00_0_00010_000000_00100_00000,
+ },
+ .{ // and x0, x4, x2, lsl #0x8
+ .inst = Instruction.@"and"(.x0, .x4, .x2, .{ .shift = .lsl, .amount = 0x8 }),
+ .expected = 0b1_00_01010_00_0_00010_001000_00100_00000,
+ },
+ .{ // add x0, x10, #10
+ .inst = Instruction.add(.x0, .x10, 10, false),
+ .expected = 0b1_0_0_100010_0_0000_0000_1010_01010_00000,
+ },
+ .{ // subs x0, x5, #11, lsl #12
+ .inst = Instruction.subs(.x0, .x5, 11, true),
+ .expected = 0b1_1_1_100010_1_0000_0000_1011_00101_00000,
+ },
};
for (testcases) |case| {
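
For orientation, the new aarch64 helpers are pure encoders: each public function such as orr() or add() builds an Instruction union value, and toU32() flattens its packed struct into the raw machine word, exactly as the test cases above verify. A minimal usage sketch follows; the import path and the emit buffer are illustrative, not part of this commit:

    const std = @import("std");
    const aarch64 = @import("codegen/aarch64.zig"); // hypothetical import path
    const Instruction = aarch64.Instruction;

    /// Encodes `orr x0, xzr, x1` (a register-to-register move) followed by
    /// `add x1, x1, #4, lsl #12`, appending both as little-endian 32-bit words.
    fn emitExample(code: *std.ArrayList(u8)) !void {
        const mov = Instruction.orr(.x0, .xzr, .x1, Instruction.Shift.none);
        const add = Instruction.add(.x1, .x1, 4, true);
        try code.writer().writeIntLittle(u32, mov.toU32());
        try code.writer().writeIntLittle(u32, add.toU32());
    }
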
diff --git a/src/codegen/arm.zig b/src/codegen/arm.zig
index 33ff789648..d538d28c50 100644
--- a/src/codegen/arm.zig
+++ b/src/codegen/arm.zig
@@ -35,8 +35,74 @@ pub const Condition = enum(u4) {
le,
/// always
al,
+
+ /// Converts a std.math.CompareOperator into a condition flag,
+ /// i.e. returns the condition that is true iff the result of the
+ /// comparison is true. Assumes signed comparison
+ pub fn fromCompareOperatorSigned(op: std.math.CompareOperator) Condition {
+ return switch (op) {
+ .gte => .ge,
+ .gt => .gt,
+ .neq => .ne,
+ .lt => .lt,
+ .lte => .le,
+ .eq => .eq,
+ };
+ }
+
+ /// Converts a std.math.CompareOperator into a condition flag,
+ /// i.e. returns the condition that is true iff the result of the
+ /// comparison is true. Assumes unsigned comparison
+ pub fn fromCompareOperatorUnsigned(op: std.math.CompareOperator) Condition {
+ return switch (op) {
+ .gte => .cs,
+ .gt => .hi,
+ .neq => .ne,
+ .lt => .cc,
+ .lte => .ls,
+ .eq => .eq,
+ };
+ }
+
+ /// Returns the condition which is true iff the given condition is
+ /// false (if such a condition exists)
+ pub fn negate(cond: Condition) Condition {
+ return switch (cond) {
+ .eq => .ne,
+ .ne => .eq,
+ .cs => .cc,
+ .cc => .cs,
+ .mi => .pl,
+ .pl => .mi,
+ .vs => .vc,
+ .vc => .vs,
+ .hi => .ls,
+ .ls => .hi,
+ .ge => .lt,
+ .lt => .ge,
+ .gt => .le,
+ .le => .gt,
+ .al => unreachable,
+ };
+ }
};
+test "condition from CompareOperator" {
+ testing.expectEqual(@as(Condition, .eq), Condition.fromCompareOperatorSigned(.eq));
+ testing.expectEqual(@as(Condition, .eq), Condition.fromCompareOperatorUnsigned(.eq));
+
+ testing.expectEqual(@as(Condition, .gt), Condition.fromCompareOperatorSigned(.gt));
+ testing.expectEqual(@as(Condition, .hi), Condition.fromCompareOperatorUnsigned(.gt));
+
+ testing.expectEqual(@as(Condition, .le), Condition.fromCompareOperatorSigned(.lte));
+ testing.expectEqual(@as(Condition, .ls), Condition.fromCompareOperatorUnsigned(.lte));
+}
+
+test "negate condition" {
+ testing.expectEqual(@as(Condition, .eq), Condition.ne.negate());
+ testing.expectEqual(@as(Condition, .ne), Condition.eq.negate());
+}
+
/// Represents a register in the ARM instruction set architecture
pub const Register = enum(u5) {
r0,
@@ -120,7 +186,7 @@ pub const Psr = enum {
spsr,
};
-pub const callee_preserved_regs = [_]Register{ .r0, .r1, .r2, .r3, .r4, .r5, .r6, .r7, .r8, .r10 };
+pub const callee_preserved_regs = [_]Register{ .r4, .r5, .r6, .r7, .r8, .r10 };
pub const c_abi_int_param_regs = [_]Register{ .r0, .r1, .r2, .r3 };
pub const c_abi_int_return_regs = [_]Register{ .r0, .r1 };
@@ -174,6 +240,22 @@ pub const Instruction = union(enum) {
fixed: u2 = 0b01,
cond: u4,
},
+ ExtraLoadStore: packed struct {
+ imm4l: u4,
+ fixed_1: u1 = 0b1,
+ op2: u2,
+ fixed_2: u1 = 0b1,
+ imm4h: u4,
+ rt: u4,
+ rn: u4,
+ o1: u1,
+ write_back: u1,
+ imm: u1,
+ up_down: u1,
+ pre_index: u1,
+ fixed_3: u3 = 0b000,
+ cond: u4,
+ },
BlockDataTransfer: packed struct {
register_list: u16,
rn: u4,
@@ -402,6 +484,29 @@ pub const Instruction = union(enum) {
}
};
+ /// Represents the offset operand of an extra load or store
+ /// instruction.
+ pub const ExtraLoadStoreOffset = union(enum) {
+ immediate: u8,
+ register: u4,
+
+ pub const none = ExtraLoadStoreOffset{
+ .immediate = 0,
+ };
+
+ pub fn reg(register: Register) ExtraLoadStoreOffset {
+ return ExtraLoadStoreOffset{
+ .register = register.id(),
+ };
+ }
+
+ pub fn imm(immediate: u8) ExtraLoadStoreOffset {
+ return ExtraLoadStoreOffset{
+ .immediate = immediate,
+ };
+ }
+ };
+
/// Represents the register list operand to a block data transfer
/// instruction
pub const RegisterList = packed struct {
@@ -429,6 +534,7 @@ pub const Instruction = union(enum) {
.Multiply => |v| @bitCast(u32, v),
.MultiplyLong => |v| @bitCast(u32, v),
.SingleDataTransfer => |v| @bitCast(u32, v),
+ .ExtraLoadStore => |v| @bitCast(u32, v),
.BlockDataTransfer => |v| @bitCast(u32, v),
.Branch => |v| @bitCast(u32, v),
.BranchExchange => |v| @bitCast(u32, v),
@@ -551,6 +657,43 @@ pub const Instruction = union(enum) {
};
}
+ fn extraLoadStore(
+ cond: Condition,
+ pre_index: bool,
+ positive: bool,
+ write_back: bool,
+ o1: u1,
+ op2: u2,
+ rn: Register,
+ rt: Register,
+ offset: ExtraLoadStoreOffset,
+ ) Instruction {
+ const imm4l: u4 = switch (offset) {
+ .immediate => |imm| @truncate(u4, imm),
+ .register => |reg| reg,
+ };
+ const imm4h: u4 = switch (offset) {
+ .immediate => |imm| @truncate(u4, imm >> 4),
+ .register => |reg| 0b0000,
+ };
+
+ return Instruction{
+ .ExtraLoadStore = .{
+ .imm4l = imm4l,
+ .op2 = op2,
+ .imm4h = imm4h,
+ .rt = rt.id(),
+ .rn = rn.id(),
+ .o1 = o1,
+ .write_back = @boolToInt(write_back),
+ .imm = @boolToInt(offset == .immediate),
+ .up_down = @boolToInt(positive),
+ .pre_index = @boolToInt(pre_index),
+ .cond = @enumToInt(cond),
+ },
+ };
+ }
+
fn blockDataTransfer(
cond: Condition,
rn: Register,
@@ -847,6 +990,23 @@ pub const Instruction = union(enum) {
return singleDataTransfer(cond, rd, rn, args.offset, args.pre_index, args.positive, 1, args.write_back, 0);
}
+ // Extra load/store
+
+ pub const ExtraLoadStoreOffsetArgs = struct {
+ pre_index: bool = true,
+ positive: bool = true,
+ offset: ExtraLoadStoreOffset,
+ write_back: bool = false,
+ };
+
+ pub fn strh(cond: Condition, rt: Register, rn: Register, args: ExtraLoadStoreOffsetArgs) Instruction {
+ return extraLoadStore(cond, args.pre_index, args.positive, args.write_back, 0, 0b01, rn, rt, args.offset);
+ }
+
+ pub fn ldrh(cond: Condition, rt: Register, rn: Register, args: ExtraLoadStoreOffsetArgs) Instruction {
+ return extraLoadStore(cond, args.pre_index, args.positive, args.write_back, 1, 0b01, rn, rt, args.offset);
+ }
+
// Block data transfer
pub fn ldmda(cond: Condition, rn: Register, write_back: bool, reg_list: RegisterList) Instruction {
@@ -1027,6 +1187,12 @@ test "serialize instructions" {
}),
.expected = 0b1110_01_0_1_1_0_0_0_0011_0000_000000000000,
},
+ .{ // strh r1, [r5]
+ .inst = Instruction.strh(.al, .r1, .r5, .{
+ .offset = Instruction.ExtraLoadStoreOffset.none,
+ }),
+ .expected = 0b1110_000_1_1_1_0_0_0101_0001_0000_1011_0000,
+ },
.{ // b #12
.inst = Instruction.b(.al, 12),
.expected = 0b1110_101_0_0000_0000_0000_0000_0000_0011,
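
The ARM additions fit together the same way: fromCompareOperatorSigned/Unsigned map a std.math.CompareOperator to a condition code (the mappings differ because unsigned comparisons use the carry and higher flags), negate() yields the condition for the opposite branch, and strh/ldrh encode the new halfword extra load/store forms. A hedged sketch, with an illustrative import path:

    const std = @import("std");
    const arm = @import("codegen/arm.zig"); // hypothetical import path
    const Condition = arm.Condition;
    const Instruction = arm.Instruction;

    test "example: unsigned compare condition and halfword store" {
        // An unsigned `>` selects `hi`; the inverse branch then uses `ls`.
        const cond = Condition.fromCompareOperatorUnsigned(.gt);
        std.testing.expectEqual(@as(Condition, .ls), cond.negate());

        // strh r1, [r5, #2]
        const inst = Instruction.strh(.al, .r1, .r5, .{
            .offset = Instruction.ExtraLoadStoreOffset.imm(2),
        });
        _ = inst.toU32();
    }
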
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index e562ce320e..a885b984ac 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -1,312 +1,774 @@
const std = @import("std");
+const assert = std.debug.assert;
+const mem = std.mem;
+const log = std.log.scoped(.c);
const link = @import("../link.zig");
const Module = @import("../Module.zig");
-
-const Inst = @import("../ir.zig").Inst;
+const Compilation = @import("../Compilation.zig");
+const ir = @import("../ir.zig");
+const Inst = ir.Inst;
const Value = @import("../value.zig").Value;
const Type = @import("../type.zig").Type;
-
+const TypedValue = @import("../TypedValue.zig");
const C = link.File.C;
const Decl = Module.Decl;
-const mem = std.mem;
+const trace = @import("../tracy.zig").trace;
-const indentation = " ";
+const Mutability = enum { Const, Mut };
-/// Maps a name from Zig source to C. Currently, this will always give the same
-/// output for any given input, sometimes resulting in broken identifiers.
-fn map(allocator: *std.mem.Allocator, name: []const u8) ![]const u8 {
- return allocator.dupe(u8, name);
-}
+pub const CValue = union(enum) {
+ none: void,
+ /// Index into local_names
+ local: usize,
+ /// Index into local_names, but take the address.
+ local_ref: usize,
+ /// A constant instruction, to be rendered inline.
+ constant: *Inst,
+ /// Index into the parameters
+ arg: usize,
+ /// By-value
+ decl: *Decl,
+};
-fn renderType(ctx: *Context, writer: std.ArrayList(u8).Writer, T: Type) !void {
- switch (T.zigTypeTag()) {
- .NoReturn => {
- try writer.writeAll("zig_noreturn void");
- },
- .Void => try writer.writeAll("void"),
- .Int => {
- if (T.tag() == .u8) {
- ctx.file.need_stdint = true;
- try writer.writeAll("uint8_t");
- } else if (T.tag() == .usize) {
- ctx.file.need_stddef = true;
- try writer.writeAll("size_t");
- } else {
- return ctx.file.fail(ctx.decl.src(), "TODO implement int types", .{});
- }
- },
- else => |e| return ctx.file.fail(ctx.decl.src(), "TODO implement type {}", .{e}),
+pub const CValueMap = std.AutoHashMap(*Inst, CValue);
+
+/// This data is available when outputting .c code for a Module.
+/// It is not available when generating .h file.
+pub const Object = struct {
+ dg: DeclGen,
+ gpa: *mem.Allocator,
+ code: std.ArrayList(u8),
+ value_map: CValueMap,
+ next_arg_index: usize = 0,
+ next_local_index: usize = 0,
+ next_block_index: usize = 0,
+ indent_writer: IndentWriter(std.ArrayList(u8).Writer),
+
+ fn resolveInst(o: *Object, inst: *Inst) !CValue {
+ if (inst.value()) |_| {
+ return CValue{ .constant = inst };
+ }
+ return o.value_map.get(inst).?; // Instruction does not dominate all uses!
}
-}
-fn renderValue(ctx: *Context, writer: std.ArrayList(u8).Writer, T: Type, val: Value) !void {
- switch (T.zigTypeTag()) {
- .Int => {
- if (T.isSignedInt())
- return writer.print("{}", .{val.toSignedInt()});
- return writer.print("{}", .{val.toUnsignedInt()});
- },
- else => |e| return ctx.file.fail(ctx.decl.src(), "TODO implement value {}", .{e}),
+ fn allocLocalValue(o: *Object) CValue {
+ const result = o.next_local_index;
+ o.next_local_index += 1;
+ return .{ .local = result };
}
-}
-fn renderFunctionSignature(ctx: *Context, writer: std.ArrayList(u8).Writer, decl: *Decl) !void {
- const tv = decl.typed_value.most_recent.typed_value;
- try renderType(ctx, writer, tv.ty.fnReturnType());
- // Use the child allocator directly, as we know the name can be freed before
- // the rest of the arena.
- const name = try map(ctx.arena.child_allocator, mem.spanZ(decl.name));
- defer ctx.arena.child_allocator.free(name);
- try writer.print(" {}(", .{name});
- var param_len = tv.ty.fnParamLen();
- if (param_len == 0)
- try writer.writeAll("void")
- else {
- var index: usize = 0;
- while (index < param_len) : (index += 1) {
- if (index > 0) {
- try writer.writeAll(", ");
- }
- try renderType(ctx, writer, tv.ty.fnParamType(index));
- try writer.print(" arg{}", .{index});
- }
+ fn allocLocal(o: *Object, ty: Type, mutability: Mutability) !CValue {
+ const local_value = o.allocLocalValue();
+ try o.renderTypeAndName(o.writer(), ty, local_value, mutability);
+ return local_value;
}
- try writer.writeByte(')');
-}
-pub fn generate(file: *C, decl: *Decl) !void {
- switch (decl.typed_value.most_recent.typed_value.ty.zigTypeTag()) {
- .Fn => try genFn(file, decl),
- .Array => try genArray(file, decl),
- else => |e| return file.fail(decl.src(), "TODO {}", .{e}),
+ fn writer(o: *Object) IndentWriter(std.ArrayList(u8).Writer).Writer {
+ return o.indent_writer.writer();
}
-}
-fn genArray(file: *C, decl: *Decl) !void {
- const tv = decl.typed_value.most_recent.typed_value;
- // TODO: prevent inline asm constants from being emitted
- const name = try map(file.base.allocator, mem.span(decl.name));
- defer file.base.allocator.free(name);
- if (tv.val.cast(Value.Payload.Bytes)) |payload|
- if (tv.ty.sentinel()) |sentinel|
- if (sentinel.toUnsignedInt() == 0)
- // TODO: static by default
- try file.constants.writer().print("const char *const {} = \"{}\";\n", .{ name, payload.data })
- else
- return file.fail(decl.src(), "TODO byte arrays with non-zero sentinels", .{})
- else
- return file.fail(decl.src(), "TODO byte arrays without sentinels", .{})
- else
- return file.fail(decl.src(), "TODO non-byte arrays", .{});
-}
+ fn writeCValue(o: *Object, w: anytype, c_value: CValue) !void {
+ switch (c_value) {
+ .none => unreachable,
+ .local => |i| return w.print("t{d}", .{i}),
+ .local_ref => |i| return w.print("&t{d}", .{i}),
+ .constant => |inst| return o.dg.renderValue(w, inst.ty, inst.value().?),
+ .arg => |i| return w.print("a{d}", .{i}),
+ .decl => |decl| return w.writeAll(mem.span(decl.name)),
+ }
+ }
+
+ fn renderTypeAndName(
+ o: *Object,
+ w: anytype,
+ ty: Type,
+ name: CValue,
+ mutability: Mutability,
+ ) error{ OutOfMemory, AnalysisFail }!void {
+ var suffix = std.ArrayList(u8).init(o.gpa);
+ defer suffix.deinit();
+
+ var render_ty = ty;
+ while (render_ty.zigTypeTag() == .Array) {
+ const sentinel_bit = @boolToInt(render_ty.sentinel() != null);
+ const c_len = render_ty.arrayLen() + sentinel_bit;
+ try suffix.writer().print("[{d}]", .{c_len});
+ render_ty = render_ty.elemType();
+ }
+
+ try o.dg.renderType(w, render_ty);
-const Context = struct {
- file: *C,
+ const const_prefix = switch (mutability) {
+ .Const => "const ",
+ .Mut => "",
+ };
+ try w.print(" {s}", .{const_prefix});
+ try o.writeCValue(w, name);
+ try w.writeAll(suffix.items);
+ }
+};
+
+/// This data is available both when outputting .c code and when outputting an .h file.
+pub const DeclGen = struct {
+ module: *Module,
decl: *Decl,
- inst_map: *std.AutoHashMap(*Inst, []u8),
- arena: *std.heap.ArenaAllocator,
- argdex: usize = 0,
- unnamed_index: usize = 0,
-
- fn resolveInst(self: *Context, inst: *Inst) ![]u8 {
- if (inst.cast(Inst.Constant)) |const_inst| {
- var out = std.ArrayList(u8).init(&self.arena.allocator);
- try renderValue(self, out.writer(), inst.ty, const_inst.val);
- return out.toOwnedSlice();
+ fwd_decl: std.ArrayList(u8),
+ error_msg: ?*Module.ErrorMsg,
+
+ fn fail(dg: *DeclGen, src: usize, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } {
+ dg.error_msg = try Module.ErrorMsg.create(dg.module.gpa, .{
+ .file_scope = dg.decl.getFileScope(),
+ .byte_offset = src,
+ }, format, args);
+ return error.AnalysisFail;
+ }
+
+ fn renderValue(
+ dg: *DeclGen,
+ writer: anytype,
+ t: Type,
+ val: Value,
+ ) error{ OutOfMemory, AnalysisFail }!void {
+ if (val.isUndef()) {
+ return dg.fail(dg.decl.src(), "TODO: C backend: properly handle undefined in all cases (with debug safety?)", .{});
}
- if (self.inst_map.get(inst)) |val| {
- return val;
+ switch (t.zigTypeTag()) {
+ .Int => {
+ if (t.isSignedInt())
+ return writer.print("{d}", .{val.toSignedInt()});
+ return writer.print("{d}", .{val.toUnsignedInt()});
+ },
+ .Pointer => switch (val.tag()) {
+ .undef, .zero => try writer.writeAll("0"),
+ .one => try writer.writeAll("1"),
+ .decl_ref => {
+ const decl = val.castTag(.decl_ref).?.data;
+
+ // Determine if we must pointer cast.
+ const decl_tv = decl.typed_value.most_recent.typed_value;
+ if (t.eql(decl_tv.ty)) {
+ try writer.print("&{s}", .{decl.name});
+ } else {
+ try writer.writeAll("(");
+ try dg.renderType(writer, t);
+ try writer.print(")&{s}", .{decl.name});
+ }
+ },
+ .function => {
+ const func = val.castTag(.function).?.data;
+ try writer.print("{s}", .{func.owner_decl.name});
+ },
+ .extern_fn => {
+ const decl = val.castTag(.extern_fn).?.data;
+ try writer.print("{s}", .{decl.name});
+ },
+ else => |e| return dg.fail(
+ dg.decl.src(),
+ "TODO: C backend: implement Pointer value {s}",
+ .{@tagName(e)},
+ ),
+ },
+ .Array => {
+ // First try specific tag representations for more efficiency.
+ switch (val.tag()) {
+ .undef, .empty_struct_value, .empty_array => try writer.writeAll("{}"),
+ .bytes => {
+ const bytes = val.castTag(.bytes).?.data;
+ // TODO: make our own C string escape instead of using std.zig.fmtEscapes
+ try writer.print("\"{}\"", .{std.zig.fmtEscapes(bytes)});
+ },
+ else => {
+ // Fall back to generic implementation.
+ var arena = std.heap.ArenaAllocator.init(dg.module.gpa);
+ defer arena.deinit();
+
+ try writer.writeAll("{");
+ var index: usize = 0;
+ const len = t.arrayLen();
+ const elem_ty = t.elemType();
+ while (index < len) : (index += 1) {
+ if (index != 0) try writer.writeAll(",");
+ const elem_val = try val.elemValue(&arena.allocator, index);
+ try dg.renderValue(writer, elem_ty, elem_val);
+ }
+ if (t.sentinel()) |sentinel_val| {
+ if (index != 0) try writer.writeAll(",");
+ try dg.renderValue(writer, elem_ty, sentinel_val);
+ }
+ try writer.writeAll("}");
+ },
+ }
+ },
+ .Bool => return writer.print("{}", .{val.toBool()}),
+ else => |e| return dg.fail(dg.decl.src(), "TODO: C backend: implement value {s}", .{
+ @tagName(e),
+ }),
+ }
+ }
+
+ fn renderFunctionSignature(dg: *DeclGen, w: anytype, is_global: bool) !void {
+ if (!is_global) {
+ try w.writeAll("static ");
+ }
+ const tv = dg.decl.typed_value.most_recent.typed_value;
+ try dg.renderType(w, tv.ty.fnReturnType());
+ const decl_name = mem.span(dg.decl.name);
+ try w.print(" {s}(", .{decl_name});
+ var param_len = tv.ty.fnParamLen();
+ if (param_len == 0)
+ try w.writeAll("void")
+ else {
+ var index: usize = 0;
+ while (index < param_len) : (index += 1) {
+ if (index > 0) {
+ try w.writeAll(", ");
+ }
+ try dg.renderType(w, tv.ty.fnParamType(index));
+ try w.print(" a{d}", .{index});
+ }
}
- unreachable;
+ try w.writeByte(')');
}
- fn name(self: *Context) ![]u8 {
- const val = try std.fmt.allocPrint(&self.arena.allocator, "__temp_{}", .{self.unnamed_index});
- self.unnamed_index += 1;
- return val;
+ fn renderType(dg: *DeclGen, w: anytype, t: Type) error{ OutOfMemory, AnalysisFail }!void {
+ switch (t.zigTypeTag()) {
+ .NoReturn => {
+ try w.writeAll("zig_noreturn void");
+ },
+ .Void => try w.writeAll("void"),
+ .Bool => try w.writeAll("bool"),
+ .Int => {
+ switch (t.tag()) {
+ .u8 => try w.writeAll("uint8_t"),
+ .i8 => try w.writeAll("int8_t"),
+ .u16 => try w.writeAll("uint16_t"),
+ .i16 => try w.writeAll("int16_t"),
+ .u32 => try w.writeAll("uint32_t"),
+ .i32 => try w.writeAll("int32_t"),
+ .u64 => try w.writeAll("uint64_t"),
+ .i64 => try w.writeAll("int64_t"),
+ .usize => try w.writeAll("uintptr_t"),
+ .isize => try w.writeAll("intptr_t"),
+ .c_short => try w.writeAll("short"),
+ .c_ushort => try w.writeAll("unsigned short"),
+ .c_int => try w.writeAll("int"),
+ .c_uint => try w.writeAll("unsigned int"),
+ .c_long => try w.writeAll("long"),
+ .c_ulong => try w.writeAll("unsigned long"),
+ .c_longlong => try w.writeAll("long long"),
+ .c_ulonglong => try w.writeAll("unsigned long long"),
+ .int_signed, .int_unsigned => {
+ const info = t.intInfo(dg.module.getTarget());
+ const sign_prefix = switch (info.signedness) {
+ .signed => "",
+ .unsigned => "u",
+ };
+ inline for (.{ 8, 16, 32, 64, 128 }) |nbits| {
+ if (info.bits <= nbits) {
+ try w.print("{s}int{d}_t", .{ sign_prefix, nbits });
+ break;
+ }
+ } else {
+ return dg.fail(dg.decl.src(), "TODO: C backend: implement integer types larger than 128 bits", .{});
+ }
+ },
+ else => unreachable,
+ }
+ },
+ .Pointer => {
+ if (t.isSlice()) {
+ return dg.fail(dg.decl.src(), "TODO: C backend: implement slices", .{});
+ } else {
+ try dg.renderType(w, t.elemType());
+ try w.writeAll(" *");
+ if (t.isConstPtr()) {
+ try w.writeAll("const ");
+ }
+ if (t.isVolatilePtr()) {
+ try w.writeAll("volatile ");
+ }
+ }
+ },
+ .Array => {
+ try dg.renderType(w, t.elemType());
+ try w.writeAll(" *");
+ },
+ .Null, .Undefined => unreachable, // must be const or comptime
+ else => |e| return dg.fail(dg.decl.src(), "TODO: C backend: implement type {s}", .{
+ @tagName(e),
+ }),
+ }
}
- fn deinit(self: *Context) void {
- self.* = undefined;
+ fn functionIsGlobal(dg: *DeclGen, tv: TypedValue) bool {
+ switch (tv.val.tag()) {
+ .extern_fn => return true,
+ .function => {
+ const func = tv.val.castTag(.function).?.data;
+ return dg.module.decl_exports.contains(func.owner_decl);
+ },
+ else => unreachable,
+ }
}
};
-fn genFn(file: *C, decl: *Decl) !void {
- const writer = file.main.writer();
- const tv = decl.typed_value.most_recent.typed_value;
-
- var arena = std.heap.ArenaAllocator.init(file.base.allocator);
- defer arena.deinit();
- var inst_map = std.AutoHashMap(*Inst, []u8).init(&arena.allocator);
- defer inst_map.deinit();
- var ctx = Context{
- .file = file,
- .decl = decl,
- .arena = &arena,
- .inst_map = &inst_map,
- };
- defer ctx.deinit();
-
- try renderFunctionSignature(&ctx, writer, decl);
-
- try writer.writeAll(" {");
-
- const func: *Module.Fn = tv.val.cast(Value.Payload.Function).?.func;
- const instructions = func.analysis.success.instructions;
- if (instructions.len > 0) {
- try writer.writeAll("\n");
- for (instructions) |inst| {
- if (switch (inst.tag) {
- .assembly => try genAsm(&ctx, inst.castTag(.assembly).?),
- .call => try genCall(&ctx, inst.castTag(.call).?),
- .add => try genBinOp(&ctx, inst.cast(Inst.BinOp).?, "+"),
- .sub => try genBinOp(&ctx, inst.cast(Inst.BinOp).?, "-"),
- .ret => try genRet(&ctx, inst.castTag(.ret).?),
- .retvoid => try genRetVoid(&ctx),
- .arg => try genArg(&ctx),
- .dbg_stmt => try genDbgStmt(&ctx, inst.castTag(.dbg_stmt).?),
- .breakpoint => try genBreak(&ctx, inst.castTag(.breakpoint).?),
- .unreach => try genUnreach(&ctx, inst.castTag(.unreach).?),
- .intcast => try genIntCast(&ctx, inst.castTag(.intcast).?),
- else => |e| return file.fail(decl.src(), "TODO implement C codegen for {}", .{e}),
- }) |name| {
- try ctx.inst_map.putNoClobber(inst, name);
+pub fn genDecl(o: *Object) !void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ const tv = o.dg.decl.typed_value.most_recent.typed_value;
+
+ if (tv.val.castTag(.function)) |func_payload| {
+ const is_global = o.dg.functionIsGlobal(tv);
+ const fwd_decl_writer = o.dg.fwd_decl.writer();
+ if (is_global) {
+ try fwd_decl_writer.writeAll("ZIG_EXTERN_C ");
+ }
+ try o.dg.renderFunctionSignature(fwd_decl_writer, is_global);
+ try fwd_decl_writer.writeAll(";\n");
+
+ const func: *Module.Fn = func_payload.data;
+ try o.indent_writer.insertNewline();
+ try o.dg.renderFunctionSignature(o.writer(), is_global);
+
+ try o.writer().writeByte(' ');
+ try genBody(o, func.body);
+
+ try o.indent_writer.insertNewline();
+ } else if (tv.val.tag() == .extern_fn) {
+ const writer = o.writer();
+ try writer.writeAll("ZIG_EXTERN_C ");
+ try o.dg.renderFunctionSignature(writer, true);
+ try writer.writeAll(";\n");
+ } else {
+ const writer = o.writer();
+ try writer.writeAll("static ");
+
+ // TODO ask the Decl if it is const
+ // https://github.com/ziglang/zig/issues/7582
+
+ const decl_c_value: CValue = .{ .decl = o.dg.decl };
+ try o.renderTypeAndName(writer, tv.ty, decl_c_value, .Mut);
+
+ try writer.writeAll(" = ");
+ try o.dg.renderValue(writer, tv.ty, tv.val);
+ try writer.writeAll(";\n");
+ }
+}
+
+pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ const tv = dg.decl.typed_value.most_recent.typed_value;
+ const writer = dg.fwd_decl.writer();
+
+ switch (tv.ty.zigTypeTag()) {
+ .Fn => {
+ const is_global = dg.functionIsGlobal(tv);
+ if (is_global) {
+ try writer.writeAll("ZIG_EXTERN_C ");
}
+ try dg.renderFunctionSignature(writer, is_global);
+ try dg.fwd_decl.appendSlice(";\n");
+ },
+ else => {},
+ }
+}
+
+pub fn genBody(o: *Object, body: ir.Body) error{ AnalysisFail, OutOfMemory }!void {
+ const writer = o.writer();
+ if (body.instructions.len == 0) {
+ try writer.writeAll("{}");
+ return;
+ }
+
+ try writer.writeAll("{\n");
+ o.indent_writer.pushIndent();
+
+ for (body.instructions) |inst| {
+ const result_value = switch (inst.tag) {
+ .constant => unreachable, // excluded from function bodies
+ .add => try genBinOp(o, inst.castTag(.add).?, " + "),
+ .alloc => try genAlloc(o, inst.castTag(.alloc).?),
+ .arg => genArg(o),
+ .assembly => try genAsm(o, inst.castTag(.assembly).?),
+ .block => try genBlock(o, inst.castTag(.block).?),
+ .bitcast => try genBitcast(o, inst.castTag(.bitcast).?),
+ .breakpoint => try genBreakpoint(o, inst.castTag(.breakpoint).?),
+ .call => try genCall(o, inst.castTag(.call).?),
+ .cmp_eq => try genBinOp(o, inst.castTag(.cmp_eq).?, " == "),
+ .cmp_gt => try genBinOp(o, inst.castTag(.cmp_gt).?, " > "),
+ .cmp_gte => try genBinOp(o, inst.castTag(.cmp_gte).?, " >= "),
+ .cmp_lt => try genBinOp(o, inst.castTag(.cmp_lt).?, " < "),
+ .cmp_lte => try genBinOp(o, inst.castTag(.cmp_lte).?, " <= "),
+ .cmp_neq => try genBinOp(o, inst.castTag(.cmp_neq).?, " != "),
+ .dbg_stmt => try genDbgStmt(o, inst.castTag(.dbg_stmt).?),
+ .intcast => try genIntCast(o, inst.castTag(.intcast).?),
+ .load => try genLoad(o, inst.castTag(.load).?),
+ .ret => try genRet(o, inst.castTag(.ret).?),
+ .retvoid => try genRetVoid(o),
+ .store => try genStore(o, inst.castTag(.store).?),
+ .sub => try genBinOp(o, inst.castTag(.sub).?, " - "),
+ .unreach => try genUnreach(o, inst.castTag(.unreach).?),
+ .loop => try genLoop(o, inst.castTag(.loop).?),
+ .condbr => try genCondBr(o, inst.castTag(.condbr).?),
+ .br => try genBr(o, inst.castTag(.br).?),
+ .br_void => try genBrVoid(o, inst.castTag(.br_void).?.block),
+ .switchbr => try genSwitchBr(o, inst.castTag(.switchbr).?),
+ // bool_and and bool_or are non-short-circuit operations
+ .bool_and => try genBinOp(o, inst.castTag(.bool_and).?, " & "),
+ .bool_or => try genBinOp(o, inst.castTag(.bool_or).?, " | "),
+ .bit_and => try genBinOp(o, inst.castTag(.bit_and).?, " & "),
+ .bit_or => try genBinOp(o, inst.castTag(.bit_or).?, " | "),
+ .xor => try genBinOp(o, inst.castTag(.xor).?, " ^ "),
+ .not => try genUnOp(o, inst.castTag(.not).?, "!"),
+ else => |e| return o.dg.fail(o.dg.decl.src(), "TODO: C backend: implement codegen for {}", .{e}),
+ };
+ switch (result_value) {
+ .none => {},
+ else => try o.value_map.putNoClobber(inst, result_value),
}
}
- try writer.writeAll("}\n\n");
+ o.indent_writer.popIndent();
+ try writer.writeAll("}");
+}
+
+fn genAlloc(o: *Object, alloc: *Inst.NoOp) !CValue {
+ const writer = o.writer();
+
+ // First line: the variable used as data storage.
+ const elem_type = alloc.base.ty.elemType();
+ const mutability: Mutability = if (alloc.base.ty.isConstPtr()) .Const else .Mut;
+ const local = try o.allocLocal(elem_type, mutability);
+ try writer.writeAll(";\n");
+
+ return CValue{ .local_ref = local.local };
+}
+
+fn genArg(o: *Object) CValue {
+ const i = o.next_arg_index;
+ o.next_arg_index += 1;
+ return .{ .arg = i };
}
-fn genArg(ctx: *Context) !?[]u8 {
- const name = try std.fmt.allocPrint(&ctx.arena.allocator, "arg{}", .{ctx.argdex});
- ctx.argdex += 1;
- return name;
+fn genRetVoid(o: *Object) !CValue {
+ try o.writer().print("return;\n", .{});
+ return CValue.none;
}
-fn genRetVoid(ctx: *Context) !?[]u8 {
- try ctx.file.main.writer().print(indentation ++ "return;\n", .{});
- return null;
+fn genLoad(o: *Object, inst: *Inst.UnOp) !CValue {
+ const operand = try o.resolveInst(inst.operand);
+ const writer = o.writer();
+ const local = try o.allocLocal(inst.base.ty, .Const);
+ switch (operand) {
+ .local_ref => |i| {
+ const wrapped: CValue = .{ .local = i };
+ try writer.writeAll(" = ");
+ try o.writeCValue(writer, wrapped);
+ try writer.writeAll(";\n");
+ },
+ else => {
+ try writer.writeAll(" = *");
+ try o.writeCValue(writer, operand);
+ try writer.writeAll(";\n");
+ },
+ }
+ return local;
}
-fn genRet(ctx: *Context, inst: *Inst.UnOp) !?[]u8 {
- return ctx.file.fail(ctx.decl.src(), "TODO return", .{});
+fn genRet(o: *Object, inst: *Inst.UnOp) !CValue {
+ const operand = try o.resolveInst(inst.operand);
+ const writer = o.writer();
+ try writer.writeAll("return ");
+ try o.writeCValue(writer, operand);
+ try writer.writeAll(";\n");
+ return CValue.none;
}
-fn genIntCast(ctx: *Context, inst: *Inst.UnOp) !?[]u8 {
+fn genIntCast(o: *Object, inst: *Inst.UnOp) !CValue {
if (inst.base.isUnused())
- return null;
- const op = inst.operand;
- const writer = ctx.file.main.writer();
- const name = try ctx.name();
- const from = try ctx.resolveInst(inst.operand);
- try writer.writeAll(indentation ++ "const ");
- try renderType(ctx, writer, inst.base.ty);
- try writer.print(" {} = (", .{name});
- try renderType(ctx, writer, inst.base.ty);
- try writer.print("){};\n", .{from});
- return name;
+ return CValue.none;
+
+ const from = try o.resolveInst(inst.operand);
+
+ const writer = o.writer();
+ const local = try o.allocLocal(inst.base.ty, .Const);
+ try writer.writeAll(" = (");
+ try o.dg.renderType(writer, inst.base.ty);
+ try writer.writeAll(")");
+ try o.writeCValue(writer, from);
+ try writer.writeAll(";\n");
+ return local;
}
-fn genBinOp(ctx: *Context, inst: *Inst.BinOp, comptime operator: []const u8) !?[]u8 {
+fn genStore(o: *Object, inst: *Inst.BinOp) !CValue {
+ // *a = b;
+ const dest_ptr = try o.resolveInst(inst.lhs);
+ const src_val = try o.resolveInst(inst.rhs);
+
+ const writer = o.writer();
+ switch (dest_ptr) {
+ .local_ref => |i| {
+ const dest: CValue = .{ .local = i };
+ try o.writeCValue(writer, dest);
+ try writer.writeAll(" = ");
+ try o.writeCValue(writer, src_val);
+ try writer.writeAll(";\n");
+ },
+ else => {
+ try writer.writeAll("*");
+ try o.writeCValue(writer, dest_ptr);
+ try writer.writeAll(" = ");
+ try o.writeCValue(writer, src_val);
+ try writer.writeAll(";\n");
+ },
+ }
+ return CValue.none;
+}
+
+fn genBinOp(o: *Object, inst: *Inst.BinOp, operator: []const u8) !CValue {
if (inst.base.isUnused())
- return null;
- const lhs = ctx.resolveInst(inst.lhs);
- const rhs = ctx.resolveInst(inst.rhs);
- const writer = ctx.file.main.writer();
- const name = try ctx.name();
- try writer.writeAll(indentation ++ "const ");
- try renderType(ctx, writer, inst.base.ty);
- try writer.print(" {} = {} " ++ operator ++ " {};\n", .{ name, lhs, rhs });
- return name;
+ return CValue.none;
+
+ const lhs = try o.resolveInst(inst.lhs);
+ const rhs = try o.resolveInst(inst.rhs);
+
+ const writer = o.writer();
+ const local = try o.allocLocal(inst.base.ty, .Const);
+
+ try writer.writeAll(" = ");
+ try o.writeCValue(writer, lhs);
+ try writer.writeAll(operator);
+ try o.writeCValue(writer, rhs);
+ try writer.writeAll(";\n");
+
+ return local;
}
-fn genCall(ctx: *Context, inst: *Inst.Call) !?[]u8 {
- const writer = ctx.file.main.writer();
- const header = ctx.file.header.writer();
- try writer.writeAll(indentation);
+fn genUnOp(o: *Object, inst: *Inst.UnOp, operator: []const u8) !CValue {
+ if (inst.base.isUnused())
+ return CValue.none;
+
+ const operand = try o.resolveInst(inst.operand);
+
+ const writer = o.writer();
+ const local = try o.allocLocal(inst.base.ty, .Const);
+
+ try writer.print(" = {s}", .{operator});
+ try o.writeCValue(writer, operand);
+ try writer.writeAll(";\n");
+
+ return local;
+}
+
+fn genCall(o: *Object, inst: *Inst.Call) !CValue {
if (inst.func.castTag(.constant)) |func_inst| {
- if (func_inst.val.cast(Value.Payload.Function)) |func_val| {
- const target = func_val.func.owner_decl;
- const target_ty = target.typed_value.most_recent.typed_value.ty;
- const ret_ty = target_ty.fnReturnType().tag();
- if (target_ty.fnReturnType().hasCodeGenBits() and inst.base.isUnused()) {
+ const fn_decl = if (func_inst.val.castTag(.extern_fn)) |extern_fn|
+ extern_fn.data
+ else if (func_inst.val.castTag(.function)) |func_payload|
+ func_payload.data.owner_decl
+ else
+ unreachable;
+
+ const fn_ty = fn_decl.typed_value.most_recent.typed_value.ty;
+ const ret_ty = fn_ty.fnReturnType();
+ const unused_result = inst.base.isUnused();
+ var result_local: CValue = .none;
+
+ const writer = o.writer();
+ if (unused_result) {
+ if (ret_ty.hasCodeGenBits()) {
try writer.print("(void)", .{});
}
- const tname = mem.spanZ(target.name);
- if (ctx.file.called.get(tname) == null) {
- try ctx.file.called.put(tname, void{});
- try renderFunctionSignature(ctx, header, target);
- try header.writeAll(";\n");
- }
- try writer.print("{}(", .{tname});
- if (inst.args.len != 0) {
- for (inst.args) |arg, i| {
- if (i > 0) {
- try writer.writeAll(", ");
- }
- if (arg.cast(Inst.Constant)) |con| {
- try renderValue(ctx, writer, arg.ty, con.val);
- } else {
- const val = try ctx.resolveInst(arg);
- try writer.print("{}", .{val});
- }
+ } else {
+ result_local = try o.allocLocal(ret_ty, .Const);
+ try writer.writeAll(" = ");
+ }
+ const fn_name = mem.spanZ(fn_decl.name);
+ try writer.print("{s}(", .{fn_name});
+ if (inst.args.len != 0) {
+ for (inst.args) |arg, i| {
+ if (i > 0) {
+ try writer.writeAll(", ");
+ }
+ if (arg.value()) |val| {
+ try o.dg.renderValue(writer, arg.ty, val);
+ } else {
+ const val = try o.resolveInst(arg);
+ try o.writeCValue(writer, val);
}
}
- try writer.writeAll(");\n");
- } else {
- return ctx.file.fail(ctx.decl.src(), "TODO non-function call target?", .{});
}
+ try writer.writeAll(");\n");
+ return result_local;
} else {
- return ctx.file.fail(ctx.decl.src(), "TODO non-constant call inst?", .{});
+ return o.dg.fail(o.dg.decl.src(), "TODO: C backend: implement function pointers", .{});
}
- return null;
}
-fn genDbgStmt(ctx: *Context, inst: *Inst.NoOp) !?[]u8 {
+fn genDbgStmt(o: *Object, inst: *Inst.NoOp) !CValue {
// TODO emit #line directive here with line number and filename
- return null;
+ return CValue.none;
+}
+
+fn genBlock(o: *Object, inst: *Inst.Block) !CValue {
+ const block_id: usize = o.next_block_index;
+ o.next_block_index += 1;
+ const writer = o.writer();
+
+ // store the block id in relocs.capacity as it is not used for anything else in the C backend.
+ inst.codegen.relocs.capacity = block_id;
+ const result = if (inst.base.ty.tag() != .void and !inst.base.isUnused()) blk: {
+ // allocate a location for the result
+ const local = try o.allocLocal(inst.base.ty, .Mut);
+ try writer.writeAll(";\n");
+ break :blk local;
+ } else CValue{ .none = {} };
+
+ inst.codegen.mcv = @bitCast(@import("../codegen.zig").AnyMCValue, result);
+ try genBody(o, inst.body);
+ try o.indent_writer.insertNewline();
+ // label must be followed by an expression, add an empty one.
+ try writer.print("zig_block_{d}:;\n", .{block_id});
+ return result;
+}
+
+fn genBr(o: *Object, inst: *Inst.Br) !CValue {
+ const result = @bitCast(CValue, inst.block.codegen.mcv);
+ const writer = o.writer();
+
+ // If result is .none then the value of the block is unused.
+ if (inst.operand.ty.tag() != .void and result != .none) {
+ const operand = try o.resolveInst(inst.operand);
+ try o.writeCValue(writer, result);
+ try writer.writeAll(" = ");
+ try o.writeCValue(writer, operand);
+ try writer.writeAll(";\n");
+ }
+
+ return genBrVoid(o, inst.block);
}
-fn genBreak(ctx: *Context, inst: *Inst.NoOp) !?[]u8 {
- // TODO ??
- return null;
+fn genBrVoid(o: *Object, block: *Inst.Block) !CValue {
+ try o.writer().print("goto zig_block_{d};\n", .{block.codegen.relocs.capacity});
+ return CValue.none;
}
-fn genUnreach(ctx: *Context, inst: *Inst.NoOp) !?[]u8 {
- try ctx.file.main.writer().writeAll(indentation ++ "zig_unreachable();\n");
- return null;
+fn genBitcast(o: *Object, inst: *Inst.UnOp) !CValue {
+ const operand = try o.resolveInst(inst.operand);
+
+ const writer = o.writer();
+ if (inst.base.ty.zigTypeTag() == .Pointer and inst.operand.ty.zigTypeTag() == .Pointer) {
+ const local = try o.allocLocal(inst.base.ty, .Const);
+ try writer.writeAll(" = (");
+ try o.dg.renderType(writer, inst.base.ty);
+
+ try writer.writeAll(")");
+ try o.writeCValue(writer, operand);
+ try writer.writeAll(";\n");
+ return local;
+ }
+
+ const local = try o.allocLocal(inst.base.ty, .Mut);
+ try writer.writeAll(";\n");
+
+ try writer.writeAll("memcpy(&");
+ try o.writeCValue(writer, local);
+ try writer.writeAll(", &");
+ try o.writeCValue(writer, operand);
+ try writer.writeAll(", sizeof ");
+ try o.writeCValue(writer, local);
+ try writer.writeAll(");\n");
+
+ return local;
}
-fn genAsm(ctx: *Context, as: *Inst.Assembly) !?[]u8 {
- const writer = ctx.file.main.writer();
- try writer.writeAll(indentation);
+fn genBreakpoint(o: *Object, inst: *Inst.NoOp) !CValue {
+ try o.writer().writeAll("zig_breakpoint();\n");
+ return CValue.none;
+}
+
+fn genUnreach(o: *Object, inst: *Inst.NoOp) !CValue {
+ try o.writer().writeAll("zig_unreachable();\n");
+ return CValue.none;
+}
+
+fn genLoop(o: *Object, inst: *Inst.Loop) !CValue {
+ try o.writer().writeAll("while (true) ");
+ try genBody(o, inst.body);
+ try o.indent_writer.insertNewline();
+ return CValue.none;
+}
+
+fn genCondBr(o: *Object, inst: *Inst.CondBr) !CValue {
+ const cond = try o.resolveInst(inst.condition);
+ const writer = o.writer();
+
+ try writer.writeAll("if (");
+ try o.writeCValue(writer, cond);
+ try writer.writeAll(") ");
+ try genBody(o, inst.then_body);
+ try writer.writeAll(" else ");
+ try genBody(o, inst.else_body);
+ try o.indent_writer.insertNewline();
+
+ return CValue.none;
+}
+
+fn genSwitchBr(o: *Object, inst: *Inst.SwitchBr) !CValue {
+ const target = try o.resolveInst(inst.target);
+ const writer = o.writer();
+
+ try writer.writeAll("switch (");
+ try o.writeCValue(writer, target);
+ try writer.writeAll(") {\n");
+ o.indent_writer.pushIndent();
+
+ for (inst.cases) |case| {
+ try writer.writeAll("case ");
+ try o.dg.renderValue(writer, inst.target.ty, case.item);
+ try writer.writeAll(": ");
+ // the case body must be noreturn so we don't need to insert a break
+ try genBody(o, case.body);
+ try o.indent_writer.insertNewline();
+ }
+
+ try writer.writeAll("default: ");
+ try genBody(o, inst.else_body);
+ try o.indent_writer.insertNewline();
+
+ o.indent_writer.popIndent();
+ try writer.writeAll("}\n");
+ return CValue.none;
+}
+
+fn genAsm(o: *Object, as: *Inst.Assembly) !CValue {
+ if (as.base.isUnused() and !as.is_volatile)
+ return CValue.none;
+
+ const writer = o.writer();
for (as.inputs) |i, index| {
if (i[0] == '{' and i[i.len - 1] == '}') {
const reg = i[1 .. i.len - 1];
const arg = as.args[index];
+ const arg_c_value = try o.resolveInst(arg);
try writer.writeAll("register ");
- try renderType(ctx, writer, arg.ty);
- try writer.print(" {}_constant __asm__(\"{}\") = ", .{ reg, reg });
- // TODO merge constant handling into inst_map as well
- if (arg.castTag(.constant)) |c| {
- try renderValue(ctx, writer, arg.ty, c.val);
- try writer.writeAll(";\n ");
- } else {
- const gop = try ctx.inst_map.getOrPut(arg);
- if (!gop.found_existing) {
- return ctx.file.fail(ctx.decl.src(), "Internal error in C backend: asm argument not found in inst_map", .{});
- }
- try writer.print("{};\n ", .{gop.entry.value});
- }
+ try o.dg.renderType(writer, arg.ty);
+
+ try writer.print(" {s}_constant __asm__(\"{s}\") = ", .{ reg, reg });
+ try o.writeCValue(writer, arg_c_value);
+ try writer.writeAll(";\n");
} else {
- return ctx.file.fail(ctx.decl.src(), "TODO non-explicit inline asm regs", .{});
+ return o.dg.fail(o.dg.decl.src(), "TODO non-explicit inline asm regs", .{});
}
}
- try writer.print("__asm {} (\"{}\"", .{ if (as.is_volatile) @as([]const u8, "volatile") else "", as.asm_source });
- if (as.output) |o| {
- return ctx.file.fail(ctx.decl.src(), "TODO inline asm output", .{});
+ const volatile_string: []const u8 = if (as.is_volatile) "volatile " else "";
+ try writer.print("__asm {s}(\"{s}\"", .{ volatile_string, as.asm_source });
+ if (as.output) |_| {
+ return o.dg.fail(o.dg.decl.src(), "TODO inline asm output", .{});
}
if (as.inputs.len > 0) {
if (as.output == null) {
@@ -320,7 +782,7 @@ fn genAsm(ctx: *Context, as: *Inst.Assembly) !?[]u8 {
if (index > 0) {
try writer.writeAll(", ");
}
- try writer.print("\"\"({}_constant)", .{reg});
+ try writer.print("\"r\"({s}_constant)", .{reg});
} else {
// This is blocked by the earlier test
unreachable;
@@ -328,5 +790,62 @@ fn genAsm(ctx: *Context, as: *Inst.Assembly) !?[]u8 {
}
}
try writer.writeAll(");\n");
- return null;
+
+ if (as.base.isUnused())
+ return CValue.none;
+
+ return o.dg.fail(o.dg.decl.src(), "TODO: C backend: inline asm expression result used", .{});
+}
+
+fn IndentWriter(comptime UnderlyingWriter: type) type {
+ return struct {
+ const Self = @This();
+ pub const Error = UnderlyingWriter.Error;
+ pub const Writer = std.io.Writer(*Self, Error, write);
+
+ pub const indent_delta = 4;
+
+ underlying_writer: UnderlyingWriter,
+ indent_count: usize = 0,
+ current_line_empty: bool = true,
+
+ pub fn writer(self: *Self) Writer {
+ return .{ .context = self };
+ }
+
+ pub fn write(self: *Self, bytes: []const u8) Error!usize {
+ if (bytes.len == 0) return @as(usize, 0);
+
+ const current_indent = self.indent_count * Self.indent_delta;
+ if (self.current_line_empty and current_indent > 0) {
+ try self.underlying_writer.writeByteNTimes(' ', current_indent);
+ }
+ self.current_line_empty = false;
+
+ return self.writeNoIndent(bytes);
+ }
+
+ pub fn insertNewline(self: *Self) Error!void {
+ _ = try self.writeNoIndent("\n");
+ }
+
+ pub fn pushIndent(self: *Self) void {
+ self.indent_count += 1;
+ }
+
+ pub fn popIndent(self: *Self) void {
+ assert(self.indent_count != 0);
+ self.indent_count -= 1;
+ }
+
+ fn writeNoIndent(self: *Self, bytes: []const u8) Error!usize {
+ if (bytes.len == 0) return @as(usize, 0);
+
+ try self.underlying_writer.writeAll(bytes);
+ if (bytes[bytes.len - 1] == '\n') {
+ self.current_line_empty = true;
+ }
+ return bytes.len;
+ }
+ };
}
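
The IndentWriter introduced above is what keeps the emitted C readable: it wraps any std.io writer, prefixes each fresh line with indent_count * indent_delta spaces, and re-arms itself whenever it writes a newline. A small behavioral sketch; IndentWriter is private to c.zig, so this assumes it were in scope:

    const std = @import("std");

    test "example: IndentWriter indents fresh lines" {
        var buf = std.ArrayList(u8).init(std.testing.allocator);
        defer buf.deinit();

        var iw = IndentWriter(std.ArrayList(u8).Writer){ .underlying_writer = buf.writer() };
        try iw.writer().writeAll("int main(void) {\n");
        iw.pushIndent();
        try iw.writer().writeAll("return 0;\n"); // rendered as "    return 0;\n"
        iw.popIndent();
        try iw.writer().writeAll("}\n");

        std.testing.expectEqualStrings("int main(void) {\n    return 0;\n}\n", buf.items);
    }
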
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index ea8e25c214..3caa95d466 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -1,7 +1,21 @@
const std = @import("std");
+const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
+const Compilation = @import("../Compilation.zig");
+const llvm = @import("llvm/bindings.zig");
+const link = @import("../link.zig");
+const log = std.log.scoped(.codegen);
+const math = std.math;
-pub fn targetTriple(allocator: *Allocator, target: std.Target) ![]u8 {
+const Module = @import("../Module.zig");
+const TypedValue = @import("../TypedValue.zig");
+const ir = @import("../ir.zig");
+const Inst = ir.Inst;
+
+const Value = @import("../value.zig").Value;
+const Type = @import("../type.zig").Type;
+
+pub fn targetTriple(allocator: *Allocator, target: std.Target) ![:0]u8 {
const llvm_arch = switch (target.cpu.arch) {
.arm => "arm",
.armeb => "armeb",
@@ -56,6 +70,8 @@ pub fn targetTriple(allocator: *Allocator, target: std.Target) ![]u8 {
.renderscript64 => "renderscript64",
.ve => "ve",
.spu_2 => return error.LLVMBackendDoesNotSupportSPUMarkII,
+ .spirv32 => return error.LLVMBackendDoesNotSupportSPIRV,
+ .spirv64 => return error.LLVMBackendDoesNotSupportSPIRV,
};
// TODO Add a sub-arch for some architectures depending on CPU features.
@@ -96,6 +112,9 @@ pub fn targetTriple(allocator: *Allocator, target: std.Target) ![]u8 {
.wasi => "wasi",
.emscripten => "emscripten",
.uefi => "windows",
+ .opencl => return error.LLVMBackendDoesNotSupportOpenCL,
+ .glsl450 => return error.LLVMBackendDoesNotSupportGLSL450,
+ .vulkan => return error.LLVMBackendDoesNotSupportVulkan,
.other => "unknown",
};
@@ -122,5 +141,734 @@ pub fn targetTriple(allocator: *Allocator, target: std.Target) ![]u8 {
.macabi => "macabi",
};
- return std.fmt.allocPrint(allocator, "{}-unknown-{}-{}", .{ llvm_arch, llvm_os, llvm_abi });
+ return std.fmt.allocPrintZ(allocator, "{s}-unknown-{s}-{s}", .{ llvm_arch, llvm_os, llvm_abi });
}
+
+pub const LLVMIRModule = struct {
+ module: *Module,
+ llvm_module: *const llvm.Module,
+ context: *const llvm.Context,
+ target_machine: *const llvm.TargetMachine,
+ builder: *const llvm.Builder,
+
+ object_path: []const u8,
+
+ gpa: *Allocator,
+ err_msg: ?*Module.ErrorMsg = null,
+
+ // TODO: The fields below should really move into a different struct,
+ // because they are only valid when generating a function
+
+ /// This stores the LLVM values used in a function, such that they can be
+ /// referred to in other instructions. This table is cleared before every function is generated.
+ /// TODO: Change this to a stack of Branch. Currently we store all the values from all the blocks
+ /// in here, however if a block ends, the instructions can be thrown away.
+ func_inst_table: std.AutoHashMapUnmanaged(*Inst, *const llvm.Value) = .{},
+
+    /// These fields are used to refer to the LLVM value of the function parameters in an Arg instruction.
+ args: []*const llvm.Value = &[_]*const llvm.Value{},
+ arg_index: usize = 0,
+
+ entry_block: *const llvm.BasicBlock = undefined,
+    /// This field stores the last alloca instruction, such that we can append more alloca instructions
+ /// to the top of the function.
+ latest_alloca_inst: ?*const llvm.Value = null,
+
+ llvm_func: *const llvm.Value = undefined,
+
+ /// This data structure is used to implement breaking to blocks.
+ blocks: std.AutoHashMapUnmanaged(*Inst.Block, struct {
+ parent_bb: *const llvm.BasicBlock,
+ break_bbs: *BreakBasicBlocks,
+ break_vals: *BreakValues,
+ }) = .{},
+
+ src_loc: Module.SrcLoc,
+
+ const BreakBasicBlocks = std.ArrayListUnmanaged(*const llvm.BasicBlock);
+ const BreakValues = std.ArrayListUnmanaged(*const llvm.Value);
+
+ pub fn create(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*LLVMIRModule {
+ const self = try allocator.create(LLVMIRModule);
+ errdefer allocator.destroy(self);
+
+ const gpa = options.module.?.gpa;
+
+ const obj_basename = try std.zig.binNameAlloc(gpa, .{
+ .root_name = options.root_name,
+ .target = options.target,
+ .output_mode = .Obj,
+ });
+ defer gpa.free(obj_basename);
+
+ const o_directory = options.module.?.zig_cache_artifact_directory;
+ const object_path = try o_directory.join(gpa, &[_][]const u8{obj_basename});
+ errdefer gpa.free(object_path);
+
+ const context = llvm.Context.create();
+ errdefer context.dispose();
+
+ initializeLLVMTargets();
+
+ const root_nameZ = try gpa.dupeZ(u8, options.root_name);
+ defer gpa.free(root_nameZ);
+ const llvm_module = llvm.Module.createWithName(root_nameZ.ptr, context);
+ errdefer llvm_module.dispose();
+
+ const llvm_target_triple = try targetTriple(gpa, options.target);
+ defer gpa.free(llvm_target_triple);
+
+ var error_message: [*:0]const u8 = undefined;
+ var target: *const llvm.Target = undefined;
+ if (llvm.Target.getFromTriple(llvm_target_triple.ptr, &target, &error_message)) {
+ defer llvm.disposeMessage(error_message);
+
+ const stderr = std.io.getStdErr().writer();
+ try stderr.print(
+ \\Zig is expecting LLVM to understand this target: '{s}'
+ \\However LLVM responded with: "{s}"
+ \\Zig is unable to continue. This is a bug in Zig:
+ \\https://github.com/ziglang/zig/issues/438
+ \\
+ ,
+ .{
+ llvm_target_triple,
+ error_message,
+ },
+ );
+ return error.InvalidLLVMTriple;
+ }
+
+ const opt_level: llvm.CodeGenOptLevel = if (options.optimize_mode == .Debug) .None else .Aggressive;
+ const target_machine = llvm.TargetMachine.create(
+ target,
+ llvm_target_triple.ptr,
+ "",
+ "",
+ opt_level,
+ .Static,
+ .Default,
+ );
+ errdefer target_machine.dispose();
+
+ const builder = context.createBuilder();
+ errdefer builder.dispose();
+
+ self.* = .{
+ .module = options.module.?,
+ .llvm_module = llvm_module,
+ .context = context,
+ .target_machine = target_machine,
+ .builder = builder,
+ .object_path = object_path,
+ .gpa = gpa,
+ // TODO move this field into a struct that is only instantiated per gen() call
+ .src_loc = undefined,
+ };
+ return self;
+ }
+
+ pub fn deinit(self: *LLVMIRModule, allocator: *Allocator) void {
+ self.builder.dispose();
+ self.target_machine.dispose();
+ self.llvm_module.dispose();
+ self.context.dispose();
+
+ self.func_inst_table.deinit(self.gpa);
+ self.gpa.free(self.object_path);
+
+ self.blocks.deinit(self.gpa);
+
+ allocator.destroy(self);
+ }
+
+ fn initializeLLVMTargets() void {
+ llvm.initializeAllTargets();
+ llvm.initializeAllTargetInfos();
+ llvm.initializeAllTargetMCs();
+ llvm.initializeAllAsmPrinters();
+ llvm.initializeAllAsmParsers();
+ }
+
+ pub fn flushModule(self: *LLVMIRModule, comp: *Compilation) !void {
+ if (comp.verbose_llvm_ir) {
+ const dump = self.llvm_module.printToString();
+ defer llvm.disposeMessage(dump);
+
+ const stderr = std.io.getStdErr().writer();
+ try stderr.writeAll(std.mem.spanZ(dump));
+ }
+
+ {
+ var error_message: [*:0]const u8 = undefined;
+            // verifyModule always allocates the error_message even if there is no error
+ defer llvm.disposeMessage(error_message);
+
+ if (self.llvm_module.verify(.ReturnStatus, &error_message)) {
+ const stderr = std.io.getStdErr().writer();
+ try stderr.print("broken LLVM module found: {s}\nThis is a bug in the Zig compiler.", .{error_message});
+ return error.BrokenLLVMModule;
+ }
+ }
+
+ const object_pathZ = try self.gpa.dupeZ(u8, self.object_path);
+ defer self.gpa.free(object_pathZ);
+
+ var error_message: [*:0]const u8 = undefined;
+ if (self.target_machine.emitToFile(
+ self.llvm_module,
+ object_pathZ.ptr,
+ .ObjectFile,
+ &error_message,
+ )) {
+ defer llvm.disposeMessage(error_message);
+
+ const stderr = std.io.getStdErr().writer();
+ try stderr.print("LLVM failed to emit file: {s}\n", .{error_message});
+ return error.FailedToEmit;
+ }
+ }
+
+ pub fn updateDecl(self: *LLVMIRModule, module: *Module, decl: *Module.Decl) !void {
+ self.gen(module, decl) catch |err| switch (err) {
+ error.CodegenFail => {
+ decl.analysis = .codegen_failure;
+ try module.failed_decls.put(module.gpa, decl, self.err_msg.?);
+ self.err_msg = null;
+ return;
+ },
+ else => |e| return e,
+ };
+ }
+
+ fn gen(self: *LLVMIRModule, module: *Module, decl: *Module.Decl) !void {
+ const typed_value = decl.typed_value.most_recent.typed_value;
+ const src = decl.src();
+
+ self.src_loc = decl.srcLoc();
+
+ log.debug("gen: {s} type: {}, value: {}", .{ decl.name, typed_value.ty, typed_value.val });
+
+ if (typed_value.val.castTag(.function)) |func_payload| {
+ const func = func_payload.data;
+
+ const llvm_func = try self.resolveLLVMFunction(func.owner_decl, src);
+
+ // This gets the LLVM values from the function and stores them in `self.args`.
+ const fn_param_len = func.owner_decl.typed_value.most_recent.typed_value.ty.fnParamLen();
+ var args = try self.gpa.alloc(*const llvm.Value, fn_param_len);
+ defer self.gpa.free(args);
+
+ for (args) |*arg, i| {
+ arg.* = llvm.getParam(llvm_func, @intCast(c_uint, i));
+ }
+ self.args = args;
+ self.arg_index = 0;
+
+ // Make sure no other LLVM values from other functions can be referenced
+ self.func_inst_table.clearRetainingCapacity();
+
+            // We remove all the basic blocks of a function in order to
+            // support incremental compilation.
+            // TODO: remove all basic blocks once functions can have more than one
+ if (llvm_func.getFirstBasicBlock()) |bb| {
+ bb.deleteBasicBlock();
+ }
+
+ self.entry_block = self.context.appendBasicBlock(llvm_func, "Entry");
+ self.builder.positionBuilderAtEnd(self.entry_block);
+ self.latest_alloca_inst = null;
+ self.llvm_func = llvm_func;
+
+ try self.genBody(func.body);
+ } else if (typed_value.val.castTag(.extern_fn)) |extern_fn| {
+ _ = try self.resolveLLVMFunction(extern_fn.data, src);
+ } else {
+ _ = try self.resolveGlobalDecl(decl, src);
+ }
+ }
+
+ fn genBody(self: *LLVMIRModule, body: ir.Body) error{ OutOfMemory, CodegenFail }!void {
+ for (body.instructions) |inst| {
+ const opt_value = switch (inst.tag) {
+ .add => try self.genAdd(inst.castTag(.add).?),
+ .alloc => try self.genAlloc(inst.castTag(.alloc).?),
+ .arg => try self.genArg(inst.castTag(.arg).?),
+ .bitcast => try self.genBitCast(inst.castTag(.bitcast).?),
+ .block => try self.genBlock(inst.castTag(.block).?),
+ .br => try self.genBr(inst.castTag(.br).?),
+ .breakpoint => try self.genBreakpoint(inst.castTag(.breakpoint).?),
+ .call => try self.genCall(inst.castTag(.call).?),
+ .cmp_eq => try self.genCmp(inst.castTag(.cmp_eq).?, .eq),
+ .cmp_gt => try self.genCmp(inst.castTag(.cmp_gt).?, .gt),
+ .cmp_gte => try self.genCmp(inst.castTag(.cmp_gte).?, .gte),
+ .cmp_lt => try self.genCmp(inst.castTag(.cmp_lt).?, .lt),
+ .cmp_lte => try self.genCmp(inst.castTag(.cmp_lte).?, .lte),
+ .cmp_neq => try self.genCmp(inst.castTag(.cmp_neq).?, .neq),
+ .condbr => try self.genCondBr(inst.castTag(.condbr).?),
+ .intcast => try self.genIntCast(inst.castTag(.intcast).?),
+ .load => try self.genLoad(inst.castTag(.load).?),
+ .loop => try self.genLoop(inst.castTag(.loop).?),
+ .not => try self.genNot(inst.castTag(.not).?),
+ .ret => try self.genRet(inst.castTag(.ret).?),
+ .retvoid => self.genRetVoid(inst.castTag(.retvoid).?),
+ .store => try self.genStore(inst.castTag(.store).?),
+ .sub => try self.genSub(inst.castTag(.sub).?),
+ .unreach => self.genUnreach(inst.castTag(.unreach).?),
+ .dbg_stmt => blk: {
+ // TODO: implement debug info
+ break :blk null;
+ },
+ else => |tag| return self.fail(inst.src, "TODO implement LLVM codegen for Zir instruction: {}", .{tag}),
+ };
+ if (opt_value) |val| try self.func_inst_table.putNoClobber(self.gpa, inst, val);
+ }
+ }
+
+ fn genCall(self: *LLVMIRModule, inst: *Inst.Call) !?*const llvm.Value {
+ if (inst.func.value()) |func_value| {
+ const fn_decl = if (func_value.castTag(.extern_fn)) |extern_fn|
+ extern_fn.data
+ else if (func_value.castTag(.function)) |func_payload|
+ func_payload.data.owner_decl
+ else
+ unreachable;
+
+ const zig_fn_type = fn_decl.typed_value.most_recent.typed_value.ty;
+ const llvm_fn = try self.resolveLLVMFunction(fn_decl, inst.base.src);
+
+ const num_args = inst.args.len;
+
+ const llvm_param_vals = try self.gpa.alloc(*const llvm.Value, num_args);
+ defer self.gpa.free(llvm_param_vals);
+
+ for (inst.args) |arg, i| {
+ llvm_param_vals[i] = try self.resolveInst(arg);
+ }
+
+ // TODO: LLVMBuildCall2 handles opaque function pointers, according to llvm docs
+ // Do we need that?
+ const call = self.builder.buildCall(
+ llvm_fn,
+ if (num_args == 0) null else llvm_param_vals.ptr,
+ @intCast(c_uint, num_args),
+ "",
+ );
+
+ const return_type = zig_fn_type.fnReturnType();
+ if (return_type.tag() == .noreturn) {
+ _ = self.builder.buildUnreachable();
+ }
+
+ // No need to store the LLVM value if the return type is void or noreturn
+ if (!return_type.hasCodeGenBits()) return null;
+
+ return call;
+ } else {
+ return self.fail(inst.base.src, "TODO implement calling runtime known function pointer LLVM backend", .{});
+ }
+ }
+
+ fn genRetVoid(self: *LLVMIRModule, inst: *Inst.NoOp) ?*const llvm.Value {
+ _ = self.builder.buildRetVoid();
+ return null;
+ }
+
+ fn genRet(self: *LLVMIRModule, inst: *Inst.UnOp) !?*const llvm.Value {
+ _ = self.builder.buildRet(try self.resolveInst(inst.operand));
+ return null;
+ }
+
+ fn genCmp(self: *LLVMIRModule, inst: *Inst.BinOp, op: math.CompareOperator) !?*const llvm.Value {
+ const lhs = try self.resolveInst(inst.lhs);
+ const rhs = try self.resolveInst(inst.rhs);
+
+ if (!inst.base.ty.isInt())
+ if (inst.base.ty.tag() != .bool)
+ return self.fail(inst.base.src, "TODO implement 'genCmp' for type {}", .{inst.base.ty});
+
+ const is_signed = inst.base.ty.isSignedInt();
+ const operation = switch (op) {
+ .eq => .EQ,
+ .neq => .NE,
+ .lt => @as(llvm.IntPredicate, if (is_signed) .SLT else .ULT),
+ .lte => @as(llvm.IntPredicate, if (is_signed) .SLE else .ULE),
+ .gt => @as(llvm.IntPredicate, if (is_signed) .SGT else .UGT),
+ .gte => @as(llvm.IntPredicate, if (is_signed) .SGE else .UGE),
+ };
+
+ return self.builder.buildICmp(operation, lhs, rhs, "");
+ }
+
+ fn genBlock(self: *LLVMIRModule, inst: *Inst.Block) !?*const llvm.Value {
+ const parent_bb = self.context.createBasicBlock("Block");
+
+ // 5 breaks to a block seems like a reasonable default.
+ var break_bbs = try BreakBasicBlocks.initCapacity(self.gpa, 5);
+ var break_vals = try BreakValues.initCapacity(self.gpa, 5);
+ try self.blocks.putNoClobber(self.gpa, inst, .{
+ .parent_bb = parent_bb,
+ .break_bbs = &break_bbs,
+ .break_vals = &break_vals,
+ });
+ defer {
+ self.blocks.removeAssertDiscard(inst);
+ break_bbs.deinit(self.gpa);
+ break_vals.deinit(self.gpa);
+ }
+
+ try self.genBody(inst.body);
+
+ self.llvm_func.appendExistingBasicBlock(parent_bb);
+ self.builder.positionBuilderAtEnd(parent_bb);
+
+        // If the block does not return a value, we don't have to create a phi node.
+ if (!inst.base.ty.hasCodeGenBits()) return null;
+
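+        // Shape of the generated IR (an illustrative sketch): every break to
+        // this block recorded a (basic block, value) pair, and the phi node
+        // merges them:
+        //   Block:
+        //     %result = phi T [ %val0, %bb0 ], [ %val1, %bb1 ], ...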
+ const phi_node = self.builder.buildPhi(try self.getLLVMType(inst.base.ty, inst.base.src), "");
+ phi_node.addIncoming(
+ break_vals.items.ptr,
+ break_bbs.items.ptr,
+ @intCast(c_uint, break_vals.items.len),
+ );
+ return phi_node;
+ }
+
+ fn genBr(self: *LLVMIRModule, inst: *Inst.Br) !?*const llvm.Value {
+ // Get the block that we want to break to.
+ var block = self.blocks.get(inst.block).?;
+ _ = self.builder.buildBr(block.parent_bb);
+
+        // If the break does not carry a value, then we don't have to add
+        // anything to the phi lists.
+ if (!inst.operand.ty.hasCodeGenBits()) return null;
+
+ // For the phi node, we need the basic blocks and the values of the
+ // break instructions.
+ try block.break_bbs.append(self.gpa, self.builder.getInsertBlock());
+
+ const val = try self.resolveInst(inst.operand);
+ try block.break_vals.append(self.gpa, val);
+
+ return null;
+ }
+
+ fn genCondBr(self: *LLVMIRModule, inst: *Inst.CondBr) !?*const llvm.Value {
+ const condition_value = try self.resolveInst(inst.condition);
+
+ const then_block = self.context.appendBasicBlock(self.llvm_func, "Then");
+ const else_block = self.context.appendBasicBlock(self.llvm_func, "Else");
+ {
+ const prev_block = self.builder.getInsertBlock();
+ defer self.builder.positionBuilderAtEnd(prev_block);
+
+ self.builder.positionBuilderAtEnd(then_block);
+ try self.genBody(inst.then_body);
+
+ self.builder.positionBuilderAtEnd(else_block);
+ try self.genBody(inst.else_body);
+ }
+ _ = self.builder.buildCondBr(condition_value, then_block, else_block);
+ return null;
+ }
+
+ fn genLoop(self: *LLVMIRModule, inst: *Inst.Loop) !?*const llvm.Value {
+ const loop_block = self.context.appendBasicBlock(self.llvm_func, "Loop");
+ _ = self.builder.buildBr(loop_block);
+
+ self.builder.positionBuilderAtEnd(loop_block);
+ try self.genBody(inst.body);
+
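+        // The back-edge: the loop repeats unconditionally; any exit happens
+        // through a break out of an enclosing block inside the body.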
+ _ = self.builder.buildBr(loop_block);
+ return null;
+ }
+
+ fn genNot(self: *LLVMIRModule, inst: *Inst.UnOp) !?*const llvm.Value {
+ return self.builder.buildNot(try self.resolveInst(inst.operand), "");
+ }
+
+ fn genUnreach(self: *LLVMIRModule, inst: *Inst.NoOp) ?*const llvm.Value {
+ _ = self.builder.buildUnreachable();
+ return null;
+ }
+
+ fn genAdd(self: *LLVMIRModule, inst: *Inst.BinOp) !?*const llvm.Value {
+ const lhs = try self.resolveInst(inst.lhs);
+ const rhs = try self.resolveInst(inst.rhs);
+
+ if (!inst.base.ty.isInt())
+ return self.fail(inst.base.src, "TODO implement 'genAdd' for type {}", .{inst.base.ty});
+
+ return if (inst.base.ty.isSignedInt())
+ self.builder.buildNSWAdd(lhs, rhs, "")
+ else
+ self.builder.buildNUWAdd(lhs, rhs, "");
+ }
+
+ fn genSub(self: *LLVMIRModule, inst: *Inst.BinOp) !?*const llvm.Value {
+ const lhs = try self.resolveInst(inst.lhs);
+ const rhs = try self.resolveInst(inst.rhs);
+
+ if (!inst.base.ty.isInt())
+ return self.fail(inst.base.src, "TODO implement 'genSub' for type {}", .{inst.base.ty});
+
+ return if (inst.base.ty.isSignedInt())
+ self.builder.buildNSWSub(lhs, rhs, "")
+ else
+ self.builder.buildNUWSub(lhs, rhs, "");
+ }
+
+ fn genIntCast(self: *LLVMIRModule, inst: *Inst.UnOp) !?*const llvm.Value {
+ const val = try self.resolveInst(inst.operand);
+
+ const signed = inst.base.ty.isSignedInt();
+ // TODO: Should we use intcast here or just a simple bitcast?
+        // LLVM picks truncation, bit casting, or sign/zero extension in the intcast depending on the sizes.
+ return self.builder.buildIntCast2(val, try self.getLLVMType(inst.base.ty, inst.base.src), signed, "");
+ }
+
+ fn genBitCast(self: *LLVMIRModule, inst: *Inst.UnOp) !?*const llvm.Value {
+ const val = try self.resolveInst(inst.operand);
+ const dest_type = try self.getLLVMType(inst.base.ty, inst.base.src);
+
+ return self.builder.buildBitCast(val, dest_type, "");
+ }
+
+ fn genArg(self: *LLVMIRModule, inst: *Inst.Arg) !?*const llvm.Value {
+ const arg_val = self.args[self.arg_index];
+ self.arg_index += 1;
+
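+        // Round-trip the argument through a stack slot so it behaves like any
+        // other local. (A sketch of the rationale; the store/load pair is
+        // expected to be optimized away by LLVM's mem2reg pass in release
+        // builds.)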
+ const ptr_val = self.buildAlloca(try self.getLLVMType(inst.base.ty, inst.base.src));
+ _ = self.builder.buildStore(arg_val, ptr_val);
+ return self.builder.buildLoad(ptr_val, "");
+ }
+
+ fn genAlloc(self: *LLVMIRModule, inst: *Inst.NoOp) !?*const llvm.Value {
+ // buildAlloca expects the pointee type, not the pointer type, so assert that
+ // a Payload.PointerSimple is passed to the alloc instruction.
+ const pointee_type = inst.base.ty.castPointer().?.data;
+
+ // TODO: figure out a way to get the name of the var decl.
+ // TODO: set alignment and volatile
+ return self.buildAlloca(try self.getLLVMType(pointee_type, inst.base.src));
+ }
+
+ /// Use this instead of builder.buildAlloca, because this function makes sure to
+ /// put the alloca instruction at the top of the function!
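+    /// A sketch of the resulting entry block after a few calls (hypothetical
+    /// IR, for illustration only):
+    ///   Entry:
+    ///     %0 = alloca i32   ; first buildAlloca call
+    ///     %1 = alloca i8    ; second call, positioned right after %0
+    ///     ...previously emitted non-alloca instructions...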
+ fn buildAlloca(self: *LLVMIRModule, t: *const llvm.Type) *const llvm.Value {
+ const prev_block = self.builder.getInsertBlock();
+ defer self.builder.positionBuilderAtEnd(prev_block);
+
+ if (self.latest_alloca_inst) |latest_alloc| {
+ // builder.positionBuilder adds it before the instruction,
+ // but we want to put it after the last alloca instruction.
+ self.builder.positionBuilder(self.entry_block, latest_alloc.getNextInstruction().?);
+ } else {
+            // Other instructions might have been emitted before the first
+            // alloca was generated. However, the alloca should still come
+            // first in the function.
+ if (self.entry_block.getFirstInstruction()) |first_inst| {
+ self.builder.positionBuilder(self.entry_block, first_inst);
+ }
+ }
+
+ const val = self.builder.buildAlloca(t, "");
+ self.latest_alloca_inst = val;
+ return val;
+ }
+
+ fn genStore(self: *LLVMIRModule, inst: *Inst.BinOp) !?*const llvm.Value {
+ const val = try self.resolveInst(inst.rhs);
+ const ptr = try self.resolveInst(inst.lhs);
+ _ = self.builder.buildStore(val, ptr);
+ return null;
+ }
+
+ fn genLoad(self: *LLVMIRModule, inst: *Inst.UnOp) !?*const llvm.Value {
+ const ptr_val = try self.resolveInst(inst.operand);
+ return self.builder.buildLoad(ptr_val, "");
+ }
+
+ fn genBreakpoint(self: *LLVMIRModule, inst: *Inst.NoOp) !?*const llvm.Value {
+        const llvm_fn = self.getIntrinsic("llvm.debugtrap");
+        _ = self.builder.buildCall(llvm_fn, null, 0, "");
+ return null;
+ }
+
+ fn getIntrinsic(self: *LLVMIRModule, name: []const u8) *const llvm.Value {
+ const id = llvm.lookupIntrinsicID(name.ptr, name.len);
+ assert(id != 0);
+        // TODO: add support for overloaded intrinsics by passing the prefix of the intrinsic
+ // to `lookupIntrinsicID` and then passing the correct types to
+ // `getIntrinsicDeclaration`
+ return self.llvm_module.getIntrinsicDeclaration(id, null, 0);
+ }
+
+ fn resolveInst(self: *LLVMIRModule, inst: *ir.Inst) !*const llvm.Value {
+ if (inst.value()) |val| {
+ return self.genTypedValue(inst.src, .{ .ty = inst.ty, .val = val });
+ }
+ if (self.func_inst_table.get(inst)) |value| return value;
+
+        return self.fail(inst.src, "TODO implement global llvm values (or the value is not in the func_inst_table)", .{});
+ }
+
+ fn genTypedValue(self: *LLVMIRModule, src: usize, tv: TypedValue) error{ OutOfMemory, CodegenFail }!*const llvm.Value {
+ const llvm_type = try self.getLLVMType(tv.ty, src);
+
+ if (tv.val.isUndef())
+ return llvm_type.getUndef();
+
+ switch (tv.ty.zigTypeTag()) {
+ .Bool => return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull(),
+ .Int => {
+ var bigint_space: Value.BigIntSpace = undefined;
+ const bigint = tv.val.toBigInt(&bigint_space);
+
+ if (bigint.eqZero()) return llvm_type.constNull();
+
+ if (bigint.limbs.len != 1) {
+ return self.fail(src, "TODO implement bigger bigint", .{});
+ }
+ const llvm_int = llvm_type.constInt(bigint.limbs[0], false);
+ if (!bigint.positive) {
+ return llvm.constNeg(llvm_int);
+ }
+ return llvm_int;
+ },
+ .Pointer => switch (tv.val.tag()) {
+ .decl_ref => {
+ const decl = tv.val.castTag(.decl_ref).?.data;
+ const val = try self.resolveGlobalDecl(decl, src);
+
+ const usize_type = try self.getLLVMType(Type.initTag(.usize), src);
+
+ // TODO: second index should be the index into the memory!
+ var indices: [2]*const llvm.Value = .{
+ usize_type.constNull(),
+ usize_type.constNull(),
+ };
+
+ // TODO: consider using buildInBoundsGEP2 for opaque pointers
+ return self.builder.buildInBoundsGEP(val, &indices, 2, "");
+ },
+ else => return self.fail(src, "TODO implement const of pointer type '{}'", .{tv.ty}),
+ },
+ .Array => {
+ if (tv.val.castTag(.bytes)) |payload| {
+ const zero_sentinel = if (tv.ty.sentinel()) |sentinel| blk: {
+ if (sentinel.tag() == .zero) break :blk true;
+ return self.fail(src, "TODO handle other sentinel values", .{});
+ } else false;
+
+ return self.context.constString(payload.data.ptr, @intCast(c_uint, payload.data.len), !zero_sentinel);
+ } else {
+ return self.fail(src, "TODO handle more array values", .{});
+ }
+ },
+ else => return self.fail(src, "TODO implement const of type '{}'", .{tv.ty}),
+ }
+ }
+
+ fn getLLVMType(self: *LLVMIRModule, t: Type, src: usize) error{ OutOfMemory, CodegenFail }!*const llvm.Type {
+ switch (t.zigTypeTag()) {
+ .Void => return self.context.voidType(),
+ .NoReturn => return self.context.voidType(),
+ .Int => {
+ const info = t.intInfo(self.module.getTarget());
+ return self.context.intType(info.bits);
+ },
+ .Bool => return self.context.intType(1),
+ .Pointer => {
+ if (t.isSlice()) {
+ return self.fail(src, "TODO: LLVM backend: implement slices", .{});
+ } else {
+ const elem_type = try self.getLLVMType(t.elemType(), src);
+ return elem_type.pointerType(0);
+ }
+ },
+ .Array => {
+ const elem_type = try self.getLLVMType(t.elemType(), src);
+ return elem_type.arrayType(@intCast(c_uint, t.abiSize(self.module.getTarget())));
+ },
+ else => return self.fail(src, "TODO implement getLLVMType for type '{}'", .{t}),
+ }
+ }
+
+ fn resolveGlobalDecl(self: *LLVMIRModule, decl: *Module.Decl, src: usize) error{ OutOfMemory, CodegenFail }!*const llvm.Value {
+        // TODO: do we want to store this in our own data structure?
+ if (self.llvm_module.getNamedGlobal(decl.name)) |val| return val;
+
+ const typed_value = decl.typed_value.most_recent.typed_value;
+
+        // TODO: remove this redundant `getLLVMType`; it is also called in `genTypedValue`.
+ const llvm_type = try self.getLLVMType(typed_value.ty, src);
+ const val = try self.genTypedValue(src, typed_value);
+ const global = self.llvm_module.addGlobal(llvm_type, decl.name);
+ llvm.setInitializer(global, val);
+
+ // TODO ask the Decl if it is const
+ // https://github.com/ziglang/zig/issues/7582
+
+ return global;
+ }
+
+ /// If the llvm function does not exist, create it
+ fn resolveLLVMFunction(self: *LLVMIRModule, func: *Module.Decl, src: usize) !*const llvm.Value {
+        // TODO: do we want to store this in our own data structure?
+ if (self.llvm_module.getNamedFunction(func.name)) |llvm_fn| return llvm_fn;
+
+ const zig_fn_type = func.typed_value.most_recent.typed_value.ty;
+ const return_type = zig_fn_type.fnReturnType();
+
+ const fn_param_len = zig_fn_type.fnParamLen();
+
+ const fn_param_types = try self.gpa.alloc(Type, fn_param_len);
+ defer self.gpa.free(fn_param_types);
+ zig_fn_type.fnParamTypes(fn_param_types);
+
+ const llvm_param = try self.gpa.alloc(*const llvm.Type, fn_param_len);
+ defer self.gpa.free(llvm_param);
+
+ for (fn_param_types) |fn_param, i| {
+ llvm_param[i] = try self.getLLVMType(fn_param, src);
+ }
+
+ const fn_type = llvm.Type.functionType(
+ try self.getLLVMType(return_type, src),
+ if (fn_param_len == 0) null else llvm_param.ptr,
+ @intCast(c_uint, fn_param_len),
+ false,
+ );
+ const llvm_fn = self.llvm_module.addFunction(func.name, fn_type);
+
+ if (return_type.tag() == .noreturn) {
+ self.addFnAttr(llvm_fn, "noreturn");
+ }
+
+ return llvm_fn;
+ }
+
+ // Helper functions
+ fn addAttr(self: LLVMIRModule, val: *const llvm.Value, index: llvm.AttributeIndex, name: []const u8) void {
+ const kind_id = llvm.getEnumAttributeKindForName(name.ptr, name.len);
+ assert(kind_id != 0);
+ const llvm_attr = self.context.createEnumAttribute(kind_id, 0);
+ val.addAttributeAtIndex(index, llvm_attr);
+ }
+
+ fn addFnAttr(self: *LLVMIRModule, val: *const llvm.Value, attr_name: []const u8) void {
+ // TODO: improve this API, `addAttr(-1, attr_name)`
+ self.addAttr(val, std.math.maxInt(llvm.AttributeIndex), attr_name);
+ }
+
+ pub fn fail(self: *LLVMIRModule, src: usize, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
+ @setCold(true);
+ assert(self.err_msg == null);
+ self.err_msg = try Module.ErrorMsg.create(self.gpa, .{
+ .file_scope = self.src_loc.file_scope,
+ .byte_offset = src,
+ }, format, args);
+ return error.CodegenFail;
+ }
+};
diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig
new file mode 100644
index 0000000000..79ac833aac
--- /dev/null
+++ b/src/codegen/llvm/bindings.zig
@@ -0,0 +1,614 @@
+//! We do this instead of @cImport because the self-hosted compiler is easier
+//! to bootstrap if it does not depend on translate-c.
+
+const LLVMBool = bool;
+pub const AttributeIndex = c_uint;
+
+/// Make sure to use the *InContext functions instead of the global ones.
+pub const Context = opaque {
+ pub const create = LLVMContextCreate;
+ extern fn LLVMContextCreate() *const Context;
+
+ pub const dispose = LLVMContextDispose;
+ extern fn LLVMContextDispose(C: *const Context) void;
+
+ pub const createEnumAttribute = LLVMCreateEnumAttribute;
+ extern fn LLVMCreateEnumAttribute(*const Context, KindID: c_uint, Val: u64) *const Attribute;
+
+ pub const intType = LLVMIntTypeInContext;
+ extern fn LLVMIntTypeInContext(C: *const Context, NumBits: c_uint) *const Type;
+
+ pub const voidType = LLVMVoidTypeInContext;
+ extern fn LLVMVoidTypeInContext(C: *const Context) *const Type;
+
+ pub const constString = LLVMConstStringInContext;
+ extern fn LLVMConstStringInContext(C: *const Context, Str: [*]const u8, Length: c_uint, DontNullTerminate: LLVMBool) *const Value;
+
+ pub const createBasicBlock = LLVMCreateBasicBlockInContext;
+ extern fn LLVMCreateBasicBlockInContext(C: *const Context, Name: [*:0]const u8) *const BasicBlock;
+
+ pub const appendBasicBlock = LLVMAppendBasicBlockInContext;
+ extern fn LLVMAppendBasicBlockInContext(C: *const Context, Fn: *const Value, Name: [*:0]const u8) *const BasicBlock;
+
+ pub const createBuilder = LLVMCreateBuilderInContext;
+ extern fn LLVMCreateBuilderInContext(C: *const Context) *const Builder;
+};
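+
+// Typical use of these bindings (a sketch; see LLVMIRModule in llvm.zig):
+//   const context = Context.create();
+//   defer context.dispose();
+//   const builder = context.createBuilder();
+//   defer builder.dispose();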
+
+pub const Value = opaque {
+ pub const addAttributeAtIndex = LLVMAddAttributeAtIndex;
+ extern fn LLVMAddAttributeAtIndex(*const Value, Idx: AttributeIndex, A: *const Attribute) void;
+
+ pub const getFirstBasicBlock = LLVMGetFirstBasicBlock;
+ extern fn LLVMGetFirstBasicBlock(Fn: *const Value) ?*const BasicBlock;
+
+ pub const appendExistingBasicBlock = LLVMAppendExistingBasicBlock;
+ extern fn LLVMAppendExistingBasicBlock(Fn: *const Value, BB: *const BasicBlock) void;
+
+ pub const addIncoming = LLVMAddIncoming;
+ extern fn LLVMAddIncoming(PhiNode: *const Value, IncomingValues: [*]*const Value, IncomingBlocks: [*]*const BasicBlock, Count: c_uint) void;
+
+ pub const getNextInstruction = LLVMGetNextInstruction;
+ extern fn LLVMGetNextInstruction(Inst: *const Value) ?*const Value;
+};
+
+pub const Type = opaque {
+ pub const functionType = LLVMFunctionType;
+ extern fn LLVMFunctionType(ReturnType: *const Type, ParamTypes: ?[*]*const Type, ParamCount: c_uint, IsVarArg: LLVMBool) *const Type;
+
+ pub const constNull = LLVMConstNull;
+ extern fn LLVMConstNull(Ty: *const Type) *const Value;
+
+ pub const constAllOnes = LLVMConstAllOnes;
+ extern fn LLVMConstAllOnes(Ty: *const Type) *const Value;
+
+ pub const constInt = LLVMConstInt;
+ extern fn LLVMConstInt(IntTy: *const Type, N: c_ulonglong, SignExtend: LLVMBool) *const Value;
+
+ pub const constArray = LLVMConstArray;
+ extern fn LLVMConstArray(ElementTy: *const Type, ConstantVals: ?[*]*const Value, Length: c_uint) *const Value;
+
+ pub const getUndef = LLVMGetUndef;
+ extern fn LLVMGetUndef(Ty: *const Type) *const Value;
+
+ pub const pointerType = LLVMPointerType;
+ extern fn LLVMPointerType(ElementType: *const Type, AddressSpace: c_uint) *const Type;
+
+ pub const arrayType = LLVMArrayType;
+ extern fn LLVMArrayType(ElementType: *const Type, ElementCount: c_uint) *const Type;
+};
+
+pub const Module = opaque {
+ pub const createWithName = LLVMModuleCreateWithNameInContext;
+ extern fn LLVMModuleCreateWithNameInContext(ModuleID: [*:0]const u8, C: *const Context) *const Module;
+
+ pub const dispose = LLVMDisposeModule;
+ extern fn LLVMDisposeModule(*const Module) void;
+
+ pub const verify = LLVMVerifyModule;
+ extern fn LLVMVerifyModule(*const Module, Action: VerifierFailureAction, OutMessage: *[*:0]const u8) LLVMBool;
+
+ pub const addFunction = LLVMAddFunction;
+ extern fn LLVMAddFunction(*const Module, Name: [*:0]const u8, FunctionTy: *const Type) *const Value;
+
+ pub const getNamedFunction = LLVMGetNamedFunction;
+ extern fn LLVMGetNamedFunction(*const Module, Name: [*:0]const u8) ?*const Value;
+
+ pub const getIntrinsicDeclaration = LLVMGetIntrinsicDeclaration;
+ extern fn LLVMGetIntrinsicDeclaration(Mod: *const Module, ID: c_uint, ParamTypes: ?[*]*const Type, ParamCount: usize) *const Value;
+
+ pub const printToString = LLVMPrintModuleToString;
+ extern fn LLVMPrintModuleToString(*const Module) [*:0]const u8;
+
+ pub const addGlobal = LLVMAddGlobal;
+ extern fn LLVMAddGlobal(M: *const Module, Ty: *const Type, Name: [*:0]const u8) *const Value;
+
+ pub const getNamedGlobal = LLVMGetNamedGlobal;
+ extern fn LLVMGetNamedGlobal(M: *const Module, Name: [*:0]const u8) ?*const Value;
+};
+
+pub const lookupIntrinsicID = LLVMLookupIntrinsicID;
+extern fn LLVMLookupIntrinsicID(Name: [*]const u8, NameLen: usize) c_uint;
+
+pub const disposeMessage = LLVMDisposeMessage;
+extern fn LLVMDisposeMessage(Message: [*:0]const u8) void;
+
+pub const VerifierFailureAction = extern enum {
+ AbortProcess,
+ PrintMessage,
+ ReturnStatus,
+};
+
+pub const constNeg = LLVMConstNeg;
+extern fn LLVMConstNeg(ConstantVal: *const Value) *const Value;
+
+pub const setInitializer = LLVMSetInitializer;
+extern fn LLVMSetInitializer(GlobalVar: *const Value, ConstantVal: *const Value) void;
+
+pub const getParam = LLVMGetParam;
+extern fn LLVMGetParam(Fn: *const Value, Index: c_uint) *const Value;
+
+pub const getEnumAttributeKindForName = LLVMGetEnumAttributeKindForName;
+extern fn LLVMGetEnumAttributeKindForName(Name: [*]const u8, SLen: usize) c_uint;
+
+pub const Attribute = opaque {};
+
+pub const Builder = opaque {
+ pub const dispose = LLVMDisposeBuilder;
+ extern fn LLVMDisposeBuilder(Builder: *const Builder) void;
+
+ pub const positionBuilder = LLVMPositionBuilder;
+ extern fn LLVMPositionBuilder(Builder: *const Builder, Block: *const BasicBlock, Instr: *const Value) void;
+
+ pub const positionBuilderAtEnd = LLVMPositionBuilderAtEnd;
+ extern fn LLVMPositionBuilderAtEnd(Builder: *const Builder, Block: *const BasicBlock) void;
+
+ pub const getInsertBlock = LLVMGetInsertBlock;
+ extern fn LLVMGetInsertBlock(Builder: *const Builder) *const BasicBlock;
+
+ pub const buildCall = LLVMBuildCall;
+ extern fn LLVMBuildCall(*const Builder, Fn: *const Value, Args: ?[*]*const Value, NumArgs: c_uint, Name: [*:0]const u8) *const Value;
+
+ pub const buildCall2 = LLVMBuildCall2;
+ extern fn LLVMBuildCall2(*const Builder, *const Type, Fn: *const Value, Args: [*]*const Value, NumArgs: c_uint, Name: [*:0]const u8) *const Value;
+
+ pub const buildRetVoid = LLVMBuildRetVoid;
+ extern fn LLVMBuildRetVoid(*const Builder) *const Value;
+
+ pub const buildRet = LLVMBuildRet;
+ extern fn LLVMBuildRet(*const Builder, V: *const Value) *const Value;
+
+ pub const buildUnreachable = LLVMBuildUnreachable;
+ extern fn LLVMBuildUnreachable(*const Builder) *const Value;
+
+ pub const buildAlloca = LLVMBuildAlloca;
+ extern fn LLVMBuildAlloca(*const Builder, Ty: *const Type, Name: [*:0]const u8) *const Value;
+
+ pub const buildStore = LLVMBuildStore;
+ extern fn LLVMBuildStore(*const Builder, Val: *const Value, Ptr: *const Value) *const Value;
+
+ pub const buildLoad = LLVMBuildLoad;
+ extern fn LLVMBuildLoad(*const Builder, PointerVal: *const Value, Name: [*:0]const u8) *const Value;
+
+ pub const buildNot = LLVMBuildNot;
+ extern fn LLVMBuildNot(*const Builder, V: *const Value, Name: [*:0]const u8) *const Value;
+
+ pub const buildNSWAdd = LLVMBuildNSWAdd;
+ extern fn LLVMBuildNSWAdd(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
+ pub const buildNUWAdd = LLVMBuildNUWAdd;
+ extern fn LLVMBuildNUWAdd(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
+ pub const buildNSWSub = LLVMBuildNSWSub;
+ extern fn LLVMBuildNSWSub(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
+ pub const buildNUWSub = LLVMBuildNUWSub;
+ extern fn LLVMBuildNUWSub(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
+ pub const buildIntCast2 = LLVMBuildIntCast2;
+ extern fn LLVMBuildIntCast2(*const Builder, Val: *const Value, DestTy: *const Type, IsSigned: LLVMBool, Name: [*:0]const u8) *const Value;
+
+ pub const buildBitCast = LLVMBuildBitCast;
+ extern fn LLVMBuildBitCast(*const Builder, Val: *const Value, DestTy: *const Type, Name: [*:0]const u8) *const Value;
+
+ pub const buildInBoundsGEP = LLVMBuildInBoundsGEP;
+ extern fn LLVMBuildInBoundsGEP(B: *const Builder, Pointer: *const Value, Indices: [*]*const Value, NumIndices: c_uint, Name: [*:0]const u8) *const Value;
+
+ pub const buildICmp = LLVMBuildICmp;
+ extern fn LLVMBuildICmp(*const Builder, Op: IntPredicate, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
+ pub const buildBr = LLVMBuildBr;
+ extern fn LLVMBuildBr(*const Builder, Dest: *const BasicBlock) *const Value;
+
+ pub const buildCondBr = LLVMBuildCondBr;
+ extern fn LLVMBuildCondBr(*const Builder, If: *const Value, Then: *const BasicBlock, Else: *const BasicBlock) *const Value;
+
+ pub const buildPhi = LLVMBuildPhi;
+ extern fn LLVMBuildPhi(*const Builder, Ty: *const Type, Name: [*:0]const u8) *const Value;
+};
+
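+// These values mirror LLVMIntPredicate from llvm-c/Core.h, whose first tag
+// (LLVMIntEQ) starts at 32.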
+pub const IntPredicate = extern enum {
+ EQ = 32,
+ NE = 33,
+ UGT = 34,
+ UGE = 35,
+ ULT = 36,
+ ULE = 37,
+ SGT = 38,
+ SGE = 39,
+ SLT = 40,
+ SLE = 41,
+};
+
+pub const BasicBlock = opaque {
+ pub const deleteBasicBlock = LLVMDeleteBasicBlock;
+ extern fn LLVMDeleteBasicBlock(BB: *const BasicBlock) void;
+
+ pub const getFirstInstruction = LLVMGetFirstInstruction;
+ extern fn LLVMGetFirstInstruction(BB: *const BasicBlock) ?*const Value;
+};
+
+pub const TargetMachine = opaque {
+ pub const create = LLVMCreateTargetMachine;
+ extern fn LLVMCreateTargetMachine(
+ T: *const Target,
+ Triple: [*:0]const u8,
+ CPU: [*:0]const u8,
+ Features: [*:0]const u8,
+ Level: CodeGenOptLevel,
+ Reloc: RelocMode,
+ CodeModel: CodeMode,
+ ) *const TargetMachine;
+
+ pub const dispose = LLVMDisposeTargetMachine;
+ extern fn LLVMDisposeTargetMachine(T: *const TargetMachine) void;
+
+ pub const emitToFile = LLVMTargetMachineEmitToFile;
+ extern fn LLVMTargetMachineEmitToFile(
+ *const TargetMachine,
+ M: *const Module,
+ Filename: [*:0]const u8,
+ codegen: CodeGenFileType,
+ ErrorMessage: *[*:0]const u8,
+ ) LLVMBool;
+};
+
+pub const CodeMode = extern enum {
+ Default,
+ JITDefault,
+ Tiny,
+ Small,
+ Kernel,
+ Medium,
+ Large,
+};
+
+pub const CodeGenOptLevel = extern enum {
+ None,
+ Less,
+ Default,
+ Aggressive,
+};
+
+pub const RelocMode = extern enum {
+ Default,
+ Static,
+ PIC,
+ DynamicNoPic,
+ ROPI,
+ RWPI,
+ ROPI_RWPI,
+};
+
+pub const CodeGenFileType = extern enum {
+ AssemblyFile,
+ ObjectFile,
+};
+
+pub const Target = opaque {
+ pub const getFromTriple = LLVMGetTargetFromTriple;
+ extern fn LLVMGetTargetFromTriple(Triple: [*:0]const u8, T: **const Target, ErrorMessage: *[*:0]const u8) LLVMBool;
+};
+
+extern fn LLVMInitializeAArch64TargetInfo() void;
+extern fn LLVMInitializeAMDGPUTargetInfo() void;
+extern fn LLVMInitializeARMTargetInfo() void;
+extern fn LLVMInitializeAVRTargetInfo() void;
+extern fn LLVMInitializeBPFTargetInfo() void;
+extern fn LLVMInitializeHexagonTargetInfo() void;
+extern fn LLVMInitializeLanaiTargetInfo() void;
+extern fn LLVMInitializeMipsTargetInfo() void;
+extern fn LLVMInitializeMSP430TargetInfo() void;
+extern fn LLVMInitializeNVPTXTargetInfo() void;
+extern fn LLVMInitializePowerPCTargetInfo() void;
+extern fn LLVMInitializeRISCVTargetInfo() void;
+extern fn LLVMInitializeSparcTargetInfo() void;
+extern fn LLVMInitializeSystemZTargetInfo() void;
+extern fn LLVMInitializeWebAssemblyTargetInfo() void;
+extern fn LLVMInitializeX86TargetInfo() void;
+extern fn LLVMInitializeXCoreTargetInfo() void;
+extern fn LLVMInitializeAArch64Target() void;
+extern fn LLVMInitializeAMDGPUTarget() void;
+extern fn LLVMInitializeARMTarget() void;
+extern fn LLVMInitializeAVRTarget() void;
+extern fn LLVMInitializeBPFTarget() void;
+extern fn LLVMInitializeHexagonTarget() void;
+extern fn LLVMInitializeLanaiTarget() void;
+extern fn LLVMInitializeMipsTarget() void;
+extern fn LLVMInitializeMSP430Target() void;
+extern fn LLVMInitializeNVPTXTarget() void;
+extern fn LLVMInitializePowerPCTarget() void;
+extern fn LLVMInitializeRISCVTarget() void;
+extern fn LLVMInitializeSparcTarget() void;
+extern fn LLVMInitializeSystemZTarget() void;
+extern fn LLVMInitializeWebAssemblyTarget() void;
+extern fn LLVMInitializeX86Target() void;
+extern fn LLVMInitializeXCoreTarget() void;
+extern fn LLVMInitializeAArch64TargetMC() void;
+extern fn LLVMInitializeAMDGPUTargetMC() void;
+extern fn LLVMInitializeARMTargetMC() void;
+extern fn LLVMInitializeAVRTargetMC() void;
+extern fn LLVMInitializeBPFTargetMC() void;
+extern fn LLVMInitializeHexagonTargetMC() void;
+extern fn LLVMInitializeLanaiTargetMC() void;
+extern fn LLVMInitializeMipsTargetMC() void;
+extern fn LLVMInitializeMSP430TargetMC() void;
+extern fn LLVMInitializeNVPTXTargetMC() void;
+extern fn LLVMInitializePowerPCTargetMC() void;
+extern fn LLVMInitializeRISCVTargetMC() void;
+extern fn LLVMInitializeSparcTargetMC() void;
+extern fn LLVMInitializeSystemZTargetMC() void;
+extern fn LLVMInitializeWebAssemblyTargetMC() void;
+extern fn LLVMInitializeX86TargetMC() void;
+extern fn LLVMInitializeXCoreTargetMC() void;
+extern fn LLVMInitializeAArch64AsmPrinter() void;
+extern fn LLVMInitializeAMDGPUAsmPrinter() void;
+extern fn LLVMInitializeARMAsmPrinter() void;
+extern fn LLVMInitializeAVRAsmPrinter() void;
+extern fn LLVMInitializeBPFAsmPrinter() void;
+extern fn LLVMInitializeHexagonAsmPrinter() void;
+extern fn LLVMInitializeLanaiAsmPrinter() void;
+extern fn LLVMInitializeMipsAsmPrinter() void;
+extern fn LLVMInitializeMSP430AsmPrinter() void;
+extern fn LLVMInitializeNVPTXAsmPrinter() void;
+extern fn LLVMInitializePowerPCAsmPrinter() void;
+extern fn LLVMInitializeRISCVAsmPrinter() void;
+extern fn LLVMInitializeSparcAsmPrinter() void;
+extern fn LLVMInitializeSystemZAsmPrinter() void;
+extern fn LLVMInitializeWebAssemblyAsmPrinter() void;
+extern fn LLVMInitializeX86AsmPrinter() void;
+extern fn LLVMInitializeXCoreAsmPrinter() void;
+extern fn LLVMInitializeAArch64AsmParser() void;
+extern fn LLVMInitializeAMDGPUAsmParser() void;
+extern fn LLVMInitializeARMAsmParser() void;
+extern fn LLVMInitializeAVRAsmParser() void;
+extern fn LLVMInitializeBPFAsmParser() void;
+extern fn LLVMInitializeHexagonAsmParser() void;
+extern fn LLVMInitializeLanaiAsmParser() void;
+extern fn LLVMInitializeMipsAsmParser() void;
+extern fn LLVMInitializeMSP430AsmParser() void;
+extern fn LLVMInitializePowerPCAsmParser() void;
+extern fn LLVMInitializeRISCVAsmParser() void;
+extern fn LLVMInitializeSparcAsmParser() void;
+extern fn LLVMInitializeSystemZAsmParser() void;
+extern fn LLVMInitializeWebAssemblyAsmParser() void;
+extern fn LLVMInitializeX86AsmParser() void;
+
+pub const initializeAllTargetInfos = LLVMInitializeAllTargetInfos;
+fn LLVMInitializeAllTargetInfos() callconv(.C) void {
+ LLVMInitializeAArch64TargetInfo();
+ LLVMInitializeAMDGPUTargetInfo();
+ LLVMInitializeARMTargetInfo();
+ LLVMInitializeAVRTargetInfo();
+ LLVMInitializeBPFTargetInfo();
+ LLVMInitializeHexagonTargetInfo();
+ LLVMInitializeLanaiTargetInfo();
+ LLVMInitializeMipsTargetInfo();
+ LLVMInitializeMSP430TargetInfo();
+ LLVMInitializeNVPTXTargetInfo();
+ LLVMInitializePowerPCTargetInfo();
+ LLVMInitializeRISCVTargetInfo();
+ LLVMInitializeSparcTargetInfo();
+ LLVMInitializeSystemZTargetInfo();
+ LLVMInitializeWebAssemblyTargetInfo();
+ LLVMInitializeX86TargetInfo();
+ LLVMInitializeXCoreTargetInfo();
+}
+pub const initializeAllTargets = LLVMInitializeAllTargets;
+fn LLVMInitializeAllTargets() callconv(.C) void {
+ LLVMInitializeAArch64Target();
+ LLVMInitializeAMDGPUTarget();
+ LLVMInitializeARMTarget();
+ LLVMInitializeAVRTarget();
+ LLVMInitializeBPFTarget();
+ LLVMInitializeHexagonTarget();
+ LLVMInitializeLanaiTarget();
+ LLVMInitializeMipsTarget();
+ LLVMInitializeMSP430Target();
+ LLVMInitializeNVPTXTarget();
+ LLVMInitializePowerPCTarget();
+ LLVMInitializeRISCVTarget();
+ LLVMInitializeSparcTarget();
+ LLVMInitializeSystemZTarget();
+ LLVMInitializeWebAssemblyTarget();
+ LLVMInitializeX86Target();
+ LLVMInitializeXCoreTarget();
+}
+pub const initializeAllTargetMCs = LLVMInitializeAllTargetMCs;
+fn LLVMInitializeAllTargetMCs() callconv(.C) void {
+ LLVMInitializeAArch64TargetMC();
+ LLVMInitializeAMDGPUTargetMC();
+ LLVMInitializeARMTargetMC();
+ LLVMInitializeAVRTargetMC();
+ LLVMInitializeBPFTargetMC();
+ LLVMInitializeHexagonTargetMC();
+ LLVMInitializeLanaiTargetMC();
+ LLVMInitializeMipsTargetMC();
+ LLVMInitializeMSP430TargetMC();
+ LLVMInitializeNVPTXTargetMC();
+ LLVMInitializePowerPCTargetMC();
+ LLVMInitializeRISCVTargetMC();
+ LLVMInitializeSparcTargetMC();
+ LLVMInitializeSystemZTargetMC();
+ LLVMInitializeWebAssemblyTargetMC();
+ LLVMInitializeX86TargetMC();
+ LLVMInitializeXCoreTargetMC();
+}
+pub const initializeAllAsmPrinters = LLVMInitializeAllAsmPrinters;
+fn LLVMInitializeAllAsmPrinters() callconv(.C) void {
+ LLVMInitializeAArch64AsmPrinter();
+ LLVMInitializeAMDGPUAsmPrinter();
+ LLVMInitializeARMAsmPrinter();
+ LLVMInitializeAVRAsmPrinter();
+ LLVMInitializeBPFAsmPrinter();
+ LLVMInitializeHexagonAsmPrinter();
+ LLVMInitializeLanaiAsmPrinter();
+ LLVMInitializeMipsAsmPrinter();
+ LLVMInitializeMSP430AsmPrinter();
+ LLVMInitializeNVPTXAsmPrinter();
+ LLVMInitializePowerPCAsmPrinter();
+ LLVMInitializeRISCVAsmPrinter();
+ LLVMInitializeSparcAsmPrinter();
+ LLVMInitializeSystemZAsmPrinter();
+ LLVMInitializeWebAssemblyAsmPrinter();
+ LLVMInitializeX86AsmPrinter();
+ LLVMInitializeXCoreAsmPrinter();
+}
+pub const initializeAllAsmParsers = LLVMInitializeAllAsmParsers;
+fn LLVMInitializeAllAsmParsers() callconv(.C) void {
+ LLVMInitializeAArch64AsmParser();
+ LLVMInitializeAMDGPUAsmParser();
+ LLVMInitializeARMAsmParser();
+ LLVMInitializeAVRAsmParser();
+ LLVMInitializeBPFAsmParser();
+ LLVMInitializeHexagonAsmParser();
+ LLVMInitializeLanaiAsmParser();
+ LLVMInitializeMipsAsmParser();
+ LLVMInitializeMSP430AsmParser();
+ LLVMInitializePowerPCAsmParser();
+ LLVMInitializeRISCVAsmParser();
+ LLVMInitializeSparcAsmParser();
+ LLVMInitializeSystemZAsmParser();
+ LLVMInitializeWebAssemblyAsmParser();
+ LLVMInitializeX86AsmParser();
+}
+
+extern fn ZigLLDLinkCOFF(argc: c_int, argv: [*:null]const ?[*:0]const u8, can_exit_early: bool) c_int;
+extern fn ZigLLDLinkELF(argc: c_int, argv: [*:null]const ?[*:0]const u8, can_exit_early: bool) c_int;
+extern fn ZigLLDLinkMachO(argc: c_int, argv: [*:null]const ?[*:0]const u8, can_exit_early: bool) c_int;
+extern fn ZigLLDLinkWasm(argc: c_int, argv: [*:null]const ?[*:0]const u8, can_exit_early: bool) c_int;
+
+pub const LinkCOFF = ZigLLDLinkCOFF;
+pub const LinkELF = ZigLLDLinkELF;
+pub const LinkMachO = ZigLLDLinkMachO;
+pub const LinkWasm = ZigLLDLinkWasm;
+
+pub const ObjectFormatType = extern enum(c_int) {
+ Unknown,
+ COFF,
+ ELF,
+ GOFF,
+ MachO,
+ Wasm,
+ XCOFF,
+};
+
+pub const GetHostCPUName = LLVMGetHostCPUName;
+extern fn LLVMGetHostCPUName() ?[*:0]u8;
+
+pub const GetNativeFeatures = ZigLLVMGetNativeFeatures;
+extern fn ZigLLVMGetNativeFeatures() ?[*:0]u8;
+
+pub const WriteArchive = ZigLLVMWriteArchive;
+extern fn ZigLLVMWriteArchive(
+ archive_name: [*:0]const u8,
+ file_names_ptr: [*]const [*:0]const u8,
+ file_names_len: usize,
+ os_type: OSType,
+) bool;
+
+pub const OSType = extern enum(c_int) {
+ UnknownOS,
+ Ananas,
+ CloudABI,
+ Darwin,
+ DragonFly,
+ FreeBSD,
+ Fuchsia,
+ IOS,
+ KFreeBSD,
+ Linux,
+ Lv2,
+ MacOSX,
+ NetBSD,
+ OpenBSD,
+ Solaris,
+ Win32,
+ ZOS,
+ Haiku,
+ Minix,
+ RTEMS,
+ NaCl,
+ AIX,
+ CUDA,
+ NVCL,
+ AMDHSA,
+ PS4,
+ ELFIAMCU,
+ TvOS,
+ WatchOS,
+ Mesa3D,
+ Contiki,
+ AMDPAL,
+ HermitCore,
+ Hurd,
+ WASI,
+ Emscripten,
+};
+
+pub const ArchType = extern enum(c_int) {
+ UnknownArch,
+ arm,
+ armeb,
+ aarch64,
+ aarch64_be,
+ aarch64_32,
+ arc,
+ avr,
+ bpfel,
+ bpfeb,
+ csky,
+ hexagon,
+ mips,
+ mipsel,
+ mips64,
+ mips64el,
+ msp430,
+ ppc,
+ ppcle,
+ ppc64,
+ ppc64le,
+ r600,
+ amdgcn,
+ riscv32,
+ riscv64,
+ sparc,
+ sparcv9,
+ sparcel,
+ systemz,
+ tce,
+ tcele,
+ thumb,
+ thumbeb,
+ x86,
+ x86_64,
+ xcore,
+ nvptx,
+ nvptx64,
+ le32,
+ le64,
+ amdil,
+ amdil64,
+ hsail,
+ hsail64,
+ spir,
+ spir64,
+ kalimba,
+ shave,
+ lanai,
+ wasm32,
+ wasm64,
+ renderscript32,
+ renderscript64,
+ ve,
+};
+
+pub const ParseCommandLineOptions = ZigLLVMParseCommandLineOptions;
+extern fn ZigLLVMParseCommandLineOptions(argc: usize, argv: [*]const [*:0]const u8) void;
+
+pub const WriteImportLibrary = ZigLLVMWriteImportLibrary;
+extern fn ZigLLVMWriteImportLibrary(
+ def_path: [*:0]const u8,
+ arch: ArchType,
+ output_lib_path: [*c]const u8,
+ kill_at: bool,
+) bool;
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
new file mode 100644
index 0000000000..23fc45616f
--- /dev/null
+++ b/src/codegen/spirv.zig
@@ -0,0 +1,49 @@
+const std = @import("std");
+const Allocator = std.mem.Allocator;
+
+const spec = @import("spirv/spec.zig");
+const Module = @import("../Module.zig");
+const Decl = Module.Decl;
+
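+// Each SPIR-V instruction is a stream of 32-bit words: the first word packs
+// the total word count (including itself) into the high 16 bits and the
+// opcode into the low 16 bits. For example, OpName (opcode 5) followed by a
+// single argument word encodes as ((2 << 16) | 5) == 0x0002_0005 and then
+// the argument word itself.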
+pub fn writeInstruction(code: *std.ArrayList(u32), instr: spec.Opcode, args: []const u32) !void {
+ const word_count = @intCast(u32, args.len + 1);
+ try code.append((word_count << 16) | @enumToInt(instr));
+ try code.appendSlice(args);
+}
+
+pub const SPIRVModule = struct {
+ next_id: u32 = 0,
+ free_id_list: std.ArrayList(u32),
+
+ pub fn init(allocator: *Allocator) SPIRVModule {
+ return .{
+ .free_id_list = std.ArrayList(u32).init(allocator),
+ };
+ }
+
+ pub fn deinit(self: *SPIRVModule) void {
+ self.free_id_list.deinit();
+ }
+
+ pub fn allocId(self: *SPIRVModule) u32 {
+ if (self.free_id_list.popOrNull()) |id| return id;
+
+ defer self.next_id += 1;
+ return self.next_id;
+ }
+
+ pub fn freeId(self: *SPIRVModule, id: u32) void {
+ if (id + 1 == self.next_id) {
+ self.next_id -= 1;
+ } else {
+            // If there is no memory left to append the id to the free list, just ignore it.
+ self.free_id_list.append(id) catch {};
+ }
+ }
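+
+    // Usage sketch of the id recycling above:
+    //   const a = module.allocId(); // returns 0
+    //   const b = module.allocId(); // returns 1
+    //   module.freeId(a);           // 0 is not the newest id: pushed to free_id_list
+    //   module.freeId(b);           // 1 is the newest id: next_id shrinks back to 1
+    //   _ = module.allocId();       // pops 0 from the free list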
+
+ pub fn idBound(self: *SPIRVModule) u32 {
+ return self.next_id;
+ }
+
+ pub fn genDecl(self: SPIRVModule, id: u32, code: *std.ArrayList(u32), decl: *Decl) !void {}
+};
diff --git a/src/codegen/spirv/spec.zig b/src/codegen/spirv/spec.zig
new file mode 100644
index 0000000000..ceb62f1e5d
--- /dev/null
+++ b/src/codegen/spirv/spec.zig
@@ -0,0 +1,1645 @@
+// Copyright (c) 2014-2020 The Khronos Group Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and/or associated documentation files (the "Materials"),
+// to deal in the Materials without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Materials, and to permit persons to whom the
+// Materials are furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Materials.
+//
+// MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+// STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+// HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+//
+// THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+// IN THE MATERIALS.
+const Version = @import("builtin").Version;
+pub const version = Version{ .major = 1, .minor = 5, .patch = 4 };
+pub const magic_number: u32 = 0x07230203;
+pub const Opcode = extern enum(u16) {
+ OpNop = 0,
+ OpUndef = 1,
+ OpSourceContinued = 2,
+ OpSource = 3,
+ OpSourceExtension = 4,
+ OpName = 5,
+ OpMemberName = 6,
+ OpString = 7,
+ OpLine = 8,
+ OpExtension = 10,
+ OpExtInstImport = 11,
+ OpExtInst = 12,
+ OpMemoryModel = 14,
+ OpEntryPoint = 15,
+ OpExecutionMode = 16,
+ OpCapability = 17,
+ OpTypeVoid = 19,
+ OpTypeBool = 20,
+ OpTypeInt = 21,
+ OpTypeFloat = 22,
+ OpTypeVector = 23,
+ OpTypeMatrix = 24,
+ OpTypeImage = 25,
+ OpTypeSampler = 26,
+ OpTypeSampledImage = 27,
+ OpTypeArray = 28,
+ OpTypeRuntimeArray = 29,
+ OpTypeStruct = 30,
+ OpTypeOpaque = 31,
+ OpTypePointer = 32,
+ OpTypeFunction = 33,
+ OpTypeEvent = 34,
+ OpTypeDeviceEvent = 35,
+ OpTypeReserveId = 36,
+ OpTypeQueue = 37,
+ OpTypePipe = 38,
+ OpTypeForwardPointer = 39,
+ OpConstantTrue = 41,
+ OpConstantFalse = 42,
+ OpConstant = 43,
+ OpConstantComposite = 44,
+ OpConstantSampler = 45,
+ OpConstantNull = 46,
+ OpSpecConstantTrue = 48,
+ OpSpecConstantFalse = 49,
+ OpSpecConstant = 50,
+ OpSpecConstantComposite = 51,
+ OpSpecConstantOp = 52,
+ OpFunction = 54,
+ OpFunctionParameter = 55,
+ OpFunctionEnd = 56,
+ OpFunctionCall = 57,
+ OpVariable = 59,
+ OpImageTexelPointer = 60,
+ OpLoad = 61,
+ OpStore = 62,
+ OpCopyMemory = 63,
+ OpCopyMemorySized = 64,
+ OpAccessChain = 65,
+ OpInBoundsAccessChain = 66,
+ OpPtrAccessChain = 67,
+ OpArrayLength = 68,
+ OpGenericPtrMemSemantics = 69,
+ OpInBoundsPtrAccessChain = 70,
+ OpDecorate = 71,
+ OpMemberDecorate = 72,
+ OpDecorationGroup = 73,
+ OpGroupDecorate = 74,
+ OpGroupMemberDecorate = 75,
+ OpVectorExtractDynamic = 77,
+ OpVectorInsertDynamic = 78,
+ OpVectorShuffle = 79,
+ OpCompositeConstruct = 80,
+ OpCompositeExtract = 81,
+ OpCompositeInsert = 82,
+ OpCopyObject = 83,
+ OpTranspose = 84,
+ OpSampledImage = 86,
+ OpImageSampleImplicitLod = 87,
+ OpImageSampleExplicitLod = 88,
+ OpImageSampleDrefImplicitLod = 89,
+ OpImageSampleDrefExplicitLod = 90,
+ OpImageSampleProjImplicitLod = 91,
+ OpImageSampleProjExplicitLod = 92,
+ OpImageSampleProjDrefImplicitLod = 93,
+ OpImageSampleProjDrefExplicitLod = 94,
+ OpImageFetch = 95,
+ OpImageGather = 96,
+ OpImageDrefGather = 97,
+ OpImageRead = 98,
+ OpImageWrite = 99,
+ OpImage = 100,
+ OpImageQueryFormat = 101,
+ OpImageQueryOrder = 102,
+ OpImageQuerySizeLod = 103,
+ OpImageQuerySize = 104,
+ OpImageQueryLod = 105,
+ OpImageQueryLevels = 106,
+ OpImageQuerySamples = 107,
+ OpConvertFToU = 109,
+ OpConvertFToS = 110,
+ OpConvertSToF = 111,
+ OpConvertUToF = 112,
+ OpUConvert = 113,
+ OpSConvert = 114,
+ OpFConvert = 115,
+ OpQuantizeToF16 = 116,
+ OpConvertPtrToU = 117,
+ OpSatConvertSToU = 118,
+ OpSatConvertUToS = 119,
+ OpConvertUToPtr = 120,
+ OpPtrCastToGeneric = 121,
+ OpGenericCastToPtr = 122,
+ OpGenericCastToPtrExplicit = 123,
+ OpBitcast = 124,
+ OpSNegate = 126,
+ OpFNegate = 127,
+ OpIAdd = 128,
+ OpFAdd = 129,
+ OpISub = 130,
+ OpFSub = 131,
+ OpIMul = 132,
+ OpFMul = 133,
+ OpUDiv = 134,
+ OpSDiv = 135,
+ OpFDiv = 136,
+ OpUMod = 137,
+ OpSRem = 138,
+ OpSMod = 139,
+ OpFRem = 140,
+ OpFMod = 141,
+ OpVectorTimesScalar = 142,
+ OpMatrixTimesScalar = 143,
+ OpVectorTimesMatrix = 144,
+ OpMatrixTimesVector = 145,
+ OpMatrixTimesMatrix = 146,
+ OpOuterProduct = 147,
+ OpDot = 148,
+ OpIAddCarry = 149,
+ OpISubBorrow = 150,
+ OpUMulExtended = 151,
+ OpSMulExtended = 152,
+ OpAny = 154,
+ OpAll = 155,
+ OpIsNan = 156,
+ OpIsInf = 157,
+ OpIsFinite = 158,
+ OpIsNormal = 159,
+ OpSignBitSet = 160,
+ OpLessOrGreater = 161,
+ OpOrdered = 162,
+ OpUnordered = 163,
+ OpLogicalEqual = 164,
+ OpLogicalNotEqual = 165,
+ OpLogicalOr = 166,
+ OpLogicalAnd = 167,
+ OpLogicalNot = 168,
+ OpSelect = 169,
+ OpIEqual = 170,
+ OpINotEqual = 171,
+ OpUGreaterThan = 172,
+ OpSGreaterThan = 173,
+ OpUGreaterThanEqual = 174,
+ OpSGreaterThanEqual = 175,
+ OpULessThan = 176,
+ OpSLessThan = 177,
+ OpULessThanEqual = 178,
+ OpSLessThanEqual = 179,
+ OpFOrdEqual = 180,
+ OpFUnordEqual = 181,
+ OpFOrdNotEqual = 182,
+ OpFUnordNotEqual = 183,
+ OpFOrdLessThan = 184,
+ OpFUnordLessThan = 185,
+ OpFOrdGreaterThan = 186,
+ OpFUnordGreaterThan = 187,
+ OpFOrdLessThanEqual = 188,
+ OpFUnordLessThanEqual = 189,
+ OpFOrdGreaterThanEqual = 190,
+ OpFUnordGreaterThanEqual = 191,
+ OpShiftRightLogical = 194,
+ OpShiftRightArithmetic = 195,
+ OpShiftLeftLogical = 196,
+ OpBitwiseOr = 197,
+ OpBitwiseXor = 198,
+ OpBitwiseAnd = 199,
+ OpNot = 200,
+ OpBitFieldInsert = 201,
+ OpBitFieldSExtract = 202,
+ OpBitFieldUExtract = 203,
+ OpBitReverse = 204,
+ OpBitCount = 205,
+ OpDPdx = 207,
+ OpDPdy = 208,
+ OpFwidth = 209,
+ OpDPdxFine = 210,
+ OpDPdyFine = 211,
+ OpFwidthFine = 212,
+ OpDPdxCoarse = 213,
+ OpDPdyCoarse = 214,
+ OpFwidthCoarse = 215,
+ OpEmitVertex = 218,
+ OpEndPrimitive = 219,
+ OpEmitStreamVertex = 220,
+ OpEndStreamPrimitive = 221,
+ OpControlBarrier = 224,
+ OpMemoryBarrier = 225,
+ OpAtomicLoad = 227,
+ OpAtomicStore = 228,
+ OpAtomicExchange = 229,
+ OpAtomicCompareExchange = 230,
+ OpAtomicCompareExchangeWeak = 231,
+ OpAtomicIIncrement = 232,
+ OpAtomicIDecrement = 233,
+ OpAtomicIAdd = 234,
+ OpAtomicISub = 235,
+ OpAtomicSMin = 236,
+ OpAtomicUMin = 237,
+ OpAtomicSMax = 238,
+ OpAtomicUMax = 239,
+ OpAtomicAnd = 240,
+ OpAtomicOr = 241,
+ OpAtomicXor = 242,
+ OpPhi = 245,
+ OpLoopMerge = 246,
+ OpSelectionMerge = 247,
+ OpLabel = 248,
+ OpBranch = 249,
+ OpBranchConditional = 250,
+ OpSwitch = 251,
+ OpKill = 252,
+ OpReturn = 253,
+ OpReturnValue = 254,
+ OpUnreachable = 255,
+ OpLifetimeStart = 256,
+ OpLifetimeStop = 257,
+ OpGroupAsyncCopy = 259,
+ OpGroupWaitEvents = 260,
+ OpGroupAll = 261,
+ OpGroupAny = 262,
+ OpGroupBroadcast = 263,
+ OpGroupIAdd = 264,
+ OpGroupFAdd = 265,
+ OpGroupFMin = 266,
+ OpGroupUMin = 267,
+ OpGroupSMin = 268,
+ OpGroupFMax = 269,
+ OpGroupUMax = 270,
+ OpGroupSMax = 271,
+ OpReadPipe = 274,
+ OpWritePipe = 275,
+ OpReservedReadPipe = 276,
+ OpReservedWritePipe = 277,
+ OpReserveReadPipePackets = 278,
+ OpReserveWritePipePackets = 279,
+ OpCommitReadPipe = 280,
+ OpCommitWritePipe = 281,
+ OpIsValidReserveId = 282,
+ OpGetNumPipePackets = 283,
+ OpGetMaxPipePackets = 284,
+ OpGroupReserveReadPipePackets = 285,
+ OpGroupReserveWritePipePackets = 286,
+ OpGroupCommitReadPipe = 287,
+ OpGroupCommitWritePipe = 288,
+ OpEnqueueMarker = 291,
+ OpEnqueueKernel = 292,
+ OpGetKernelNDrangeSubGroupCount = 293,
+ OpGetKernelNDrangeMaxSubGroupSize = 294,
+ OpGetKernelWorkGroupSize = 295,
+ OpGetKernelPreferredWorkGroupSizeMultiple = 296,
+ OpRetainEvent = 297,
+ OpReleaseEvent = 298,
+ OpCreateUserEvent = 299,
+ OpIsValidEvent = 300,
+ OpSetUserEventStatus = 301,
+ OpCaptureEventProfilingInfo = 302,
+ OpGetDefaultQueue = 303,
+ OpBuildNDRange = 304,
+ OpImageSparseSampleImplicitLod = 305,
+ OpImageSparseSampleExplicitLod = 306,
+ OpImageSparseSampleDrefImplicitLod = 307,
+ OpImageSparseSampleDrefExplicitLod = 308,
+ OpImageSparseSampleProjImplicitLod = 309,
+ OpImageSparseSampleProjExplicitLod = 310,
+ OpImageSparseSampleProjDrefImplicitLod = 311,
+ OpImageSparseSampleProjDrefExplicitLod = 312,
+ OpImageSparseFetch = 313,
+ OpImageSparseGather = 314,
+ OpImageSparseDrefGather = 315,
+ OpImageSparseTexelsResident = 316,
+ OpNoLine = 317,
+ OpAtomicFlagTestAndSet = 318,
+ OpAtomicFlagClear = 319,
+ OpImageSparseRead = 320,
+ OpSizeOf = 321,
+ OpTypePipeStorage = 322,
+ OpConstantPipeStorage = 323,
+ OpCreatePipeFromPipeStorage = 324,
+ OpGetKernelLocalSizeForSubgroupCount = 325,
+ OpGetKernelMaxNumSubgroups = 326,
+ OpTypeNamedBarrier = 327,
+ OpNamedBarrierInitialize = 328,
+ OpMemoryNamedBarrier = 329,
+ OpModuleProcessed = 330,
+ OpExecutionModeId = 331,
+ OpDecorateId = 332,
+ OpGroupNonUniformElect = 333,
+ OpGroupNonUniformAll = 334,
+ OpGroupNonUniformAny = 335,
+ OpGroupNonUniformAllEqual = 336,
+ OpGroupNonUniformBroadcast = 337,
+ OpGroupNonUniformBroadcastFirst = 338,
+ OpGroupNonUniformBallot = 339,
+ OpGroupNonUniformInverseBallot = 340,
+ OpGroupNonUniformBallotBitExtract = 341,
+ OpGroupNonUniformBallotBitCount = 342,
+ OpGroupNonUniformBallotFindLSB = 343,
+ OpGroupNonUniformBallotFindMSB = 344,
+ OpGroupNonUniformShuffle = 345,
+ OpGroupNonUniformShuffleXor = 346,
+ OpGroupNonUniformShuffleUp = 347,
+ OpGroupNonUniformShuffleDown = 348,
+ OpGroupNonUniformIAdd = 349,
+ OpGroupNonUniformFAdd = 350,
+ OpGroupNonUniformIMul = 351,
+ OpGroupNonUniformFMul = 352,
+ OpGroupNonUniformSMin = 353,
+ OpGroupNonUniformUMin = 354,
+ OpGroupNonUniformFMin = 355,
+ OpGroupNonUniformSMax = 356,
+ OpGroupNonUniformUMax = 357,
+ OpGroupNonUniformFMax = 358,
+ OpGroupNonUniformBitwiseAnd = 359,
+ OpGroupNonUniformBitwiseOr = 360,
+ OpGroupNonUniformBitwiseXor = 361,
+ OpGroupNonUniformLogicalAnd = 362,
+ OpGroupNonUniformLogicalOr = 363,
+ OpGroupNonUniformLogicalXor = 364,
+ OpGroupNonUniformQuadBroadcast = 365,
+ OpGroupNonUniformQuadSwap = 366,
+ OpCopyLogical = 400,
+ OpPtrEqual = 401,
+ OpPtrNotEqual = 402,
+ OpPtrDiff = 403,
+ OpTerminateInvocation = 4416,
+ OpSubgroupBallotKHR = 4421,
+ OpSubgroupFirstInvocationKHR = 4422,
+ OpSubgroupAllKHR = 4428,
+ OpSubgroupAnyKHR = 4429,
+ OpSubgroupAllEqualKHR = 4430,
+ OpSubgroupReadInvocationKHR = 4432,
+ OpTraceRayKHR = 4445,
+ OpExecuteCallableKHR = 4446,
+ OpConvertUToAccelerationStructureKHR = 4447,
+ OpIgnoreIntersectionKHR = 4448,
+ OpTerminateRayKHR = 4449,
+ OpTypeRayQueryKHR = 4472,
+ OpRayQueryInitializeKHR = 4473,
+ OpRayQueryTerminateKHR = 4474,
+ OpRayQueryGenerateIntersectionKHR = 4475,
+ OpRayQueryConfirmIntersectionKHR = 4476,
+ OpRayQueryProceedKHR = 4477,
+ OpRayQueryGetIntersectionTypeKHR = 4479,
+ OpGroupIAddNonUniformAMD = 5000,
+ OpGroupFAddNonUniformAMD = 5001,
+ OpGroupFMinNonUniformAMD = 5002,
+ OpGroupUMinNonUniformAMD = 5003,
+ OpGroupSMinNonUniformAMD = 5004,
+ OpGroupFMaxNonUniformAMD = 5005,
+ OpGroupUMaxNonUniformAMD = 5006,
+ OpGroupSMaxNonUniformAMD = 5007,
+ OpFragmentMaskFetchAMD = 5011,
+ OpFragmentFetchAMD = 5012,
+ OpReadClockKHR = 5056,
+ OpImageSampleFootprintNV = 5283,
+ OpGroupNonUniformPartitionNV = 5296,
+ OpWritePackedPrimitiveIndices4x8NV = 5299,
+ OpReportIntersectionNV = 5334,
+ OpReportIntersectionKHR = 5334,
+ OpIgnoreIntersectionNV = 5335,
+ OpTerminateRayNV = 5336,
+ OpTraceNV = 5337,
+ OpTypeAccelerationStructureNV = 5341,
+ OpTypeAccelerationStructureKHR = 5341,
+ OpExecuteCallableNV = 5344,
+ OpTypeCooperativeMatrixNV = 5358,
+ OpCooperativeMatrixLoadNV = 5359,
+ OpCooperativeMatrixStoreNV = 5360,
+ OpCooperativeMatrixMulAddNV = 5361,
+ OpCooperativeMatrixLengthNV = 5362,
+ OpBeginInvocationInterlockEXT = 5364,
+ OpEndInvocationInterlockEXT = 5365,
+ OpDemoteToHelperInvocationEXT = 5380,
+ OpIsHelperInvocationEXT = 5381,
+ OpSubgroupShuffleINTEL = 5571,
+ OpSubgroupShuffleDownINTEL = 5572,
+ OpSubgroupShuffleUpINTEL = 5573,
+ OpSubgroupShuffleXorINTEL = 5574,
+ OpSubgroupBlockReadINTEL = 5575,
+ OpSubgroupBlockWriteINTEL = 5576,
+ OpSubgroupImageBlockReadINTEL = 5577,
+ OpSubgroupImageBlockWriteINTEL = 5578,
+ OpSubgroupImageMediaBlockReadINTEL = 5580,
+ OpSubgroupImageMediaBlockWriteINTEL = 5581,
+ OpUCountLeadingZerosINTEL = 5585,
+ OpUCountTrailingZerosINTEL = 5586,
+ OpAbsISubINTEL = 5587,
+ OpAbsUSubINTEL = 5588,
+ OpIAddSatINTEL = 5589,
+ OpUAddSatINTEL = 5590,
+ OpIAverageINTEL = 5591,
+ OpUAverageINTEL = 5592,
+ OpIAverageRoundedINTEL = 5593,
+ OpUAverageRoundedINTEL = 5594,
+ OpISubSatINTEL = 5595,
+ OpUSubSatINTEL = 5596,
+ OpIMul32x16INTEL = 5597,
+ OpUMul32x16INTEL = 5598,
+ OpFunctionPointerINTEL = 5600,
+ OpFunctionPointerCallINTEL = 5601,
+ OpDecorateString = 5632,
+ OpDecorateStringGOOGLE = 5632,
+ OpMemberDecorateString = 5633,
+ OpMemberDecorateStringGOOGLE = 5633,
+ OpVmeImageINTEL = 5699,
+ OpTypeVmeImageINTEL = 5700,
+ OpTypeAvcImePayloadINTEL = 5701,
+ OpTypeAvcRefPayloadINTEL = 5702,
+ OpTypeAvcSicPayloadINTEL = 5703,
+ OpTypeAvcMcePayloadINTEL = 5704,
+ OpTypeAvcMceResultINTEL = 5705,
+ OpTypeAvcImeResultINTEL = 5706,
+ OpTypeAvcImeResultSingleReferenceStreamoutINTEL = 5707,
+ OpTypeAvcImeResultDualReferenceStreamoutINTEL = 5708,
+ OpTypeAvcImeSingleReferenceStreaminINTEL = 5709,
+ OpTypeAvcImeDualReferenceStreaminINTEL = 5710,
+ OpTypeAvcRefResultINTEL = 5711,
+ OpTypeAvcSicResultINTEL = 5712,
+ OpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL = 5713,
+ OpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL = 5714,
+ OpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL = 5715,
+ OpSubgroupAvcMceSetInterShapePenaltyINTEL = 5716,
+ OpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL = 5717,
+ OpSubgroupAvcMceSetInterDirectionPenaltyINTEL = 5718,
+ OpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL = 5719,
+ OpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL = 5720,
+ OpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL = 5721,
+ OpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL = 5722,
+ OpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL = 5723,
+ OpSubgroupAvcMceSetMotionVectorCostFunctionINTEL = 5724,
+ OpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL = 5725,
+ OpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL = 5726,
+ OpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL = 5727,
+ OpSubgroupAvcMceSetAcOnlyHaarINTEL = 5728,
+ OpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL = 5729,
+ OpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL = 5730,
+ OpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL = 5731,
+ OpSubgroupAvcMceConvertToImePayloadINTEL = 5732,
+ OpSubgroupAvcMceConvertToImeResultINTEL = 5733,
+ OpSubgroupAvcMceConvertToRefPayloadINTEL = 5734,
+ OpSubgroupAvcMceConvertToRefResultINTEL = 5735,
+ OpSubgroupAvcMceConvertToSicPayloadINTEL = 5736,
+ OpSubgroupAvcMceConvertToSicResultINTEL = 5737,
+ OpSubgroupAvcMceGetMotionVectorsINTEL = 5738,
+ OpSubgroupAvcMceGetInterDistortionsINTEL = 5739,
+ OpSubgroupAvcMceGetBestInterDistortionsINTEL = 5740,
+ OpSubgroupAvcMceGetInterMajorShapeINTEL = 5741,
+ OpSubgroupAvcMceGetInterMinorShapeINTEL = 5742,
+ OpSubgroupAvcMceGetInterDirectionsINTEL = 5743,
+ OpSubgroupAvcMceGetInterMotionVectorCountINTEL = 5744,
+ OpSubgroupAvcMceGetInterReferenceIdsINTEL = 5745,
+ OpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL = 5746,
+ OpSubgroupAvcImeInitializeINTEL = 5747,
+ OpSubgroupAvcImeSetSingleReferenceINTEL = 5748,
+ OpSubgroupAvcImeSetDualReferenceINTEL = 5749,
+ OpSubgroupAvcImeRefWindowSizeINTEL = 5750,
+ OpSubgroupAvcImeAdjustRefOffsetINTEL = 5751,
+ OpSubgroupAvcImeConvertToMcePayloadINTEL = 5752,
+ OpSubgroupAvcImeSetMaxMotionVectorCountINTEL = 5753,
+ OpSubgroupAvcImeSetUnidirectionalMixDisableINTEL = 5754,
+ OpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL = 5755,
+ OpSubgroupAvcImeSetWeightedSadINTEL = 5756,
+ OpSubgroupAvcImeEvaluateWithSingleReferenceINTEL = 5757,
+ OpSubgroupAvcImeEvaluateWithDualReferenceINTEL = 5758,
+ OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL = 5759,
+ OpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL = 5760,
+ OpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL = 5761,
+ OpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL = 5762,
+ OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL = 5763,
+ OpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL = 5764,
+ OpSubgroupAvcImeConvertToMceResultINTEL = 5765,
+ OpSubgroupAvcImeGetSingleReferenceStreaminINTEL = 5766,
+ OpSubgroupAvcImeGetDualReferenceStreaminINTEL = 5767,
+ OpSubgroupAvcImeStripSingleReferenceStreamoutINTEL = 5768,
+ OpSubgroupAvcImeStripDualReferenceStreamoutINTEL = 5769,
+ OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL = 5770,
+ OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL = 5771,
+ OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL = 5772,
+ OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL = 5773,
+ OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL = 5774,
+ OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL = 5775,
+ OpSubgroupAvcImeGetBorderReachedINTEL = 5776,
+ OpSubgroupAvcImeGetTruncatedSearchIndicationINTEL = 5777,
+ OpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL = 5778,
+ OpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL = 5779,
+ OpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL = 5780,
+ OpSubgroupAvcFmeInitializeINTEL = 5781,
+ OpSubgroupAvcBmeInitializeINTEL = 5782,
+ OpSubgroupAvcRefConvertToMcePayloadINTEL = 5783,
+ OpSubgroupAvcRefSetBidirectionalMixDisableINTEL = 5784,
+ OpSubgroupAvcRefSetBilinearFilterEnableINTEL = 5785,
+ OpSubgroupAvcRefEvaluateWithSingleReferenceINTEL = 5786,
+ OpSubgroupAvcRefEvaluateWithDualReferenceINTEL = 5787,
+ OpSubgroupAvcRefEvaluateWithMultiReferenceINTEL = 5788,
+ OpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL = 5789,
+ OpSubgroupAvcRefConvertToMceResultINTEL = 5790,
+ OpSubgroupAvcSicInitializeINTEL = 5791,
+ OpSubgroupAvcSicConfigureSkcINTEL = 5792,
+ OpSubgroupAvcSicConfigureIpeLumaINTEL = 5793,
+ OpSubgroupAvcSicConfigureIpeLumaChromaINTEL = 5794,
+ OpSubgroupAvcSicGetMotionVectorMaskINTEL = 5795,
+ OpSubgroupAvcSicConvertToMcePayloadINTEL = 5796,
+ OpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL = 5797,
+ OpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL = 5798,
+ OpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL = 5799,
+ OpSubgroupAvcSicSetBilinearFilterEnableINTEL = 5800,
+ OpSubgroupAvcSicSetSkcForwardTransformEnableINTEL = 5801,
+ OpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL = 5802,
+ OpSubgroupAvcSicEvaluateIpeINTEL = 5803,
+ OpSubgroupAvcSicEvaluateWithSingleReferenceINTEL = 5804,
+ OpSubgroupAvcSicEvaluateWithDualReferenceINTEL = 5805,
+ OpSubgroupAvcSicEvaluateWithMultiReferenceINTEL = 5806,
+ OpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL = 5807,
+ OpSubgroupAvcSicConvertToMceResultINTEL = 5808,
+ OpSubgroupAvcSicGetIpeLumaShapeINTEL = 5809,
+ OpSubgroupAvcSicGetBestIpeLumaDistortionINTEL = 5810,
+ OpSubgroupAvcSicGetBestIpeChromaDistortionINTEL = 5811,
+ OpSubgroupAvcSicGetPackedIpeLumaModesINTEL = 5812,
+ OpSubgroupAvcSicGetIpeChromaModeINTEL = 5813,
+ OpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL = 5814,
+ OpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL = 5815,
+ OpSubgroupAvcSicGetInterRawSadsINTEL = 5816,
+ OpLoopControlINTEL = 5887,
+ OpReadPipeBlockingINTEL = 5946,
+ OpWritePipeBlockingINTEL = 5947,
+ OpFPGARegINTEL = 5949,
+ OpRayQueryGetRayTMinKHR = 6016,
+ OpRayQueryGetRayFlagsKHR = 6017,
+ OpRayQueryGetIntersectionTKHR = 6018,
+ OpRayQueryGetIntersectionInstanceCustomIndexKHR = 6019,
+ OpRayQueryGetIntersectionInstanceIdKHR = 6020,
+ OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR = 6021,
+ OpRayQueryGetIntersectionGeometryIndexKHR = 6022,
+ OpRayQueryGetIntersectionPrimitiveIndexKHR = 6023,
+ OpRayQueryGetIntersectionBarycentricsKHR = 6024,
+ OpRayQueryGetIntersectionFrontFaceKHR = 6025,
+ OpRayQueryGetIntersectionCandidateAABBOpaqueKHR = 6026,
+ OpRayQueryGetIntersectionObjectRayDirectionKHR = 6027,
+ OpRayQueryGetIntersectionObjectRayOriginKHR = 6028,
+ OpRayQueryGetWorldRayDirectionKHR = 6029,
+ OpRayQueryGetWorldRayOriginKHR = 6030,
+ OpRayQueryGetIntersectionObjectToWorldKHR = 6031,
+ OpRayQueryGetIntersectionWorldToObjectKHR = 6032,
+ OpAtomicFAddEXT = 6035,
+ _,
+};
+pub const ImageOperands = packed struct {
+ Bias: bool align(@alignOf(u32)) = false,
+ Lod: bool = false,
+ Grad: bool = false,
+ ConstOffset: bool = false,
+ Offset: bool = false,
+ ConstOffsets: bool = false,
+ Sample: bool = false,
+ MinLod: bool = false,
+ MakeTexelAvailable: bool = false,
+ MakeTexelVisible: bool = false,
+ NonPrivateTexel: bool = false,
+ VolatileTexel: bool = false,
+ SignExtend: bool = false,
+ ZeroExtend: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ _reserved_bit_16: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+};
+pub const FPFastMathMode = packed struct {
+ NotNaN: bool align(@alignOf(u32)) = false,
+ NotInf: bool = false,
+ NSZ: bool = false,
+ AllowRecip: bool = false,
+ Fast: bool = false,
+ _reserved_bit_5: bool = false,
+ _reserved_bit_6: bool = false,
+ _reserved_bit_7: bool = false,
+ _reserved_bit_8: bool = false,
+ _reserved_bit_9: bool = false,
+ _reserved_bit_10: bool = false,
+ _reserved_bit_11: bool = false,
+ _reserved_bit_12: bool = false,
+ _reserved_bit_13: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ _reserved_bit_16: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+};
+pub const SelectionControl = packed struct {
+ Flatten: bool align(@alignOf(u32)) = false,
+ DontFlatten: bool = false,
+ _reserved_bit_2: bool = false,
+ _reserved_bit_3: bool = false,
+ _reserved_bit_4: bool = false,
+ _reserved_bit_5: bool = false,
+ _reserved_bit_6: bool = false,
+ _reserved_bit_7: bool = false,
+ _reserved_bit_8: bool = false,
+ _reserved_bit_9: bool = false,
+ _reserved_bit_10: bool = false,
+ _reserved_bit_11: bool = false,
+ _reserved_bit_12: bool = false,
+ _reserved_bit_13: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ _reserved_bit_16: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+};
+pub const LoopControl = packed struct {
+ Unroll: bool align(@alignOf(u32)) = false,
+ DontUnroll: bool = false,
+ DependencyInfinite: bool = false,
+ DependencyLength: bool = false,
+ MinIterations: bool = false,
+ MaxIterations: bool = false,
+ IterationMultiple: bool = false,
+ PeelCount: bool = false,
+ PartialCount: bool = false,
+ _reserved_bit_9: bool = false,
+ _reserved_bit_10: bool = false,
+ _reserved_bit_11: bool = false,
+ _reserved_bit_12: bool = false,
+ _reserved_bit_13: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ InitiationIntervalINTEL: bool = false,
+ MaxConcurrencyINTEL: bool = false,
+ DependencyArrayINTEL: bool = false,
+ PipelineEnableINTEL: bool = false,
+ LoopCoalesceINTEL: bool = false,
+ MaxInterleavingINTEL: bool = false,
+ SpeculatedIterationsINTEL: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+};
+pub const FunctionControl = packed struct {
+ Inline: bool align(@alignOf(u32)) = false,
+ DontInline: bool = false,
+ Pure: bool = false,
+ Const: bool = false,
+ _reserved_bit_4: bool = false,
+ _reserved_bit_5: bool = false,
+ _reserved_bit_6: bool = false,
+ _reserved_bit_7: bool = false,
+ _reserved_bit_8: bool = false,
+ _reserved_bit_9: bool = false,
+ _reserved_bit_10: bool = false,
+ _reserved_bit_11: bool = false,
+ _reserved_bit_12: bool = false,
+ _reserved_bit_13: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ _reserved_bit_16: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+};
+pub const MemorySemantics = packed struct {
+ _reserved_bit_0: bool align(@alignOf(u32)) = false,
+ Acquire: bool = false,
+ Release: bool = false,
+ AcquireRelease: bool = false,
+ SequentiallyConsistent: bool = false,
+ _reserved_bit_5: bool = false,
+ UniformMemory: bool = false,
+ SubgroupMemory: bool = false,
+ WorkgroupMemory: bool = false,
+ CrossWorkgroupMemory: bool = false,
+ AtomicCounterMemory: bool = false,
+ ImageMemory: bool = false,
+ OutputMemory: bool = false,
+ MakeAvailable: bool = false,
+ MakeVisible: bool = false,
+ Volatile: bool = false,
+ _reserved_bit_16: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+};
+pub const MemoryAccess = packed struct {
+ Volatile: bool align(@alignOf(u32)) = false,
+ Aligned: bool = false,
+ Nontemporal: bool = false,
+ MakePointerAvailable: bool = false,
+ MakePointerVisible: bool = false,
+ NonPrivatePointer: bool = false,
+ _reserved_bit_6: bool = false,
+ _reserved_bit_7: bool = false,
+ _reserved_bit_8: bool = false,
+ _reserved_bit_9: bool = false,
+ _reserved_bit_10: bool = false,
+ _reserved_bit_11: bool = false,
+ _reserved_bit_12: bool = false,
+ _reserved_bit_13: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ _reserved_bit_16: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+};
+pub const KernelProfilingInfo = packed struct {
+ CmdExecTime: bool align(@alignOf(u32)) = false,
+ _reserved_bit_1: bool = false,
+ _reserved_bit_2: bool = false,
+ _reserved_bit_3: bool = false,
+ _reserved_bit_4: bool = false,
+ _reserved_bit_5: bool = false,
+ _reserved_bit_6: bool = false,
+ _reserved_bit_7: bool = false,
+ _reserved_bit_8: bool = false,
+ _reserved_bit_9: bool = false,
+ _reserved_bit_10: bool = false,
+ _reserved_bit_11: bool = false,
+ _reserved_bit_12: bool = false,
+ _reserved_bit_13: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ _reserved_bit_16: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+};
+pub const RayFlags = packed struct {
+ OpaqueKHR: bool align(@alignOf(u32)) = false,
+ NoOpaqueKHR: bool = false,
+ TerminateOnFirstHitKHR: bool = false,
+ SkipClosestHitShaderKHR: bool = false,
+ CullBackFacingTrianglesKHR: bool = false,
+ CullFrontFacingTrianglesKHR: bool = false,
+ CullOpaqueKHR: bool = false,
+ CullNoOpaqueKHR: bool = false,
+ SkipTrianglesKHR: bool = false,
+ SkipAABBsKHR: bool = false,
+ _reserved_bit_10: bool = false,
+ _reserved_bit_11: bool = false,
+ _reserved_bit_12: bool = false,
+ _reserved_bit_13: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ _reserved_bit_16: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+};
+pub const FragmentShadingRate = packed struct {
+ Vertical2Pixels: bool align(@alignOf(u32)) = false,
+ Vertical4Pixels: bool = false,
+ Horizontal2Pixels: bool = false,
+ Horizontal4Pixels: bool = false,
+ _reserved_bit_4: bool = false,
+ _reserved_bit_5: bool = false,
+ _reserved_bit_6: bool = false,
+ _reserved_bit_7: bool = false,
+ _reserved_bit_8: bool = false,
+ _reserved_bit_9: bool = false,
+ _reserved_bit_10: bool = false,
+ _reserved_bit_11: bool = false,
+ _reserved_bit_12: bool = false,
+ _reserved_bit_13: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ _reserved_bit_16: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+};
+pub const SourceLanguage = extern enum(u32) {
+ Unknown = 0,
+ ESSL = 1,
+ GLSL = 2,
+ OpenCL_C = 3,
+ OpenCL_CPP = 4,
+ HLSL = 5,
+ _,
+};
+pub const ExecutionModel = extern enum(u32) {
+ Vertex = 0,
+ TessellationControl = 1,
+ TessellationEvaluation = 2,
+ Geometry = 3,
+ Fragment = 4,
+ GLCompute = 5,
+ Kernel = 6,
+ TaskNV = 5267,
+ MeshNV = 5268,
+ RayGenerationNV = 5313,
+ RayGenerationKHR = 5313,
+ IntersectionNV = 5314,
+ IntersectionKHR = 5314,
+ AnyHitNV = 5315,
+ AnyHitKHR = 5315,
+ ClosestHitNV = 5316,
+ ClosestHitKHR = 5316,
+ MissNV = 5317,
+ MissKHR = 5317,
+ CallableNV = 5318,
+ CallableKHR = 5318,
+ _,
+};
+pub const AddressingModel = extern enum(u32) {
+ Logical = 0,
+ Physical32 = 1,
+ Physical64 = 2,
+ PhysicalStorageBuffer64 = 5348,
+ PhysicalStorageBuffer64EXT = 5348,
+ _,
+};
+pub const MemoryModel = extern enum(u32) {
+ Simple = 0,
+ GLSL450 = 1,
+ OpenCL = 2,
+ Vulkan = 3,
+ VulkanKHR = 3,
+ _,
+};
+pub const ExecutionMode = extern enum(u32) {
+ Invocations = 0,
+ SpacingEqual = 1,
+ SpacingFractionalEven = 2,
+ SpacingFractionalOdd = 3,
+ VertexOrderCw = 4,
+ VertexOrderCcw = 5,
+ PixelCenterInteger = 6,
+ OriginUpperLeft = 7,
+ OriginLowerLeft = 8,
+ EarlyFragmentTests = 9,
+ PointMode = 10,
+ Xfb = 11,
+ DepthReplacing = 12,
+ DepthGreater = 14,
+ DepthLess = 15,
+ DepthUnchanged = 16,
+ LocalSize = 17,
+ LocalSizeHint = 18,
+ InputPoints = 19,
+ InputLines = 20,
+ InputLinesAdjacency = 21,
+ Triangles = 22,
+ InputTrianglesAdjacency = 23,
+ Quads = 24,
+ Isolines = 25,
+ OutputVertices = 26,
+ OutputPoints = 27,
+ OutputLineStrip = 28,
+ OutputTriangleStrip = 29,
+ VecTypeHint = 30,
+ ContractionOff = 31,
+ Initializer = 33,
+ Finalizer = 34,
+ SubgroupSize = 35,
+ SubgroupsPerWorkgroup = 36,
+ SubgroupsPerWorkgroupId = 37,
+ LocalSizeId = 38,
+ LocalSizeHintId = 39,
+ PostDepthCoverage = 4446,
+ DenormPreserve = 4459,
+ DenormFlushToZero = 4460,
+ SignedZeroInfNanPreserve = 4461,
+ RoundingModeRTE = 4462,
+ RoundingModeRTZ = 4463,
+ StencilRefReplacingEXT = 5027,
+ OutputLinesNV = 5269,
+ OutputPrimitivesNV = 5270,
+ DerivativeGroupQuadsNV = 5289,
+ DerivativeGroupLinearNV = 5290,
+ OutputTrianglesNV = 5298,
+ PixelInterlockOrderedEXT = 5366,
+ PixelInterlockUnorderedEXT = 5367,
+ SampleInterlockOrderedEXT = 5368,
+ SampleInterlockUnorderedEXT = 5369,
+ ShadingRateInterlockOrderedEXT = 5370,
+ ShadingRateInterlockUnorderedEXT = 5371,
+ MaxWorkgroupSizeINTEL = 5893,
+ MaxWorkDimINTEL = 5894,
+ NoGlobalOffsetINTEL = 5895,
+ NumSIMDWorkitemsINTEL = 5896,
+ _,
+};
+pub const StorageClass = extern enum(u32) {
+ UniformConstant = 0,
+ Input = 1,
+ Uniform = 2,
+ Output = 3,
+ Workgroup = 4,
+ CrossWorkgroup = 5,
+ Private = 6,
+ Function = 7,
+ Generic = 8,
+ PushConstant = 9,
+ AtomicCounter = 10,
+ Image = 11,
+ StorageBuffer = 12,
+ CallableDataNV = 5328,
+ CallableDataKHR = 5328,
+ IncomingCallableDataNV = 5329,
+ IncomingCallableDataKHR = 5329,
+ RayPayloadNV = 5338,
+ RayPayloadKHR = 5338,
+ HitAttributeNV = 5339,
+ HitAttributeKHR = 5339,
+ IncomingRayPayloadNV = 5342,
+ IncomingRayPayloadKHR = 5342,
+ ShaderRecordBufferNV = 5343,
+ ShaderRecordBufferKHR = 5343,
+ PhysicalStorageBuffer = 5349,
+ PhysicalStorageBufferEXT = 5349,
+ CodeSectionINTEL = 5605,
+ _,
+};
+pub const Dim = extern enum(u32) {
+ @"1D" = 0,
+ @"2D" = 1,
+ @"3D" = 2,
+ Cube = 3,
+ Rect = 4,
+ Buffer = 5,
+ SubpassData = 6,
+ _,
+};
+pub const SamplerAddressingMode = extern enum(u32) {
+ None = 0,
+ ClampToEdge = 1,
+ Clamp = 2,
+ Repeat = 3,
+ RepeatMirrored = 4,
+ _,
+};
+pub const SamplerFilterMode = extern enum(u32) {
+ Nearest = 0,
+ Linear = 1,
+ _,
+};
+pub const ImageFormat = extern enum(u32) {
+ Unknown = 0,
+ Rgba32f = 1,
+ Rgba16f = 2,
+ R32f = 3,
+ Rgba8 = 4,
+ Rgba8Snorm = 5,
+ Rg32f = 6,
+ Rg16f = 7,
+ R11fG11fB10f = 8,
+ R16f = 9,
+ Rgba16 = 10,
+ Rgb10A2 = 11,
+ Rg16 = 12,
+ Rg8 = 13,
+ R16 = 14,
+ R8 = 15,
+ Rgba16Snorm = 16,
+ Rg16Snorm = 17,
+ Rg8Snorm = 18,
+ R16Snorm = 19,
+ R8Snorm = 20,
+ Rgba32i = 21,
+ Rgba16i = 22,
+ Rgba8i = 23,
+ R32i = 24,
+ Rg32i = 25,
+ Rg16i = 26,
+ Rg8i = 27,
+ R16i = 28,
+ R8i = 29,
+ Rgba32ui = 30,
+ Rgba16ui = 31,
+ Rgba8ui = 32,
+ R32ui = 33,
+ Rgb10a2ui = 34,
+ Rg32ui = 35,
+ Rg16ui = 36,
+ Rg8ui = 37,
+ R16ui = 38,
+ R8ui = 39,
+ R64ui = 40,
+ R64i = 41,
+ _,
+};
+pub const ImageChannelOrder = extern enum(u32) {
+ R = 0,
+ A = 1,
+ RG = 2,
+ RA = 3,
+ RGB = 4,
+ RGBA = 5,
+ BGRA = 6,
+ ARGB = 7,
+ Intensity = 8,
+ Luminance = 9,
+ Rx = 10,
+ RGx = 11,
+ RGBx = 12,
+ Depth = 13,
+ DepthStencil = 14,
+ sRGB = 15,
+ sRGBx = 16,
+ sRGBA = 17,
+ sBGRA = 18,
+ ABGR = 19,
+ _,
+};
+pub const ImageChannelDataType = extern enum(u32) {
+ SnormInt8 = 0,
+ SnormInt16 = 1,
+ UnormInt8 = 2,
+ UnormInt16 = 3,
+ UnormShort565 = 4,
+ UnormShort555 = 5,
+ UnormInt101010 = 6,
+ SignedInt8 = 7,
+ SignedInt16 = 8,
+ SignedInt32 = 9,
+ UnsignedInt8 = 10,
+ UnsignedInt16 = 11,
+ UnsignedInt32 = 12,
+ HalfFloat = 13,
+ Float = 14,
+ UnormInt24 = 15,
+ UnormInt101010_2 = 16,
+ _,
+};
+pub const FPRoundingMode = extern enum(u32) {
+ RTE = 0,
+ RTZ = 1,
+ RTP = 2,
+ RTN = 3,
+ _,
+};
+pub const LinkageType = extern enum(u32) {
+ Export = 0,
+ Import = 1,
+ _,
+};
+pub const AccessQualifier = extern enum(u32) {
+ ReadOnly = 0,
+ WriteOnly = 1,
+ ReadWrite = 2,
+ _,
+};
+pub const FunctionParameterAttribute = extern enum(u32) {
+ Zext = 0,
+ Sext = 1,
+ ByVal = 2,
+ Sret = 3,
+ NoAlias = 4,
+ NoCapture = 5,
+ NoWrite = 6,
+ NoReadWrite = 7,
+ _,
+};
+pub const Decoration = extern enum(u32) {
+ RelaxedPrecision = 0,
+ SpecId = 1,
+ Block = 2,
+ BufferBlock = 3,
+ RowMajor = 4,
+ ColMajor = 5,
+ ArrayStride = 6,
+ MatrixStride = 7,
+ GLSLShared = 8,
+ GLSLPacked = 9,
+ CPacked = 10,
+ BuiltIn = 11,
+ NoPerspective = 13,
+ Flat = 14,
+ Patch = 15,
+ Centroid = 16,
+ Sample = 17,
+ Invariant = 18,
+ Restrict = 19,
+ Aliased = 20,
+ Volatile = 21,
+ Constant = 22,
+ Coherent = 23,
+ NonWritable = 24,
+ NonReadable = 25,
+ Uniform = 26,
+ UniformId = 27,
+ SaturatedConversion = 28,
+ Stream = 29,
+ Location = 30,
+ Component = 31,
+ Index = 32,
+ Binding = 33,
+ DescriptorSet = 34,
+ Offset = 35,
+ XfbBuffer = 36,
+ XfbStride = 37,
+ FuncParamAttr = 38,
+ FPRoundingMode = 39,
+ FPFastMathMode = 40,
+ LinkageAttributes = 41,
+ NoContraction = 42,
+ InputAttachmentIndex = 43,
+ Alignment = 44,
+ MaxByteOffset = 45,
+ AlignmentId = 46,
+ MaxByteOffsetId = 47,
+ NoSignedWrap = 4469,
+ NoUnsignedWrap = 4470,
+ ExplicitInterpAMD = 4999,
+ OverrideCoverageNV = 5248,
+ PassthroughNV = 5250,
+ ViewportRelativeNV = 5252,
+ SecondaryViewportRelativeNV = 5256,
+ PerPrimitiveNV = 5271,
+ PerViewNV = 5272,
+ PerTaskNV = 5273,
+ PerVertexNV = 5285,
+ NonUniform = 5300,
+ NonUniformEXT = 5300,
+ RestrictPointer = 5355,
+ RestrictPointerEXT = 5355,
+ AliasedPointer = 5356,
+ AliasedPointerEXT = 5356,
+ ReferencedIndirectlyINTEL = 5602,
+ CounterBuffer = 5634,
+ HlslCounterBufferGOOGLE = 5634,
+ UserSemantic = 5635,
+ HlslSemanticGOOGLE = 5635,
+ UserTypeGOOGLE = 5636,
+ RegisterINTEL = 5825,
+ MemoryINTEL = 5826,
+ NumbanksINTEL = 5827,
+ BankwidthINTEL = 5828,
+ MaxPrivateCopiesINTEL = 5829,
+ SinglepumpINTEL = 5830,
+ DoublepumpINTEL = 5831,
+ MaxReplicatesINTEL = 5832,
+ SimpleDualPortINTEL = 5833,
+ MergeINTEL = 5834,
+ BankBitsINTEL = 5835,
+ ForcePow2DepthINTEL = 5836,
+ _,
+};
+pub const BuiltIn = extern enum(u32) {
+ Position = 0,
+ PointSize = 1,
+ ClipDistance = 3,
+ CullDistance = 4,
+ VertexId = 5,
+ InstanceId = 6,
+ PrimitiveId = 7,
+ InvocationId = 8,
+ Layer = 9,
+ ViewportIndex = 10,
+ TessLevelOuter = 11,
+ TessLevelInner = 12,
+ TessCoord = 13,
+ PatchVertices = 14,
+ FragCoord = 15,
+ PointCoord = 16,
+ FrontFacing = 17,
+ SampleId = 18,
+ SamplePosition = 19,
+ SampleMask = 20,
+ FragDepth = 22,
+ HelperInvocation = 23,
+ NumWorkgroups = 24,
+ WorkgroupSize = 25,
+ WorkgroupId = 26,
+ LocalInvocationId = 27,
+ GlobalInvocationId = 28,
+ LocalInvocationIndex = 29,
+ WorkDim = 30,
+ GlobalSize = 31,
+ EnqueuedWorkgroupSize = 32,
+ GlobalOffset = 33,
+ GlobalLinearId = 34,
+ SubgroupSize = 36,
+ SubgroupMaxSize = 37,
+ NumSubgroups = 38,
+ NumEnqueuedSubgroups = 39,
+ SubgroupId = 40,
+ SubgroupLocalInvocationId = 41,
+ VertexIndex = 42,
+ InstanceIndex = 43,
+ SubgroupEqMask = 4416,
+ SubgroupGeMask = 4417,
+ SubgroupGtMask = 4418,
+ SubgroupLeMask = 4419,
+ SubgroupLtMask = 4420,
+ SubgroupEqMaskKHR = 4416,
+ SubgroupGeMaskKHR = 4417,
+ SubgroupGtMaskKHR = 4418,
+ SubgroupLeMaskKHR = 4419,
+ SubgroupLtMaskKHR = 4420,
+ BaseVertex = 4424,
+ BaseInstance = 4425,
+ DrawIndex = 4426,
+ PrimitiveShadingRateKHR = 4432,
+ DeviceIndex = 4438,
+ ViewIndex = 4440,
+ ShadingRateKHR = 4444,
+ BaryCoordNoPerspAMD = 4992,
+ BaryCoordNoPerspCentroidAMD = 4993,
+ BaryCoordNoPerspSampleAMD = 4994,
+ BaryCoordSmoothAMD = 4995,
+ BaryCoordSmoothCentroidAMD = 4996,
+ BaryCoordSmoothSampleAMD = 4997,
+ BaryCoordPullModelAMD = 4998,
+ FragStencilRefEXT = 5014,
+ ViewportMaskNV = 5253,
+ SecondaryPositionNV = 5257,
+ SecondaryViewportMaskNV = 5258,
+ PositionPerViewNV = 5261,
+ ViewportMaskPerViewNV = 5262,
+ FullyCoveredEXT = 5264,
+ TaskCountNV = 5274,
+ PrimitiveCountNV = 5275,
+ PrimitiveIndicesNV = 5276,
+ ClipDistancePerViewNV = 5277,
+ CullDistancePerViewNV = 5278,
+ LayerPerViewNV = 5279,
+ MeshViewCountNV = 5280,
+ MeshViewIndicesNV = 5281,
+ BaryCoordNV = 5286,
+ BaryCoordNoPerspNV = 5287,
+ FragSizeEXT = 5292,
+ FragmentSizeNV = 5292,
+ FragInvocationCountEXT = 5293,
+ InvocationsPerPixelNV = 5293,
+ LaunchIdNV = 5319,
+ LaunchIdKHR = 5319,
+ LaunchSizeNV = 5320,
+ LaunchSizeKHR = 5320,
+ WorldRayOriginNV = 5321,
+ WorldRayOriginKHR = 5321,
+ WorldRayDirectionNV = 5322,
+ WorldRayDirectionKHR = 5322,
+ ObjectRayOriginNV = 5323,
+ ObjectRayOriginKHR = 5323,
+ ObjectRayDirectionNV = 5324,
+ ObjectRayDirectionKHR = 5324,
+ RayTminNV = 5325,
+ RayTminKHR = 5325,
+ RayTmaxNV = 5326,
+ RayTmaxKHR = 5326,
+ InstanceCustomIndexNV = 5327,
+ InstanceCustomIndexKHR = 5327,
+ ObjectToWorldNV = 5330,
+ ObjectToWorldKHR = 5330,
+ WorldToObjectNV = 5331,
+ WorldToObjectKHR = 5331,
+ HitTNV = 5332,
+ HitKindNV = 5333,
+ HitKindKHR = 5333,
+ IncomingRayFlagsNV = 5351,
+ IncomingRayFlagsKHR = 5351,
+ RayGeometryIndexKHR = 5352,
+ WarpsPerSMNV = 5374,
+ SMCountNV = 5375,
+ WarpIDNV = 5376,
+ SMIDNV = 5377,
+ _,
+};
+pub const Scope = extern enum(u32) {
+ CrossDevice = 0,
+ Device = 1,
+ Workgroup = 2,
+ Subgroup = 3,
+ Invocation = 4,
+ QueueFamily = 5,
+ QueueFamilyKHR = 5,
+ ShaderCallKHR = 6,
+ _,
+};
+pub const GroupOperation = extern enum(u32) {
+ Reduce = 0,
+ InclusiveScan = 1,
+ ExclusiveScan = 2,
+ ClusteredReduce = 3,
+ PartitionedReduceNV = 6,
+ PartitionedInclusiveScanNV = 7,
+ PartitionedExclusiveScanNV = 8,
+ _,
+};
+pub const KernelEnqueueFlags = extern enum(u32) {
+ NoWait = 0,
+ WaitKernel = 1,
+ WaitWorkGroup = 2,
+ _,
+};
+pub const Capability = extern enum(u32) {
+ Matrix = 0,
+ Shader = 1,
+ Geometry = 2,
+ Tessellation = 3,
+ Addresses = 4,
+ Linkage = 5,
+ Kernel = 6,
+ Vector16 = 7,
+ Float16Buffer = 8,
+ Float16 = 9,
+ Float64 = 10,
+ Int64 = 11,
+ Int64Atomics = 12,
+ ImageBasic = 13,
+ ImageReadWrite = 14,
+ ImageMipmap = 15,
+ Pipes = 17,
+ Groups = 18,
+ DeviceEnqueue = 19,
+ LiteralSampler = 20,
+ AtomicStorage = 21,
+ Int16 = 22,
+ TessellationPointSize = 23,
+ GeometryPointSize = 24,
+ ImageGatherExtended = 25,
+ StorageImageMultisample = 27,
+ UniformBufferArrayDynamicIndexing = 28,
+ SampledImageArrayDynamicIndexing = 29,
+ StorageBufferArrayDynamicIndexing = 30,
+ StorageImageArrayDynamicIndexing = 31,
+ ClipDistance = 32,
+ CullDistance = 33,
+ ImageCubeArray = 34,
+ SampleRateShading = 35,
+ ImageRect = 36,
+ SampledRect = 37,
+ GenericPointer = 38,
+ Int8 = 39,
+ InputAttachment = 40,
+ SparseResidency = 41,
+ MinLod = 42,
+ Sampled1D = 43,
+ Image1D = 44,
+ SampledCubeArray = 45,
+ SampledBuffer = 46,
+ ImageBuffer = 47,
+ ImageMSArray = 48,
+ StorageImageExtendedFormats = 49,
+ ImageQuery = 50,
+ DerivativeControl = 51,
+ InterpolationFunction = 52,
+ TransformFeedback = 53,
+ GeometryStreams = 54,
+ StorageImageReadWithoutFormat = 55,
+ StorageImageWriteWithoutFormat = 56,
+ MultiViewport = 57,
+ SubgroupDispatch = 58,
+ NamedBarrier = 59,
+ PipeStorage = 60,
+ GroupNonUniform = 61,
+ GroupNonUniformVote = 62,
+ GroupNonUniformArithmetic = 63,
+ GroupNonUniformBallot = 64,
+ GroupNonUniformShuffle = 65,
+ GroupNonUniformShuffleRelative = 66,
+ GroupNonUniformClustered = 67,
+ GroupNonUniformQuad = 68,
+ ShaderLayer = 69,
+ ShaderViewportIndex = 70,
+ FragmentShadingRateKHR = 4422,
+ SubgroupBallotKHR = 4423,
+ DrawParameters = 4427,
+ SubgroupVoteKHR = 4431,
+ StorageBuffer16BitAccess = 4433,
+ StorageUniformBufferBlock16 = 4433,
+ UniformAndStorageBuffer16BitAccess = 4434,
+ StorageUniform16 = 4434,
+ StoragePushConstant16 = 4435,
+ StorageInputOutput16 = 4436,
+ DeviceGroup = 4437,
+ MultiView = 4439,
+ VariablePointersStorageBuffer = 4441,
+ VariablePointers = 4442,
+ AtomicStorageOps = 4445,
+ SampleMaskPostDepthCoverage = 4447,
+ StorageBuffer8BitAccess = 4448,
+ UniformAndStorageBuffer8BitAccess = 4449,
+ StoragePushConstant8 = 4450,
+ DenormPreserve = 4464,
+ DenormFlushToZero = 4465,
+ SignedZeroInfNanPreserve = 4466,
+ RoundingModeRTE = 4467,
+ RoundingModeRTZ = 4468,
+ RayQueryProvisionalKHR = 4471,
+ RayQueryKHR = 4472,
+ RayTraversalPrimitiveCullingKHR = 4478,
+ RayTracingKHR = 4479,
+ Float16ImageAMD = 5008,
+ ImageGatherBiasLodAMD = 5009,
+ FragmentMaskAMD = 5010,
+ StencilExportEXT = 5013,
+ ImageReadWriteLodAMD = 5015,
+ Int64ImageEXT = 5016,
+ ShaderClockKHR = 5055,
+ SampleMaskOverrideCoverageNV = 5249,
+ GeometryShaderPassthroughNV = 5251,
+ ShaderViewportIndexLayerEXT = 5254,
+ ShaderViewportIndexLayerNV = 5254,
+ ShaderViewportMaskNV = 5255,
+ ShaderStereoViewNV = 5259,
+ PerViewAttributesNV = 5260,
+ FragmentFullyCoveredEXT = 5265,
+ MeshShadingNV = 5266,
+ ImageFootprintNV = 5282,
+ FragmentBarycentricNV = 5284,
+ ComputeDerivativeGroupQuadsNV = 5288,
+ FragmentDensityEXT = 5291,
+ ShadingRateNV = 5291,
+ GroupNonUniformPartitionedNV = 5297,
+ ShaderNonUniform = 5301,
+ ShaderNonUniformEXT = 5301,
+ RuntimeDescriptorArray = 5302,
+ RuntimeDescriptorArrayEXT = 5302,
+ InputAttachmentArrayDynamicIndexing = 5303,
+ InputAttachmentArrayDynamicIndexingEXT = 5303,
+ UniformTexelBufferArrayDynamicIndexing = 5304,
+ UniformTexelBufferArrayDynamicIndexingEXT = 5304,
+ StorageTexelBufferArrayDynamicIndexing = 5305,
+ StorageTexelBufferArrayDynamicIndexingEXT = 5305,
+ UniformBufferArrayNonUniformIndexing = 5306,
+ UniformBufferArrayNonUniformIndexingEXT = 5306,
+ SampledImageArrayNonUniformIndexing = 5307,
+ SampledImageArrayNonUniformIndexingEXT = 5307,
+ StorageBufferArrayNonUniformIndexing = 5308,
+ StorageBufferArrayNonUniformIndexingEXT = 5308,
+ StorageImageArrayNonUniformIndexing = 5309,
+ StorageImageArrayNonUniformIndexingEXT = 5309,
+ InputAttachmentArrayNonUniformIndexing = 5310,
+ InputAttachmentArrayNonUniformIndexingEXT = 5310,
+ UniformTexelBufferArrayNonUniformIndexing = 5311,
+ UniformTexelBufferArrayNonUniformIndexingEXT = 5311,
+ StorageTexelBufferArrayNonUniformIndexing = 5312,
+ StorageTexelBufferArrayNonUniformIndexingEXT = 5312,
+ RayTracingNV = 5340,
+ VulkanMemoryModel = 5345,
+ VulkanMemoryModelKHR = 5345,
+ VulkanMemoryModelDeviceScope = 5346,
+ VulkanMemoryModelDeviceScopeKHR = 5346,
+ PhysicalStorageBufferAddresses = 5347,
+ PhysicalStorageBufferAddressesEXT = 5347,
+ ComputeDerivativeGroupLinearNV = 5350,
+ RayTracingProvisionalKHR = 5353,
+ CooperativeMatrixNV = 5357,
+ FragmentShaderSampleInterlockEXT = 5363,
+ FragmentShaderShadingRateInterlockEXT = 5372,
+ ShaderSMBuiltinsNV = 5373,
+ FragmentShaderPixelInterlockEXT = 5378,
+ DemoteToHelperInvocationEXT = 5379,
+ SubgroupShuffleINTEL = 5568,
+ SubgroupBufferBlockIOINTEL = 5569,
+ SubgroupImageBlockIOINTEL = 5570,
+ SubgroupImageMediaBlockIOINTEL = 5579,
+ IntegerFunctions2INTEL = 5584,
+ FunctionPointersINTEL = 5603,
+ IndirectReferencesINTEL = 5604,
+ SubgroupAvcMotionEstimationINTEL = 5696,
+ SubgroupAvcMotionEstimationIntraINTEL = 5697,
+ SubgroupAvcMotionEstimationChromaINTEL = 5698,
+ FPGAMemoryAttributesINTEL = 5824,
+ UnstructuredLoopControlsINTEL = 5886,
+ FPGALoopControlsINTEL = 5888,
+ KernelAttributesINTEL = 5892,
+ FPGAKernelAttributesINTEL = 5897,
+ BlockingPipesINTEL = 5945,
+ FPGARegINTEL = 5948,
+ AtomicFloat32AddEXT = 6033,
+ AtomicFloat64AddEXT = 6034,
+ _,
+};
+pub const RayQueryIntersection = extern enum(u32) {
+ RayQueryCandidateIntersectionKHR = 0,
+ RayQueryCommittedIntersectionKHR = 1,
+ _,
+};
+pub const RayQueryCommittedIntersectionType = extern enum(u32) {
+ RayQueryCommittedIntersectionNoneKHR = 0,
+ RayQueryCommittedIntersectionTriangleKHR = 1,
+ RayQueryCommittedIntersectionGeneratedKHR = 2,
+ _,
+};
+pub const RayQueryCandidateIntersectionType = extern enum(u32) {
+ RayQueryCandidateIntersectionTriangleKHR = 0,
+ RayQueryCandidateIntersectionAABBKHR = 1,
+ _,
+};
diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig
index 7297ea1d54..34e0b2f9b5 100644
--- a/src/codegen/wasm.zig
+++ b/src/codegen/wasm.zig
@@ -4,139 +4,523 @@ const ArrayList = std.ArrayList;
const assert = std.debug.assert;
const leb = std.leb;
const mem = std.mem;
+const wasm = std.wasm;
const Module = @import("../Module.zig");
const Decl = Module.Decl;
-const Inst = @import("../ir.zig").Inst;
+const ir = @import("../ir.zig");
+const Inst = ir.Inst;
const Type = @import("../type.zig").Type;
const Value = @import("../value.zig").Value;
+const Compilation = @import("../Compilation.zig");
+const AnyMCValue = @import("../codegen.zig").AnyMCValue;
-fn genValtype(ty: Type) u8 {
- return switch (ty.tag()) {
- .u32, .i32 => 0x7F,
- .u64, .i64 => 0x7E,
- .f32 => 0x7D,
- .f64 => 0x7C,
- else => @panic("TODO: Implement more types for wasm."),
- };
-}
-
-pub fn genFunctype(buf: *ArrayList(u8), decl: *Decl) !void {
- const ty = decl.typed_value.most_recent.typed_value.ty;
- const writer = buf.writer();
-
- // functype magic
- try writer.writeByte(0x60);
-
- // param types
- try leb.writeULEB128(writer, @intCast(u32, ty.fnParamLen()));
- if (ty.fnParamLen() != 0) {
- const params = try buf.allocator.alloc(Type, ty.fnParamLen());
- defer buf.allocator.free(params);
- ty.fnParamTypes(params);
- for (params) |param_type| try writer.writeByte(genValtype(param_type));
- }
-
- // return type
- const return_type = ty.fnReturnType();
- switch (return_type.tag()) {
- .void, .noreturn => try leb.writeULEB128(writer, @as(u32, 0)),
- else => {
- try leb.writeULEB128(writer, @as(u32, 1));
- try writer.writeByte(genValtype(return_type));
- },
- }
-}
-
-pub fn genCode(buf: *ArrayList(u8), decl: *Decl) !void {
- assert(buf.items.len == 0);
- const writer = buf.writer();
-
- // Reserve space to write the size after generating the code
- try buf.resize(5);
-
- // Write the size of the locals vec
- // TODO: implement locals
- try leb.writeULEB128(writer, @as(u32, 0));
-
- // Write instructions
- // TODO: check for and handle death of instructions
- const tv = decl.typed_value.most_recent.typed_value;
- const mod_fn = tv.val.cast(Value.Payload.Function).?.func;
- for (mod_fn.analysis.success.instructions) |inst| try genInst(buf, decl, inst);
-
- // Write 'end' opcode
- try writer.writeByte(0x0B);
-
- // Fill in the size of the generated code to the reserved space at the
- // beginning of the buffer.
- const size = buf.items.len - 5 + decl.fn_link.wasm.?.idx_refs.items.len * 5;
- leb.writeUnsignedFixed(5, buf.items[0..5], @intCast(u32, size));
-}
-
-fn genInst(buf: *ArrayList(u8), decl: *Decl, inst: *Inst) !void {
- return switch (inst.tag) {
- .call => genCall(buf, decl, inst.castTag(.call).?),
- .constant => genConstant(buf, decl, inst.castTag(.constant).?),
- .dbg_stmt => {},
- .ret => genRet(buf, decl, inst.castTag(.ret).?),
- .retvoid => {},
- else => error.TODOImplementMoreWasmCodegen,
+/// Wasm Value, created when generating an instruction
+const WValue = union(enum) {
+ none: void,
+ /// Index of the local variable
+ local: u32,
+ /// Instruction holding a constant `Value`
+ constant: *Inst,
+ /// Offset position in the list of bytecode instructions
+ code_offset: usize,
+ /// The label of the block, used by breaks to find its relative distance
+ block_idx: u32,
+};
+
+/// Hashmap to store generated `WValue` for each `Inst`
+pub const ValueTable = std.AutoHashMapUnmanaged(*Inst, WValue);
+
+/// Context holds the state for generating the `Code` section
+/// of wasm that belongs to a function
+pub const Context = struct {
+ /// Reference to the function declaration the code
+ /// section belongs to
+ decl: *Decl,
+ gpa: *mem.Allocator,
+ /// Table to save `WValue`'s generated by an `Inst`
+ values: ValueTable,
+ /// `code` contains the wasm bytecode belonging to the 'code' section.
+ code: ArrayList(u8),
+ /// Contains the generated function type bytecode for the current function
+ /// found in `decl`
+ func_type_data: ArrayList(u8),
+ /// The index the next generated local will have
+ /// NOTE: arguments share the index space with locals, so the first local
+ /// will get the index that comes after the last argument's index
+ local_index: u32 = 0,
+ /// If codegen fails, an error message will be allocated and saved in `err_msg`
+ err_msg: *Module.ErrorMsg,
+ /// Current block depth. Used to calculate the relative distance between a break
+ /// and the block it targets
+ block_depth: u32 = 0,
+ /// List of all locals' types generated throughout this declaration
+ /// used to emit locals count at start of 'code' section.
+ locals: std.ArrayListUnmanaged(u8),
+
+ const InnerError = error{
+ OutOfMemory,
+ CodegenFail,
};
-}
-
-fn genConstant(buf: *ArrayList(u8), decl: *Decl, inst: *Inst.Constant) !void {
- const writer = buf.writer();
- switch (inst.base.ty.tag()) {
- .u32 => {
- try writer.writeByte(0x41); // i32.const
- try leb.writeILEB128(writer, inst.val.toUnsignedInt());
- },
- .i32 => {
- try writer.writeByte(0x41); // i32.const
- try leb.writeILEB128(writer, inst.val.toSignedInt());
- },
- .u64 => {
- try writer.writeByte(0x42); // i64.const
- try leb.writeILEB128(writer, inst.val.toUnsignedInt());
- },
- .i64 => {
- try writer.writeByte(0x42); // i64.const
- try leb.writeILEB128(writer, inst.val.toSignedInt());
- },
- .f32 => {
- try writer.writeByte(0x43); // f32.const
- // TODO: enforce LE byte order
- try writer.writeAll(mem.asBytes(&inst.val.toFloat(f32)));
- },
- .f64 => {
- try writer.writeByte(0x44); // f64.const
- // TODO: enforce LE byte order
- try writer.writeAll(mem.asBytes(&inst.val.toFloat(f64)));
- },
- .void => {},
- else => return error.TODOImplementMoreWasmCodegen,
- }
-}
-
-fn genRet(buf: *ArrayList(u8), decl: *Decl, inst: *Inst.UnOp) !void {
- try genInst(buf, decl, inst.operand);
-}
-
-fn genCall(buf: *ArrayList(u8), decl: *Decl, inst: *Inst.Call) !void {
- const func_inst = inst.func.castTag(.constant).?;
- const func_val = func_inst.val.cast(Value.Payload.Function).?;
- const target = func_val.func.owner_decl;
- const target_ty = target.typed_value.most_recent.typed_value.ty;
-
- if (inst.args.len != 0) return error.TODOImplementMoreWasmCodegen;
-
- try buf.append(0x10); // call
-
- // The function index immediate argument will be filled in using this data
- // in link.Wasm.flush().
- try decl.fn_link.wasm.?.idx_refs.append(buf.allocator, .{
- .offset = @intCast(u32, buf.items.len),
- .decl = target,
- });
-}
+
+ pub fn deinit(self: *Context) void {
+ self.values.deinit(self.gpa);
+ self.locals.deinit(self.gpa);
+ self.* = undefined;
+ }
+
+ /// Sets `err_msg` on `Context` and returns `error.CodegenFail` which is caught in link/Wasm.zig
+ fn fail(self: *Context, src: usize, comptime fmt: []const u8, args: anytype) InnerError {
+ self.err_msg = try Module.ErrorMsg.create(self.gpa, .{
+ .file_scope = self.decl.getFileScope(),
+ .byte_offset = src,
+ }, fmt, args);
+ return error.CodegenFail;
+ }
+
+ /// Resolves the `WValue` for the given instruction `inst`
+ /// When the given instruction has a `Value`, it returns a constant instead
+ fn resolveInst(self: Context, inst: *Inst) WValue {
+ if (!inst.ty.hasCodeGenBits()) return .none;
+
+ if (inst.value()) |_| {
+ return WValue{ .constant = inst };
+ }
+
+ return self.values.get(inst).?; // Instruction does not dominate all uses!
+ }
+
+ /// Using a given `Type`, returns the corresponding wasm value type
+ fn genValtype(self: *Context, src: usize, ty: Type) InnerError!u8 {
+ return switch (ty.tag()) {
+ .f32 => wasm.valtype(.f32),
+ .f64 => wasm.valtype(.f64),
+ .u32, .i32 => wasm.valtype(.i32),
+ .u64, .i64 => wasm.valtype(.i64),
+ else => self.fail(src, "TODO - Wasm genValtype for type '{s}'", .{ty.tag()}),
+ };
+ }
+
+ /// Using a given `Type`, returns the corresponding wasm value type
+ /// Unlike `genValtype`, this also allows `void` in order to create a block
+ /// with no return type
+ fn genBlockType(self: *Context, src: usize, ty: Type) InnerError!u8 {
+ return switch (ty.tag()) {
+ .void, .noreturn => wasm.block_empty,
+ else => self.genValtype(src, ty),
+ };
+ }
+
+ /// Emits the bytecode corresponding to the given `WValue` in `val`
+ fn emitWValue(self: *Context, val: WValue) InnerError!void {
+ const writer = self.code.writer();
+ switch (val) {
+ .block_idx => unreachable,
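+            // `none` emits nothing; a `code_offset` value was already emitted onto the stack (see genCmp)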
+ .none, .code_offset => {},
+ .local => |idx| {
+ try writer.writeByte(wasm.opcode(.local_get));
+ try leb.writeULEB128(writer, idx);
+ },
+            .constant => |inst| try self.emitConstant(inst.castTag(.constant).?), // pushes a new constant onto the stack
+ }
+ }
+
+ fn genFunctype(self: *Context) InnerError!void {
+ const ty = self.decl.typed_value.most_recent.typed_value.ty;
+ const writer = self.func_type_data.writer();
+
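+        // A wasm functype is encoded as the byte 0x60, followed by a vector of
+        // parameter types and a vector of result types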
+ try writer.writeByte(wasm.function_type);
+
+ // param types
+ try leb.writeULEB128(writer, @intCast(u32, ty.fnParamLen()));
+ if (ty.fnParamLen() != 0) {
+ const params = try self.gpa.alloc(Type, ty.fnParamLen());
+ defer self.gpa.free(params);
+ ty.fnParamTypes(params);
+ for (params) |param_type| {
+ // Can we maybe get the source index of each param?
+ const val_type = try self.genValtype(self.decl.src(), param_type);
+ try writer.writeByte(val_type);
+ }
+ }
+
+ // return type
+ const return_type = ty.fnReturnType();
+ switch (return_type.tag()) {
+ .void, .noreturn => try leb.writeULEB128(writer, @as(u32, 0)),
+ else => |ret_type| {
+ try leb.writeULEB128(writer, @as(u32, 1));
+ // Can we maybe get the source index of the return type?
+ const val_type = try self.genValtype(self.decl.src(), return_type);
+ try writer.writeByte(val_type);
+ },
+ }
+ }
+
+ /// Generates the wasm bytecode for the function declaration belonging to `Context`
+ pub fn gen(self: *Context) InnerError!void {
+ assert(self.code.items.len == 0);
+ try self.genFunctype();
+
+ // Write instructions
+ // TODO: check for and handle death of instructions
+ const tv = self.decl.typed_value.most_recent.typed_value;
+ const mod_fn = blk: {
+ if (tv.val.castTag(.function)) |func| break :blk func.data;
+ if (tv.val.castTag(.extern_fn)) |ext_fn| return; // don't need codegen for extern functions
+ return self.fail(self.decl.src(), "TODO: Wasm codegen for decl type '{s}'", .{tv.ty.tag()});
+ };
+
+ // Reserve space to write the size after generating the code as well as space for locals count
+ try self.code.resize(10);
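+        // items[0..5] will hold the body size and items[5..10] the locals vector length,
+        // both as fixed-width 5-byte ULEB128 so they can be patched once known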
+
+ try self.genBody(mod_fn.body);
+
+ // finally, write our local types at the 'offset' position
+ {
+ leb.writeUnsignedFixed(5, self.code.items[5..10], @intCast(u32, self.locals.items.len));
+
+ // offset into 'code' section where we will put our locals types
+ var local_offset: usize = 10;
+
+            // Emit one locals-vector entry per local: a fixed-width count of 1
+            // followed by the local's valtype byte
+ for (self.locals.items) |local| {
+ var buf: [6]u8 = undefined;
+ leb.writeUnsignedFixed(5, buf[0..5], @as(u32, 1));
+ buf[5] = local;
+ try self.code.insertSlice(local_offset, &buf);
+ local_offset += 6;
+ }
+ }
+
+ const writer = self.code.writer();
+ try writer.writeByte(wasm.opcode(.end));
+
+        // Fill in the size of the generated code into the reserved space at the
+        // beginning of the buffer. The function index immediates have not been
+        // emitted yet, so also account for the 5 bytes each of them will occupy
+        // once link.Wasm.flush() fills them in.
+ const size = self.code.items.len - 5 + self.decl.fn_link.wasm.?.idx_refs.items.len * 5;
+ leb.writeUnsignedFixed(5, self.code.items[0..5], @intCast(u32, size));
+ }
+
+ fn genInst(self: *Context, inst: *Inst) InnerError!WValue {
+ return switch (inst.tag) {
+ .add => self.genAdd(inst.castTag(.add).?),
+ .alloc => self.genAlloc(inst.castTag(.alloc).?),
+ .arg => self.genArg(inst.castTag(.arg).?),
+ .block => self.genBlock(inst.castTag(.block).?),
+ .br => self.genBr(inst.castTag(.br).?),
+ .call => self.genCall(inst.castTag(.call).?),
+ .cmp_eq => self.genCmp(inst.castTag(.cmp_eq).?, .eq),
+ .cmp_gte => self.genCmp(inst.castTag(.cmp_gte).?, .gte),
+ .cmp_gt => self.genCmp(inst.castTag(.cmp_gt).?, .gt),
+ .cmp_lte => self.genCmp(inst.castTag(.cmp_lte).?, .lte),
+ .cmp_lt => self.genCmp(inst.castTag(.cmp_lt).?, .lt),
+ .cmp_neq => self.genCmp(inst.castTag(.cmp_neq).?, .neq),
+ .condbr => self.genCondBr(inst.castTag(.condbr).?),
+ .constant => unreachable,
+ .dbg_stmt => WValue.none,
+ .load => self.genLoad(inst.castTag(.load).?),
+ .loop => self.genLoop(inst.castTag(.loop).?),
+ .ret => self.genRet(inst.castTag(.ret).?),
+ .retvoid => WValue.none,
+ .store => self.genStore(inst.castTag(.store).?),
+ else => self.fail(inst.src, "TODO: Implement wasm inst: {s}", .{inst.tag}),
+ };
+ }
+
+ fn genBody(self: *Context, body: ir.Body) InnerError!void {
+ for (body.instructions) |inst| {
+ const result = try self.genInst(inst);
+ try self.values.putNoClobber(self.gpa, inst, result);
+ }
+ }
+
+ fn genRet(self: *Context, inst: *Inst.UnOp) InnerError!WValue {
+ // TODO: Implement tail calls
+ const operand = self.resolveInst(inst.operand);
+ try self.emitWValue(operand);
+ return .none;
+ }
+
+ fn genCall(self: *Context, inst: *Inst.Call) InnerError!WValue {
+ const func_inst = inst.func.castTag(.constant).?;
+ const func_val = inst.func.value().?;
+
+ const target = blk: {
+ if (func_val.castTag(.function)) |func| {
+ break :blk func.data.owner_decl;
+ } else if (func_val.castTag(.extern_fn)) |ext_fn| {
+ break :blk ext_fn.data;
+ }
+ return self.fail(inst.base.src, "Expected a function, but instead found type '{s}'", .{func_val.tag()});
+ };
+
+ for (inst.args) |arg| {
+ const arg_val = self.resolveInst(arg);
+ try self.emitWValue(arg_val);
+ }
+
+ try self.code.append(wasm.opcode(.call));
+
+ // The function index immediate argument will be filled in using this data
+ // in link.Wasm.flush().
+ try self.decl.fn_link.wasm.?.idx_refs.append(self.gpa, .{
+ .offset = @intCast(u32, self.code.items.len),
+ .decl = target,
+ });
+
+ return .none;
+ }
+
+ fn genAlloc(self: *Context, inst: *Inst.NoOp) InnerError!WValue {
+ const elem_type = inst.base.ty.elemType();
+ const valtype = try self.genValtype(inst.base.src, elem_type);
+ try self.locals.append(self.gpa, valtype);
+
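+        // `defer` increments the index only after the return value below is computed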
+ defer self.local_index += 1;
+ return WValue{ .local = self.local_index };
+ }
+
+ fn genStore(self: *Context, inst: *Inst.BinOp) InnerError!WValue {
+ const writer = self.code.writer();
+
+ const lhs = self.resolveInst(inst.lhs);
+ const rhs = self.resolveInst(inst.rhs);
+ try self.emitWValue(rhs);
+
+ try writer.writeByte(wasm.opcode(.local_set));
+ try leb.writeULEB128(writer, lhs.local);
+ return .none;
+ }
+
+ fn genLoad(self: *Context, inst: *Inst.UnOp) InnerError!WValue {
+ return self.resolveInst(inst.operand);
+ }
+
+ fn genArg(self: *Context, inst: *Inst.Arg) InnerError!WValue {
+ // arguments share the index with locals
+ defer self.local_index += 1;
+ return WValue{ .local = self.local_index };
+ }
+
+ fn genAdd(self: *Context, inst: *Inst.BinOp) InnerError!WValue {
+ const lhs = self.resolveInst(inst.lhs);
+ const rhs = self.resolveInst(inst.rhs);
+
+ try self.emitWValue(lhs);
+ try self.emitWValue(rhs);
+
+ const opcode: wasm.Opcode = switch (inst.base.ty.tag()) {
+ .u32, .i32 => .i32_add,
+ .u64, .i64 => .i64_add,
+ .f32 => .f32_add,
+ .f64 => .f64_add,
+ else => return self.fail(inst.base.src, "TODO - Implement wasm genAdd for type '{s}'", .{inst.base.ty.tag()}),
+ };
+
+ try self.code.append(wasm.opcode(opcode));
+ return .none;
+ }
+
+ fn emitConstant(self: *Context, inst: *Inst.Constant) InnerError!void {
+ const writer = self.code.writer();
+ switch (inst.base.ty.tag()) {
+ .u32 => {
+ try writer.writeByte(wasm.opcode(.i32_const));
+ try leb.writeILEB128(writer, inst.val.toUnsignedInt());
+ },
+ .i32 => {
+ try writer.writeByte(wasm.opcode(.i32_const));
+ try leb.writeILEB128(writer, inst.val.toSignedInt());
+ },
+ .u64 => {
+ try writer.writeByte(wasm.opcode(.i64_const));
+ try leb.writeILEB128(writer, inst.val.toUnsignedInt());
+ },
+ .i64 => {
+ try writer.writeByte(wasm.opcode(.i64_const));
+ try leb.writeILEB128(writer, inst.val.toSignedInt());
+ },
+ .f32 => {
+ try writer.writeByte(wasm.opcode(.f32_const));
+ // TODO: enforce LE byte order
+ try writer.writeAll(mem.asBytes(&inst.val.toFloat(f32)));
+ },
+ .f64 => {
+ try writer.writeByte(wasm.opcode(.f64_const));
+ // TODO: enforce LE byte order
+ try writer.writeAll(mem.asBytes(&inst.val.toFloat(f64)));
+ },
+ .void => {},
+ else => |ty| return self.fail(inst.base.src, "Wasm TODO: emitConstant for type {s}", .{ty}),
+ }
+ }
+
+ fn genBlock(self: *Context, block: *Inst.Block) InnerError!WValue {
+ const block_ty = try self.genBlockType(block.base.src, block.base.ty);
+
+ try self.startBlock(.block, block_ty, null);
+ block.codegen = .{
+ // we don't use relocs, so using `relocs` is illegal behaviour.
+ .relocs = undefined,
+ // Here we set the current block idx, so breaks know the depth to jump
+ // to when breaking out.
+ .mcv = @bitCast(AnyMCValue, WValue{ .block_idx = self.block_depth }),
+ };
+ try self.genBody(block.body);
+ try self.endBlock();
+
+ return .none;
+ }
+
+    /// Appends a new wasm block to the code section and increases the `block_depth` by 1
+ fn startBlock(self: *Context, block_type: wasm.Opcode, valtype: u8, with_offset: ?usize) !void {
+ self.block_depth += 1;
+ if (with_offset) |offset| {
+ try self.code.insert(offset, wasm.opcode(block_type));
+ try self.code.insert(offset + 1, valtype);
+ } else {
+ try self.code.append(wasm.opcode(block_type));
+ try self.code.append(valtype);
+ }
+ }
+
+ /// Ends the current wasm block and decreases the `block_depth` by 1
+ fn endBlock(self: *Context) !void {
+ try self.code.append(wasm.opcode(.end));
+ self.block_depth -= 1;
+ }
+
+ fn genLoop(self: *Context, loop: *Inst.Loop) InnerError!WValue {
+ const loop_ty = try self.genBlockType(loop.base.src, loop.base.ty);
+
+ try self.startBlock(.loop, loop_ty, null);
+ try self.genBody(loop.body);
+
+ // breaking to the index of a loop block will continue the loop instead
+ try self.code.append(wasm.opcode(.br));
+ try leb.writeULEB128(self.code.writer(), @as(u32, 0));
+
+ try self.endBlock();
+
+ return .none;
+ }
+
+ fn genCondBr(self: *Context, condbr: *Inst.CondBr) InnerError!WValue {
+ const condition = self.resolveInst(condbr.condition);
+ const writer = self.code.writer();
+
+ // TODO: Handle death instructions for then and else body
+
+ // insert blocks at the position of `offset` so
+ // the condition can jump to it
+ const offset = condition.code_offset;
+ const block_ty = try self.genBlockType(condbr.base.src, condbr.base.ty);
+ try self.startBlock(.block, block_ty, offset);
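+        // The emitted structure is:
+        //   block <ty>
+        //     <condition>
+        //     br_if 0      ;; when the condition is true, skip the else body
+        //     <else body>
+        //   end
+        //   <then body>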
+
+        // we inserted the block in front of the condition
+        // so now, if the condition is true, break out of this block, skipping the
+        // else body, and continue with the then codepath
+ try writer.writeByte(wasm.opcode(.br_if));
+ try leb.writeULEB128(writer, @as(u32, 0));
+
+ try self.genBody(condbr.else_body);
+ try self.endBlock();
+
+        // The then body runs after the block, reached by the break above when the condition was true
+ try self.genBody(condbr.then_body);
+
+ return .none;
+ }
+
+ fn genCmp(self: *Context, inst: *Inst.BinOp, op: std.math.CompareOperator) InnerError!WValue {
+ const ty = inst.lhs.ty.tag();
+
+        // save the offset so a later condbr can insert a block in front of
+        // this comparison and branch on its result
+ const offset = self.code.items.len;
+
+ const lhs = self.resolveInst(inst.lhs);
+ const rhs = self.resolveInst(inst.rhs);
+
+ try self.emitWValue(lhs);
+ try self.emitWValue(rhs);
+
+ const opcode_maybe: ?wasm.Opcode = switch (op) {
+ .lt => @as(?wasm.Opcode, switch (ty) {
+ .i32 => .i32_lt_s,
+ .u32 => .i32_lt_u,
+ .i64 => .i64_lt_s,
+ .u64 => .i64_lt_u,
+ .f32 => .f32_lt,
+ .f64 => .f64_lt,
+ else => null,
+ }),
+ .lte => @as(?wasm.Opcode, switch (ty) {
+ .i32 => .i32_le_s,
+ .u32 => .i32_le_u,
+ .i64 => .i64_le_s,
+ .u64 => .i64_le_u,
+ .f32 => .f32_le,
+ .f64 => .f64_le,
+ else => null,
+ }),
+ .eq => @as(?wasm.Opcode, switch (ty) {
+ .i32, .u32 => .i32_eq,
+ .i64, .u64 => .i64_eq,
+ .f32 => .f32_eq,
+ .f64 => .f64_eq,
+ else => null,
+ }),
+ .gte => @as(?wasm.Opcode, switch (ty) {
+ .i32 => .i32_ge_s,
+ .u32 => .i32_ge_u,
+ .i64 => .i64_ge_s,
+ .u64 => .i64_ge_u,
+ .f32 => .f32_ge,
+ .f64 => .f64_ge,
+ else => null,
+ }),
+ .gt => @as(?wasm.Opcode, switch (ty) {
+ .i32 => .i32_gt_s,
+ .u32 => .i32_gt_u,
+ .i64 => .i64_gt_s,
+ .u64 => .i64_gt_u,
+ .f32 => .f32_gt,
+ .f64 => .f64_gt,
+ else => null,
+ }),
+ .neq => @as(?wasm.Opcode, switch (ty) {
+ .i32, .u32 => .i32_ne,
+ .i64, .u64 => .i64_ne,
+ .f32 => .f32_ne,
+ .f64 => .f64_ne,
+ else => null,
+ }),
+ };
+
+ const opcode = opcode_maybe orelse
+ return self.fail(inst.base.src, "TODO - Wasm genCmp for type '{s}' and operator '{s}'", .{ ty, @tagName(op) });
+
+ try self.code.append(wasm.opcode(opcode));
+ return WValue{ .code_offset = offset };
+ }
+
+ fn genBr(self: *Context, br: *Inst.Br) InnerError!WValue {
+ // if operand has codegen bits we should break with a value
+ if (br.operand.ty.hasCodeGenBits()) {
+ const operand = self.resolveInst(br.operand);
+ try self.emitWValue(operand);
+ }
+
+        // every block stores a `WValue` containing its block index.
+        // We determine how far to jump by subtracting it from the current block depth
+ const wvalue = @bitCast(WValue, br.block.codegen.mcv);
+ const idx: u32 = self.block_depth - wvalue.block_idx;
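+        // e.g. at block_depth 3, a break to the block stored with index 1 emits `br 2`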
+ const writer = self.code.writer();
+ try writer.writeByte(wasm.opcode(.br));
+ try leb.writeULEB128(writer, idx);
+
+ return .none;
+ }
+};