 CMakeLists.txt                                |    7
 lib/compiler/test_runner.zig                  |   15
 lib/std/Thread.zig                            |    2
 lib/std/builtin.zig                           |   10
 lib/std/start.zig                             |   47
 src/Compilation.zig                           |    1
 src/arch/riscv64/CodeGen.zig                  | 2772
 src/arch/riscv64/Emit.zig                     |  173
 src/arch/riscv64/Encoding.zig                 | 1119
 src/arch/riscv64/Lower.zig                    |  925
 src/arch/riscv64/Mir.zig                      |  300
 src/arch/riscv64/abi.zig                      |    9
 src/arch/riscv64/bits.zig                     |    8
 src/arch/riscv64/encoder.zig                  |   80
 src/arch/riscv64/encoding.zig                 |  729
 src/arch/riscv64/mnem.zig                     |  257
 src/codegen.zig                               |    4
 src/link/riscv.zig                            |   40
 test/behavior/abs.zig                         |    1
 test/behavior/align.zig                       |    3
 test/behavior/array.zig                       |   20
 test/behavior/atomics.zig                     |    5
 test/behavior/basic.zig                       |   17
 test/behavior/bit_shifting.zig                |    1
 test/behavior/bitcast.zig                     |    3
 test/behavior/call.zig                        |    2
 test/behavior/cast.zig                        |   22
 test/behavior/cast_int.zig                    |    2
 test/behavior/defer.zig                       |    3
 test/behavior/destructure.zig                 |    2
 test/behavior/enum.zig                        |    6
 test/behavior/error.zig                       |   10
 test/behavior/eval.zig                        |    1
 test/behavior/floatop.zig                     |    1
 test/behavior/fn.zig                          |    2
 test/behavior/for.zig                         |    4
 test/behavior/generics.zig                    |    6
 test/behavior/globals.zig                     |    1
 test/behavior/if.zig                          |    6
 test/behavior/math.zig                        |    9
 test/behavior/memcpy.zig                      |    4
 test/behavior/null.zig                        |    3
 test/behavior/optional.zig                    |   10
 test/behavior/packed-struct.zig               |   11
 test/behavior/packed-union.zig                |    2
 test/behavior/pointers.zig                    |    6
 test/behavior/popcount.zig                    |    1
 test/behavior/slice.zig                       |    6
 test/behavior/src.zig                         |    3
 test/behavior/string_literals.zig             |    2
 test/behavior/struct.zig                      |   19
 test/behavior/switch.zig                      |   12
 test/behavior/switch_prong_implicit_cast.zig  |    1
 test/behavior/threadlocal.zig                 |    3
 test/behavior/try.zig                         |    1
 test/behavior/tuple.zig                       |    5
 test/behavior/tuple_declarations.zig          |    2
 test/behavior/type_info.zig                   |    5
 test/behavior/typename.zig                    |    8
 test/behavior/union.zig                       |    9
 test/behavior/union_with_members.zig          |    1
 test/behavior/vector.zig                      |   13
 test/behavior/while.zig                       |    5
 test/tests.zig                                |    2
 64 files changed, 3498 insertions(+), 3261 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index cc5ada18b3..f99e59e41f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -535,11 +535,14 @@ set(ZIG_STAGE2_SOURCES
src/arch/arm/Mir.zig
src/arch/arm/abi.zig
src/arch/arm/bits.zig
+ src/arch/riscv64/abi.zig
+ src/arch/riscv64/bits.zig
src/arch/riscv64/CodeGen.zig
src/arch/riscv64/Emit.zig
+ src/arch/riscv64/encoding.zig
+ src/arch/riscv64/Lower.zig
src/arch/riscv64/Mir.zig
- src/arch/riscv64/abi.zig
- src/arch/riscv64/bits.zig
+ src/arch/riscv64/mnem.zig
src/arch/sparc64/CodeGen.zig
src/arch/sparc64/Emit.zig
src/arch/sparc64/Mir.zig
diff --git a/lib/compiler/test_runner.zig b/lib/compiler/test_runner.zig
index 25f029a183..3e97062982 100644
--- a/lib/compiler/test_runner.zig
+++ b/lib/compiler/test_runner.zig
@@ -282,11 +282,13 @@ pub fn mainSimple() anyerror!void {
const stderr = if (comptime enable_print) std.io.getStdErr() else {};
for (builtin.test_functions) |test_fn| {
- if (enable_print) {
- stderr.writeAll(test_fn.name) catch {};
- stderr.writeAll("... ") catch {};
- }
- test_fn.func() catch |err| {
+ if (test_fn.func()) |_| {
+ if (enable_print) {
+ stderr.writeAll(test_fn.name) catch {};
+ stderr.writeAll("... ") catch {};
+ stderr.writeAll("PASS\n") catch {};
+ }
+ } else |err| if (enable_print) {
if (enable_print) {
stderr.writeAll(test_fn.name) catch {};
stderr.writeAll("... ") catch {};
@@ -300,8 +302,7 @@ pub fn mainSimple() anyerror!void {
if (enable_print) stderr.writeAll("SKIP\n") catch {};
skipped += 1;
continue;
- };
- if (enable_print) stderr.writeAll("PASS\n") catch {};
+ }
passed += 1;
}
if (enable_print and print_summary) {
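[note] The restructured loop above leans on Zig's payload-capturing `if` over error unions: `if (expr) |value| { ... } else |err| { ... }` branches on success or failure of a call returning `anyerror!T`, which lets the runner print PASS on the success path instead of after the catch. A minimal standalone sketch of the pattern (the `mightFail` helper is hypothetical):

    const std = @import("std");

    fn mightFail(n: u32) anyerror!u32 {
        if (n == 0) return error.SkipZigTest;
        return n * 2;
    }

    pub fn main() void {
        // Success branch captures the payload; failure branch captures the error.
        if (mightFail(21)) |result| {
            std.debug.print("PASS: {d}\n", .{result});
        } else |err| {
            std.debug.print("FAIL: {s}\n", .{@errorName(err)});
        }
    }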
diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig
index c3802cc1fe..8775b9a52e 100644
--- a/lib/std/Thread.zig
+++ b/lib/std/Thread.zig
@@ -385,7 +385,7 @@ pub fn yield() YieldError!void {
}
/// State to synchronize detachment of spawner thread to spawned thread
-const Completion = std.atomic.Value(enum(u8) {
+const Completion = std.atomic.Value(enum(if (builtin.zig_backend == .stage2_riscv64) u32 else u8) {
running,
detached,
completed,
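[note] Widening the tag from u8 to u32 under the self-hosted riscv64 backend plausibly sidesteps sub-word atomics: base RV64 LR/SC sequences operate on 32- and 64-bit units, so a byte-sized compare-exchange needs extra masking that the backend may not yet emit. That rationale is inferred from the change, not stated in it. Thread.zig uses the value roughly like this (illustrative call, not copied from the file):

    const std = @import("std");

    const Completion = std.atomic.Value(enum(u32) { running, detached, completed });

    fn finish(completion: *Completion) void {
        // Try to transition running -> completed; if the spawner already
        // detached, the spawned thread must clean up after itself instead.
        if (completion.cmpxchgStrong(.running, .completed, .seq_cst, .seq_cst)) |actual| {
            std.debug.assert(actual == .detached);
        }
    }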
diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index 0ef5cffd24..86f8da6cd4 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -775,14 +775,8 @@ pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace, ret_addr
}
if (builtin.zig_backend == .stage2_riscv64) {
- asm volatile ("ecall"
- :
- : [number] "{a7}" (64),
- [arg1] "{a0}" (1),
- [arg2] "{a1}" (@intFromPtr(msg.ptr)),
- [arg3] "{a2}" (msg.len),
- : "memory"
- );
+ std.debug.print("panic: {s}\n", .{msg});
+ @breakpoint();
std.posix.exit(127);
}
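[note] For context on the deletion above: on riscv64-linux, `ecall` with a7=64 is the `write` syscall (a0=fd, a1=buf, a2=len), so the old handler wrote the panic message straight to a file descriptor before exiting. The replacement goes through `std.debug.print` plus `@breakpoint()`, which the backend can now compile. A rough Zig equivalent of the removed assembly, without inline asm (sketch):

    const std = @import("std");

    fn writePanicMessage(msg: []const u8) void {
        // Same effect as the removed ecall: write(1, msg.ptr, msg.len).
        _ = std.os.linux.write(1, msg.ptr, msg.len);
    }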
diff --git a/lib/std/start.zig b/lib/std/start.zig
index 326857d9c0..aeefbaffc0 100644
--- a/lib/std/start.zig
+++ b/lib/std/start.zig
@@ -20,8 +20,7 @@ pub const simplified_logic =
builtin.zig_backend == .stage2_arm or
builtin.zig_backend == .stage2_sparc64 or
builtin.cpu.arch == .spirv32 or
- builtin.cpu.arch == .spirv64 or
- builtin.zig_backend == .stage2_riscv64;
+ builtin.cpu.arch == .spirv64;
comptime {
// No matter what, we import the root file, so that any export, test, comptime
@@ -41,10 +40,6 @@ comptime {
} else if (builtin.os.tag == .opencl) {
if (@hasDecl(root, "main"))
@export(spirvMain2, .{ .name = "main" });
- } else if (native_arch.isRISCV()) {
- if (!@hasDecl(root, "_start")) {
- @export(riscv_start, .{ .name = "_start" });
- }
} else {
if (!@hasDecl(root, "_start")) {
@export(_start2, .{ .name = "_start" });
@@ -206,42 +201,6 @@ fn wasi_start() callconv(.C) void {
}
}
-fn riscv_start() callconv(.C) noreturn {
- std.process.exit(switch (@typeInfo(@typeInfo(@TypeOf(root.main)).Fn.return_type.?)) {
- .NoReturn => root.main(),
- .Void => ret: {
- root.main();
- break :ret 0;
- },
- .Int => |info| ret: {
- if (info.bits != 8 or info.signedness == .signed) {
- @compileError(bad_main_ret);
- }
- break :ret root.main();
- },
- .ErrorUnion => ret: {
- const result = root.main() catch {
- const stderr = std.io.getStdErr().writer();
- stderr.writeAll("failed with error\n") catch {
- @panic("failed to print when main returned error");
- };
- break :ret 1;
- };
- switch (@typeInfo(@TypeOf(result))) {
- .Void => break :ret 0,
- .Int => |info| {
- if (info.bits != 8 or info.signedness == .signed) {
- @compileError(bad_main_ret);
- }
- return result;
- },
- else => @compileError(bad_main_ret),
- }
- },
- else => @compileError(bad_main_ret),
- });
-}
-
fn EfiMain(handle: uefi.Handle, system_table: *uefi.tables.SystemTable) callconv(.C) usize {
uefi.handle = handle;
uefi.system_table = system_table;
@@ -563,6 +522,10 @@ pub inline fn callMain() u8 {
if (@typeInfo(ReturnType) != .ErrorUnion) @compileError(bad_main_ret);
const result = root.main() catch |err| {
+ if (builtin.zig_backend == .stage2_riscv64) {
+ std.debug.print("error: failed with error\n", .{});
+ return 1;
+ }
std.log.err("{s}", .{@errorName(err)});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
diff --git a/src/Compilation.zig b/src/Compilation.zig
index e9a8aa7774..cd46c0fc50 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -6334,6 +6334,7 @@ fn canBuildZigLibC(target: std.Target, use_llvm: bool) bool {
}
return switch (target_util.zigBackend(target, use_llvm)) {
.stage2_llvm => true,
+ .stage2_riscv64 => true,
.stage2_x86_64 => if (target.ofmt == .elf or target.ofmt == .macho) true else build_options.have_llvm,
else => build_options.have_llvm,
};
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index 6e203161c2..4fd47a43ff 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -37,6 +37,10 @@ const DebugInfoOutput = codegen.DebugInfoOutput;
const bits = @import("bits.zig");
const abi = @import("abi.zig");
const Lower = @import("Lower.zig");
+const mnem_import = @import("mnem.zig");
+const Mnemonic = mnem_import.Mnemonic;
+const Pseudo = mnem_import.Pseudo;
+const encoding = @import("encoding.zig");
const Register = bits.Register;
const CSR = bits.CSR;
@@ -45,19 +49,18 @@ const Memory = bits.Memory;
const FrameIndex = bits.FrameIndex;
const RegisterManager = abi.RegisterManager;
const RegisterLock = RegisterManager.RegisterLock;
+const Instruction = encoding.Instruction;
const InnerError = CodeGenError || error{OutOfRegisters};
pt: Zcu.PerThread,
air: Air,
liveness: Liveness,
-zcu: *Zcu,
bin_file: *link.File,
gpa: Allocator,
mod: *Package.Module,
target: *const std.Target,
-func_index: InternPool.Index,
debug_output: DebugInfoOutput,
err_msg: ?*ErrorMsg,
args: []MCValue,
@@ -67,7 +70,8 @@ arg_index: usize,
src_loc: Zcu.LazySrcLoc,
mir_instructions: std.MultiArrayList(Mir.Inst) = .{},
-mir_extra: std.ArrayListUnmanaged(u32) = .{},
+
+owner: Owner,
/// Byte offset within the source file of the ending curly.
end_di_line: u32,
@@ -113,6 +117,34 @@ const SymbolOffset = struct { sym: u32, off: i32 = 0 };
const RegisterOffset = struct { reg: Register, off: i32 = 0 };
pub const FrameAddr = struct { index: FrameIndex, off: i32 = 0 };
+const Owner = union(enum) {
+ func_index: InternPool.Index,
+ lazy_sym: link.File.LazySymbol,
+
+ fn getDecl(owner: Owner, zcu: *Zcu) InternPool.DeclIndex {
+ return switch (owner) {
+ .func_index => |func_index| zcu.funcOwnerDeclIndex(func_index),
+ .lazy_sym => |lazy_sym| lazy_sym.ty.getOwnerDecl(zcu),
+ };
+ }
+
+ fn getSymbolIndex(owner: Owner, func: *Func) !u32 {
+ const pt = func.pt;
+ switch (owner) {
+ .func_index => |func_index| {
+ const decl_index = func.pt.zcu.funcOwnerDeclIndex(func_index);
+ const elf_file = func.bin_file.cast(link.File.Elf).?;
+ return elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, decl_index);
+ },
+ .lazy_sym => |lazy_sym| {
+ const elf_file = func.bin_file.cast(link.File.Elf).?;
+ return elf_file.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(elf_file, pt, lazy_sym) catch |err|
+ func.fail("{s} creating lazy symbol", .{@errorName(err)});
+ },
+ }
+ }
+};
+
const MCValue = union(enum) {
/// No runtime bits. `void` types, empty structs, u0, enums with 1 tag, etc.
/// TODO Look into deleting this tag and using `dead` instead, since every use
@@ -131,8 +163,12 @@ const MCValue = union(enum) {
immediate: u64,
/// The value doesn't exist in memory yet.
load_symbol: SymbolOffset,
+ /// A TLV value.
+ load_tlv: u32,
/// The address of the memory location not-yet-allocated by the linker.
lea_symbol: SymbolOffset,
+ /// The address of a TLV value.
+ lea_tlv: u32,
/// The value is in a target-specific register.
register: Register,
/// The value is split across two registers
@@ -189,6 +225,7 @@ const MCValue = union(enum) {
.lea_frame,
.undef,
.lea_symbol,
+ .lea_tlv,
.air_ref,
.reserved_frame,
=> false,
@@ -198,6 +235,7 @@ const MCValue = union(enum) {
.register_offset,
.load_frame,
.load_symbol,
+ .load_tlv,
.indirect,
=> true,
};
@@ -216,10 +254,12 @@ const MCValue = union(enum) {
.undef,
.air_ref,
.lea_symbol,
+ .lea_tlv,
.reserved_frame,
=> unreachable, // not in memory
.load_symbol => |sym_off| .{ .lea_symbol = sym_off },
+ .load_tlv => |sym| .{ .lea_tlv = sym },
.memory => |addr| .{ .immediate = addr },
.load_frame => |off| .{ .lea_frame = off },
.indirect => |reg_off| switch (reg_off.off) {
@@ -238,17 +278,19 @@ const MCValue = union(enum) {
.indirect,
.undef,
.air_ref,
- .load_frame,
.register_pair,
+ .load_frame,
.load_symbol,
+ .load_tlv,
.reserved_frame,
=> unreachable, // not a pointer
.immediate => |addr| .{ .memory = addr },
- .lea_frame => |off| .{ .load_frame = off },
.register => |reg| .{ .indirect = .{ .reg = reg } },
.register_offset => |reg_off| .{ .indirect = reg_off },
+ .lea_frame => |off| .{ .load_frame = off },
.lea_symbol => |sym_off| .{ .load_symbol = sym_off },
+ .lea_tlv => |sym| .{ .load_tlv = sym },
};
}
@@ -264,13 +306,15 @@ const MCValue = union(enum) {
.register_pair,
.memory,
.indirect,
- .load_frame,
.load_symbol,
.lea_symbol,
+ .lea_tlv,
+ .load_tlv,
=> switch (off) {
0 => mcv,
- else => unreachable, // not offsettable
+ else => unreachable,
},
+ .load_frame => |frame| .{ .load_frame = .{ .index = frame.index, .off = frame.off + off } },
.immediate => |imm| .{ .immediate = @bitCast(@as(i64, @bitCast(imm)) +% off) },
.register => |reg| .{ .register_offset = .{ .reg = reg, .off = off } },
.register_offset => |reg_off| .{ .register_offset = .{ .reg = reg_off.reg, .off = reg_off.off + off } },
@@ -323,6 +367,8 @@ const InstTracking = struct {
.memory,
.load_frame,
.lea_frame,
+ .load_tlv,
+ .lea_tlv,
.load_symbol,
.lea_symbol,
=> result,
@@ -378,6 +424,8 @@ const InstTracking = struct {
.lea_frame,
.load_symbol,
.lea_symbol,
+ .load_tlv,
+ .lea_tlv,
=> inst_tracking.long,
.dead,
.register,
@@ -406,6 +454,8 @@ const InstTracking = struct {
.lea_frame,
.load_symbol,
.lea_symbol,
+ .load_tlv,
+ .lea_tlv,
=> assert(std.meta.eql(inst_tracking.long, target.long)),
.load_frame,
.reserved_frame,
@@ -737,12 +787,11 @@ pub fn generate(
.air = air,
.pt = pt,
.mod = mod,
- .zcu = zcu,
.bin_file = bin_file,
.liveness = liveness,
.target = target,
- .func_index = func_index,
.debug_output = debug_output,
+ .owner = .{ .func_index = func_index },
.err_msg = null,
.args = undefined, // populated after `resolveCallingConventionValues`
.ret_mcv = undefined, // populated after `resolveCallingConventionValues`
@@ -767,7 +816,6 @@ pub fn generate(
function.const_tracking.deinit(gpa);
function.exitlude_jump_relocs.deinit(gpa);
function.mir_instructions.deinit(gpa);
- function.mir_extra.deinit(gpa);
}
wip_mir_log.debug("{}:", .{function.fmtDecl(func.owner_decl)});
@@ -799,11 +847,11 @@ pub fn generate(
function.args = call_info.args;
function.ret_mcv = call_info.return_value;
function.frame_allocs.set(@intFromEnum(FrameIndex.ret_addr), FrameAlloc.init(.{
- .size = Type.usize.abiSize(pt),
- .alignment = Type.usize.abiAlignment(pt).min(call_info.stack_align),
+ .size = Type.u64.abiSize(pt),
+ .alignment = Type.u64.abiAlignment(pt).min(call_info.stack_align),
}));
function.frame_allocs.set(@intFromEnum(FrameIndex.base_ptr), FrameAlloc.init(.{
- .size = Type.usize.abiSize(pt),
+ .size = Type.u64.abiSize(pt),
.alignment = Alignment.min(
call_info.stack_align,
Alignment.fromNonzeroByteUnits(function.target.stackAlignment()),
@@ -815,7 +863,7 @@ pub fn generate(
}));
function.frame_allocs.set(@intFromEnum(FrameIndex.spill_frame), FrameAlloc.init(.{
.size = 0,
- .alignment = Type.usize.abiAlignment(pt),
+ .alignment = Type.u64.abiAlignment(pt),
}));
function.gen() catch |err| switch (err) {
@@ -828,7 +876,6 @@ pub fn generate(
var mir: Mir = .{
.instructions = function.mir_instructions.toOwnedSlice(),
- .extra = try function.mir_extra.toOwnedSlice(gpa),
.frame_locs = function.frame_locs.toOwnedSlice(),
};
defer mir.deinit(gpa);
@@ -878,6 +925,102 @@ pub fn generate(
}
}
+pub fn generateLazy(
+ bin_file: *link.File,
+ pt: Zcu.PerThread,
+ src_loc: Zcu.LazySrcLoc,
+ lazy_sym: link.File.LazySymbol,
+ code: *std.ArrayList(u8),
+ debug_output: DebugInfoOutput,
+) CodeGenError!Result {
+ const comp = bin_file.comp;
+ const gpa = comp.gpa;
+ const mod = comp.root_mod;
+
+ var function: Func = .{
+ .gpa = gpa,
+ .air = undefined,
+ .pt = pt,
+ .mod = mod,
+ .bin_file = bin_file,
+ .liveness = undefined,
+ .target = &mod.resolved_target.result,
+ .debug_output = debug_output,
+ .owner = .{ .lazy_sym = lazy_sym },
+ .err_msg = null,
+ .args = undefined, // populated after `resolveCallingConventionValues`
+ .ret_mcv = undefined, // populated after `resolveCallingConventionValues`
+ .fn_type = undefined,
+ .arg_index = 0,
+ .branch_stack = undefined,
+ .src_loc = src_loc,
+ .end_di_line = undefined,
+ .end_di_column = undefined,
+ .scope_generation = 0,
+ .avl = null,
+ .vtype = null,
+ };
+ defer function.mir_instructions.deinit(gpa);
+
+ function.genLazy(lazy_sym) catch |err| switch (err) {
+ error.CodegenFail => return Result{ .fail = function.err_msg.? },
+ error.OutOfRegisters => return Result{
+ .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
+ },
+ else => |e| return e,
+ };
+
+ var mir: Mir = .{
+ .instructions = function.mir_instructions.toOwnedSlice(),
+ .frame_locs = function.frame_locs.toOwnedSlice(),
+ };
+ defer mir.deinit(gpa);
+
+ var emit: Emit = .{
+ .lower = .{
+ .pt = pt,
+ .allocator = gpa,
+ .mir = mir,
+ .cc = .Unspecified,
+ .src_loc = src_loc,
+ .output_mode = comp.config.output_mode,
+ .link_mode = comp.config.link_mode,
+ .pic = mod.pic,
+ },
+ .bin_file = bin_file,
+ .debug_output = debug_output,
+ .code = code,
+ .prev_di_pc = undefined, // no debug info yet
+ .prev_di_line = undefined, // no debug info yet
+ .prev_di_column = undefined, // no debug info yet
+ };
+ defer emit.deinit();
+
+ emit.emitMir() catch |err| switch (err) {
+ error.LowerFail, error.EmitFail => return Result{ .fail = emit.lower.err_msg.? },
+ error.InvalidInstruction => |e| {
+ const msg = switch (e) {
+ error.InvalidInstruction => "CodeGen failed to find a viable instruction.",
+ };
+ return Result{
+ .fail = try ErrorMsg.create(
+ gpa,
+ src_loc,
+ "{s} This is a bug in the Zig compiler.",
+ .{msg},
+ ),
+ };
+ },
+ else => |e| return e,
+ };
+
+ if (function.err_msg) |em| {
+ return Result{ .fail = em };
+ } else {
+ return Result.ok;
+ }
+}
+
const FormatWipMirData = struct {
func: *Func,
inst: Mir.Inst.Index,
@@ -895,7 +1038,6 @@ fn formatWipMir(
.allocator = data.func.gpa,
.mir = .{
.instructions = data.func.mir_instructions.slice(),
- .extra = data.func.mir_extra.items,
.frame_locs = data.func.frame_locs.slice(),
},
.cc = .Unspecified,
@@ -946,7 +1088,7 @@ fn formatDecl(
}
fn fmtDecl(func: *Func, decl_index: InternPool.DeclIndex) std.fmt.Formatter(formatDecl) {
return .{ .data = .{
- .zcu = func.zcu,
+ .zcu = func.pt.zcu,
.decl_index = decl_index,
} };
}
@@ -993,7 +1135,7 @@ fn addInst(func: *Func, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
try func.mir_instructions.ensureUnusedCapacity(gpa, 1);
const result_index: Mir.Inst.Index = @intCast(func.mir_instructions.len);
func.mir_instructions.appendAssumeCapacity(inst);
- if (inst.tag != .pseudo or switch (inst.ops) {
+ if (switch (inst.tag) {
else => true,
.pseudo_dbg_prologue_end,
.pseudo_dbg_line_column,
@@ -1004,65 +1146,26 @@ fn addInst(func: *Func, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
return result_index;
}
-fn addNop(func: *Func) error{OutOfMemory}!Mir.Inst.Index {
+fn addPseudo(func: *Func, mnem: Mnemonic) error{OutOfMemory}!Mir.Inst.Index {
return func.addInst(.{
- .tag = .nop,
- .ops = .none,
- .data = undefined,
- });
-}
-
-fn addPseudoNone(func: *Func, ops: Mir.Inst.Ops) !void {
- _ = try func.addInst(.{
- .tag = .pseudo,
- .ops = ops,
- .data = undefined,
+ .tag = mnem,
+ .data = .none,
});
}
-fn addPseudo(func: *Func, ops: Mir.Inst.Ops) !Mir.Inst.Index {
- return func.addInst(.{
- .tag = .pseudo,
- .ops = ops,
- .data = undefined,
- });
-}
-
-pub fn addExtra(func: *Func, extra: anytype) Allocator.Error!u32 {
- const fields = std.meta.fields(@TypeOf(extra));
- try func.mir_extra.ensureUnusedCapacity(func.gpa, fields.len);
- return func.addExtraAssumeCapacity(extra);
-}
-
-pub fn addExtraAssumeCapacity(func: *Func, extra: anytype) u32 {
- const fields = std.meta.fields(@TypeOf(extra));
- const result: u32 = @intCast(func.mir_extra.items.len);
- inline for (fields) |field| {
- func.mir_extra.appendAssumeCapacity(switch (field.type) {
- u32 => @field(extra, field.name),
- i32 => @bitCast(@field(extra, field.name)),
- else => @compileError("bad field type"),
- });
- }
- return result;
-}
-
/// Returns a temporary register that contains the value of the `reg` csr.
///
/// Caller's duty to lock the return register is needed.
fn getCsr(func: *Func, csr: CSR) !Register {
assert(func.hasFeature(.zicsr));
- const dst_reg = try func.register_manager.allocReg(null, func.regTempClassForType(Type.usize));
+ const dst_reg = try func.register_manager.allocReg(null, func.regTempClassForType(Type.u64));
_ = try func.addInst(.{
.tag = .csrrs,
- .ops = .csr,
- .data = .{
- .csr = .{
- .csr = csr,
- .rd = dst_reg,
- .rs1 = .x0,
- },
- },
+ .data = .{ .csr = .{
+ .csr = csr,
+ .rd = dst_reg,
+ .rs1 = .x0,
+ } },
});
return dst_reg;
}
@@ -1081,7 +1184,6 @@ fn setVl(func: *Func, dst_reg: Register, avl: u64, options: bits.VType) !void {
const options_int: u12 = @as(u12, 0) | @as(u8, @bitCast(options));
_ = try func.addInst(.{
.tag = .vsetvli,
- .ops = .rri,
.data = .{ .i_type = .{
.rd = dst_reg,
.rs1 = .zero,
@@ -1094,7 +1196,6 @@ fn setVl(func: *Func, dst_reg: Register, avl: u64, options: bits.VType) !void {
const options_int: u12 = (~@as(u12, 0) << 10) | @as(u8, @bitCast(options));
_ = try func.addInst(.{
.tag = .vsetivli,
- .ops = .rri,
.data = .{
.i_type = .{
.rd = dst_reg,
@@ -1105,10 +1206,9 @@ fn setVl(func: *Func, dst_reg: Register, avl: u64, options: bits.VType) !void {
});
} else {
const options_int: u12 = @as(u12, 0) | @as(u8, @bitCast(options));
- const temp_reg = try func.copyToTmpRegister(Type.usize, .{ .immediate = avl });
+ const temp_reg = try func.copyToTmpRegister(Type.u64, .{ .immediate = avl });
_ = try func.addInst(.{
.tag = .vsetvli,
- .ops = .rri,
.data = .{ .i_type = .{
.rd = dst_reg,
.rs1 = temp_reg,
@@ -1125,6 +1225,7 @@ const required_features = [_]Target.riscv.Feature{
.a,
.zicsr,
.v,
+ .zbb,
};
fn gen(func: *Func) !void {
@@ -1142,7 +1243,7 @@ fn gen(func: *Func) !void {
}
if (fn_info.cc != .Naked) {
- try func.addPseudoNone(.pseudo_dbg_prologue_end);
+ _ = try func.addPseudo(.pseudo_dbg_prologue_end);
const backpatch_stack_alloc = try func.addPseudo(.pseudo_dead);
const backpatch_ra_spill = try func.addPseudo(.pseudo_dead);
@@ -1156,11 +1257,11 @@ fn gen(func: *Func) !void {
// The address where to store the return value for the caller is in a
// register which the callee is free to clobber. Therefore, we purposely
// spill it to stack immediately.
- const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(Type.usize, pt));
+ const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(Type.u64, pt));
try func.genSetMem(
.{ .frame = frame_index },
0,
- Type.usize,
+ Type.u64,
func.ret_mcv.long.address().offset(-func.ret_mcv.short.indirect.off),
);
func.ret_mcv.long = .{ .load_frame = .{ .index = frame_index } };
@@ -1172,11 +1273,11 @@ fn gen(func: *Func) !void {
try func.genBody(func.air.getMainBody());
for (func.exitlude_jump_relocs.items) |jmp_reloc| {
- func.mir_instructions.items(.data)[jmp_reloc].inst =
+ func.mir_instructions.items(.data)[jmp_reloc].j_type.inst =
@intCast(func.mir_instructions.len);
}
- try func.addPseudoNone(.pseudo_dbg_epilogue_begin);
+ _ = try func.addPseudo(.pseudo_dbg_epilogue_begin);
const backpatch_restore_callee_preserved_regs = try func.addPseudo(.pseudo_dead);
const backpatch_ra_restore = try func.addPseudo(.pseudo_dead);
@@ -1186,14 +1287,11 @@ fn gen(func: *Func) !void {
// ret
_ = try func.addInst(.{
.tag = .jalr,
- .ops = .rri,
- .data = .{
- .i_type = .{
- .rd = .zero,
- .rs1 = .ra,
- .imm12 = Immediate.s(0),
- },
- },
+ .data = .{ .i_type = .{
+ .rd = .zero,
+ .rs1 = .ra,
+ .imm12 = Immediate.s(0),
+ } },
});
const frame_layout = try func.computeFrameLayout();
@@ -1201,7 +1299,6 @@ fn gen(func: *Func) !void {
func.mir_instructions.set(backpatch_stack_alloc, .{
.tag = .addi,
- .ops = .rri,
.data = .{ .i_type = .{
.rd = .sp,
.rs1 = .sp,
@@ -1209,8 +1306,7 @@ fn gen(func: *Func) !void {
} },
});
func.mir_instructions.set(backpatch_ra_spill, .{
- .tag = .pseudo,
- .ops = .pseudo_store_rm,
+ .tag = .pseudo_store_rm,
.data = .{ .rm = .{
.r = .ra,
.m = .{
@@ -1220,8 +1316,7 @@ fn gen(func: *Func) !void {
} },
});
func.mir_instructions.set(backpatch_ra_restore, .{
- .tag = .pseudo,
- .ops = .pseudo_load_rm,
+ .tag = .pseudo_load_rm,
.data = .{ .rm = .{
.r = .ra,
.m = .{
@@ -1231,8 +1326,7 @@ fn gen(func: *Func) !void {
} },
});
func.mir_instructions.set(backpatch_fp_spill, .{
- .tag = .pseudo,
- .ops = .pseudo_store_rm,
+ .tag = .pseudo_store_rm,
.data = .{ .rm = .{
.r = .s0,
.m = .{
@@ -1242,8 +1336,7 @@ fn gen(func: *Func) !void {
} },
});
func.mir_instructions.set(backpatch_fp_restore, .{
- .tag = .pseudo,
- .ops = .pseudo_load_rm,
+ .tag = .pseudo_load_rm,
.data = .{ .rm = .{
.r = .s0,
.m = .{
@@ -1254,7 +1347,6 @@ fn gen(func: *Func) !void {
});
func.mir_instructions.set(backpatch_fp_add, .{
.tag = .addi,
- .ops = .rri,
.data = .{ .i_type = .{
.rd = .s0,
.rs1 = .sp,
@@ -1263,7 +1355,6 @@ fn gen(func: *Func) !void {
});
func.mir_instructions.set(backpatch_stack_alloc_restore, .{
.tag = .addi,
- .ops = .rri,
.data = .{ .i_type = .{
.rd = .sp,
.rs1 = .sp,
@@ -1273,27 +1364,24 @@ fn gen(func: *Func) !void {
if (need_save_reg) {
func.mir_instructions.set(backpatch_spill_callee_preserved_regs, .{
- .tag = .pseudo,
- .ops = .pseudo_spill_regs,
+ .tag = .pseudo_spill_regs,
.data = .{ .reg_list = frame_layout.save_reg_list },
});
func.mir_instructions.set(backpatch_restore_callee_preserved_regs, .{
- .tag = .pseudo,
- .ops = .pseudo_restore_regs,
+ .tag = .pseudo_restore_regs,
.data = .{ .reg_list = frame_layout.save_reg_list },
});
}
} else {
- try func.addPseudoNone(.pseudo_dbg_prologue_end);
+ _ = try func.addPseudo(.pseudo_dbg_prologue_end);
try func.genBody(func.air.getMainBody());
- try func.addPseudoNone(.pseudo_dbg_epilogue_begin);
+ _ = try func.addPseudo(.pseudo_dbg_epilogue_begin);
}
// Drop them off at the rbrace.
_ = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_dbg_line_column,
+ .tag = .pseudo_dbg_line_column,
.data = .{ .pseudo_dbg_line_column = .{
.line = func.end_di_line,
.column = func.end_di_column,
@@ -1301,6 +1389,101 @@ fn gen(func: *Func) !void {
});
}
+fn genLazy(func: *Func, lazy_sym: link.File.LazySymbol) InnerError!void {
+ const pt = func.pt;
+ const mod = pt.zcu;
+ const ip = &mod.intern_pool;
+ switch (lazy_sym.ty.zigTypeTag(mod)) {
+ .Enum => {
+ const enum_ty = lazy_sym.ty;
+ wip_mir_log.debug("{}.@tagName:", .{enum_ty.fmt(pt)});
+
+ const param_regs = abi.Registers.Integer.function_arg_regs;
+ const ret_reg = param_regs[0];
+ const enum_mcv: MCValue = .{ .register = param_regs[1] };
+
+ const exitlude_jump_relocs = try func.gpa.alloc(Mir.Inst.Index, enum_ty.enumFieldCount(mod));
+ defer func.gpa.free(exitlude_jump_relocs);
+
+ const data_reg, const data_lock = try func.allocReg(.int);
+ defer func.register_manager.unlockReg(data_lock);
+
+ const elf_file = func.bin_file.cast(link.File.Elf).?;
+ const sym_index = elf_file.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(elf_file, pt, .{
+ .kind = .const_data,
+ .ty = enum_ty,
+ }) catch |err|
+ return func.fail("{s} creating lazy symbol", .{@errorName(err)});
+ const sym = elf_file.symbol(sym_index);
+
+ try func.genSetReg(Type.u64, data_reg, .{ .lea_symbol = .{ .sym = sym.esym_index } });
+
+ const cmp_reg, const cmp_lock = try func.allocReg(.int);
+ defer func.register_manager.unlockReg(cmp_lock);
+
+ var data_off: i32 = 0;
+ const tag_names = enum_ty.enumFields(mod);
+ for (exitlude_jump_relocs, 0..) |*exitlude_jump_reloc, tag_index| {
+ const tag_name_len = tag_names.get(ip)[tag_index].length(ip);
+ const tag_val = try pt.enumValueFieldIndex(enum_ty, @intCast(tag_index));
+ const tag_mcv = try func.genTypedValue(tag_val);
+
+ _ = try func.genBinOp(
+ .cmp_neq,
+ enum_mcv,
+ enum_ty,
+ tag_mcv,
+ enum_ty,
+ cmp_reg,
+ );
+ const skip_reloc = try func.condBr(Type.bool, .{ .register = cmp_reg });
+
+ try func.genSetMem(
+ .{ .reg = ret_reg },
+ 0,
+ Type.u64,
+ .{ .register_offset = .{ .reg = data_reg, .off = data_off } },
+ );
+
+ try func.genSetMem(
+ .{ .reg = ret_reg },
+ 8,
+ Type.u64,
+ .{ .immediate = tag_name_len },
+ );
+
+ exitlude_jump_reloc.* = try func.addInst(.{
+ .tag = .pseudo_j,
+ .data = .{ .j_type = .{
+ .rd = .zero,
+ .inst = undefined,
+ } },
+ });
+ func.performReloc(skip_reloc);
+
+ data_off += @intCast(tag_name_len + 1);
+ }
+
+ try func.airTrap();
+
+ for (exitlude_jump_relocs) |reloc| func.performReloc(reloc);
+
+ _ = try func.addInst(.{
+ .tag = .jalr,
+ .data = .{ .i_type = .{
+ .rd = .zero,
+ .rs1 = .ra,
+ .imm12 = Immediate.s(0),
+ } },
+ });
+ },
+ else => return func.fail(
+ "TODO implement {s} for {}",
+ .{ @tagName(lazy_sym.kind), lazy_sym.ty.fmt(pt) },
+ ),
+ }
+}
+
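[note] `genLazy` above emits a per-enum-type `@tagName` helper: the first argument register carries the out-pointer for the returned slice, the second the tag value; the lazy const-data symbol holds all tag names back to back, NUL-separated, and each compare-and-branch block stores the matching name's pointer and length (at offset 8) through the out-pointer, trapping on an invalid tag. In Zig terms the generated routine behaves roughly like this hand-written model (sketch; names are illustrative):

    fn tagName(comptime E: type, out: *[]const u8, tag: E) void {
        // `names` models the lazy symbol: all tag names, NUL-separated.
        const names = comptime blk: {
            var bytes: []const u8 = "";
            for (@typeInfo(E).Enum.fields) |field| bytes = bytes ++ field.name ++ "\x00";
            break :blk bytes;
        };
        var off: usize = 0;
        inline for (@typeInfo(E).Enum.fields) |field| {
            if (tag == @field(E, field.name)) {
                out.* = names[off .. off + field.name.len];
                return;
            }
            off += field.name.len + 1;
        }
        unreachable; // an invalid tag value traps, as in the generated code
    }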
fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void {
const pt = func.pt;
const zcu = pt.zcu;
@@ -1322,9 +1505,12 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void {
.sub,
.sub_wrap,
+ .add_sat,
+
.mul,
.mul_wrap,
.div_trunc,
+ .rem,
.shl, .shl_exact,
.shr, .shr_exact,
@@ -1344,7 +1530,6 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void {
.ptr_add,
.ptr_sub => try func.airPtrArithmetic(inst, tag),
- .rem,
.mod,
.div_float,
.div_floor,
@@ -1373,7 +1558,6 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void {
.shl_with_overflow => try func.airShlWithOverflow(inst),
- .add_sat => try func.airAddSat(inst),
.sub_sat => try func.airSubSat(inst),
.mul_sat => try func.airMulSat(inst),
.shl_sat => try func.airShlSat(inst),
@@ -1440,8 +1624,8 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void {
.struct_field_val => try func.airStructFieldVal(inst),
.float_from_int => try func.airFloatFromInt(inst),
.int_from_float => try func.airIntFromFloat(inst),
- .cmpxchg_strong => try func.airCmpxchg(inst),
- .cmpxchg_weak => try func.airCmpxchg(inst),
+ .cmpxchg_strong => try func.airCmpxchg(inst, .strong),
+ .cmpxchg_weak => try func.airCmpxchg(inst, .weak),
.atomic_rmw => try func.airAtomicRmw(inst),
.atomic_load => try func.airAtomicLoad(inst),
.memcpy => try func.airMemcpy(inst),
@@ -1579,7 +1763,7 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void {
for (tracking.getRegs()) |reg| {
if (RegisterManager.indexOfRegIntoTracked(reg).? == index) break;
} else return std.debug.panic(
- \\%{} takes up these regs: {any}, however these regs {any}, don't use it
+ \\%{} takes up these regs: {any}, however this regs {any}, don't use it
, .{ tracked_inst, tracking.getRegs(), RegisterManager.regAtTrackedIndex(@intCast(index)) });
}
}
@@ -1657,7 +1841,7 @@ fn finishAir(
}
const FrameLayout = struct {
- stack_adjust: u32,
+ stack_adjust: i12,
save_reg_list: Mir.RegisterList,
};
@@ -1671,10 +1855,7 @@ fn setFrameLoc(
const frame_i = @intFromEnum(frame_index);
if (aligned) {
const alignment: InternPool.Alignment = func.frame_allocs.items(.abi_align)[frame_i];
- offset.* = if (math.sign(offset.*) < 0)
- -1 * @as(i32, @intCast(alignment.backward(@intCast(@abs(offset.*)))))
- else
- @intCast(alignment.forward(@intCast(@abs(offset.*))));
+ offset.* = math.sign(offset.*) * @as(i32, @intCast(alignment.backward(@intCast(@abs(offset.*)))));
}
func.frame_locs.set(frame_i, .{ .base = base, .disp = offset.* });
offset.* += func.frame_allocs.items(.abi_size)[frame_i];
@@ -1717,8 +1898,8 @@ fn computeFrameLayout(func: *Func) !FrameLayout {
}
break :blk i;
};
- const saved_reg_size = save_reg_list.size();
+ const saved_reg_size = save_reg_list.size();
frame_size[@intFromEnum(FrameIndex.spill_frame)] = @intCast(saved_reg_size);
// The total frame size is calculated by the amount of s registers you need to save * 8, as each
@@ -1811,12 +1992,14 @@ fn truncateRegister(func: *Func, ty: Type, reg: Register) !void {
.signedness = .unsigned,
.bits = @intCast(ty.bitSize(pt)),
};
+ assert(reg.class() == .int);
+
const shift = math.cast(u6, 64 - int_info.bits % 64) orelse return;
switch (int_info.signedness) {
.signed => {
_ = try func.addInst(.{
.tag = .slli,
- .ops = .rri,
+
.data = .{
.i_type = .{
.rd = reg,
@@ -1827,7 +2010,7 @@ fn truncateRegister(func: *Func, ty: Type, reg: Register) !void {
});
_ = try func.addInst(.{
.tag = .srai,
- .ops = .rri,
+
.data = .{
.i_type = .{
.rd = reg,
@@ -1842,7 +2025,7 @@ fn truncateRegister(func: *Func, ty: Type, reg: Register) !void {
if (mask < 256) {
_ = try func.addInst(.{
.tag = .andi,
- .ops = .rri,
+
.data = .{
.i_type = .{
.rd = reg,
@@ -1854,7 +2037,7 @@ fn truncateRegister(func: *Func, ty: Type, reg: Register) !void {
} else {
_ = try func.addInst(.{
.tag = .slli,
- .ops = .rri,
+
.data = .{
.i_type = .{
.rd = reg,
@@ -1865,7 +2048,7 @@ fn truncateRegister(func: *Func, ty: Type, reg: Register) !void {
});
_ = try func.addInst(.{
.tag = .srli,
- .ops = .rri,
+
.data = .{
.i_type = .{
.rd = reg,
@@ -1879,15 +2062,6 @@ fn truncateRegister(func: *Func, ty: Type, reg: Register) !void {
}
}
-fn symbolIndex(func: *Func) !u32 {
- const pt = func.pt;
- const zcu = pt.zcu;
- const decl_index = zcu.funcOwnerDeclIndex(func.func_index);
- const elf_file = func.bin_file.cast(link.File.Elf).?;
- const atom_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, decl_index);
- return atom_index;
-}
-
fn allocFrameIndex(func: *Func, alloc: FrameAlloc) !FrameIndex {
const frame_allocs_slice = func.frame_allocs.slice();
const frame_size = frame_allocs_slice.items(.abi_size);
@@ -2051,6 +2225,10 @@ pub fn spillInstruction(func: *Func, reg: Register, inst: Air.Inst.Index) !void
try tracking.trackSpill(func, inst);
}
+pub fn spillRegisters(func: *Func, comptime registers: []const Register) !void {
+ inline for (registers) |reg| try func.register_manager.getKnownReg(reg, null);
+}
+
/// Copies a value to a register without tracking the register. The register is not considered
/// allocated. A second call to `copyToTmpRegister` may return the same register.
/// This can have a side effect of spilling instructions to the stack to free up a register.
@@ -2151,11 +2329,16 @@ fn airTrunc(func: *Func, inst: Air.Inst.Index) !void {
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
if (func.liveness.isUnused(inst))
return func.finishAir(inst, .unreach, .{ ty_op.operand, .none, .none });
-
+ // we assume no zeroext in the "Zig ABI", so it's fine to just not truncate it.
const operand = try func.resolveInst(ty_op.operand);
- _ = operand;
- return func.fail("TODO implement trunc for {}", .{func.target.cpu.arch});
- // return func.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+
+ // we can do it just to be safe, but this shouldn't be needed for no-runtime safety modes
+ switch (operand) {
+ .register => |reg| try func.truncateRegister(func.typeOf(ty_op.operand), reg),
+ else => {},
+ }
+
+ return func.finishAir(inst, operand, .{ ty_op.operand, .none, .none });
}
fn airIntFromBool(func: *Func, inst: Air.Inst.Index) !void {
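[note] `airTrunc` above can get away with returning the operand unchanged because the backend keeps narrow integers normalized only on demand: `truncateRegister` (its updated body appears further down in this diff) re-canonicalizes a value with a shift pair whenever the exact bit pattern matters. A Zig model of that slli/srli (unsigned) and slli/srai (signed) pattern, assuming the value lives in a 64-bit register and 1 <= bits <= 63 (sketch):

    // Normalize the low `bits` of a 64-bit register, as the emitted shifts do.
    fn truncUnsigned(x: u64, bits: u6) u64 {
        const shift: u6 = @intCast(64 - @as(u7, bits)); // e.g. bits=13 -> shift=51
        return (x << shift) >> shift; // slli then srli: zero-fills the high bits
    }

    fn truncSigned(x: i64, bits: u6) i64 {
        const shift: u6 = @intCast(64 - @as(u7, bits));
        const up: i64 = @bitCast(@as(u64, @bitCast(x)) << shift); // slli
        return up >> shift; // srai: arithmetic shift sign-fills the high bits
    }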
@@ -2186,8 +2369,7 @@ fn airNot(func: *Func, inst: Air.Inst.Index) !void {
switch (ty.zigTypeTag(zcu)) {
.Bool => {
_ = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_not,
+ .tag = .pseudo_not,
.data = .{
.rr = .{
.rs = operand_reg,
@@ -2205,7 +2387,6 @@ fn airNot(func: *Func, inst: Air.Inst.Index) !void {
32, 64 => {
_ = try func.addInst(.{
.tag = .xori,
- .ops = .rri,
.data = .{
.i_type = .{
.rd = dst_reg,
@@ -2305,10 +2486,7 @@ fn binOp(
80, 128 => true,
else => unreachable,
};
- switch (air_tag) {
- .rem, .mod => {},
- else => if (!type_needs_libcall) break :libcall,
- }
+ if (!type_needs_libcall) break :libcall;
return func.fail("binOp libcall runtime-float ops", .{});
}
@@ -2367,7 +2545,6 @@ fn genBinOp(
const pt = func.pt;
const zcu = pt.zcu;
const bit_size = lhs_ty.bitSize(pt);
- assert(bit_size <= 64);
const is_unsigned = lhs_ty.isUnsignedInt(zcu);
@@ -2384,16 +2561,30 @@ fn genBinOp(
.sub_wrap,
.mul,
.mul_wrap,
+ .rem,
+ .div_trunc,
=> {
- if (!math.isPowerOfTwo(bit_size))
- return func.fail(
- "TODO: genBinOp {s} non-pow 2, found {}",
- .{ @tagName(tag), bit_size },
- );
+ switch (tag) {
+ .rem,
+ .div_trunc,
+ => {
+ if (!math.isPowerOfTwo(bit_size)) {
+ try func.truncateRegister(lhs_ty, lhs_reg);
+ try func.truncateRegister(rhs_ty, rhs_reg);
+ }
+ },
+ else => {
+ if (!math.isPowerOfTwo(bit_size))
+ return func.fail(
+ "TODO: genBinOp verify {s} non-pow 2, found {}",
+ .{ @tagName(tag), bit_size },
+ );
+ },
+ }
switch (lhs_ty.zigTypeTag(zcu)) {
.Int => {
- const mir_tag: Mir.Inst.Tag = switch (tag) {
+ const mnem: Mnemonic = switch (tag) {
.add, .add_wrap => switch (bit_size) {
8, 16, 64 => .add,
32 => .addw,
@@ -2409,12 +2600,19 @@ fn genBinOp(
32 => .mulw,
else => unreachable,
},
+ .rem => switch (bit_size) {
+ 8, 16, 32 => if (is_unsigned) .remuw else .remw,
+ else => if (is_unsigned) .remu else .rem,
+ },
+ .div_trunc => switch (bit_size) {
+ 8, 16, 32 => if (is_unsigned) .divuw else .divw,
+ else => if (is_unsigned) .divu else .div,
+ },
else => unreachable,
};
_ = try func.addInst(.{
- .tag = mir_tag,
- .ops = .rrr,
+ .tag = mnem,
.data = .{
.r_type = .{
.rd = dst_reg,
@@ -2423,17 +2621,9 @@ fn genBinOp(
},
},
});
-
- // truncate when the instruction is larger than the bit size.
- switch (bit_size) {
- 8, 16 => try func.truncateRegister(lhs_ty, dst_reg),
- 32 => {}, // addw/subw affects the first 32-bits
- 64 => {}, // add/sub affects the entire register
- else => unreachable,
- }
},
.Float => {
- const mir_tag: Mir.Inst.Tag = switch (tag) {
+ const mir_tag: Mnemonic = switch (tag) {
.add => switch (bit_size) {
32 => .fadds,
64 => .faddd,
@@ -2449,12 +2639,11 @@ fn genBinOp(
64 => .fmuld,
else => unreachable,
},
- else => unreachable,
+ else => return func.fail("TODO: genBinOp {s} Float", .{@tagName(tag)}),
};
_ = try func.addInst(.{
.tag = mir_tag,
- .ops = .rrr,
.data = .{
.r_type = .{
.rd = dst_reg,
@@ -2470,7 +2659,7 @@ fn genBinOp(
const child_ty = lhs_ty.childType(zcu);
- const mir_tag: Mir.Inst.Tag = switch (tag) {
+ const mir_tag: Mnemonic = switch (tag) {
.add => switch (child_ty.zigTypeTag(zcu)) {
.Int => .vaddvv,
.Float => .vfaddvv,
@@ -2481,6 +2670,11 @@ fn genBinOp(
.Float => .vfsubvv,
else => unreachable,
},
+ .mul => switch (child_ty.zigTypeTag(zcu)) {
+ .Int => .vmulvv,
+ .Float => .vfmulvv,
+ else => unreachable,
+ },
else => return func.fail("TODO: genBinOp {s} Vector", .{@tagName(tag)}),
};
@@ -2490,7 +2684,7 @@ fn genBinOp(
16 => .@"16",
32 => .@"32",
64 => .@"64",
- else => unreachable,
+ else => return func.fail("TODO: genBinOp > 64 bit elements, found {d}", .{elem_size}),
},
.vlmul = .m1,
.vma = true,
@@ -2499,7 +2693,6 @@ fn genBinOp(
_ = try func.addInst(.{
.tag = mir_tag,
- .ops = .rrr,
.data = .{
.r_type = .{
.rd = dst_reg,
@@ -2513,6 +2706,53 @@ fn genBinOp(
}
},
+ .add_sat,
+ => {
+ if (bit_size != 64 or !is_unsigned)
+ return func.fail("TODO: genBinOp ty: {}", .{lhs_ty.fmt(pt)});
+
+ const tmp_reg = try func.copyToTmpRegister(rhs_ty, .{ .register = rhs_reg });
+ const tmp_lock = func.register_manager.lockRegAssumeUnused(tmp_reg);
+ defer func.register_manager.unlockReg(tmp_lock);
+
+ _ = try func.addInst(.{
+ .tag = .add,
+ .data = .{ .r_type = .{
+ .rd = tmp_reg,
+ .rs1 = rhs_reg,
+ .rs2 = lhs_reg,
+ } },
+ });
+
+ _ = try func.addInst(.{
+ .tag = .sltu,
+ .data = .{ .r_type = .{
+ .rd = dst_reg,
+ .rs1 = tmp_reg,
+ .rs2 = lhs_reg,
+ } },
+ });
+
+ // neg dst_reg, dst_reg
+ _ = try func.addInst(.{
+ .tag = .sub,
+ .data = .{ .r_type = .{
+ .rd = dst_reg,
+ .rs1 = .zero,
+ .rs2 = dst_reg,
+ } },
+ });
+
+ _ = try func.addInst(.{
+ .tag = .@"or",
+ .data = .{ .r_type = .{
+ .rd = dst_reg,
+ .rs1 = dst_reg,
+ .rs2 = tmp_reg,
+ } },
+ });
+ },
+
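[note] The four instructions in the `.add_sat` case above are the classic branchless unsigned saturating add (only u64 is handled so far, per the TODO): the `add` wraps on overflow, `sltu` sets the destination to 1 exactly when the sum wrapped below the left operand, the `neg` (sub from zero) turns that bit into an all-ones mask, and the final `or` pins the result at the maximum. A Zig model of the sequence (sketch):

    fn addSatU64(lhs: u64, rhs: u64) u64 {
        const tmp = lhs +% rhs; // add: wraps on overflow
        const overflowed: u64 = @intFromBool(tmp < lhs); // sltu
        const mask = @as(u64, 0) -% overflowed; // neg: 0 or all ones
        return tmp | mask; // or: saturates to maxInt(u64) on overflow
    }

    // addSatU64(0xFFFF_FFFF_FFFF_FFFF, 1) == 0xFFFF_FFFF_FFFF_FFFF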
.ptr_add,
.ptr_sub,
=> {
@@ -2523,14 +2763,14 @@ fn genBinOp(
// RISC-V has no immediate mul, so we copy the size to a temporary register
const elem_size = lhs_ty.elemType2(zcu).abiSize(pt);
- const elem_size_reg = try func.copyToTmpRegister(Type.usize, .{ .immediate = elem_size });
+ const elem_size_reg = try func.copyToTmpRegister(Type.u64, .{ .immediate = elem_size });
try func.genBinOp(
.mul,
tmp_mcv,
rhs_ty,
.{ .register = elem_size_reg },
- Type.usize,
+ Type.u64,
tmp_reg,
);
@@ -2541,9 +2781,9 @@ fn genBinOp(
else => unreachable,
},
lhs_mcv,
- Type.usize, // we know it's a pointer, so it'll be usize.
+ Type.u64, // we know it's a pointer, so it'll be usize.
tmp_mcv,
- Type.usize,
+ Type.u64,
dst_reg,
);
},
@@ -2559,7 +2799,6 @@ fn genBinOp(
.bit_or, .bool_or => .@"or",
else => unreachable,
},
- .ops = .rrr,
.data = .{
.r_type = .{
.rd = dst_reg,
@@ -2577,68 +2816,22 @@ fn genBinOp(
}
},
- .div_trunc,
- => {
- if (!math.isPowerOfTwo(bit_size))
- return func.fail(
- "TODO: genBinOp {s} non-pow 2, found {}",
- .{ @tagName(tag), bit_size },
- );
-
- const mir_tag: Mir.Inst.Tag = switch (tag) {
- .div_trunc => switch (bit_size) {
- 8, 16, 32 => if (is_unsigned) .divuw else .divw,
- 64 => if (is_unsigned) .divu else .div,
- else => unreachable,
- },
- else => unreachable,
- };
-
- _ = try func.addInst(.{
- .tag = mir_tag,
- .ops = .rrr,
- .data = .{
- .r_type = .{
- .rd = dst_reg,
- .rs1 = lhs_reg,
- .rs2 = rhs_reg,
- },
- },
- });
-
- if (!is_unsigned) {
- // truncate when the instruction is larger than the bit size.
- switch (bit_size) {
- 8, 16 => try func.truncateRegister(lhs_ty, dst_reg),
- 32 => {}, // divw affects the first 32-bits
- 64 => {}, // div affects the entire register
- else => unreachable,
- }
- }
- },
-
.shr,
.shr_exact,
.shl,
.shl_exact,
=> {
- if (!math.isPowerOfTwo(bit_size))
- return func.fail(
- "TODO: genBinOp {s} non-pow 2, found {}",
- .{ @tagName(tag), bit_size },
- );
-
- // it's important that the shift amount is exact
+ if (bit_size > 64) return func.fail("TODO: genBinOp shift > 64 bits, {}", .{bit_size});
try func.truncateRegister(rhs_ty, rhs_reg);
- const mir_tag: Mir.Inst.Tag = switch (tag) {
+ const mir_tag: Mnemonic = switch (tag) {
.shl, .shl_exact => switch (bit_size) {
- 8, 16, 64 => .sll,
+ 1...31, 33...64 => .sll,
32 => .sllw,
else => unreachable,
},
.shr, .shr_exact => switch (bit_size) {
- 8, 16, 64 => .srl,
+ 1...31, 33...64 => .srl,
32 => .srlw,
else => unreachable,
},
@@ -2647,20 +2840,12 @@ fn genBinOp(
_ = try func.addInst(.{
.tag = mir_tag,
- .ops = .rrr,
.data = .{ .r_type = .{
.rd = dst_reg,
.rs1 = lhs_reg,
.rs2 = rhs_reg,
} },
});
-
- switch (bit_size) {
- 8, 16 => try func.truncateRegister(lhs_ty, dst_reg),
- 32 => {},
- 64 => {},
- else => unreachable,
- }
},
// TODO: move the isel logic out of lower and into here.
@@ -2671,9 +2856,14 @@ fn genBinOp(
.cmp_gt,
.cmp_gte,
=> {
+ assert(lhs_reg.class() == rhs_reg.class());
+ if (lhs_reg.class() == .int) {
+ try func.truncateRegister(lhs_ty, lhs_reg);
+ try func.truncateRegister(rhs_ty, rhs_reg);
+ }
+
_ = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_compare,
+ .tag = .pseudo_compare,
.data = .{
.compare = .{
.op = switch (tag) {
@@ -2719,60 +2909,60 @@ fn genBinOp(
// a1, s0 was -1, flipping all the bits in a2 and effectively restoring a0. If a0 was greater than or equal to a1,
// s0 was 0, leaving a2 unchanged as a0.
.min, .max => {
- const int_info = lhs_ty.intInfo(zcu);
+ switch (lhs_ty.zigTypeTag(zcu)) {
+ .Int => {
+ const int_info = lhs_ty.intInfo(zcu);
- const mask_reg, const mask_lock = try func.allocReg(.int);
- defer func.register_manager.unlockReg(mask_lock);
+ const mask_reg, const mask_lock = try func.allocReg(.int);
+ defer func.register_manager.unlockReg(mask_lock);
- _ = try func.addInst(.{
- .tag = if (int_info.signedness == .unsigned) .sltu else .slt,
- .ops = .rrr,
- .data = .{ .r_type = .{
- .rd = mask_reg,
- .rs1 = lhs_reg,
- .rs2 = rhs_reg,
- } },
- });
+ _ = try func.addInst(.{
+ .tag = if (int_info.signedness == .unsigned) .sltu else .slt,
+ .data = .{ .r_type = .{
+ .rd = mask_reg,
+ .rs1 = lhs_reg,
+ .rs2 = rhs_reg,
+ } },
+ });
- _ = try func.addInst(.{
- .tag = .sub,
- .ops = .rrr,
- .data = .{ .r_type = .{
- .rd = mask_reg,
- .rs1 = .zero,
- .rs2 = mask_reg,
- } },
- });
+ _ = try func.addInst(.{
+ .tag = .sub,
+ .data = .{ .r_type = .{
+ .rd = mask_reg,
+ .rs1 = .zero,
+ .rs2 = mask_reg,
+ } },
+ });
- _ = try func.addInst(.{
- .tag = .xor,
- .ops = .rrr,
- .data = .{ .r_type = .{
- .rd = dst_reg,
- .rs1 = lhs_reg,
- .rs2 = rhs_reg,
- } },
- });
+ _ = try func.addInst(.{
+ .tag = .xor,
+ .data = .{ .r_type = .{
+ .rd = dst_reg,
+ .rs1 = lhs_reg,
+ .rs2 = rhs_reg,
+ } },
+ });
- _ = try func.addInst(.{
- .tag = .@"and",
- .ops = .rrr,
- .data = .{ .r_type = .{
- .rd = mask_reg,
- .rs1 = dst_reg,
- .rs2 = mask_reg,
- } },
- });
+ _ = try func.addInst(.{
+ .tag = .@"and",
+ .data = .{ .r_type = .{
+ .rd = mask_reg,
+ .rs1 = dst_reg,
+ .rs2 = mask_reg,
+ } },
+ });
- _ = try func.addInst(.{
- .tag = .xor,
- .ops = .rrr,
- .data = .{ .r_type = .{
- .rd = dst_reg,
- .rs1 = if (tag == .min) rhs_reg else lhs_reg,
- .rs2 = mask_reg,
- } },
- });
+ _ = try func.addInst(.{
+ .tag = .xor,
+ .data = .{ .r_type = .{
+ .rd = dst_reg,
+ .rs1 = if (tag == .min) rhs_reg else lhs_reg,
+ .rs2 = mask_reg,
+ } },
+ });
+ },
+ else => |t| return func.fail("TODO: genBinOp min/max for {s}", .{@tagName(t)}),
+ }
},
else => return func.fail("TODO: genBinOp {}", .{tag}),
}
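[note] The min/max lowering kept above (now restricted to integers) is the xor-mask trick the long comment walks through: `slt`/`sltu` produces 0 or 1, negation turns that into a select mask, and two xors route either operand into the destination without a branch. Worked in Zig for the signed min case (sketch):

    fn minI64(lhs: i64, rhs: i64) i64 {
        const lt: u64 = @intFromBool(lhs < rhs); // slt
        const mask = @as(u64, 0) -% lt; // neg: all ones iff lhs < rhs
        const diff = @as(u64, @bitCast(lhs)) ^ @as(u64, @bitCast(rhs)); // xor
        const masked = diff & mask; // and
        // xor with rhs: mask == all ones yields lhs, mask == 0 yields rhs.
        return @bitCast(@as(u64, @bitCast(rhs)) ^ masked);
    }

    // minI64(3, 7) == 3: mask is all ones, so rhs ^ (lhs ^ rhs) == lhs.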
@@ -2791,12 +2981,14 @@ fn airAddWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
+ const rhs_ty = func.typeOf(extra.rhs);
+ const lhs_ty = func.typeOf(extra.lhs);
+
const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
- const ty = func.typeOf(extra.lhs);
- switch (ty.zigTypeTag(zcu)) {
+ switch (lhs_ty.zigTypeTag(zcu)) {
.Vector => return func.fail("TODO implement add with overflow for Vector type", .{}),
.Int => {
- const int_info = ty.intInfo(zcu);
+ const int_info = lhs_ty.intInfo(zcu);
const tuple_ty = func.typeOfIndex(inst);
const result_mcv = try func.allocRegOrMem(tuple_ty, inst, false);
@@ -2805,26 +2997,29 @@ fn airAddWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) {
const add_result = try func.binOp(null, .add, extra.lhs, extra.rhs);
- const add_result_reg = try func.copyToTmpRegister(ty, add_result);
- const add_result_reg_lock = func.register_manager.lockRegAssumeUnused(add_result_reg);
- defer func.register_manager.unlockReg(add_result_reg_lock);
-
try func.genSetMem(
.{ .frame = offset.index },
offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, pt))),
- ty,
+ lhs_ty,
add_result,
);
+ const trunc_reg = try func.copyToTmpRegister(lhs_ty, add_result);
+ const trunc_reg_lock = func.register_manager.lockRegAssumeUnused(trunc_reg);
+ defer func.register_manager.unlockReg(trunc_reg_lock);
+
const overflow_reg, const overflow_lock = try func.allocReg(.int);
defer func.register_manager.unlockReg(overflow_lock);
+ // if the result isn't equal after truncating it to the given type,
+ // an overflow must have happened.
+ try func.truncateRegister(lhs_ty, trunc_reg);
try func.genBinOp(
.cmp_neq,
- .{ .register = add_result_reg },
- ty,
- .{ .register = add_result_reg },
- ty,
+ add_result,
+ lhs_ty,
+ .{ .register = trunc_reg },
+ rhs_ty,
overflow_reg,
);
@@ -2837,7 +3032,68 @@ fn airAddWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
break :result result_mcv;
} else {
- return func.fail("TODO: less than 8 bit or non-pow 2 addition", .{});
+ const rhs_mcv = try func.resolveInst(extra.rhs);
+ const lhs_mcv = try func.resolveInst(extra.lhs);
+
+ const rhs_reg, const rhs_lock = try func.promoteReg(rhs_ty, rhs_mcv);
+ const lhs_reg, const lhs_lock = try func.promoteReg(lhs_ty, lhs_mcv);
+ defer {
+ if (rhs_lock) |lock| func.register_manager.unlockReg(lock);
+ if (lhs_lock) |lock| func.register_manager.unlockReg(lock);
+ }
+
+ try func.truncateRegister(rhs_ty, rhs_reg);
+ try func.truncateRegister(lhs_ty, lhs_reg);
+
+ const dest_reg, const dest_lock = try func.allocReg(.int);
+ defer func.register_manager.unlockReg(dest_lock);
+
+ _ = try func.addInst(.{
+ .tag = .add,
+ .data = .{ .r_type = .{
+ .rs1 = rhs_reg,
+ .rs2 = lhs_reg,
+ .rd = dest_reg,
+ } },
+ });
+
+ try func.truncateRegister(func.typeOfIndex(inst), dest_reg);
+ const add_result: MCValue = .{ .register = dest_reg };
+
+ try func.genSetMem(
+ .{ .frame = offset.index },
+ offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, pt))),
+ lhs_ty,
+ add_result,
+ );
+
+ const trunc_reg = try func.copyToTmpRegister(lhs_ty, add_result);
+ const trunc_reg_lock = func.register_manager.lockRegAssumeUnused(trunc_reg);
+ defer func.register_manager.unlockReg(trunc_reg_lock);
+
+ const overflow_reg, const overflow_lock = try func.allocReg(.int);
+ defer func.register_manager.unlockReg(overflow_lock);
+
+ // if the result isn't equal after truncating it to the given type,
+ // an overflow must have happened.
+ try func.truncateRegister(lhs_ty, trunc_reg);
+ try func.genBinOp(
+ .cmp_neq,
+ add_result,
+ lhs_ty,
+ .{ .register = trunc_reg },
+ rhs_ty,
+ overflow_reg,
+ );
+
+ try func.genSetMem(
+ .{ .frame = offset.index },
+ offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, pt))),
+ Type.u1,
+ .{ .register = overflow_reg },
+ );
+
+ break :result result_mcv;
}
},
else => unreachable,
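[note] Both overflow paths above share one idea, called out in the inline comments: do the arithmetic at full register width, truncate a copy of the result back to the destination type, and flag overflow when the two disagree. For u8, 200 + 100 = 300 at register width; truncated to 8 bits that is 44, and 44 != 300, so the overflow bit is set. A compact Zig model, generic over the narrow unsigned type (sketch):

    fn addWithOverflow(comptime T: type, lhs: T, rhs: T) struct { T, u1 } {
        // Widen to register width, add, then compare against the truncated copy.
        const wide = @as(u64, lhs) + @as(u64, rhs);
        const narrow: T = @truncate(wide);
        const overflowed: u1 = @intFromBool(wide != narrow);
        return .{ narrow, overflowed };
    }

    // addWithOverflow(u8, 200, 100) returns .{ 44, 1 }.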
@@ -2890,7 +3146,7 @@ fn airSubWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
const rhs_reg, const rhs_lock = try func.promoteReg(rhs_ty, rhs);
defer if (rhs_lock) |lock| func.register_manager.unlockReg(lock);
- const overflow_reg = try func.copyToTmpRegister(Type.usize, .{ .immediate = 0 });
+ const overflow_reg = try func.copyToTmpRegister(Type.u64, .{ .immediate = 0 });
const overflow_lock = func.register_manager.lockRegAssumeUnused(overflow_reg);
defer func.register_manager.unlockReg(overflow_lock);
@@ -2899,7 +3155,6 @@ fn airSubWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
.unsigned => {
_ = try func.addInst(.{
.tag = .sltu,
- .ops = .rrr,
.data = .{ .r_type = .{
.rd = overflow_reg,
.rs1 = lhs_reg,
@@ -2921,7 +3176,6 @@ fn airSubWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
64 => {
_ = try func.addInst(.{
.tag = .slt,
- .ops = .rrr,
.data = .{ .r_type = .{
.rd = overflow_reg,
.rs1 = overflow_reg,
@@ -2931,7 +3185,6 @@ fn airSubWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
_ = try func.addInst(.{
.tag = .slt,
- .ops = .rrr,
.data = .{ .r_type = .{
.rd = rhs_reg,
.rs1 = rhs_reg,
@@ -2941,7 +3194,6 @@ fn airSubWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
_ = try func.addInst(.{
.tag = .xor,
- .ops = .rrr,
.data = .{ .r_type = .{
.rd = lhs_reg,
.rs1 = overflow_reg,
@@ -2952,9 +3204,9 @@ fn airSubWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
try func.genBinOp(
.cmp_neq,
.{ .register = overflow_reg },
- Type.usize,
+ Type.u64,
.{ .register = rhs_reg },
- Type.usize,
+ Type.u64,
overflow_reg,
);
@@ -3017,61 +3269,34 @@ fn airMulWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
switch (lhs_ty.zigTypeTag(zcu)) {
else => |x| return func.fail("TODO: airMulWithOverflow {s}", .{@tagName(x)}),
.Int => {
- assert(lhs_ty.eql(rhs_ty, zcu));
- const int_info = lhs_ty.intInfo(zcu);
- switch (int_info.bits) {
- 1...32 => {
- if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) {
- if (int_info.signedness == .unsigned) {
- switch (int_info.bits) {
- 1...8 => {
- const max_val = std.math.pow(u16, 2, int_info.bits) - 1;
-
- const add_reg, const add_lock = try func.promoteReg(lhs_ty, lhs);
- defer if (add_lock) |lock| func.register_manager.unlockReg(lock);
-
- const overflow_reg, const overflow_lock = try func.allocReg(.int);
- defer func.register_manager.unlockReg(overflow_lock);
-
- _ = try func.addInst(.{
- .tag = .andi,
- .ops = .rri,
- .data = .{ .i_type = .{
- .rd = overflow_reg,
- .rs1 = add_reg,
- .imm12 = Immediate.s(max_val),
- } },
- });
-
- try func.genBinOp(
- .cmp_neq,
- .{ .register = overflow_reg },
- lhs_ty,
- .{ .register = add_reg },
- lhs_ty,
- overflow_reg,
- );
-
- try func.genCopy(
- lhs_ty,
- result_mcv.offset(overflow_off),
- .{ .register = overflow_reg },
- );
-
- break :result result_mcv;
- },
-
- else => return func.fail("TODO: airMulWithOverflow check for size {d}", .{int_info.bits}),
- }
- } else {
- return func.fail("TODO: airMulWithOverflow calculate carry for signed addition", .{});
- }
- } else {
- return func.fail("TODO: airMulWithOverflow with < 8 bits or non-pow of 2", .{});
- }
- },
- else => return func.fail("TODO: airMulWithOverflow larger than 32-bit mul", .{}),
- }
+ if (std.debug.runtime_safety) assert(lhs_ty.eql(rhs_ty, zcu));
+
+ const trunc_reg = try func.copyToTmpRegister(lhs_ty, .{ .register = dest_reg });
+ const trunc_reg_lock = func.register_manager.lockRegAssumeUnused(trunc_reg);
+ defer func.register_manager.unlockReg(trunc_reg_lock);
+
+ const overflow_reg, const overflow_lock = try func.allocReg(.int);
+ defer func.register_manager.unlockReg(overflow_lock);
+
+ // if the result isn't equal after truncating it to the given type,
+ // an overflow must have happened.
+ try func.truncateRegister(func.typeOf(extra.lhs), trunc_reg);
+ try func.genBinOp(
+ .cmp_neq,
+ .{ .register = dest_reg },
+ lhs_ty,
+ .{ .register = trunc_reg },
+ rhs_ty,
+ overflow_reg,
+ );
+
+ try func.genCopy(
+ lhs_ty,
+ result_mcv.offset(overflow_off),
+ .{ .register = overflow_reg },
+ );
+
+ break :result result_mcv;
},
}
};
@@ -3085,12 +3310,6 @@ fn airShlWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
-fn airAddSat(func: *Func, inst: Air.Inst.Index) !void {
- const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airAddSat", .{});
- return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
-}
-
fn airSubSat(func: *Func, inst: Air.Inst.Index) !void {
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airSubSat", .{});
@@ -3300,19 +3519,21 @@ fn airWrapOptional(func: *Func, inst: Air.Inst.Index) !void {
};
defer if (pl_lock) |lock| func.register_manager.unlockReg(lock);
- const opt_mcv = try func.allocRegOrMem(opt_ty, inst, true);
+ const opt_mcv = try func.allocRegOrMem(opt_ty, inst, false);
try func.genCopy(pl_ty, opt_mcv, pl_mcv);
if (!same_repr) {
const pl_abi_size: i32 = @intCast(pl_ty.abiSize(pt));
switch (opt_mcv) {
- .load_frame => |frame_addr| try func.genSetMem(
- .{ .frame = frame_addr.index },
- frame_addr.off + pl_abi_size,
- Type.u8,
- .{ .immediate = 1 },
- ),
- .register => return func.fail("TODO: airWrapOption opt_mcv register", .{}),
+ .load_frame => |frame_addr| {
+ try func.genCopy(pl_ty, opt_mcv, pl_mcv);
+ try func.genSetMem(
+ .{ .frame = frame_addr.index },
+ frame_addr.off + pl_abi_size,
+ Type.u8,
+ .{ .immediate = 1 },
+ );
+ },
else => unreachable,
}
}
@@ -3454,7 +3675,7 @@ fn airSliceLen(func: *Func, inst: Air.Inst.Index) !void {
if (func.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result len_mcv;
const dst_mcv = try func.allocRegOrMem(ty, inst, true);
- try func.genCopy(Type.usize, dst_mcv, len_mcv);
+ try func.genCopy(Type.u64, dst_mcv, len_mcv);
break :result dst_mcv;
},
.register_pair => |pair| {
@@ -3463,7 +3684,7 @@ fn airSliceLen(func: *Func, inst: Air.Inst.Index) !void {
if (func.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result len_mcv;
const dst_mcv = try func.allocRegOrMem(ty, inst, true);
- try func.genCopy(Type.usize, dst_mcv, len_mcv);
+ try func.genCopy(Type.u64, dst_mcv, len_mcv);
break :result dst_mcv;
},
else => return func.fail("TODO airSliceLen for {}", .{src_mcv}),
@@ -3474,14 +3695,28 @@ fn airSliceLen(func: *Func, inst: Air.Inst.Index) !void {
fn airPtrSliceLenPtr(func: *Func, inst: Air.Inst.Index) !void {
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement ptr_slice_len_ptr for {}", .{func.target.cpu.arch});
+ const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
+ const src_mcv = try func.resolveInst(ty_op.operand);
+
+ const dst_reg, const dst_lock = try func.allocReg(.int);
+ defer func.register_manager.unlockReg(dst_lock);
+ const dst_mcv: MCValue = .{ .register = dst_reg };
+
+ try func.genCopy(Type.u64, dst_mcv, src_mcv.offset(8));
+ break :result dst_mcv;
+ };
return func.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airPtrSlicePtrPtr(func: *Func, inst: Air.Inst.Index) !void {
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement ptr_slice_ptr_ptr for {}", .{func.target.cpu.arch});
- return func.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+
+ const opt_mcv = try func.resolveInst(ty_op.operand);
+ const dst_mcv = if (func.reuseOperand(inst, ty_op.operand, 0, opt_mcv))
+ opt_mcv
+ else
+ try func.copyToNewRegister(inst, opt_mcv);
+ return func.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}
fn airSliceElemVal(func: *Func, inst: Air.Inst.Index) !void {
@@ -3538,11 +3773,10 @@ fn genSliceElemPtr(func: *Func, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
const addr_reg, const addr_lock = try func.allocReg(.int);
defer func.register_manager.unlockReg(addr_lock);
- try func.genSetReg(Type.usize, addr_reg, slice_mcv);
+ try func.genSetReg(Type.u64, addr_reg, slice_mcv);
_ = try func.addInst(.{
.tag = .add,
- .ops = .rrr,
.data = .{ .r_type = .{
.rd = addr_reg,
.rs1 = addr_reg,
@@ -3576,12 +3810,12 @@ fn airArrayElemVal(func: *Func, inst: Air.Inst.Index) !void {
.register => {
const frame_index = try func.allocFrameIndex(FrameAlloc.initType(array_ty, pt));
try func.genSetMem(.{ .frame = frame_index }, 0, array_ty, array_mcv);
- try func.genSetReg(Type.usize, addr_reg, .{ .lea_frame = .{ .index = frame_index } });
+ try func.genSetReg(Type.u64, addr_reg, .{ .lea_frame = .{ .index = frame_index } });
},
.load_frame => |frame_addr| {
- try func.genSetReg(Type.usize, addr_reg, .{ .lea_frame = frame_addr });
+ try func.genSetReg(Type.u64, addr_reg, .{ .lea_frame = frame_addr });
},
- else => try func.genSetReg(Type.usize, addr_reg, array_mcv.address()),
+ else => try func.genSetReg(Type.u64, addr_reg, array_mcv.address()),
}
const dst_mcv = try func.allocRegOrMem(result_ty, inst, false);
@@ -3602,11 +3836,10 @@ fn airArrayElemVal(func: *Func, inst: Air.Inst.Index) !void {
// we can do a shortcut here where we don't need a vslidedown
// and can just copy to the frame index.
if (!(index_mcv == .immediate and index_mcv.immediate == 0)) {
- const index_reg = try func.copyToTmpRegister(Type.usize, index_mcv);
+ const index_reg = try func.copyToTmpRegister(Type.u64, index_mcv);
_ = try func.addInst(.{
.tag = .vslidedownvx,
- .ops = .rrr,
.data = .{ .r_type = .{
.rd = src_reg,
.rs1 = index_reg,
@@ -3624,7 +3857,6 @@ fn airArrayElemVal(func: *Func, inst: Air.Inst.Index) !void {
defer func.register_manager.unlockReg(offset_lock);
_ = try func.addInst(.{
.tag = .add,
- .ops = .rrr,
.data = .{ .r_type = .{
.rd = addr_reg,
.rs1 = addr_reg,
@@ -3640,8 +3872,55 @@ fn airArrayElemVal(func: *Func, inst: Air.Inst.Index) !void {
fn airPtrElemVal(func: *Func, inst: Air.Inst.Index) !void {
const is_volatile = false; // TODO
+ const pt = func.pt;
+ const zcu = pt.zcu;
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const result: MCValue = if (!is_volatile and func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement ptr_elem_val for {}", .{func.target.cpu.arch});
+ const base_ptr_ty = func.typeOf(bin_op.lhs);
+
+ const result: MCValue = if (!is_volatile and func.liveness.isUnused(inst)) .unreach else result: {
+ const elem_ty = base_ptr_ty.elemType2(zcu);
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none;
+
+ const base_ptr_mcv = try func.resolveInst(bin_op.lhs);
+ const base_ptr_lock: ?RegisterLock = switch (base_ptr_mcv) {
+ .register => |reg| func.register_manager.lockRegAssumeUnused(reg),
+ else => null,
+ };
+ defer if (base_ptr_lock) |lock| func.register_manager.unlockReg(lock);
+
+ const index_mcv = try func.resolveInst(bin_op.rhs);
+ const index_lock: ?RegisterLock = switch (index_mcv) {
+ .register => |reg| func.register_manager.lockRegAssumeUnused(reg),
+ else => null,
+ };
+ defer if (index_lock) |lock| func.register_manager.unlockReg(lock);
+
+ const elem_ptr_reg = if (base_ptr_mcv.isRegister() and func.liveness.operandDies(inst, 0))
+ base_ptr_mcv.register
+ else
+ try func.copyToTmpRegister(base_ptr_ty, base_ptr_mcv);
+ const elem_ptr_lock = func.register_manager.lockRegAssumeUnused(elem_ptr_reg);
+ defer func.register_manager.unlockReg(elem_ptr_lock);
+
+ try func.genBinOp(
+ .ptr_add,
+ base_ptr_mcv,
+ base_ptr_ty,
+ index_mcv,
+ Type.u64,
+ elem_ptr_reg,
+ );
+
+ const dst_mcv = try func.allocRegOrMem(func.typeOfIndex(inst), inst, true);
+ const dst_lock = switch (dst_mcv) {
+ .register => |reg| func.register_manager.lockRegAssumeUnused(reg),
+ else => null,
+ };
+ defer if (dst_lock) |lock| func.register_manager.unlockReg(lock);
+
+ try func.load(dst_mcv, .{ .register = elem_ptr_reg }, base_ptr_ty);
+ break :result dst_mcv;
+ };
return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
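
The `.ptr_add` genBinOp is expected to scale the index by the element's ABI
size before adding it to the base pointer; in plain Zig the address
computation looks like this (a sketch under that assumption):

    fn elemPtr(base: u64, index: u64, elem_size: u64) u64 {
        // elem_ptr = base + index * abiSize(elem)
        return base + index * elem_size;
    }
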
@@ -3651,10 +3930,14 @@ fn airPtrElemPtr(func: *Func, inst: Air.Inst.Index) !void {
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
- const result = result: {
+ const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
const elem_ptr_ty = func.typeOfIndex(inst);
const base_ptr_ty = func.typeOf(extra.lhs);
+ if (elem_ptr_ty.ptrInfo(zcu).flags.vector_index != .none) {
+ @panic("audit");
+ }
+
const base_ptr_mcv = try func.resolveInst(extra.lhs);
const base_ptr_lock: ?RegisterLock = switch (base_ptr_mcv) {
.register => |reg| func.register_manager.lockRegAssumeUnused(reg),
@@ -3662,16 +3945,6 @@ fn airPtrElemPtr(func: *Func, inst: Air.Inst.Index) !void {
};
defer if (base_ptr_lock) |lock| func.register_manager.unlockReg(lock);
- if (elem_ptr_ty.ptrInfo(zcu).flags.vector_index != .none) {
- break :result if (func.reuseOperand(inst, extra.lhs, 0, base_ptr_mcv))
- base_ptr_mcv
- else
- try func.copyToNewRegister(inst, base_ptr_mcv);
- }
-
- const elem_ty = base_ptr_ty.elemType2(zcu);
- const elem_abi_size = elem_ty.abiSize(pt);
- const index_ty = func.typeOf(extra.rhs);
const index_mcv = try func.resolveInst(extra.rhs);
const index_lock: ?RegisterLock = switch (index_mcv) {
.register => |reg| func.register_manager.lockRegAssumeUnused(reg),
@@ -3679,10 +3952,6 @@ fn airPtrElemPtr(func: *Func, inst: Air.Inst.Index) !void {
};
defer if (index_lock) |lock| func.register_manager.unlockReg(lock);
- const offset_reg = try func.elemOffset(index_ty, index_mcv, elem_abi_size);
- const offset_reg_lock = func.register_manager.lockRegAssumeUnused(offset_reg);
- defer func.register_manager.unlockReg(offset_reg_lock);
-
const result_reg, const result_lock = try func.allocReg(.int);
defer func.register_manager.unlockReg(result_lock);
@@ -3690,13 +3959,14 @@ fn airPtrElemPtr(func: *Func, inst: Air.Inst.Index) !void {
.ptr_add,
base_ptr_mcv,
base_ptr_ty,
- .{ .register = offset_reg },
- Type.usize,
+ index_mcv,
+ Type.u64,
result_reg,
);
break :result MCValue{ .register = result_reg };
};
+
return func.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
@@ -3729,7 +3999,7 @@ fn airGetUnionTag(func: *Func, inst: Air.Inst.Index) !void {
defer func.register_manager.unlockReg(result_lock);
switch (frame_mcv) {
- .load_frame => |frame_addr| {
+ .load_frame => {
if (tag_abi_size <= 8) {
const off: i32 = if (layout.tag_align.compare(.lt, layout.payload_align))
@intCast(layout.payload_size)
@@ -3739,7 +4009,7 @@ fn airGetUnionTag(func: *Func, inst: Air.Inst.Index) !void {
try func.genCopy(
tag_ty,
.{ .register = result_reg },
- .{ .load_frame = .{ .index = frame_addr.index, .off = frame_addr.off + off } },
+ frame_mcv.offset(off),
);
} else {
return func.fail(
@@ -3756,7 +4026,57 @@ fn airGetUnionTag(func: *Func, inst: Air.Inst.Index) !void {
fn airClz(func: *Func, inst: Air.Inst.Index) !void {
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airClz for {}", .{func.target.cpu.arch});
+ const operand = try func.resolveInst(ty_op.operand);
+ const ty = func.typeOf(ty_op.operand);
+
+ const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
+ const src_reg, const src_lock = try func.promoteReg(ty, operand);
+ defer if (src_lock) |lock| func.register_manager.unlockReg(lock);
+
+ const dst_reg: Register = if (func.reuseOperand(
+ inst,
+ ty_op.operand,
+ 0,
+ operand,
+ ) and operand == .register)
+ operand.register
+ else
+ (try func.allocRegOrMem(func.typeOfIndex(inst), inst, true)).register;
+
+ const bit_size = ty.bitSize(func.pt);
+ if (!math.isPowerOfTwo(bit_size)) try func.truncateRegister(ty, src_reg);
+
+ if (bit_size > 64) {
+ return func.fail("TODO: airClz > 64 bits, found {d}", .{bit_size});
+ }
+
+ _ = try func.addInst(.{
+ .tag = switch (bit_size) {
+ 32 => .clzw,
+ else => .clz,
+ },
+ .data = .{
+ .r_type = .{
+ .rs2 = .zero, // the spec fixes the rs2 field to zero for clz
+ .rs1 = src_reg,
+ .rd = dst_reg,
+ },
+ },
+ });
+
+ if (!(bit_size == 32 or bit_size == 64)) {
+ _ = try func.addInst(.{
+ .tag = .addi,
+ .data = .{ .i_type = .{
+ .rd = dst_reg,
+ .rs1 = dst_reg,
+ .imm12 = Immediate.s(-@as(i12, @intCast(64 - bit_size % 64))),
+ } },
+ });
+ }
+
+ break :result .{ .register = dst_reg };
+ };
return func.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
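
The trailing `addi` exists because `clz` counts leading zeros across the whole
64-bit register. A sketch of the arithmetic for a hypothetical u20 operand
(the names here are illustrative):

    fn clzU20(x: u20) u64 {
        const wide: u64 = x; // truncateRegister already zero-extended the value
        const hw_clz = @clz(wide); // what the Zbb `clz` instruction returns
        return hw_clz - (64 - 20); // the emitted `addi dst, dst, -44`
    }
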
@@ -3768,7 +4088,37 @@ fn airCtz(func: *Func, inst: Air.Inst.Index) !void {
fn airPopcount(func: *Func, inst: Air.Inst.Index) !void {
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airPopcount for {}", .{func.target.cpu.arch});
+ const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
+ const pt = func.pt;
+
+ const operand = try func.resolveInst(ty_op.operand);
+ const src_ty = func.typeOf(ty_op.operand);
+ const operand_reg, const operand_lock = try func.promoteReg(src_ty, operand);
+ defer if (operand_lock) |lock| func.register_manager.unlockReg(lock);
+
+ const dst_reg, const dst_lock = try func.allocReg(.int);
+ defer func.register_manager.unlockReg(dst_lock);
+
+ const bit_size = src_ty.bitSize(pt);
+ switch (bit_size) {
+ 32, 64 => {},
+ 1...31, 33...63 => try func.truncateRegister(src_ty, operand_reg),
+ else => return func.fail("TODO: airPopcount > 64 bits", .{}),
+ }
+
+ _ = try func.addInst(.{
+ .tag = if (bit_size <= 32) .cpopw else .cpop,
+ .data = .{
+ .r_type = .{
+ .rd = dst_reg,
+ .rs1 = operand_reg,
+ .rs2 = @enumFromInt(0b00010), // this is the cpop funct5
+ },
+ },
+ });
+
+ break :result .{ .register = dst_reg };
+ };
return func.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
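
The truncation before `cpop` matters for odd-width and signed operands:
sign-extension would otherwise leave stray set bits above the type's width.
A sketch for a hypothetical i7 value:

    fn popCountI7(x: i7) u64 {
        const raw: u64 = @bitCast(@as(i64, x)); // register holds the sign-extended value
        const masked = raw & ((1 << 7) - 1); // what truncateRegister leaves behind
        return @popCount(masked); // `cpop` now counts only the 7 real bits
    }
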
@@ -3785,6 +4135,13 @@ fn airAbs(func: *Func, inst: Air.Inst.Index) !void {
.Int => if (ty.zigTypeTag(zcu) == .Vector) {
return func.fail("TODO implement airAbs for {}", .{ty.fmt(pt)});
} else {
+ const int_info = scalar_ty.intInfo(zcu);
+ const int_bits = int_info.bits;
+ switch (int_bits) {
+ 32, 64 => {},
+ else => return func.fail("TODO: airAbs Int size {d}", .{int_bits}),
+ }
+
const return_mcv = try func.copyToNewRegister(inst, operand);
const operand_reg = return_mcv.register;
@@ -3792,18 +4149,20 @@ fn airAbs(func: *Func, inst: Air.Inst.Index) !void {
defer func.register_manager.unlockReg(temp_lock);
_ = try func.addInst(.{
- .tag = .srai,
- .ops = .rri,
+ .tag = switch (int_bits) {
+ 32 => .sraiw,
+ 64 => .srai,
+ else => unreachable,
+ },
.data = .{ .i_type = .{
.rd = temp_reg,
.rs1 = operand_reg,
- .imm12 = Immediate.u(63),
+ .imm12 = Immediate.u(int_bits - 1),
} },
});
_ = try func.addInst(.{
.tag = .xor,
- .ops = .rrr,
.data = .{ .r_type = .{
.rd = operand_reg,
.rs1 = operand_reg,
@@ -3812,8 +4171,11 @@ fn airAbs(func: *Func, inst: Air.Inst.Index) !void {
});
_ = try func.addInst(.{
- .tag = .sub,
- .ops = .rrr,
+ .tag = switch (int_bits) {
+ 32 => .subw,
+ 64 => .sub,
+ else => unreachable,
+ },
.data = .{ .r_type = .{
.rd = operand_reg,
.rs1 = operand_reg,
@@ -3825,14 +4187,14 @@ fn airAbs(func: *Func, inst: Air.Inst.Index) !void {
},
.Float => {
const float_bits = scalar_ty.floatBits(zcu.getTarget());
- switch (float_bits) {
+ const mnem: Mnemonic = switch (float_bits) {
16 => return func.fail("TODO: airAbs 16-bit float", .{}),
- 32 => {},
- 64 => {},
+ 32 => .fsgnjxs,
+ 64 => .fsgnjxd,
80 => return func.fail("TODO: airAbs 80-bit float", .{}),
128 => return func.fail("TODO: airAbs 128-bit float", .{}),
else => unreachable,
- }
+ };
const return_mcv = try func.copyToNewRegister(inst, operand);
const operand_reg = return_mcv.register;
@@ -3840,13 +4202,12 @@ fn airAbs(func: *Func, inst: Air.Inst.Index) !void {
assert(operand_reg.class() == .float);
_ = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_fabs,
+ .tag = mnem,
.data = .{
- .fabs = .{
+ .r_type = .{
.rd = operand_reg,
- .rs = operand_reg,
- .bits = float_bits,
+ .rs1 = operand_reg,
+ .rs2 = operand_reg,
},
},
});
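
The integer path is the classic branchless abs: with m = x >> (bits - 1)
(arithmetic shift, so m is 0 or all ones), abs(x) = (x ^ m) - m. The float
path uses `fsgnjx` with both source operands equal, which XORs the sign bit
with itself and clears it. A 64-bit sketch of the integer identity:

    fn abs64(x: i64) i64 {
        const m = x >> 63; // srai: 0 for non-negative x, -1 for negative x
        return (x ^ m) -% m; // xor then sub; wraps for minInt, as the hardware does
    }
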
@@ -3869,54 +4230,56 @@ fn airByteSwap(func: *Func, inst: Air.Inst.Index) !void {
const ty = func.typeOf(ty_op.operand);
const operand = try func.resolveInst(ty_op.operand);
- const int_bits = ty.intInfo(zcu).bits;
+ switch (ty.zigTypeTag(zcu)) {
+ .Int => {
+ const int_bits = ty.intInfo(zcu).bits;
- // bytes are no-op
- if (int_bits == 8 and func.reuseOperand(inst, ty_op.operand, 0, operand)) {
- return func.finishAir(inst, operand, .{ ty_op.operand, .none, .none });
- }
+ // swapping a single byte is a no-op
+ if (int_bits == 8 and func.reuseOperand(inst, ty_op.operand, 0, operand)) {
+ return func.finishAir(inst, operand, .{ ty_op.operand, .none, .none });
+ }
- const dest_mcv = try func.copyToNewRegister(inst, operand);
- const dest_reg = dest_mcv.register;
+ const dest_mcv = try func.copyToNewRegister(inst, operand);
+ const dest_reg = dest_mcv.register;
- switch (int_bits) {
- 16 => {
- const temp_reg, const temp_lock = try func.allocReg(.int);
- defer func.register_manager.unlockReg(temp_lock);
+ switch (int_bits) {
+ 16 => {
+ const temp_reg, const temp_lock = try func.allocReg(.int);
+ defer func.register_manager.unlockReg(temp_lock);
- _ = try func.addInst(.{
- .tag = .srli,
- .ops = .rri,
- .data = .{ .i_type = .{
- .imm12 = Immediate.s(8),
- .rd = temp_reg,
- .rs1 = dest_reg,
- } },
- });
+ _ = try func.addInst(.{
+ .tag = .srli,
+ .data = .{ .i_type = .{
+ .imm12 = Immediate.s(8),
+ .rd = temp_reg,
+ .rs1 = dest_reg,
+ } },
+ });
- _ = try func.addInst(.{
- .tag = .slli,
- .ops = .rri,
- .data = .{ .i_type = .{
- .imm12 = Immediate.s(8),
- .rd = dest_reg,
- .rs1 = dest_reg,
- } },
- });
- _ = try func.addInst(.{
- .tag = .@"or",
- .ops = .rri,
- .data = .{ .r_type = .{
- .rd = dest_reg,
- .rs1 = dest_reg,
- .rs2 = temp_reg,
- } },
- });
+ _ = try func.addInst(.{
+ .tag = .slli,
+ .data = .{ .i_type = .{
+ .imm12 = Immediate.s(8),
+ .rd = dest_reg,
+ .rs1 = dest_reg,
+ } },
+ });
+ _ = try func.addInst(.{
+ .tag = .@"or",
+ .data = .{ .r_type = .{
+ .rd = dest_reg,
+ .rs1 = dest_reg,
+ .rs2 = temp_reg,
+ } },
+ });
+ },
+ else => return func.fail("TODO: {d} bits for airByteSwap", .{int_bits}),
+ }
+
+ break :result dest_mcv;
},
- else => return func.fail("TODO: {d} bits for airByteSwap", .{int_bits}),
+ else => return func.fail("TODO: airByteSwap {}", .{ty.fmt(pt)}),
}
-
- break :result dest_mcv;
};
return func.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
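
For the 16-bit case the three instructions swap the two low bytes in place;
in register terms (a sketch; whether the dirty upper bits get cleaned up
later is outside this hunk):

    fn bswap16InReg(reg: u64) u64 {
        const hi_to_lo = reg >> 8; // srli temp, dest, 8
        const lo_to_hi = reg << 8; // slli dest, dest, 8
        // or dest, dest, temp: the low 16 bits now hold the swapped value,
        // e.g. 0xABCD becomes 0x..CDAB
        return lo_to_hi | hi_to_lo;
    }
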
@@ -3960,7 +4323,6 @@ fn airUnaryMath(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
.sqrt => {
_ = try func.addInst(.{
.tag = if (operand_bit_size == 64) .fsqrtd else .fsqrts,
- .ops = .rrr,
.data = .{
.r_type = .{
.rd = dst_reg,
@@ -3970,6 +4332,7 @@ fn airUnaryMath(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
},
});
},
+
else => return func.fail("TODO: airUnaryMath Float {s}", .{@tagName(tag)}),
}
},
@@ -4054,9 +4417,15 @@ fn airLoad(func: *Func, inst: Air.Inst.Index) !void {
const elem_size = elem_ty.abiSize(pt);
const dst_mcv: MCValue = blk: {
- // Pointer is 8 bytes, and if the element is more than that, we cannot reuse it.
- if (elem_size <= 8 and func.reuseOperand(inst, ty_op.operand, 0, ptr)) {
- // The MCValue that holds the pointer can be re-used as the value.
+ // The MCValue that holds the pointer can be re-used as the value.
+ // - "ptr" is 8 bytes, and if the element is more than that, we cannot reuse it.
+ //
+ // - "ptr" will be stored in an integer register, so the type that we're gonna
+ // load into it must also be a type that can be inside of an integer register
+ if (elem_size <= 8 and
+ (if (ptr == .register) func.typeRegClass(elem_ty) == ptr.register.class() else true) and
+ func.reuseOperand(inst, ty_op.operand, 0, ptr))
+ {
break :blk ptr;
} else {
break :blk try func.allocRegOrMem(elem_ty, inst, true);
@@ -4090,12 +4459,14 @@ fn load(func: *Func, dst_mcv: MCValue, ptr_mcv: MCValue, ptr_ty: Type) InnerErro
.register_offset,
.lea_frame,
.lea_symbol,
+ .lea_tlv,
=> try func.genCopy(dst_ty, dst_mcv, ptr_mcv.deref()),
.memory,
.indirect,
.load_symbol,
.load_frame,
+ .load_tlv,
=> {
const addr_reg = try func.copyToTmpRegister(ptr_ty, ptr_mcv);
const addr_lock = func.register_manager.lockRegAssumeUnused(addr_reg);
@@ -4117,15 +4488,16 @@ fn airStore(func: *Func, inst: Air.Inst.Index, safety: bool) !void {
const ptr = try func.resolveInst(bin_op.lhs);
const value = try func.resolveInst(bin_op.rhs);
const ptr_ty = func.typeOf(bin_op.lhs);
- const value_ty = func.typeOf(bin_op.rhs);
- try func.store(ptr, value, ptr_ty, value_ty);
+ try func.store(ptr, value, ptr_ty);
return func.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none });
}
/// Loads `value` into the "payload" of `pointer`.
-fn store(func: *Func, ptr_mcv: MCValue, src_mcv: MCValue, ptr_ty: Type, src_ty: Type) !void {
+fn store(func: *Func, ptr_mcv: MCValue, src_mcv: MCValue, ptr_ty: Type) !void {
+ const zcu = func.pt.zcu;
+ const src_ty = ptr_ty.childType(zcu);
log.debug("storing {}:{} in {}:{}", .{ src_mcv, src_ty.fmt(func.pt), ptr_mcv, ptr_ty.fmt(func.pt) });
switch (ptr_mcv) {
@@ -4141,12 +4513,14 @@ fn store(func: *Func, ptr_mcv: MCValue, src_mcv: MCValue, ptr_ty: Type, src_ty:
.register_offset,
.lea_symbol,
.lea_frame,
+ .lea_tlv,
=> try func.genCopy(src_ty, ptr_mcv.deref(), src_mcv),
.memory,
.indirect,
.load_symbol,
.load_frame,
+ .load_tlv,
=> {
const addr_reg = try func.copyToTmpRegister(ptr_ty, ptr_mcv);
const addr_lock = func.register_manager.lockRegAssumeUnused(addr_reg);
@@ -4154,7 +4528,7 @@ fn store(func: *Func, ptr_mcv: MCValue, src_mcv: MCValue, ptr_ty: Type, src_ty:
try func.genCopy(src_ty, .{ .indirect = .{ .reg = addr_reg } }, src_mcv);
},
- .air_ref => |ptr_ref| try func.store(try func.resolveInst(ptr_ref), src_mcv, ptr_ty, src_ty),
+ .air_ref => |ptr_ref| try func.store(try func.resolveInst(ptr_ref), src_mcv, ptr_ty),
}
}
@@ -4176,17 +4550,14 @@ fn structFieldPtr(func: *Func, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
const zcu = pt.zcu;
const ptr_field_ty = func.typeOfIndex(inst);
const ptr_container_ty = func.typeOf(operand);
- const ptr_container_ty_info = ptr_container_ty.ptrInfo(zcu);
const container_ty = ptr_container_ty.childType(zcu);
- const field_offset: i32 = if (zcu.typeToPackedStruct(container_ty)) |struct_obj|
- if (ptr_field_ty.ptrInfo(zcu).packed_offset.host_size == 0)
- @divExact(pt.structPackedFieldBitOffset(struct_obj, index) +
- ptr_container_ty_info.packed_offset.bit_offset, 8)
- else
- 0
- else
- @intCast(container_ty.structFieldOffset(index, pt));
+ const field_offset: i32 = switch (container_ty.containerLayout(zcu)) {
+ .auto, .@"extern" => @intCast(container_ty.structFieldOffset(index, pt)),
+ .@"packed" => @divExact(@as(i32, ptr_container_ty.ptrInfo(zcu).packed_offset.bit_offset) +
+ (if (zcu.typeToStruct(container_ty)) |struct_obj| pt.structPackedFieldBitOffset(struct_obj, index) else 0) -
+ ptr_field_ty.ptrInfo(zcu).packed_offset.bit_offset, 8),
+ };
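
Worked example for the packed branch: with the container pointer at packed
bit offset 0, a field sitting 12 bits into the struct, and a result pointer
type carrying a residual bit offset of 4, the byte offset is
(0 + 12 - 4) / 8 = 1; the @divExact requires that difference to be a whole
number of bytes.
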
const src_mcv = try func.resolveInst(operand);
const dst_mcv = if (switch (src_mcv) {
@@ -4229,7 +4600,7 @@ fn airStructFieldVal(func: *Func, inst: Air.Inst.Index) !void {
const dst_reg = if (field_off == 0)
(try func.copyToNewRegister(inst, src_mcv)).register
else
- try func.copyToTmpRegister(Type.usize, .{ .register = src_reg });
+ try func.copyToTmpRegister(Type.u64, .{ .register = src_reg });
const dst_mcv: MCValue = .{ .register = dst_reg };
const dst_lock = func.register_manager.lockReg(dst_reg);
@@ -4238,7 +4609,6 @@ fn airStructFieldVal(func: *Func, inst: Air.Inst.Index) !void {
if (field_off > 0) {
_ = try func.addInst(.{
.tag = .srli,
- .ops = .rri,
.data = .{ .i_type = .{
.imm12 = Immediate.u(@intCast(field_off)),
.rd = dst_reg,
@@ -4289,8 +4659,8 @@ fn airStructFieldVal(func: *Func, inst: Air.Inst.Index) !void {
const hi_mcv =
dst_mcv.address().offset(@intCast(field_bit_size / 64 * 8)).deref();
- try func.genSetReg(Type.usize, tmp_reg, hi_mcv);
- try func.genCopy(Type.usize, hi_mcv, .{ .register = tmp_reg });
+ try func.genSetReg(Type.u64, tmp_reg, hi_mcv);
+ try func.genCopy(Type.u64, hi_mcv, .{ .register = tmp_reg });
}
break :result dst_mcv;
}
@@ -4314,7 +4684,7 @@ fn genArgDbgInfo(func: Func, inst: Air.Inst.Index, mcv: MCValue) !void {
const zcu = pt.zcu;
const arg = func.air.instructions.items(.data)[@intFromEnum(inst)].arg;
const ty = arg.ty.toType();
- const owner_decl = zcu.funcOwnerDeclIndex(func.func_index);
+ const owner_decl = func.owner.getDecl(zcu);
if (arg.name == .none) return;
const name = func.air.nullTerminatedString(@intFromEnum(arg.name));
@@ -4358,8 +4728,7 @@ fn airArg(func: *Func, inst: Air.Inst.Index) !void {
fn airTrap(func: *Func) !void {
_ = try func.addInst(.{
.tag = .unimp,
- .ops = .none,
- .data = undefined,
+ .data = .none,
});
return func.finishAirBookkeeping();
}
@@ -4367,21 +4736,20 @@ fn airTrap(func: *Func) !void {
fn airBreakpoint(func: *Func) !void {
_ = try func.addInst(.{
.tag = .ebreak,
- .ops = .none,
- .data = undefined,
+ .data = .none,
});
return func.finishAirBookkeeping();
}
fn airRetAddr(func: *Func, inst: Air.Inst.Index) !void {
const dst_mcv = try func.allocRegOrMem(func.typeOfIndex(inst), inst, true);
- try func.genCopy(Type.usize, dst_mcv, .{ .load_frame = .{ .index = .ret_addr } });
+ try func.genCopy(Type.u64, dst_mcv, .{ .load_frame = .{ .index = .ret_addr } });
return func.finishAir(inst, dst_mcv, .{ .none, .none, .none });
}
fn airFrameAddress(func: *Func, inst: Air.Inst.Index) !void {
const dst_mcv = try func.allocRegOrMem(func.typeOfIndex(inst), inst, true);
- try func.genCopy(Type.usize, dst_mcv, .{ .lea_frame = .{ .index = .base_ptr } });
+ try func.genCopy(Type.u64, dst_mcv, .{ .lea_frame = .{ .index = .base_ptr } });
return func.finishAir(inst, dst_mcv, .{ .none, .none, .none });
}
@@ -4396,15 +4764,11 @@ fn airFence(func: *Func, inst: Air.Inst.Index) !void {
};
_ = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_fence,
- .data = .{
- .fence = .{
- .pred = pred,
- .succ = succ,
- .fm = if (order == .acq_rel) .tso else .none,
- },
- },
+ .tag = if (order == .acq_rel) .fencetso else .fence,
+ .data = .{ .fence = .{
+ .pred = pred,
+ .succ = succ,
+ } },
});
return func.finishAirBookkeeping();
}
@@ -4540,7 +4904,7 @@ fn genCall(
.indirect => |reg_off| {
const ret_ty = Type.fromInterned(fn_info.return_type);
const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(ret_ty, pt));
- try func.genSetReg(Type.usize, reg_off.reg, .{
+ try func.genSetReg(Type.u64, reg_off.reg, .{
.lea_frame = .{ .index = frame_index, .off = -reg_off.off },
});
call_info.return_value.short = .{ .load_frame = .{ .index = frame_index } };
@@ -4558,7 +4922,7 @@ fn genCall(
dst_reg,
src_arg,
),
- .indirect => |reg_off| try func.genSetReg(Type.usize, reg_off.reg, .{
+ .indirect => |reg_off| try func.genSetReg(Type.u64, reg_off.reg, .{
.lea_frame = .{ .index = frame_index, .off = -reg_off.off },
}),
else => return func.fail("TODO: genCall actual set {s}", .{@tagName(dst_arg)}),
@@ -4586,10 +4950,9 @@ fn genCall(
if (func.mod.pic) {
return func.fail("TODO: genCall pic", .{});
} else {
- try func.genSetReg(Type.usize, .ra, .{ .load_symbol = .{ .sym = sym.esym_index } });
+ try func.genSetReg(Type.u64, .ra, .{ .load_symbol = .{ .sym = sym.esym_index } });
_ = try func.addInst(.{
.tag = .jalr,
- .ops = .rri,
.data = .{ .i_type = .{
.rd = .ra,
.rs1 = .ra,
@@ -4603,18 +4966,17 @@ fn genCall(
const owner_decl = zcu.declPtr(extern_func.decl);
const lib_name = extern_func.lib_name.toSlice(&zcu.intern_pool);
const decl_name = owner_decl.name.toSlice(&zcu.intern_pool);
- const atom_index = try func.symbolIndex();
+ const atom_index = try func.owner.getSymbolIndex(func);
- if (func.bin_file.cast(link.File.Elf)) |elf_file| {
- _ = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_extern_fn_reloc,
- .data = .{ .reloc = .{
- .atom_index = atom_index,
- .sym_index = try elf_file.getGlobalSymbol(decl_name, lib_name),
- } },
- });
- } else unreachable; // not a valid riscv64 format
+ const elf_file = func.bin_file.cast(link.File.Elf).?;
+ _ = try func.addInst(.{
+ .tag = .pseudo_extern_fn_reloc,
+ .data = .{ .reloc = .{
+ .register = .ra,
+ .atom_index = atom_index,
+ .sym_index = try elf_file.getGlobalSymbol(decl_name, lib_name),
+ } },
+ });
},
else => return func.fail("TODO implement calling bitcasted functions", .{}),
}
@@ -4622,11 +4984,10 @@ fn genCall(
assert(func.typeOf(callee).zigTypeTag(zcu) == .Pointer);
const addr_reg, const addr_lock = try func.allocReg(.int);
defer func.register_manager.unlockReg(addr_lock);
- try func.genSetReg(Type.usize, addr_reg, .{ .air_ref = callee });
+ try func.genSetReg(Type.u64, addr_reg, .{ .air_ref = callee });
_ = try func.addInst(.{
.tag = .jalr,
- .ops = .rri,
.data = .{ .i_type = .{
.rd = .ra,
.rs1 = addr_reg,
@@ -4638,6 +4999,10 @@ fn genCall(
.lib => return func.fail("TODO: lib func calls", .{}),
}
+ // reset the cached vector settings, as the callee may have changed them
+ func.avl = null;
+ func.vtype = null;
+
return call_info.return_value.short;
}
@@ -4683,7 +5048,7 @@ fn airRet(func: *Func, inst: Air.Inst.Index, safety: bool) !void {
const lock = func.register_manager.lockRegAssumeUnused(reg_off.reg);
defer func.register_manager.unlockReg(lock);
- try func.genSetReg(Type.usize, reg_off.reg, func.ret_mcv.long);
+ try func.genSetReg(Type.u64, reg_off.reg, func.ret_mcv.long);
try func.genSetMem(
.{ .reg = reg_off.reg },
reg_off.off,
@@ -4699,9 +5064,11 @@ fn airRet(func: *Func, inst: Air.Inst.Index, safety: bool) !void {
// Just add space for an instruction; it is relocated later.
const index = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_j,
- .data = .{ .inst = undefined },
+ .tag = .pseudo_j,
+ .data = .{ .j_type = .{
+ .rd = .zero,
+ .inst = undefined,
+ } },
});
try func.exitlude_jump_relocs.append(func.gpa, index);
@@ -4723,9 +5090,11 @@ fn airRetLoad(func: *Func, inst: Air.Inst.Index) !void {
// Just add space for an instruction; it is relocated later.
const index = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_j,
- .data = .{ .inst = undefined },
+ .tag = .pseudo_j,
+ .data = .{ .j_type = .{
+ .rd = .zero,
+ .inst = undefined,
+ } },
});
try func.exitlude_jump_relocs.append(func.gpa, index);
@@ -4751,14 +5120,14 @@ fn airCmp(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
.Enum => lhs_ty.intTagType(zcu),
.Int => lhs_ty,
.Bool => Type.u1,
- .Pointer => Type.usize,
+ .Pointer => Type.u64,
.ErrorSet => Type.anyerror,
.Optional => blk: {
const payload_ty = lhs_ty.optionalChild(zcu);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
break :blk Type.u1;
} else if (lhs_ty.isPtrLikeOptional(zcu)) {
- break :blk Type.usize;
+ break :blk Type.u64;
} else {
return func.fail("TODO riscv cmp non-pointer optionals", .{});
}
@@ -4805,8 +5174,7 @@ fn airDbgStmt(func: *Func, inst: Air.Inst.Index) !void {
const dbg_stmt = func.air.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt;
_ = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_dbg_line_column,
+ .tag = .pseudo_dbg_line_column,
.data = .{ .pseudo_dbg_line_column = .{
.line = dbg_stmt.line,
.column = dbg_stmt.column,
@@ -4868,7 +5236,7 @@ fn genVarDbgInfo(
break :blk .nop;
},
};
- try dw.genVarDbgInfo(name, ty, zcu.funcOwnerDeclIndex(func.func_index), is_ptr, loc);
+ try dw.genVarDbgInfo(name, ty, func.owner.getDecl(zcu), is_ptr, loc);
},
.plan9 => {},
.none => {},
@@ -4924,7 +5292,6 @@ fn condBr(func: *Func, cond_ty: Type, condition: MCValue) !Mir.Inst.Index {
return try func.addInst(.{
.tag = .beq,
- .ops = .rr_inst,
.data = .{
.b_type = .{
.rs1 = cond_reg,
@@ -4955,19 +5322,18 @@ fn isNull(func: *Func, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
.dead,
.undef,
.immediate,
- .register_pair,
.register_offset,
.lea_frame,
.lea_symbol,
.reserved_frame,
.air_ref,
- => return func.fail("TODO: hmm {}", .{opt_mcv}),
+ .register_pair,
+ => unreachable,
.register => |opt_reg| {
if (some_info.off == 0) {
_ = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_compare,
+ .tag = .pseudo_compare,
.data = .{
.compare = .{
.op = .eq,
@@ -4984,9 +5350,27 @@ fn isNull(func: *Func, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
return return_mcv;
}
assert(some_info.ty.ip_index == .bool_type);
- const opt_abi_size: u32 = @intCast(opt_ty.abiSize(pt));
- _ = opt_abi_size;
- return func.fail("TODO: isNull some_info.off != 0 register", .{});
+ const bit_offset: u7 = @intCast(some_info.off * 8);
+
+ try func.genBinOp(
+ .shr,
+ .{ .register = opt_reg },
+ Type.u64,
+ .{ .immediate = bit_offset },
+ Type.u8,
+ return_reg,
+ );
+ try func.truncateRegister(Type.u8, return_reg);
+ try func.genBinOp(
+ .cmp_eq,
+ .{ .register = return_reg },
+ Type.u64,
+ .{ .immediate = 0 },
+ Type.u8,
+ return_reg,
+ );
+
+ return return_mcv;
},
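
In scalar terms the new register path computes the following (a sketch;
`byte_off` stands in for `some_info.off`):

    fn isNullFromReg(opt_bits: u64, byte_off: u3) bool {
        const shifted = opt_bits >> (@as(u6, byte_off) * 8); // genBinOp .shr
        const flag: u8 = @truncate(shifted); // truncateRegister(Type.u8, ...)
        return flag == 0; // genBinOp .cmp_eq against immediate 0
    }
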
.load_frame => {
@@ -4998,8 +5382,7 @@ fn isNull(func: *Func, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
defer func.register_manager.unlockReg(opt_reg_lock);
_ = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_compare,
+ .tag = .pseudo_compare,
.data = .{
.compare = .{
.op = .eq,
@@ -5048,8 +5431,7 @@ fn airIsNonNull(func: *Func, inst: Air.Inst.Index) !void {
assert(result == .register);
_ = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_not,
+ .tag = .pseudo_not,
.data = .{
.rr = .{
.rd = result.register,
@@ -5181,8 +5563,7 @@ fn isNonErr(func: *Func, inst: Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) !MC
switch (is_err_res) {
.register => |reg| {
_ = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_not,
+ .tag = .pseudo_not,
.data = .{
.rr = .{
.rd = reg,
@@ -5249,11 +5630,11 @@ fn airLoop(func: *Func, inst: Air.Inst.Index) !void {
/// Send control flow to the `index` of `func.code`.
fn jump(func: *Func, index: Mir.Inst.Index) !Mir.Inst.Index {
return func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_j,
- .data = .{
+ .tag = .pseudo_j,
+ .data = .{ .j_type = .{
+ .rd = .zero,
.inst = index,
- },
+ } },
});
}
@@ -5296,7 +5677,6 @@ fn lowerBlock(func: *Func, inst: Air.Inst.Index, body: []const Air.Inst.Index) !
fn airSwitchBr(func: *Func, inst: Air.Inst.Index) !void {
const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
- const condition = try func.resolveInst(pl_op.operand);
const condition_ty = func.typeOf(pl_op.operand);
const switch_br = func.air.extraData(Air.SwitchBr, pl_op.payload);
var extra_index: usize = switch_br.end;
@@ -5304,6 +5684,8 @@ fn airSwitchBr(func: *Func, inst: Air.Inst.Index) !void {
const liveness = try func.liveness.getSwitchBr(func.gpa, inst, switch_br.data.cases_len + 1);
defer func.gpa.free(liveness.deaths);
+ const condition = try func.resolveInst(pl_op.operand);
+
// If the condition dies here in this switch instruction, process
// that death now instead of later as this has an effect on
// whether it needs to be spilled in the branches
@@ -5326,9 +5708,14 @@ fn airSwitchBr(func: *Func, inst: Air.Inst.Index) !void {
defer func.gpa.free(relocs);
for (items, relocs, 0..) |item, *reloc, i| {
- // switch branches must be comptime-known, so this is stored in an immediate
const item_mcv = try func.resolveInst(item);
+ const cond_lock = switch (condition) {
+ .register => func.register_manager.lockRegAssumeUnused(condition.register),
+ else => null,
+ };
+ defer if (cond_lock) |lock| func.register_manager.unlockReg(lock);
+
const cmp_reg, const cmp_lock = try func.allocReg(.int);
defer func.register_manager.unlockReg(cmp_lock);
@@ -5343,8 +5730,7 @@ fn airSwitchBr(func: *Func, inst: Air.Inst.Index) !void {
if (!(i < relocs.len - 1)) {
_ = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_not,
+ .tag = .pseudo_not,
.data = .{ .rr = .{
.rd = cmp_reg,
.rs = cmp_reg,
@@ -5391,18 +5777,14 @@ fn airSwitchBr(func: *Func, inst: Air.Inst.Index) !void {
fn performReloc(func: *Func, inst: Mir.Inst.Index) void {
const tag = func.mir_instructions.items(.tag)[inst];
- const ops = func.mir_instructions.items(.ops)[inst];
const target: Mir.Inst.Index = @intCast(func.mir_instructions.len);
switch (tag) {
- .bne,
.beq,
+ .bne,
=> func.mir_instructions.items(.data)[inst].b_type.inst = target,
.jal => func.mir_instructions.items(.data)[inst].j_type.inst = target,
- .pseudo => switch (ops) {
- .pseudo_j => func.mir_instructions.items(.data)[inst].inst = target,
- else => std.debug.panic("TODO: performReloc {s}", .{@tagName(ops)}),
- },
+ .pseudo_j => func.mir_instructions.items(.data)[inst].j_type.inst = target,
else => std.debug.panic("TODO: performReloc {s}", .{@tagName(tag)}),
}
}
@@ -5489,7 +5871,6 @@ fn airBoolOp(func: *Func, inst: Air.Inst.Index) !void {
_ = try func.addInst(.{
.tag = if (tag == .bool_or) .@"or" else .@"and",
- .ops = .rrr,
.data = .{ .r_type = .{
.rd = result_reg,
.rs1 = lhs_reg,
@@ -5501,7 +5882,6 @@ fn airBoolOp(func: *Func, inst: Air.Inst.Index) !void {
if (func.wantSafety()) {
_ = try func.addInst(.{
.tag = .andi,
- .ops = .rri,
.data = .{ .i_type = .{
.rd = result_reg,
.rs1 = result_reg,
@@ -5518,7 +5898,6 @@ fn airBoolOp(func: *Func, inst: Air.Inst.Index) !void {
fn airAsm(func: *Func, inst: Air.Inst.Index) !void {
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = func.air.extraData(Air.Asm, ty_pl.payload);
- const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0;
const clobbers_len: u31 = @truncate(extra.data.flags);
var extra_i: usize = extra.end;
const outputs: []const Air.Inst.Ref =
@@ -5527,87 +5906,444 @@ fn airAsm(func: *Func, inst: Air.Inst.Index) !void {
const inputs: []const Air.Inst.Ref = @ptrCast(func.air.extra[extra_i..][0..extra.data.inputs_len]);
extra_i += inputs.len;
- const dead = !is_volatile and func.liveness.isUnused(inst);
- const result: MCValue = if (dead) .unreach else result: {
- if (outputs.len > 1) {
- return func.fail("TODO implement codegen for asm with more than 1 output", .{});
- }
+ var result: MCValue = .none;
+ var args = std.ArrayList(MCValue).init(func.gpa);
+ try args.ensureTotalCapacity(outputs.len + inputs.len);
+ defer {
+ for (args.items) |arg| if (arg.getReg()) |reg| func.register_manager.unlockReg(.{
+ .tracked_index = RegisterManager.indexOfRegIntoTracked(reg) orelse continue,
+ });
+ args.deinit();
+ }
+ var arg_map = std.StringHashMap(u8).init(func.gpa);
+ try arg_map.ensureTotalCapacity(@intCast(outputs.len + inputs.len));
+ defer arg_map.deinit();
+
+ var outputs_extra_i = extra_i;
+ for (outputs) |output| {
+ const extra_bytes = mem.sliceAsBytes(func.air.extra[extra_i..]);
+ const constraint = mem.sliceTo(mem.sliceAsBytes(func.air.extra[extra_i..]), 0);
+ const name = mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
+ // This equation accounts for the fact that even if we have exactly 4 bytes
+ // for the string, we still use the next u32 for the null terminator.
+ extra_i += (constraint.len + name.len + (2 + 3)) / 4;
+
+ const is_read = switch (constraint[0]) {
+ '=' => false,
+ '+' => read: {
+ if (output == .none) return func.fail(
+ "read-write constraint unsupported for asm result: '{s}'",
+ .{constraint},
+ );
+ break :read true;
+ },
+ else => return func.fail("invalid constraint: '{s}'", .{constraint}),
+ };
+ const is_early_clobber = constraint[1] == '&';
+ const rest = constraint[@as(usize, 1) + @intFromBool(is_early_clobber) ..];
+ const arg_mcv: MCValue = arg_mcv: {
+ const arg_maybe_reg: ?Register = if (mem.eql(u8, rest, "m"))
+ if (output != .none) null else return func.fail(
+ "memory constraint unsupported for asm result: '{s}'",
+ .{constraint},
+ )
+ else if (mem.startsWith(u8, rest, "{") and mem.endsWith(u8, rest, "}"))
+ parseRegName(rest["{".len .. rest.len - "}".len]) orelse
+ return func.fail("invalid register constraint: '{s}'", .{constraint})
+ else if (rest.len == 1 and std.ascii.isDigit(rest[0])) {
+ const index = std.fmt.charToDigit(rest[0], 10) catch unreachable;
+ if (index >= args.items.len) return func.fail("constraint out of bounds: '{s}'", .{
+ constraint,
+ });
+ break :arg_mcv args.items[index];
+ } else return func.fail("invalid constraint: '{s}'", .{constraint});
+ break :arg_mcv if (arg_maybe_reg) |reg| .{ .register = reg } else arg: {
+ const ptr_mcv = try func.resolveInst(output);
+ switch (ptr_mcv) {
+ .immediate => |addr| if (math.cast(i32, @as(i64, @bitCast(addr)))) |_|
+ break :arg ptr_mcv.deref(),
+ .register, .register_offset, .lea_frame => break :arg ptr_mcv.deref(),
+ else => {},
+ }
+ break :arg .{ .indirect = .{ .reg = try func.copyToTmpRegister(Type.usize, ptr_mcv) } };
+ };
+ };
+ if (arg_mcv.getReg()) |reg| if (RegisterManager.indexOfRegIntoTracked(reg)) |_| {
+ _ = func.register_manager.lockReg(reg);
+ };
+ if (!mem.eql(u8, name, "_"))
+ arg_map.putAssumeCapacityNoClobber(name, @intCast(args.items.len));
+ args.appendAssumeCapacity(arg_mcv);
+ if (output == .none) result = arg_mcv;
+ if (is_read) try func.load(arg_mcv, .{ .air_ref = output }, func.typeOf(output));
+ }
- const output_constraint: ?[]const u8 = for (outputs) |output| {
- if (output != .none) {
- return func.fail("TODO implement codegen for non-expr asm", .{});
+ for (inputs) |input| {
+ const input_bytes = mem.sliceAsBytes(func.air.extra[extra_i..]);
+ const constraint = mem.sliceTo(input_bytes, 0);
+ const name = mem.sliceTo(input_bytes[constraint.len + 1 ..], 0);
+ // This equation accounts for the fact that even if we have exactly 4 bytes
+ // for the string, we still use the next u32 for the null terminator.
+ extra_i += (constraint.len + name.len + (2 + 3)) / 4;
+
+ const ty = func.typeOf(input);
+ const input_mcv = try func.resolveInst(input);
+ const arg_mcv: MCValue = if (mem.eql(u8, constraint, "X"))
+ input_mcv
+ else if (mem.startsWith(u8, constraint, "{") and mem.endsWith(u8, constraint, "}")) arg: {
+ const reg = parseRegName(constraint["{".len .. constraint.len - "}".len]) orelse
+ return func.fail("invalid register constraint: '{s}'", .{constraint});
+ try func.register_manager.getReg(reg, null);
+ try func.genSetReg(ty, reg, input_mcv);
+ break :arg .{ .register = reg };
+ } else if (mem.eql(u8, constraint, "r")) arg: {
+ switch (input_mcv) {
+ .register => break :arg input_mcv,
+ else => {},
}
- const extra_bytes = std.mem.sliceAsBytes(func.air.extra[extra_i..]);
- const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(func.air.extra[extra_i..]), 0);
- const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
- // This equation accounts for the fact that even if we have exactly 4 bytes
- // for the string, we still use the next u32 for the null terminator.
- extra_i += (constraint.len + name.len + (2 + 3)) / 4;
-
- break constraint;
- } else null;
+ const temp_reg = try func.copyToTmpRegister(ty, input_mcv);
+ break :arg .{ .register = temp_reg };
+ } else return func.fail("invalid input constraint: '{s}'", .{constraint});
+ if (arg_mcv.getReg()) |reg| if (RegisterManager.indexOfRegIntoTracked(reg)) |_| {
+ _ = func.register_manager.lockReg(reg);
+ };
+ if (!mem.eql(u8, name, "_"))
+ arg_map.putAssumeCapacityNoClobber(name, @intCast(args.items.len));
+ args.appendAssumeCapacity(arg_mcv);
+ }
- for (inputs) |input| {
- const input_bytes = std.mem.sliceAsBytes(func.air.extra[extra_i..]);
- const constraint = std.mem.sliceTo(input_bytes, 0);
- const name = std.mem.sliceTo(input_bytes[constraint.len + 1 ..], 0);
+ {
+ var clobber_i: u32 = 0;
+ while (clobber_i < clobbers_len) : (clobber_i += 1) {
+ const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(func.air.extra[extra_i..]), 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
- extra_i += (constraint.len + name.len + (2 + 3)) / 4;
+ extra_i += clobber.len / 4 + 1;
- if (constraint.len < 3 or constraint[0] != '{' or constraint[constraint.len - 1] != '}') {
- return func.fail("unrecognized asm input constraint: '{s}'", .{constraint});
+ if (std.mem.eql(u8, clobber, "") or std.mem.eql(u8, clobber, "memory")) {
+ // nothing really to do
+ } else {
+ try func.register_manager.getReg(parseRegName(clobber) orelse
+ return func.fail("invalid clobber: '{s}'", .{clobber}), null);
}
- const reg_name = constraint[1 .. constraint.len - 1];
- const reg = parseRegName(reg_name) orelse
- return func.fail("unrecognized register: '{s}'", .{reg_name});
-
- const arg_mcv = try func.resolveInst(input);
- try func.register_manager.getReg(reg, null);
- try func.genSetReg(func.typeOf(input), reg, arg_mcv);
}
+ }
- {
- var clobber_i: u32 = 0;
- while (clobber_i < clobbers_len) : (clobber_i += 1) {
- const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(func.air.extra[extra_i..]), 0);
- // This equation accounts for the fact that even if we have exactly 4 bytes
- // for the string, we still use the next u32 for the null terminator.
- extra_i += clobber.len / 4 + 1;
+ const Label = struct {
+ target: Mir.Inst.Index = undefined,
+ pending_relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .{},
+
+ const Kind = enum { definition, reference };
+
+ fn isValid(kind: Kind, name: []const u8) bool {
+ for (name, 0..) |c, i| switch (c) {
+ else => return false,
+ '$' => if (i == 0) return false,
+ '.' => {},
+ '0'...'9' => if (i == 0) switch (kind) {
+ .definition => if (name.len != 1) return false,
+ .reference => {
+ if (name.len != 2) return false;
+ switch (name[1]) {
+ else => return false,
+ 'B', 'F', 'b', 'f' => {},
+ }
+ },
+ },
+ '@', 'A'...'Z', '_', 'a'...'z' => {},
+ };
+ return name.len > 0;
+ }
+ };
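
In other words: `loop`, `.Lfoo`, and a single digit such as `1` are valid
definitions; a reference starting with a digit must be exactly two characters,
the digit plus `b`/`B` (nearest earlier definition) or `f`/`F` (nearest later
one); `$` is allowed anywhere except the first position; the empty name is
rejected.
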
+ var labels: std.StringHashMapUnmanaged(Label) = .{};
+ defer {
+ var label_it = labels.valueIterator();
+ while (label_it.next()) |label| label.pending_relocs.deinit(func.gpa);
+ labels.deinit(func.gpa);
+ }
- if (std.mem.eql(u8, clobber, "") or std.mem.eql(u8, clobber, "memory")) {
- // nothing really to do
- } else {
- try func.register_manager.getReg(parseRegName(clobber) orelse
- return func.fail("invalid clobber: '{s}'", .{clobber}), null);
- }
+ const asm_source = std.mem.sliceAsBytes(func.air.extra[extra_i..])[0..extra.data.source_len];
+ var line_it = mem.tokenizeAny(u8, asm_source, "\n\r;");
+ next_line: while (line_it.next()) |line| {
+ var mnem_it = mem.tokenizeAny(u8, line, " \t");
+ const mnem_str = while (mnem_it.next()) |mnem_str| {
+ if (mem.startsWith(u8, mnem_str, "#")) continue :next_line;
+ if (mem.startsWith(u8, mnem_str, "//")) continue :next_line;
+ if (!mem.endsWith(u8, mnem_str, ":")) break mnem_str;
+ const label_name = mnem_str[0 .. mnem_str.len - ":".len];
+ if (!Label.isValid(.definition, label_name))
+ return func.fail("invalid label: '{s}'", .{label_name});
+
+ const label_gop = try labels.getOrPut(func.gpa, label_name);
+ if (!label_gop.found_existing) label_gop.value_ptr.* = .{} else {
+ const anon = std.ascii.isDigit(label_name[0]);
+ if (!anon and label_gop.value_ptr.pending_relocs.items.len == 0)
+ return func.fail("redefined label: '{s}'", .{label_name});
+ for (label_gop.value_ptr.pending_relocs.items) |pending_reloc|
+ func.performReloc(pending_reloc);
+ if (anon)
+ label_gop.value_ptr.pending_relocs.clearRetainingCapacity()
+ else
+ label_gop.value_ptr.pending_relocs.clearAndFree(func.gpa);
}
- }
+ label_gop.value_ptr.target = @intCast(func.mir_instructions.len);
+ } else continue;
+
+ const instruction: union(enum) { mnem: Mnemonic, pseudo: Pseudo } =
+ if (std.meta.stringToEnum(Mnemonic, mnem_str)) |mnem|
+ .{ .mnem = mnem }
+ else if (std.meta.stringToEnum(Pseudo, mnem_str)) |pseudo|
+ .{ .pseudo = pseudo }
+ else
+ return func.fail("invalid mnem str '{s}'", .{mnem_str});
+
+ const Operand = union(enum) {
+ none,
+ reg: Register,
+ imm: Immediate,
+ inst: Mir.Inst.Index,
+ sym: SymbolOffset,
+ };
- const asm_source = std.mem.sliceAsBytes(func.air.extra[extra_i..])[0..extra.data.source_len];
+ var ops: [4]Operand = .{.none} ** 4;
+ var last_op = false;
+ var op_it = mem.splitAny(u8, mnem_it.rest(), ",(");
+ next_op: for (&ops) |*op| {
+ const op_str = while (!last_op) {
+ const full_str = op_it.next() orelse break :next_op;
+ const code_str = if (mem.indexOfScalar(u8, full_str, '#') orelse
+ mem.indexOf(u8, full_str, "//")) |comment|
+ code: {
+ last_op = true;
+ break :code full_str[0..comment];
+ } else full_str;
+ const trim_str = mem.trim(u8, code_str, " \t*");
+ if (trim_str.len > 0) break trim_str;
+ } else break;
+
+ if (parseRegName(op_str)) |reg| {
+ op.* = .{ .reg = reg };
+ } else if (std.fmt.parseInt(i12, op_str, 10)) |int| {
+ op.* = .{ .imm = Immediate.s(int) };
+ } else |_| if (mem.startsWith(u8, op_str, "%[")) {
+ const mod_index = mem.indexOf(u8, op_str, "]@");
+ const modifier = if (mod_index) |index|
+ op_str[index + "]@".len ..]
+ else
+ "";
+
+ op.* = switch (args.items[
+ arg_map.get(op_str["%[".len .. mod_index orelse op_str.len - "]".len]) orelse
+ return func.fail("no matching constraint: '{s}'", .{op_str})
+ ]) {
+ .load_symbol => |sym_off| if (mem.eql(u8, modifier, "plt")) blk: {
+ assert(sym_off.off == 0);
+ break :blk .{ .sym = sym_off };
+ } else return func.fail("invalid modifier: '{s}'", .{modifier}),
+ .register => |reg| if (modifier.len == 0)
+ .{ .reg = reg }
+ else
+ return func.fail("invalid modified '{s}'", .{modifier}),
+ else => return func.fail("invalid constraint: '{s}'", .{op_str}),
+ };
+ } else if (mem.endsWith(u8, op_str, ")")) {
+ const reg = op_str[0 .. op_str.len - ")".len];
+ const addr_reg = parseRegName(reg) orelse
+ return func.fail("expected valid register, found '{s}'", .{reg});
+
+ op.* = .{ .reg = addr_reg };
+ } else if (Label.isValid(.reference, op_str)) {
+ const anon = std.ascii.isDigit(op_str[0]);
+ const label_gop = try labels.getOrPut(func.gpa, op_str[0..if (anon) 1 else op_str.len]);
+ if (!label_gop.found_existing) label_gop.value_ptr.* = .{};
+ if (anon and (op_str[1] == 'b' or op_str[1] == 'B') and !label_gop.found_existing)
+ return func.fail("undefined label: '{s}'", .{op_str});
+ const pending_relocs = &label_gop.value_ptr.pending_relocs;
+ if (if (anon)
+ op_str[1] == 'f' or op_str[1] == 'F'
+ else
+ !label_gop.found_existing or pending_relocs.items.len > 0)
+ try pending_relocs.append(func.gpa, @intCast(func.mir_instructions.len));
+ op.* = .{ .inst = label_gop.value_ptr.target };
+ } else return func.fail("invalid operand: '{s}'", .{op_str});
+ } else if (op_it.next()) |op_str| return func.fail("extra operand: '{s}'", .{op_str});
+
+ switch (instruction) {
+ .mnem => |mnem| {
+ _ = (switch (ops[0]) {
+ .none => try func.addInst(.{
+ .tag = mnem,
+ .data = .none,
+ }),
+ .reg => |reg1| switch (ops[1]) {
+ .reg => |reg2| switch (ops[2]) {
+ .imm => |imm1| try func.addInst(.{
+ .tag = mnem,
+ .data = .{ .i_type = .{
+ .rd = reg1,
+ .rs1 = reg2,
+ .imm12 = imm1,
+ } },
+ }),
+ else => error.InvalidInstruction,
+ },
+ .imm => |imm1| switch (ops[2]) {
+ .reg => |reg2| switch (mnem) {
+ .sd => try func.addInst(.{
+ .tag = mnem,
+ .data = .{ .i_type = .{
+ .rd = reg2,
+ .rs1 = reg1,
+ .imm12 = imm1,
+ } },
+ }),
+ .ld => try func.addInst(.{
+ .tag = mnem,
+ .data = .{ .i_type = .{
+ .rd = reg1,
+ .rs1 = reg2,
+ .imm12 = imm1,
+ } },
+ }),
+ else => error.InvalidInstruction,
+ },
+ else => error.InvalidInstruction,
+ },
+ .none => switch (mnem) {
+ .jalr => try func.addInst(.{
+ .tag = mnem,
+ .data = .{ .i_type = .{
+ .rd = .ra,
+ .rs1 = reg1,
+ .imm12 = Immediate.s(0),
+ } },
+ }),
+ else => error.InvalidInstruction,
+ },
+ else => error.InvalidInstruction,
+ },
+ else => error.InvalidInstruction,
+ }) catch |err| {
+ switch (err) {
+ error.InvalidInstruction => return func.fail(
+ "invalid instruction: {s} {s} {s} {s} {s}",
+ .{
+ @tagName(mnem),
+ @tagName(ops[0]),
+ @tagName(ops[1]),
+ @tagName(ops[2]),
+ @tagName(ops[3]),
+ },
+ ),
+ else => |e| return e,
+ }
+ };
+ },
+ .pseudo => |pseudo| {
+ (@as(error{InvalidInstruction}!void, switch (pseudo) {
+ .li => blk: {
+ if (ops[0] != .reg or ops[1] != .imm) {
+ break :blk error.InvalidInstruction;
+ }
- if (std.meta.stringToEnum(Mir.Inst.Tag, asm_source)) |tag| {
- _ = try func.addInst(.{
- .tag = tag,
- .ops = .none,
- .data = undefined,
- });
- } else {
- return func.fail("TODO: asm_source {s}", .{asm_source});
- }
+ const reg = ops[0].reg;
+ const imm = ops[1].imm;
- if (output_constraint) |output| {
- if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') {
- return func.fail("unrecognized asm output constraint: '{s}'", .{output});
- }
- const reg_name = output[2 .. output.len - 1];
- const reg = parseRegName(reg_name) orelse
- return func.fail("unrecognized register: '{s}'", .{reg_name});
- break :result .{ .register = reg };
- } else {
- break :result .{ .none = {} };
+ try func.genSetReg(Type.usize, reg, .{ .immediate = imm.asBits(u64) });
+ },
+ .mv => blk: {
+ if (ops[0] != .reg or ops[1] != .reg) {
+ break :blk error.InvalidInstruction;
+ }
+
+ const dst = ops[0].reg;
+ const src = ops[1].reg;
+
+ if (dst.class() != .int or src.class() != .int) {
+ return func.fail("pseudo instruction 'mv' only works on integer registers", .{});
+ }
+
+ try func.genSetReg(Type.usize, dst, .{ .register = src });
+ },
+ .tail => blk: {
+ if (ops[0] != .sym) {
+ break :blk error.InvalidInstruction;
+ }
+
+ const sym_offset = ops[0].sym;
+ assert(sym_offset.off == 0);
+
+ const random_link_reg, const lock = try func.allocReg(.int);
+ defer func.register_manager.unlockReg(lock);
+
+ _ = try func.addInst(.{
+ .tag = .pseudo_extern_fn_reloc,
+ .data = .{ .reloc = .{
+ .register = random_link_reg,
+ .atom_index = try func.owner.getSymbolIndex(func),
+ .sym_index = sym_offset.sym,
+ } },
+ });
+ },
+ .ret => _ = try func.addInst(.{
+ .tag = .jalr,
+ .data = .{ .i_type = .{
+ .rd = .zero,
+ .rs1 = .ra,
+ .imm12 = Immediate.s(0),
+ } },
+ }),
+ .beqz => blk: {
+ if (ops[0] != .reg or ops[1] != .inst) {
+ break :blk error.InvalidInstruction;
+ }
+
+ _ = try func.addInst(.{
+ .tag = .beq,
+ .data = .{ .b_type = .{
+ .rs1 = ops[0].reg,
+ .rs2 = .zero,
+ .inst = ops[1].inst,
+ } },
+ });
+ },
+ })) catch |err| {
+ switch (err) {
+ error.InvalidInstruction => return func.fail(
+ "invalid instruction: {s} {s} {s} {s} {s}",
+ .{
+ @tagName(pseudo),
+ @tagName(ops[0]),
+ @tagName(ops[1]),
+ @tagName(ops[2]),
+ @tagName(ops[3]),
+ },
+ ),
+ else => |e| return e,
+ }
+ };
+ },
}
- };
+ }
+
+ var label_it = labels.iterator();
+ while (label_it.next()) |label| if (label.value_ptr.pending_relocs.items.len > 0)
+ return func.fail("undefined label: '{s}'", .{label.key_ptr.*});
+
+ for (outputs, args.items[0..outputs.len]) |output, arg_mcv| {
+ const extra_bytes = mem.sliceAsBytes(func.air.extra[outputs_extra_i..]);
+ const constraint =
+ mem.sliceTo(mem.sliceAsBytes(func.air.extra[outputs_extra_i..]), 0);
+ const name = mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
+ // This equation accounts for the fact that even if we have exactly 4 bytes
+ // for the string, we still use the next u32 for the null terminator.
+ outputs_extra_i += (constraint.len + name.len + (2 + 3)) / 4;
+
+ if (output == .none) continue;
+ if (arg_mcv != .register) continue;
+ if (constraint.len == 2 and std.ascii.isDigit(constraint[1])) continue;
+ try func.store(.{ .air_ref = output }, arg_mcv, func.typeOf(output));
+ }
simple: {
var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
@@ -5668,12 +6404,19 @@ fn genCopy(func: *Func, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void {
ty,
src_mcv,
),
+ .load_tlv => {
+ const addr_reg, const addr_lock = try func.allocReg(.int);
+ defer func.register_manager.unlockReg(addr_lock);
+
+ try func.genSetReg(ty, addr_reg, dst_mcv.address());
+ try func.genCopy(ty, .{ .indirect = .{ .reg = addr_reg } }, src_mcv);
+ },
.memory => return func.fail("TODO: genCopy memory", .{}),
.register_pair => |dst_regs| {
const src_info: ?struct { addr_reg: Register, addr_lock: ?RegisterLock } = switch (src_mcv) {
.register_pair, .memory, .indirect, .load_frame => null,
.load_symbol => src: {
- const src_addr_reg, const src_addr_lock = try func.promoteReg(Type.usize, src_mcv.address());
+ const src_addr_reg, const src_addr_lock = try func.promoteReg(Type.u64, src_mcv.address());
errdefer func.register_manager.unlockReg(src_addr_lock);
break :src .{ .addr_reg = src_addr_reg, .addr_lock = src_addr_lock };
@@ -5725,14 +6468,23 @@ fn genInlineMemcpy(
const src = regs[2];
const dst = regs[3];
- try func.genSetReg(Type.usize, count, len);
- try func.genSetReg(Type.usize, src, src_ptr);
- try func.genSetReg(Type.usize, dst, dst_ptr);
+ try func.genSetReg(Type.u64, count, len);
+ try func.genSetReg(Type.u64, src, src_ptr);
+ try func.genSetReg(Type.u64, dst, dst_ptr);
+
+ // if count is 0, there's nothing to copy
+ _ = try func.addInst(.{
+ .tag = .beq,
+ .data = .{ .b_type = .{
+ .rs1 = count,
+ .rs2 = .zero,
+ .inst = @intCast(func.mir_instructions.len + 9),
+ } },
+ });
// lb tmp, 0(src)
const first_inst = try func.addInst(.{
.tag = .lb,
- .ops = .rri,
.data = .{
.i_type = .{
.rd = tmp,
@@ -5745,7 +6497,6 @@ fn genInlineMemcpy(
// sb tmp, 0(dst)
_ = try func.addInst(.{
.tag = .sb,
- .ops = .rri,
.data = .{
.i_type = .{
.rd = dst,
@@ -5758,7 +6509,6 @@ fn genInlineMemcpy(
// dec count by 1
_ = try func.addInst(.{
.tag = .addi,
- .ops = .rri,
.data = .{
.i_type = .{
.rd = count,
@@ -5771,7 +6521,6 @@ fn genInlineMemcpy(
// branch if count is 0
_ = try func.addInst(.{
.tag = .beq,
- .ops = .rr_inst,
.data = .{
.b_type = .{
.inst = @intCast(func.mir_instructions.len + 4), // points after the last inst
@@ -5784,7 +6533,6 @@ fn genInlineMemcpy(
// increment the pointers
_ = try func.addInst(.{
.tag = .addi,
- .ops = .rri,
.data = .{
.i_type = .{
.rd = src,
@@ -5796,7 +6544,6 @@ fn genInlineMemcpy(
_ = try func.addInst(.{
.tag = .addi,
- .ops = .rri,
.data = .{
.i_type = .{
.rd = dst,
@@ -5808,9 +6555,11 @@ fn genInlineMemcpy(
// jump back to start of loop
_ = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_j,
- .data = .{ .inst = first_inst },
+ .tag = .pseudo_j,
+ .data = .{ .j_type = .{
+ .rd = .zero,
+ .inst = first_inst,
+ } },
});
}
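
The emitted sequence is a plain byte-copy loop behind the new zero-length
guard; in Zig terms it behaves roughly like this sketch:

    fn inlineMemcpy(dst: [*]u8, src: [*]const u8, len: u64) void {
        var count = len;
        var s = src;
        var d = dst;
        if (count == 0) return; // the new leading beq against zero
        while (true) {
            d[0] = s[0]; // lb tmp, 0(src) / sb tmp, 0(dst)
            count -= 1; // addi count, count, -1
            if (count == 0) break; // beq count, zero, <past the loop>
            s += 1; // addi src, src, 1
            d += 1; // addi dst, dst, 1
        }
    }
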
@@ -5828,14 +6577,13 @@ fn genInlineMemset(
const src = regs[1];
const dst = regs[2];
- try func.genSetReg(Type.usize, count, len);
- try func.genSetReg(Type.usize, src, src_value);
- try func.genSetReg(Type.usize, dst, dst_ptr);
+ try func.genSetReg(Type.u64, count, len);
+ try func.genSetReg(Type.u64, src, src_value);
+ try func.genSetReg(Type.u64, dst, dst_ptr);
// sb src, 0(dst)
const first_inst = try func.addInst(.{
.tag = .sb,
- .ops = .rri,
.data = .{
.i_type = .{
.rd = dst,
@@ -5848,7 +6596,6 @@ fn genInlineMemset(
// dec count by 1
_ = try func.addInst(.{
.tag = .addi,
- .ops = .rri,
.data = .{
.i_type = .{
.rd = count,
@@ -5861,7 +6608,6 @@ fn genInlineMemset(
// branch if count is 0
_ = try func.addInst(.{
.tag = .beq,
- .ops = .rr_inst,
.data = .{
.b_type = .{
.inst = @intCast(func.mir_instructions.len + 4), // points after the last inst
@@ -5874,7 +6620,6 @@ fn genInlineMemset(
// increment the pointers
_ = try func.addInst(.{
.tag = .addi,
- .ops = .rri,
.data = .{
.i_type = .{
.rd = dst,
@@ -5886,11 +6631,11 @@ fn genInlineMemset(
// jump back to start of loop
_ = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_j,
- .data = .{
+ .tag = .pseudo_j,
+ .data = .{ .j_type = .{
+ .rd = .zero,
.inst = first_inst,
- },
+ } },
});
}
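
Same shape as the memcpy loop, minus the load; note that, like the emitted
code, this sketch assumes a non-zero length:

    fn inlineMemset(dst: [*]u8, byte: u8, len: u64) void {
        var count = len; // assumed > 0
        var d = dst;
        while (true) {
            d[0] = byte; // sb src, 0(dst)
            count -= 1; // addi count, count, -1
            if (count == 0) break; // beq count, zero, <past the loop>
            d += 1; // addi dst, dst, 1
        }
    }
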
@@ -5936,7 +6681,6 @@ fn genSetReg(func: *Func, ty: Type, reg: Register, src_mcv: MCValue) InnerError!
if (math.minInt(i12) <= x and x <= math.maxInt(i12)) {
_ = try func.addInst(.{
.tag = .addi,
- .ops = .rri,
.data = .{ .i_type = .{
.rd = reg,
.rs1 = .zero,
@@ -5950,7 +6694,6 @@ fn genSetReg(func: *Func, ty: Type, reg: Register, src_mcv: MCValue) InnerError!
_ = try func.addInst(.{
.tag = .lui,
- .ops = .ri,
.data = .{ .u_type = .{
.rd = reg,
.imm20 = Immediate.s(hi20),
@@ -5958,7 +6701,6 @@ fn genSetReg(func: *Func, ty: Type, reg: Register, src_mcv: MCValue) InnerError!
});
_ = try func.addInst(.{
.tag = .addi,
- .ops = .rri,
.data = .{ .i_type = .{
.rd = reg,
.rs1 = reg,
@@ -5981,7 +6723,6 @@ fn genSetReg(func: *Func, ty: Type, reg: Register, src_mcv: MCValue) InnerError!
_ = try func.addInst(.{
.tag = .slli,
- .ops = .rri,
.data = .{ .i_type = .{
.rd = reg,
.rs1 = reg,
@@ -5991,7 +6732,6 @@ fn genSetReg(func: *Func, ty: Type, reg: Register, src_mcv: MCValue) InnerError!
_ = try func.addInst(.{
.tag = .add,
- .ops = .rrr,
.data = .{ .r_type = .{
.rd = reg,
.rs1 = reg,
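
Immediates are materialized with the standard lui+addi split (hi20 adjusted
because addi sign-extends its 12-bit immediate), then widened with slli/add
chunks beyond 32 bits. The hi20/lo12 computation sits outside this hunk; a
sketch of the usual split:

    fn split32(x: i32) struct { hi20: i20, lo12: i12 } {
        const lo12: i12 = @truncate(x); // addi immediate, sign-extended by hardware
        // lui immediate; wrapping truncation matches lui's 20-bit field
        const hi20: i20 = @truncate((x -% @as(i32, lo12)) >> 12);
        return .{ .hi20 = hi20, .lo12 = lo12 };
    }
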
@@ -6028,15 +6768,16 @@ fn genSetReg(func: *Func, ty: Type, reg: Register, src_mcv: MCValue) InnerError!
// mv reg, src_reg
_ = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_mv,
+ .tag = .pseudo_mv,
.data = .{ .rr = .{
.rd = reg,
.rs = src_reg,
} },
});
},
- .register_pair => return func.fail("genSetReg should we allow reg -> reg_pair?", .{}),
+ // useful in cases like slice_ptr, which can easily reuse the operand
+ // but only needs the pointer half of the pair.
+ .register_pair => |pair| try func.genSetReg(ty, reg, .{ .register = pair[0] }),
.load_frame => |frame| {
if (reg.class() == .vector) {
// vectors don't support an offset memory load so we need to put the true
@@ -6048,8 +6789,7 @@ fn genSetReg(func: *Func, ty: Type, reg: Register, src_mcv: MCValue) InnerError!
try func.genCopy(ty, .{ .register = reg }, .{ .indirect = .{ .reg = addr_reg } });
} else {
_ = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_load_rm,
+ .tag = .pseudo_load_rm,
.data = .{ .rm = .{
.r = reg,
.m = .{
@@ -6069,7 +6809,6 @@ fn genSetReg(func: *Func, ty: Type, reg: Register, src_mcv: MCValue) InnerError!
_ = try func.addInst(.{
.tag = .ld,
- .ops = .rri,
.data = .{ .i_type = .{
.rd = reg,
.rs1 = reg,
@@ -6079,8 +6818,7 @@ fn genSetReg(func: *Func, ty: Type, reg: Register, src_mcv: MCValue) InnerError!
},
.lea_frame, .register_offset => {
_ = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_lea_rm,
+ .tag = .pseudo_lea_rm,
.data = .{
.rm = .{
.r = reg,
@@ -6108,7 +6846,7 @@ fn genSetReg(func: *Func, ty: Type, reg: Register, src_mcv: MCValue) InnerError!
});
},
.indirect => |reg_off| {
- const load_tag: Mir.Inst.Tag = switch (reg.class()) {
+ const load_tag: Mnemonic = switch (reg.class()) {
.float => switch (abi_size) {
1 => unreachable, // Zig does not support 8-bit floats
2 => return func.fail("TODO: genSetReg indirect 16-bit float", .{}),
@@ -6147,8 +6885,7 @@ fn genSetReg(func: *Func, ty: Type, reg: Register, src_mcv: MCValue) InnerError!
});
_ = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_load_rm,
+ .tag = .pseudo_load_rm,
.data = .{ .rm = .{
.r = reg,
.m = .{
@@ -6168,7 +6905,6 @@ fn genSetReg(func: *Func, ty: Type, reg: Register, src_mcv: MCValue) InnerError!
_ = try func.addInst(.{
.tag = load_tag,
- .ops = .rri,
.data = .{ .i_type = .{
.rd = reg,
.rs1 = reg_off.reg,
@@ -6178,16 +6914,15 @@ fn genSetReg(func: *Func, ty: Type, reg: Register, src_mcv: MCValue) InnerError!
},
.lea_symbol => |sym_off| {
assert(sym_off.off == 0);
- const atom_index = try func.symbolIndex();
+ const atom_index = try func.owner.getSymbolIndex(func);
_ = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_load_symbol,
- .data = .{ .payload = try func.addExtra(Mir.LoadSymbolPayload{
- .register = reg.encodeId(),
+ .tag = .pseudo_load_symbol,
+ .data = .{ .reloc = .{
+ .register = reg,
.atom_index = atom_index,
.sym_index = sym_off.sym,
- }) },
+ } },
});
},
.load_symbol => {
@@ -6197,6 +6932,25 @@ fn genSetReg(func: *Func, ty: Type, reg: Register, src_mcv: MCValue) InnerError!
try func.genSetReg(ty, addr_reg, src_mcv.address());
try func.genSetReg(ty, reg, .{ .indirect = .{ .reg = addr_reg } });
},
+ .lea_tlv => |sym| {
+ const atom_index = try func.owner.getSymbolIndex(func);
+
+ _ = try func.addInst(.{
+ .tag = .pseudo_load_tlv,
+ .data = .{ .reloc = .{
+ .register = reg,
+ .atom_index = atom_index,
+ .sym_index = sym,
+ } },
+ });
+ },
+ .load_tlv => {
+ const addr_reg, const addr_lock = try func.allocReg(.int);
+ defer func.register_manager.unlockReg(addr_lock);
+
+ try func.genSetReg(ty, addr_reg, src_mcv.address());
+ try func.genSetReg(ty, reg, .{ .indirect = .{ .reg = addr_reg } });
+ },
.air_ref => |ref| try func.genSetReg(ty, reg, try func.resolveInst(ref)),
else => return func.fail("TODO: genSetReg {s}", .{@tagName(src_mcv)}),
}
@@ -6216,7 +6970,6 @@ fn genSetMem(
const dst_ptr_mcv: MCValue = switch (base) {
.reg => |base_reg| .{ .register_offset = .{ .reg = base_reg, .off = disp } },
.frame => |base_frame_index| .{ .lea_frame = .{ .index = base_frame_index, .off = disp } },
- .reloc => |base_symbol| .{ .lea_symbol = .{ .sym = base_symbol.sym_index, .off = disp } },
};
switch (src_mcv) {
.none,
@@ -6260,7 +7013,7 @@ fn genSetMem(
},
.register => |reg| {
if (reg.class() == .vector) {
- const addr_reg = try func.copyToTmpRegister(Type.usize, dst_ptr_mcv);
+ const addr_reg = try func.copyToTmpRegister(Type.u64, dst_ptr_mcv);
const num_elem = ty.vectorLen(zcu);
const elem_size = ty.childType(zcu).bitSize(pt);
@@ -6279,8 +7032,7 @@ fn genSetMem(
});
_ = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_store_rm,
+ .tag = .pseudo_store_rm,
.data = .{ .rm = .{
.r = reg,
.m = .{
@@ -6319,8 +7071,7 @@ fn genSetMem(
}));
const frame_mcv: MCValue = .{ .load_frame = .{ .index = frame_index } };
_ = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_store_rm,
+ .tag = .pseudo_store_rm,
.data = .{ .rm = .{
.r = reg,
.m = .{
@@ -6335,8 +7086,7 @@ fn genSetMem(
try func.genSetMem(base, disp, ty, frame_mcv);
try func.freeValue(frame_mcv);
} else _ = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_store_rm,
+ .tag = .pseudo_store_rm,
.data = .{ .rm = .{
.r = reg,
.m = .{
@@ -6366,6 +7116,7 @@ fn genSetMem(
return func.genSetMem(base, disp, ty, .{ .register = reg });
},
.air_ref => |src_ref| try func.genSetMem(base, disp, ty, try func.resolveInst(src_ref)),
+ else => return func.fail("TODO: genSetMem {s}", .{@tagName(src_mcv)}),
}
}
@@ -6398,7 +7149,7 @@ fn airBitCast(func: *Func, inst: Air.Inst.Index) !void {
const src_lock = if (src_mcv.getReg()) |reg| func.register_manager.lockReg(reg) else null;
defer if (src_lock) |lock| func.register_manager.unlockReg(lock);
- const dst_mcv = if (dst_ty.abiSize(pt) <= src_ty.abiSize(pt) and
+ const dst_mcv = if (dst_ty.abiSize(pt) <= src_ty.abiSize(pt) and src_mcv != .register_pair and
func.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_mcv else dst: {
const dst_mcv = try func.allocRegOrMem(dst_ty, inst, true);
try func.genCopy(switch (math.order(dst_ty.abiSize(pt), src_ty.abiSize(pt))) {
@@ -6437,7 +7188,7 @@ fn airArrayToSlice(func: *Func, inst: Air.Inst.Index) !void {
try func.genSetMem(
.{ .frame = frame_index },
@intCast(ptr_ty.abiSize(pt)),
- Type.usize,
+ Type.u64,
.{ .immediate = array_len },
);
@@ -6447,107 +7198,409 @@ fn airArrayToSlice(func: *Func, inst: Air.Inst.Index) !void {
fn airFloatFromInt(func: *Func, inst: Air.Inst.Index) !void {
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airFloatFromInt for {}", .{
- func.target.cpu.arch,
- });
+ const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
+ const pt = func.pt;
+ const zcu = pt.zcu;
+
+ const operand = try func.resolveInst(ty_op.operand);
+
+ const src_ty = func.typeOf(ty_op.operand);
+ const dst_ty = ty_op.ty.toType();
+
+ const src_reg, const src_lock = try func.promoteReg(src_ty, operand);
+ defer if (src_lock) |lock| func.register_manager.unlockReg(lock);
+
+ const is_unsigned = dst_ty.isUnsignedInt(zcu);
+ const src_bits = src_ty.bitSize(pt);
+ const dst_bits = dst_ty.bitSize(pt);
+
+ switch (src_bits) {
+ 32, 64 => {},
+ else => try func.truncateRegister(src_ty, src_reg),
+ }
+
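+ // Select the integer operand width for fcvt: w/wu for 32-bit and narrower (widened above), l/lu for 64-bit.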
+ const int_mod: Mir.FcvtOp = switch (src_bits) {
+ 8, 16, 32 => if (is_unsigned) .wu else .w,
+ 64 => if (is_unsigned) .lu else .l,
+ else => return func.fail("TODO: airFloatFromInt src size: {d}", .{src_bits}),
+ };
+
+ const float_mod: enum { s, d } = switch (dst_bits) {
+ 32 => .s,
+ 64 => .d,
+ else => return func.fail("TODO: airFloatFromInt dst size {d}", .{dst_bits}),
+ };
+
+ const dst_reg, const dst_lock = try func.allocReg(.int);
+ defer func.register_manager.unlockReg(dst_lock);
+
+ _ = try func.addInst(.{
+ .tag = switch (float_mod) {
+ .s => switch (int_mod) {
+ .l => .fcvtsl,
+ .lu => .fcvtslu,
+ .w => .fcvtsw,
+ .wu => .fcvtswu,
+ },
+ .d => switch (int_mod) {
+ .l => .fcvtdl,
+ .lu => .fcvtdlu,
+ .w => .fcvtdw,
+ .wu => .fcvtdwu,
+ },
+ },
+ .data = .{ .rr = .{
+ .rd = dst_reg,
+ .rs = src_reg,
+ } },
+ });
+
+ break :result .{ .register = dst_reg };
+ };
return func.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airIntFromFloat(func: *Func, inst: Air.Inst.Index) !void {
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airIntFromFloat for {}", .{
- func.target.cpu.arch,
- });
+ const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
+ const pt = func.pt;
+ const zcu = pt.zcu;
+
+ const operand = try func.resolveInst(ty_op.operand);
+ const src_ty = func.typeOf(ty_op.operand);
+ const dst_ty = ty_op.ty.toType();
+
+ const is_unsigned = dst_ty.isUnsignedInt(zcu);
+ const src_bits = src_ty.bitSize(pt);
+ const dst_bits = dst_ty.bitSize(pt);
+
+ const float_mod: enum { s, d } = switch (src_bits) {
+ 32 => .s,
+ 64 => .d,
+ else => return func.fail("TODO: airIntFromFloat src size {d}", .{src_bits}),
+ };
+
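+ // Select the fcvt integer width: 8- and 16-bit destinations go through the 64-bit l/lu form.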
+ const int_mod: Mir.FcvtOp = switch (dst_bits) {
+ 32 => if (is_unsigned) .wu else .w,
+ 8, 16, 64 => if (is_unsigned) .lu else .l,
+ else => return func.fail("TODO: airIntFromFloat dst size: {d}", .{dst_bits}),
+ };
+
+ const src_reg, const src_lock = try func.promoteReg(src_ty, operand);
+ defer if (src_lock) |lock| func.register_manager.unlockReg(lock);
+
+ const dst_reg, const dst_lock = try func.allocReg(.int);
+ defer func.register_manager.unlockReg(dst_lock);
+
+ _ = try func.addInst(.{
+ .tag = switch (float_mod) {
+ .s => switch (int_mod) {
+ .l => .fcvtls,
+ .lu => .fcvtlus,
+ .w => .fcvtws,
+ .wu => .fcvtwus,
+ },
+ .d => switch (int_mod) {
+ .l => .fcvtld,
+ .lu => .fcvtlud,
+ .w => .fcvtwd,
+ .wu => .fcvtwud,
+ },
+ },
+ .data = .{ .rr = .{
+ .rd = dst_reg,
+ .rs = src_reg,
+ } },
+ });
+
+ break :result .{ .register = dst_reg };
+ };
return func.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airCmpxchg(func: *Func, inst: Air.Inst.Index) !void {
+fn airCmpxchg(func: *Func, inst: Air.Inst.Index, strength: enum { weak, strong }) !void {
+ _ = strength; // TODO: do something with this
+
+ const pt = func.pt;
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
- const extra = func.air.extraData(Air.Block, ty_pl.payload);
- _ = extra;
- return func.fail("TODO implement airCmpxchg for {}", .{
- func.target.cpu.arch,
+ const extra = func.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
+
+ const ptr_ty = func.typeOf(extra.ptr);
+ const val_ty = func.typeOf(extra.expected_value);
+ const val_abi_size: u32 = @intCast(val_ty.abiSize(pt));
+
+ switch (val_abi_size) {
+ 1, 2, 4, 8 => {},
+ else => return func.fail("TODO: airCmpxchg Int size {}", .{val_abi_size}),
+ }
+
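+ // Map the success ordering onto the acquire/release bits of the LR instruction.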
+ const lr_order: struct { aq: Mir.Barrier, rl: Mir.Barrier } = switch (extra.successOrder()) {
+ .unordered,
+ => unreachable,
+
+ .monotonic,
+ .release,
+ => .{ .aq = .none, .rl = .none },
+ .acquire,
+ .acq_rel,
+ => .{ .aq = .aq, .rl = .none },
+ .seq_cst => .{ .aq = .aq, .rl = .rl },
+ };
+
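+ // The SC never needs an acquire bit; its release bit is derived from the success ordering.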
+ const sc_order: struct { aq: Mir.Barrier, rl: Mir.Barrier } = switch (extra.failureOrder()) {
+ .unordered,
+ .release,
+ .acq_rel,
+ => unreachable,
+
+ .monotonic,
+ .acquire,
+ .seq_cst,
+ => switch (extra.successOrder()) {
+ .release,
+ .seq_cst,
+ => .{ .aq = .none, .rl = .rl },
+ else => .{ .aq = .none, .rl = .none },
+ },
+ };
+
+ const ptr_mcv = try func.resolveInst(extra.ptr);
+ const ptr_reg, const ptr_lock = try func.promoteReg(ptr_ty, ptr_mcv);
+ defer if (ptr_lock) |lock| func.register_manager.unlockReg(lock);
+
+ const exp_mcv = try func.resolveInst(extra.expected_value);
+ const exp_reg, const exp_lock = try func.promoteReg(val_ty, exp_mcv);
+ defer if (exp_lock) |lock| func.register_manager.unlockReg(lock);
+ try func.truncateRegister(val_ty, exp_reg);
+
+ const new_mcv = try func.resolveInst(extra.new_value);
+ const new_reg, const new_lock = try func.promoteReg(val_ty, new_mcv);
+ defer if (new_lock) |lock| func.register_manager.unlockReg(lock);
+ try func.truncateRegister(val_ty, new_reg);
+
+ const branch_reg, const branch_lock = try func.allocReg(.int);
+ defer func.register_manager.unlockReg(branch_lock);
+
+ const fallthrough_reg, const fallthrough_lock = try func.allocReg(.int);
+ defer func.register_manager.unlockReg(fallthrough_lock);
+
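+ // Loop head: load-reserve the current value; a failed store-conditional branches back here.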
+ const jump_back = try func.addInst(.{
+ .tag = if (val_ty.bitSize(pt) <= 32) .lrw else .lrd,
+ .data = .{ .amo = .{
+ .aq = lr_order.aq,
+ .rl = lr_order.rl,
+ .rd = branch_reg,
+ .rs1 = ptr_reg,
+ .rs2 = .zero,
+ } },
});
- // return func.finishAir(inst, result, .{ extra.ptr, extra.expected_value, extra.new_value });
+ try func.truncateRegister(val_ty, branch_reg);
+
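+ // If the loaded value does not match the expected one, skip the store-conditional.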
+ const jump_forward = try func.addInst(.{
+ .tag = .bne,
+ .data = .{ .b_type = .{
+ .rs1 = branch_reg,
+ .rs2 = exp_reg,
+ .inst = undefined,
+ } },
+ });
+
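+ // Attempt the store; rd becomes non-zero if the reservation was lost.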
+ _ = try func.addInst(.{
+ .tag = if (val_ty.bitSize(pt) <= 32) .scw else .scd,
+ .data = .{ .amo = .{
+ .aq = sc_order.aq,
+ .rl = sc_order.rl,
+ .rd = fallthrough_reg,
+ .rs1 = ptr_reg,
+ .rs2 = new_reg,
+ } },
+ });
+ try func.truncateRegister(Type.bool, fallthrough_reg);
+
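+ // Reservation lost; retry the whole LR/SC sequence.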
+ _ = try func.addInst(.{
+ .tag = .bne,
+ .data = .{ .b_type = .{
+ .rs1 = fallthrough_reg,
+ .rs2 = .zero,
+ .inst = jump_back,
+ } },
+ });
+
+ func.performReloc(jump_forward);
+
+ const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
+ const dst_mcv = try func.allocRegOrMem(func.typeOfIndex(inst), inst, false);
+
+ const tmp_reg, const tmp_lock = try func.allocReg(.int);
+ defer func.register_manager.unlockReg(tmp_lock);
+
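+ // The result is {old value, flag}: the flag is set when old != expected, i.e. the exchange failed.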
+ try func.genBinOp(
+ .cmp_neq,
+ .{ .register = branch_reg },
+ val_ty,
+ .{ .register = exp_reg },
+ val_ty,
+ tmp_reg,
+ );
+
+ try func.genCopy(val_ty, dst_mcv, .{ .register = branch_reg });
+ try func.genCopy(
+ Type.bool,
+ dst_mcv.address().offset(@intCast(val_abi_size)).deref(),
+ .{ .register = tmp_reg },
+ );
+
+ break :result dst_mcv;
+ };
+
+ return func.finishAir(inst, result, .{ extra.ptr, extra.expected_value, extra.new_value });
}
fn airAtomicRmw(func: *Func, inst: Air.Inst.Index) !void {
const pt = func.pt;
+ const zcu = pt.zcu;
const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = func.air.extraData(Air.AtomicRmw, pl_op.payload).data;
- const op = extra.op();
- const order = extra.ordering();
+ const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
+ const op = extra.op();
+ const order = extra.ordering();
- const ptr_ty = func.typeOf(pl_op.operand);
- const ptr_mcv = try func.resolveInst(pl_op.operand);
+ const ptr_ty = func.typeOf(pl_op.operand);
+ const ptr_mcv = try func.resolveInst(pl_op.operand);
- const val_ty = func.typeOf(extra.operand);
- const val_size = val_ty.abiSize(pt);
- const val_mcv = try func.resolveInst(extra.operand);
+ const val_ty = func.typeOf(extra.operand);
+ const val_size = val_ty.abiSize(pt);
+ const val_mcv = try func.resolveInst(extra.operand);
- if (!math.isPowerOfTwo(val_size))
- return func.fail("TODO: airAtomicRmw non-pow 2", .{});
+ if (!math.isPowerOfTwo(val_size))
+ return func.fail("TODO: airAtomicRmw non-pow 2", .{});
- switch (val_ty.zigTypeTag(pt.zcu)) {
- .Int => {},
- inline .Bool, .Float, .Enum, .Pointer => |ty| return func.fail("TODO: airAtomicRmw {s}", .{@tagName(ty)}),
- else => unreachable,
- }
+ switch (val_ty.zigTypeTag(pt.zcu)) {
+ .Enum, .Int => {},
+ inline .Bool, .Float, .Pointer => |ty| return func.fail("TODO: airAtomicRmw {s}", .{@tagName(ty)}),
+ else => unreachable,
+ }
- switch (val_size) {
- 1, 2 => return func.fail("TODO: airAtomicRmw Int {}", .{val_size}),
- 4, 8 => {},
- else => unreachable,
- }
+ const method: enum { amo, loop } = switch (val_size) {
+ 1, 2 => .loop,
+ 4, 8 => .amo,
+ else => unreachable,
+ };
- const ptr_register, const ptr_lock = try func.promoteReg(ptr_ty, ptr_mcv);
- defer if (ptr_lock) |lock| func.register_manager.unlockReg(lock);
+ const ptr_register, const ptr_lock = try func.promoteReg(ptr_ty, ptr_mcv);
+ defer if (ptr_lock) |lock| func.register_manager.unlockReg(lock);
- const val_register, const val_lock = try func.promoteReg(val_ty, val_mcv);
- defer if (val_lock) |lock| func.register_manager.unlockReg(lock);
+ const val_register, const val_lock = try func.promoteReg(val_ty, val_mcv);
+ defer if (val_lock) |lock| func.register_manager.unlockReg(lock);
- const result_mcv = try func.allocRegOrMem(val_ty, inst, true);
- assert(result_mcv == .register); // should fit into 8 bytes
+ const result_mcv = try func.allocRegOrMem(val_ty, inst, true);
+ assert(result_mcv == .register); // should fit into 8 bytes
+ const result_reg = result_mcv.register;
- const aq, const rl = switch (order) {
- .unordered => unreachable,
- .monotonic => .{ false, false },
- .acquire => .{ true, false },
- .release => .{ false, true },
- .acq_rel => .{ true, true },
- .seq_cst => .{ true, true },
- };
+ const aq, const rl = switch (order) {
+ .unordered => unreachable,
+ .monotonic => .{ false, false },
+ .acquire => .{ true, false },
+ .release => .{ false, true },
+ .acq_rel => .{ true, true },
+ .seq_cst => .{ true, true },
+ };
- _ = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_amo,
- .data = .{ .amo = .{
- .rd = result_mcv.register,
- .rs1 = ptr_register,
- .rs2 = val_register,
- .aq = if (aq) .aq else .none,
- .rl = if (rl) .rl else .none,
- .op = switch (op) {
- .Xchg => .SWAP,
- .Add => .ADD,
- .Sub => return func.fail("TODO: airAtomicRmw SUB", .{}),
- .And => .AND,
- .Nand => return func.fail("TODO: airAtomicRmw NAND", .{}),
- .Or => .OR,
- .Xor => .XOR,
- .Max => .MAX,
- .Min => .MIN,
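+ // 32- and 64-bit operands map directly onto AMO instructions; 8- and 16-bit ones need an LR/SC loop.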
+ switch (method) {
+ .amo => {
+ const is_d = val_ty.abiSize(pt) == 8;
+ const is_un = val_ty.isUnsignedInt(zcu);
+
+ const mnem: Mnemonic = switch (op) {
+ // zig fmt: off
+ .Xchg => if (is_d) .amoswapd else .amoswapw,
+ .Add => if (is_d) .amoaddd else .amoaddw,
+ .And => if (is_d) .amoandd else .amoandw,
+ .Or => if (is_d) .amoord else .amoorw,
+ .Xor => if (is_d) .amoxord else .amoxorw,
+ .Max => if (is_d) if (is_un) .amomaxud else .amomaxd else if (is_un) .amomaxuw else .amomaxw,
+ .Min => if (is_d) if (is_un) .amominud else .amomind else if (is_un) .amominuw else .amominw,
+ else => return func.fail("TODO: airAtomicRmw amo {s}", .{@tagName(op)}),
+ // zig fmt: on
+ };
+
+ _ = try func.addInst(.{
+ .tag = mnem,
+ .data = .{ .amo = .{
+ .rd = result_reg,
+ .rs1 = ptr_register,
+ .rs2 = val_register,
+ .aq = if (aq) .aq else .none,
+ .rl = if (rl) .rl else .none,
+ } },
+ });
},
- .ty = val_ty,
- } },
- });
+ .loop => {
+ // where we'll jump back when the sc fails
+ const jump_back = try func.addInst(.{
+ .tag = .lrw,
+ .data = .{ .amo = .{
+ .rd = result_reg,
+ .rs1 = ptr_register,
+ .rs2 = .zero,
+ .aq = if (aq) .aq else .none,
+ .rl = if (rl) .rl else .none,
+ } },
+ });
+
+ const after_reg, const after_lock = try func.allocReg(.int);
+ defer func.register_manager.unlockReg(after_lock);
+
+ switch (op) {
+ .Add, .Sub => |tag| {
+ _ = try func.genBinOp(
+ switch (tag) {
+ .Add => .add,
+ .Sub => .sub,
+ else => unreachable,
+ },
+ .{ .register = result_reg },
+ val_ty,
+ .{ .register = val_register },
+ val_ty,
+ after_reg,
+ );
+ },
+
+ else => return func.fail("TODO: airAtomicRmw loop {s}", .{@tagName(op)}),
+ }
+
+ _ = try func.addInst(.{
+ .tag = .scw,
+ .data = .{ .amo = .{
+ .rd = after_reg,
+ .rs1 = ptr_register,
+ .rs2 = after_reg,
+ .aq = if (aq) .aq else .none,
+ .rl = if (rl) .rl else .none,
+ } },
+ });
+
+ _ = try func.addInst(.{
+ .tag = .bne,
+ .data = .{ .b_type = .{
+ .inst = jump_back,
+ .rs1 = after_reg,
+ .rs2 = .zero,
+ } },
+ });
+ },
+ }
+ break :result result_mcv;
+ };
- return func.finishAir(inst, result_mcv, .{ pl_op.operand, extra.operand, .none });
+ return func.finishAir(inst, result, .{ pl_op.operand, extra.operand, .none });
}
fn airAtomicLoad(func: *Func, inst: Air.Inst.Index) !void {
- const zcu = func.pt.zcu;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const atomic_load = func.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load;
const order: std.builtin.AtomicOrder = atomic_load.order;
@@ -6555,20 +7608,19 @@ fn airAtomicLoad(func: *Func, inst: Air.Inst.Index) !void {
const elem_ty = ptr_ty.childType(zcu);
const ptr_mcv = try func.resolveInst(atomic_load.ptr);
+ const bit_size = elem_ty.bitSize(pt);
+ if (bit_size > 64) return func.fail("TODO: airAtomicLoad > 64 bits", .{});
+
const result_mcv = try func.allocRegOrMem(elem_ty, inst, true);
assert(result_mcv == .register); // should fit into 8 bytes
if (order == .seq_cst) {
_ = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_fence,
- .data = .{
- .fence = .{
- .pred = .rw,
- .succ = .rw,
- .fm = .none,
- },
- },
+ .tag = .fence,
+ .data = .{ .fence = .{
+ .pred = .rw,
+ .succ = .rw,
+ } },
});
}
@@ -6581,15 +7633,11 @@ fn airAtomicLoad(func: *Func, inst: Air.Inst.Index) !void {
// Make sure all previous reads happen before any reading or writing occurs.
.seq_cst, .acquire => {
_ = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_fence,
- .data = .{
- .fence = .{
- .pred = .r,
- .succ = .rw,
- .fm = .none,
- },
- },
+ .tag = .fence,
+ .data = .{ .fence = .{
+ .pred = .r,
+ .succ = .rw,
+ } },
});
},
else => unreachable,
@@ -6607,25 +7655,24 @@ fn airAtomicStore(func: *Func, inst: Air.Inst.Index, order: std.builtin.AtomicOr
const val_ty = func.typeOf(bin_op.rhs);
const val_mcv = try func.resolveInst(bin_op.rhs);
+ const bit_size = val_ty.bitSize(func.pt);
+ if (bit_size > 64) return func.fail("TODO: airAtomicStore > 64 bits", .{});
+
switch (order) {
.unordered, .monotonic => {},
.release, .seq_cst => {
_ = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_fence,
- .data = .{
- .fence = .{
- .pred = .rw,
- .succ = .w,
- .fm = .none,
- },
- },
+ .tag = .fence,
+ .data = .{ .fence = .{
+ .pred = .rw,
+ .succ = .w,
+ } },
});
},
else => unreachable,
}
- try func.store(ptr_mcv, val_mcv, ptr_ty, val_ty);
+ try func.store(ptr_mcv, val_mcv, ptr_ty);
return func.finishAir(inst, .unreach, .{ bin_op.lhs, bin_op.rhs, .none });
}
@@ -6689,15 +7736,15 @@ fn airMemset(func: *Func, inst: Air.Inst.Index, safety: bool) !void {
const len = dst_ptr_ty.childType(zcu).arrayLen(zcu);
assert(len != 0); // prevented by Sema
- try func.store(dst_ptr, src_val, elem_ptr_ty, elem_ty);
+ try func.store(dst_ptr, src_val, elem_ptr_ty);
const second_elem_ptr_reg, const second_elem_ptr_lock = try func.allocReg(.int);
defer func.register_manager.unlockReg(second_elem_ptr_lock);
const second_elem_ptr_mcv: MCValue = .{ .register = second_elem_ptr_reg };
- try func.genSetReg(Type.usize, second_elem_ptr_reg, .{ .register_offset = .{
- .reg = try func.copyToTmpRegister(Type.usize, dst_ptr),
+ try func.genSetReg(Type.u64, second_elem_ptr_reg, .{ .register_offset = .{
+ .reg = try func.copyToTmpRegister(Type.u64, dst_ptr),
.off = elem_abi_size,
} });
@@ -6711,123 +7758,94 @@ fn airMemset(func: *Func, inst: Air.Inst.Index, safety: bool) !void {
}
fn airMemcpy(func: *Func, inst: Air.Inst.Index) !void {
- _ = inst;
- return func.fail("TODO implement airMemcpy for {}", .{func.target.cpu.arch});
-}
-
-fn airTagName(func: *Func, inst: Air.Inst.Index) !void {
- const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
- const operand = try func.resolveInst(un_op);
- const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else {
- _ = operand;
- return func.fail("TODO implement airTagName for riscv64", .{});
- };
- return func.finishAir(inst, result, .{ un_op, .none, .none });
-}
-
-fn airErrorName(func: *Func, inst: Air.Inst.Index) !void {
const pt = func.pt;
const zcu = pt.zcu;
- const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+ const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const err_ty = func.typeOf(un_op);
- const err_mcv = try func.resolveInst(un_op);
+ const dst_ptr = try func.resolveInst(bin_op.lhs);
+ const src_ptr = try func.resolveInst(bin_op.rhs);
- const err_reg = try func.copyToTmpRegister(err_ty, err_mcv);
- const err_lock = func.register_manager.lockRegAssumeUnused(err_reg);
- defer func.register_manager.unlockReg(err_lock);
+ const dst_ty = func.typeOf(bin_op.lhs);
- const addr_reg, const addr_lock = try func.allocReg(.int);
- defer func.register_manager.unlockReg(addr_lock);
+ const len_mcv: MCValue = switch (dst_ty.ptrSize(zcu)) {
+ .Slice => len: {
+ const len_reg, const len_lock = try func.allocReg(.int);
+ defer func.register_manager.unlockReg(len_lock);
- // this is now the base address of the error name table
- const lazy_sym = link.File.LazySymbol.initDecl(.const_data, null, zcu);
- if (func.bin_file.cast(link.File.Elf)) |elf_file| {
- const sym_index = elf_file.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(elf_file, pt, lazy_sym) catch |err|
- return func.fail("{s} creating lazy symbol", .{@errorName(err)});
- const sym = elf_file.symbol(sym_index);
- try func.genSetReg(Type.usize, addr_reg, .{ .load_symbol = .{ .sym = sym.esym_index } });
- } else {
- return func.fail("TODO: riscv non-elf", .{});
- }
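+ // Byte count = element size * slice length; the length word lives 8 bytes past the slice pointer.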
+ const elem_size = dst_ty.childType(zcu).abiSize(pt);
+ try func.genBinOp(
+ .mul,
+ .{ .immediate = elem_size },
+ Type.u64,
+ dst_ptr.address().offset(8).deref(),
+ Type.u64,
+ len_reg,
+ );
+ break :len .{ .register = len_reg };
+ },
+ .One => len: {
+ const array_ty = dst_ty.childType(zcu);
+ break :len .{ .immediate = array_ty.arrayLen(zcu) * array_ty.childType(zcu).abiSize(pt) };
+ },
+ else => |size| return func.fail("TODO: airMemcpy size {s}", .{@tagName(size)}),
+ };
+ const len_lock: ?RegisterLock = switch (len_mcv) {
+ .register => |reg| func.register_manager.lockRegAssumeUnused(reg),
+ else => null,
+ };
+ defer if (len_lock) |lock| func.register_manager.unlockReg(lock);
- const start_reg, const start_lock = try func.allocReg(.int);
- defer func.register_manager.unlockReg(start_lock);
+ try func.genInlineMemcpy(dst_ptr, src_ptr, len_mcv);
- const end_reg, const end_lock = try func.allocReg(.int);
- defer func.register_manager.unlockReg(end_lock);
+ return func.finishAir(inst, .unreach, .{ bin_op.lhs, bin_op.rhs, .none });
+}
- // const tmp_reg, const tmp_lock = try func.allocReg(.int);
- // defer func.register_manager.unlockReg(tmp_lock);
+fn airTagName(func: *Func, inst: Air.Inst.Index) !void {
+ const pt = func.pt;
+ const zcu = pt.zcu;
- // we move the base address forward by the following formula: base + (errno * 8)
+ const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+ const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
+ const enum_ty = func.typeOf(un_op);
- // shifting left by 4 is the same as multiplying by 8
- _ = try func.addInst(.{
- .tag = .slli,
- .ops = .rri,
- .data = .{ .i_type = .{
- .imm12 = Immediate.u(4),
- .rd = err_reg,
- .rs1 = err_reg,
- } },
- });
+ // TODO: work out the bugs
+ if (true) return func.fail("TODO: airTagName", .{});
- _ = try func.addInst(.{
- .tag = .add,
- .ops = .rrr,
- .data = .{ .r_type = .{
- .rd = addr_reg,
- .rs1 = addr_reg,
- .rs2 = err_reg,
- } },
- });
+ const param_regs = abi.Registers.Integer.function_arg_regs;
+ const dst_mcv = try func.allocRegOrMem(Type.u64, inst, false);
+ try func.genSetReg(Type.u64, param_regs[0], dst_mcv.address());
- _ = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_load_rm,
- .data = .{
- .rm = .{
- .r = start_reg,
- .m = .{
- .base = .{ .reg = addr_reg },
- .mod = .{ .size = .dword, .unsigned = true },
- },
- },
- },
- });
+ const operand = try func.resolveInst(un_op);
+ try func.genSetReg(enum_ty, param_regs[1], operand);
- _ = try func.addInst(.{
- .tag = .pseudo,
- .ops = .pseudo_load_rm,
- .data = .{
- .rm = .{
- .r = end_reg,
- .m = .{
- .base = .{ .reg = addr_reg },
- .mod = .{ .size = .dword, .unsigned = true },
- },
- },
- },
- });
+ const lazy_sym = link.File.LazySymbol.initDecl(.code, enum_ty.getOwnerDecl(zcu), zcu);
+ const elf_file = func.bin_file.cast(link.File.Elf).?;
+ const sym_index = elf_file.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(elf_file, pt, lazy_sym) catch |err|
+ return func.fail("{s} creating lazy symbol", .{@errorName(err)});
+ const sym = elf_file.symbol(sym_index);
- const dst_mcv = try func.allocRegOrMem(func.typeOfIndex(inst), inst, false);
- const frame = dst_mcv.load_frame;
- try func.genSetMem(
- .{ .frame = frame.index },
- frame.off,
- Type.usize,
- .{ .register = start_reg },
- );
+ if (func.mod.pic) {
+ return func.fail("TODO: airTagName pic", .{});
+ } else {
+ try func.genSetReg(Type.u64, .ra, .{ .load_symbol = .{ .sym = sym.esym_index } });
+ _ = try func.addInst(.{
+ .tag = .jalr,
+ .data = .{ .i_type = .{
+ .rd = .ra,
+ .rs1 = .ra,
+ .imm12 = Immediate.s(0),
+ } },
+ });
+ }
- try func.genSetMem(
- .{ .frame = frame.index },
- frame.off + 8,
- Type.usize,
- .{ .register = end_reg },
- );
+ break :result dst_mcv;
+ };
+ return func.finishAir(inst, result, .{ un_op, .none, .none });
+}
- return func.finishAir(inst, dst_mcv, .{ un_op, .none, .none });
+fn airErrorName(func: *Func, inst: Air.Inst.Index) !void {
+ _ = inst;
+ return func.fail("TODO: airErrorName", .{});
}
fn airSplat(func: *Func, inst: Air.Inst.Index) !void {
@@ -7013,9 +8031,10 @@ fn getResolvedInstValue(func: *Func, inst: Air.Inst.Index) *InstTracking {
fn genTypedValue(func: *Func, val: Value) InnerError!MCValue {
const pt = func.pt;
+ const zcu = pt.zcu;
const gpa = func.gpa;
- const owner_decl_index = pt.zcu.funcOwnerDeclIndex(func.func_index);
+ const owner_decl_index = func.owner.getDecl(zcu);
const lf = func.bin_file;
const src_loc = func.src_loc;
@@ -7047,9 +8066,10 @@ fn genTypedValue(func: *Func, val: Value) InnerError!MCValue {
.none => .none,
.undef => unreachable,
.load_symbol => |sym_index| .{ .load_symbol = .{ .sym = sym_index } },
+ .load_tlv => |sym_index| .{ .lea_tlv = sym_index },
.immediate => |imm| .{ .immediate = imm },
.memory => |addr| .{ .memory = addr },
- .load_got, .load_direct, .load_tlv => {
+ .load_got, .load_direct => {
return func.fail("TODO: genTypedValue {s}", .{@tagName(mcv)});
},
},
diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig
index bc972e86b9..6c04988e78 100644
--- a/src/arch/riscv64/Emit.zig
+++ b/src/arch/riscv64/Emit.zig
@@ -40,7 +40,7 @@ pub fn emitMir(emit: *Emit) Error!void {
.source = start_offset,
.target = target,
.offset = 0,
- .enc = std.meta.activeTag(lowered_inst.encoding.data),
+ .fmt = std.meta.activeTag(lowered_inst),
}),
.load_symbol_reloc => |symbol| {
const is_obj_or_static_lib = switch (emit.lower.output_mode) {
@@ -49,46 +49,70 @@ pub fn emitMir(emit: *Emit) Error!void {
.Lib => emit.lower.link_mode == .static,
};
- if (emit.bin_file.cast(link.File.Elf)) |elf_file| {
- const atom_ptr = elf_file.symbol(symbol.atom_index).atom(elf_file).?;
- const sym_index = elf_file.zigObjectPtr().?.symbol(symbol.sym_index);
- const sym = elf_file.symbol(sym_index);
-
- var hi_r_type: u32 = @intFromEnum(std.elf.R_RISCV.HI20);
- var lo_r_type: u32 = @intFromEnum(std.elf.R_RISCV.LO12_I);
-
- if (sym.flags.needs_zig_got and !is_obj_or_static_lib) {
- _ = try sym.getOrCreateZigGotEntry(sym_index, elf_file);
-
- hi_r_type = Elf.R_ZIG_GOT_HI20;
- lo_r_type = Elf.R_ZIG_GOT_LO12;
- }
-
- try atom_ptr.addReloc(elf_file, .{
- .r_offset = start_offset,
- .r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | hi_r_type,
- .r_addend = 0,
- });
-
- try atom_ptr.addReloc(elf_file, .{
- .r_offset = start_offset + 4,
- .r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | lo_r_type,
- .r_addend = 0,
- });
- } else unreachable;
+ const elf_file = emit.bin_file.cast(link.File.Elf).?;
+
+ const atom_ptr = elf_file.symbol(symbol.atom_index).atom(elf_file).?;
+ const sym_index = elf_file.zigObjectPtr().?.symbol(symbol.sym_index);
+ const sym = elf_file.symbol(sym_index);
+
+ var hi_r_type: u32 = @intFromEnum(std.elf.R_RISCV.HI20);
+ var lo_r_type: u32 = @intFromEnum(std.elf.R_RISCV.LO12_I);
+
+ if (sym.flags.needs_zig_got and !is_obj_or_static_lib) {
+ _ = try sym.getOrCreateZigGotEntry(sym_index, elf_file);
+
+ hi_r_type = Elf.R_ZIG_GOT_HI20;
+ lo_r_type = Elf.R_ZIG_GOT_LO12;
+ }
+
+ try atom_ptr.addReloc(elf_file, .{
+ .r_offset = start_offset,
+ .r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | hi_r_type,
+ .r_addend = 0,
+ });
+
+ try atom_ptr.addReloc(elf_file, .{
+ .r_offset = start_offset + 4,
+ .r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | lo_r_type,
+ .r_addend = 0,
+ });
+ },
+ .load_tlv_reloc => |symbol| {
+ const elf_file = emit.bin_file.cast(link.File.Elf).?;
+
+ const atom_ptr = elf_file.symbol(symbol.atom_index).atom(elf_file).?;
+
+ const R_RISCV = std.elf.R_RISCV;
+
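+ // Local-exec TLS: three consecutive instructions get relocated with TPREL_HI20, TPREL_ADD, and TPREL_LO12_I.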
+ try atom_ptr.addReloc(elf_file, .{
+ .r_offset = start_offset,
+ .r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | @intFromEnum(R_RISCV.TPREL_HI20),
+ .r_addend = 0,
+ });
+
+ try atom_ptr.addReloc(elf_file, .{
+ .r_offset = start_offset + 4,
+ .r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | @intFromEnum(R_RISCV.TPREL_ADD),
+ .r_addend = 0,
+ });
+
+ try atom_ptr.addReloc(elf_file, .{
+ .r_offset = start_offset + 8,
+ .r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | @intFromEnum(R_RISCV.TPREL_LO12_I),
+ .r_addend = 0,
+ });
},
.call_extern_fn_reloc => |symbol| {
- if (emit.bin_file.cast(link.File.Elf)) |elf_file| {
- const atom_ptr = elf_file.symbol(symbol.atom_index).atom(elf_file).?;
+ const elf_file = emit.bin_file.cast(link.File.Elf).?;
+ const atom_ptr = elf_file.symbol(symbol.atom_index).atom(elf_file).?;
- const r_type: u32 = @intFromEnum(std.elf.R_RISCV.CALL_PLT);
+ const r_type: u32 = @intFromEnum(std.elf.R_RISCV.CALL_PLT);
- try atom_ptr.addReloc(elf_file, .{
- .r_offset = start_offset,
- .r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | r_type,
- .r_addend = 0,
- });
- } else return emit.fail("TODO: call_extern_fn_reloc non-ELF", .{});
+ try atom_ptr.addReloc(elf_file, .{
+ .r_offset = start_offset,
+ .r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | r_type,
+ .r_addend = 0,
+ });
},
};
}
@@ -98,40 +122,37 @@ pub fn emitMir(emit: *Emit) Error!void {
const mir_inst = emit.lower.mir.instructions.get(mir_index);
switch (mir_inst.tag) {
else => unreachable,
- .pseudo => switch (mir_inst.ops) {
- else => unreachable,
- .pseudo_dbg_prologue_end => {
- switch (emit.debug_output) {
- .dwarf => |dw| {
- try dw.setPrologueEnd();
- log.debug("mirDbgPrologueEnd (line={d}, col={d})", .{
- emit.prev_di_line, emit.prev_di_column,
- });
- try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
- },
- .plan9 => {},
- .none => {},
- }
- },
- .pseudo_dbg_line_column => try emit.dbgAdvancePCAndLine(
- mir_inst.data.pseudo_dbg_line_column.line,
- mir_inst.data.pseudo_dbg_line_column.column,
- ),
- .pseudo_dbg_epilogue_begin => {
- switch (emit.debug_output) {
- .dwarf => |dw| {
- try dw.setEpilogueBegin();
- log.debug("mirDbgEpilogueBegin (line={d}, col={d})", .{
- emit.prev_di_line, emit.prev_di_column,
- });
- try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
- },
- .plan9 => {},
- .none => {},
- }
- },
- .pseudo_dead => {},
+ .pseudo_dbg_prologue_end => {
+ switch (emit.debug_output) {
+ .dwarf => |dw| {
+ try dw.setPrologueEnd();
+ log.debug("mirDbgPrologueEnd (line={d}, col={d})", .{
+ emit.prev_di_line, emit.prev_di_column,
+ });
+ try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
+ },
+ .plan9 => {},
+ .none => {},
+ }
},
+ .pseudo_dbg_line_column => try emit.dbgAdvancePCAndLine(
+ mir_inst.data.pseudo_dbg_line_column.line,
+ mir_inst.data.pseudo_dbg_line_column.column,
+ ),
+ .pseudo_dbg_epilogue_begin => {
+ switch (emit.debug_output) {
+ .dwarf => |dw| {
+ try dw.setEpilogueBegin();
+ log.debug("mirDbgEpilogueBegin (line={d}, col={d})", .{
+ emit.prev_di_line, emit.prev_di_column,
+ });
+ try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
+ },
+ .plan9 => {},
+ .none => {},
+ }
+ },
+ .pseudo_dead => {},
}
}
}
@@ -151,8 +172,8 @@ const Reloc = struct {
target: Mir.Inst.Index,
/// Offset of the relocation within the instruction.
offset: u32,
- /// Encoding of the instruction, used to determine how to modify it.
- enc: Encoding.InstEnc,
+ /// Format of the instruction, used to determine how to modify it.
+ fmt: encoding.Lir.Format,
};
fn fixupRelocs(emit: *Emit) Error!void {
@@ -164,12 +185,10 @@ fn fixupRelocs(emit: *Emit) Error!void {
const disp = @as(i32, @intCast(target)) - @as(i32, @intCast(reloc.source));
const code: *[4]u8 = emit.code.items[reloc.source + reloc.offset ..][0..4];
- log.debug("disp: {x}", .{disp});
-
- switch (reloc.enc) {
+ switch (reloc.fmt) {
.J => riscv_util.writeInstJ(code, @bitCast(disp)),
.B => riscv_util.writeInstB(code, @bitCast(disp)),
- else => return emit.fail("tried to reloc encoding type {s}", .{@tagName(reloc.enc)}),
+ else => return emit.fail("tried to reloc format type {s}", .{@tagName(reloc.fmt)}),
}
}
}
@@ -209,5 +228,5 @@ const Emit = @This();
const Lower = @import("Lower.zig");
const Mir = @import("Mir.zig");
const riscv_util = @import("../../link/riscv.zig");
-const Encoding = @import("Encoding.zig");
const Elf = @import("../../link/Elf.zig");
+const encoding = @import("encoding.zig");
diff --git a/src/arch/riscv64/Encoding.zig b/src/arch/riscv64/Encoding.zig
deleted file mode 100644
index f4684fe42b..0000000000
--- a/src/arch/riscv64/Encoding.zig
+++ /dev/null
@@ -1,1119 +0,0 @@
-mnemonic: Mnemonic,
-data: Data,
-
-const OpCode = enum(u7) {
- LOAD = 0b0000011,
- LOAD_FP = 0b0000111,
- MISC_MEM = 0b0001111,
- OP_IMM = 0b0010011,
- AUIPC = 0b0010111,
- OP_IMM_32 = 0b0011011,
- STORE = 0b0100011,
- STORE_FP = 0b0100111,
- AMO = 0b0101111,
- OP_V = 0b1010111,
- OP = 0b0110011,
- OP_32 = 0b0111011,
- LUI = 0b0110111,
- MADD = 0b1000011,
- MSUB = 0b1000111,
- NMSUB = 0b1001011,
- NMADD = 0b1001111,
- OP_FP = 0b1010011,
- OP_IMM_64 = 0b1011011,
- BRANCH = 0b1100011,
- JALR = 0b1100111,
- JAL = 0b1101111,
- SYSTEM = 0b1110011,
- OP_64 = 0b1111011,
- NONE = 0b00000000,
-};
-
-const FpFmt = enum(u2) {
- /// 32-bit single-precision
- S = 0b00,
- /// 64-bit double-precision
- D = 0b01,
-
- // H = 0b10, unused in the G extension
-
- /// 128-bit quad-precision
- Q = 0b11,
-};
-
-const AmoWidth = enum(u3) {
- W = 0b010,
- D = 0b011,
-};
-
-const FenceMode = enum(u4) {
- none = 0b0000,
- tso = 0b1000,
-};
-
-const Enc = struct {
- opcode: OpCode,
-
- data: union(enum) {
- /// funct3 + funct7
- ff: struct {
- funct3: u3,
- funct7: u7,
- },
- amo: struct {
- funct5: u5,
- width: AmoWidth,
- },
- fence: struct {
- funct3: u3,
- fm: FenceMode,
- },
- /// funct5 + rm + fmt
- fmt: struct {
- funct5: u5,
- rm: u3,
- fmt: FpFmt,
- },
- /// funct3
- f: struct {
- funct3: u3,
- },
- /// typ + funct3 + has_5
- sh: struct {
- typ: u6,
- funct3: u3,
- has_5: bool,
- },
- vecls: struct {
- width: VecWidth,
- umop: Umop,
- vm: bool,
- mop: Mop,
- mew: bool,
- nf: u3,
- },
- vecmath: struct {
- vm: bool,
- funct6: u6,
- funct3: VecType,
- },
- /// U-type
- none,
- },
-
- const Mop = enum(u2) {
- unit = 0b00,
- unord = 0b01,
- stride = 0b10,
- ord = 0b11,
- };
-
- const Umop = enum(u5) {
- unit = 0b00000,
- whole = 0b01000,
- mask = 0b01011,
- fault = 0b10000,
- };
-
- const VecWidth = enum(u3) {
- // zig fmt: off
- @"8" = 0b000,
- @"16" = 0b101,
- @"32" = 0b110,
- @"64" = 0b111,
- // zig fmt: on
- };
-
- const VecType = enum(u3) {
- OPIVV = 0b000,
- OPFVV = 0b001,
- OPMVV = 0b010,
- OPIVI = 0b011,
- OPIVX = 0b100,
- OPFVF = 0b101,
- OPMVX = 0b110,
- };
-};
-
-pub const Mnemonic = enum {
- // base mnemonics
-
- // I Type
- ld,
- lw,
- lwu,
- lh,
- lhu,
- lb,
- lbu,
-
- sltiu,
- xori,
- andi,
-
- slli,
- srli,
- srai,
-
- slliw,
- srliw,
- sraiw,
-
- addi,
- jalr,
-
- vsetivli,
- vsetvli,
-
- // U Type
- lui,
- auipc,
-
- // S Type
- sd,
- sw,
- sh,
- sb,
-
- // J Type
- jal,
-
- // B Type
- beq,
-
- // R Type
- add,
- addw,
- sub,
- subw,
- @"and",
- @"or",
- slt,
- sltu,
- xor,
-
- sll,
- srl,
- sra,
-
- sllw,
- srlw,
- sraw,
-
- // System
- ecall,
- ebreak,
- unimp,
-
- csrrs,
-
- // M extension
- mul,
- mulw,
-
- mulh,
- mulhu,
- mulhsu,
-
- div,
- divu,
-
- divw,
- divuw,
-
- rem,
- remu,
-
- remw,
- remuw,
-
- // F extension (32-bit float)
- fadds,
- fsubs,
- fmuls,
- fdivs,
-
- fmins,
- fmaxs,
-
- fsqrts,
-
- flw,
- fsw,
-
- feqs,
- flts,
- fles,
-
- fsgnjns,
- fsgnjxs,
-
- // D extension (64-bit float)
- faddd,
- fsubd,
- fmuld,
- fdivd,
-
- fmind,
- fmaxd,
-
- fsqrtd,
-
- fld,
- fsd,
-
- feqd,
- fltd,
- fled,
-
- fsgnjnd,
- fsgnjxd,
-
- // V Extension
- vle8v,
- vle16v,
- vle32v,
- vle64v,
-
- vse8v,
- vse16v,
- vse32v,
- vse64v,
-
- vsoxei8v,
-
- vaddvv,
- vsubvv,
-
- vfaddvv,
- vfsubvv,
-
- vadcvv,
-
- vmvvx,
-
- vslidedownvx,
-
- // MISC
- fence,
- fencetso,
-
- // AMO
- amoswapw,
- amoaddw,
- amoandw,
- amoorw,
- amoxorw,
- amomaxw,
- amominw,
- amomaxuw,
- amominuw,
-
- amoswapd,
- amoaddd,
- amoandd,
- amoord,
- amoxord,
- amomaxd,
- amomind,
- amomaxud,
- amominud,
-
- // TODO: Q extension
-
- pub fn encoding(mnem: Mnemonic) Enc {
- return switch (mnem) {
- // zig fmt: off
-
- // OP
-
- .add => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0000000 } } },
- .sub => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0100000 } } },
-
- .@"and" => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b111, .funct7 = 0b0000000 } } },
- .@"or" => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b110, .funct7 = 0b0000000 } } },
- .xor => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b100, .funct7 = 0b0000000 } } },
-
- .sltu => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b011, .funct7 = 0b0000000 } } },
- .slt => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b010, .funct7 = 0b0000000 } } },
-
- .mul => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0000001 } } },
- .mulh => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b001, .funct7 = 0b0000001 } } },
- .mulhsu => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b010, .funct7 = 0b0000001 } } },
- .mulhu => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b011, .funct7 = 0b0000001 } } },
-
- .div => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b100, .funct7 = 0b0000001 } } },
- .divu => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b101, .funct7 = 0b0000001 } } },
-
- .rem => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b110, .funct7 = 0b0000001 } } },
- .remu => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b111, .funct7 = 0b0000001 } } },
-
- .sll => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b001, .funct7 = 0b0000000 } } },
- .srl => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b101, .funct7 = 0b0000000 } } },
- .sra => .{ .opcode = .OP, .data = .{ .ff = .{ .funct3 = 0b101, .funct7 = 0b0100000 } } },
-
-
- // OP_IMM
-
- .addi => .{ .opcode = .OP_IMM, .data = .{ .f = .{ .funct3 = 0b000 } } },
- .andi => .{ .opcode = .OP_IMM, .data = .{ .f = .{ .funct3 = 0b111 } } },
- .xori => .{ .opcode = .OP_IMM, .data = .{ .f = .{ .funct3 = 0b100 } } },
-
- .sltiu => .{ .opcode = .OP_IMM, .data = .{ .f = .{ .funct3 = 0b011 } } },
-
- .slli => .{ .opcode = .OP_IMM, .data = .{ .sh = .{ .typ = 0b000000, .funct3 = 0b001, .has_5 = true } } },
- .srli => .{ .opcode = .OP_IMM, .data = .{ .sh = .{ .typ = 0b000000, .funct3 = 0b101, .has_5 = true } } },
- .srai => .{ .opcode = .OP_IMM, .data = .{ .sh = .{ .typ = 0b010000, .funct3 = 0b101, .has_5 = true } } },
-
-
- // OP_IMM_32
-
- .slliw => .{ .opcode = .OP_IMM_32, .data = .{ .sh = .{ .typ = 0b000000, .funct3 = 0b001, .has_5 = false } } },
- .srliw => .{ .opcode = .OP_IMM_32, .data = .{ .sh = .{ .typ = 0b000000, .funct3 = 0b101, .has_5 = false } } },
- .sraiw => .{ .opcode = .OP_IMM_32, .data = .{ .sh = .{ .typ = 0b010000, .funct3 = 0b101, .has_5 = false } } },
-
-
- // OP_32
-
- .addw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0000000 } } },
- .subw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0100000 } } },
- .mulw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0000001 } } },
-
- .divw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b100, .funct7 = 0b0000001 } } },
- .divuw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b101, .funct7 = 0b0000001 } } },
-
- .remw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b110, .funct7 = 0b0000001 } } },
- .remuw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b111, .funct7 = 0b0000001 } } },
-
- .sllw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b001, .funct7 = 0b0000000 } } },
- .srlw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b101, .funct7 = 0b0000000 } } },
- .sraw => .{ .opcode = .OP_32, .data = .{ .ff = .{ .funct3 = 0b101, .funct7 = 0b0100000 } } },
-
-
- // OP_FP
-
- .fadds => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00000, .fmt = .S, .rm = 0b111 } } },
- .faddd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00000, .fmt = .D, .rm = 0b111 } } },
-
- .fsubs => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00001, .fmt = .S, .rm = 0b111 } } },
- .fsubd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00001, .fmt = .D, .rm = 0b111 } } },
-
- .fmuls => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00010, .fmt = .S, .rm = 0b111 } } },
- .fmuld => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00010, .fmt = .D, .rm = 0b111 } } },
-
- .fdivs => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00011, .fmt = .S, .rm = 0b111 } } },
- .fdivd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00011, .fmt = .D, .rm = 0b111 } } },
-
- .fmins => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00101, .fmt = .S, .rm = 0b000 } } },
- .fmind => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00101, .fmt = .D, .rm = 0b000 } } },
-
- .fmaxs => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00101, .fmt = .S, .rm = 0b001 } } },
- .fmaxd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00101, .fmt = .D, .rm = 0b001 } } },
-
- .fsqrts => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b01011, .fmt = .S, .rm = 0b111 } } },
- .fsqrtd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b01011, .fmt = .D, .rm = 0b111 } } },
-
- .fles => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .S, .rm = 0b000 } } },
- .fled => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .D, .rm = 0b000 } } },
-
- .flts => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .S, .rm = 0b001 } } },
- .fltd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .D, .rm = 0b001 } } },
-
- .feqs => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .S, .rm = 0b010 } } },
- .feqd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .D, .rm = 0b010 } } },
-
- .fsgnjns => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00100, .fmt = .S, .rm = 0b000 } } },
- .fsgnjnd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00100, .fmt = .D, .rm = 0b000 } } },
-
- .fsgnjxs => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00100, .fmt = .S, .rm = 0b0010} } },
- .fsgnjxd => .{ .opcode = .OP_FP, .data = .{ .fmt = .{ .funct5 = 0b00100, .fmt = .D, .rm = 0b0010} } },
-
-
- // LOAD
-
- .lb => .{ .opcode = .LOAD, .data = .{ .f = .{ .funct3 = 0b000 } } },
- .lh => .{ .opcode = .LOAD, .data = .{ .f = .{ .funct3 = 0b001 } } },
- .lw => .{ .opcode = .LOAD, .data = .{ .f = .{ .funct3 = 0b010 } } },
- .ld => .{ .opcode = .LOAD, .data = .{ .f = .{ .funct3 = 0b011 } } },
- .lbu => .{ .opcode = .LOAD, .data = .{ .f = .{ .funct3 = 0b100 } } },
- .lhu => .{ .opcode = .LOAD, .data = .{ .f = .{ .funct3 = 0b101 } } },
- .lwu => .{ .opcode = .LOAD, .data = .{ .f = .{ .funct3 = 0b110 } } },
-
-
- // STORE
-
- .sb => .{ .opcode = .STORE, .data = .{ .f = .{ .funct3 = 0b000 } } },
- .sh => .{ .opcode = .STORE, .data = .{ .f = .{ .funct3 = 0b001 } } },
- .sw => .{ .opcode = .STORE, .data = .{ .f = .{ .funct3 = 0b010 } } },
- .sd => .{ .opcode = .STORE, .data = .{ .f = .{ .funct3 = 0b011 } } },
-
-
- // LOAD_FP
-
- .flw => .{ .opcode = .LOAD_FP, .data = .{ .f = .{ .funct3 = 0b010 } } },
- .fld => .{ .opcode = .LOAD_FP, .data = .{ .f = .{ .funct3 = 0b011 } } },
-
- .vle8v => .{ .opcode = .LOAD_FP, .data = .{ .vecls = .{ .width = .@"8", .umop = .unit, .vm = true, .mop = .unit, .mew = false, .nf = 0b000 } } },
- .vle16v => .{ .opcode = .LOAD_FP, .data = .{ .vecls = .{ .width = .@"16", .umop = .unit, .vm = true, .mop = .unit, .mew = false, .nf = 0b000 } } },
- .vle32v => .{ .opcode = .LOAD_FP, .data = .{ .vecls = .{ .width = .@"32", .umop = .unit, .vm = true, .mop = .unit, .mew = false, .nf = 0b000 } } },
- .vle64v => .{ .opcode = .LOAD_FP, .data = .{ .vecls = .{ .width = .@"64", .umop = .unit, .vm = true, .mop = .unit, .mew = false, .nf = 0b000 } } },
-
-
- // STORE_FP
-
- .fsw => .{ .opcode = .STORE_FP, .data = .{ .f = .{ .funct3 = 0b010 } } },
- .fsd => .{ .opcode = .STORE_FP, .data = .{ .f = .{ .funct3 = 0b011 } } },
-
- .vse8v => .{ .opcode = .STORE_FP, .data = .{ .vecls = .{ .width = .@"8", .umop = .unit, .vm = true, .mop = .unit, .mew = false, .nf = 0b000 } } },
- .vse16v => .{ .opcode = .STORE_FP, .data = .{ .vecls = .{ .width = .@"16", .umop = .unit, .vm = true, .mop = .unit, .mew = false, .nf = 0b000 } } },
- .vse32v => .{ .opcode = .STORE_FP, .data = .{ .vecls = .{ .width = .@"32", .umop = .unit, .vm = true, .mop = .unit, .mew = false, .nf = 0b000 } } },
- .vse64v => .{ .opcode = .STORE_FP, .data = .{ .vecls = .{ .width = .@"64", .umop = .unit, .vm = true, .mop = .unit, .mew = false, .nf = 0b000 } } },
-
- .vsoxei8v => .{ .opcode = .STORE_FP, .data = .{ .vecls = .{ .width = .@"8", .umop = .unit, .vm = true, .mop = .ord, .mew = false, .nf = 0b000 } } },
-
- // JALR
-
- .jalr => .{ .opcode = .JALR, .data = .{ .f = .{ .funct3 = 0b000 } } },
-
-
- // LUI
-
- .lui => .{ .opcode = .LUI, .data = .{ .none = {} } },
-
-
- // AUIPC
-
- .auipc => .{ .opcode = .AUIPC, .data = .{ .none = {} } },
-
-
- // JAL
-
- .jal => .{ .opcode = .JAL, .data = .{ .none = {} } },
-
-
- // BRANCH
-
- .beq => .{ .opcode = .BRANCH, .data = .{ .f = .{ .funct3 = 0b000 } } },
-
-
- // SYSTEM
-
- .ecall => .{ .opcode = .SYSTEM, .data = .{ .f = .{ .funct3 = 0b000 } } },
- .ebreak => .{ .opcode = .SYSTEM, .data = .{ .f = .{ .funct3 = 0b000 } } },
-
- .csrrs => .{ .opcode = .SYSTEM, .data = .{ .f = .{ .funct3 = 0b010 } } },
-
-
- // NONE
-
- .unimp => .{ .opcode = .NONE, .data = .{ .f = .{ .funct3 = 0b000 } } },
-
-
- // MISC_MEM
-
- .fence => .{ .opcode = .MISC_MEM, .data = .{ .fence = .{ .funct3 = 0b000, .fm = .none } } },
- .fencetso => .{ .opcode = .MISC_MEM, .data = .{ .fence = .{ .funct3 = 0b000, .fm = .tso } } },
-
-
- // AMO
-
- .amoaddw => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .W, .funct5 = 0b00000 } } },
- .amoswapw => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .W, .funct5 = 0b00001 } } },
- // LR.W
- // SC.W
- .amoxorw => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .W, .funct5 = 0b00100 } } },
- .amoandw => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .W, .funct5 = 0b01100 } } },
- .amoorw => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .W, .funct5 = 0b01000 } } },
- .amominw => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .W, .funct5 = 0b10000 } } },
- .amomaxw => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .W, .funct5 = 0b10100 } } },
- .amominuw => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .W, .funct5 = 0b11000 } } },
- .amomaxuw => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .W, .funct5 = 0b11100 } } },
-
- .amoaddd => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .D, .funct5 = 0b00000 } } },
- .amoswapd => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .D, .funct5 = 0b00001 } } },
- // LR.D
- // SC.D
- .amoxord => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .D, .funct5 = 0b00100 } } },
- .amoandd => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .D, .funct5 = 0b01100 } } },
- .amoord => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .D, .funct5 = 0b01000 } } },
- .amomind => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .D, .funct5 = 0b10000 } } },
- .amomaxd => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .D, .funct5 = 0b10100 } } },
- .amominud => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .D, .funct5 = 0b11000 } } },
- .amomaxud => .{ .opcode = .AMO, .data = .{ .amo = .{ .width = .D, .funct5 = 0b11100 } } },
-
- // OP_V
- .vsetivli => .{ .opcode = .OP_V, .data = .{ .f = .{ .funct3 = 0b111 } } },
- .vsetvli => .{ .opcode = .OP_V, .data = .{ .f = .{ .funct3 = 0b111 } } },
- .vaddvv => .{ .opcode = .OP_V, .data = .{ .vecmath = .{ .vm = true, .funct6 = 0b000000, .funct3 = .OPIVV } } },
- .vsubvv => .{ .opcode = .OP_V, .data = .{ .vecmath = .{ .vm = true, .funct6 = 0b000010, .funct3 = .OPIVV } } },
-
- .vfaddvv => .{ .opcode = .OP_V, .data = .{ .vecmath = .{ .vm = true, .funct6 = 0b000000, .funct3 = .OPFVV } } },
- .vfsubvv => .{ .opcode = .OP_V, .data = .{ .vecmath = .{ .vm = true, .funct6 = 0b000010, .funct3 = .OPFVV } } },
-
- .vadcvv => .{ .opcode = .OP_V, .data = .{ .vecmath = .{ .vm = true, .funct6 = 0b010000, .funct3 = .OPMVV } } },
- .vmvvx => .{ .opcode = .OP_V, .data = .{ .vecmath = .{ .vm = true, .funct6 = 0b010111, .funct3 = .OPIVX } } },
-
- .vslidedownvx => .{ .opcode = .OP_V, .data = .{ .vecmath = .{ .vm = true, .funct6 = 0b001111, .funct3 = .OPIVX } } },
-
- // zig fmt: on
- };
- }
-};
-
-pub const InstEnc = enum {
- R,
- R4,
- I,
- S,
- B,
- U,
- J,
- fence,
- amo,
- system,
-
- pub fn fromMnemonic(mnem: Mnemonic) InstEnc {
- return switch (mnem) {
- .addi,
- .jalr,
- .sltiu,
- .xori,
- .andi,
-
- .slli,
- .srli,
- .srai,
-
- .slliw,
- .srliw,
- .sraiw,
-
- .ld,
- .lw,
- .lwu,
- .lh,
- .lhu,
- .lb,
- .lbu,
-
- .flw,
- .fld,
-
- .csrrs,
- .vsetivli,
- .vsetvli,
- => .I,
-
- .lui,
- .auipc,
- => .U,
-
- .sd,
- .sw,
- .sh,
- .sb,
-
- .fsd,
- .fsw,
- => .S,
-
- .jal,
- => .J,
-
- .beq,
- => .B,
-
- .slt,
- .sltu,
-
- .sll,
- .srl,
- .sra,
-
- .sllw,
- .srlw,
- .sraw,
-
- .div,
- .divu,
- .divw,
- .divuw,
-
- .rem,
- .remu,
- .remw,
- .remuw,
-
- .xor,
- .@"and",
- .@"or",
-
- .add,
- .addw,
-
- .sub,
- .subw,
-
- .mul,
- .mulw,
- .mulh,
- .mulhu,
- .mulhsu,
-
- .fadds,
- .faddd,
-
- .fsubs,
- .fsubd,
-
- .fmuls,
- .fmuld,
-
- .fdivs,
- .fdivd,
-
- .fmins,
- .fmind,
-
- .fmaxs,
- .fmaxd,
-
- .fsqrts,
- .fsqrtd,
-
- .fles,
- .fled,
-
- .flts,
- .fltd,
-
- .feqs,
- .feqd,
-
- .fsgnjns,
- .fsgnjnd,
-
- .fsgnjxs,
- .fsgnjxd,
-
- .vle8v,
- .vle16v,
- .vle32v,
- .vle64v,
-
- .vse8v,
- .vse16v,
- .vse32v,
- .vse64v,
-
- .vsoxei8v,
-
- .vaddvv,
- .vsubvv,
- .vfaddvv,
- .vfsubvv,
- .vadcvv,
- .vmvvx,
- .vslidedownvx,
- => .R,
-
- .ecall,
- .ebreak,
- .unimp,
- => .system,
-
- .fence,
- .fencetso,
- => .fence,
-
- .amoswapw,
- .amoaddw,
- .amoandw,
- .amoorw,
- .amoxorw,
- .amomaxw,
- .amominw,
- .amomaxuw,
- .amominuw,
-
- .amoswapd,
- .amoaddd,
- .amoandd,
- .amoord,
- .amoxord,
- .amomaxd,
- .amomind,
- .amomaxud,
- .amominud,
- => .amo,
- };
- }
-
- pub fn opsList(enc: InstEnc) [5]std.meta.FieldEnum(Operand) {
- return switch (enc) {
- // zig fmt: off
- .R => .{ .reg, .reg, .reg, .none, .none, },
- .R4 => .{ .reg, .reg, .reg, .reg, .none, },
- .I => .{ .reg, .reg, .imm, .none, .none, },
- .S => .{ .reg, .reg, .imm, .none, .none, },
- .B => .{ .reg, .reg, .imm, .none, .none, },
- .U => .{ .reg, .imm, .none, .none, .none, },
- .J => .{ .reg, .imm, .none, .none, .none, },
- .system => .{ .none, .none, .none, .none, .none, },
- .fence => .{ .barrier, .barrier, .none, .none, .none, },
- .amo => .{ .reg, .reg, .reg, .barrier, .barrier },
- // zig fmt: on
- };
- }
-};
-
-pub const Data = union(InstEnc) {
- R: packed struct {
- opcode: u7,
- rd: u5,
- funct3: u3,
- rs1: u5,
- rs2: u5,
- funct7: u7,
- },
- R4: packed struct {
- opcode: u7,
- rd: u5,
- funct3: u3,
- rs1: u5,
- rs2: u5,
- funct2: u2,
- rs3: u5,
- },
- I: packed struct {
- opcode: u7,
- rd: u5,
- funct3: u3,
- rs1: u5,
- imm0_11: u12,
- },
- S: packed struct {
- opcode: u7,
- imm0_4: u5,
- funct3: u3,
- rs1: u5,
- rs2: u5,
- imm5_11: u7,
- },
- B: packed struct {
- opcode: u7,
- imm11: u1,
- imm1_4: u4,
- funct3: u3,
- rs1: u5,
- rs2: u5,
- imm5_10: u6,
- imm12: u1,
- },
- U: packed struct {
- opcode: u7,
- rd: u5,
- imm12_31: u20,
- },
- J: packed struct {
- opcode: u7,
- rd: u5,
- imm12_19: u8,
- imm11: u1,
- imm1_10: u10,
- imm20: u1,
- },
- fence: packed struct {
- opcode: u7,
- rd: u5 = 0,
- funct3: u3,
- rs1: u5 = 0,
- succ: u4,
- pred: u4,
- fm: u4,
- },
- amo: packed struct {
- opcode: u7,
- rd: u5,
- funct3: u3,
- rs1: u5,
- rs2: u5,
- rl: bool,
- aq: bool,
- funct5: u5,
- },
- system: u32,
-
- comptime {
- for (std.meta.fields(Data)) |field| {
- assert(@bitSizeOf(field.type) == 32);
- }
- }
-
- pub fn toU32(self: Data) u32 {
- return switch (self) {
- .fence => |v| @as(u32, @intCast(v.opcode)) + (@as(u32, @intCast(v.rd)) << 7) + (@as(u32, @intCast(v.funct3)) << 12) + (@as(u32, @intCast(v.rs1)) << 15) + (@as(u32, @intCast(v.succ)) << 20) + (@as(u32, @intCast(v.pred)) << 24) + (@as(u32, @intCast(v.fm)) << 28),
- inline else => |v| @bitCast(v),
- .system => unreachable,
- };
- }
-
- pub fn construct(mnem: Mnemonic, ops: []const Operand) !Data {
- const inst_enc = InstEnc.fromMnemonic(mnem);
- const enc = mnem.encoding();
-
- // special mnemonics
- switch (mnem) {
- .ecall,
- .ebreak,
- .unimp,
- => {
- assert(ops.len == 0);
- return .{
- .I = .{
- .rd = Register.zero.encodeId(),
- .rs1 = Register.zero.encodeId(),
- .imm0_11 = switch (mnem) {
- .ecall => 0x000,
- .ebreak => 0x001,
- .unimp => 0x000,
- else => unreachable,
- },
-
- .opcode = @intFromEnum(enc.opcode),
- .funct3 = enc.data.f.funct3,
- },
- };
- },
- .csrrs => {
- assert(ops.len == 3);
-
- const csr = ops[0].csr;
- const rs1 = ops[1].reg;
- const rd = ops[2].reg;
-
- return .{
- .I = .{
- .rd = rd.encodeId(),
- .rs1 = rs1.encodeId(),
-
- .imm0_11 = @intFromEnum(csr),
-
- .opcode = @intFromEnum(enc.opcode),
- .funct3 = enc.data.f.funct3,
- },
- };
- },
- else => {},
- }
-
- switch (inst_enc) {
- .R => {
- assert(ops.len == 3);
- return .{
- .R = switch (enc.data) {
- .ff => |ff| .{
- .rd = ops[0].reg.encodeId(),
- .rs1 = ops[1].reg.encodeId(),
- .rs2 = ops[2].reg.encodeId(),
-
- .opcode = @intFromEnum(enc.opcode),
- .funct3 = ff.funct3,
- .funct7 = ff.funct7,
- },
- .fmt => |fmt| .{
- .rd = ops[0].reg.encodeId(),
- .rs1 = ops[1].reg.encodeId(),
- .rs2 = ops[2].reg.encodeId(),
-
- .opcode = @intFromEnum(enc.opcode),
- .funct3 = fmt.rm,
- .funct7 = (@as(u7, fmt.funct5) << 2) | @intFromEnum(fmt.fmt),
- },
- .vecls => |vec| .{
- .rd = ops[0].reg.encodeId(),
- .rs1 = ops[1].reg.encodeId(),
-
- .rs2 = @intFromEnum(vec.umop),
-
- .opcode = @intFromEnum(enc.opcode),
- .funct3 = @intFromEnum(vec.width),
- .funct7 = (@as(u7, vec.nf) << 4) | (@as(u7, @intFromBool(vec.mew)) << 3) | (@as(u7, @intFromEnum(vec.mop)) << 1) | @intFromBool(vec.vm),
- },
- .vecmath => |vec| .{
- .rd = ops[0].reg.encodeId(),
- .rs1 = ops[1].reg.encodeId(),
- .rs2 = ops[2].reg.encodeId(),
-
- .opcode = @intFromEnum(enc.opcode),
- .funct3 = @intFromEnum(vec.funct3),
- .funct7 = (@as(u7, vec.funct6) << 1) | @intFromBool(vec.vm),
- },
- else => unreachable,
- },
- };
- },
- .S => {
- assert(ops.len == 3);
- const umm = ops[2].imm.asBits(u12);
-
- return .{
- .S = .{
- .imm0_4 = @truncate(umm),
- .rs1 = ops[0].reg.encodeId(),
- .rs2 = ops[1].reg.encodeId(),
- .imm5_11 = @truncate(umm >> 5),
-
- .opcode = @intFromEnum(enc.opcode),
- .funct3 = enc.data.f.funct3,
- },
- };
- },
- .I => {
- assert(ops.len == 3);
- return .{
- .I = switch (enc.data) {
- .f => |f| .{
- .rd = ops[0].reg.encodeId(),
- .rs1 = ops[1].reg.encodeId(),
- .imm0_11 = ops[2].imm.asBits(u12),
-
- .opcode = @intFromEnum(enc.opcode),
- .funct3 = f.funct3,
- },
- .sh => |sh| .{
- .rd = ops[0].reg.encodeId(),
- .rs1 = ops[1].reg.encodeId(),
- .imm0_11 = (@as(u12, sh.typ) << 6) |
- if (sh.has_5) ops[2].imm.asBits(u6) else (@as(u6, 0) | ops[2].imm.asBits(u5)),
-
- .opcode = @intFromEnum(enc.opcode),
- .funct3 = sh.funct3,
- },
- else => unreachable,
- },
- };
- },
- .U => {
- assert(ops.len == 2);
- return .{
- .U = .{
- .rd = ops[0].reg.encodeId(),
- .imm12_31 = ops[1].imm.asBits(u20),
-
- .opcode = @intFromEnum(enc.opcode),
- },
- };
- },
- .J => {
- assert(ops.len == 2);
-
- const umm = ops[1].imm.asBits(u21);
- assert(umm % 4 == 0); // misaligned jump target
-
- return .{
- .J = .{
- .rd = ops[0].reg.encodeId(),
- .imm1_10 = @truncate(umm >> 1),
- .imm11 = @truncate(umm >> 11),
- .imm12_19 = @truncate(umm >> 12),
- .imm20 = @truncate(umm >> 20),
-
- .opcode = @intFromEnum(enc.opcode),
- },
- };
- },
- .B => {
- assert(ops.len == 3);
-
- const umm = ops[2].imm.asBits(u13);
- assert(umm % 4 == 0); // misaligned branch target
-
- return .{
- .B = .{
- .rs1 = ops[0].reg.encodeId(),
- .rs2 = ops[1].reg.encodeId(),
- .imm1_4 = @truncate(umm >> 1),
- .imm5_10 = @truncate(umm >> 5),
- .imm11 = @truncate(umm >> 11),
- .imm12 = @truncate(umm >> 12),
-
- .opcode = @intFromEnum(enc.opcode),
- .funct3 = enc.data.f.funct3,
- },
- };
- },
- .fence => {
- assert(ops.len == 2);
-
- const succ = ops[0].barrier;
- const pred = ops[1].barrier;
-
- return .{
- .fence = .{
- .succ = @intFromEnum(succ),
- .pred = @intFromEnum(pred),
-
- .opcode = @intFromEnum(enc.opcode),
- .funct3 = enc.data.fence.funct3,
- .fm = @intFromEnum(enc.data.fence.fm),
- },
- };
- },
- .amo => {
- assert(ops.len == 5);
-
- const rd = ops[0].reg;
- const rs1 = ops[1].reg;
- const rs2 = ops[2].reg;
- const rl = ops[3].barrier;
- const aq = ops[4].barrier;
-
- return .{
- .amo = .{
- .rd = rd.encodeId(),
- .rs1 = rs1.encodeId(),
- .rs2 = rs2.encodeId(),
-
- // TODO: https://github.com/ziglang/zig/issues/20113
- .rl = if (rl == .rl) true else false,
- .aq = if (aq == .aq) true else false,
-
- .opcode = @intFromEnum(enc.opcode),
- .funct3 = @intFromEnum(enc.data.amo.width),
- .funct5 = enc.data.amo.funct5,
- },
- };
- },
- else => std.debug.panic("TODO: construct {s}", .{@tagName(inst_enc)}),
- }
- }
-};
-
-pub fn findByMnemonic(mnem: Mnemonic, ops: []const Operand) !?Encoding {
- if (!verifyOps(mnem, ops)) return null;
-
- return .{
- .mnemonic = mnem,
- .data = try Data.construct(mnem, ops),
- };
-}
-
-fn verifyOps(mnem: Mnemonic, ops: []const Operand) bool {
- const inst_enc = InstEnc.fromMnemonic(mnem);
- const list = std.mem.sliceTo(&inst_enc.opsList(), .none);
- for (list, ops) |l, o| if (l != std.meta.activeTag(o)) return false;
- return true;
-}
-
-const std = @import("std");
-const assert = std.debug.assert;
-const log = std.log.scoped(.encoding);
-
-const Encoding = @This();
-const bits = @import("bits.zig");
-const Register = bits.Register;
-const encoder = @import("encoder.zig");
-const Instruction = encoder.Instruction;
-const Operand = Instruction.Operand;
-const OperandEnum = std.meta.FieldEnum(Operand);
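Note on the deleted file: the `Data` union above captures the core trick of the old encoder. Every RISC-V instruction format is a `packed struct` whose fields sum to exactly 32 bits (enforced by the `comptime` block), so encoding an instruction is a single `@bitCast`. A minimal self-contained sketch of that technique, using `addi a0, a0, 1` as the example; the struct mirrors the I-type layout above and the expected word is the standard RV64I encoding:

const std = @import("std");

// I-type layout, least-significant field first, 32 bits total.
const IType = packed struct {
    opcode: u7,
    rd: u5,
    funct3: u3,
    rs1: u5,
    imm0_11: u12,
};

test "packed struct encodes directly to the instruction word" {
    const inst: IType = .{
        .opcode = 0b0010011, // OP_IMM
        .rd = 10, // a0
        .funct3 = 0b000, // addi
        .rs1 = 10, // a0
        .imm0_11 = 1,
    };
    // Equivalent to the deleted `toU32`: the packed struct already is the
    // instruction's little-endian bit pattern.
    try std.testing.expectEqual(@as(u32, 0x0015_0513), @as(u32, @bitCast(inst)));
}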
diff --git a/src/arch/riscv64/Lower.zig b/src/arch/riscv64/Lower.zig
index 012f520485..fc8202adea 100644
--- a/src/arch/riscv64/Lower.zig
+++ b/src/arch/riscv64/Lower.zig
@@ -34,6 +34,8 @@ pub const Reloc = struct {
/// Relocs the lowered_inst_index and the next instruction.
load_symbol_reloc: bits.Symbol,
+ /// Relocs the lowered_inst_index and the next two instructions.
+ load_tlv_reloc: bits.Symbol,
/// Relocs the lowered_inst_index and the next instruction.
call_extern_fn_reloc: bits.Symbol,
};
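The new `load_tlv_reloc` covers three instructions because thread-local addresses are materialized with the standard RV64 local-exec TLS sequence, which the `pseudo_load_tlv` lowering below emits. Shown as pseudo-assembly (a sketch; `rd` and `sym` are placeholders, and the `%tprel` parts are what the relocation patches in):

    lui  rd, %tprel_hi(sym)      # upper bits of the tp-relative offset
    add  rd, rd, tp              # add the thread pointer
    addi rd, rd, %tprel_lo(sym)  # low 12 bits of the offset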
@@ -61,453 +63,418 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index, options: struct {
log.debug("lowerMir {}", .{inst});
switch (inst.tag) {
else => try lower.generic(inst),
- .pseudo => switch (inst.ops) {
- .pseudo_dbg_line_column,
- .pseudo_dbg_epilogue_begin,
- .pseudo_dbg_prologue_end,
- .pseudo_dead,
- => {},
-
- .pseudo_load_rm, .pseudo_store_rm => {
- const rm = inst.data.rm;
-
- const frame_loc: Mir.FrameLoc = if (options.allow_frame_locs)
- rm.m.toFrameLoc(lower.mir)
- else
- .{ .base = .s0, .disp = 0 };
-
- switch (inst.ops) {
- .pseudo_load_rm => {
- const dest_reg = rm.r;
- const dest_reg_class = dest_reg.class();
-
- const src_size = rm.m.mod.size;
- const unsigned = rm.m.mod.unsigned;
-
- const tag: Encoding.Mnemonic = switch (dest_reg_class) {
- .int => switch (src_size) {
- .byte => if (unsigned) .lbu else .lb,
- .hword => if (unsigned) .lhu else .lh,
- .word => if (unsigned) .lwu else .lw,
- .dword => .ld,
- },
- .float => switch (src_size) {
- .byte => unreachable, // Zig does not support 8-bit floats
- .hword => return lower.fail("TODO: lowerMir pseudo_load_rm support 16-bit floats", .{}),
- .word => .flw,
- .dword => .fld,
- },
- .vector => switch (src_size) {
- .byte => .vle8v,
- .hword => .vle32v,
- .word => .vle32v,
- .dword => .vle64v,
- },
- };
-
- switch (dest_reg_class) {
- .int, .float => {
- try lower.emit(tag, &.{
- .{ .reg = rm.r },
- .{ .reg = frame_loc.base },
- .{ .imm = Immediate.s(frame_loc.disp) },
- });
- },
- .vector => {
- assert(frame_loc.disp == 0);
- try lower.emit(tag, &.{
- .{ .reg = rm.r },
- .{ .reg = frame_loc.base },
- .{ .reg = .zero },
- });
- },
- }
- },
- .pseudo_store_rm => {
- const src_reg = rm.r;
- const src_reg_class = src_reg.class();
-
- const dest_size = rm.m.mod.size;
-
- const tag: Encoding.Mnemonic = switch (src_reg_class) {
- .int => switch (dest_size) {
- .byte => .sb,
- .hword => .sh,
- .word => .sw,
- .dword => .sd,
- },
- .float => switch (dest_size) {
- .byte => unreachable, // Zig does not support 8-bit floats
- .hword => return lower.fail("TODO: lowerMir pseudo_store_rm support 16-bit floats", .{}),
- .word => .fsw,
- .dword => .fsd,
- },
- .vector => switch (dest_size) {
- .byte => .vse8v,
- .hword => .vse16v,
- .word => .vse32v,
- .dword => .vse64v,
- },
- };
-
- switch (src_reg_class) {
- .int, .float => {
- try lower.emit(tag, &.{
- .{ .reg = frame_loc.base },
- .{ .reg = rm.r },
- .{ .imm = Immediate.s(frame_loc.disp) },
- });
- },
- .vector => {
- assert(frame_loc.disp == 0);
- try lower.emit(tag, &.{
- .{ .reg = rm.r },
- .{ .reg = frame_loc.base },
- .{ .reg = .zero },
- });
- },
- }
- },
- else => unreachable,
- }
- },
-
- .pseudo_mv => {
- const rr = inst.data.rr;
-
- const dst_class = rr.rd.class();
- const src_class = rr.rs.class();
-
- switch (src_class) {
- .float => switch (dst_class) {
- .float => {
- try lower.emit(if (lower.hasFeature(.d)) .fsgnjnd else .fsgnjns, &.{
- .{ .reg = rr.rd },
- .{ .reg = rr.rs },
- .{ .reg = rr.rs },
- });
+ .pseudo_dbg_line_column,
+ .pseudo_dbg_epilogue_begin,
+ .pseudo_dbg_prologue_end,
+ .pseudo_dead,
+ => {},
+
+ .pseudo_load_rm, .pseudo_store_rm => {
+ const rm = inst.data.rm;
+
+ const frame_loc: Mir.FrameLoc = if (options.allow_frame_locs)
+ rm.m.toFrameLoc(lower.mir)
+ else
+ .{ .base = .s0, .disp = 0 };
+
+ switch (inst.tag) {
+ .pseudo_load_rm => {
+ const dest_reg = rm.r;
+ const dest_reg_class = dest_reg.class();
+
+ const src_size = rm.m.mod.size;
+ const unsigned = rm.m.mod.unsigned;
+
+ const mnem: Mnemonic = switch (dest_reg_class) {
+ .int => switch (src_size) {
+ .byte => if (unsigned) .lbu else .lb,
+ .hword => if (unsigned) .lhu else .lh,
+ .word => if (unsigned) .lwu else .lw,
+ .dword => .ld,
},
- .int, .vector => return lower.fail("TODO: lowerMir pseudo_mv float -> {s}", .{@tagName(dst_class)}),
- },
- .int => switch (dst_class) {
- .int => {
- try lower.emit(.addi, &.{
- .{ .reg = rr.rd },
- .{ .reg = rr.rs },
- .{ .imm = Immediate.s(0) },
- });
+ .float => switch (src_size) {
+ .byte => unreachable, // Zig does not support 8-bit floats
+ .hword => return lower.fail("TODO: lowerMir pseudo_load_rm support 16-bit floats", .{}),
+ .word => .flw,
+ .dword => .fld,
},
- .vector => {
- try lower.emit(.vmvvx, &.{
- .{ .reg = rr.rd },
- .{ .reg = rr.rs },
- .{ .reg = .x0 },
+ .vector => switch (src_size) {
+ .byte => .vle8v,
+ .hword => .vle32v,
+ .word => .vle32v,
+ .dword => .vle64v,
+ },
+ };
+
+ switch (dest_reg_class) {
+ .int, .float => {
+ try lower.emit(mnem, &.{
+ .{ .reg = rm.r },
+ .{ .reg = frame_loc.base },
+ .{ .imm = Immediate.s(frame_loc.disp) },
});
},
- .float => return lower.fail("TODO: lowerMir pseudo_mv int -> {s}", .{@tagName(dst_class)}),
- },
- .vector => switch (dst_class) {
- .int => {
- try lower.emit(.vadcvv, &.{
- .{ .reg = rr.rd },
+ .vector => {
+ assert(frame_loc.disp == 0);
+ try lower.emit(mnem, &.{
+ .{ .reg = rm.r },
+ .{ .reg = frame_loc.base },
.{ .reg = .zero },
- .{ .reg = rr.rs },
});
},
- .float, .vector => return lower.fail("TODO: lowerMir pseudo_mv vector -> {s}", .{@tagName(dst_class)}),
- },
- }
- },
-
- .pseudo_j => {
- try lower.emit(.jal, &.{
- .{ .reg = .zero },
- .{ .imm = lower.reloc(.{ .inst = inst.data.inst }) },
- });
- },
-
- .pseudo_spill_regs => try lower.pushPopRegList(true, inst.data.reg_list),
- .pseudo_restore_regs => try lower.pushPopRegList(false, inst.data.reg_list),
-
- .pseudo_load_symbol => {
- const payload = inst.data.payload;
- const data = lower.mir.extraData(Mir.LoadSymbolPayload, payload).data;
- const dst_reg: bits.Register = @enumFromInt(data.register);
- assert(dst_reg.class() == .int);
-
- try lower.emit(.lui, &.{
- .{ .reg = dst_reg },
- .{ .imm = lower.reloc(.{
- .load_symbol_reloc = .{
- .atom_index = data.atom_index,
- .sym_index = data.sym_index,
+ }
+ },
+ .pseudo_store_rm => {
+ const src_reg = rm.r;
+ const src_reg_class = src_reg.class();
+
+ const dest_size = rm.m.mod.size;
+
+ const mnem: Mnemonic = switch (src_reg_class) {
+ .int => switch (dest_size) {
+ .byte => .sb,
+ .hword => .sh,
+ .word => .sw,
+ .dword => .sd,
},
- }) },
- });
-
- // the above reloc implies this one
- try lower.emit(.addi, &.{
- .{ .reg = dst_reg },
- .{ .reg = dst_reg },
- .{ .imm = Immediate.s(0) },
- });
- },
-
- .pseudo_lea_rm => {
- const rm = inst.data.rm;
- assert(rm.r.class() == .int);
-
- const frame: Mir.FrameLoc = if (options.allow_frame_locs)
- rm.m.toFrameLoc(lower.mir)
- else
- .{ .base = .s0, .disp = 0 };
-
- try lower.emit(.addi, &.{
- .{ .reg = rm.r },
- .{ .reg = frame.base },
- .{ .imm = Immediate.s(frame.disp) },
- });
- },
-
- .pseudo_fabs => {
- const fabs = inst.data.fabs;
- assert(fabs.rs.class() == .float and fabs.rd.class() == .float);
-
- const mnem: Encoding.Mnemonic = switch (fabs.bits) {
- 16 => return lower.fail("TODO: airAbs Float 16", .{}),
- 32 => .fsgnjxs,
- 64 => .fsgnjxd,
- 80 => return lower.fail("TODO: airAbs Float 80", .{}),
- 128 => return lower.fail("TODO: airAbs Float 128", .{}),
- else => unreachable,
- };
-
- try lower.emit(mnem, &.{
- .{ .reg = fabs.rs },
- .{ .reg = fabs.rd },
- .{ .reg = fabs.rd },
- });
- },
-
- .pseudo_compare => {
- const compare = inst.data.compare;
- const op = compare.op;
-
- const rd = compare.rd;
- const rs1 = compare.rs1;
- const rs2 = compare.rs2;
-
- const class = rs1.class();
- const ty = compare.ty;
- const size = std.math.ceilPowerOfTwo(u64, ty.bitSize(pt)) catch {
- return lower.fail("pseudo_compare size {}", .{ty.bitSize(pt)});
- };
-
- const is_unsigned = ty.isUnsignedInt(pt.zcu);
-
- const less_than: Encoding.Mnemonic = if (is_unsigned) .sltu else .slt;
-
- switch (class) {
- .int => switch (op) {
- .eq => {
- try lower.emit(.xor, &.{
- .{ .reg = rd },
- .{ .reg = rs1 },
- .{ .reg = rs2 },
- });
-
- try lower.emit(.sltiu, &.{
- .{ .reg = rd },
- .{ .reg = rd },
- .{ .imm = Immediate.s(1) },
- });
+ .float => switch (dest_size) {
+ .byte => unreachable, // Zig does not support 8-bit floats
+ .hword => return lower.fail("TODO: lowerMir pseudo_store_rm support 16-bit floats", .{}),
+ .word => .fsw,
+ .dword => .fsd,
},
- .neq => {
- try lower.emit(.xor, &.{
- .{ .reg = rd },
- .{ .reg = rs1 },
- .{ .reg = rs2 },
- });
-
- try lower.emit(.sltu, &.{
- .{ .reg = rd },
- .{ .reg = .zero },
- .{ .reg = rd },
- });
+ .vector => switch (dest_size) {
+ .byte => .vse8v,
+ .hword => .vse16v,
+ .word => .vse32v,
+ .dword => .vse64v,
},
- .gt => {
- try lower.emit(less_than, &.{
- .{ .reg = rd },
- .{ .reg = rs1 },
- .{ .reg = rs2 },
+ };
+
+ switch (src_reg_class) {
+ .int, .float => {
+ try lower.emit(mnem, &.{
+ .{ .reg = frame_loc.base },
+ .{ .reg = rm.r },
+ .{ .imm = Immediate.s(frame_loc.disp) },
});
},
- .gte => {
- try lower.emit(less_than, &.{
- .{ .reg = rd },
- .{ .reg = rs1 },
- .{ .reg = rs2 },
- });
-
- try lower.emit(.xori, &.{
- .{ .reg = rd },
- .{ .reg = rd },
- .{ .imm = Immediate.s(1) },
- });
- },
- .lt => {
- try lower.emit(less_than, &.{
- .{ .reg = rd },
- .{ .reg = rs1 },
- .{ .reg = rs2 },
+ .vector => {
+ assert(frame_loc.disp == 0);
+ try lower.emit(mnem, &.{
+ .{ .reg = rm.r },
+ .{ .reg = frame_loc.base },
+ .{ .reg = .zero },
});
},
- .lte => {
- try lower.emit(less_than, &.{
- .{ .reg = rd },
- .{ .reg = rs2 },
- .{ .reg = rs1 },
- });
+ }
+ },
+ else => unreachable,
+ }
+ },
- try lower.emit(.xori, &.{
- .{ .reg = rd },
- .{ .reg = rd },
- .{ .imm = Immediate.s(1) },
- });
- },
+ .pseudo_mv => {
+ const rr = inst.data.rr;
+
+ const dst_class = rr.rd.class();
+ const src_class = rr.rs.class();
+
+ switch (src_class) {
+ .float => switch (dst_class) {
+ .float => {
+ try lower.emit(if (lower.hasFeature(.d)) .fsgnjnd else .fsgnjns, &.{
+ .{ .reg = rr.rd },
+ .{ .reg = rr.rs },
+ .{ .reg = rr.rs },
+ });
},
- .float => switch (op) {
- // eq
- .eq => {
- try lower.emit(if (size == 64) .feqd else .feqs, &.{
- .{ .reg = rd },
- .{ .reg = rs1 },
- .{ .reg = rs2 },
- });
- },
- // !(eq)
- .neq => {
- try lower.emit(if (size == 64) .feqd else .feqs, &.{
- .{ .reg = rd },
- .{ .reg = rs1 },
- .{ .reg = rs2 },
- });
- try lower.emit(.xori, &.{
- .{ .reg = rd },
- .{ .reg = rd },
- .{ .imm = Immediate.s(1) },
- });
- },
- .lt => {
- try lower.emit(if (size == 64) .fltd else .flts, &.{
- .{ .reg = rd },
- .{ .reg = rs1 },
- .{ .reg = rs2 },
- });
- },
- .lte => {
- try lower.emit(if (size == 64) .fled else .fles, &.{
- .{ .reg = rd },
- .{ .reg = rs1 },
- .{ .reg = rs2 },
- });
- },
- .gt => {
- try lower.emit(if (size == 64) .fltd else .flts, &.{
- .{ .reg = rd },
- .{ .reg = rs2 },
- .{ .reg = rs1 },
- });
- },
- .gte => {
- try lower.emit(if (size == 64) .fled else .fles, &.{
- .{ .reg = rd },
- .{ .reg = rs2 },
- .{ .reg = rs1 },
- });
- },
+ .int, .vector => return lower.fail("TODO: lowerMir pseudo_mv float -> {s}", .{@tagName(dst_class)}),
+ },
+ .int => switch (dst_class) {
+ .int => {
+ try lower.emit(.addi, &.{
+ .{ .reg = rr.rd },
+ .{ .reg = rr.rs },
+ .{ .imm = Immediate.s(0) },
+ });
+ },
+ .vector => {
+ try lower.emit(.vmvvx, &.{
+ .{ .reg = rr.rd },
+ .{ .reg = rr.rs },
+ .{ .reg = .x0 },
+ });
+ },
+ .float => return lower.fail("TODO: lowerMir pseudo_mv int -> {s}", .{@tagName(dst_class)}),
+ },
+ .vector => switch (dst_class) {
+ .int => {
+ try lower.emit(.vadcvv, &.{
+ .{ .reg = rr.rd },
+ .{ .reg = .zero },
+ .{ .reg = rr.rs },
+ });
+ },
+ .float, .vector => return lower.fail("TODO: lowerMir pseudo_mv vector -> {s}", .{@tagName(dst_class)}),
+ },
+ }
+ },
+
+ .pseudo_j => {
+ const j_type = inst.data.j_type;
+ try lower.emit(.jal, &.{
+ .{ .reg = j_type.rd },
+ .{ .imm = lower.reloc(.{ .inst = j_type.inst }) },
+ });
+ },
+
+ .pseudo_spill_regs => try lower.pushPopRegList(true, inst.data.reg_list),
+ .pseudo_restore_regs => try lower.pushPopRegList(false, inst.data.reg_list),
+
+ .pseudo_load_symbol => {
+ const payload = inst.data.reloc;
+ const dst_reg = payload.register;
+ assert(dst_reg.class() == .int);
+
+ try lower.emit(.lui, &.{
+ .{ .reg = dst_reg },
+ .{ .imm = lower.reloc(.{
+ .load_symbol_reloc = .{
+ .atom_index = payload.atom_index,
+ .sym_index = payload.sym_index,
+ },
+ }) },
+ });
+
+ // the reloc above implies this one
+ try lower.emit(.addi, &.{
+ .{ .reg = dst_reg },
+ .{ .reg = dst_reg },
+ .{ .imm = Immediate.s(0) },
+ });
+ },
+
+ .pseudo_load_tlv => {
+ const payload = inst.data.reloc;
+ const dst_reg = payload.register;
+ assert(dst_reg.class() == .int);
+
+ try lower.emit(.lui, &.{
+ .{ .reg = dst_reg },
+ .{ .imm = lower.reloc(.{
+ .load_tlv_reloc = .{
+ .atom_index = payload.atom_index,
+ .sym_index = payload.sym_index,
},
- .vector => return lower.fail("TODO: lowerMir pseudo_cmp vector", .{}),
- }
- },
-
- .pseudo_not => {
- const rr = inst.data.rr;
- assert(rr.rs.class() == .int and rr.rd.class() == .int);
-
- // mask out any other bits that aren't the boolean
- try lower.emit(.andi, &.{
- .{ .reg = rr.rs },
- .{ .reg = rr.rs },
- .{ .imm = Immediate.s(1) },
- });
-
- try lower.emit(.sltiu, &.{
- .{ .reg = rr.rd },
- .{ .reg = rr.rs },
- .{ .imm = Immediate.s(1) },
- });
- },
-
- .pseudo_extern_fn_reloc => {
- const inst_reloc = inst.data.reloc;
-
- try lower.emit(.auipc, &.{
- .{ .reg = .ra },
- .{ .imm = lower.reloc(
- .{ .call_extern_fn_reloc = .{
- .atom_index = inst_reloc.atom_index,
- .sym_index = inst_reloc.sym_index,
- } },
- ) },
- });
-
- try lower.emit(.jalr, &.{
- .{ .reg = .ra },
- .{ .reg = .ra },
- .{ .imm = Immediate.s(0) },
- });
- },
-
- .pseudo_amo => {
- const amo = inst.data.amo;
- const is_d = amo.ty.abiSize(pt) == 8;
- const is_un = amo.ty.isUnsignedInt(pt.zcu);
-
- const mnem: Encoding.Mnemonic = switch (amo.op) {
- // zig fmt: off
- .SWAP => if (is_d) .amoswapd else .amoswapw,
- .ADD => if (is_d) .amoaddd else .amoaddw,
- .AND => if (is_d) .amoandd else .amoandw,
- .OR => if (is_d) .amoord else .amoorw,
- .XOR => if (is_d) .amoxord else .amoxorw,
- .MAX => if (is_d) if (is_un) .amomaxud else .amomaxd else if (is_un) .amomaxuw else .amomaxw,
- .MIN => if (is_d) if (is_un) .amominud else .amomind else if (is_un) .amominuw else .amominw,
- // zig fmt: on
- };
-
- try lower.emit(mnem, &.{
- .{ .reg = inst.data.amo.rd },
- .{ .reg = inst.data.amo.rs1 },
- .{ .reg = inst.data.amo.rs2 },
- .{ .barrier = inst.data.amo.rl },
- .{ .barrier = inst.data.amo.aq },
- });
- },
-
- .pseudo_fence => {
- const fence = inst.data.fence;
-
- try lower.emit(switch (fence.fm) {
- .tso => .fencetso,
- .none => .fence,
- }, &.{
- .{ .barrier = fence.succ },
- .{ .barrier = fence.pred },
- });
- },
-
- else => return lower.fail("TODO lower: psuedo {s}", .{@tagName(inst.ops)}),
+ }) },
+ });
+
+ try lower.emit(.add, &.{
+ .{ .reg = dst_reg },
+ .{ .reg = dst_reg },
+ .{ .reg = .tp },
+ });
+
+ try lower.emit(.addi, &.{
+ .{ .reg = dst_reg },
+ .{ .reg = dst_reg },
+ .{ .imm = Immediate.s(0) },
+ });
+ },
+
+ .pseudo_lea_rm => {
+ const rm = inst.data.rm;
+ assert(rm.r.class() == .int);
+
+ const frame: Mir.FrameLoc = if (options.allow_frame_locs)
+ rm.m.toFrameLoc(lower.mir)
+ else
+ .{ .base = .s0, .disp = 0 };
+
+ try lower.emit(.addi, &.{
+ .{ .reg = rm.r },
+ .{ .reg = frame.base },
+ .{ .imm = Immediate.s(frame.disp) },
+ });
+ },
+
+ .pseudo_compare => {
+ const compare = inst.data.compare;
+ const op = compare.op;
+
+ const rd = compare.rd;
+ const rs1 = compare.rs1;
+ const rs2 = compare.rs2;
+
+ const class = rs1.class();
+ const ty = compare.ty;
+ const size = std.math.ceilPowerOfTwo(u64, ty.bitSize(pt)) catch {
+ return lower.fail("pseudo_compare size {}", .{ty.bitSize(pt)});
+ };
+
+ const is_unsigned = ty.isUnsignedInt(pt.zcu);
+ const less_than: Mnemonic = if (is_unsigned) .sltu else .slt;
+
+ switch (class) {
+ .int => switch (op) {
+ .eq => {
+ try lower.emit(.xor, &.{
+ .{ .reg = rd },
+ .{ .reg = rs1 },
+ .{ .reg = rs2 },
+ });
+
+ try lower.emit(.sltiu, &.{
+ .{ .reg = rd },
+ .{ .reg = rd },
+ .{ .imm = Immediate.s(1) },
+ });
+ },
+ .neq => {
+ try lower.emit(.xor, &.{
+ .{ .reg = rd },
+ .{ .reg = rs1 },
+ .{ .reg = rs2 },
+ });
+
+ try lower.emit(.sltu, &.{
+ .{ .reg = rd },
+ .{ .reg = .zero },
+ .{ .reg = rd },
+ });
+ },
+ .gt => {
+ try lower.emit(less_than, &.{
+ .{ .reg = rd },
+ .{ .reg = rs2 },
+ .{ .reg = rs1 },
+ });
+ },
+ .gte => {
+ try lower.emit(less_than, &.{
+ .{ .reg = rd },
+ .{ .reg = rs1 },
+ .{ .reg = rs2 },
+ });
+ try lower.emit(.xori, &.{
+ .{ .reg = rd },
+ .{ .reg = rd },
+ .{ .imm = Immediate.s(1) },
+ });
+ },
+ .lt => {
+ try lower.emit(less_than, &.{
+ .{ .reg = rd },
+ .{ .reg = rs1 },
+ .{ .reg = rs2 },
+ });
+ },
+ .lte => {
+ try lower.emit(less_than, &.{
+ .{ .reg = rd },
+ .{ .reg = rs2 },
+ .{ .reg = rs1 },
+ });
+
+ try lower.emit(.xori, &.{
+ .{ .reg = rd },
+ .{ .reg = rd },
+ .{ .imm = Immediate.s(1) },
+ });
+ },
+ },
+ .float => switch (op) {
+ // eq
+ .eq => {
+ try lower.emit(if (size == 64) .feqd else .feqs, &.{
+ .{ .reg = rd },
+ .{ .reg = rs1 },
+ .{ .reg = rs2 },
+ });
+ },
+ // !(eq)
+ .neq => {
+ try lower.emit(if (size == 64) .feqd else .feqs, &.{
+ .{ .reg = rd },
+ .{ .reg = rs1 },
+ .{ .reg = rs2 },
+ });
+ try lower.emit(.xori, &.{
+ .{ .reg = rd },
+ .{ .reg = rd },
+ .{ .imm = Immediate.s(1) },
+ });
+ },
+ .lt => {
+ try lower.emit(if (size == 64) .fltd else .flts, &.{
+ .{ .reg = rd },
+ .{ .reg = rs1 },
+ .{ .reg = rs2 },
+ });
+ },
+ .lte => {
+ try lower.emit(if (size == 64) .fled else .fles, &.{
+ .{ .reg = rd },
+ .{ .reg = rs1 },
+ .{ .reg = rs2 },
+ });
+ },
+ .gt => {
+ try lower.emit(if (size == 64) .fltd else .flts, &.{
+ .{ .reg = rd },
+ .{ .reg = rs2 },
+ .{ .reg = rs1 },
+ });
+ },
+ .gte => {
+ try lower.emit(if (size == 64) .fled else .fles, &.{
+ .{ .reg = rd },
+ .{ .reg = rs2 },
+ .{ .reg = rs1 },
+ });
+ },
+ },
+ .vector => return lower.fail("TODO: lowerMir pseudo_compare vector", .{}),
+ }
+ },
+
+ .pseudo_not => {
+ const rr = inst.data.rr;
+ assert(rr.rs.class() == .int and rr.rd.class() == .int);
+
+ // mask out any other bits that aren't the boolean
+ try lower.emit(.andi, &.{
+ .{ .reg = rr.rs },
+ .{ .reg = rr.rs },
+ .{ .imm = Immediate.s(1) },
+ });
+
+ try lower.emit(.sltiu, &.{
+ .{ .reg = rr.rd },
+ .{ .reg = rr.rs },
+ .{ .imm = Immediate.s(1) },
+ });
+ },
+
+ .pseudo_extern_fn_reloc => {
+ const inst_reloc = inst.data.reloc;
+ const link_reg = inst_reloc.register;
+
+ try lower.emit(.auipc, &.{
+ .{ .reg = link_reg },
+ .{ .imm = lower.reloc(
+ .{ .call_extern_fn_reloc = .{
+ .atom_index = inst_reloc.atom_index,
+ .sym_index = inst_reloc.sym_index,
+ } },
+ ) },
+ });
+
+ try lower.emit(.jalr, &.{
+ .{ .reg = link_reg },
+ .{ .reg = link_reg },
+ .{ .imm = Immediate.s(0) },
+ });
},
}
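The integer `pseudo_compare` lowering above leans on a few branch-free identities over `slt`/`sltu`: equality reduces to `xor` plus `sltiu rd, rd, 1`, greater-than is `slt` with swapped operands, and the `gte`/`lte` cases invert with `xori rd, rd, 1`. A small self-contained check of those identities, written in plain Zig independent of the backend:

const std = @import("std");

fn slt(a: i64, b: i64) u1 {
    return @intFromBool(a < b);
}

test "pseudo_compare identities" {
    const a: i64 = 3;
    const b: i64 = 7;
    // eq:  xor + sltiu 1  =>  (a ^ b) == 0
    try std.testing.expect((@intFromBool(a == b) == 1) == ((a ^ b) == 0));
    // gt:  slt with swapped operands  =>  b < a
    try std.testing.expectEqual(@intFromBool(a > b), slt(b, a));
    // gte: slt + xori 1  =>  !(a < b)
    try std.testing.expectEqual(@intFromBool(a >= b), slt(a, b) ^ 1);
    // lte: swapped slt + xori 1  =>  !(b < a)
    try std.testing.expectEqual(@intFromBool(a <= b), slt(b, a) ^ 1);
}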
@@ -518,49 +485,57 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index, options: struct {
}
fn generic(lower: *Lower, inst: Mir.Inst) Error!void {
- const mnemonic = std.meta.stringToEnum(Encoding.Mnemonic, @tagName(inst.tag)) orelse {
- return lower.fail("generic inst name '{s}' with op {s} doesn't match with a mnemonic", .{
- @tagName(inst.tag),
- @tagName(inst.ops),
- });
- };
- try lower.emit(mnemonic, switch (inst.ops) {
+ const mnemonic = inst.tag;
+ try lower.emit(mnemonic, switch (inst.data) {
.none => &.{},
- .ri => &.{
- .{ .reg = inst.data.u_type.rd },
- .{ .imm = inst.data.u_type.imm20 },
+ .u_type => |u| &.{
+ .{ .reg = u.rd },
+ .{ .imm = u.imm20 },
+ },
+ .i_type => |i| &.{
+ .{ .reg = i.rd },
+ .{ .reg = i.rs1 },
+ .{ .imm = i.imm12 },
+ },
+ .rr => |rr| &.{
+ .{ .reg = rr.rd },
+ .{ .reg = rr.rs },
},
- .rr => &.{
- .{ .reg = inst.data.rr.rd },
- .{ .reg = inst.data.rr.rs },
+ .b_type => |b| &.{
+ .{ .reg = b.rs1 },
+ .{ .reg = b.rs2 },
+ .{ .imm = lower.reloc(.{ .inst = b.inst }) },
},
- .rri => &.{
- .{ .reg = inst.data.i_type.rd },
- .{ .reg = inst.data.i_type.rs1 },
- .{ .imm = inst.data.i_type.imm12 },
+ .r_type => |r| &.{
+ .{ .reg = r.rd },
+ .{ .reg = r.rs1 },
+ .{ .reg = r.rs2 },
},
- .rr_inst => &.{
- .{ .reg = inst.data.b_type.rs1 },
- .{ .reg = inst.data.b_type.rs2 },
- .{ .imm = lower.reloc(.{ .inst = inst.data.b_type.inst }) },
+ .csr => |csr| &.{
+ .{ .csr = csr.csr },
+ .{ .reg = csr.rs1 },
+ .{ .reg = csr.rd },
},
- .rrr => &.{
- .{ .reg = inst.data.r_type.rd },
- .{ .reg = inst.data.r_type.rs1 },
- .{ .reg = inst.data.r_type.rs2 },
+ .amo => |amo| &.{
+ .{ .reg = amo.rd },
+ .{ .reg = amo.rs1 },
+ .{ .reg = amo.rs2 },
+ .{ .barrier = amo.rl },
+ .{ .barrier = amo.aq },
},
- .csr => &.{
- .{ .csr = inst.data.csr.csr },
- .{ .reg = inst.data.csr.rs1 },
- .{ .reg = inst.data.csr.rd },
+ .fence => |fence| &.{
+ .{ .barrier = fence.succ },
+ .{ .barrier = fence.pred },
},
- else => return lower.fail("TODO: generic lower ops {s}", .{@tagName(inst.ops)}),
+ else => return lower.fail("TODO: generic lower {s}", .{@tagName(inst.data)}),
});
}
-fn emit(lower: *Lower, mnemonic: Encoding.Mnemonic, ops: []const Instruction.Operand) !void {
- lower.result_insts[lower.result_insts_len] =
- try Instruction.new(mnemonic, ops);
+fn emit(lower: *Lower, mnemonic: Mnemonic, ops: []const Instruction.Operand) !void {
+ const lir = encoding.Lir.fromMnem(mnemonic);
+ const inst = Instruction.fromLir(lir, ops);
+
+ lower.result_insts[lower.result_insts_len] = inst;
lower.result_insts_len += 1;
}
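`generic` now switches directly on the tagged `Mir.Inst.Data` union, so the operand list falls out of payload captures instead of the old `stringToEnum` plus `Ops` double dispatch. A reduced sketch of the pattern; the union here is a stand-in, not the backend's full `Data`:

const std = @import("std");

const Data = union(enum) {
    none,
    rr: struct { rd: u5, rs: u5 },
    i_type: struct { rd: u5, rs1: u5, imm12: i12 },
};

fn operandCount(data: Data) usize {
    return switch (data) {
        .none => 0,
        .rr => 2,
        .i_type => 3,
    };
}

test "payload-capturing dispatch" {
    const data: Data = .{ .i_type = .{ .rd = 10, .rs1 = 10, .imm12 = 1 } };
    try std.testing.expectEqual(@as(usize, 3), operandCount(data));
}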
@@ -582,7 +557,7 @@ fn pushPopRegList(lower: *Lower, comptime spilling: bool, reg_list: Mir.Register
const reg = abi.Registers.all_preserved[i];
const reg_class = reg.class();
- const load_inst: Encoding.Mnemonic, const store_inst: Encoding.Mnemonic = switch (reg_class) {
+ const load_inst: Mnemonic, const store_inst: Mnemonic = switch (reg_class) {
.int => .{ .ld, .sd },
.float => .{ .fld, .fsd },
.vector => unreachable,
@@ -620,20 +595,22 @@ fn hasFeature(lower: *Lower, feature: std.Target.riscv.Feature) bool {
}
const Lower = @This();
-
-const abi = @import("abi.zig");
-const assert = std.debug.assert;
-const bits = @import("bits.zig");
-const encoder = @import("encoder.zig");
-const link = @import("../../link.zig");
-const Encoding = @import("Encoding.zig");
const std = @import("std");
+const assert = std.debug.assert;
const log = std.log.scoped(.lower);
-const Air = @import("../../Air.zig");
const Allocator = std.mem.Allocator;
const ErrorMsg = Zcu.ErrorMsg;
-const Mir = @import("Mir.zig");
+
+const link = @import("../../link.zig");
+const Air = @import("../../Air.zig");
const Zcu = @import("../../Zcu.zig");
-const Instruction = encoder.Instruction;
+
+const Mir = @import("Mir.zig");
+const abi = @import("abi.zig");
+const bits = @import("bits.zig");
+const encoding = @import("encoding.zig");
+
+const Mnemonic = @import("mnem.zig").Mnemonic;
const Immediate = bits.Immediate;
+const Instruction = encoding.Instruction;
diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig
index 1478bf5d5b..2ae62fd9b2 100644
--- a/src/arch/riscv64/Mir.zig
+++ b/src/arch/riscv64/Mir.zig
@@ -1,164 +1,17 @@
//! Machine Intermediate Representation.
-//! This data is produced by RISCV64 Codegen or RISCV64 assembly parsing
-//! These instructions have a 1:1 correspondence with machine code instructions
-//! for the target. MIR can be lowered to source-annotated textual assembly code
-//! instructions, or it can be lowered to machine code.
-//! The main purpose of MIR is to postpone the assignment of offsets until Isel,
-//! so that, for example, the smaller encodings of jump instructions can be used.
+//! This data is produced by CodeGen.zig
instructions: std.MultiArrayList(Inst).Slice,
-/// The meaning of this data is determined by `Inst.Tag` value.
-extra: []const u32,
frame_locs: std.MultiArrayList(FrameLoc).Slice,
pub const Inst = struct {
- tag: Tag,
+ tag: Mnemonic,
data: Data,
- ops: Ops,
- /// The position of an MIR instruction within the `Mir` instructions array.
pub const Index = u32;
- pub const Tag = enum(u16) {
-
- // base extension
- addi,
- addiw,
-
- jalr,
- lui,
-
- @"and",
- andi,
-
- xori,
- xor,
- @"or",
-
- ebreak,
- ecall,
- unimp,
-
- add,
- addw,
- sub,
- subw,
-
- sltu,
- slt,
-
- slli,
- srli,
- srai,
-
- slliw,
- srliw,
- sraiw,
-
- sll,
- srl,
- sra,
-
- sllw,
- srlw,
- sraw,
-
- jal,
-
- beq,
- bne,
-
- nop,
-
- ld,
- lw,
- lh,
- lb,
-
- sd,
- sw,
- sh,
- sb,
-
- // M extension
- mul,
- mulw,
-
- div,
- divu,
- divw,
- divuw,
-
- rem,
- remu,
- remw,
- remuw,
-
- // F extension (32-bit float)
- fadds,
- fsubs,
- fmuls,
- fdivs,
-
- fabss,
-
- fmins,
- fmaxs,
-
- fsqrts,
-
- flw,
- fsw,
-
- feqs,
- flts,
- fles,
-
- // D extension (64-bit float)
- faddd,
- fsubd,
- fmuld,
- fdivd,
-
- fabsd,
-
- fmind,
- fmaxd,
-
- fsqrtd,
-
- fld,
- fsd,
-
- feqd,
- fltd,
- fled,
-
- // Zicsr Extension Instructions
- csrrs,
-
- // V Extension Instructions
- vsetvli,
- vsetivli,
- vsetvl,
- vaddvv,
- vfaddvv,
- vsubvv,
- vfsubvv,
- vslidedownvx,
-
- /// A pseudo-instruction. Used for anything that isn't 1:1 with an
- /// assembly instruction.
- pseudo,
- };
-
- /// All instructions have a 4-byte payload, which is contained within
- /// this union. `Ops` determines which union field is active, as well as
- /// how to interpret the data within.
- pub const Data = union {
- nop: void,
- inst: Index,
- payload: u32,
+ pub const Data = union(enum) {
+ none: void,
r_type: struct {
rd: Register,
rs1: Register,
@@ -188,10 +41,6 @@ pub const Inst = struct {
rd: Register,
inst: Inst.Index,
},
- pseudo_dbg_line_column: struct {
- line: u32,
- column: u32,
- },
rm: struct {
r: Register,
m: Memory,
@@ -202,11 +51,6 @@ pub const Inst = struct {
rd: Register,
rs: Register,
},
- fabs: struct {
- rd: Register,
- rs: Register,
- bits: u16,
- },
compare: struct {
rd: Register,
rs1: Register,
@@ -222,16 +66,13 @@ pub const Inst = struct {
ty: Type,
},
reloc: struct {
+ register: Register,
atom_index: u32,
sym_index: u32,
},
fence: struct {
pred: Barrier,
succ: Barrier,
- fm: enum {
- none,
- tso,
- },
},
amo: struct {
rd: Register,
@@ -239,123 +80,32 @@ pub const Inst = struct {
rs2: Register,
aq: Barrier,
rl: Barrier,
- op: AmoOp,
- ty: Type,
},
csr: struct {
csr: CSR,
rs1: Register,
rd: Register,
},
- };
-
- pub const Ops = enum {
- /// No data associated with this instruction (only mnemonic is used).
- none,
- /// Two registers
- rr,
- /// Three registers
- rrr,
-
- /// Two registers + immediate, uses the i_type payload.
- rri,
- //extern_fn_reloc/ Two registers + another instruction.
- rr_inst,
-
- /// Register + Memory
- rm,
-
- /// Register + Immediate
- ri,
-
- /// Another instruction.
- inst,
-
- /// Control and Status Register Instruction.
- csr,
-
- /// Pseudo-instruction that will generate a backpatched
- /// function prologue.
- pseudo_prologue,
- /// Pseudo-instruction that will generate a backpatched
- /// function epilogue
- pseudo_epilogue,
-
- /// Pseudo-instruction: End of prologue
- pseudo_dbg_prologue_end,
- /// Pseudo-instruction: Beginning of epilogue
- pseudo_dbg_epilogue_begin,
- /// Pseudo-instruction: Update debug line
- pseudo_dbg_line_column,
-
- /// Pseudo-instruction that loads from memory into a register.
- ///
- /// Uses `rm` payload.
- pseudo_load_rm,
- /// Pseudo-instruction that stores from a register into memory
- ///
- /// Uses `rm` payload.
- pseudo_store_rm,
-
- /// Pseudo-instruction that loads the address of memory into a register.
- ///
- /// Uses `rm` payload.
- pseudo_lea_rm,
-
- /// Jumps. Uses `inst` payload.
- pseudo_j,
-
- /// Floating point absolute value.
- pseudo_fabs,
-
- /// Dead inst, ignored by the emitter.
- pseudo_dead,
-
- /// Loads the address of a value that hasn't yet been allocated in memory.
- ///
- /// uses the Mir.LoadSymbolPayload payload.
- pseudo_load_symbol,
-
- /// Moves the value of rs1 to rd.
- ///
- /// uses the `rr` payload.
- pseudo_mv,
-
- pseudo_restore_regs,
- pseudo_spill_regs,
-
- pseudo_compare,
-
- /// NOT operation on booleans. Does an `andi reg, reg, 1` to mask out any other bits from the boolean.
- pseudo_not,
-
- /// Generates an auipc + jalr pair, with a R_RISCV_CALL_PLT reloc
- pseudo_extern_fn_reloc,
-
- /// IORW, IORW
- pseudo_fence,
-
- /// Ordering, Src, Addr, Dest
- pseudo_amo,
+ pseudo_dbg_line_column: struct {
+ line: u32,
+ column: u32,
+ },
};
pub fn format(
inst: Inst,
comptime fmt: []const u8,
- options: std.fmt.FormatOptions,
+ _: std.fmt.FormatOptions,
writer: anytype,
) !void {
assert(fmt.len == 0);
- _ = options;
-
- try writer.print("Tag: {s}, Ops: {s}", .{ @tagName(inst.tag), @tagName(inst.ops) });
+ try writer.print("Tag: {s}, Data: {s}", .{ @tagName(inst.tag), @tagName(inst.data) });
}
};
pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
mir.instructions.deinit(gpa);
mir.frame_locs.deinit(gpa);
- gpa.free(mir.extra);
mir.* = undefined;
}
@@ -386,25 +136,12 @@ pub const AmoOp = enum(u5) {
MIN,
};
-/// Returns the requested data, as well as the new index which is at the start of the
-/// trailers for the object.
-pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end: usize } {
- const fields = std.meta.fields(T);
- var i: usize = index;
- var result: T = undefined;
- inline for (fields) |field| {
- @field(result, field.name) = switch (field.type) {
- u32 => mir.extra[i],
- i32 => @as(i32, @bitCast(mir.extra[i])),
- else => @compileError("bad field type"),
- };
- i += 1;
- }
- return .{
- .data = result,
- .end = i,
- };
-}
+pub const FcvtOp = enum(u5) {
+ w = 0b00000,
+ wu = 0b00001,
+ l = 0b00010,
+ lu = 0b00011,
+};
pub const LoadSymbolPayload = struct {
register: u32,
@@ -453,10 +190,10 @@ const Mir = @This();
const std = @import("std");
const builtin = @import("builtin");
const Type = @import("../../Type.zig");
+const bits = @import("bits.zig");
const assert = std.debug.assert;
-const bits = @import("bits.zig");
const Register = bits.Register;
const CSR = bits.CSR;
const Immediate = bits.Immediate;
@@ -464,3 +201,4 @@ const Memory = bits.Memory;
const FrameIndex = bits.FrameIndex;
const FrameAddr = @import("CodeGen.zig").FrameAddr;
const IntegerBitSet = std.bit_set.IntegerBitSet;
+const Mnemonic = @import("mnem.zig").Mnemonic;
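With the `extra` buffer and `extraData` gone, every payload lives inline in the tagged `Data` union, and an MIR instruction is a single struct literal. A miniature model of the new shape; field types are simplified here, where the real `Inst` uses `Register` and `Immediate`:

const std = @import("std");

const Inst = struct {
    tag: enum { addi, add },
    data: union(enum) {
        i_type: struct { rd: u5, rs1: u5, imm12: i12 },
        r_type: struct { rd: u5, rs1: u5, rs2: u5 },
    },
};

test "payloads live inline, no extra buffer" {
    const inst: Inst = .{
        .tag = .addi,
        .data = .{ .i_type = .{ .rd = 10, .rs1 = 10, .imm12 = 1 } },
    };
    try std.testing.expectEqual(@as(i12, 1), inst.data.i_type.imm12);
}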
diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig
index 3f5f7f6744..d274c76f08 100644
--- a/src/arch/riscv64/abi.zig
+++ b/src/arch/riscv64/abi.zig
@@ -125,10 +125,7 @@ pub fn classifySystem(ty: Type, pt: Zcu.PerThread) [8]SystemClass {
result[0] = .integer;
return result;
}
- result[0] = .integer;
- if (ty.optionalChild(zcu).abiSize(pt) == 0) return result;
- result[1] = .integer;
- return result;
+ return memory_class;
},
.Int, .Enum, .ErrorSet => {
const int_bits = ty.intInfo(pt.zcu).bits;
@@ -167,7 +164,7 @@ pub fn classifySystem(ty: Type, pt: Zcu.PerThread) [8]SystemClass {
return memory_class;
},
- .Struct => {
+ .Struct, .Union => {
const layout = ty.containerLayout(pt.zcu);
const ty_size = ty.abiSize(pt);
@@ -200,6 +197,8 @@ pub fn classifySystem(ty: Type, pt: Zcu.PerThread) [8]SystemClass {
result[0] = .integer;
return result;
}
+ // we should pass vector registers of size <= 128 through 2 integer registers,
+ // but we haven't implemented separating vector registers into register_pairs yet
return memory_class;
},
else => |bad_ty| std.debug.panic("classifySystem {s}", .{@tagName(bad_ty)}),
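The `.Optional` change above stops splitting non-zero-sized optionals across two integer registers and classifies them as memory instead; only optionals with a zero-sized payload (e.g. `?void`) stay in a single integer register. A reduced sketch of that decision; `SystemClass` is abbreviated, `memory_class` is approximated as an all-memory-first pattern, and the guard shown is the zero-size check from the deleted code, so the actual condition may differ:

const SystemClass = enum { none, integer, memory };

// All-memory pattern: the value is passed by reference on the stack.
const memory_class: [8]SystemClass =
    .{ .memory, .none, .none, .none, .none, .none, .none, .none };

fn classifyOptional(payload_size: u64) [8]SystemClass {
    var result: [8]SystemClass = .{.none} ** 8;
    if (payload_size == 0) {
        // Only the null flag remains: one integer register.
        result[0] = .integer;
        return result;
    }
    // Non-zero payload: now classified as memory (was two integer registers).
    return memory_class;
}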
diff --git a/src/arch/riscv64/bits.zig b/src/arch/riscv64/bits.zig
index 97db43b50e..45fdd22bc0 100644
--- a/src/arch/riscv64/bits.zig
+++ b/src/arch/riscv64/bits.zig
@@ -5,7 +5,6 @@ const testing = std.testing;
const Target = std.Target;
const Zcu = @import("../../Zcu.zig");
-const Encoding = @import("Encoding.zig");
const Mir = @import("Mir.zig");
const abi = @import("abi.zig");
@@ -16,7 +15,6 @@ pub const Memory = struct {
pub const Base = union(enum) {
reg: Register,
frame: FrameIndex,
- reloc: Symbol,
};
pub const Mod = struct {
@@ -83,7 +81,6 @@ pub const Memory = struct {
.disp = base_loc.disp + offset,
};
},
- .reloc => unreachable,
}
}
};
@@ -193,7 +190,7 @@ pub const Register = enum(u8) {
/// The goal of this function is to return the same ID for `zero` and `x0` but two
/// separate IDs for `x0` and `f0`. We will assume that each register set has 32 registers
/// and is repeated twice, once for the named version, once for the number version.
- pub fn id(reg: Register) u8 {
+ pub fn id(reg: Register) std.math.IntFittingRange(0, @typeInfo(Register).Enum.fields.len) {
const base = switch (@intFromEnum(reg)) {
// zig fmt: off
@intFromEnum(Register.zero) ... @intFromEnum(Register.x31) => @intFromEnum(Register.zero),
@@ -251,8 +248,7 @@ pub const FrameIndex = enum(u32) {
/// This index refers to a frame dedicated to setting up args for functions called
/// in this function. Useful for aligning args separately.
call_frame,
- /// This index referes to the frame where callee saved registers are spilled and restore
- /// from.
+ /// This index refers to the frame where callee-saved registers are spilled and restored from.
spill_frame,
/// Other indices are used for local variable stack slots
_,
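`Register.id` now advertises its range in the return type via `std.math.IntFittingRange`, which computes the smallest integer type covering a comptime range. A quick illustration of that helper; the bound 96 is illustrative, standing in for three banks of 32 register IDs:

const std = @import("std");

test "IntFittingRange picks the smallest covering type" {
    // Three register banks of 32 IDs each => values 0..96 fit in a u7.
    comptime std.debug.assert(std.math.IntFittingRange(0, 96) == u7);
    // Widening the bound past 127 grows the type.
    comptime std.debug.assert(std.math.IntFittingRange(0, 255) == u8);
}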
diff --git a/src/arch/riscv64/encoder.zig b/src/arch/riscv64/encoder.zig
deleted file mode 100644
index 2ef5ba03ec..0000000000
--- a/src/arch/riscv64/encoder.zig
+++ /dev/null
@@ -1,80 +0,0 @@
-pub const Instruction = struct {
- encoding: Encoding,
- ops: [5]Operand = .{.none} ** 5,
-
- pub const Operand = union(enum) {
- none,
- reg: Register,
- csr: CSR,
- mem: Memory,
- imm: Immediate,
- barrier: Mir.Barrier,
- };
-
- pub fn new(mnemonic: Encoding.Mnemonic, ops: []const Operand) !Instruction {
- const encoding = (try Encoding.findByMnemonic(mnemonic, ops)) orelse {
- std.log.err("no encoding found for: {s} [{s} {s} {s} {s} {s}]", .{
- @tagName(mnemonic),
- @tagName(if (ops.len > 0) ops[0] else .none),
- @tagName(if (ops.len > 1) ops[1] else .none),
- @tagName(if (ops.len > 2) ops[2] else .none),
- @tagName(if (ops.len > 3) ops[3] else .none),
- @tagName(if (ops.len > 4) ops[4] else .none),
- });
- return error.InvalidInstruction;
- };
-
- var result_ops: [5]Operand = .{.none} ** 5;
- @memcpy(result_ops[0..ops.len], ops);
-
- return .{
- .encoding = encoding,
- .ops = result_ops,
- };
- }
-
- pub fn encode(inst: Instruction, writer: anytype) !void {
- try writer.writeInt(u32, inst.encoding.data.toU32(), .little);
- }
-
- pub fn format(
- inst: Instruction,
- comptime fmt: []const u8,
- _: std.fmt.FormatOptions,
- writer: anytype,
- ) !void {
- std.debug.assert(fmt.len == 0);
-
- const encoding = inst.encoding;
-
- try writer.print("{s} ", .{@tagName(encoding.mnemonic)});
-
- var i: u32 = 0;
- while (i < inst.ops.len and inst.ops[i] != .none) : (i += 1) {
- if (i != inst.ops.len and i != 0) try writer.writeAll(", ");
-
- switch (@as(Instruction.Operand, inst.ops[i])) {
- .none => unreachable, // it's sliced out above
- .reg => |reg| try writer.writeAll(@tagName(reg)),
- .imm => |imm| try writer.print("{d}", .{imm.asSigned(64)}),
- .mem => try writer.writeAll("mem"),
- .barrier => |barrier| try writer.writeAll(@tagName(barrier)),
- .csr => |csr| try writer.writeAll(@tagName(csr)),
- }
- }
- }
-};
-
-const std = @import("std");
-
-const Lower = @import("Lower.zig");
-const Mir = @import("Mir.zig");
-const bits = @import("bits.zig");
-const Encoding = @import("Encoding.zig");
-
-const Register = bits.Register;
-const CSR = bits.CSR;
-const Memory = bits.Memory;
-const Immediate = bits.Immediate;
-
-const log = std.log.scoped(.encode);
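Two Zig idioms from the deleted `Instruction` are worth noting: padding a variable-length operand slice into a fixed five-slot array with a `.none` sentinel, and comparing a tagged union against an enum literal, which compares only the tags. A self-contained sketch of both:

const std = @import("std");

const Operand = union(enum) { none, reg: u5, imm: i32 };

test "pad operands into a fixed template" {
    const ops = [_]Operand{ .{ .reg = 10 }, .{ .imm = 1 } };
    var result: [5]Operand = .{.none} ** 5;
    @memcpy(result[0..ops.len], &ops);
    // Comparing a tagged union with an enum literal compares the tags,
    // exactly as the deleted `format` loop's `inst.ops[i] != .none` did.
    try std.testing.expect(result[1] != .none);
    try std.testing.expect(result[2] == .none);
}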
diff --git a/src/arch/riscv64/encoding.zig b/src/arch/riscv64/encoding.zig
new file mode 100644
index 0000000000..3ff497a1ea
--- /dev/null
+++ b/src/arch/riscv64/encoding.zig
@@ -0,0 +1,729 @@
+//! This file is responsible for taking the MIR emitted by CodeGen and
+//! converting it into Instructions, which can then be encoded or printed.
+//!
+//! Here we encode how mnemonics relate to opcodes and where their operands go.
+
+/// Lower Instruction Representation
+///
+/// This format describes a specific instruction, but is still abstracted
+/// away from the final binary encoding it will be lowered to. It's meant to
+/// make specifying the unique encoding data of each mnemonic easier.
+pub const Lir = struct {
+ opcode: OpCode,
+ format: Format,
+ data: Data,
+
+ pub const Format = enum {
+ R,
+ I,
+ S,
+ B,
+ U,
+ J,
+ extra,
+ };
+
+ const Data = union(enum) {
+ none,
+ f: struct { funct3: u3 },
+ ff: struct {
+ funct3: u3,
+ funct7: u7,
+ },
+ sh: struct {
+ typ: u6,
+ funct3: u3,
+ has_5: bool,
+ },
+
+ fmt: struct {
+ funct5: u5,
+ rm: u3,
+ fmt: FpFmt,
+ },
+ fcvt: struct {
+ funct5: u5,
+ rm: u3,
+ fmt: FpFmt,
+ width: Mir.FcvtOp,
+ },
+
+ vecls: struct {
+ width: VecWidth,
+ umop: Umop,
+ vm: bool,
+ mop: Mop,
+ mew: bool,
+ nf: u3,
+ },
+ vecmath: struct {
+ vm: bool,
+ funct6: u6,
+ funct3: VecType,
+ },
+
+ amo: struct {
+ funct5: u5,
+ width: AmoWidth,
+ },
+ fence: struct {
+ funct3: u3,
+ fm: FenceMode,
+ },
+
+ /// the mnemonic has some special properties that can't be handled in a generic fashion
+ extra: Mnemonic,
+ };
+
+ const OpCode = enum(u7) {
+ LOAD = 0b0000011,
+ LOAD_FP = 0b0000111,
+ MISC_MEM = 0b0001111,
+ OP_IMM = 0b0010011,
+ AUIPC = 0b0010111,
+ OP_IMM_32 = 0b0011011,
+ STORE = 0b0100011,
+ STORE_FP = 0b0100111,
+ AMO = 0b0101111,
+ OP_V = 0b1010111,
+ OP = 0b0110011,
+ OP_32 = 0b0111011,
+ LUI = 0b0110111,
+ MADD = 0b1000011,
+ MSUB = 0b1000111,
+ NMSUB = 0b1001011,
+ NMADD = 0b1001111,
+ OP_FP = 0b1010011,
+ OP_IMM_64 = 0b1011011,
+ BRANCH = 0b1100011,
+ JALR = 0b1100111,
+ JAL = 0b1101111,
+ SYSTEM = 0b1110011,
+ OP_64 = 0b1111011,
+ NONE = 0b00000000,
+ };
+
+ const FpFmt = enum(u2) {
+ /// 32-bit single-precision
+ S = 0b00,
+ /// 64-bit double-precision
+ D = 0b01,
+
+ // H = 0b10, unused in the G extension
+
+ /// 128-bit quad-precision
+ Q = 0b11,
+ };
+
+ const AmoWidth = enum(u3) {
+ W = 0b010,
+ D = 0b011,
+ };
+
+ const FenceMode = enum(u4) {
+ none = 0b0000,
+ tso = 0b1000,
+ };
+
+ const Mop = enum(u2) {
+ // zig fmt: off
+ unit = 0b00,
+ unord = 0b01,
+ stride = 0b10,
+ ord = 0b11,
+ // zig fmt: on
+ };
+
+ const Umop = enum(u5) {
+ // zig fmt: off
+ unit = 0b00000,
+ whole = 0b01000,
+ mask = 0b01011,
+ fault = 0b10000,
+ // zig fmt: on
+ };
+
+ const VecWidth = enum(u3) {
+ // zig fmt: off
+ @"8" = 0b000,
+ @"16" = 0b101,
+ @"32" = 0b110,
+ @"64" = 0b111,
+ // zig fmt: on
+ };
+
+ const VecType = enum(u3) {
+ OPIVV = 0b000,
+ OPFVV = 0b001,
+ OPMVV = 0b010,
+ OPIVI = 0b011,
+ OPIVX = 0b100,
+ OPFVF = 0b101,
+ OPMVX = 0b110,
+ };
+
+ pub fn fromMnem(mnem: Mnemonic) Lir {
+ return switch (mnem) {
+ // zig fmt: off
+
+ // OP
+ .add => .{ .opcode = .OP, .format = .R, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0000000 } } },
+ .sub => .{ .opcode = .OP, .format = .R, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0100000 } } },
+
+ .@"and" => .{ .opcode = .OP, .format = .R, .data = .{ .ff = .{ .funct3 = 0b111, .funct7 = 0b0000000 } } },
+ .@"or" => .{ .opcode = .OP, .format = .R, .data = .{ .ff = .{ .funct3 = 0b110, .funct7 = 0b0000000 } } },
+ .xor => .{ .opcode = .OP, .format = .R, .data = .{ .ff = .{ .funct3 = 0b100, .funct7 = 0b0000000 } } },
+
+ .sltu => .{ .opcode = .OP, .format = .R, .data = .{ .ff = .{ .funct3 = 0b011, .funct7 = 0b0000000 } } },
+ .slt => .{ .opcode = .OP, .format = .R, .data = .{ .ff = .{ .funct3 = 0b010, .funct7 = 0b0000000 } } },
+
+ .mul => .{ .opcode = .OP, .format = .R, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0000001 } } },
+ .mulh => .{ .opcode = .OP, .format = .R, .data = .{ .ff = .{ .funct3 = 0b001, .funct7 = 0b0000001 } } },
+ .mulhsu => .{ .opcode = .OP, .format = .R, .data = .{ .ff = .{ .funct3 = 0b010, .funct7 = 0b0000001 } } },
+ .mulhu => .{ .opcode = .OP, .format = .R, .data = .{ .ff = .{ .funct3 = 0b011, .funct7 = 0b0000001 } } },
+
+ .div => .{ .opcode = .OP, .format = .R, .data = .{ .ff = .{ .funct3 = 0b100, .funct7 = 0b0000001 } } },
+ .divu => .{ .opcode = .OP, .format = .R, .data = .{ .ff = .{ .funct3 = 0b101, .funct7 = 0b0000001 } } },
+
+ .rem => .{ .opcode = .OP, .format = .R, .data = .{ .ff = .{ .funct3 = 0b110, .funct7 = 0b0000001 } } },
+ .remu => .{ .opcode = .OP, .format = .R, .data = .{ .ff = .{ .funct3 = 0b111, .funct7 = 0b0000001 } } },
+
+ .sll => .{ .opcode = .OP, .format = .R, .data = .{ .ff = .{ .funct3 = 0b001, .funct7 = 0b0000000 } } },
+ .srl => .{ .opcode = .OP, .format = .R, .data = .{ .ff = .{ .funct3 = 0b101, .funct7 = 0b0000000 } } },
+ .sra => .{ .opcode = .OP, .format = .R, .data = .{ .ff = .{ .funct3 = 0b101, .funct7 = 0b0100000 } } },
+
+
+ // OP_IMM
+
+ .addi => .{ .opcode = .OP_IMM, .format = .I, .data = .{ .f = .{ .funct3 = 0b000 } } },
+ .andi => .{ .opcode = .OP_IMM, .format = .I, .data = .{ .f = .{ .funct3 = 0b111 } } },
+ .xori => .{ .opcode = .OP_IMM, .format = .I, .data = .{ .f = .{ .funct3 = 0b100 } } },
+
+ .sltiu => .{ .opcode = .OP_IMM, .format = .I, .data = .{ .f = .{ .funct3 = 0b011 } } },
+
+ .slli => .{ .opcode = .OP_IMM, .format = .I, .data = .{ .sh = .{ .typ = 0b000000, .funct3 = 0b001, .has_5 = true } } },
+ .srli => .{ .opcode = .OP_IMM, .format = .I, .data = .{ .sh = .{ .typ = 0b000000, .funct3 = 0b101, .has_5 = true } } },
+ .srai => .{ .opcode = .OP_IMM, .format = .I, .data = .{ .sh = .{ .typ = 0b010000, .funct3 = 0b101, .has_5 = true } } },
+
+ .clz => .{ .opcode = .OP_IMM, .format = .R, .data = .{ .ff = .{ .funct3 = 0b001, .funct7 = 0b0110000 } } },
+ .cpop => .{ .opcode = .OP_IMM, .format = .R, .data = .{ .ff = .{ .funct3 = 0b001, .funct7 = 0b0110000 } } },
+
+ // OP_IMM_32
+
+ .slliw => .{ .opcode = .OP_IMM_32, .format = .I, .data = .{ .sh = .{ .typ = 0b000000, .funct3 = 0b001, .has_5 = false } } },
+ .srliw => .{ .opcode = .OP_IMM_32, .format = .I, .data = .{ .sh = .{ .typ = 0b000000, .funct3 = 0b101, .has_5 = false } } },
+ .sraiw => .{ .opcode = .OP_IMM_32, .format = .I, .data = .{ .sh = .{ .typ = 0b010000, .funct3 = 0b101, .has_5 = false } } },
+
+ .clzw => .{ .opcode = .OP_IMM_32, .format = .R, .data = .{ .ff = .{ .funct3 = 0b001, .funct7 = 0b0110000 } } },
+ .cpopw => .{ .opcode = .OP_IMM_32, .format = .R, .data = .{ .ff = .{ .funct3 = 0b001, .funct7 = 0b0110000 } } },
+
+ // OP_32
+
+ .addw => .{ .opcode = .OP_32, .format = .R, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0000000 } } },
+ .subw => .{ .opcode = .OP_32, .format = .R, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0100000 } } },
+ .mulw => .{ .opcode = .OP_32, .format = .R, .data = .{ .ff = .{ .funct3 = 0b000, .funct7 = 0b0000001 } } },
+
+ .divw => .{ .opcode = .OP_32, .format = .R, .data = .{ .ff = .{ .funct3 = 0b100, .funct7 = 0b0000001 } } },
+ .divuw => .{ .opcode = .OP_32, .format = .R, .data = .{ .ff = .{ .funct3 = 0b101, .funct7 = 0b0000001 } } },
+
+ .remw => .{ .opcode = .OP_32, .format = .R, .data = .{ .ff = .{ .funct3 = 0b110, .funct7 = 0b0000001 } } },
+ .remuw => .{ .opcode = .OP_32, .format = .R, .data = .{ .ff = .{ .funct3 = 0b111, .funct7 = 0b0000001 } } },
+
+ .sllw => .{ .opcode = .OP_32, .format = .R, .data = .{ .ff = .{ .funct3 = 0b001, .funct7 = 0b0000000 } } },
+ .srlw => .{ .opcode = .OP_32, .format = .R, .data = .{ .ff = .{ .funct3 = 0b101, .funct7 = 0b0000000 } } },
+ .sraw => .{ .opcode = .OP_32, .format = .R, .data = .{ .ff = .{ .funct3 = 0b101, .funct7 = 0b0100000 } } },
+
+
+ // OP_FP
+
+ .fadds => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fmt = .{ .funct5 = 0b00000, .fmt = .S, .rm = 0b111 } } },
+ .faddd => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fmt = .{ .funct5 = 0b00000, .fmt = .D, .rm = 0b111 } } },
+
+ .fsubs => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fmt = .{ .funct5 = 0b00001, .fmt = .S, .rm = 0b111 } } },
+ .fsubd => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fmt = .{ .funct5 = 0b00001, .fmt = .D, .rm = 0b111 } } },
+
+ .fmuls => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fmt = .{ .funct5 = 0b00010, .fmt = .S, .rm = 0b111 } } },
+ .fmuld => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fmt = .{ .funct5 = 0b00010, .fmt = .D, .rm = 0b111 } } },
+
+ .fdivs => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fmt = .{ .funct5 = 0b00011, .fmt = .S, .rm = 0b111 } } },
+ .fdivd => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fmt = .{ .funct5 = 0b00011, .fmt = .D, .rm = 0b111 } } },
+
+ .fmins => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fmt = .{ .funct5 = 0b00101, .fmt = .S, .rm = 0b000 } } },
+ .fmind => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fmt = .{ .funct5 = 0b00101, .fmt = .D, .rm = 0b000 } } },
+
+ .fmaxs => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fmt = .{ .funct5 = 0b00101, .fmt = .S, .rm = 0b001 } } },
+ .fmaxd => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fmt = .{ .funct5 = 0b00101, .fmt = .D, .rm = 0b001 } } },
+
+ .fsqrts => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fmt = .{ .funct5 = 0b01011, .fmt = .S, .rm = 0b111 } } },
+ .fsqrtd => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fmt = .{ .funct5 = 0b01011, .fmt = .D, .rm = 0b111 } } },
+
+ .fles => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .S, .rm = 0b000 } } },
+ .fled => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .D, .rm = 0b000 } } },
+
+ .flts => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .S, .rm = 0b001 } } },
+ .fltd => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .D, .rm = 0b001 } } },
+
+ .feqs => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .S, .rm = 0b010 } } },
+ .feqd => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fmt = .{ .funct5 = 0b10100, .fmt = .D, .rm = 0b010 } } },
+
+ .fsgnjns => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fmt = .{ .funct5 = 0b00100, .fmt = .S, .rm = 0b000 } } },
+ .fsgnjnd => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fmt = .{ .funct5 = 0b00100, .fmt = .D, .rm = 0b000 } } },
+
+ .fsgnjxs => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fmt = .{ .funct5 = 0b00100, .fmt = .S, .rm = 0b010 } } },
+ .fsgnjxd => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fmt = .{ .funct5 = 0b00100, .fmt = .D, .rm = 0b010 } } },
+
+ .fcvtws => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fcvt = .{ .funct5 = 0b11000, .fmt = .S, .rm = 0b111, .width = .w } } },
+ .fcvtwus => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fcvt = .{ .funct5 = 0b11000, .fmt = .S, .rm = 0b111, .width = .wu } } },
+ .fcvtls => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fcvt = .{ .funct5 = 0b11000, .fmt = .S, .rm = 0b111, .width = .l } } },
+ .fcvtlus => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fcvt = .{ .funct5 = 0b11000, .fmt = .S, .rm = 0b111, .width = .lu } } },
+
+ .fcvtwd => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fcvt = .{ .funct5 = 0b11000, .fmt = .D, .rm = 0b111, .width = .w } } },
+ .fcvtwud => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fcvt = .{ .funct5 = 0b11000, .fmt = .D, .rm = 0b111, .width = .wu } } },
+ .fcvtld => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fcvt = .{ .funct5 = 0b11000, .fmt = .D, .rm = 0b111, .width = .l } } },
+ .fcvtlud => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fcvt = .{ .funct5 = 0b11000, .fmt = .D, .rm = 0b111, .width = .lu } } },
+
+ .fcvtsw => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fcvt = .{ .funct5 = 0b11010, .fmt = .S, .rm = 0b111, .width = .w } } },
+ .fcvtswu => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fcvt = .{ .funct5 = 0b11010, .fmt = .S, .rm = 0b111, .width = .wu } } },
+ .fcvtsl => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fcvt = .{ .funct5 = 0b11010, .fmt = .S, .rm = 0b111, .width = .l } } },
+ .fcvtslu => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fcvt = .{ .funct5 = 0b11010, .fmt = .S, .rm = 0b111, .width = .lu } } },
+
+ .fcvtdw => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fcvt = .{ .funct5 = 0b11010, .fmt = .D, .rm = 0b111, .width = .w } } },
+ .fcvtdwu => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fcvt = .{ .funct5 = 0b11010, .fmt = .D, .rm = 0b111, .width = .wu } } },
+ .fcvtdl => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fcvt = .{ .funct5 = 0b11010, .fmt = .D, .rm = 0b111, .width = .l } } },
+ .fcvtdlu => .{ .opcode = .OP_FP, .format = .R, .data = .{ .fcvt = .{ .funct5 = 0b11010, .fmt = .D, .rm = 0b111, .width = .lu } } },
+
+ // LOAD
+
+ .lb => .{ .opcode = .LOAD, .format = .I, .data = .{ .f = .{ .funct3 = 0b000 } } },
+ .lh => .{ .opcode = .LOAD, .format = .I, .data = .{ .f = .{ .funct3 = 0b001 } } },
+ .lw => .{ .opcode = .LOAD, .format = .I, .data = .{ .f = .{ .funct3 = 0b010 } } },
+ .ld => .{ .opcode = .LOAD, .format = .I, .data = .{ .f = .{ .funct3 = 0b011 } } },
+ .lbu => .{ .opcode = .LOAD, .format = .I, .data = .{ .f = .{ .funct3 = 0b100 } } },
+ .lhu => .{ .opcode = .LOAD, .format = .I, .data = .{ .f = .{ .funct3 = 0b101 } } },
+ .lwu => .{ .opcode = .LOAD, .format = .I, .data = .{ .f = .{ .funct3 = 0b110 } } },
+
+
+ // STORE
+
+ .sb => .{ .opcode = .STORE, .format = .S, .data = .{ .f = .{ .funct3 = 0b000 } } },
+ .sh => .{ .opcode = .STORE, .format = .S, .data = .{ .f = .{ .funct3 = 0b001 } } },
+ .sw => .{ .opcode = .STORE, .format = .S, .data = .{ .f = .{ .funct3 = 0b010 } } },
+ .sd => .{ .opcode = .STORE, .format = .S, .data = .{ .f = .{ .funct3 = 0b011 } } },
+
+
+ // LOAD_FP
+
+ .flw => .{ .opcode = .LOAD_FP, .format = .I, .data = .{ .f = .{ .funct3 = 0b010 } } },
+ .fld => .{ .opcode = .LOAD_FP, .format = .I, .data = .{ .f = .{ .funct3 = 0b011 } } },
+
+ .vle8v => .{ .opcode = .LOAD_FP, .format = .R, .data = .{ .vecls = .{ .width = .@"8", .umop = .unit, .vm = true, .mop = .unit, .mew = false, .nf = 0b000 } } },
+ .vle16v => .{ .opcode = .LOAD_FP, .format = .R, .data = .{ .vecls = .{ .width = .@"16", .umop = .unit, .vm = true, .mop = .unit, .mew = false, .nf = 0b000 } } },
+ .vle32v => .{ .opcode = .LOAD_FP, .format = .R, .data = .{ .vecls = .{ .width = .@"32", .umop = .unit, .vm = true, .mop = .unit, .mew = false, .nf = 0b000 } } },
+ .vle64v => .{ .opcode = .LOAD_FP, .format = .R, .data = .{ .vecls = .{ .width = .@"64", .umop = .unit, .vm = true, .mop = .unit, .mew = false, .nf = 0b000 } } },
+
+
+ // STORE_FP
+
+ .fsw => .{ .opcode = .STORE_FP, .format = .S, .data = .{ .f = .{ .funct3 = 0b010 } } },
+ .fsd => .{ .opcode = .STORE_FP, .format = .S, .data = .{ .f = .{ .funct3 = 0b011 } } },
+
+ .vse8v => .{ .opcode = .STORE_FP, .format = .R, .data = .{ .vecls = .{ .width = .@"8", .umop = .unit, .vm = true, .mop = .unit, .mew = false, .nf = 0b000 } } },
+ .vse16v => .{ .opcode = .STORE_FP, .format = .R, .data = .{ .vecls = .{ .width = .@"16", .umop = .unit, .vm = true, .mop = .unit, .mew = false, .nf = 0b000 } } },
+ .vse32v => .{ .opcode = .STORE_FP, .format = .R, .data = .{ .vecls = .{ .width = .@"32", .umop = .unit, .vm = true, .mop = .unit, .mew = false, .nf = 0b000 } } },
+ .vse64v => .{ .opcode = .STORE_FP, .format = .R, .data = .{ .vecls = .{ .width = .@"64", .umop = .unit, .vm = true, .mop = .unit, .mew = false, .nf = 0b000 } } },
+
+ // JALR
+
+ .jalr => .{ .opcode = .JALR, .format = .I, .data = .{ .f = .{ .funct3 = 0b000 } } },
+
+
+ // LUI
+
+ .lui => .{ .opcode = .LUI, .format = .U, .data = .{ .none = {} } },
+
+
+ // AUIPC
+
+ .auipc => .{ .opcode = .AUIPC, .format = .U, .data = .{ .none = {} } },
+
+
+ // JAL
+
+ .jal => .{ .opcode = .JAL, .format = .J, .data = .{ .none = {} } },
+
+ // BRANCH
+
+ .beq => .{ .opcode = .BRANCH, .format = .B, .data = .{ .f = .{ .funct3 = 0b000 } } },
+ .bne => .{ .opcode = .BRANCH, .format = .B, .data = .{ .f = .{ .funct3 = 0b001 } } },
+
+ // SYSTEM
+
+ .ecall => .{ .opcode = .SYSTEM, .format = .extra, .data = .{ .extra = .ecall } },
+ .ebreak => .{ .opcode = .SYSTEM, .format = .extra, .data = .{ .extra = .ebreak } },
+
+ .csrrs => .{ .opcode = .SYSTEM, .format = .I, .data = .{ .f = .{ .funct3 = 0b010 } } },
+
+ // NONE
+
+ .unimp => .{ .opcode = .NONE, .format = .extra, .data = .{ .extra = .unimp } },
+
+ // MISC_MEM
+
+ .fence => .{ .opcode = .MISC_MEM, .format = .I, .data = .{ .fence = .{ .funct3 = 0b000, .fm = .none } } },
+ .fencetso => .{ .opcode = .MISC_MEM, .format = .I, .data = .{ .fence = .{ .funct3 = 0b000, .fm = .tso } } },
+
+ // AMO
+
+ .amoaddw => .{ .opcode = .AMO, .format = .R, .data = .{ .amo = .{ .width = .W, .funct5 = 0b00000 } } },
+ .amoswapw => .{ .opcode = .AMO, .format = .R, .data = .{ .amo = .{ .width = .W, .funct5 = 0b00001 } } },
+ .lrw => .{ .opcode = .AMO, .format = .R, .data = .{ .amo = .{ .width = .W, .funct5 = 0b00010 } } },
+ .scw => .{ .opcode = .AMO, .format = .R, .data = .{ .amo = .{ .width = .W, .funct5 = 0b00011 } } },
+ .amoxorw => .{ .opcode = .AMO, .format = .R, .data = .{ .amo = .{ .width = .W, .funct5 = 0b00100 } } },
+ .amoandw => .{ .opcode = .AMO, .format = .R, .data = .{ .amo = .{ .width = .W, .funct5 = 0b01100 } } },
+ .amoorw => .{ .opcode = .AMO, .format = .R, .data = .{ .amo = .{ .width = .W, .funct5 = 0b01000 } } },
+ .amominw => .{ .opcode = .AMO, .format = .R, .data = .{ .amo = .{ .width = .W, .funct5 = 0b10000 } } },
+ .amomaxw => .{ .opcode = .AMO, .format = .R, .data = .{ .amo = .{ .width = .W, .funct5 = 0b10100 } } },
+ .amominuw => .{ .opcode = .AMO, .format = .R, .data = .{ .amo = .{ .width = .W, .funct5 = 0b11000 } } },
+ .amomaxuw => .{ .opcode = .AMO, .format = .R, .data = .{ .amo = .{ .width = .W, .funct5 = 0b11100 } } },
+
+ .amoaddd => .{ .opcode = .AMO, .format = .R, .data = .{ .amo = .{ .width = .D, .funct5 = 0b00000 } } },
+ .amoswapd => .{ .opcode = .AMO, .format = .R, .data = .{ .amo = .{ .width = .D, .funct5 = 0b00001 } } },
+ .lrd => .{ .opcode = .AMO, .format = .R, .data = .{ .amo = .{ .width = .D, .funct5 = 0b00010 } } },
+ .scd => .{ .opcode = .AMO, .format = .R, .data = .{ .amo = .{ .width = .D, .funct5 = 0b00011 } } },
+ .amoxord => .{ .opcode = .AMO, .format = .R, .data = .{ .amo = .{ .width = .D, .funct5 = 0b00100 } } },
+ .amoandd => .{ .opcode = .AMO, .format = .R, .data = .{ .amo = .{ .width = .D, .funct5 = 0b01100 } } },
+ .amoord => .{ .opcode = .AMO, .format = .R, .data = .{ .amo = .{ .width = .D, .funct5 = 0b01000 } } },
+ .amomind => .{ .opcode = .AMO, .format = .R, .data = .{ .amo = .{ .width = .D, .funct5 = 0b10000 } } },
+ .amomaxd => .{ .opcode = .AMO, .format = .R, .data = .{ .amo = .{ .width = .D, .funct5 = 0b10100 } } },
+ .amominud => .{ .opcode = .AMO, .format = .R, .data = .{ .amo = .{ .width = .D, .funct5 = 0b11000 } } },
+ .amomaxud => .{ .opcode = .AMO, .format = .R, .data = .{ .amo = .{ .width = .D, .funct5 = 0b11100 } } },
+
+ // OP_V
+ .vsetivli => .{ .opcode = .OP_V, .format = .I, .data = .{ .f = .{ .funct3 = 0b111 } } },
+ .vsetvli => .{ .opcode = .OP_V, .format = .I, .data = .{ .f = .{ .funct3 = 0b111 } } },
+ .vaddvv => .{ .opcode = .OP_V, .format = .R, .data = .{ .vecmath = .{ .vm = true, .funct6 = 0b000000, .funct3 = .OPIVV } } },
+ .vsubvv => .{ .opcode = .OP_V, .format = .R, .data = .{ .vecmath = .{ .vm = true, .funct6 = 0b000010, .funct3 = .OPIVV } } },
+    .vmulvv => .{ .opcode = .OP_V, .format = .R, .data = .{ .vecmath = .{ .vm = true, .funct6 = 0b100101, .funct3 = .OPMVV } } },
+
+ .vfaddvv => .{ .opcode = .OP_V, .format = .R, .data = .{ .vecmath = .{ .vm = true, .funct6 = 0b000000, .funct3 = .OPFVV } } },
+ .vfsubvv => .{ .opcode = .OP_V, .format = .R, .data = .{ .vecmath = .{ .vm = true, .funct6 = 0b000010, .funct3 = .OPFVV } } },
+ .vfmulvv => .{ .opcode = .OP_V, .format = .R, .data = .{ .vecmath = .{ .vm = true, .funct6 = 0b100100, .funct3 = .OPFVV } } },
+
+    .vadcvv => .{ .opcode = .OP_V, .format = .R, .data = .{ .vecmath = .{ .vm = false, .funct6 = 0b010000, .funct3 = .OPIVV } } },
+ .vmvvx => .{ .opcode = .OP_V, .format = .R, .data = .{ .vecmath = .{ .vm = true, .funct6 = 0b010111, .funct3 = .OPIVX } } },
+
+ .vslidedownvx => .{ .opcode = .OP_V, .format = .R, .data = .{ .vecmath = .{ .vm = true, .funct6 = 0b001111, .funct3 = .OPIVX } } },
+
+ .pseudo_prologue,
+ .pseudo_epilogue,
+ .pseudo_dbg_prologue_end,
+ .pseudo_dbg_epilogue_begin,
+ .pseudo_dbg_line_column,
+ .pseudo_load_rm,
+ .pseudo_store_rm,
+ .pseudo_lea_rm,
+ .pseudo_j,
+ .pseudo_dead,
+ .pseudo_load_symbol,
+ .pseudo_load_tlv,
+ .pseudo_mv,
+ .pseudo_restore_regs,
+ .pseudo_spill_regs,
+ .pseudo_compare,
+ .pseudo_not,
+ .pseudo_extern_fn_reloc,
+ .nop,
+ => std.debug.panic("lir: didn't catch pseudo {s}", .{@tagName(mnem)}),
+ // zig fmt: on
+ };
+ }
+};
+
+/// This is the final form of the instruction. Lir is transformed into
+/// this, which is then bitcast into a u32.
+pub const Instruction = union(Lir.Format) {
+ R: packed struct(u32) {
+ opcode: u7,
+ rd: u5,
+ funct3: u3,
+ rs1: u5,
+ rs2: u5,
+ funct7: u7,
+ },
+ I: packed struct(u32) {
+ opcode: u7,
+ rd: u5,
+ funct3: u3,
+ rs1: u5,
+ imm0_11: u12,
+ },
+ S: packed struct(u32) {
+ opcode: u7,
+ imm0_4: u5,
+ funct3: u3,
+ rs1: u5,
+ rs2: u5,
+ imm5_11: u7,
+ },
+ B: packed struct(u32) {
+ opcode: u7,
+ imm11: u1,
+ imm1_4: u4,
+ funct3: u3,
+ rs1: u5,
+ rs2: u5,
+ imm5_10: u6,
+ imm12: u1,
+ },
+ U: packed struct(u32) {
+ opcode: u7,
+ rd: u5,
+ imm12_31: u20,
+ },
+ J: packed struct(u32) {
+ opcode: u7,
+ rd: u5,
+ imm12_19: u8,
+ imm11: u1,
+ imm1_10: u10,
+ imm20: u1,
+ },
+ extra: u32,
+
+ comptime {
+ for (std.meta.fields(Instruction)) |field| {
+ assert(@bitSizeOf(field.type) == 32);
+ }
+ }
+
+ pub const Operand = union(enum) {
+ none,
+ reg: Register,
+ csr: CSR,
+ mem: Memory,
+ imm: Immediate,
+ barrier: Mir.Barrier,
+ };
+
+ pub fn toU32(inst: Instruction) u32 {
+ return switch (inst) {
+ inline else => |v| @bitCast(v),
+ };
+ }
+
+ pub fn encode(inst: Instruction, writer: anytype) !void {
+ try writer.writeInt(u32, inst.toU32(), .little);
+ }
+
+ pub fn fromLir(lir: Lir, ops: []const Operand) Instruction {
+ const opcode: u7 = @intFromEnum(lir.opcode);
+
+ switch (lir.format) {
+ .R => {
+ return .{
+ .R = switch (lir.data) {
+ .ff => |ff| .{
+ .rd = ops[0].reg.encodeId(),
+ .rs1 = ops[1].reg.encodeId(),
+ .rs2 = ops[2].reg.encodeId(),
+
+ .opcode = opcode,
+ .funct3 = ff.funct3,
+ .funct7 = ff.funct7,
+ },
+ .fmt => |fmt| .{
+ .rd = ops[0].reg.encodeId(),
+ .rs1 = ops[1].reg.encodeId(),
+ .rs2 = ops[2].reg.encodeId(),
+
+ .opcode = opcode,
+ .funct3 = fmt.rm,
+ .funct7 = (@as(u7, fmt.funct5) << 2) | @intFromEnum(fmt.fmt),
+ },
+ .fcvt => |fcvt| .{
+ .rd = ops[0].reg.encodeId(),
+ .rs1 = ops[1].reg.encodeId(),
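+                        // FCVT repurposes the rs2 field to select the integer
+                        // width (.w/.wu/.l/.lu) instead of naming a register.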
+ .rs2 = @intFromEnum(fcvt.width),
+
+ .opcode = opcode,
+ .funct3 = fcvt.rm,
+ .funct7 = (@as(u7, fcvt.funct5) << 2) | @intFromEnum(fcvt.fmt),
+ },
+ .vecls => |vec| .{
+ .rd = ops[0].reg.encodeId(),
+ .rs1 = ops[1].reg.encodeId(),
+
+ .rs2 = @intFromEnum(vec.umop),
+
+ .opcode = opcode,
+ .funct3 = @intFromEnum(vec.width),
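+                        // funct7 packs the vector memory-op fields:
+                        // bits 6:4 = nf, bit 3 = mew, bits 2:1 = mop, bit 0 = vm.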
+ .funct7 = (@as(u7, vec.nf) << 4) | (@as(u7, @intFromBool(vec.mew)) << 3) | (@as(u7, @intFromEnum(vec.mop)) << 1) | @intFromBool(vec.vm),
+ },
+ .vecmath => |vec| .{
+ .rd = ops[0].reg.encodeId(),
+ .rs1 = ops[1].reg.encodeId(),
+ .rs2 = ops[2].reg.encodeId(),
+
+ .opcode = opcode,
+ .funct3 = @intFromEnum(vec.funct3),
+ .funct7 = (@as(u7, vec.funct6) << 1) | @intFromBool(vec.vm),
+ },
+ .amo => |amo| .{
+ .rd = ops[0].reg.encodeId(),
+ .rs1 = ops[1].reg.encodeId(),
+ .rs2 = ops[2].reg.encodeId(),
+
+ .opcode = opcode,
+ .funct3 = @intFromEnum(amo.width),
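+                            // Bit 26 of the instruction is aq and bit 25 is rl,
+                            // i.e. funct7 bits 1 and 0 respectively.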
+                            .funct7 = @as(u7, amo.funct5) << 2 |
+                                @as(u7, @intFromBool(ops[4].barrier == .aq)) << 1 |
+                                @as(u7, @intFromBool(ops[3].barrier == .rl)),
+ },
+ else => unreachable,
+ },
+ };
+ },
+ .S => {
+ assert(ops.len == 3);
+ const umm = ops[2].imm.asBits(u12);
+ return .{
+ .S = .{
+ .imm0_4 = @truncate(umm),
+ .rs1 = ops[0].reg.encodeId(),
+ .rs2 = ops[1].reg.encodeId(),
+ .imm5_11 = @truncate(umm >> 5),
+
+ .opcode = opcode,
+ .funct3 = lir.data.f.funct3,
+ },
+ };
+ },
+ .I => {
+ return .{
+ .I = switch (lir.data) {
+ .f => |f| .{
+ .rd = ops[0].reg.encodeId(),
+ .rs1 = ops[1].reg.encodeId(),
+ .imm0_11 = ops[2].imm.asBits(u12),
+
+ .opcode = opcode,
+ .funct3 = f.funct3,
+ },
+ .sh => |sh| .{
+ .rd = ops[0].reg.encodeId(),
+ .rs1 = ops[1].reg.encodeId(),
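+                        // Shift immediates split the I-type field: the shift
+                        // type lives in imm[11:6] and the shift amount in the
+                        // low 6 bits (5 for the 32-bit word variants).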
+ .imm0_11 = (@as(u12, sh.typ) << 6) |
+ if (sh.has_5) ops[2].imm.asBits(u6) else (@as(u6, 0) | ops[2].imm.asBits(u5)),
+
+ .opcode = opcode,
+ .funct3 = sh.funct3,
+ },
+ .fence => |fence| .{
+ .rd = 0,
+ .rs1 = 0,
+ .funct3 = 0,
+ .imm0_11 = (@as(u12, @intFromEnum(fence.fm)) << 8) |
+ (@as(u12, @intFromEnum(ops[1].barrier)) << 4) |
+ @as(u12, @intFromEnum(ops[0].barrier)),
+ .opcode = opcode,
+ },
+ else => unreachable,
+ },
+ };
+ },
+ .U => {
+ assert(ops.len == 2);
+ return .{
+ .U = .{
+ .rd = ops[0].reg.encodeId(),
+ .imm12_31 = ops[1].imm.asBits(u20),
+
+ .opcode = opcode,
+ },
+ };
+ },
+ .J => {
+ assert(ops.len == 2);
+
+ const umm = ops[1].imm.asBits(u21);
+            // The RISC-V spec requires jump targets to be 2-byte
+            // aligned, so the offset must be a multiple of 2.
+ assert(umm % 2 == 0);
+
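+            // e.g. a +0x1000 offset scatters to imm1_10 = 0, imm11 = 0,
+            // imm12_19 = 0x01, imm20 = 0 below.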
+ return .{
+ .J = .{
+ .rd = ops[0].reg.encodeId(),
+ .imm1_10 = @truncate(umm >> 1),
+ .imm11 = @truncate(umm >> 11),
+ .imm12_19 = @truncate(umm >> 12),
+ .imm20 = @truncate(umm >> 20),
+
+ .opcode = opcode,
+ },
+ };
+ },
+ .B => {
+ assert(ops.len == 3);
+
+ const umm = ops[2].imm.asBits(u13);
+            // The RISC-V spec requires branch targets to be 2-byte
+            // aligned, so the offset must be a multiple of 2.
+ assert(umm % 2 == 0);
+
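+            // e.g. a +0x800 offset scatters to imm1_4 = 0, imm5_10 = 0,
+            // imm11 = 1, imm12 = 0 below.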
+ return .{
+ .B = .{
+ .rs1 = ops[0].reg.encodeId(),
+ .rs2 = ops[1].reg.encodeId(),
+ .imm1_4 = @truncate(umm >> 1),
+ .imm5_10 = @truncate(umm >> 5),
+ .imm11 = @truncate(umm >> 11),
+ .imm12 = @truncate(umm >> 12),
+
+ .opcode = opcode,
+ .funct3 = lir.data.f.funct3,
+ },
+ };
+ },
+ .extra => {
+ assert(ops.len == 0);
+
+ return .{
+ .I = .{
+ .rd = Register.zero.encodeId(),
+ .rs1 = Register.zero.encodeId(),
+ .imm0_11 = switch (lir.data.extra) {
+ .ecall => 0x000,
+ .ebreak => 0x001,
+ .unimp => 0x000,
+ else => unreachable,
+ },
+
+ .opcode = opcode,
+ .funct3 = 0b000,
+ },
+ };
+ },
+ }
+ }
+};
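+
+// Illustrative sanity check: `addi a0, a0, 1` assembles to 0x00150513
+// per the RISC-V spec, so building the same I-type fields by hand must
+// produce exactly that word.
+test "Instruction.toU32 matches a known addi encoding" {
+    const inst: Instruction = .{ .I = .{
+        .opcode = 0b0010011, // OP_IMM
+        .rd = 10, // a0
+        .funct3 = 0b000, // addi
+        .rs1 = 10, // a0
+        .imm0_11 = 1,
+    } };
+    try std.testing.expect(inst.toU32() == 0x00150513);
+}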
+
+const std = @import("std");
+const assert = std.debug.assert;
+const log = std.log.scoped(.format);
+
+const bits = @import("bits.zig");
+const Mir = @import("Mir.zig");
+const Mnemonic = @import("mnem.zig").Mnemonic;
+const Lower = @import("Lower.zig");
+
+const Register = bits.Register;
+const CSR = bits.CSR;
+const Memory = bits.Memory;
+const Immediate = bits.Immediate;
diff --git a/src/arch/riscv64/mnem.zig b/src/arch/riscv64/mnem.zig
new file mode 100644
index 0000000000..cb4734e59c
--- /dev/null
+++ b/src/arch/riscv64/mnem.zig
@@ -0,0 +1,257 @@
+pub const Mnemonic = enum(u16) {
+    // Arithmetic
+ addi,
+ add,
+ addw,
+
+ sub,
+ subw,
+
+ // Bits
+ xori,
+ xor,
+ @"or",
+
+ @"and",
+ andi,
+
+ slt,
+ sltu,
+ sltiu,
+
+ slli,
+ srli,
+ srai,
+
+ slliw,
+ srliw,
+ sraiw,
+
+ sll,
+ srl,
+ sra,
+
+ sllw,
+ srlw,
+ sraw,
+
+ // Control Flow
+ jalr,
+ jal,
+
+ beq,
+ bne,
+
+    // Upper immediates
+    lui,
+    auipc,
+
+    // Memory
+ ld,
+ lw,
+ lh,
+ lb,
+ lbu,
+ lhu,
+ lwu,
+
+ sd,
+ sw,
+ sh,
+ sb,
+
+ // System
+ ebreak,
+ ecall,
+ unimp,
+ nop,
+
+ // M extension
+ mul,
+ mulh,
+ mulhu,
+ mulhsu,
+ mulw,
+
+ div,
+ divu,
+ divw,
+ divuw,
+
+ rem,
+ remu,
+ remw,
+ remuw,
+
+ // F extension (32-bit float)
+ fadds,
+ fsubs,
+ fmuls,
+ fdivs,
+
+ fmins,
+ fmaxs,
+
+ fsqrts,
+
+ flw,
+ fsw,
+
+ feqs,
+ flts,
+ fles,
+
+ // D extension (64-bit float)
+ faddd,
+ fsubd,
+ fmuld,
+ fdivd,
+
+ fmind,
+ fmaxd,
+
+ fsqrtd,
+
+ fld,
+ fsd,
+
+ feqd,
+ fltd,
+ fled,
+
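+    // Int <-> float conversions (F and D extensions)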
+ fcvtws,
+ fcvtwus,
+ fcvtls,
+ fcvtlus,
+
+ fcvtwd,
+ fcvtwud,
+ fcvtld,
+ fcvtlud,
+
+ fcvtsw,
+ fcvtswu,
+ fcvtsl,
+ fcvtslu,
+
+ fcvtdw,
+ fcvtdwu,
+ fcvtdl,
+ fcvtdlu,
+
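+    // Sign-injection, used to implement fneg (fsgnjn) and fabs (fsgnjx)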
+ fsgnjns,
+ fsgnjnd,
+
+ fsgnjxs,
+ fsgnjxd,
+
+ // Zicsr Extension Instructions
+ csrrs,
+
+ // V Extension Instructions
+ vsetvli,
+ vsetivli,
+ vaddvv,
+ vfaddvv,
+ vsubvv,
+ vfsubvv,
+ vmulvv,
+ vfmulvv,
+ vslidedownvx,
+
+ vle8v,
+ vle16v,
+ vle32v,
+ vle64v,
+
+ vse8v,
+ vse16v,
+ vse32v,
+ vse64v,
+
+ vadcvv,
+ vmvvx,
+
+ // Zbb Extension Instructions
+ clz,
+ clzw,
+ cpop,
+ cpopw,
+
+    // Fences (base ISA)
+    fence,
+    fencetso,
+
+    // A Extension Instructions
+ lrw,
+ scw,
+ amoswapw,
+ amoaddw,
+ amoandw,
+ amoorw,
+ amoxorw,
+ amomaxw,
+ amominw,
+ amomaxuw,
+ amominuw,
+
+ lrd,
+ scd,
+ amoswapd,
+ amoaddd,
+ amoandd,
+ amoord,
+ amoxord,
+ amomaxd,
+ amomind,
+ amomaxud,
+ amominud,
+
+ // Pseudo-instructions. Used for anything that isn't 1:1 with an
+ // assembly instruction.
+
+ /// Pseudo-instruction that will generate a backpatched
+ /// function prologue.
+ pseudo_prologue,
+    /// Pseudo-instruction that will generate a backpatched
+    /// function epilogue.
+    pseudo_epilogue,
+
+    /// Pseudo-instruction: End of prologue.
+    pseudo_dbg_prologue_end,
+    /// Pseudo-instruction: Beginning of epilogue.
+    pseudo_dbg_epilogue_begin,
+    /// Pseudo-instruction: Update debug line and column.
+    pseudo_dbg_line_column,
+
+    /// Pseudo-instruction that loads from memory into a register.
+    pseudo_load_rm,
+    /// Pseudo-instruction that stores from a register into memory.
+    pseudo_store_rm,
+ /// Pseudo-instruction that loads the address of memory into a register.
+ pseudo_lea_rm,
+ /// Jumps. Uses `inst` payload.
+ pseudo_j,
+ /// Dead inst, ignored by the emitter.
+ pseudo_dead,
+ /// Loads the address of a value that hasn't yet been allocated in memory.
+ pseudo_load_symbol,
+ /// Loads the address of a TLV.
+ pseudo_load_tlv,
+
+ /// Moves the value of rs1 to rd.
+ pseudo_mv,
+
+ pseudo_restore_regs,
+ pseudo_spill_regs,
+
+ pseudo_compare,
+ pseudo_not,
+ pseudo_extern_fn_reloc,
+};
+
+pub const Pseudo = enum(u8) {
+ li,
+ mv,
+ tail,
+ beqz,
+ ret,
+};
diff --git a/src/codegen.zig b/src/codegen.zig
index ce1488f020..9c3fd1914b 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -106,7 +106,9 @@ pub fn generateLazyFunction(
const target = namespace.fileScope(zcu).mod.resolved_target.result;
switch (target_util.zigBackend(target, false)) {
else => unreachable,
- inline .stage2_x86_64 => |backend| {
+ inline .stage2_x86_64,
+ .stage2_riscv64,
+ => |backend| {
dev.check(devFeatureForBackend(backend));
return importBackend(backend).generateLazy(lf, pt, src_loc, lazy_sym, code, debug_output);
},
diff --git a/src/link/riscv.zig b/src/link/riscv.zig
index 5107992b48..bf23010c80 100644
--- a/src/link/riscv.zig
+++ b/src/link/riscv.zig
@@ -25,47 +25,27 @@ pub fn writeAddend(
}
pub fn writeInstU(code: *[4]u8, value: u32) void {
- var data = Encoding.Data{
- .U = mem.bytesToValue(std.meta.TagPayload(
- Encoding.Data,
- Encoding.Data.U,
- ), code),
- };
+ var data: Instruction = .{ .U = mem.bytesToValue(std.meta.TagPayload(Instruction, .U), code) };
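+    // The +0x800 pre-compensates for the sign extension of the low 12 bits
+    // applied by the paired lo12 instruction (%hi/%lo-style relocation pairs).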
const compensated: u32 = @bitCast(@as(i32, @bitCast(value)) + 0x800);
data.U.imm12_31 = bitSlice(compensated, 31, 12);
mem.writeInt(u32, code, data.toU32(), .little);
}
pub fn writeInstI(code: *[4]u8, value: u32) void {
- var data = Encoding.Data{
- .I = mem.bytesToValue(std.meta.TagPayload(
- Encoding.Data,
- Encoding.Data.I,
- ), code),
- };
+ var data: Instruction = .{ .I = mem.bytesToValue(std.meta.TagPayload(Instruction, .I), code) };
data.I.imm0_11 = bitSlice(value, 11, 0);
mem.writeInt(u32, code, data.toU32(), .little);
}
pub fn writeInstS(code: *[4]u8, value: u32) void {
- var data = Encoding.Data{
- .S = mem.bytesToValue(std.meta.TagPayload(
- Encoding.Data,
- Encoding.Data.S,
- ), code),
- };
+ var data: Instruction = .{ .S = mem.bytesToValue(std.meta.TagPayload(Instruction, .S), code) };
data.S.imm0_4 = bitSlice(value, 4, 0);
data.S.imm5_11 = bitSlice(value, 11, 5);
mem.writeInt(u32, code, data.toU32(), .little);
}
pub fn writeInstJ(code: *[4]u8, value: u32) void {
- var data = Encoding.Data{
- .J = mem.bytesToValue(std.meta.TagPayload(
- Encoding.Data,
- Encoding.Data.J,
- ), code),
- };
+ var data: Instruction = .{ .J = mem.bytesToValue(std.meta.TagPayload(Instruction, .J), code) };
data.J.imm1_10 = bitSlice(value, 10, 1);
data.J.imm11 = bitSlice(value, 11, 11);
data.J.imm12_19 = bitSlice(value, 19, 12);
@@ -74,12 +54,7 @@ pub fn writeInstJ(code: *[4]u8, value: u32) void {
}
pub fn writeInstB(code: *[4]u8, value: u32) void {
- var data = Encoding.Data{
- .B = mem.bytesToValue(std.meta.TagPayload(
- Encoding.Data,
- Encoding.Data.B,
- ), code),
- };
+ var data: Instruction = .{ .B = mem.bytesToValue(std.meta.TagPayload(Instruction, .B), code) };
data.B.imm1_4 = bitSlice(value, 4, 1);
data.B.imm5_10 = bitSlice(value, 10, 5);
data.B.imm11 = bitSlice(value, 11, 11);
@@ -109,9 +84,8 @@ pub const RiscvEflags = packed struct(u32) {
_unused: u8,
};
-const encoder = @import("../arch/riscv64/encoder.zig");
-const Encoding = @import("../arch/riscv64/Encoding.zig");
const mem = std.mem;
const std = @import("std");
-pub const Instruction = encoder.Instruction;
+const encoding = @import("../arch/riscv64/encoding.zig");
+const Instruction = encoding.Instruction;
diff --git a/test/behavior/abs.zig b/test/behavior/abs.zig
index 980d9446ef..802b755c4c 100644
--- a/test/behavior/abs.zig
+++ b/test/behavior/abs.zig
@@ -6,6 +6,7 @@ test "@abs integers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime testAbsIntegers();
try testAbsIntegers();
diff --git a/test/behavior/align.zig b/test/behavior/align.zig
index 0b588ce091..83ebf7ec86 100644
--- a/test/behavior/align.zig
+++ b/test/behavior/align.zig
@@ -16,7 +16,6 @@ test "global variable alignment" {
}
test "large alignment of local constant" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // flaky
@@ -26,7 +25,6 @@ test "large alignment of local constant" {
}
test "slicing array of length 1 can not assume runtime index is always zero" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // flaky
@@ -511,7 +509,6 @@ test "read 128-bit field from default aligned struct in global memory" {
}
test "struct field explicit alignment" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/array.zig b/test/behavior/array.zig
index f6d59ae0fa..a01e624a5d 100644
--- a/test/behavior/array.zig
+++ b/test/behavior/array.zig
@@ -22,7 +22,6 @@ test "arrays" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var array: [5]u32 = undefined;
@@ -160,7 +159,6 @@ test "array len field" {
test "array with sentinels" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest(is_ct: bool) !void {
@@ -532,7 +530,6 @@ test "sentinel element count towards the ABI size calculation" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -614,7 +611,6 @@ test "type coercion of pointer to anon struct literal to pointer to array" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const U = union {
@@ -667,7 +663,6 @@ test "array init of container level array variable" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
var pair: [2]usize = .{ 1, 2 };
@@ -688,8 +683,6 @@ test "array init of container level array variable" {
}
test "runtime initialized sentinel-terminated array literal" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
var c: u16 = 300;
_ = &c;
const f = &[_:0x9999]u16{c};
@@ -776,8 +769,6 @@ test "array init with no result pointer sets field result types" {
}
test "runtime side-effects in comptime-known array init" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
var side_effects: u4 = 0;
const init = [4]u4{
blk: {
@@ -802,8 +793,6 @@ test "runtime side-effects in comptime-known array init" {
}
test "slice initialized through reference to anonymous array init provides result types" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
var my_u32: u32 = 123;
var my_u64: u64 = 456;
_ = .{ &my_u32, &my_u64 };
@@ -817,8 +806,6 @@ test "slice initialized through reference to anonymous array init provides resul
}
test "sentinel-terminated slice initialized through reference to anonymous array init provides result types" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
var my_u32: u32 = 123;
var my_u64: u64 = 456;
_ = .{ &my_u32, &my_u64 };
@@ -869,8 +856,6 @@ test "many-item sentinel-terminated pointer initialized through reference to ano
}
test "pointer to array initialized through reference to anonymous array init provides result types" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
var my_u32: u32 = 123;
var my_u64: u64 = 456;
_ = .{ &my_u32, &my_u64 };
@@ -884,8 +869,6 @@ test "pointer to array initialized through reference to anonymous array init pro
}
test "pointer to sentinel-terminated array initialized through reference to anonymous array init provides result types" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
var my_u32: u32 = 123;
var my_u64: u64 = 456;
_ = .{ &my_u32, &my_u64 };
@@ -912,7 +895,6 @@ test "copied array element doesn't alias source" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: [10][10]u32 = undefined;
@@ -925,7 +907,6 @@ test "copied array element doesn't alias source" {
test "array initialized with string literal" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
a: u32,
@@ -993,7 +974,6 @@ test "accessing multidimensional global array at comptime" {
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const array = [_][]const []const u8{
diff --git a/test/behavior/atomics.zig b/test/behavior/atomics.zig
index c9d7254ef2..393829a989 100644
--- a/test/behavior/atomics.zig
+++ b/test/behavior/atomics.zig
@@ -15,7 +15,6 @@ test "cmpxchg" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testCmpxchg();
try comptime testCmpxchg();
@@ -108,7 +107,6 @@ test "cmpxchg with ignored result" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: i32 = 1234;
@@ -153,7 +151,6 @@ test "cmpxchg on a global variable" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
// https://github.com/ziglang/zig/issues/10627
@@ -169,7 +166,6 @@ test "atomic load and rmw with enum" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Value = enum(u8) { a, b, c };
var x = Value.a;
@@ -205,7 +201,6 @@ test "atomicrmw with floats" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
// https://github.com/ziglang/zig/issues/10627
diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig
index 05d6549683..90d12e6858 100644
--- a/test/behavior/basic.zig
+++ b/test/behavior/basic.zig
@@ -16,8 +16,6 @@ test "empty function with comments" {
}
test "truncate" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
try expect(testTruncate(0x10fd) == 0xfd);
comptime assert(testTruncate(0x10fd) == 0xfd);
}
@@ -27,7 +25,6 @@ fn testTruncate(x: u32) u8 {
test "truncate to non-power-of-two integers" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testTrunc(u32, u1, 0b10101, 0b1);
try testTrunc(u32, u1, 0b10110, 0b0);
@@ -45,7 +42,6 @@ test "truncate to non-power-of-two integers from 128-bit" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testTrunc(u128, u1, 0xffffffff_ffffffff_ffffffff_01010101, 0x01);
try testTrunc(u128, u1, 0xffffffff_ffffffff_ffffffff_01010110, 0x00);
@@ -224,7 +220,6 @@ const OpaqueB = opaque {};
test "opaque types" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(*OpaqueA != *OpaqueB);
@@ -376,7 +371,6 @@ test "take address of parameter" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testTakeAddressOfParameter(12.34);
}
@@ -401,7 +395,6 @@ test "array 2D const double ptr" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const rect_2d_vertexes = [_][1]f32{
[_]f32{1.0},
@@ -414,7 +407,6 @@ test "array 2D const double ptr with offset" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const rect_2d_vertexes = [_][2]f32{
[_]f32{ 3.0, 4.239 },
@@ -427,7 +419,6 @@ test "array 3D const double ptr with offset" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const rect_3d_vertexes = [_][2][2]f32{
[_][2]f32{
@@ -622,7 +613,6 @@ var global_ptr = &gdt[0];
test "global constant is loaded with a runtime-known index" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -641,7 +631,6 @@ test "global constant is loaded with a runtime-known index" {
test "multiline string literal is null terminated" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const s1 =
\\one
@@ -656,7 +645,6 @@ test "string escapes" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expectEqualStrings("\"", "\x22");
try expectEqualStrings("\'", "\x27");
@@ -789,7 +777,6 @@ test "discarding the result of various expressions" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn foo() !u32 {
@@ -1072,7 +1059,6 @@ test "returning an opaque type from a function" {
test "orelse coercion as function argument" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Loc = struct { start: i32 = -1 };
const Container = struct {
@@ -1186,8 +1172,6 @@ fn testUnsignedCmp(comptime T: type) !void {
}
test "integer compare <= 64 bits" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
inline for (.{ u8, u16, u32, u64, usize, u10, u20, u30, u60 }) |T| {
try testUnsignedCmp(T);
try comptime testUnsignedCmp(T);
@@ -1324,7 +1308,6 @@ test "break out of block based on comptime known values" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const source = "A-";
diff --git a/test/behavior/bit_shifting.zig b/test/behavior/bit_shifting.zig
index 9d203dcfe3..e2b63b622b 100644
--- a/test/behavior/bit_shifting.zig
+++ b/test/behavior/bit_shifting.zig
@@ -65,7 +65,6 @@ test "sharded table" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
// realistic 16-way sharding
try testShardedTable(u32, 4, 8);
diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig
index cc84a948d1..1cb72ac42b 100644
--- a/test/behavior/bitcast.zig
+++ b/test/behavior/bitcast.zig
@@ -10,7 +10,6 @@ const native_endian = builtin.target.cpu.arch.endian();
test "@bitCast iX -> uX (32, 64)" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const bit_values = [_]usize{ 32, 64 };
@@ -165,7 +164,6 @@ test "@bitCast packed structs at runtime and comptime" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Full = packed struct {
number: u16,
@@ -491,7 +489,6 @@ test "@bitCast of packed struct of bools all true" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const P = packed struct {
b0: bool,
diff --git a/test/behavior/call.zig b/test/behavior/call.zig
index 8636955215..4a6e369918 100644
--- a/test/behavior/call.zig
+++ b/test/behavior/call.zig
@@ -342,7 +342,6 @@ test "inline call preserves tail call" {
test "inline call doesn't re-evaluate non generic struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn foo(f: struct { a: u8, b: u8 }) !void {
@@ -441,7 +440,6 @@ test "non-anytype generic parameters provide result type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn f(comptime T: type, y: T) !void {
diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig
index 6cc881b64d..8118e6ad76 100644
--- a/test/behavior/cast.zig
+++ b/test/behavior/cast.zig
@@ -24,7 +24,6 @@ test "peer type resolution: ?T and T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(peerTypeTAndOptionalT(true, false).? == 0);
try expect(peerTypeTAndOptionalT(false, false).? == 3);
@@ -104,7 +103,6 @@ test "@floatFromInt" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -163,7 +161,6 @@ test "@intFromFloat" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testIntFromFloats();
try comptime testIntFromFloats();
@@ -303,7 +300,6 @@ test "peer result null and comptime_int" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn blah(n: i32) ?i32 {
@@ -372,7 +368,6 @@ test "return u8 coercing into ?u32 return type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -450,7 +445,6 @@ test "implicitly cast from T to anyerror!?T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try castToOptionalTypeError(1);
try comptime castToOptionalTypeError(1);
@@ -602,7 +596,6 @@ fn testCastPtrOfArrayToSliceAndPtr() !void {
test "cast *[1][*]const u8 to [*]const ?[*]const u8" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const window_name = [1][*]const u8{"window name"};
const x: [*]const ?[*]const u8 = &window_name;
@@ -668,7 +661,6 @@ test "@floatCast cast down" {
test "peer type resolution: unreachable, error set, unreachable" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Error = error{
FileDescriptorAlreadyPresentInSet,
@@ -763,7 +755,6 @@ test "peer type resolution: error union and error set" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const a: error{Three} = undefined;
const b: error{ One, Two }!u32 = undefined;
@@ -957,7 +948,6 @@ test "peer cast [:x]T to [*:x]T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -1023,7 +1013,6 @@ test "variable initialization uses result locations properly with regards to the
test "cast between C pointer with different but compatible types" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn foo(arg: [*]c_ushort) u16 {
@@ -1219,7 +1208,6 @@ test "implicitly cast from [N]T to ?[]const T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(mem.eql(u8, castToOptionalSlice().?, "hi"));
comptime assert(mem.eql(u8, castToOptionalSlice().?, "hi"));
@@ -1544,7 +1532,6 @@ test "cast typed undefined to int" {
test "implicit cast from [:0]T to [*c]T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a: [:0]const u8 = "foo";
_ = &a;
@@ -1753,7 +1740,6 @@ test "peer type resolution: array and vector with same child type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var arr: [2]u32 = .{ 0, 1 };
var vec: @Vector(2, u32) = .{ 2, 3 };
@@ -1845,7 +1831,6 @@ test "peer type resolution: three-way resolution combines error set and optional
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const E = error{Foo};
var a: E = error.Foo;
@@ -1913,7 +1898,6 @@ test "peer type resolution: optional fixed-width int and comptime_int" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a: ?i32 = 42;
_ = &a;
@@ -1960,7 +1944,6 @@ test "peer type resolution: vector and tuple" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var vec: @Vector(3, i32) = .{ 1, 2, 3 };
_ = &vec;
@@ -1985,7 +1968,6 @@ test "peer type resolution: vector and array and tuple" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var vec: @Vector(2, i8) = .{ 10, 20 };
var arr: [2]i8 = .{ 30, 40 };
@@ -2094,7 +2076,6 @@ test "peer type resolution: many compatible pointers" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var buf = "foo-3".*;
@@ -2216,7 +2197,6 @@ test "peer type resolution: pointer attributes are combined correctly" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var buf_a align(4) = "foo".*;
var buf_b align(4) = "bar".*;
@@ -2277,7 +2257,6 @@ test "cast builtins can wrap result in optional" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const MyEnum = enum(u32) { _ };
@@ -2586,7 +2565,6 @@ test "result information is preserved through many nested structures" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
diff --git a/test/behavior/cast_int.zig b/test/behavior/cast_int.zig
index 67834385d1..9faa123a62 100644
--- a/test/behavior/cast_int.zig
+++ b/test/behavior/cast_int.zig
@@ -35,8 +35,6 @@ test "coerce i8 to i32 and @intCast back" {
}
test "coerce non byte-sized integers accross 32bits boundary" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
{
var v: u21 = 6417;
_ = &v;
diff --git a/test/behavior/defer.zig b/test/behavior/defer.zig
index fc764f55e3..219e88b554 100644
--- a/test/behavior/defer.zig
+++ b/test/behavior/defer.zig
@@ -53,7 +53,6 @@ test "return variable while defer expression in scope to modify it" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -116,6 +115,7 @@ test "errdefer with payload" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn foo() !i32 {
@@ -138,6 +138,7 @@ test "reference to errdefer payload" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn foo() !i32 {
diff --git a/test/behavior/destructure.zig b/test/behavior/destructure.zig
index 3164d25187..43ddbb7a4d 100644
--- a/test/behavior/destructure.zig
+++ b/test/behavior/destructure.zig
@@ -23,8 +23,6 @@ test "simple destructure" {
}
test "destructure with comptime syntax" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
const S = struct {
fn doTheTest() !void {
{
diff --git a/test/behavior/enum.zig b/test/behavior/enum.zig
index 7972135bfa..354d74c4a6 100644
--- a/test/behavior/enum.zig
+++ b/test/behavior/enum.zig
@@ -618,7 +618,6 @@ test "enum with specified tag values" {
test "non-exhaustive enum" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const E = enum(u8) { a, b, _ };
@@ -934,7 +933,6 @@ const Bar = enum { A, B, C, D };
test "enum literal casting to error union with payload enum" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var bar: error{B}!Bar = undefined;
bar = .B; // should never cast to the error set
@@ -1076,7 +1074,6 @@ test "enum literal casting to optional" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var bar: ?Bar = undefined;
bar = .B;
@@ -1105,7 +1102,6 @@ test "bit field access with enum fields" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var data = bit_field_1;
try expect(getA(&data) == A.Two);
@@ -1223,8 +1219,6 @@ test "enum tag from a local variable" {
}
test "auto-numbered enum with signed tag type" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
const E = enum(i32) { a, b };
try std.testing.expectEqual(@as(i32, 0), @intFromEnum(E.a));
diff --git a/test/behavior/error.zig b/test/behavior/error.zig
index 9b6300e743..1903bac8f7 100644
--- a/test/behavior/error.zig
+++ b/test/behavior/error.zig
@@ -319,7 +319,6 @@ test "error inference with an empty set" {
test "error union peer type resolution" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testErrorUnionPeerTypeResolution(1);
}
@@ -403,7 +402,6 @@ test "nested error union function call in optional unwrap" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const Foo = struct {
@@ -450,7 +448,6 @@ test "nested error union function call in optional unwrap" {
test "return function call to error set from error union function" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn errorable() anyerror!i32 {
@@ -469,7 +466,6 @@ test "optional error set is the same size as error set" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
comptime assert(@sizeOf(?anyerror) == @sizeOf(anyerror));
comptime assert(@alignOf(?anyerror) == @alignOf(anyerror));
@@ -917,7 +913,6 @@ test "field access of anyerror results in smaller error set" {
test "optional error union return type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn foo() ?anyerror!u32 {
@@ -932,7 +927,6 @@ test "optional error union return type" {
test "optional error set return type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const E = error{ A, B };
const S = struct {
@@ -946,8 +940,6 @@ test "optional error set return type" {
}
test "optional error set function parameter" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
const S = struct {
fn doTheTest(a: ?anyerror) !void {
try std.testing.expect(a.? == error.OutOfMemory);
@@ -977,7 +969,6 @@ test "returning an error union containing a type with no runtime bits" {
test "try used in recursive function with inferred error set" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const Value = union(enum) {
@@ -1079,7 +1070,6 @@ test "result location initialization of error union with OPV payload" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
x: u0,
diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig
index fb916df3cf..b32191cbb2 100644
--- a/test/behavior/eval.zig
+++ b/test/behavior/eval.zig
@@ -782,7 +782,6 @@ test "array concatenation peer resolves element types - pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a = [2]u3{ 1, 7 };
var b = [3]u8{ 200, 225, 255 };
diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig
index f644465d63..f9f2579b0a 100644
--- a/test/behavior/floatop.zig
+++ b/test/behavior/floatop.zig
@@ -134,7 +134,6 @@ test "cmp f16" {
test "cmp f32/f64" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testCmp(f32);
try comptime testCmp(f32);
diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig
index 73ef9bdbfe..1e4039c1bb 100644
--- a/test/behavior/fn.zig
+++ b/test/behavior/fn.zig
@@ -181,7 +181,6 @@ test "function with complex callconv and return type expressions" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(fComplexCallconvRet(3).x == 9);
}
@@ -451,7 +450,6 @@ test "implicit cast function to function ptr" {
test "method call with optional and error union first param" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
x: i32 = 1234,
diff --git a/test/behavior/for.zig b/test/behavior/for.zig
index 4f873bbbe4..7614fd4683 100644
--- a/test/behavior/for.zig
+++ b/test/behavior/for.zig
@@ -112,7 +112,6 @@ test "for with null and T peer types and inferred result location type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest(slice: []const u8) !void {
@@ -154,7 +153,6 @@ test "for loop with pointer elem var" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const source = "abcdefg";
var target: [source.len]u8 = undefined;
@@ -228,7 +226,6 @@ test "else continue outer for" {
test "for loop with else branch" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
{
var x = [_]u32{ 1, 2 };
@@ -489,7 +486,6 @@ test "inferred alloc ptr of for loop" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
{
var cond = false;
diff --git a/test/behavior/generics.zig b/test/behavior/generics.zig
index 6bd627dfe3..67ec438d9e 100644
--- a/test/behavior/generics.zig
+++ b/test/behavior/generics.zig
@@ -19,7 +19,6 @@ fn checkSize(comptime T: type) usize {
test "simple generic fn" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(max(i32, 3, -1) == 3);
try expect(max(u8, 1, 100) == 100);
@@ -56,7 +55,6 @@ test "fn with comptime args" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(gimmeTheBigOne(1234, 5678) == 5678);
try expect(shouldCallSameInstance(34, 12) == 34);
@@ -67,7 +65,6 @@ test "anytype params" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(max_i32(12, 34) == 34);
try expect(max_f64(1.2, 3.4) == 3.4);
@@ -404,8 +401,6 @@ test "generic struct as parameter type" {
}
test "slice as parameter type" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
const S = struct {
fn internComptimeString(comptime str: []const u8) *const []const u8 {
return &struct {
@@ -503,7 +498,6 @@ test "union in struct captures argument" {
test "function argument tuple used as struct field" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn DeleagateWithContext(comptime Function: type) type {
diff --git a/test/behavior/globals.zig b/test/behavior/globals.zig
index f7a23b725f..89dc20c5c7 100644
--- a/test/behavior/globals.zig
+++ b/test/behavior/globals.zig
@@ -18,7 +18,6 @@ test "store to global vector" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(vpos[1] == 0.0);
vpos = @Vector(2, f32){ 0.0, 1.0 };
diff --git a/test/behavior/if.zig b/test/behavior/if.zig
index a82d9a5c61..69ad917e6a 100644
--- a/test/behavior/if.zig
+++ b/test/behavior/if.zig
@@ -82,7 +82,6 @@ test "const result loc, runtime if cond, else unreachable" {
test "if copies its payload" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -119,7 +118,6 @@ test "if peer expressions inferred optional type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var self: []const u8 = "abcdef";
var index: usize = 0;
@@ -147,8 +145,6 @@ test "if-else expression with runtime condition result location is inferred opti
}
test "result location with inferred type ends up being pointer to comptime_int" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
var a: ?u32 = 1234;
var b: u32 = 2000;
_ = .{ &a, &b };
@@ -194,8 +190,6 @@ test "if value shouldn't be load-elided if used later (structs)" {
}
test "if value shouldn't be load-elided if used later (optionals)" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
var a: ?i32 = 1;
var b: ?i32 = 1;
diff --git a/test/behavior/math.zig b/test/behavior/math.zig
index cd110bc80d..9d911c556b 100644
--- a/test/behavior/math.zig
+++ b/test/behavior/math.zig
@@ -65,7 +65,6 @@ test "@clz" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testClz();
try comptime testClz();
@@ -663,8 +662,6 @@ fn rem(comptime T: type, a: T, b: T) T {
}
test "unsigned wrapping" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
try testUnsignedWrappingEval(maxInt(u32));
try comptime testUnsignedWrappingEval(maxInt(u32));
}
@@ -676,8 +673,6 @@ fn testUnsignedWrappingEval(x: u32) !void {
}
test "signed wrapping" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
try testSignedWrappingEval(maxInt(i32));
try comptime testSignedWrappingEval(maxInt(i32));
}
@@ -725,7 +720,6 @@ fn negateWrap(comptime T: type, x: T) T {
test "unsigned 64-bit division" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isMIPS()) {
// https://github.com/ziglang/zig/issues/16846
@@ -838,7 +832,6 @@ test "@addWithOverflow" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testAddWithOverflow(u8, 250, 100, 94, 1);
try testAddWithOverflow(u8, 100, 150, 250, 0);
@@ -899,7 +892,6 @@ test "@addWithOverflow > 64 bits" {
test "small int addition" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: u2 = 0;
try expect(x == 0);
@@ -927,7 +919,6 @@ fn testMulWithOverflow(comptime T: type, a: T, b: T, mul: T, bit: u1) !void {
test "basic @mulWithOverflow" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testMulWithOverflow(u8, 86, 3, 2, 1);
try testMulWithOverflow(u8, 85, 3, 255, 0);
diff --git a/test/behavior/memcpy.zig b/test/behavior/memcpy.zig
index a571b1e2f7..fa9203713d 100644
--- a/test/behavior/memcpy.zig
+++ b/test/behavior/memcpy.zig
@@ -7,7 +7,6 @@ test "memcpy and memset intrinsics" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testMemcpyMemset();
try comptime testMemcpyMemset();
@@ -29,7 +28,6 @@ test "@memcpy with both operands single-ptr-to-array, one is null-terminated" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testMemcpyBothSinglePtrArrayOneIsNullTerminated();
try comptime testMemcpyBothSinglePtrArrayOneIsNullTerminated();
@@ -50,7 +48,6 @@ test "@memcpy dest many pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testMemcpyDestManyPtr();
try comptime testMemcpyDestManyPtr();
@@ -73,7 +70,6 @@ test "@memcpy slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testMemcpySlice();
try comptime testMemcpySlice();
diff --git a/test/behavior/null.zig b/test/behavior/null.zig
index ebc390c36a..9815389838 100644
--- a/test/behavior/null.zig
+++ b/test/behavior/null.zig
@@ -53,7 +53,6 @@ test "maybe return" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try maybeReturnImpl();
try comptime maybeReturnImpl();
@@ -73,7 +72,6 @@ fn foo(x: ?i32) ?bool {
test "test null runtime" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testTestNullRuntime(null);
}
@@ -188,7 +186,6 @@ test "unwrap optional which is field of global var" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
struct_with_optional.field = null;
if (struct_with_optional.field) |payload| {
diff --git a/test/behavior/optional.zig b/test/behavior/optional.zig
index 80156d1dd6..53738a107b 100644
--- a/test/behavior/optional.zig
+++ b/test/behavior/optional.zig
@@ -9,7 +9,6 @@ const expectEqualStrings = std.testing.expectEqualStrings;
test "passing an optional integer as a parameter" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn entry() bool {
@@ -134,7 +133,6 @@ test "nested optional field in struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S2 = struct {
y: u8,
@@ -260,7 +258,6 @@ test "unwrap function call with optional pointer return value" {
test "nested orelse" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn entry() !void {
@@ -287,7 +284,6 @@ test "nested orelse" {
test "self-referential struct through a slice of optional" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const Node = struct {
@@ -344,7 +340,6 @@ test "0-bit child type coerced to optional return ptr result location" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -535,7 +530,6 @@ test "Optional slice size is optimized" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(@sizeOf(?[]u8) == @sizeOf([]u8));
var a: ?[]const u8 = null;
@@ -549,7 +543,6 @@ test "Optional slice passed to function" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn foo(a: ?[]const u8) !void {
@@ -566,7 +559,6 @@ test "Optional slice passed to function" {
test "peer type resolution in nested if expressions" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Thing = struct { n: i32 };
var a = false;
@@ -594,6 +586,7 @@ test "cast slice to const slice nested in error union and optional" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn inner() !?[]u8 {
@@ -632,7 +625,6 @@ test "result location initialization of optional with OPV payload" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
x: u0,
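
Note the direction of the hunk at "cast slice to const slice nested in error union and optional": while most tests here lose their riscv64 guard, this one gains it, as do a few below (pointers.zig, switch.zig, try.zig), marking paths the rewritten backend does not handle yet. A hedged sketch of the coercion shape involved, assuming a minimal form of the test (names are illustrative):

const S = struct {
    var buf: [1]u8 = .{0};
    fn inner() !?[]u8 {
        return &buf;
    }
    fn outer() !?[]const u8 {
        // The const-qualifying slice coercion must thread through
        // both the error-union and the optional wrapper.
        return inner();
    }
};
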
diff --git a/test/behavior/packed-struct.zig b/test/behavior/packed-struct.zig
index 60fcd5e9f6..9880aff14e 100644
--- a/test/behavior/packed-struct.zig
+++ b/test/behavior/packed-struct.zig
@@ -258,7 +258,6 @@ test "nested packed struct unaligned" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (native_endian != .little) return error.SkipZigTest; // Byte aligned packed struct field pointers have not been implemented yet
const S1 = packed struct {
@@ -331,7 +330,6 @@ test "byte-aligned field pointer offsets" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const A = packed struct {
@@ -434,7 +432,6 @@ test "nested packed struct field pointers" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // ubsan unaligned pointer access
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (native_endian != .little) return error.SkipZigTest; // Byte aligned packed struct field pointers have not been implemented yet
const S2 = packed struct {
@@ -962,7 +959,6 @@ test "pointer to container level packed struct field" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = packed struct(u32) {
test_bit: bool,
@@ -1008,8 +1004,6 @@ test "bitcast back and forth" {
}
test "field access of packed struct smaller than its abi size inside struct initialized with rls" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
// Originally reported at https://github.com/ziglang/zig/issues/14200
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
@@ -1028,8 +1022,6 @@ test "field access of packed struct smaller than its abi size inside struct init
}
test "modify nested packed struct aligned field" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
// Originally reported at https://github.com/ziglang/zig/issues/14632
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
@@ -1096,7 +1088,6 @@ test "packed struct used as part of anon decl name" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = packed struct { a: u0 = 0 };
var a: u8 = 0;
@@ -1164,7 +1155,6 @@ test "assignment to non-byte-aligned field in packed struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Frame = packed struct {
num: u20,
@@ -1275,7 +1265,6 @@ test "2-byte packed struct argument in C calling convention" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = packed struct(u16) {
x: u15 = 0,
diff --git a/test/behavior/packed-union.zig b/test/behavior/packed-union.zig
index b0b0bd7f39..aa4f98b783 100644
--- a/test/behavior/packed-union.zig
+++ b/test/behavior/packed-union.zig
@@ -8,7 +8,6 @@ test "flags in packed union" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testFlagsInPackedUnion();
try comptime testFlagsInPackedUnion();
@@ -51,7 +50,6 @@ test "flags in packed union at offset" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testFlagsInPackedUnionAtOffset();
try comptime testFlagsInPackedUnionAtOffset();
diff --git a/test/behavior/pointers.zig b/test/behavior/pointers.zig
index 36152cf81a..42e3ea0ae9 100644
--- a/test/behavior/pointers.zig
+++ b/test/behavior/pointers.zig
@@ -125,7 +125,6 @@ test "initialize const optional C pointer to null" {
test "assigning integer to C pointer" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: i32 = 0;
var y: i32 = 1;
@@ -143,7 +142,6 @@ test "assigning integer to C pointer" {
test "C pointer comparison and arithmetic" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -230,6 +228,7 @@ test "implicit cast error unions with non-optional to optional pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -343,7 +342,6 @@ test "array initialization types" {
test "null terminated pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -517,7 +515,6 @@ test "element pointer to slice" {
test "element pointer arithmetic to slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -572,7 +569,6 @@ test "ptrCast comptime known slice to C pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const s: [:0]const u8 = "foo";
var p: [*c]const u8 = @ptrCast(s);
diff --git a/test/behavior/popcount.zig b/test/behavior/popcount.zig
index 56a2171083..1bf5f96515 100644
--- a/test/behavior/popcount.zig
+++ b/test/behavior/popcount.zig
@@ -8,7 +8,6 @@ test "@popCount integers" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime testPopCountIntegers();
try testPopCountIntegers();
diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig
index e1576ca302..18c876c3e0 100644
--- a/test/behavior/slice.zig
+++ b/test/behavior/slice.zig
@@ -30,7 +30,6 @@ comptime {
test "slicing" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var array: [20]i32 = undefined;
@@ -256,7 +255,6 @@ test "C pointer slice access" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var buf: [10]u32 = [1]u32{42} ** 10;
const c_ptr = @as([*c]const u32, @ptrCast(&buf));
@@ -836,7 +834,6 @@ test "global slice field access" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
var slice: []const u8 = undefined;
@@ -892,7 +889,6 @@ test "empty slice ptr is non null" {
test "slice decays to many pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var buf: [8]u8 = "abcdefg\x00".*;
const p: [*:0]const u8 = buf[0..7 :0];
@@ -903,7 +899,6 @@ test "write through pointer to optional slice arg" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn bar(foo: *?[]const u8) !void {
@@ -956,7 +951,6 @@ test "slicing slices gives correct result" {
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const foo = "1234";
const bar = foo[0..4];
diff --git a/test/behavior/src.zig b/test/behavior/src.zig
index 7c2b377d5b..ebf6ab06b0 100644
--- a/test/behavior/src.zig
+++ b/test/behavior/src.zig
@@ -17,7 +17,6 @@ test "@src" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try doTheTest();
}
@@ -38,8 +37,6 @@ test "@src used as a comptime parameter" {
}
test "@src in tuple passed to anytype function" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
const S = struct {
fn Foo(a: anytype) u32 {
return a[0].line;
diff --git a/test/behavior/string_literals.zig b/test/behavior/string_literals.zig
index a45403af97..13cceb2f83 100644
--- a/test/behavior/string_literals.zig
+++ b/test/behavior/string_literals.zig
@@ -34,7 +34,6 @@ const ptr_type_name: [*:0]const u8 = type_name;
test "@typeName() returns a string literal" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
try std.testing.expect(*const [type_name.len:0]u8 == @TypeOf(type_name));
@@ -65,7 +64,6 @@ fn testFnForSrc() std.builtin.SourceLocation {
test "@src() returns a struct containing 0-terminated string slices" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const src = testFnForSrc();
try std.testing.expect([:0]const u8 == @TypeOf(src.file));
diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig
index 97617a1fd1..c8dd22e98c 100644
--- a/test/behavior/struct.zig
+++ b/test/behavior/struct.zig
@@ -68,7 +68,6 @@ const SmallStruct = struct {
test "lower unnamed constants" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var foo = SmallStruct{ .a = 1, .b = 255 };
try expect(foo.first() == 1);
@@ -419,7 +418,6 @@ test "packed struct 24bits" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.cpu.arch == .wasm32) return error.SkipZigTest; // TODO
if (comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -506,7 +504,6 @@ test "packed struct fields are ordered from LSB to MSB" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var all: u64 = 0x7765443322221111;
var bytes: [8]u8 align(@alignOf(Bitfields)) = undefined;
@@ -527,7 +524,6 @@ test "implicit cast packed struct field to const ptr" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const LevelUpMove = packed struct {
move_id: u9,
@@ -593,7 +589,6 @@ test "bit field access" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var data = bit_field_1;
try expect(getA(&data) == 1);
@@ -650,7 +645,6 @@ test "packed array 24bits" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
comptime {
try expect(@sizeOf([9]Foo32Bits) == 9 * 4);
@@ -718,7 +712,6 @@ test "pointer to packed struct member in a stack variable" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = packed struct {
a: u2,
@@ -1103,7 +1096,6 @@ test "packed struct with undefined initializers" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const P = packed struct {
@@ -1133,7 +1125,6 @@ test "packed struct with undefined initializers" {
test "for loop over pointers to struct, getting field from struct pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const Foo = struct {
@@ -1243,7 +1234,6 @@ test "typed init through error unions and optionals" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
a: u32,
@@ -1465,8 +1455,6 @@ test "struct field has a pointer to an aligned version of itself" {
}
test "struct has only one reference" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
const S = struct {
fn optionalStructParam(_: ?struct { x: u8 }) void {}
fn errorUnionStructParam(_: error{}!struct { x: u8 }) void {}
@@ -1573,7 +1561,6 @@ test "no dependency loop on optional field wrapped in generic function" {
test "optional field init with tuple" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
a: ?struct { b: u32 },
@@ -1588,7 +1575,6 @@ test "optional field init with tuple" {
test "if inside struct init inside if" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const MyStruct = struct { x: u32 };
const b: u32 = 5;
@@ -1770,8 +1756,6 @@ test "struct init with no result pointer sets field result types" {
}
test "runtime side-effects in comptime-known struct init" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
var side_effects: u4 = 0;
const S = struct { a: u4, b: u4, c: u4, d: u4 };
const init = S{
@@ -1797,8 +1781,6 @@ test "runtime side-effects in comptime-known struct init" {
}
test "pointer to struct initialized through reference to anonymous initializer provides result types" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
const S = struct { a: u8, b: u16, c: *const anyopaque };
var my_u16: u16 = 0xABCD;
_ = &my_u16;
@@ -1995,7 +1977,6 @@ test "runtime call in nested initializer" {
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Holder = struct {
array: []const u8,
diff --git a/test/behavior/switch.zig b/test/behavior/switch.zig
index 1275d0f433..1cec0dfad4 100644
--- a/test/behavior/switch.zig
+++ b/test/behavior/switch.zig
@@ -316,7 +316,6 @@ test "switch on union with some prongs capturing" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const X = union(enum) {
a,
@@ -427,6 +426,7 @@ test "else prong of switch on error set excludes other cases" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -462,6 +462,7 @@ test "switch prongs with error set cases make a new error set type for capture v
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -516,7 +517,6 @@ test "switch with null and T peer types and inferred result location type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest(c: u8) !void {
@@ -537,7 +537,6 @@ test "switch prongs with cases with identical payload types" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Union = union(enum) {
A: usize,
@@ -781,8 +780,6 @@ test "comptime inline switch" {
}
test "switch capture peer type resolution" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
const U = union(enum) {
a: u32,
b: u64,
@@ -798,8 +795,6 @@ test "switch capture peer type resolution" {
}
test "switch capture peer type resolution for in-memory coercible payloads" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
const T1 = c_int;
const T2 = @Type(@typeInfo(T1));
@@ -821,7 +816,6 @@ test "switch capture peer type resolution for in-memory coercible payloads" {
test "switch pointer capture peer type resolution" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const T1 = c_int;
const T2 = @Type(@typeInfo(T1));
@@ -924,8 +918,6 @@ test "switch prong captures range" {
}
test "prong with inline call to unreachable" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
const U = union(enum) {
void: void,
bool: bool,
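
Two switch.zig tests above likewise gain a guard rather than losing one; both exercise prongs over error sets where the capture receives a freshly constructed error-set type. A sketch of that construct, assuming a minimal form (names illustrative):

fn classify(e: error{ A, B, C }) u8 {
    return switch (e) {
        // `err` is typed with a new error set containing only A and B,
        // which the backend must materialize for the capture.
        error.A, error.B => |err| if (err == error.A) @as(u8, 1) else 2,
        error.C => 3,
    };
}
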
diff --git a/test/behavior/switch_prong_implicit_cast.zig b/test/behavior/switch_prong_implicit_cast.zig
index 2281ddd448..54107bb6bd 100644
--- a/test/behavior/switch_prong_implicit_cast.zig
+++ b/test/behavior/switch_prong_implicit_cast.zig
@@ -18,7 +18,6 @@ test "switch prong implicit cast" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const result = switch (foo(2) catch unreachable) {
FormValue.One => false,
diff --git a/test/behavior/threadlocal.zig b/test/behavior/threadlocal.zig
index 87daebda78..f91e10d12d 100644
--- a/test/behavior/threadlocal.zig
+++ b/test/behavior/threadlocal.zig
@@ -6,7 +6,6 @@ test "thread local variable" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) switch (builtin.cpu.arch) {
.x86_64, .x86 => {},
else => return error.SkipZigTest,
@@ -29,7 +28,6 @@ test "pointer to thread local array" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) switch (builtin.cpu.arch) {
.x86_64, .x86 => {},
else => return error.SkipZigTest,
@@ -47,7 +45,6 @@ test "reference a global threadlocal variable" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) switch (builtin.cpu.arch) {
.x86_64, .x86 => {},
else => return error.SkipZigTest,
diff --git a/test/behavior/try.zig b/test/behavior/try.zig
index cc76658e93..53fdc48934 100644
--- a/test/behavior/try.zig
+++ b/test/behavior/try.zig
@@ -51,6 +51,7 @@ test "`try`ing an if/else expression" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn getError() !void {
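
try.zig also gains a guard, for "`try`ing an if/else expression". A sketch of the construct under test, assuming a minimal form: `try` is applied to the peer-resolved error union produced by the whole if/else expression, not to either branch alone.

const std = @import("std");

fn fallible(ok: bool) anyerror!u32 {
    return if (ok) 42 else error.Nope;
}

test "sketch: try on an if/else expression" {
    var cond = true;
    _ = &cond;
    const v = try if (cond) fallible(true) else fallible(false);
    try std.testing.expect(v == 42);
}
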
diff --git a/test/behavior/tuple.zig b/test/behavior/tuple.zig
index 5cab5e9375..495e00c409 100644
--- a/test/behavior/tuple.zig
+++ b/test/behavior/tuple.zig
@@ -280,7 +280,6 @@ test "tuple in tuple passed to generic function" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn pair(x: f32, y: f32) std.meta.Tuple(&.{ f32, f32 }) {
@@ -300,7 +299,6 @@ test "coerce tuple to tuple" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const T = std.meta.Tuple(&.{u8});
const S = struct {
@@ -315,7 +313,6 @@ test "tuple type with void field" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const T = std.meta.Tuple(&[_]type{void});
const x = T{{}};
@@ -352,7 +349,6 @@ test "zero sized struct in tuple handled correctly" {
test "tuple type with void field and a runtime field" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const T = std.meta.Tuple(&[_]type{ usize, void });
var t: T = .{ 5, {} };
@@ -409,7 +405,6 @@ test "nested runtime conditionals in tuple initializer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var data: u8 = 0;
_ = &data;
diff --git a/test/behavior/tuple_declarations.zig b/test/behavior/tuple_declarations.zig
index dc9214e7bb..e6d5d76fc8 100644
--- a/test/behavior/tuple_declarations.zig
+++ b/test/behavior/tuple_declarations.zig
@@ -7,7 +7,6 @@ const expectEqualStrings = testing.expectEqualStrings;
test "tuple declaration type info" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
{
const T = struct { comptime u32 align(2) = 1, []const u8 };
@@ -36,7 +35,6 @@ test "tuple declaration type info" {
test "Tuple declaration usage" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const T = struct { u32, []const u8 };
var t: T = .{ 1, "foo" };
diff --git a/test/behavior/type_info.zig b/test/behavior/type_info.zig
index 9ac5e25e89..b650248e42 100644
--- a/test/behavior/type_info.zig
+++ b/test/behavior/type_info.zig
@@ -565,8 +565,6 @@ test "StructField.is_comptime" {
}
test "typeInfo resolves usingnamespace declarations" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
const A = struct {
pub const f1 = 42;
};
@@ -592,7 +590,6 @@ test "value from struct @typeInfo default_value can be loaded at comptime" {
test "@typeInfo decls and usingnamespace" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const A = struct {
pub const x = 5;
@@ -633,8 +630,6 @@ test "type info of tuple of string literal default value" {
}
test "@typeInfo only contains pub decls" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
const other = struct {
const std = @import("std");
diff --git a/test/behavior/typename.zig b/test/behavior/typename.zig
index b08de5484e..cd83e40485 100644
--- a/test/behavior/typename.zig
+++ b/test/behavior/typename.zig
@@ -16,7 +16,6 @@ test "anon fn param" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
// https://github.com/ziglang/zig/issues/9339
try expectEqualStringsIgnoreDigits(
@@ -42,7 +41,6 @@ test "anon field init" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const Foo = .{
@@ -69,7 +67,6 @@ test "basic" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expectEqualStrings("i64", @typeName(i64));
try expectEqualStrings("*usize", @typeName(*usize));
@@ -91,7 +88,6 @@ test "top level decl" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
try expectEqualStrings(
@@ -142,7 +138,6 @@ test "fn param" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
// https://github.com/ziglang/zig/issues/675
@@ -223,7 +218,6 @@ test "local variable" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const Foo = struct { a: u32 };
@@ -243,7 +237,6 @@ test "comptime parameters not converted to anytype in function type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const T = fn (fn (type) void, void) void;
try expectEqualStrings("fn (comptime fn (comptime type) void, void) void", @typeName(T));
@@ -253,7 +246,6 @@ test "anon name strategy used in sub expression" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const S = struct {
diff --git a/test/behavior/union.zig b/test/behavior/union.zig
index 413362caba..b1c36c42cc 100644
--- a/test/behavior/union.zig
+++ b/test/behavior/union.zig
@@ -100,7 +100,6 @@ const FooExtern = extern union {
test "basic extern unions" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var foo = FooExtern{ .int = 1 };
try expect(foo.int == 1);
@@ -172,7 +171,6 @@ test "constant tagged union with payload" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var empty = TaggedUnionWithPayload{ .Empty = {} };
var full = TaggedUnionWithPayload{ .Full = 13 };
@@ -656,7 +654,6 @@ test "union(enum(u32)) with specified and unspecified tag values" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
comptime assert(Tag(Tag(MultipleChoice2)) == u32);
try testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2{ .C = 123 });
@@ -808,7 +805,6 @@ test "return union init with void payload" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn entry() !void {
@@ -971,7 +967,6 @@ test "function call result coerces from tagged union to the tag" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const Arch = union(enum) {
@@ -1136,7 +1131,6 @@ test "@unionInit on union with tag but no fields" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
const Type = enum(u8) { no_op = 105 };
@@ -1700,7 +1694,6 @@ test "packed union field pointer has correct alignment" {
test "union with 128 bit integer" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const ValueTag = enum { int, other };
@@ -1917,7 +1910,6 @@ test "reinterpret packed union" {
test "reinterpret packed union inside packed struct" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const U = packed union {
a: u7,
@@ -2196,7 +2188,6 @@ test "copied union field doesn't alias source" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const U = union(enum) {
array: [10]u32,
diff --git a/test/behavior/union_with_members.zig b/test/behavior/union_with_members.zig
index 83ce38d5bc..186a30ad63 100644
--- a/test/behavior/union_with_members.zig
+++ b/test/behavior/union_with_members.zig
@@ -21,7 +21,6 @@ test "enum with members" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const a = ET{ .SINT = -42 };
const b = ET{ .UINT = 42 };
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index 6a1e36e9c4..bc38650f1e 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -102,7 +102,6 @@ test "vector float operators" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
@@ -119,7 +118,7 @@ test "vector float operators" {
try expectEqual(v + x, .{ 11, 22, 33, 44 });
try expectEqual(v - x, .{ 9, 18, 27, 36 });
try expectEqual(v * x, .{ 10, 40, 90, 160 });
- try expectEqual(-x, .{ -1, -2, -3, -4 });
+ if (builtin.zig_backend != .stage2_riscv64) try expectEqual(-x, .{ -1, -2, -3, -4 });
}
};
@@ -129,6 +128,8 @@ test "vector float operators" {
try S.doTheTest(f64);
try comptime S.doTheTest(f64);
+ if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
try S.doTheTest(f16);
try comptime S.doTheTest(f16);
@@ -394,7 +395,6 @@ test "load vector elements via comptime index" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -416,7 +416,6 @@ test "store vector elements via comptime index" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -444,7 +443,6 @@ test "load vector elements via runtime index" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -467,7 +465,6 @@ test "store vector elements via runtime index" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -1239,7 +1236,6 @@ test "loading the second vector from a slice of vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@setRuntimeSafety(false);
var small_bases = [2]@Vector(2, u8){
@@ -1326,7 +1322,6 @@ test "zero multiplicand" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const zeros = @Vector(2, u32){ 0.0, 0.0 };
var ones = @Vector(2, u32){ 1.0, 1.0 };
@@ -1487,7 +1482,6 @@ test "store vector with memset" {
test "addition of vectors represented as strings" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const V = @Vector(3, u8);
const foo: V = "foo".*;
@@ -1514,7 +1508,6 @@ test "vector pointer is indexable" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const V = @Vector(2, u32);
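
The "vector float operators" hunks above show a finer-grained alternative to the whole-test guard: only vector negation and the f16 variants stay skipped on riscv64, while the rest of the test now runs. A sketch of that inline-guard idiom, assuming std.testing (values illustrative):

const builtin = @import("builtin");
const expectEqual = @import("std").testing.expectEqual;

test "sketch: partially enabled vector ops" {
    const x: @Vector(4, f32) = .{ 1, 2, 3, 4 };
    try expectEqual(@as(@Vector(4, f32), .{ 2, 4, 6, 8 }), x + x);
    // Guard only the operation the backend cannot lower yet,
    // instead of skipping the entire test.
    if (builtin.zig_backend != .stage2_riscv64)
        try expectEqual(@as(@Vector(4, f32), .{ -1, -2, -3, -4 }), -x);
}
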
diff --git a/test/behavior/while.zig b/test/behavior/while.zig
index 71641ea265..532bac258d 100644
--- a/test/behavior/while.zig
+++ b/test/behavior/while.zig
@@ -106,7 +106,6 @@ fn testBreakOuter() void {
test "while copies its payload" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -208,7 +207,6 @@ test "while on optional with else result follow else prong" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const result = while (returnNull()) |value| {
break value;
@@ -220,7 +218,6 @@ test "while on optional with else result follow break prong" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const result = while (returnOptional(10)) |value| {
break value;
@@ -293,7 +290,6 @@ test "while optional 2 break statements and an else" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn entry(opt_t: ?bool, f: bool) !void {
@@ -392,7 +388,6 @@ test "breaking from a loop in an if statement" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn retOpt() ?u32 {
diff --git a/test/tests.zig b/test/tests.zig
index 7116233c7e..b4c720a1ff 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -439,7 +439,7 @@ const test_targets = blk: {
.target = std.Target.Query.parse(
.{
.arch_os_abi = "riscv64-linux-musl",
- .cpu_features = "baseline+v",
+ .cpu_features = "baseline+v+zbb",
},
) catch @panic("OOM"),
.use_llvm = false,
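
The only non-test change in this section: the self-hosted (use_llvm = false) riscv64-linux-musl test target now requires the Zbb bit-manipulation extension alongside V, presumably because the rewritten encoder emits Zbb instructions (clz/ctz/cpop, min/max) for builtins such as @clz and @popCount. A sketch of how code can assert that assumption at comptime (assumed usage, not taken from this diff):

const std = @import("std");
const builtin = @import("builtin");

// Comptime check that the compilation target includes Zbb.
const has_zbb = std.Target.riscv.featureSetHas(builtin.cpu.features, .zbb);

comptime {
    if (!has_zbb) @compileError("this sketch assumes a baseline+v+zbb target");
}
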