-rwxr-xr-x  ci/zinc/linux_test.sh  8
-rw-r--r--  doc/langref.html.in  3
-rw-r--r--  lib/std/hash.zig  14
-rw-r--r--  lib/std/hash_map.zig  13
-rw-r--r--  lib/std/special/compiler_rt/int.zig  7
-rw-r--r--  lib/std/special/compiler_rt/multi3.zig  2
-rw-r--r--  lib/std/special/compiler_rt/negsi2_test.zig  5
-rw-r--r--  lib/std/special/compiler_rt/paritydi2_test.zig  5
-rw-r--r--  lib/std/special/compiler_rt/paritysi2_test.zig  5
-rw-r--r--  lib/std/special/compiler_rt/parityti2_test.zig  5
-rw-r--r--  lib/std/special/compiler_rt/popcountdi2_test.zig  5
-rw-r--r--  lib/std/special/compiler_rt/popcountsi2_test.zig  5
-rw-r--r--  lib/std/special/compiler_rt/popcountti2_test.zig  5
-rw-r--r--  lib/std/special/compiler_rt/truncXfYf2_test.zig  5
-rw-r--r--  lib/std/special/compiler_rt/udivmod.zig  7
-rw-r--r--  lib/std/time.zig  2
-rw-r--r--  lib/std/valgrind.zig  4
-rw-r--r--  lib/std/zig/Ast.zig  148
-rw-r--r--  lib/std/zig/system/darwin.zig  2
-rw-r--r--  src/Air.zig  6
-rw-r--r--  src/AstGen.zig  15
-rw-r--r--  src/Compilation.zig  5
-rw-r--r--  src/Liveness.zig  1
-rw-r--r--  src/Module.zig  48
-rw-r--r--  src/Sema.zig  358
-rw-r--r--  src/Zir.zig  12
-rw-r--r--  src/arch/aarch64/CodeGen.zig  18
-rw-r--r--  src/arch/arm/CodeGen.zig  109
-rw-r--r--  src/arch/arm/Mir.zig  2
-rw-r--r--  src/arch/riscv64/CodeGen.zig  18
-rw-r--r--  src/arch/wasm/CodeGen.zig  427
-rw-r--r--  src/arch/wasm/Emit.zig  12
-rw-r--r--  src/arch/wasm/Mir.zig  24
-rw-r--r--  src/arch/x86_64/CodeGen.zig  18
-rw-r--r--  src/arch/x86_64/Isel.zig  11
-rw-r--r--  src/codegen.zig  12
-rw-r--r--  src/codegen/c.zig  17
-rw-r--r--  src/codegen/llvm.zig  146
-rw-r--r--  src/libcxx.zig  4
-rw-r--r--  src/link.zig  23
-rw-r--r--  src/link/MachO.zig  212
-rw-r--r--  src/link/Plan9.zig  25
-rw-r--r--  src/mingw.zig  2
-rw-r--r--  src/print_air.zig  1
-rw-r--r--  src/tracy.zig  7
-rw-r--r--  src/translate_c.zig  4
-rw-r--r--  src/type.zig  206
-rw-r--r--  src/value.zig  10
-rw-r--r--  test/behavior.zig  362
-rw-r--r--  test/behavior/bit_shifting.zig  1
-rw-r--r--  test/behavior/cast.zig  13
-rw-r--r--  test/behavior/cast_int.zig  10
-rw-r--r--  test/behavior/enum_stage1.zig  7
-rw-r--r--  test/behavior/error_llvm.zig  24
-rw-r--r--  test/behavior/error_stage1.zig  21
-rw-r--r--  test/behavior/import.zig  5
-rw-r--r--  test/behavior/int128.zig  6
-rw-r--r--  test/behavior/ref_var_in_if_after_if_2nd_switch_prong.zig  5
-rw-r--r--  test/behavior/reflection.zig  5
-rw-r--r--  test/behavior/sizeof_and_typeof.zig  113
-rw-r--r--  test/behavior/sizeof_and_typeof_stage1.zig  112
-rw-r--r--  test/behavior/translate_c_macros.zig  5
-rw-r--r--  test/behavior/translate_c_macros_stage1.zig  5
-rw-r--r--  test/behavior/widening.zig  5
-rw-r--r--  test/stage2/x86_64.zig  39
-rw-r--r--  test/standalone/install_raw_hex/build.zig  3
66 files changed, 1937 insertions, 807 deletions
diff --git a/ci/zinc/linux_test.sh b/ci/zinc/linux_test.sh
index 886fbdaf81..d9f42e6876 100755
--- a/ci/zinc/linux_test.sh
+++ b/ci/zinc/linux_test.sh
@@ -4,9 +4,11 @@
ZIG=$DEBUG_STAGING/bin/zig
-$ZIG test test/behavior.zig -fno-stage1 -fLLVM -I test
-$ZIG test test/behavior.zig -fno-stage1 -ofmt=c -I test
-$ZIG test test/behavior.zig -fno-stage1 -target wasm32-wasi --test-cmd wasmtime --test-cmd-bin
+$ZIG test test/behavior.zig -fno-stage1 -I test -fLLVM
+$ZIG test test/behavior.zig -fno-stage1 -I test -ofmt=c
+$ZIG test test/behavior.zig -fno-stage1 -I test -target wasm32-wasi --test-cmd wasmtime --test-cmd-bin
+$ZIG test test/behavior.zig -fno-stage1 -I test -target arm-linux --test-cmd qemu-arm --test-cmd-bin
+$ZIG test test/behavior.zig -fno-stage1 -I test
$ZIG build test-behavior -fqemu -fwasmtime
$ZIG build test-compiler-rt -fqemu -fwasmtime
diff --git a/doc/langref.html.in b/doc/langref.html.in
index 03e18378eb..825d03899e 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -8518,6 +8518,9 @@ test "@hasDecl" {
<li>{#syntax#}@import("builtin"){#endsyntax#} - Target-specific information
The command <code>zig build-exe --show-builtin</code> outputs the source to stdout for reference.
</li>
+            <li>{#syntax#}@import("root"){#endsyntax#} - Points to the root source file.
+            This is usually <code>src/main.zig</code>, but it depends on which file is chosen to be built.
+ </li>
</ul>
{#see_also|Compile Variables|@embedFile#}
{#header_close#}
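
Aside: a minimal sketch (not part of this diff) of the pattern the new entry
describes. A library can probe the root source file for declarations; the
`enable_tracing` decl below is hypothetical.

    // src/main.zig (the root source file)
    pub const enable_tracing = true; // hypothetical option read by a library

    // somewhere in a library
    const root = @import("root");
    const tracing = if (@hasDecl(root, "enable_tracing")) root.enable_tracing else false;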
diff --git a/lib/std/hash.zig b/lib/std/hash.zig
index f96d331d0f..2680a8e263 100644
--- a/lib/std/hash.zig
+++ b/lib/std/hash.zig
@@ -33,11 +33,11 @@ const wyhash = @import("hash/wyhash.zig");
pub const Wyhash = wyhash.Wyhash;
test "hash" {
- _ = @import("hash/adler.zig");
- _ = @import("hash/auto_hash.zig");
- _ = @import("hash/crc.zig");
- _ = @import("hash/fnv.zig");
- _ = @import("hash/murmur.zig");
- _ = @import("hash/cityhash.zig");
- _ = @import("hash/wyhash.zig");
+ _ = adler;
+ _ = auto_hash;
+ _ = crc;
+ _ = fnv;
+ _ = murmur;
+ _ = cityhash;
+ _ = wyhash;
}
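
Context for the change above: these namespaces are already imported once at the
top of the file, and `_ = name;` inside a test block is enough to pull that
namespace's tests into analysis. A minimal sketch of the pattern, with a
hypothetical local file `foo.zig`:

    const foo = @import("foo.zig");

    test {
        _ = foo; // referencing the namespace makes foo.zig's tests get analyzed
    }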
diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig
index 851df83f84..15b62e0d40 100644
--- a/lib/std/hash_map.zig
+++ b/lib/std/hash_map.zig
@@ -750,12 +750,19 @@ pub fn HashMapUnmanaged(
fingerprint: FingerPrint = free,
used: u1 = 0,
+ const slot_free = @bitCast(u8, Metadata{ .fingerprint = free });
+ const slot_tombstone = @bitCast(u8, Metadata{ .fingerprint = tombstone });
+
pub fn isUsed(self: Metadata) bool {
return self.used == 1;
}
pub fn isTombstone(self: Metadata) bool {
- return !self.isUsed() and self.fingerprint == tombstone;
+ return @bitCast(u8, self) == slot_tombstone;
+ }
+
+ pub fn isFree(self: Metadata) bool {
+ return @bitCast(u8, self) == slot_free;
}
pub fn takeFingerprint(hash: Hash) FingerPrint {
@@ -1115,7 +1122,7 @@ pub fn HashMapUnmanaged(
var idx = @truncate(usize, hash & mask);
var metadata = self.metadata.? + idx;
- while ((metadata[0].isUsed() or metadata[0].isTombstone()) and limit != 0) {
+ while (!metadata[0].isFree() and limit != 0) {
if (metadata[0].isUsed() and metadata[0].fingerprint == fingerprint) {
const test_key = &self.keys()[idx];
// If you get a compile error on this line, it means that your generic eql
@@ -1294,7 +1301,7 @@ pub fn HashMapUnmanaged(
var first_tombstone_idx: usize = self.capacity(); // invalid index
var metadata = self.metadata.? + idx;
- while ((metadata[0].isUsed() or metadata[0].isTombstone()) and limit != 0) {
+ while (!metadata[0].isFree() and limit != 0) {
if (metadata[0].isUsed() and metadata[0].fingerprint == fingerprint) {
const test_key = &self.keys()[idx];
// If you get a compile error on this line, it means that your generic eql
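
The `slot_free`/`slot_tombstone` constants above work because `Metadata` is a
one-byte packed struct, so a single whole-byte compare checks both the `used`
bit and the fingerprint at once. A standalone sketch, assuming the upstream
values `free = 0` and `tombstone = 1` for the 7-bit fingerprint:

    const std = @import("std");

    const FingerPrint = u7;
    const free: FingerPrint = 0;
    const tombstone: FingerPrint = 1;

    const Metadata = packed struct {
        fingerprint: FingerPrint = free,
        used: u1 = 0,
    };

    test "one-byte slot states" {
        const slot_free = @bitCast(u8, Metadata{ .fingerprint = free });
        const slot_tombstone = @bitCast(u8, Metadata{ .fingerprint = tombstone });
        // A used slot can never match either constant: its `used` bit is set.
        const used_slot = Metadata{ .fingerprint = tombstone, .used = 1 };
        try std.testing.expect(@bitCast(u8, used_slot) != slot_free);
        try std.testing.expect(@bitCast(u8, used_slot) != slot_tombstone);
    }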
diff --git a/lib/std/special/compiler_rt/int.zig b/lib/std/special/compiler_rt/int.zig
index e4d9df7a22..0f3400d37e 100644
--- a/lib/std/special/compiler_rt/int.zig
+++ b/lib/std/special/compiler_rt/int.zig
@@ -1,8 +1,9 @@
// Builtin functions that operate on integer types
const builtin = @import("builtin");
-const testing = @import("std").testing;
-const maxInt = @import("std").math.maxInt;
-const minInt = @import("std").math.minInt;
+const std = @import("std");
+const testing = std.testing;
+const maxInt = std.math.maxInt;
+const minInt = std.math.minInt;
const udivmod = @import("udivmod.zig").udivmod;
diff --git a/lib/std/special/compiler_rt/multi3.zig b/lib/std/special/compiler_rt/multi3.zig
index a05fb3bd6d..4e5c49730a 100644
--- a/lib/std/special/compiler_rt/multi3.zig
+++ b/lib/std/special/compiler_rt/multi3.zig
@@ -17,7 +17,7 @@ pub fn __multi3(a: i128, b: i128) callconv(.C) i128 {
return r.all;
}
-const v128 = @import("std").meta.Vector(2, u64);
+const v128 = std.meta.Vector(2, u64);
pub fn __multi3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 {
return @bitCast(v128, @call(.{ .modifier = .always_inline }, __multi3, .{
@bitCast(i128, a),
diff --git a/lib/std/special/compiler_rt/negsi2_test.zig b/lib/std/special/compiler_rt/negsi2_test.zig
index 9458ea995c..608a44c12f 100644
--- a/lib/std/special/compiler_rt/negsi2_test.zig
+++ b/lib/std/special/compiler_rt/negsi2_test.zig
@@ -1,7 +1,8 @@
+const std = @import("std");
const neg = @import("negXi2.zig");
-const testing = @import("std").testing;
+const testing = std.testing;
-const print = @import("std").debug.print;
+const print = std.debug.print;
fn test__negsi2(a: i32, expected: i32) !void {
var result = neg.__negsi2(a);
diff --git a/lib/std/special/compiler_rt/paritydi2_test.zig b/lib/std/special/compiler_rt/paritydi2_test.zig
index 7c481bbaef..a13abda5fe 100644
--- a/lib/std/special/compiler_rt/paritydi2_test.zig
+++ b/lib/std/special/compiler_rt/paritydi2_test.zig
@@ -1,5 +1,6 @@
+const std = @import("std");
const parity = @import("parity.zig");
-const testing = @import("std").testing;
+const testing = std.testing;
fn paritydi2Naive(a: i64) i32 {
var x = @bitCast(u64, a);
@@ -25,7 +26,7 @@ test "paritydi2" {
try test__paritydi2(@bitCast(i64, @as(u64, 0xffffffff_fffffffe)));
try test__paritydi2(@bitCast(i64, @as(u64, 0xffffffff_ffffffff)));
- const RndGen = @import("std").rand.DefaultPrng;
+ const RndGen = std.rand.DefaultPrng;
var rnd = RndGen.init(42);
var i: u32 = 0;
while (i < 10_000) : (i += 1) {
diff --git a/lib/std/special/compiler_rt/paritysi2_test.zig b/lib/std/special/compiler_rt/paritysi2_test.zig
index c4386bcf1f..f63854e34f 100644
--- a/lib/std/special/compiler_rt/paritysi2_test.zig
+++ b/lib/std/special/compiler_rt/paritysi2_test.zig
@@ -1,5 +1,6 @@
+const std = @import("std");
const parity = @import("parity.zig");
-const testing = @import("std").testing;
+const testing = std.testing;
fn paritysi2Naive(a: i32) i32 {
var x = @bitCast(u32, a);
@@ -25,7 +26,7 @@ test "paritysi2" {
try test__paritysi2(@bitCast(i32, @as(u32, 0xfffffffe)));
try test__paritysi2(@bitCast(i32, @as(u32, 0xffffffff)));
- const RndGen = @import("std").rand.DefaultPrng;
+ const RndGen = std.rand.DefaultPrng;
var rnd = RndGen.init(42);
var i: u32 = 0;
while (i < 10_000) : (i += 1) {
diff --git a/lib/std/special/compiler_rt/parityti2_test.zig b/lib/std/special/compiler_rt/parityti2_test.zig
index 0de07df31a..e018932555 100644
--- a/lib/std/special/compiler_rt/parityti2_test.zig
+++ b/lib/std/special/compiler_rt/parityti2_test.zig
@@ -1,5 +1,6 @@
+const std = @import("std");
const parity = @import("parity.zig");
-const testing = @import("std").testing;
+const testing = std.testing;
fn parityti2Naive(a: i128) i32 {
var x = @bitCast(u128, a);
@@ -25,7 +26,7 @@ test "parityti2" {
try test__parityti2(@bitCast(i128, @as(u128, 0xffffffff_ffffffff_ffffffff_fffffffe)));
try test__parityti2(@bitCast(i128, @as(u128, 0xffffffff_ffffffff_ffffffff_ffffffff)));
- const RndGen = @import("std").rand.DefaultPrng;
+ const RndGen = std.rand.DefaultPrng;
var rnd = RndGen.init(42);
var i: u32 = 0;
while (i < 10_000) : (i += 1) {
diff --git a/lib/std/special/compiler_rt/popcountdi2_test.zig b/lib/std/special/compiler_rt/popcountdi2_test.zig
index e20693987a..e02628e636 100644
--- a/lib/std/special/compiler_rt/popcountdi2_test.zig
+++ b/lib/std/special/compiler_rt/popcountdi2_test.zig
@@ -1,5 +1,6 @@
+const std = @import("std");
const popcount = @import("popcount.zig");
-const testing = @import("std").testing;
+const testing = std.testing;
fn popcountdi2Naive(a: i64) i32 {
var x = a;
@@ -24,7 +25,7 @@ test "popcountdi2" {
try test__popcountdi2(@bitCast(i64, @as(u64, 0xffffffff_fffffffe)));
try test__popcountdi2(@bitCast(i64, @as(u64, 0xffffffff_ffffffff)));
- const RndGen = @import("std").rand.DefaultPrng;
+ const RndGen = std.rand.DefaultPrng;
var rnd = RndGen.init(42);
var i: u32 = 0;
while (i < 10_000) : (i += 1) {
diff --git a/lib/std/special/compiler_rt/popcountsi2_test.zig b/lib/std/special/compiler_rt/popcountsi2_test.zig
index c0c92e396e..7606b1a97e 100644
--- a/lib/std/special/compiler_rt/popcountsi2_test.zig
+++ b/lib/std/special/compiler_rt/popcountsi2_test.zig
@@ -1,5 +1,6 @@
+const std = @import("std");
const popcount = @import("popcount.zig");
-const testing = @import("std").testing;
+const testing = std.testing;
fn popcountsi2Naive(a: i32) i32 {
var x = a;
@@ -24,7 +25,7 @@ test "popcountsi2" {
try test__popcountsi2(@bitCast(i32, @as(u32, 0xfffffffe)));
try test__popcountsi2(@bitCast(i32, @as(u32, 0xffffffff)));
- const RndGen = @import("std").rand.DefaultPrng;
+ const RndGen = std.rand.DefaultPrng;
var rnd = RndGen.init(42);
var i: u32 = 0;
while (i < 10_000) : (i += 1) {
diff --git a/lib/std/special/compiler_rt/popcountti2_test.zig b/lib/std/special/compiler_rt/popcountti2_test.zig
index 83f2a18e7d..fae2beccd4 100644
--- a/lib/std/special/compiler_rt/popcountti2_test.zig
+++ b/lib/std/special/compiler_rt/popcountti2_test.zig
@@ -1,5 +1,6 @@
+const std = @import("std");
const popcount = @import("popcount.zig");
-const testing = @import("std").testing;
+const testing = std.testing;
fn popcountti2Naive(a: i128) i32 {
var x = a;
@@ -24,7 +25,7 @@ test "popcountti2" {
try test__popcountti2(@bitCast(i128, @as(u128, 0xffffffff_ffffffff_ffffffff_fffffffe)));
try test__popcountti2(@bitCast(i128, @as(u128, 0xffffffff_ffffffff_ffffffff_ffffffff)));
- const RndGen = @import("std").rand.DefaultPrng;
+ const RndGen = std.rand.DefaultPrng;
var rnd = RndGen.init(42);
var i: u32 = 0;
while (i < 10_000) : (i += 1) {
diff --git a/lib/std/special/compiler_rt/truncXfYf2_test.zig b/lib/std/special/compiler_rt/truncXfYf2_test.zig
index 1464c3bfcb..3f11dd0380 100644
--- a/lib/std/special/compiler_rt/truncXfYf2_test.zig
+++ b/lib/std/special/compiler_rt/truncXfYf2_test.zig
@@ -1,3 +1,4 @@
+const std = @import("std");
const __truncsfhf2 = @import("truncXfYf2.zig").__truncsfhf2;
fn test__truncsfhf2(a: u32, expected: u16) !void {
@@ -217,7 +218,7 @@ fn test__truncdfsf2(a: f64, expected: u32) void {
}
}
- @import("std").debug.print("got 0x{x} wanted 0x{x}\n", .{ rep, expected });
+ std.debug.print("got 0x{x} wanted 0x{x}\n", .{ rep, expected });
@panic("__trunctfsf2 test failure");
}
@@ -248,7 +249,7 @@ fn test__trunctfhf2(a: f128, expected: u16) void {
return;
}
- @import("std").debug.print("got 0x{x} wanted 0x{x}\n", .{ rep, expected });
+ std.debug.print("got 0x{x} wanted 0x{x}\n", .{ rep, expected });
@panic("__trunctfhf2 test failure");
}
diff --git a/lib/std/special/compiler_rt/udivmod.zig b/lib/std/special/compiler_rt/udivmod.zig
index b2fe2048d4..d941c242d2 100644
--- a/lib/std/special/compiler_rt/udivmod.zig
+++ b/lib/std/special/compiler_rt/udivmod.zig
@@ -1,6 +1,7 @@
const builtin = @import("builtin");
const is_test = builtin.is_test;
const native_endian = builtin.cpu.arch.endian();
+const std = @import("std");
const low = switch (native_endian) {
.Big => 1,
@@ -13,9 +14,9 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
const double_int_bits = @typeInfo(DoubleInt).Int.bits;
const single_int_bits = @divExact(double_int_bits, 2);
- const SingleInt = @import("std").meta.Int(.unsigned, single_int_bits);
- const SignedDoubleInt = @import("std").meta.Int(.signed, double_int_bits);
- const Log2SingleInt = @import("std").math.Log2Int(SingleInt);
+ const SingleInt = std.meta.Int(.unsigned, single_int_bits);
+ const SignedDoubleInt = std.meta.Int(.signed, double_int_bits);
+ const Log2SingleInt = std.math.Log2Int(SingleInt);
const n = @ptrCast(*const [2]SingleInt, &a).*; // TODO issue #421
const d = @ptrCast(*const [2]SingleInt, &b).*; // TODO issue #421
diff --git a/lib/std/time.zig b/lib/std/time.zig
index b9580f5f3e..e8e1d1010c 100644
--- a/lib/std/time.zig
+++ b/lib/std/time.zig
@@ -287,5 +287,5 @@ test "Timer" {
}
test {
- _ = @import("time/epoch.zig");
+ _ = epoch;
}
diff --git a/lib/std/valgrind.zig b/lib/std/valgrind.zig
index 91034fa03e..7532e73e49 100644
--- a/lib/std/valgrind.zig
+++ b/lib/std/valgrind.zig
@@ -258,6 +258,6 @@ pub const memcheck = @import("valgrind/memcheck.zig");
pub const callgrind = @import("valgrind/callgrind.zig");
test {
- _ = @import("valgrind/memcheck.zig");
- _ = @import("valgrind/callgrind.zig");
+ _ = memcheck;
+ _ = callgrind;
}
diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig
index da8616ed9e..65772c87a8 100644
--- a/lib/std/zig/Ast.zig
+++ b/lib/std/zig/Ast.zig
@@ -16,7 +16,7 @@ const assert = std.debug.assert;
const testing = std.testing;
const mem = std.mem;
const Token = std.zig.Token;
-const Tree = @This();
+const Ast = @This();
pub const TokenIndex = u32;
pub const ByteOffset = u32;
@@ -34,7 +34,7 @@ pub const Location = struct {
line_end: usize,
};
-pub fn deinit(tree: *Tree, gpa: mem.Allocator) void {
+pub fn deinit(tree: *Ast, gpa: mem.Allocator) void {
tree.tokens.deinit(gpa);
tree.nodes.deinit(gpa);
gpa.free(tree.extra_data);
@@ -52,7 +52,7 @@ pub const RenderError = error{
/// for allocating extra stack memory if needed, because this function utilizes recursion.
/// Note: that's not actually true yet, see https://github.com/ziglang/zig/issues/1006.
/// Caller owns the returned slice of bytes, allocated with `gpa`.
-pub fn render(tree: Tree, gpa: mem.Allocator) RenderError![]u8 {
+pub fn render(tree: Ast, gpa: mem.Allocator) RenderError![]u8 {
var buffer = std.ArrayList(u8).init(gpa);
defer buffer.deinit();
@@ -60,11 +60,11 @@ pub fn render(tree: Tree, gpa: mem.Allocator) RenderError![]u8 {
return buffer.toOwnedSlice();
}
-pub fn renderToArrayList(tree: Tree, buffer: *std.ArrayList(u8)) RenderError!void {
+pub fn renderToArrayList(tree: Ast, buffer: *std.ArrayList(u8)) RenderError!void {
return @import("./render.zig").renderTree(buffer, tree);
}
-pub fn tokenLocation(self: Tree, start_offset: ByteOffset, token_index: TokenIndex) Location {
+pub fn tokenLocation(self: Ast, start_offset: ByteOffset, token_index: TokenIndex) Location {
var loc = Location{
.line = 0,
.column = 0,
@@ -91,7 +91,7 @@ pub fn tokenLocation(self: Tree, start_offset: ByteOffset, token_index: TokenInd
return loc;
}
-pub fn tokenSlice(tree: Tree, token_index: TokenIndex) []const u8 {
+pub fn tokenSlice(tree: Ast, token_index: TokenIndex) []const u8 {
const token_starts = tree.tokens.items(.start);
const token_tags = tree.tokens.items(.tag);
const token_tag = token_tags[token_index];
@@ -112,7 +112,7 @@ pub fn tokenSlice(tree: Tree, token_index: TokenIndex) []const u8 {
return tree.source[token.loc.start..token.loc.end];
}
-pub fn extraData(tree: Tree, index: usize, comptime T: type) T {
+pub fn extraData(tree: Ast, index: usize, comptime T: type) T {
const fields = std.meta.fields(T);
var result: T = undefined;
inline for (fields) |field, i| {
@@ -122,13 +122,13 @@ pub fn extraData(tree: Tree, index: usize, comptime T: type) T {
return result;
}
-pub fn rootDecls(tree: Tree) []const Node.Index {
+pub fn rootDecls(tree: Ast) []const Node.Index {
// Root is always index 0.
const nodes_data = tree.nodes.items(.data);
return tree.extra_data[nodes_data[0].lhs..nodes_data[0].rhs];
}
-pub fn renderError(tree: Tree, parse_error: Error, stream: anytype) !void {
+pub fn renderError(tree: Ast, parse_error: Error, stream: anytype) !void {
const token_tags = tree.tokens.items(.tag);
switch (parse_error.tag) {
.asterisk_after_ptr_deref => {
@@ -321,7 +321,7 @@ pub fn renderError(tree: Tree, parse_error: Error, stream: anytype) !void {
}
}
-pub fn firstToken(tree: Tree, node: Node.Index) TokenIndex {
+pub fn firstToken(tree: Ast, node: Node.Index) TokenIndex {
const tags = tree.nodes.items(.tag);
const datas = tree.nodes.items(.data);
const main_tokens = tree.nodes.items(.main_token);
@@ -625,7 +625,7 @@ pub fn firstToken(tree: Tree, node: Node.Index) TokenIndex {
};
}
-pub fn lastToken(tree: Tree, node: Node.Index) TokenIndex {
+pub fn lastToken(tree: Ast, node: Node.Index) TokenIndex {
const tags = tree.nodes.items(.tag);
const datas = tree.nodes.items(.data);
const main_tokens = tree.nodes.items(.main_token);
@@ -1157,13 +1157,13 @@ pub fn lastToken(tree: Tree, node: Node.Index) TokenIndex {
};
}
-pub fn tokensOnSameLine(tree: Tree, token1: TokenIndex, token2: TokenIndex) bool {
+pub fn tokensOnSameLine(tree: Ast, token1: TokenIndex, token2: TokenIndex) bool {
const token_starts = tree.tokens.items(.start);
const source = tree.source[token_starts[token1]..token_starts[token2]];
return mem.indexOfScalar(u8, source, '\n') == null;
}
-pub fn getNodeSource(tree: Tree, node: Node.Index) []const u8 {
+pub fn getNodeSource(tree: Ast, node: Node.Index) []const u8 {
const token_starts = tree.tokens.items(.start);
const first_token = tree.firstToken(node);
const last_token = tree.lastToken(node);
@@ -1172,7 +1172,7 @@ pub fn getNodeSource(tree: Tree, node: Node.Index) []const u8 {
return tree.source[start..end];
}
-pub fn globalVarDecl(tree: Tree, node: Node.Index) full.VarDecl {
+pub fn globalVarDecl(tree: Ast, node: Node.Index) full.VarDecl {
assert(tree.nodes.items(.tag)[node] == .global_var_decl);
const data = tree.nodes.items(.data)[node];
const extra = tree.extraData(data.lhs, Node.GlobalVarDecl);
@@ -1186,7 +1186,7 @@ pub fn globalVarDecl(tree: Tree, node: Node.Index) full.VarDecl {
});
}
-pub fn localVarDecl(tree: Tree, node: Node.Index) full.VarDecl {
+pub fn localVarDecl(tree: Ast, node: Node.Index) full.VarDecl {
assert(tree.nodes.items(.tag)[node] == .local_var_decl);
const data = tree.nodes.items(.data)[node];
const extra = tree.extraData(data.lhs, Node.LocalVarDecl);
@@ -1200,7 +1200,7 @@ pub fn localVarDecl(tree: Tree, node: Node.Index) full.VarDecl {
});
}
-pub fn simpleVarDecl(tree: Tree, node: Node.Index) full.VarDecl {
+pub fn simpleVarDecl(tree: Ast, node: Node.Index) full.VarDecl {
assert(tree.nodes.items(.tag)[node] == .simple_var_decl);
const data = tree.nodes.items(.data)[node];
return tree.fullVarDecl(.{
@@ -1213,7 +1213,7 @@ pub fn simpleVarDecl(tree: Tree, node: Node.Index) full.VarDecl {
});
}
-pub fn alignedVarDecl(tree: Tree, node: Node.Index) full.VarDecl {
+pub fn alignedVarDecl(tree: Ast, node: Node.Index) full.VarDecl {
assert(tree.nodes.items(.tag)[node] == .aligned_var_decl);
const data = tree.nodes.items(.data)[node];
return tree.fullVarDecl(.{
@@ -1226,7 +1226,7 @@ pub fn alignedVarDecl(tree: Tree, node: Node.Index) full.VarDecl {
});
}
-pub fn ifSimple(tree: Tree, node: Node.Index) full.If {
+pub fn ifSimple(tree: Ast, node: Node.Index) full.If {
assert(tree.nodes.items(.tag)[node] == .if_simple);
const data = tree.nodes.items(.data)[node];
return tree.fullIf(.{
@@ -1237,7 +1237,7 @@ pub fn ifSimple(tree: Tree, node: Node.Index) full.If {
});
}
-pub fn ifFull(tree: Tree, node: Node.Index) full.If {
+pub fn ifFull(tree: Ast, node: Node.Index) full.If {
assert(tree.nodes.items(.tag)[node] == .@"if");
const data = tree.nodes.items(.data)[node];
const extra = tree.extraData(data.rhs, Node.If);
@@ -1249,7 +1249,7 @@ pub fn ifFull(tree: Tree, node: Node.Index) full.If {
});
}
-pub fn containerField(tree: Tree, node: Node.Index) full.ContainerField {
+pub fn containerField(tree: Ast, node: Node.Index) full.ContainerField {
assert(tree.nodes.items(.tag)[node] == .container_field);
const data = tree.nodes.items(.data)[node];
const extra = tree.extraData(data.rhs, Node.ContainerField);
@@ -1261,7 +1261,7 @@ pub fn containerField(tree: Tree, node: Node.Index) full.ContainerField {
});
}
-pub fn containerFieldInit(tree: Tree, node: Node.Index) full.ContainerField {
+pub fn containerFieldInit(tree: Ast, node: Node.Index) full.ContainerField {
assert(tree.nodes.items(.tag)[node] == .container_field_init);
const data = tree.nodes.items(.data)[node];
return tree.fullContainerField(.{
@@ -1272,7 +1272,7 @@ pub fn containerFieldInit(tree: Tree, node: Node.Index) full.ContainerField {
});
}
-pub fn containerFieldAlign(tree: Tree, node: Node.Index) full.ContainerField {
+pub fn containerFieldAlign(tree: Ast, node: Node.Index) full.ContainerField {
assert(tree.nodes.items(.tag)[node] == .container_field_align);
const data = tree.nodes.items(.data)[node];
return tree.fullContainerField(.{
@@ -1283,7 +1283,7 @@ pub fn containerFieldAlign(tree: Tree, node: Node.Index) full.ContainerField {
});
}
-pub fn fnProtoSimple(tree: Tree, buffer: *[1]Node.Index, node: Node.Index) full.FnProto {
+pub fn fnProtoSimple(tree: Ast, buffer: *[1]Node.Index, node: Node.Index) full.FnProto {
assert(tree.nodes.items(.tag)[node] == .fn_proto_simple);
const data = tree.nodes.items(.data)[node];
buffer[0] = data.lhs;
@@ -1300,7 +1300,7 @@ pub fn fnProtoSimple(tree: Tree, buffer: *[1]Node.Index, node: Node.Index) full.
});
}
-pub fn fnProtoMulti(tree: Tree, node: Node.Index) full.FnProto {
+pub fn fnProtoMulti(tree: Ast, node: Node.Index) full.FnProto {
assert(tree.nodes.items(.tag)[node] == .fn_proto_multi);
const data = tree.nodes.items(.data)[node];
const params_range = tree.extraData(data.lhs, Node.SubRange);
@@ -1317,7 +1317,7 @@ pub fn fnProtoMulti(tree: Tree, node: Node.Index) full.FnProto {
});
}
-pub fn fnProtoOne(tree: Tree, buffer: *[1]Node.Index, node: Node.Index) full.FnProto {
+pub fn fnProtoOne(tree: Ast, buffer: *[1]Node.Index, node: Node.Index) full.FnProto {
assert(tree.nodes.items(.tag)[node] == .fn_proto_one);
const data = tree.nodes.items(.data)[node];
const extra = tree.extraData(data.lhs, Node.FnProtoOne);
@@ -1335,7 +1335,7 @@ pub fn fnProtoOne(tree: Tree, buffer: *[1]Node.Index, node: Node.Index) full.FnP
});
}
-pub fn fnProto(tree: Tree, node: Node.Index) full.FnProto {
+pub fn fnProto(tree: Ast, node: Node.Index) full.FnProto {
assert(tree.nodes.items(.tag)[node] == .fn_proto);
const data = tree.nodes.items(.data)[node];
const extra = tree.extraData(data.lhs, Node.FnProto);
@@ -1352,7 +1352,7 @@ pub fn fnProto(tree: Tree, node: Node.Index) full.FnProto {
});
}
-pub fn structInitOne(tree: Tree, buffer: *[1]Node.Index, node: Node.Index) full.StructInit {
+pub fn structInitOne(tree: Ast, buffer: *[1]Node.Index, node: Node.Index) full.StructInit {
assert(tree.nodes.items(.tag)[node] == .struct_init_one or
tree.nodes.items(.tag)[node] == .struct_init_one_comma);
const data = tree.nodes.items(.data)[node];
@@ -1365,7 +1365,7 @@ pub fn structInitOne(tree: Tree, buffer: *[1]Node.Index, node: Node.Index) full.
});
}
-pub fn structInitDotTwo(tree: Tree, buffer: *[2]Node.Index, node: Node.Index) full.StructInit {
+pub fn structInitDotTwo(tree: Ast, buffer: *[2]Node.Index, node: Node.Index) full.StructInit {
assert(tree.nodes.items(.tag)[node] == .struct_init_dot_two or
tree.nodes.items(.tag)[node] == .struct_init_dot_two_comma);
const data = tree.nodes.items(.data)[node];
@@ -1383,7 +1383,7 @@ pub fn structInitDotTwo(tree: Tree, buffer: *[2]Node.Index, node: Node.Index) fu
});
}
-pub fn structInitDot(tree: Tree, node: Node.Index) full.StructInit {
+pub fn structInitDot(tree: Ast, node: Node.Index) full.StructInit {
assert(tree.nodes.items(.tag)[node] == .struct_init_dot or
tree.nodes.items(.tag)[node] == .struct_init_dot_comma);
const data = tree.nodes.items(.data)[node];
@@ -1394,7 +1394,7 @@ pub fn structInitDot(tree: Tree, node: Node.Index) full.StructInit {
});
}
-pub fn structInit(tree: Tree, node: Node.Index) full.StructInit {
+pub fn structInit(tree: Ast, node: Node.Index) full.StructInit {
assert(tree.nodes.items(.tag)[node] == .struct_init or
tree.nodes.items(.tag)[node] == .struct_init_comma);
const data = tree.nodes.items(.data)[node];
@@ -1406,7 +1406,7 @@ pub fn structInit(tree: Tree, node: Node.Index) full.StructInit {
});
}
-pub fn arrayInitOne(tree: Tree, buffer: *[1]Node.Index, node: Node.Index) full.ArrayInit {
+pub fn arrayInitOne(tree: Ast, buffer: *[1]Node.Index, node: Node.Index) full.ArrayInit {
assert(tree.nodes.items(.tag)[node] == .array_init_one or
tree.nodes.items(.tag)[node] == .array_init_one_comma);
const data = tree.nodes.items(.data)[node];
@@ -1421,7 +1421,7 @@ pub fn arrayInitOne(tree: Tree, buffer: *[1]Node.Index, node: Node.Index) full.A
};
}
-pub fn arrayInitDotTwo(tree: Tree, buffer: *[2]Node.Index, node: Node.Index) full.ArrayInit {
+pub fn arrayInitDotTwo(tree: Ast, buffer: *[2]Node.Index, node: Node.Index) full.ArrayInit {
assert(tree.nodes.items(.tag)[node] == .array_init_dot_two or
tree.nodes.items(.tag)[node] == .array_init_dot_two_comma);
const data = tree.nodes.items(.data)[node];
@@ -1441,7 +1441,7 @@ pub fn arrayInitDotTwo(tree: Tree, buffer: *[2]Node.Index, node: Node.Index) ful
};
}
-pub fn arrayInitDot(tree: Tree, node: Node.Index) full.ArrayInit {
+pub fn arrayInitDot(tree: Ast, node: Node.Index) full.ArrayInit {
assert(tree.nodes.items(.tag)[node] == .array_init_dot or
tree.nodes.items(.tag)[node] == .array_init_dot_comma);
const data = tree.nodes.items(.data)[node];
@@ -1454,7 +1454,7 @@ pub fn arrayInitDot(tree: Tree, node: Node.Index) full.ArrayInit {
};
}
-pub fn arrayInit(tree: Tree, node: Node.Index) full.ArrayInit {
+pub fn arrayInit(tree: Ast, node: Node.Index) full.ArrayInit {
assert(tree.nodes.items(.tag)[node] == .array_init or
tree.nodes.items(.tag)[node] == .array_init_comma);
const data = tree.nodes.items(.data)[node];
@@ -1468,7 +1468,7 @@ pub fn arrayInit(tree: Tree, node: Node.Index) full.ArrayInit {
};
}
-pub fn arrayType(tree: Tree, node: Node.Index) full.ArrayType {
+pub fn arrayType(tree: Ast, node: Node.Index) full.ArrayType {
assert(tree.nodes.items(.tag)[node] == .array_type);
const data = tree.nodes.items(.data)[node];
return .{
@@ -1481,7 +1481,7 @@ pub fn arrayType(tree: Tree, node: Node.Index) full.ArrayType {
};
}
-pub fn arrayTypeSentinel(tree: Tree, node: Node.Index) full.ArrayType {
+pub fn arrayTypeSentinel(tree: Ast, node: Node.Index) full.ArrayType {
assert(tree.nodes.items(.tag)[node] == .array_type_sentinel);
const data = tree.nodes.items(.data)[node];
const extra = tree.extraData(data.rhs, Node.ArrayTypeSentinel);
@@ -1496,7 +1496,7 @@ pub fn arrayTypeSentinel(tree: Tree, node: Node.Index) full.ArrayType {
};
}
-pub fn ptrTypeAligned(tree: Tree, node: Node.Index) full.PtrType {
+pub fn ptrTypeAligned(tree: Ast, node: Node.Index) full.PtrType {
assert(tree.nodes.items(.tag)[node] == .ptr_type_aligned);
const data = tree.nodes.items(.data)[node];
return tree.fullPtrType(.{
@@ -1510,7 +1510,7 @@ pub fn ptrTypeAligned(tree: Tree, node: Node.Index) full.PtrType {
});
}
-pub fn ptrTypeSentinel(tree: Tree, node: Node.Index) full.PtrType {
+pub fn ptrTypeSentinel(tree: Ast, node: Node.Index) full.PtrType {
assert(tree.nodes.items(.tag)[node] == .ptr_type_sentinel);
const data = tree.nodes.items(.data)[node];
return tree.fullPtrType(.{
@@ -1524,7 +1524,7 @@ pub fn ptrTypeSentinel(tree: Tree, node: Node.Index) full.PtrType {
});
}
-pub fn ptrType(tree: Tree, node: Node.Index) full.PtrType {
+pub fn ptrType(tree: Ast, node: Node.Index) full.PtrType {
assert(tree.nodes.items(.tag)[node] == .ptr_type);
const data = tree.nodes.items(.data)[node];
const extra = tree.extraData(data.lhs, Node.PtrType);
@@ -1539,7 +1539,7 @@ pub fn ptrType(tree: Tree, node: Node.Index) full.PtrType {
});
}
-pub fn ptrTypeBitRange(tree: Tree, node: Node.Index) full.PtrType {
+pub fn ptrTypeBitRange(tree: Ast, node: Node.Index) full.PtrType {
assert(tree.nodes.items(.tag)[node] == .ptr_type_bit_range);
const data = tree.nodes.items(.data)[node];
const extra = tree.extraData(data.lhs, Node.PtrTypeBitRange);
@@ -1554,7 +1554,7 @@ pub fn ptrTypeBitRange(tree: Tree, node: Node.Index) full.PtrType {
});
}
-pub fn sliceOpen(tree: Tree, node: Node.Index) full.Slice {
+pub fn sliceOpen(tree: Ast, node: Node.Index) full.Slice {
assert(tree.nodes.items(.tag)[node] == .slice_open);
const data = tree.nodes.items(.data)[node];
return .{
@@ -1568,7 +1568,7 @@ pub fn sliceOpen(tree: Tree, node: Node.Index) full.Slice {
};
}
-pub fn slice(tree: Tree, node: Node.Index) full.Slice {
+pub fn slice(tree: Ast, node: Node.Index) full.Slice {
assert(tree.nodes.items(.tag)[node] == .slice);
const data = tree.nodes.items(.data)[node];
const extra = tree.extraData(data.rhs, Node.Slice);
@@ -1583,7 +1583,7 @@ pub fn slice(tree: Tree, node: Node.Index) full.Slice {
};
}
-pub fn sliceSentinel(tree: Tree, node: Node.Index) full.Slice {
+pub fn sliceSentinel(tree: Ast, node: Node.Index) full.Slice {
assert(tree.nodes.items(.tag)[node] == .slice_sentinel);
const data = tree.nodes.items(.data)[node];
const extra = tree.extraData(data.rhs, Node.SliceSentinel);
@@ -1598,7 +1598,7 @@ pub fn sliceSentinel(tree: Tree, node: Node.Index) full.Slice {
};
}
-pub fn containerDeclTwo(tree: Tree, buffer: *[2]Node.Index, node: Node.Index) full.ContainerDecl {
+pub fn containerDeclTwo(tree: Ast, buffer: *[2]Node.Index, node: Node.Index) full.ContainerDecl {
assert(tree.nodes.items(.tag)[node] == .container_decl_two or
tree.nodes.items(.tag)[node] == .container_decl_two_trailing);
const data = tree.nodes.items(.data)[node];
@@ -1617,7 +1617,7 @@ pub fn containerDeclTwo(tree: Tree, buffer: *[2]Node.Index, node: Node.Index) fu
});
}
-pub fn containerDecl(tree: Tree, node: Node.Index) full.ContainerDecl {
+pub fn containerDecl(tree: Ast, node: Node.Index) full.ContainerDecl {
assert(tree.nodes.items(.tag)[node] == .container_decl or
tree.nodes.items(.tag)[node] == .container_decl_trailing);
const data = tree.nodes.items(.data)[node];
@@ -1629,7 +1629,7 @@ pub fn containerDecl(tree: Tree, node: Node.Index) full.ContainerDecl {
});
}
-pub fn containerDeclArg(tree: Tree, node: Node.Index) full.ContainerDecl {
+pub fn containerDeclArg(tree: Ast, node: Node.Index) full.ContainerDecl {
assert(tree.nodes.items(.tag)[node] == .container_decl_arg or
tree.nodes.items(.tag)[node] == .container_decl_arg_trailing);
const data = tree.nodes.items(.data)[node];
@@ -1642,7 +1642,7 @@ pub fn containerDeclArg(tree: Tree, node: Node.Index) full.ContainerDecl {
});
}
-pub fn taggedUnionTwo(tree: Tree, buffer: *[2]Node.Index, node: Node.Index) full.ContainerDecl {
+pub fn taggedUnionTwo(tree: Ast, buffer: *[2]Node.Index, node: Node.Index) full.ContainerDecl {
assert(tree.nodes.items(.tag)[node] == .tagged_union_two or
tree.nodes.items(.tag)[node] == .tagged_union_two_trailing);
const data = tree.nodes.items(.data)[node];
@@ -1662,7 +1662,7 @@ pub fn taggedUnionTwo(tree: Tree, buffer: *[2]Node.Index, node: Node.Index) full
});
}
-pub fn taggedUnion(tree: Tree, node: Node.Index) full.ContainerDecl {
+pub fn taggedUnion(tree: Ast, node: Node.Index) full.ContainerDecl {
assert(tree.nodes.items(.tag)[node] == .tagged_union or
tree.nodes.items(.tag)[node] == .tagged_union_trailing);
const data = tree.nodes.items(.data)[node];
@@ -1675,7 +1675,7 @@ pub fn taggedUnion(tree: Tree, node: Node.Index) full.ContainerDecl {
});
}
-pub fn taggedUnionEnumTag(tree: Tree, node: Node.Index) full.ContainerDecl {
+pub fn taggedUnionEnumTag(tree: Ast, node: Node.Index) full.ContainerDecl {
assert(tree.nodes.items(.tag)[node] == .tagged_union_enum_tag or
tree.nodes.items(.tag)[node] == .tagged_union_enum_tag_trailing);
const data = tree.nodes.items(.data)[node];
@@ -1689,7 +1689,7 @@ pub fn taggedUnionEnumTag(tree: Tree, node: Node.Index) full.ContainerDecl {
});
}
-pub fn switchCaseOne(tree: Tree, node: Node.Index) full.SwitchCase {
+pub fn switchCaseOne(tree: Ast, node: Node.Index) full.SwitchCase {
const data = &tree.nodes.items(.data)[node];
const values: *[1]Node.Index = &data.lhs;
return tree.fullSwitchCase(.{
@@ -1699,7 +1699,7 @@ pub fn switchCaseOne(tree: Tree, node: Node.Index) full.SwitchCase {
});
}
-pub fn switchCase(tree: Tree, node: Node.Index) full.SwitchCase {
+pub fn switchCase(tree: Ast, node: Node.Index) full.SwitchCase {
const data = tree.nodes.items(.data)[node];
const extra = tree.extraData(data.lhs, Node.SubRange);
return tree.fullSwitchCase(.{
@@ -1709,7 +1709,7 @@ pub fn switchCase(tree: Tree, node: Node.Index) full.SwitchCase {
});
}
-pub fn asmSimple(tree: Tree, node: Node.Index) full.Asm {
+pub fn asmSimple(tree: Ast, node: Node.Index) full.Asm {
const data = tree.nodes.items(.data)[node];
return tree.fullAsm(.{
.asm_token = tree.nodes.items(.main_token)[node],
@@ -1719,7 +1719,7 @@ pub fn asmSimple(tree: Tree, node: Node.Index) full.Asm {
});
}
-pub fn asmFull(tree: Tree, node: Node.Index) full.Asm {
+pub fn asmFull(tree: Ast, node: Node.Index) full.Asm {
const data = tree.nodes.items(.data)[node];
const extra = tree.extraData(data.rhs, Node.Asm);
return tree.fullAsm(.{
@@ -1730,7 +1730,7 @@ pub fn asmFull(tree: Tree, node: Node.Index) full.Asm {
});
}
-pub fn whileSimple(tree: Tree, node: Node.Index) full.While {
+pub fn whileSimple(tree: Ast, node: Node.Index) full.While {
const data = tree.nodes.items(.data)[node];
return tree.fullWhile(.{
.while_token = tree.nodes.items(.main_token)[node],
@@ -1741,7 +1741,7 @@ pub fn whileSimple(tree: Tree, node: Node.Index) full.While {
});
}
-pub fn whileCont(tree: Tree, node: Node.Index) full.While {
+pub fn whileCont(tree: Ast, node: Node.Index) full.While {
const data = tree.nodes.items(.data)[node];
const extra = tree.extraData(data.rhs, Node.WhileCont);
return tree.fullWhile(.{
@@ -1753,7 +1753,7 @@ pub fn whileCont(tree: Tree, node: Node.Index) full.While {
});
}
-pub fn whileFull(tree: Tree, node: Node.Index) full.While {
+pub fn whileFull(tree: Ast, node: Node.Index) full.While {
const data = tree.nodes.items(.data)[node];
const extra = tree.extraData(data.rhs, Node.While);
return tree.fullWhile(.{
@@ -1765,7 +1765,7 @@ pub fn whileFull(tree: Tree, node: Node.Index) full.While {
});
}
-pub fn forSimple(tree: Tree, node: Node.Index) full.While {
+pub fn forSimple(tree: Ast, node: Node.Index) full.While {
const data = tree.nodes.items(.data)[node];
return tree.fullWhile(.{
.while_token = tree.nodes.items(.main_token)[node],
@@ -1776,7 +1776,7 @@ pub fn forSimple(tree: Tree, node: Node.Index) full.While {
});
}
-pub fn forFull(tree: Tree, node: Node.Index) full.While {
+pub fn forFull(tree: Ast, node: Node.Index) full.While {
const data = tree.nodes.items(.data)[node];
const extra = tree.extraData(data.rhs, Node.If);
return tree.fullWhile(.{
@@ -1788,7 +1788,7 @@ pub fn forFull(tree: Tree, node: Node.Index) full.While {
});
}
-pub fn callOne(tree: Tree, buffer: *[1]Node.Index, node: Node.Index) full.Call {
+pub fn callOne(tree: Ast, buffer: *[1]Node.Index, node: Node.Index) full.Call {
const data = tree.nodes.items(.data)[node];
buffer.* = .{data.rhs};
const params = if (data.rhs != 0) buffer[0..1] else buffer[0..0];
@@ -1799,7 +1799,7 @@ pub fn callOne(tree: Tree, buffer: *[1]Node.Index, node: Node.Index) full.Call {
});
}
-pub fn callFull(tree: Tree, node: Node.Index) full.Call {
+pub fn callFull(tree: Ast, node: Node.Index) full.Call {
const data = tree.nodes.items(.data)[node];
const extra = tree.extraData(data.rhs, Node.SubRange);
return tree.fullCall(.{
@@ -1809,7 +1809,7 @@ pub fn callFull(tree: Tree, node: Node.Index) full.Call {
});
}
-fn fullVarDecl(tree: Tree, info: full.VarDecl.Components) full.VarDecl {
+fn fullVarDecl(tree: Ast, info: full.VarDecl.Components) full.VarDecl {
const token_tags = tree.tokens.items(.tag);
var result: full.VarDecl = .{
.ast = info,
@@ -1834,7 +1834,7 @@ fn fullVarDecl(tree: Tree, info: full.VarDecl.Components) full.VarDecl {
return result;
}
-fn fullIf(tree: Tree, info: full.If.Components) full.If {
+fn fullIf(tree: Ast, info: full.If.Components) full.If {
const token_tags = tree.tokens.items(.tag);
var result: full.If = .{
.ast = info,
@@ -1859,7 +1859,7 @@ fn fullIf(tree: Tree, info: full.If.Components) full.If {
return result;
}
-fn fullContainerField(tree: Tree, info: full.ContainerField.Components) full.ContainerField {
+fn fullContainerField(tree: Ast, info: full.ContainerField.Components) full.ContainerField {
const token_tags = tree.tokens.items(.tag);
var result: full.ContainerField = .{
.ast = info,
@@ -1873,7 +1873,7 @@ fn fullContainerField(tree: Tree, info: full.ContainerField.Components) full.Con
return result;
}
-fn fullFnProto(tree: Tree, info: full.FnProto.Components) full.FnProto {
+fn fullFnProto(tree: Ast, info: full.FnProto.Components) full.FnProto {
const token_tags = tree.tokens.items(.tag);
var result: full.FnProto = .{
.ast = info,
@@ -1909,7 +1909,7 @@ fn fullFnProto(tree: Tree, info: full.FnProto.Components) full.FnProto {
return result;
}
-fn fullStructInit(tree: Tree, info: full.StructInit.Components) full.StructInit {
+fn fullStructInit(tree: Ast, info: full.StructInit.Components) full.StructInit {
_ = tree;
var result: full.StructInit = .{
.ast = info,
@@ -1917,7 +1917,7 @@ fn fullStructInit(tree: Tree, info: full.StructInit.Components) full.StructInit
return result;
}
-fn fullPtrType(tree: Tree, info: full.PtrType.Components) full.PtrType {
+fn fullPtrType(tree: Ast, info: full.PtrType.Components) full.PtrType {
const token_tags = tree.tokens.items(.tag);
// TODO: looks like stage1 isn't quite smart enough to handle enum
// literals in some places here
@@ -1966,7 +1966,7 @@ fn fullPtrType(tree: Tree, info: full.PtrType.Components) full.PtrType {
return result;
}
-fn fullContainerDecl(tree: Tree, info: full.ContainerDecl.Components) full.ContainerDecl {
+fn fullContainerDecl(tree: Ast, info: full.ContainerDecl.Components) full.ContainerDecl {
const token_tags = tree.tokens.items(.tag);
var result: full.ContainerDecl = .{
.ast = info,
@@ -1979,7 +1979,7 @@ fn fullContainerDecl(tree: Tree, info: full.ContainerDecl.Components) full.Conta
return result;
}
-fn fullSwitchCase(tree: Tree, info: full.SwitchCase.Components) full.SwitchCase {
+fn fullSwitchCase(tree: Ast, info: full.SwitchCase.Components) full.SwitchCase {
const token_tags = tree.tokens.items(.tag);
var result: full.SwitchCase = .{
.ast = info,
@@ -1991,7 +1991,7 @@ fn fullSwitchCase(tree: Tree, info: full.SwitchCase.Components) full.SwitchCase
return result;
}
-fn fullAsm(tree: Tree, info: full.Asm.Components) full.Asm {
+fn fullAsm(tree: Ast, info: full.Asm.Components) full.Asm {
const token_tags = tree.tokens.items(.tag);
const node_tags = tree.nodes.items(.tag);
var result: full.Asm = .{
@@ -2054,7 +2054,7 @@ fn fullAsm(tree: Tree, info: full.Asm.Components) full.Asm {
return result;
}
-fn fullWhile(tree: Tree, info: full.While.Components) full.While {
+fn fullWhile(tree: Ast, info: full.While.Components) full.While {
const token_tags = tree.tokens.items(.tag);
var result: full.While = .{
.ast = info,
@@ -2089,7 +2089,7 @@ fn fullWhile(tree: Tree, info: full.While.Components) full.While {
return result;
}
-fn fullCall(tree: Tree, info: full.Call.Components) full.Call {
+fn fullCall(tree: Ast, info: full.Call.Components) full.Call {
const token_tags = tree.tokens.items(.tag);
var result: full.Call = .{
.ast = info,
@@ -2201,7 +2201,7 @@ pub const full = struct {
/// in the params slice, since they are simple identifiers and
/// not sub-expressions.
pub const Iterator = struct {
- tree: *const Tree,
+ tree: *const Ast,
fn_proto: *const FnProto,
param_i: usize,
tok_i: TokenIndex,
@@ -2291,7 +2291,7 @@ pub const full = struct {
}
};
- pub fn iterate(fn_proto: FnProto, tree: Tree) Iterator {
+ pub fn iterate(fn_proto: FnProto, tree: Ast) Iterator {
return .{
.tree = &tree,
.fn_proto = &fn_proto,
@@ -2485,7 +2485,7 @@ pub const Node = struct {
}
/// Note: The FooComma/FooSemicolon variants exist to ease the implementation of
- /// Tree.lastToken()
+ /// Ast.lastToken()
pub const Tag = enum {
/// sub_list[lhs...rhs]
root,
diff --git a/lib/std/zig/system/darwin.zig b/lib/std/zig/system/darwin.zig
index c20607440d..52abddc06a 100644
--- a/lib/std/zig/system/darwin.zig
+++ b/lib/std/zig/system/darwin.zig
@@ -88,5 +88,5 @@ pub const DarwinSDK = struct {
};
test "" {
- _ = @import("darwin/macos.zig");
+ _ = macos;
}
diff --git a/src/Air.zig b/src/Air.zig
index 08e11716cc..2e910f9c9a 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -501,6 +501,10 @@ pub const Inst = struct {
/// Uses the `un_op` field.
tag_name,
+    /// Given an error value, returns the error name. Result type is always `[:0]const u8`.
+ /// Uses the `un_op` field.
+ error_name,
+
pub fn fromCmpOp(op: std.math.CompareOperator) Tag {
return switch (op) {
.lt => .cmp_lt,
@@ -816,7 +820,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.bool_to_int => return Type.initTag(.u1),
- .tag_name => return Type.initTag(.const_slice_u8_sentinel_0),
+ .tag_name, .error_name => return Type.initTag(.const_slice_u8_sentinel_0),
.call => {
const callee_ty = air.typeOf(datas[inst].pl_op.operand);
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 7ff3d75682..87cc07fae8 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -6918,12 +6918,25 @@ fn builtinCall(
return rvalue(gz, rl, result, node);
},
+ .src => {
+ const token_starts = tree.tokens.items(.start);
+ const node_start = token_starts[tree.firstToken(node)];
+
+ astgen.advanceSourceCursor(tree.source, node_start);
+
+ const result = try gz.addExtendedPayload(.builtin_src, Zir.Inst.LineColumn{
+ .line = @intCast(u32, astgen.source_line),
+ .column = @intCast(u32, astgen.source_column),
+ });
+
+ return rvalue(gz, rl, result, node);
+ },
+
.breakpoint => return simpleNoOpVoid(gz, rl, node, .breakpoint),
// zig fmt: off
.This => return rvalue(gz, rl, try gz.addNodeExtended(.this, node), node),
.return_address => return rvalue(gz, rl, try gz.addNodeExtended(.ret_addr, node), node),
- .src => return rvalue(gz, rl, try gz.addNodeExtended(.builtin_src, node), node),
.error_return_trace => return rvalue(gz, rl, try gz.addNodeExtended(.error_return_trace, node), node),
.frame => return rvalue(gz, rl, try gz.addNodeExtended(.frame, node), node),
.frame_address => return rvalue(gz, rl, try gz.addNodeExtended(.frame_address, node), node),
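
The `Zir.Inst.LineColumn` payload used above is added in src/Zir.zig (that hunk
is not shown in this excerpt); presumably it is just the two fields serialized
into the extended instruction's trailing data, along these lines:

    /// Sketch of the trailing payload for the `builtin_src` extended
    /// instruction; the exact definition lives in the src/Zir.zig hunk.
    pub const LineColumn = struct {
        line: u32,
        column: u32,
    };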
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 7d648d4649..4ad4d3edfa 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -34,6 +34,7 @@ const ThreadPool = @import("ThreadPool.zig");
const WaitGroup = @import("WaitGroup.zig");
const libtsan = @import("libtsan.zig");
const Zir = @import("Zir.zig");
+const Color = @import("main.zig").Color;
/// General-purpose allocator. Used for both temporary and long-term storage.
gpa: Allocator,
@@ -148,7 +149,7 @@ owned_link_dir: ?std.fs.Dir,
/// This is for stage1 and should be deleted upon completion of self-hosting.
/// Don't use this for anything other than stage1 compatibility.
-color: @import("main.zig").Color = .auto,
+color: Color = .auto,
/// This mutex guards all `Compilation` mutable state.
mutex: std.Thread.Mutex = .{},
@@ -794,7 +795,7 @@ pub const InitOptions = struct {
machine_code_model: std.builtin.CodeModel = .default,
clang_preprocessor_mode: ClangPreprocessorMode = .no,
/// This is for stage1 and should be deleted upon completion of self-hosting.
- color: @import("main.zig").Color = .auto,
+ color: Color = .auto,
test_filter: ?[]const u8 = null,
test_name_prefix: ?[]const u8 = null,
subsystem: ?std.Target.SubSystem = null,
diff --git a/src/Liveness.zig b/src/Liveness.zig
index f3c194123d..39de37d4b8 100644
--- a/src/Liveness.zig
+++ b/src/Liveness.zig
@@ -334,6 +334,7 @@ fn analyzeInst(
.ret,
.ret_load,
.tag_name,
+ .error_name,
=> {
const operand = inst_datas[inst].un_op;
return trackOperands(a, new_set, inst, main_tomb, .{ operand, .none, .none });
diff --git a/src/Module.zig b/src/Module.zig
index 0cbf75c735..e93fe2549c 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -831,6 +831,10 @@ pub const Struct = struct {
have_field_types,
layout_wip,
have_layout,
+ fully_resolved_wip,
+        // The type and all its fields have had their layout resolved, even through pointers,
+        // which `have_layout` does not ensure.
+ fully_resolved,
},
/// If true, definitely nonzero size at runtime. If false, resolving the fields
/// is necessary to determine whether it has bits at runtime.
@@ -889,6 +893,22 @@ pub const Struct = struct {
.have_field_types,
.layout_wip,
.have_layout,
+ .fully_resolved_wip,
+ .fully_resolved,
+ => true,
+ };
+ }
+
+ pub fn haveLayout(s: Struct) bool {
+ return switch (s.status) {
+ .none,
+ .field_types_wip,
+ .have_field_types,
+ .layout_wip,
+ => false,
+ .have_layout,
+ .fully_resolved_wip,
+ .fully_resolved,
=> true,
};
}
@@ -1003,6 +1023,10 @@ pub const Union = struct {
have_field_types,
layout_wip,
have_layout,
+ fully_resolved_wip,
+        // The type and all its fields have had their layout resolved, even through pointers,
+        // which `have_layout` does not ensure.
+ fully_resolved,
},
pub const Field = struct {
@@ -1033,6 +1057,8 @@ pub const Union = struct {
.have_field_types,
.layout_wip,
.have_layout,
+ .fully_resolved_wip,
+ .fully_resolved,
=> true,
};
}
@@ -1102,8 +1128,22 @@ pub const Union = struct {
tag_size: u64,
};
+ pub fn haveLayout(u: Union) bool {
+ return switch (u.status) {
+ .none,
+ .field_types_wip,
+ .have_field_types,
+ .layout_wip,
+ => false,
+ .have_layout,
+ .fully_resolved_wip,
+ .fully_resolved,
+ => true,
+ };
+ }
+
pub fn getLayout(u: Union, target: Target, have_tag: bool) Layout {
- assert(u.status == .have_layout);
+ assert(u.haveLayout());
var most_aligned_field: u32 = undefined;
var most_aligned_field_size: u64 = undefined;
var biggest_field: u32 = undefined;
@@ -1623,6 +1663,11 @@ pub const File = struct {
return file.pkg.root_src_directory.join(ally, &[_][]const u8{file.sub_file_path});
}
+ /// Returns the full path to this file relative to its package.
+ pub fn fullPathZ(file: File, ally: Allocator) ![:0]u8 {
+ return file.pkg.root_src_directory.joinZ(ally, &[_][]const u8{file.sub_file_path});
+ }
+
pub fn dumpSrc(file: *File, src: LazySrcLoc) void {
const loc = std.zig.findLineColumn(file.source.bytes, src);
std.debug.print("{s}:{d}:{d}\n", .{ file.sub_file_path, loc.line + 1, loc.column + 1 });
@@ -4397,6 +4442,7 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn, arena: Allocator) Sem
const arg = try sema.addConstant(param_type, opv);
sema.inst_map.putAssumeCapacityNoClobber(inst, arg);
total_param_index += 1;
+ runtime_param_index += 1;
continue;
}
const ty_ref = try sema.addType(param_type);
diff --git a/src/Sema.zig b/src/Sema.zig
index 80231ac3cb..7dd9b3497d 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -4503,14 +4503,14 @@ fn analyzeCall(
const arg_src = call_src; // TODO: better source location
if (i < fn_params_len) {
const param_ty = func_ty.fnParamType(i);
- try sema.resolveTypeForCodegen(block, arg_src, param_ty);
+ try sema.resolveTypeFully(block, arg_src, param_ty);
args[i] = try sema.coerce(block, param_ty, uncasted_arg, arg_src);
} else {
args[i] = uncasted_arg;
}
}
- try sema.resolveTypeForCodegen(block, call_src, func_ty_info.return_type);
+ try sema.resolveTypeFully(block, call_src, func_ty_info.return_type);
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Call).Struct.fields.len +
args.len);
@@ -4580,7 +4580,7 @@ fn finishGenericCall(
const param_ty = new_fn_ty.fnParamType(runtime_i);
const arg_src = call_src; // TODO: better source location
const uncasted_arg = uncasted_args[total_i];
- try sema.resolveTypeForCodegen(block, arg_src, param_ty);
+ try sema.resolveTypeFully(block, arg_src, param_ty);
const casted_arg = try sema.coerce(block, param_ty, uncasted_arg, arg_src);
runtime_args[runtime_i] = casted_arg;
runtime_i += 1;
@@ -4588,7 +4588,7 @@ fn finishGenericCall(
total_i += 1;
}
- try sema.resolveTypeForCodegen(block, call_src, new_fn_ty.fnReturnType());
+ try sema.resolveTypeFully(block, call_src, new_fn_ty.fnReturnType());
}
try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Call).Struct.fields.len +
runtime_args_len);
@@ -4826,26 +4826,8 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
return Air.Inst.Ref.anyerror_type;
}
// Resolve both error sets now.
- const lhs_names = switch (lhs_ty.tag()) {
- .error_set_single => blk: {
- // Work around coercion problems
- const tmp: *const [1][]const u8 = &lhs_ty.castTag(.error_set_single).?.data;
- break :blk tmp;
- },
- .error_set_merged => lhs_ty.castTag(.error_set_merged).?.data.keys(),
- .error_set => lhs_ty.castTag(.error_set).?.data.names.keys(),
- else => unreachable,
- };
-
- const rhs_names = switch (rhs_ty.tag()) {
- .error_set_single => blk: {
- const tmp: *const [1][]const u8 = &rhs_ty.castTag(.error_set_single).?.data;
- break :blk tmp;
- },
- .error_set_merged => rhs_ty.castTag(.error_set_merged).?.data.keys(),
- .error_set => rhs_ty.castTag(.error_set).?.data.names.keys(),
- else => unreachable,
- };
+ const lhs_names = lhs_ty.errorSetNames();
+ const rhs_names = rhs_ty.errorSetNames();
// TODO do we really want to create a Decl for this?
// The reason we do it right now is for memory management.
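
The new `errorSetNames` accessor (added in src/type.zig, per the diffstat)
folds the three error-set representations into one call. At the language level
this path serves the `||` merge operator, e.g.:

    const FileError = error{NotFound};
    const AllocError = error{OutOfMemory};

    // `||` merges the names of both operand sets into a new error set.
    const OpenError = FileError || AllocError;

    test "merged error set" {
        const err: OpenError = error.NotFound;
        try @import("std").testing.expect(err == error.NotFound);
    }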
@@ -6080,6 +6062,8 @@ fn zirSwitchCond(
}
}
+const SwitchErrorSet = std.StringHashMap(Module.SwitchProngSrc);
+
fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -6250,8 +6234,110 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
},
}
},
-            .ErrorSet => return sema.fail(block, src, "TODO validate switch .ErrorSet", .{}),
+            .ErrorSet => {
+ var seen_errors = SwitchErrorSet.init(gpa);
+ defer seen_errors.deinit();
+
+ var extra_index: usize = special.end;
+ {
+ var scalar_i: u32 = 0;
+ while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
+ const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
+ extra_index += 1;
+ const body_len = sema.code.extra[extra_index];
+ extra_index += 1;
+ extra_index += body_len;
+ try sema.validateSwitchItemError(
+ block,
+ &seen_errors,
+ item_ref,
+ src_node_offset,
+ .{ .scalar = scalar_i },
+ );
+ }
+ }
+ {
+ var multi_i: u32 = 0;
+ while (multi_i < multi_cases_len) : (multi_i += 1) {
+ const items_len = sema.code.extra[extra_index];
+ extra_index += 1;
+ const ranges_len = sema.code.extra[extra_index];
+ extra_index += 1;
+ const body_len = sema.code.extra[extra_index];
+ extra_index += 1;
+ const items = sema.code.refSlice(extra_index, items_len);
+ extra_index += items_len + body_len;
+
+ for (items) |item_ref, item_i| {
+ try sema.validateSwitchItemError(
+ block,
+ &seen_errors,
+ item_ref,
+ src_node_offset,
+ .{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } },
+ );
+ }
+
+ try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset);
+ }
+ }
+
+ if (operand_ty.isAnyError()) {
+ if (special_prong != .@"else") {
+ return sema.fail(
+ block,
+ src,
+ "switch must handle all possibilities",
+ .{},
+ );
+ }
+ } else {
+ var maybe_msg: ?*Module.ErrorMsg = null;
+ errdefer if (maybe_msg) |msg| msg.destroy(sema.gpa);
+
+ for (operand_ty.errorSetNames()) |error_name| {
+ if (!seen_errors.contains(error_name) and special_prong != .@"else") {
+ const msg = maybe_msg orelse blk: {
+ maybe_msg = try sema.errMsg(
+ block,
+ src,
+ "switch must handle all possibilities",
+ .{},
+ );
+ break :blk maybe_msg.?;
+ };
+
+ try sema.errNote(
+ block,
+ src,
+ msg,
+ "unhandled error value: error.{s}",
+ .{error_name},
+ );
+ }
+ }
+
+ if (maybe_msg) |msg| {
+ try sema.mod.errNoteNonLazy(
+ operand_ty.declSrcLoc(),
+ msg,
+ "error set '{}' declared here",
+ .{operand_ty},
+ );
+ return sema.failWithOwnedErrorMsg(msg);
+ }
+
+                if (special_prong == .@"else" and
+                    seen_errors.count() == operand_ty.errorSetNames().len)
+                {
+ return sema.fail(
+ block,
+ special_prong_src,
+ "unreachable else prong; all cases already handled",
+ .{},
+ );
+ }
+ }
+ },
.Union => return sema.fail(block, src, "TODO validate switch .Union", .{}),
.Int, .ComptimeInt => {
var range_set = RangeSet.init(gpa);
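
Sketched at the language level, the new `.ErrorSet` validation means a switch
on a concrete error set must be exhaustive, may not repeat an error, and may
not carry a redundant `else`:

    const ParseError = error{ Overflow, InvalidChar };

    fn code(err: ParseError) u8 {
        return switch (err) {
            error.Overflow => 1,
            error.InvalidChar => 2,
            // Omitting an arm: "switch must handle all possibilities".
            // Adding `else` here: "unreachable else prong; all cases already handled".
        };
    }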
@@ -6924,6 +7010,24 @@ fn validateSwitchItemEnum(
return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
}
+fn validateSwitchItemError(
+ sema: *Sema,
+ block: *Block,
+ seen_errors: *SwitchErrorSet,
+ item_ref: Zir.Inst.Ref,
+ src_node_offset: i32,
+ switch_prong_src: Module.SwitchProngSrc,
+) CompileError!void {
+ const item_tv = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
+    // TODO: Do I need to typecheck here?
+ const error_name = item_tv.val.castTag(.@"error").?.data.name;
+ const maybe_prev_src = if (try seen_errors.fetchPut(error_name, switch_prong_src)) |prev|
+ prev.value
+ else
+ null;
+ return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
+}
+
fn validateSwitchDupe(
sema: *Sema,
block: *Block,
@@ -7214,8 +7318,8 @@ fn zirShr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
const lhs = sema.resolveInst(extra.lhs);
const rhs = sema.resolveInst(extra.rhs);
- if (try sema.resolveMaybeUndefVal(block, lhs_src, lhs)) |lhs_val| {
- if (try sema.resolveMaybeUndefVal(block, rhs_src, rhs)) |rhs_val| {
+ if (try sema.resolveMaybeUndefVal(block, rhs_src, rhs)) |rhs_val| {
+ if (try sema.resolveMaybeUndefVal(block, lhs_src, lhs)) |lhs_val| {
const lhs_ty = sema.typeOf(lhs);
if (lhs_val.isUndef() or rhs_val.isUndef()) {
return sema.addConstUndef(lhs_ty);
@@ -7227,6 +7331,12 @@ fn zirShr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
const val = try lhs_val.shr(rhs_val, sema.arena);
return sema.addConstant(lhs_ty, val);
}
+ // Even if lhs is not comptime known, we can still deduce certain things based
+ // on rhs.
+ // If rhs is 0, return lhs without doing any calculations.
+ else if (rhs_val.compareWithZero(.eq)) {
+ return lhs;
+ }
}
try sema.requireRuntimeBlock(block, src);
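
With the operands resolved rhs-first, a shift by a comptime-known zero now
folds to the lhs even when the lhs is runtime-known:

    fn passthrough(x: u32) u32 {
        // rhs is comptime-known to be 0, so Sema returns `x` directly
        // instead of emitting a shift instruction.
        return x >> 0;
    }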
@@ -9072,8 +9182,50 @@ fn zirBuiltinSrc(
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
+ const tracy = trace(@src());
+ defer tracy.end();
+
const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
- return sema.fail(block, src, "TODO: implement Sema.zirBuiltinSrc", .{});
+ const extra = sema.code.extraData(Zir.Inst.LineColumn, extended.operand).data;
+ const func = sema.func orelse return sema.fail(block, src, "@src outside function", .{});
+
+ const func_name_val = blk: {
+ var anon_decl = try block.startAnonDecl();
+ defer anon_decl.deinit();
+ const name = std.mem.span(func.owner_decl.name);
+ const bytes = try anon_decl.arena().dupe(u8, name[0 .. name.len + 1]);
+ const new_decl = try anon_decl.finish(
+ try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len - 1),
+ try Value.Tag.bytes.create(anon_decl.arena(), bytes),
+ );
+ break :blk try Value.Tag.decl_ref.create(sema.arena, new_decl);
+ };
+
+ const file_name_val = blk: {
+ var anon_decl = try block.startAnonDecl();
+ defer anon_decl.deinit();
+ const name = try func.owner_decl.getFileScope().fullPathZ(anon_decl.arena());
+ const new_decl = try anon_decl.finish(
+ try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), name.len),
+ try Value.Tag.bytes.create(anon_decl.arena(), name[0 .. name.len + 1]),
+ );
+ break :blk try Value.Tag.decl_ref.create(sema.arena, new_decl);
+ };
+
+ const field_values = try sema.arena.alloc(Value, 4);
+ // file: [:0]const u8,
+ field_values[0] = file_name_val;
+ // fn_name: [:0]const u8,
+ field_values[1] = func_name_val;
+    // line: u32,
+ field_values[2] = try Value.Tag.int_u64.create(sema.arena, extra.line + 1);
+ // column: u32,
+ field_values[3] = try Value.Tag.int_u64.create(sema.arena, extra.column + 1);
+
+ return sema.addConstant(
+ try sema.getBuiltinType(block, src, "SourceLocation"),
+ try Value.Tag.@"struct".create(sema.arena, field_values),
+ );
}
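
A short usage sketch of the builtin this implements; the `+ 1` adjustments above are why `line` and `column` come out 1-based:

```zig
const std = @import("std");

fn here() void {
    const loc = @src(); // std.builtin.SourceLocation
    std.debug.print("{s}:{d}:{d} in {s}\n", .{
        loc.file, loc.line, loc.column, loc.fn_name,
    });
}
```
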
fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -10368,7 +10520,18 @@ fn zirBoolToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
fn zirErrorName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
- return sema.fail(block, src, "TODO: Sema.zirErrorName", .{});
+ _ = src;
+ const operand = sema.resolveInst(inst_data.operand);
+ const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+
+ if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| {
+ const bytes = val.castTag(.@"error").?.data.name;
+ return sema.addStrLit(block, bytes);
+ }
+
+    // Similar to zirTagName, we have a special AIR instruction for the error name, in case an optimization pass
+    // might be able to resolve the result at compile time.
+ return block.addUnOp(.error_name, operand);
}
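
Both paths through `zirErrorName`, sketched from the caller's side (illustrative): a comptime-known operand folds to a string literal, while a runtime operand lowers to the new `error_name` AIR instruction.

```zig
// Folded by Sema: the operand is a defined comptime value.
const comptime_name = @errorName(error.AccessDenied);

fn runtimeName(err: anyerror) void {
    // Not comptime-known here, so this lowers to `.error_name` in AIR.
    const name = @errorName(err);
    _ = name;
}
```
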
fn zirUnaryMath(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -10883,15 +11046,63 @@ fn zirShrExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
fn zirBitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const src = inst_data.src();
- return sema.fail(block, src, "TODO: Sema.zirBitOffsetOf", .{});
+ const offset = try bitOffsetOf(sema, block, inst);
+ return sema.addIntUnsigned(Type.comptime_int, offset);
}
fn zirOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const offset = try bitOffsetOf(sema, block, inst);
+ return sema.addIntUnsigned(Type.comptime_int, offset / 8);
+}
+
+fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u64 {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const src = inst_data.src();
- return sema.fail(block, src, "TODO: Sema.zirOffsetOf", .{});
+ sema.src = .{ .node_offset_bin_op = inst_data.src_node };
+ const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
+ const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
+ const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
+
+ const ty = try sema.resolveType(block, lhs_src, extra.lhs);
+ const field_name = try sema.resolveConstString(block, rhs_src, extra.rhs);
+
+ try sema.resolveTypeLayout(block, lhs_src, ty);
+ if (ty.tag() != .@"struct") {
+ return sema.fail(
+ block,
+ lhs_src,
+ "expected struct type, found '{}'",
+ .{ty},
+ );
+ }
+
+ const index = ty.structFields().getIndex(field_name) orelse {
+ return sema.fail(
+ block,
+ rhs_src,
+ "struct '{}' has no field '{s}'",
+ .{ ty, field_name },
+ );
+ };
+
+ const target = sema.mod.getTarget();
+ const layout = ty.containerLayout();
+ if (layout == .Packed) {
+ var it = ty.iteratePackedStructOffsets(target);
+ while (it.next()) |field_offset| {
+ if (field_offset.field == index) {
+ return (field_offset.offset * 8) + field_offset.running_bits;
+ }
+ }
+ } else {
+ var it = ty.iterateStructOffsets(target);
+ while (it.next()) |field_offset| {
+ if (field_offset.field == index) {
+ return field_offset.offset * 8;
+ }
+ }
+ }
+
+ unreachable;
}
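
A sketch of the two layout paths above, assuming stage2's packed-struct offset iteration at the time of this commit; note that `@offsetOf` here is simply the bit offset divided by 8.

```zig
const std = @import("std");

const P = packed struct { a: u1, b: u3, c: u12 };

comptime {
    // Packed layout: bit offsets accumulate field bit widths.
    std.debug.assert(@bitOffsetOf(P, "b") == 1);
    std.debug.assert(@bitOffsetOf(P, "c") == 4);
    // Byte offset is the bit offset truncated to whole bytes.
    std.debug.assert(@offsetOf(P, "c") == @bitOffsetOf(P, "c") / 8);
}
```
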
/// Returns `true` if the type was a comptime_int.
@@ -15118,7 +15329,7 @@ fn resolveStructLayout(
.field_types_wip, .layout_wip => {
return sema.fail(block, src, "struct {} depends on itself", .{ty});
},
- .have_layout => return,
+ .have_layout, .fully_resolved_wip, .fully_resolved => return,
}
struct_obj.status = .layout_wip;
for (struct_obj.fields.values()) |field| {
@@ -15140,7 +15351,7 @@ fn resolveUnionLayout(
.field_types_wip, .layout_wip => {
return sema.fail(block, src, "union {} depends on itself", .{ty});
},
- .have_layout => return,
+ .have_layout, .fully_resolved_wip, .fully_resolved => return,
}
union_obj.status = .layout_wip;
for (union_obj.fields.values()) |field| {
@@ -15149,7 +15360,7 @@ fn resolveUnionLayout(
union_obj.status = .have_layout;
}
-fn resolveTypeForCodegen(
+fn resolveTypeFully(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
@@ -15158,20 +15369,67 @@ fn resolveTypeForCodegen(
switch (ty.zigTypeTag()) {
.Pointer => {
const child_ty = try sema.resolveTypeFields(block, src, ty.childType());
- return resolveTypeForCodegen(sema, block, src, child_ty);
+ return resolveTypeFully(sema, block, src, child_ty);
},
- .Struct => return resolveStructLayout(sema, block, src, ty),
- .Union => return resolveUnionLayout(sema, block, src, ty),
- .Array => return resolveTypeForCodegen(sema, block, src, ty.childType()),
+ .Struct => return resolveStructFully(sema, block, src, ty),
+ .Union => return resolveUnionFully(sema, block, src, ty),
+ .Array => return resolveTypeFully(sema, block, src, ty.childType()),
.Optional => {
var buf: Type.Payload.ElemType = undefined;
- return resolveTypeForCodegen(sema, block, src, ty.optionalChild(&buf));
+ return resolveTypeFully(sema, block, src, ty.optionalChild(&buf));
},
- .ErrorUnion => return resolveTypeForCodegen(sema, block, src, ty.errorUnionPayload()),
+ .ErrorUnion => return resolveTypeFully(sema, block, src, ty.errorUnionPayload()),
else => {},
}
}
+fn resolveStructFully(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ ty: Type,
+) CompileError!void {
+ try resolveStructLayout(sema, block, src, ty);
+
+ const resolved_ty = try sema.resolveTypeFields(block, src, ty);
+ const struct_obj = resolved_ty.castTag(.@"struct").?.data;
+ switch (struct_obj.status) {
+ .none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {},
+ .fully_resolved_wip, .fully_resolved => return,
+ }
+
+    // After we have resolved the struct layout, we have to go over the fields
+    // again to make sure pointer fields get their child types resolved as well.
+ struct_obj.status = .fully_resolved_wip;
+ for (struct_obj.fields.values()) |field| {
+ try sema.resolveTypeFully(block, src, field.ty);
+ }
+ struct_obj.status = .fully_resolved;
+}
+
+fn resolveUnionFully(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ ty: Type,
+) CompileError!void {
+ try resolveUnionLayout(sema, block, src, ty);
+
+ const resolved_ty = try sema.resolveTypeFields(block, src, ty);
+ const union_obj = resolved_ty.cast(Type.Payload.Union).?.data;
+ switch (union_obj.status) {
+ .none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {},
+ .fully_resolved_wip, .fully_resolved => return,
+ }
+
+    // The same goes for unions (see the comment on structs above).
+ union_obj.status = .fully_resolved_wip;
+ for (union_obj.fields.values()) |field| {
+ try sema.resolveTypeFully(block, src, field.ty);
+ }
+ union_obj.status = .fully_resolved;
+}
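
The `fully_resolved_wip` status exists to break cycles: layout resolution deliberately stops at pointers, but codegen still needs pointer child types resolved, and a self-referential type would otherwise recurse forever. An illustrative case:

```zig
const Node = struct {
    // resolveStructLayout does not descend through this pointer (the
    // pointer's size is known without it), but resolveStructFully must
    // resolve Node through it exactly once; the .fully_resolved_wip
    // status is what stops the recursion from looping.
    next: ?*Node,
    value: u32,
};
```
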
+
fn resolveTypeFields(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!Type {
switch (ty.tag()) {
.@"struct" => {
@@ -15181,7 +15439,12 @@ fn resolveTypeFields(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) Comp
.field_types_wip => {
return sema.fail(block, src, "struct {} depends on itself", .{ty});
},
- .have_field_types, .have_layout, .layout_wip => return ty,
+ .have_field_types,
+ .have_layout,
+ .layout_wip,
+ .fully_resolved_wip,
+ .fully_resolved,
+ => return ty,
}
struct_obj.status = .field_types_wip;
@@ -15214,7 +15477,12 @@ fn resolveTypeFields(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) Comp
.field_types_wip => {
return sema.fail(block, src, "union {} depends on itself", .{ty});
},
- .have_field_types, .have_layout, .layout_wip => return ty,
+ .have_field_types,
+ .have_layout,
+ .layout_wip,
+ .fully_resolved_wip,
+ .fully_resolved,
+ => return ty,
}
union_obj.status = .field_types_wip;
diff --git a/src/Zir.zig b/src/Zir.zig
index a7d813cfad..68c1b9df48 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -1510,7 +1510,7 @@ pub const Inst = struct {
/// `operand` is `src_node: i32`.
ret_addr,
/// Implements the `@src` builtin.
- /// `operand` is `src_node: i32`.
+    /// `operand` is a payload index to `LineColumn`.
builtin_src,
/// Implements the `@errorReturnTrace` builtin.
/// `operand` is `src_node: i32`.
@@ -2160,10 +2160,7 @@ pub const Inst = struct {
switch_inst: Index,
prong_index: u32,
},
- dbg_stmt: struct {
- line: u32,
- column: u32,
- },
+ dbg_stmt: LineColumn,
/// Used for unary operators which reference an inst,
/// with an AST node source location.
inst_node: struct {
@@ -2964,6 +2961,11 @@ pub const Inst = struct {
token: Ast.TokenIndex,
};
};
+
+ pub const LineColumn = struct {
+ line: u32,
+ column: u32,
+ };
};
pub const SpecialProng = enum { none, @"else", under };
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 1a7105da31..1e69f7db5a 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -592,6 +592,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.ctz => try self.airCtz(inst),
.popcount => try self.airPopcount(inst),
.tag_name => try self.airTagName(inst),
+ .error_name => try self.airErrorName(inst),
.atomic_store_unordered => try self.airAtomicStore(inst, .Unordered),
.atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic),
@@ -2103,17 +2104,16 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void {
// block results.
.mcv = MCValue{ .none = {} },
});
- const block_data = self.blocks.getPtr(inst).?;
- defer block_data.relocs.deinit(self.gpa);
+ defer self.blocks.getPtr(inst).?.relocs.deinit(self.gpa);
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Block, ty_pl.payload);
const body = self.air.extra[extra.end..][0..extra.data.body_len];
try self.genBody(body);
- for (block_data.relocs.items) |reloc| try self.performReloc(reloc);
+ for (self.blocks.getPtr(inst).?.relocs.items) |reloc| try self.performReloc(reloc);
- const result = @bitCast(MCValue, block_data.mcv);
+ const result = self.blocks.getPtr(inst).?.mcv;
return self.finishAir(inst, result, .{ .none, .none, .none });
}
@@ -2557,6 +2557,16 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
+fn airErrorName(self: *Self, inst: Air.Inst.Index) !void {
+ const un_op = self.air.instructions.items(.data)[inst].un_op;
+ const operand = try self.resolveInst(un_op);
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else {
+ _ = operand;
+ return self.fail("TODO implement airErrorName for aarch64", .{});
+ };
+ return self.finishAir(inst, result, .{ un_op, .none, .none });
+}
+
fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
// First section of indexes correspond to a set number of constant values.
const ref_int = @enumToInt(inst);
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index b24ec3fa9b..3501a597f9 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -338,7 +338,6 @@ fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
fn addNop(self: *Self) error{OutOfMemory}!Mir.Inst.Index {
return try self.addInst(.{
.tag = .nop,
- .cond = .al,
.data = .{ .nop = {} },
});
}
@@ -371,7 +370,6 @@ fn gen(self: *Self) !void {
// mov fp, sp
_ = try self.addInst(.{
.tag = .mov,
- .cond = .al,
.data = .{ .rr_op = .{
.rd = .fp,
.rn = .r0,
@@ -405,7 +403,6 @@ fn gen(self: *Self) !void {
self.mir_instructions.set(push_reloc, .{
.tag = .push,
- .cond = .al,
.data = .{ .register_list = saved_regs },
});
@@ -416,7 +413,6 @@ fn gen(self: *Self) !void {
if (Instruction.Operand.fromU32(stack_size)) |op| {
self.mir_instructions.set(sub_reloc, .{
.tag = .sub,
- .cond = .al,
.data = .{ .rr_op = .{ .rd = .sp, .rn = .sp, .op = op } },
});
} else {
@@ -440,7 +436,6 @@ fn gen(self: *Self) !void {
} else for (self.exitlude_jump_relocs.items) |jmp_reloc| {
self.mir_instructions.set(jmp_reloc, .{
.tag = .b,
- .cond = .al,
.data = .{ .inst = @intCast(u32, self.mir_instructions.len) },
});
}
@@ -452,7 +447,6 @@ fn gen(self: *Self) !void {
// mov sp, fp
_ = try self.addInst(.{
.tag = .mov,
- .cond = .al,
.data = .{ .rr_op = .{
.rd = .sp,
.rn = .r0,
@@ -463,7 +457,6 @@ fn gen(self: *Self) !void {
// pop {fp, pc}
_ = try self.addInst(.{
.tag = .pop,
- .cond = .al,
.data = .{ .register_list = saved_regs },
});
} else {
@@ -590,6 +583,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.ctz => try self.airCtz(inst),
.popcount => try self.airPopcount(inst),
.tag_name => try self.airTagName(inst),
+ .error_name => try self.airErrorName(inst),
.atomic_store_unordered => try self.airAtomicStore(inst, .Unordered),
.atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic),
@@ -1250,7 +1244,6 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
_ = try self.addInst(.{
.tag = tag,
- .cond = .al,
.data = .{ .rr_offset = .{
.rt = dst_reg,
.rn = base_mcv.register,
@@ -1261,7 +1254,6 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
2 => {
_ = try self.addInst(.{
.tag = .ldrh,
- .cond = .al,
.data = .{ .rr_extra_offset = .{
.rt = dst_reg,
.rn = base_mcv.register,
@@ -1405,7 +1397,6 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
.register => |dst_reg| {
_ = try self.addInst(.{
.tag = .ldr,
- .cond = .al,
.data = .{ .rr_offset = .{
.rt = dst_reg,
.rn = reg,
@@ -1429,7 +1420,6 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
const tmp_regs = try self.register_manager.allocRegs(2, .{ null, null }, &.{reg});
_ = try self.addInst(.{
.tag = .ldr,
- .cond = .al,
.data = .{ .rr_offset = .{
.rt = tmp_regs[0],
.rn = reg,
@@ -1438,7 +1428,6 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
});
_ = try self.addInst(.{
.tag = .ldr,
- .cond = .al,
.data = .{ .rr_offset = .{
.rt = tmp_regs[1],
.rn = reg,
@@ -1464,7 +1453,6 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
};
_ = try self.addInst(.{
.tag = .sub,
- .cond = .al,
.data = .{ .rr_op = .{
.rd = dst_reg,
.rn = .fp,
@@ -1478,7 +1466,6 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
};
_ = try self.addInst(.{
.tag = .mov,
- .cond = .al,
.data = .{ .rr_op = .{
.rd = len_reg,
.rn = .r0,
@@ -1559,7 +1546,6 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
.register => |value_reg| {
_ = try self.addInst(.{
.tag = .str,
- .cond = .al,
.data = .{ .rr_offset = .{
.rt = value_reg,
.rn = addr_reg,
@@ -1865,7 +1851,6 @@ fn genArmBinOpCode(
_ = try self.addInst(.{
.tag = tag,
- .cond = .al,
.data = .{ .rr_op = .{
.rd = dst_reg,
.rn = op1,
@@ -1878,7 +1863,6 @@ fn genArmBinOpCode(
_ = try self.addInst(.{
.tag = tag,
- .cond = .al,
.data = .{ .rr_op = .{
.rd = dst_reg,
.rn = op1,
@@ -1889,7 +1873,6 @@ fn genArmBinOpCode(
.cmp_eq => {
_ = try self.addInst(.{
.tag = .cmp,
- .cond = .al,
.data = .{ .rr_op = .{
.rd = .r0,
.rn = op1,
@@ -1915,7 +1898,6 @@ fn genArmBinOpCode(
_ = try self.addInst(.{
.tag = tag,
- .cond = .al,
.data = .{ .rr_shift = .{
.rd = dst_reg,
.rm = op1,
@@ -1992,7 +1974,6 @@ fn genArmMul(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Ai
_ = try self.addInst(.{
.tag = .mul,
- .cond = .al,
.data = .{ .rrr = .{
.rd = dst_mcv.register,
.rn = lhs_mcv.register,
@@ -2044,7 +2025,6 @@ fn genArmMulConstant(self: *Self, inst: Air.Inst.Index, op: Air.Inst.Ref, op_ind
_ = try self.addInst(.{
.tag = .mul,
- .cond = .al,
.data = .{ .rrr = .{
.rd = dst_mcv.register,
.rn = lhs_mcv.register,
@@ -2065,7 +2045,6 @@ fn genArmInlineMemcpy(
// mov count, #0
_ = try self.addInst(.{
.tag = .mov,
- .cond = .al,
.data = .{ .rr_op = .{
.rd = count,
.rn = .r0,
@@ -2077,7 +2056,6 @@ fn genArmInlineMemcpy(
// cmp count, len
_ = try self.addInst(.{
.tag = .cmp,
- .cond = .al,
.data = .{ .rr_op = .{
.rd = .r0,
.rn = count,
@@ -2095,7 +2073,6 @@ fn genArmInlineMemcpy(
// ldrb tmp, [src, count]
_ = try self.addInst(.{
.tag = .ldrb,
- .cond = .al,
.data = .{ .rr_offset = .{
.rt = tmp,
.rn = src,
@@ -2106,7 +2083,6 @@ fn genArmInlineMemcpy(
// strb tmp, [src, count]
_ = try self.addInst(.{
.tag = .strb,
- .cond = .al,
.data = .{ .rr_offset = .{
.rt = tmp,
.rn = dst,
@@ -2117,7 +2093,6 @@ fn genArmInlineMemcpy(
// add count, count, #1
_ = try self.addInst(.{
.tag = .add,
- .cond = .al,
.data = .{ .rr_op = .{
.rd = count,
.rn = count,
@@ -2128,7 +2103,6 @@ fn genArmInlineMemcpy(
// b loop
_ = try self.addInst(.{
.tag = .b,
- .cond = .al,
.data = .{ .inst = @intCast(u32, self.mir_instructions.len - 5) },
});
@@ -2235,7 +2209,6 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
fn airBreakpoint(self: *Self) !void {
_ = try self.addInst(.{
.tag = .bkpt,
- .cond = .al,
.data = .{ .imm16 = 0 },
});
return self.finishAirBookkeeping();
@@ -2347,14 +2320,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
if (Target.arm.featureSetHas(self.target.cpu.features, .has_v5t)) {
_ = try self.addInst(.{
.tag = .blx,
- .cond = .al,
.data = .{ .reg = .lr },
});
} else {
return self.fail("TODO fix blx emulation for ARM <v5", .{});
// _ = try self.addInst(.{
// .tag = .mov,
- // .cond = .al,
// .data = .{ .rr_op = .{
// .rd = .lr,
// .rn = .r0,
@@ -2363,7 +2334,6 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
// });
// _ = try self.addInst(.{
// .tag = .bx,
- // .cond = .al,
// .data = .{ .reg = .lr },
// });
}
@@ -2534,6 +2504,8 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
break :blk condition.negate();
},
.register => |reg| blk: {
+ try self.spillCompareFlagsIfOccupied();
+
// cmp reg, 1
// bne ...
_ = try self.addInst(.{
@@ -2548,6 +2520,26 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
break :blk .ne;
},
+ .stack_offset,
+ .memory,
+ => blk: {
+ try self.spillCompareFlagsIfOccupied();
+
+ const reg = try self.copyToTmpRegister(Type.initTag(.bool), cond);
+
+ // cmp reg, 1
+ // bne ...
+ _ = try self.addInst(.{
+ .tag = .cmp,
+ .data = .{ .rr_op = .{
+ .rd = .r0,
+ .rn = reg,
+ .op = Instruction.Operand.imm(1, 0),
+ } },
+ });
+
+ break :blk .ne;
+ },
else => return self.fail("TODO implement condbr {} when condition is {s}", .{ self.target.cpu.arch, @tagName(cond) }),
};
@@ -2888,7 +2880,6 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
fn jump(self: *Self, inst: Mir.Inst.Index) !void {
_ = try self.addInst(.{
.tag = .b,
- .cond = .al,
.data = .{ .inst = inst },
});
}
@@ -2904,17 +2895,16 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void {
// block results.
.mcv = MCValue{ .none = {} },
});
- const block_data = self.blocks.getPtr(inst).?;
- defer block_data.relocs.deinit(self.gpa);
+ defer self.blocks.getPtr(inst).?.relocs.deinit(self.gpa);
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Block, ty_pl.payload);
const body = self.air.extra[extra.end..][0..extra.data.body_len];
try self.genBody(body);
- for (block_data.relocs.items) |reloc| try self.performReloc(reloc);
+ for (self.blocks.getPtr(inst).?.relocs.items) |reloc| try self.performReloc(reloc);
- const result = @bitCast(MCValue, block_data.mcv);
+ const result = self.blocks.getPtr(inst).?.mcv;
return self.finishAir(inst, result, .{ .none, .none, .none });
}
@@ -2958,7 +2948,16 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
const operand_mcv = try self.resolveInst(operand);
const block_mcv = block_data.mcv;
if (block_mcv == .none) {
- block_data.mcv = operand_mcv;
+ block_data.mcv = switch (operand_mcv) {
+ .none, .dead, .unreach => unreachable,
+ .register, .stack_offset, .memory => operand_mcv,
+ .immediate => blk: {
+ const new_mcv = try self.allocRegOrMem(block, true);
+ try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, operand_mcv);
+ break :blk new_mcv;
+ },
+ else => return self.fail("TODO implement block_data.mcv = operand_mcv for {}", .{operand_mcv}),
+ };
} else {
try self.setRegOrMem(self.air.typeOfIndex(block), block_mcv, operand_mcv);
}
@@ -2972,7 +2971,6 @@ fn brVoid(self: *Self, block: Air.Inst.Index) !void {
// Emit a jump with a relocation. It will be patched up after the block ends.
try block_data.relocs.append(self.gpa, try self.addInst(.{
.tag = .b,
- .cond = .al,
.data = .{ .inst = undefined }, // populated later through performReloc
}));
}
@@ -3028,7 +3026,6 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
if (mem.eql(u8, asm_source, "svc #0")) {
_ = try self.addInst(.{
.tag = .svc,
- .cond = .al,
.data = .{ .imm24 = 0 },
});
} else {
@@ -3134,7 +3131,6 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
_ = try self.addInst(.{
.tag = tag,
- .cond = .al,
.data = .{ .rr_offset = .{
.rt = reg,
.rn = .fp,
@@ -3152,7 +3148,6 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
_ = try self.addInst(.{
.tag = .strh,
- .cond = .al,
.data = .{ .rr_extra_offset = .{
.rt = reg,
.rn = .fp,
@@ -3199,7 +3194,6 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
};
_ = try self.addInst(.{
.tag = .sub,
- .cond = .al,
.data = .{ .rr_op = .{
.rd = src_reg,
.rn = .fp,
@@ -3214,7 +3208,6 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
};
_ = try self.addInst(.{
.tag = .sub,
- .cond = .al,
.data = .{ .rr_op = .{
.rd = dst_reg,
.rn = .fp,
@@ -3229,7 +3222,6 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
};
_ = try self.addInst(.{
.tag = .mov,
- .cond = .al,
.data = .{ .rr_op = .{
.rd = len_reg,
.rn = .r0,
@@ -3271,7 +3263,6 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
// mov reg, 0
_ = try self.addInst(.{
.tag = .mov,
- .cond = .al,
.data = .{ .rr_op = .{
.rd = reg,
.rn = .r0,
@@ -3296,7 +3287,6 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
if (Instruction.Operand.fromU32(@intCast(u32, x))) |op| {
_ = try self.addInst(.{
.tag = .mov,
- .cond = .al,
.data = .{ .rr_op = .{
.rd = reg,
.rn = .r0,
@@ -3306,7 +3296,6 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
} else if (Instruction.Operand.fromU32(~@intCast(u32, x))) |op| {
_ = try self.addInst(.{
.tag = .mvn,
- .cond = .al,
.data = .{ .rr_op = .{
.rd = reg,
.rn = .r0,
@@ -3317,7 +3306,6 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
if (Target.arm.featureSetHas(self.target.cpu.features, .has_v7)) {
_ = try self.addInst(.{
.tag = .movw,
- .cond = .al,
.data = .{ .r_imm16 = .{
.rd = reg,
.imm16 = @intCast(u16, x),
@@ -3326,7 +3314,6 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
} else {
_ = try self.addInst(.{
.tag = .mov,
- .cond = .al,
.data = .{ .rr_op = .{
.rd = reg,
.rn = .r0,
@@ -3335,7 +3322,6 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
});
_ = try self.addInst(.{
.tag = .orr,
- .cond = .al,
.data = .{ .rr_op = .{
.rd = reg,
.rn = reg,
@@ -3352,7 +3338,6 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
// movt reg, #0xaaaa
_ = try self.addInst(.{
.tag = .movw,
- .cond = .al,
.data = .{ .r_imm16 = .{
.rd = reg,
.imm16 = @truncate(u16, x),
@@ -3360,7 +3345,6 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
});
_ = try self.addInst(.{
.tag = .movt,
- .cond = .al,
.data = .{ .r_imm16 = .{
.rd = reg,
.imm16 = @truncate(u16, x >> 16),
@@ -3374,7 +3358,6 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
// orr reg, reg, #0xdd, 8
_ = try self.addInst(.{
.tag = .mov,
- .cond = .al,
.data = .{ .rr_op = .{
.rd = reg,
.rn = .r0,
@@ -3383,7 +3366,6 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
});
_ = try self.addInst(.{
.tag = .orr,
- .cond = .al,
.data = .{ .rr_op = .{
.rd = reg,
.rn = reg,
@@ -3392,7 +3374,6 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
});
_ = try self.addInst(.{
.tag = .orr,
- .cond = .al,
.data = .{ .rr_op = .{
.rd = reg,
.rn = reg,
@@ -3401,7 +3382,6 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
});
_ = try self.addInst(.{
.tag = .orr,
- .cond = .al,
.data = .{ .rr_op = .{
.rd = reg,
.rn = reg,
@@ -3419,7 +3399,6 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
// mov reg, src_reg
_ = try self.addInst(.{
.tag = .mov,
- .cond = .al,
.data = .{ .rr_op = .{
.rd = reg,
.rn = .r0,
@@ -3433,7 +3412,6 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
try self.genSetReg(ty, reg, .{ .immediate = @intCast(u32, addr) });
_ = try self.addInst(.{
.tag = .ldr,
- .cond = .al,
.data = .{ .rr_offset = .{
.rt = reg,
.rn = reg,
@@ -3460,7 +3438,6 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
_ = try self.addInst(.{
.tag = tag,
- .cond = .al,
.data = .{ .rr_offset = .{
.rt = reg,
.rn = .fp,
@@ -3478,7 +3455,6 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
_ = try self.addInst(.{
.tag = .ldrh,
- .cond = .al,
.data = .{ .rr_extra_offset = .{
.rt = reg,
.rn = .fp,
@@ -3506,7 +3482,6 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
_ = try self.addInst(.{
.tag = tag,
- .cond = .al,
.data = .{ .r_stack_offset = .{
.rt = reg,
.stack_offset = @intCast(u32, adj_off),
@@ -3550,7 +3525,6 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
_ = try self.addInst(.{
.tag = tag,
- .cond = .al,
.data = .{ .rr_offset = .{
.rt = reg,
.rn = .sp,
@@ -3565,7 +3539,6 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
_ = try self.addInst(.{
.tag = .strh,
- .cond = .al,
.data = .{ .rr_extra_offset = .{
.rt = reg,
.rn = .sp,
@@ -3682,6 +3655,16 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
+fn airErrorName(self: *Self, inst: Air.Inst.Index) !void {
+ const un_op = self.air.instructions.items(.data)[inst].un_op;
+ const operand = try self.resolveInst(un_op);
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else {
+ _ = operand;
+ return self.fail("TODO implement airErrorName for arm", .{});
+ };
+ return self.finishAir(inst, result, .{ un_op, .none, .none });
+}
+
fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
// First section of indexes correspond to a set number of constant values.
const ref_int = @enumToInt(inst);
diff --git a/src/arch/arm/Mir.zig b/src/arch/arm/Mir.zig
index 13aa1bbe59..b19186e003 100644
--- a/src/arch/arm/Mir.zig
+++ b/src/arch/arm/Mir.zig
@@ -20,7 +20,7 @@ extra: []const u32,
pub const Inst = struct {
tag: Tag,
- cond: bits.Condition,
+ cond: bits.Condition = .al,
/// The meaning of this depends on `tag`.
data: Data,
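
This default is what makes the many `.cond = .al` deletions throughout this commit possible. A before/after sketch of a call site, as it appears inside the backend's methods:

```zig
// Before: every instruction spelled out the "always" condition.
_ = try self.addInst(.{
    .tag = .nop,
    .cond = .al,
    .data = .{ .nop = {} },
});

// After: .cond defaults to .al, so only genuinely conditional
// instructions need to set it explicitly.
_ = try self.addInst(.{
    .tag = .nop,
    .data = .{ .nop = {} },
});
```
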
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index ddb87c4651..a8f2b69d90 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -571,6 +571,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.ctz => try self.airCtz(inst),
.popcount => try self.airPopcount(inst),
.tag_name => try self.airTagName(inst),
+ .error_name => try self.airErrorName(inst),
.atomic_store_unordered => try self.airAtomicStore(inst, .Unordered),
.atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic),
@@ -1719,17 +1720,16 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void {
// block results.
.mcv = MCValue{ .none = {} },
});
- const block_data = self.blocks.getPtr(inst).?;
- defer block_data.relocs.deinit(self.gpa);
+ defer self.blocks.getPtr(inst).?.relocs.deinit(self.gpa);
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Block, ty_pl.payload);
const body = self.air.extra[extra.end..][0..extra.data.body_len];
try self.genBody(body);
- for (block_data.relocs.items) |reloc| try self.performReloc(reloc);
+ for (self.blocks.getPtr(inst).?.relocs.items) |reloc| try self.performReloc(reloc);
- const result = @bitCast(MCValue, block_data.mcv);
+ const result = self.blocks.getPtr(inst).?.mcv;
return self.finishAir(inst, result, .{ .none, .none, .none });
}
@@ -2056,6 +2056,16 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
+fn airErrorName(self: *Self, inst: Air.Inst.Index) !void {
+ const un_op = self.air.instructions.items(.data)[inst].un_op;
+ const operand = try self.resolveInst(un_op);
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else {
+ _ = operand;
+ return self.fail("TODO implement airErrorName for riscv64", .{});
+ };
+ return self.finishAir(inst, result, .{ un_op, .none, .none });
+}
+
fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
// First section of indexes correspond to a set number of constant values.
const ref_int = @enumToInt(inst);
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index fc93eb9dc8..ae9c8b37b7 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -597,7 +597,8 @@ fn resolveInst(self: Self, ref: Air.Inst.Ref) WValue {
};
const inst_type = self.air.typeOfIndex(inst_index);
- if (!inst_type.hasCodeGenBits()) return .none;
+    // Zero-bit integers are allowed, so integer types are never skipped here.
+ if (!inst_type.hasCodeGenBits() and !inst_type.isInt()) return WValue{ .none = {} };
if (self.air.instructions.items(.tag)[inst_index] == .constant) {
const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl;
@@ -689,7 +690,7 @@ fn typeToValtype(self: *Self, ty: Type) InnerError!wasm.Valtype {
const info = ty.intInfo(self.target);
if (info.bits <= 32) break :blk wasm.Valtype.i32;
if (info.bits > 32 and info.bits <= 64) break :blk wasm.Valtype.i64;
- return self.fail("Integer bit size not supported by wasm: '{d}'", .{info.bits});
+                break :blk wasm.Valtype.i32; // represented as a pointer into the stack
},
.Enum => switch (ty.tag()) {
.enum_simple => wasm.Valtype.i32,
@@ -752,7 +753,7 @@ fn genFunctype(self: *Self, fn_ty: Type) !wasm.Type {
defer returns.deinit();
const return_type = fn_ty.fnReturnType();
- const want_sret = isByRef(return_type);
+ const want_sret = self.isByRef(return_type);
if (want_sret) {
try params.append(try self.typeToValtype(Type.usize));
@@ -900,22 +901,11 @@ fn genTypedValue(self: *Self, ty: Type, val: Value) InnerError!Result {
.Array => switch (val.tag()) {
.bytes => {
const payload = val.castTag(.bytes).?;
- if (ty.sentinel()) |sentinel| {
- try self.code.appendSlice(payload.data);
-
- switch (try self.genTypedValue(ty.childType(), sentinel)) {
- .appended => return Result.appended,
- .externally_managed => |data| {
- try self.code.appendSlice(data);
- return Result.appended;
- },
- }
- }
return Result{ .externally_managed = payload.data };
},
.array => {
const elem_vals = val.castTag(.array).?.data;
- const elem_ty = ty.elemType();
+ const elem_ty = ty.childType();
for (elem_vals) |elem_val| {
switch (try self.genTypedValue(elem_ty, elem_val)) {
.appended => {},
@@ -1094,7 +1084,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) InnerError!CallWValu
const ret_ty = fn_ty.fnReturnType();
// Check if we store the result as a pointer to the stack rather than
// by value
- if (isByRef(ret_ty)) {
+ if (self.isByRef(ret_ty)) {
// the sret arg will be passed as first argument, therefore we
// set the `return_value` before allocating locals for regular args.
result.return_value = .{ .local = self.local_index };
@@ -1219,7 +1209,7 @@ fn ptrSize(self: *const Self) u16 {
/// For a given `Type`, will return true when the type will be passed
/// by reference, rather than by value.
-fn isByRef(ty: Type) bool {
+fn isByRef(self: Self, ty: Type) bool {
switch (ty.zigTypeTag()) {
.Type,
.ComptimeInt,
@@ -1234,7 +1224,6 @@ fn isByRef(ty: Type) bool {
.NoReturn,
.Void,
.Bool,
- .Int,
.Float,
.ErrorSet,
.Fn,
@@ -1248,6 +1237,7 @@ fn isByRef(ty: Type) bool {
.Frame,
.Union,
=> return ty.hasCodeGenBits(),
+        .Int => return ty.intInfo(self.target).bits > 64,
.ErrorUnion => {
const has_tag = ty.errorUnionSet().hasCodeGenBits();
const has_pl = ty.errorUnionPayload().hasCodeGenBits();
@@ -1269,10 +1259,15 @@ fn isByRef(ty: Type) bool {
/// Creates a new local for a pointer that points to memory with given offset.
/// This can be used to get a pointer to a struct field, error payload, etc.
-fn buildPointerOffset(self: *Self, ptr_value: WValue, offset: u64) InnerError!WValue {
+/// When `modify` is passed as the action, the given `ptr_value` is modified in place instead of
+/// allocating a new local to store the pointer. This allows local re-use and improves binary size.
+fn buildPointerOffset(self: *Self, ptr_value: WValue, offset: u64, action: enum { modify, new }) InnerError!WValue {
// do not perform arithmetic when offset is 0.
if (offset == 0) return ptr_value;
- const result_ptr = try self.allocLocal(Type.usize);
+ const result_ptr: WValue = switch (action) {
+ .new => try self.allocLocal(Type.usize),
+ .modify => ptr_value,
+ };
try self.emitWValue(ptr_value);
switch (self.target.cpu.arch.ptrBitWidth()) {
32 => {
@@ -1289,6 +1284,16 @@ fn buildPointerOffset(self: *Self, ptr_value: WValue, offset: u64) InnerError!WV
return result_ptr;
}
+/// Creates a new local and sets its value to the given `value` local.
+/// The caller must ensure `ty` matches that of the given `value`.
+/// Asserts `value` is a `local`.
+fn copyLocal(self: *Self, value: WValue, ty: Type) InnerError!WValue {
+ const copy = try self.allocLocal(ty);
+ try self.addLabel(.local_get, value.local);
+ try self.addLabel(.local_set, copy.local);
+ return copy;
+}
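
How `copyLocal` pairs with the `.modify` mode of `buildPointerOffset` in the constant-lowering code later in this file; a sketch of the cursor pattern (names such as `cursor`, `tmp`, `field_ty`, and `field_size` are illustrative):

```zig
// `result` keeps pointing at the start of the stack slot, while `cursor`
// is a disposable copy that is advanced in place from field to field.
const cursor = try self.copyLocal(result, ty);
try self.store(cursor, tmp, field_ty, 0);
_ = try self.buildPointerOffset(cursor, field_size, .modify);
```
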
+
fn genInst(self: *Self, inst: Air.Inst.Index) !WValue {
const air_tags = self.air.instructions.items(.tag);
return switch (air_tags[inst]) {
@@ -1303,6 +1308,8 @@ fn genInst(self: *Self, inst: Air.Inst.Index) !WValue {
.bit_or => self.airBinOp(inst, .@"or"),
.bool_and => self.airBinOp(inst, .@"and"),
.bool_or => self.airBinOp(inst, .@"or"),
+ .shl => self.airBinOp(inst, .shl),
+ .shr => self.airBinOp(inst, .shr),
.xor => self.airBinOp(inst, .xor),
.cmp_eq => self.airCmp(inst, .eq),
@@ -1312,6 +1319,7 @@ fn genInst(self: *Self, inst: Air.Inst.Index) !WValue {
.cmp_lt => self.airCmp(inst, .lt),
.cmp_neq => self.airCmp(inst, .neq),
+ .array_elem_val => self.airArrayElemVal(inst),
.array_to_slice => self.airArrayToSlice(inst),
.alloc => self.airAlloc(inst),
.arg => self.airArg(inst),
@@ -1325,6 +1333,7 @@ fn genInst(self: *Self, inst: Air.Inst.Index) !WValue {
.constant => unreachable,
.dbg_stmt => WValue.none,
.intcast => self.airIntcast(inst),
+ .float_to_int => self.airFloatToInt(inst),
.is_err => self.airIsErr(inst, .i32_ne),
.is_non_err => self.airIsErr(inst, .i32_eq),
@@ -1401,17 +1410,16 @@ fn airRet(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
fn airRetPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const child_type = self.air.typeOfIndex(inst).childType();
+ if (child_type.abiSize(self.target) == 0) return WValue{ .none = {} };
+
+ if (self.isByRef(child_type)) {
+ return self.return_value;
+ }
// Initialize the stack
if (self.initial_stack_value == .none) {
try self.initializeStack();
}
-
- if (child_type.abiSize(self.target) == 0) return WValue{ .none = {} };
-
- if (isByRef(child_type)) {
- return self.return_value;
- }
return self.allocStack(child_type);
}
@@ -1421,7 +1429,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const ret_ty = self.air.typeOf(un_op).childType();
if (!ret_ty.hasCodeGenBits()) return WValue.none;
- if (!isByRef(ret_ty)) {
+ if (!self.isByRef(ret_ty)) {
const result = try self.load(operand, ret_ty, 0);
try self.emitWValue(result);
}
@@ -1443,7 +1451,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
else => unreachable,
};
const ret_ty = fn_ty.fnReturnType();
- const first_param_sret = isByRef(ret_ty);
+ const first_param_sret = self.isByRef(ret_ty);
const target: ?*Decl = blk: {
const func_val = self.air.value(pl_op.operand) orelse break :blk null;
@@ -1471,7 +1479,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
// If we need to pass by reference, but the argument is a constant,
// we must first lower it before passing it.
- if (isByRef(arg_ty) and arg_val == .constant) {
+ if (self.isByRef(arg_ty) and arg_val == .constant) {
const arg_local = try self.allocStack(arg_ty);
try self.store(arg_local, arg_val, arg_ty, 0);
try self.emitWValue(arg_local);
@@ -1583,7 +1591,12 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro
if (payload_ty.hasCodeGenBits()) {
const payload_local = try self.allocLocal(payload_ty);
try self.addLabel(.local_set, payload_local.local);
- try self.store(lhs, payload_local, payload_ty, payload_offset);
+ if (self.isByRef(payload_ty)) {
+ const ptr = try self.buildPointerOffset(lhs, payload_offset, .new);
+ try self.store(ptr, payload_local, payload_ty, 0);
+ } else {
+ try self.store(lhs, payload_local, payload_ty, payload_offset);
+ }
}
try self.addLabel(.local_set, tag_local.local);
@@ -1600,9 +1613,9 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro
// Load values from `rhs` stack position and store in `lhs` instead
const tag_local = try self.load(rhs, tag_ty, 0);
if (payload_ty.hasCodeGenBits()) {
- if (isByRef(payload_ty)) {
- const payload_ptr = try self.buildPointerOffset(rhs, payload_offset);
- const lhs_payload_ptr = try self.buildPointerOffset(lhs, payload_offset);
+ if (self.isByRef(payload_ty)) {
+ const payload_ptr = try self.buildPointerOffset(rhs, payload_offset, .new);
+ const lhs_payload_ptr = try self.buildPointerOffset(lhs, payload_offset, .new);
try self.store(lhs_payload_ptr, payload_ptr, payload_ty, 0);
} else {
const payload_local = try self.load(rhs, payload_ty, payload_offset);
@@ -1632,13 +1645,14 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro
else => unreachable,
}
},
- .Struct => {
- if (rhs == .constant) {
+ .Struct, .Array => {
+ const final_rhs = if (rhs == .constant) blk: {
+ const tmp = try self.allocLocal(Type.usize);
try self.emitWValue(rhs);
- try self.addLabel(.local_set, lhs.local);
- return;
- }
- return try self.memCopy(ty, lhs, rhs);
+ try self.addLabel(.local_set, tmp.local);
+ break :blk tmp;
+ } else rhs;
+ return try self.memCopy(ty, lhs, final_rhs);
},
.Pointer => {
if (ty.isSlice() and rhs == .constant) {
@@ -1649,10 +1663,20 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro
const ptr_local = try self.allocLocal(Type.usize);
const len_offset = self.ptrSize();
if (val.castTag(.decl_ref)) |decl| {
- // for decl references we also need to retrieve the length and the original decl's pointer
- try self.addMemArg(.i32_load, .{ .offset = 0, .alignment = self.ptrSize() });
- try self.addLabel(.memory_address, decl.data.link.wasm.sym_index);
- try self.addMemArg(.i32_load, .{ .offset = len_offset, .alignment = self.ptrSize() });
+ const decl_ty: Type = decl.data.ty;
+ if (decl_ty.isSlice()) {
+ // for decl references we also need to retrieve the length and the original decl's pointer
+ try self.addMemArg(.i32_load, .{ .offset = 0, .alignment = self.ptrSize() });
+ try self.addLabel(.memory_address, decl.data.link.wasm.sym_index);
+ try self.addMemArg(.i32_load, .{ .offset = len_offset, .alignment = self.ptrSize() });
+ } else if (decl_ty.zigTypeTag() == .Array) {
+ const len = decl_ty.arrayLen();
+ switch (self.ptrSize()) {
+ 4 => try self.addImm32(@bitCast(i32, @intCast(u32, len))),
+ 8 => try self.addImm64(len),
+ else => unreachable,
+ }
+ } else return self.fail("Wasm todo: Implement storing slices for decl_ref with type: {}", .{decl_ty});
}
try self.addLabel(.local_set, len_local.local);
try self.addLabel(.local_set, ptr_local.local);
@@ -1661,15 +1685,23 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro
return;
} else if (ty.isSlice()) {
// store pointer first
- const ptr_local = try self.load(rhs, Type.@"usize", 0);
- try self.store(lhs, ptr_local, Type.@"usize", 0);
+ const ptr_local = try self.load(rhs, Type.usize, 0);
+ try self.store(lhs, ptr_local, Type.usize, 0);
// retrieve length from rhs, and store that alongside lhs as well
- const len_local = try self.load(rhs, Type.@"usize", 4);
- try self.store(lhs, len_local, Type.@"usize", 4);
+ const len_local = try self.load(rhs, Type.usize, self.ptrSize());
+ try self.store(lhs, len_local, Type.usize, self.ptrSize());
return;
}
},
+ .Int => if (ty.intInfo(self.target).bits > 64) {
+ if (rhs == .constant) {
+ try self.emitWValue(rhs);
+ try self.addLabel(.local_set, lhs.local);
+ return;
+ }
+ return try self.memCopy(ty, lhs, rhs);
+ },
else => {},
}
try self.emitWValue(lhs);
@@ -1707,7 +1739,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
if (!ty.hasCodeGenBits()) return WValue{ .none = {} };
- if (isByRef(ty)) {
+ if (self.isByRef(ty)) {
const new_local = try self.allocStack(ty);
try self.store(new_local, operand, ty, 0);
return new_local;
@@ -1771,9 +1803,16 @@ fn airArg(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
}
fn airBinOp(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!WValue {
+ if (self.liveness.isUnused(inst)) return WValue{ .none = {} };
+
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = self.resolveInst(bin_op.lhs);
const rhs = self.resolveInst(bin_op.rhs);
+ const operand_ty = self.air.typeOfIndex(inst);
+
+ if (self.isByRef(operand_ty)) {
+ return self.fail("TODO: Implement binary operation for type: {}", .{operand_ty});
+ }
try self.emitWValue(lhs);
try self.emitWValue(rhs);
@@ -1849,16 +1888,36 @@ fn emitConstant(self: *Self, val: Value, ty: Type) InnerError!void {
// write constant
switch (int_info.signedness) {
.signed => switch (int_info.bits) {
- 0...32 => try self.addImm32(@intCast(i32, val.toSignedInt())),
- 33...64 => try self.addImm64(@bitCast(u64, val.toSignedInt())),
+ 0...32 => return try self.addImm32(@intCast(i32, val.toSignedInt())),
+ 33...64 => return try self.addImm64(@bitCast(u64, val.toSignedInt())),
+ 65...128 => {},
else => |bits| return self.fail("Wasm todo: emitConstant for integer with {d} bits", .{bits}),
},
.unsigned => switch (int_info.bits) {
- 0...32 => try self.addImm32(@bitCast(i32, @intCast(u32, val.toUnsignedInt()))),
- 33...64 => try self.addImm64(val.toUnsignedInt()),
+ 0...32 => return try self.addImm32(@bitCast(i32, @intCast(u32, val.toUnsignedInt()))),
+ 33...64 => return try self.addImm64(val.toUnsignedInt()),
+ 65...128 => {},
else => |bits| return self.fail("Wasm TODO: emitConstant for integer with {d} bits", .{bits}),
},
}
+ const result = try self.allocStack(ty);
+ var space: Value.BigIntSpace = undefined;
+ const bigint = val.toBigInt(&space);
+ if (bigint.limbs.len == 1 and bigint.limbs[0] == 0) {
+ try self.addLabel(.local_get, result.local);
+ return;
+ }
+ if (@sizeOf(usize) != @sizeOf(u64)) {
+ return self.fail("Wasm todo: Implement big integers for 32bit compiler", .{});
+ }
+
+ for (bigint.limbs) |_, index| {
+ const limb = bigint.limbs[bigint.limbs.len - index - 1];
+ try self.addLabel(.local_get, result.local);
+ try self.addImm64(limb);
+ try self.addMemArg(.i64_store, .{ .offset = @intCast(u32, index * 8), .alignment = 8 });
+ }
+ try self.addLabel(.local_get, result.local);
},
.Bool => try self.addImm32(@intCast(i32, val.toSignedInt())),
.Float => {
@@ -1968,19 +2027,76 @@ fn emitConstant(self: *Self, val: Value, ty: Type) InnerError!void {
const result = try self.allocStack(ty);
const fields = ty.structFields();
- var offset: u32 = 0;
+ const offset = try self.copyLocal(result, ty);
for (fields.values()) |field, index| {
- if (isByRef(field.ty)) {
- return self.fail("TODO: emitConstant for struct field type {}\n", .{field.ty});
- }
const tmp = try self.allocLocal(field.ty);
try self.emitConstant(struct_data.data[index], field.ty);
try self.addLabel(.local_set, tmp.local);
- try self.store(result, tmp, field.ty, offset);
- offset += @intCast(u32, field.ty.abiSize(self.target));
+ try self.store(offset, tmp, field.ty, 0);
+
+                // This prevents us from emitting useless instructions once we reach the end of the loop.
+ if (index != (fields.count() - 1)) {
+ _ = try self.buildPointerOffset(offset, field.ty.abiSize(self.target), .modify);
+ }
}
try self.addLabel(.local_get, result.local);
},
+ .Array => {
+ const result = try self.allocStack(ty);
+ if (val.castTag(.bytes)) |bytes| {
+ for (bytes.data) |byte, index| {
+ try self.addLabel(.local_get, result.local);
+ try self.addImm32(@intCast(i32, byte));
+ try self.addMemArg(.i32_store8, .{ .offset = @intCast(u32, index), .alignment = 1 });
+ }
+ } else if (val.castTag(.array)) |array| {
+ const elem_ty = ty.childType();
+ const elem_size = elem_ty.abiSize(self.target);
+ const tmp = try self.allocLocal(elem_ty);
+ const offset = try self.copyLocal(result, ty);
+ for (array.data) |value, index| {
+ try self.emitConstant(value, elem_ty);
+ try self.addLabel(.local_set, tmp.local);
+ try self.store(offset, tmp, elem_ty, 0);
+
+ if (index != (array.data.len - 1)) {
+ _ = try self.buildPointerOffset(offset, elem_size, .modify);
+ }
+ }
+ } else if (val.castTag(.repeated)) |repeated| {
+ const value = repeated.data;
+ const elem_ty = ty.childType();
+ const elem_size = elem_ty.abiSize(self.target);
+ const sentinel = ty.sentinel();
+ const len = ty.arrayLen();
+ const len_with_sent = len + @boolToInt(sentinel != null);
+ const tmp = try self.allocLocal(elem_ty);
+ const offset = try self.copyLocal(result, ty);
+
+ var index: u32 = 0;
+ while (index < len_with_sent) : (index += 1) {
+ if (sentinel != null and index == len) {
+ try self.emitConstant(sentinel.?, elem_ty);
+ } else {
+ try self.emitConstant(value, elem_ty);
+ }
+ try self.addLabel(.local_set, tmp.local);
+ try self.store(offset, tmp, elem_ty, 0);
+
+ if (index != (len_with_sent - 1)) {
+ _ = try self.buildPointerOffset(offset, elem_size, .modify);
+ }
+ }
+ } else if (val.tag() == .empty_array_sentinel) {
+ const elem_ty = ty.childType();
+ const sent_val = ty.sentinel().?;
+ const tmp = try self.allocLocal(elem_ty);
+ try self.emitConstant(sent_val, elem_ty);
+ try self.addLabel(.local_set, tmp.local);
+ try self.store(result, tmp, elem_ty, 0);
+ } else unreachable;
+ try self.addLabel(.local_get, result.local);
+ },
else => |zig_type| return self.fail("Wasm TODO: emitConstant for zigTypeTag {s}", .{zig_type}),
}
}
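
Array constants that now lower through the branch above, for reference; which internal value tag each declaration takes is an assumption about the compiler's representation at the time of this commit:

```zig
const bytes = [_]u8{ 1, 2, 3 };   // likely Value.Tag.bytes
const vals = [2]u16{ 500, 600 };  // likely Value.Tag.array
const fill = [1]u8{0xaa} ** 4;    // may be Value.Tag.repeated
const empty = [0:0]u8{};          // the .empty_array_sentinel case
```
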
@@ -2010,12 +2126,19 @@ fn emitUndefined(self: *Self, ty: Type) InnerError!void {
33...64 => try self.addFloat64(@bitCast(f64, @as(u64, 0xaaaaaaaaaaaaaaaa))),
else => |bits| return self.fail("Wasm TODO: emitUndefined for float bitsize: {d}", .{bits}),
},
- // As arrays point to linear memory, we cannot use 0xaaaaaaaa as the wasm
- // validator will not accept it due to out-of-bounds memory access);
- .Array => try self.addImm32(@bitCast(i32, @as(u32, 0xaa))),
- .Struct => {
- // TODO: Write 0xaa struct's memory
+ .Array, .Struct => {
const result = try self.allocStack(ty);
+ const abi_size = ty.abiSize(self.target);
+ var offset: u32 = 0;
+ while (offset < abi_size) : (offset += 1) {
+ try self.emitWValue(result);
+ try self.addImm32(0xaa);
+ switch (self.ptrSize()) {
+ 4 => try self.addMemArg(.i32_store8, .{ .offset = offset, .alignment = 1 }),
+ 8 => try self.addMemArg(.i64_store8, .{ .offset = offset, .alignment = 1 }),
+ else => unreachable,
+ }
+ }
try self.addLabel(.local_get, result.local);
},
.Pointer => switch (self.ptrSize()) {
@@ -2153,9 +2276,6 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: std.math.CompareOperator) Inner
const rhs = self.resolveInst(bin_op.rhs);
const operand_ty = self.air.typeOf(bin_op.lhs);
- try self.emitWValue(lhs);
- try self.emitWValue(rhs);
-
if (operand_ty.zigTypeTag() == .Optional and !operand_ty.isPtrLikeOptional()) {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = operand_ty.optionalChild(&buf);
@@ -2163,10 +2283,15 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: std.math.CompareOperator) Inner
// When we hit this case, we must check the value of optionals
// that are not pointers. This means first checking against non-null for
            // both lhs and rhs, as well as checking that the payloads of lhs and rhs match
- return self.fail("TODO: Implement airCmp for comparing optionals", .{});
+ return self.cmpOptionals(lhs, rhs, operand_ty, op);
}
+ } else if (self.isByRef(operand_ty)) {
+ return self.cmpBigInt(lhs, rhs, operand_ty, op);
}
+ try self.emitWValue(lhs);
+ try self.emitWValue(rhs);
+
const signedness: std.builtin.Signedness = blk: {
// by default we tell the operand type is unsigned (i.e. bools and enum values)
if (operand_ty.zigTypeTag() != .Int) break :blk .unsigned;
@@ -2293,7 +2418,7 @@ fn structFieldPtr(self: *Self, struct_ptr: WValue, offset: u32) InnerError!WValu
},
else => unreachable,
};
- return self.buildPointerOffset(.{ .local = local }, final_offset);
+ return self.buildPointerOffset(.{ .local = local }, final_offset, .new);
}
fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -2310,7 +2435,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
return self.fail("Field type '{}' too big to fit into stack frame", .{field_ty});
};
- if (isByRef(field_ty)) {
+ if (self.isByRef(field_ty)) {
return WValue{ .local_with_offset = .{ .local = operand.local, .offset = offset } };
}
@@ -2493,13 +2618,16 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!W
}
fn airUnwrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
- if (self.liveness.isUnused(inst)) return WValue.none;
+ if (self.liveness.isUnused(inst)) return WValue{ .none = {} };
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = self.resolveInst(ty_op.operand);
const err_ty = self.air.typeOf(ty_op.operand);
const payload_ty = err_ty.errorUnionPayload();
- if (!payload_ty.hasCodeGenBits()) return WValue.none;
+ if (!payload_ty.hasCodeGenBits()) return WValue{ .none = {} };
const offset = @intCast(u32, err_ty.errorUnionSet().abiSize(self.target));
+ if (self.isByRef(payload_ty)) {
+ return self.buildPointerOffset(operand, offset, .new);
+ }
return try self.load(operand, payload_ty, offset);
}
@@ -2528,7 +2656,7 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const offset = err_ty.errorUnionSet().abiSize(self.target);
const err_union = try self.allocStack(err_ty);
- const payload_ptr = try self.buildPointerOffset(err_union, offset);
+ const payload_ptr = try self.buildPointerOffset(err_union, offset, .new);
try self.store(payload_ptr, operand, op_ty, 0);
// ensure we also write '0' to the error part, so any present stack value gets overwritten by it.
@@ -2552,6 +2680,8 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
}
fn airIntcast(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
+ if (self.liveness.isUnused(inst)) return WValue{ .none = {} };
+
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const ty = self.air.getRefType(ty_op.ty);
const operand = self.resolveInst(ty_op.operand);
@@ -2576,7 +2706,8 @@ fn airIntcast(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
.signed => .i64_extend_i32_s,
.unsigned => .i64_extend_i32_u,
});
- }
+ } else unreachable;
+
const result = try self.allocLocal(ty);
try self.addLabel(.local_set, result.local);
return result;
@@ -2588,12 +2719,16 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: en
const op_ty = self.air.typeOf(un_op);
const optional_ty = if (op_kind == .ptr) op_ty.childType() else op_ty;
+ return self.isNull(operand, optional_ty, opcode);
+}
+
+fn isNull(self: *Self, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue {
try self.emitWValue(operand);
if (!optional_ty.isPtrLikeOptional()) {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = optional_ty.optionalChild(&buf);
- // When payload is zero-bits, we can treat operand as a value, rather than a
- // stack value
+ // When payload is zero-bits, we can treat operand as a value, rather than
+ // a pointer to the stack value
if (payload_ty.hasCodeGenBits()) {
try self.addMemArg(.i32_load8_u, .{ .offset = 0, .alignment = 1 });
}
@@ -2619,8 +2754,8 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const offset = opt_ty.abiSize(self.target) - payload_ty.abiSize(self.target);
- if (isByRef(payload_ty)) {
- return self.buildPointerOffset(operand, offset);
+ if (self.isByRef(payload_ty)) {
+ return self.buildPointerOffset(operand, offset, .new);
}
return self.load(operand, payload_ty, @intCast(u32, offset));
@@ -2640,7 +2775,7 @@ fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
}
const offset = opt_ty.abiSize(self.target) - payload_ty.abiSize(self.target);
- return self.buildPointerOffset(operand, offset);
+ return self.buildPointerOffset(operand, offset, .new);
}
fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -2665,7 +2800,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!WValue
try self.addImm32(1);
try self.addMemArg(.i32_store8, .{ .offset = 0, .alignment = 1 });
- return self.buildPointerOffset(operand, offset);
+ return self.buildPointerOffset(operand, offset, .new);
}
fn airWrapOptional(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -2696,7 +2831,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
try self.addImm32(1);
try self.addMemArg(.i32_store8, .{ .offset = 0, .alignment = 1 });
- const payload_ptr = try self.buildPointerOffset(result, offset);
+ const payload_ptr = try self.buildPointerOffset(result, offset, .new);
try self.store(payload_ptr, operand, payload_ty, 0);
return result;
@@ -2750,7 +2885,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const result = try self.allocLocal(elem_ty);
try self.addLabel(.local_set, result.local);
- if (isByRef(elem_ty)) {
+ if (self.isByRef(elem_ty)) {
return result;
}
return try self.load(result, elem_ty, 0);
@@ -2911,7 +3046,7 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const result = try self.allocLocal(elem_ty);
try self.addLabel(.local_set, result.local);
- if (isByRef(elem_ty)) {
+ if (self.isByRef(elem_ty)) {
return result;
}
return try self.load(result, elem_ty, 0);
@@ -2952,7 +3087,11 @@ fn airPtrBinOp(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!WValue {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ptr = self.resolveInst(bin_op.lhs);
const offset = self.resolveInst(bin_op.rhs);
- const pointee_ty = self.air.typeOf(bin_op.lhs).childType();
+ const ptr_ty = self.air.typeOf(bin_op.lhs);
+ const pointee_ty = switch (ptr_ty.ptrSize()) {
+ .One => ptr_ty.childType().childType(), // ptr to array, so get array element type
+ else => ptr_ty.childType(),
+ };
const valtype = try self.typeToValtype(Type.usize);
const mul_opcode = buildOpcode(.{ .valtype1 = valtype, .op = .mul });
@@ -3036,3 +3175,125 @@ fn memSet(self: *Self, ptr: WValue, len: WValue, value: WValue) InnerError!void
try self.endBlock();
try self.endBlock();
}
+
+fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
+ if (self.liveness.isUnused(inst)) return WValue{ .none = {} };
+
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const array_ty = self.air.typeOf(bin_op.lhs);
+ const array = self.resolveInst(bin_op.lhs);
+ const index = self.resolveInst(bin_op.rhs);
+ const elem_ty = array_ty.childType();
+ const elem_size = elem_ty.abiSize(self.target);
+
+ // calculate the address of the array element
+ try self.emitWValue(array);
+ try self.emitWValue(index);
+ try self.addImm32(@bitCast(i32, @intCast(u32, elem_size)));
+ try self.addTag(.i32_mul);
+ try self.addTag(.i32_add);
+
+ const result = try self.allocLocal(elem_ty);
+ try self.addLabel(.local_set, result.local);
+
+ if (self.isByRef(elem_ty)) {
+ return result;
+ }
+ return try self.load(result, elem_ty, 0);
+}
+
+fn airFloatToInt(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
+ if (self.liveness.isUnused(inst)) return WValue{ .none = {} };
+
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const operand = self.resolveInst(ty_op.operand);
+ const dest_ty = self.air.typeOfIndex(inst);
+ const op_ty = self.air.typeOf(ty_op.operand);
+
+ try self.emitWValue(operand);
+ const op = buildOpcode(.{
+ .op = .trunc,
+ .valtype1 = try self.typeToValtype(dest_ty),
+ .valtype2 = try self.typeToValtype(op_ty),
+ .signedness = if (dest_ty.isSignedInt()) .signed else .unsigned,
+ });
+ try self.addTag(Mir.Inst.Tag.fromOpcode(op));
+
+ const result = try self.allocLocal(dest_ty);
+ try self.addLabel(.local_set, result.local);
+ return result;
+}
+
+fn cmpOptionals(self: *Self, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
+ assert(operand_ty.hasCodeGenBits());
+ assert(op == .eq or op == .neq);
+ var buf: Type.Payload.ElemType = undefined;
+ const payload_ty = operand_ty.optionalChild(&buf);
+ const offset = @intCast(u32, operand_ty.abiSize(self.target) - payload_ty.abiSize(self.target));
+
+ const lhs_is_null = try self.isNull(lhs, operand_ty, .i32_eq);
+ const rhs_is_null = try self.isNull(rhs, operand_ty, .i32_eq);
+
+ // We store the intermediate result in this local; it is set to 1 only
+ // when the optionals compare equal, then inverted below for .neq.
+ const result = try self.allocLocal(Type.initTag(.i32));
+
+ try self.startBlock(.block, wasm.block_empty);
+ try self.emitWValue(lhs_is_null);
+ try self.emitWValue(rhs_is_null);
+ try self.addTag(.i32_ne); // inverted check so we can exit the block early
+ try self.addLabel(.br_if, 0);
+
+ const lhs_pl = try self.load(lhs, payload_ty, offset);
+ const rhs_pl = try self.load(rhs, payload_ty, offset);
+
+ try self.emitWValue(lhs_pl);
+ try self.emitWValue(rhs_pl);
+ const opcode = buildOpcode(.{ .op = .ne, .valtype1 = try self.typeToValtype(payload_ty) });
+ try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+ try self.addLabel(.br_if, 0);
+
+ try self.addImm32(1);
+ try self.addLabel(.local_set, result.local);
+ try self.endBlock();
+
+ try self.emitWValue(result);
+ try self.addImm32(0);
+ try self.addTag(if (op == .eq) .i32_ne else .i32_eq);
+ try self.addLabel(.local_set, result.local);
+ return result;
+}
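For reference, the block emitted above encodes the usual optional-equality rule: bail out when the null flags differ, otherwise decide by the payloads. In plain Zig the intended semantics look roughly like this (a sketch; optEq is a hypothetical helper, not part of the codebase):

    fn optEq(lhs: ?u32, rhs: ?u32) bool {
        // Exit early when exactly one side is null.
        if ((lhs == null) != (rhs == null)) return false;
        // Two nulls compare equal without looking at payloads.
        if (lhs == null) return true;
        // Both non-null: compare the payloads.
        return lhs.? == rhs.?;
    }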
+
+/// Compares big integers by comparing both their high and low halves.
+/// TODO: Lower this to compiler_rt call
+fn cmpBigInt(self: *Self, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
+ assert(op == .eq or op == .neq); // only equality comparisons are lowered here
+ if (operand_ty.intInfo(self.target).bits > 128) {
+ return self.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.intInfo(self.target).bits});
+ }
+
+ const result = try self.allocLocal(Type.initTag(.i32));
+ {
+ try self.startBlock(.block, wasm.block_empty);
+ // wasm is little-endian: offset 0 holds the low half, offset 8 the high half
+ const lhs_low_bit = try self.load(lhs, Type.initTag(.u64), 0);
+ const lhs_high_bit = try self.load(lhs, Type.initTag(.u64), 8);
+ const rhs_low_bit = try self.load(rhs, Type.initTag(.u64), 0);
+ const rhs_high_bit = try self.load(rhs, Type.initTag(.u64), 8);
+ try self.emitWValue(lhs_high_bit);
+ try self.emitWValue(rhs_high_bit);
+ try self.addTag(.i64_ne);
+ try self.addLabel(.br_if, 0);
+ try self.emitWValue(lhs_low_bit);
+ try self.emitWValue(rhs_low_bit);
+ try self.addTag(.i64_ne);
+ try self.addLabel(.br_if, 0);
+ try self.addImm32(1);
+ try self.addLabel(.local_set, result.local);
+ try self.endBlock();
+ }
+
+ try self.emitWValue(result);
+ try self.addImm32(0);
+ try self.addTag(if (op == .eq) .i32_ne else .i32_eq);
+ try self.addLabel(.local_set, result.local);
+ return result;
+}
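The emitted wasm is a two-halves equality check; the scalar equivalent, assuming the little-endian layout with the low half at offset 0 (eq128 is a hypothetical helper):

    fn eq128(lhs: u128, rhs: u128) bool {
        const lhs_lo = @truncate(u64, lhs); // bytes 0..8
        const lhs_hi = @truncate(u64, lhs >> 64); // bytes 8..16
        const rhs_lo = @truncate(u64, rhs);
        const rhs_hi = @truncate(u64, rhs >> 64);
        return lhs_hi == rhs_hi and lhs_lo == rhs_lo;
    }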
diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig
index fb77ea5b0c..0fc7dc0527 100644
--- a/src/arch/wasm/Emit.zig
+++ b/src/arch/wasm/Emit.zig
@@ -161,6 +161,18 @@ pub fn emitMir(emit: *Emit) InnerError!void {
.i64_extend8_s => try emit.emitTag(tag),
.i64_extend16_s => try emit.emitTag(tag),
.i64_extend32_s => try emit.emitTag(tag),
+ .i32_reinterpret_f32 => try emit.emitTag(tag),
+ .i64_reinterpret_f64 => try emit.emitTag(tag),
+ .f32_reinterpret_i32 => try emit.emitTag(tag),
+ .f64_reinterpret_i64 => try emit.emitTag(tag),
+ .i32_trunc_f32_s => try emit.emitTag(tag),
+ .i32_trunc_f32_u => try emit.emitTag(tag),
+ .i32_trunc_f64_s => try emit.emitTag(tag),
+ .i32_trunc_f64_u => try emit.emitTag(tag),
+ .i64_trunc_f32_s => try emit.emitTag(tag),
+ .i64_trunc_f32_u => try emit.emitTag(tag),
+ .i64_trunc_f64_s => try emit.emitTag(tag),
+ .i64_trunc_f64_u => try emit.emitTag(tag),
.extended => try emit.emitExtended(inst),
}
diff --git a/src/arch/wasm/Mir.zig b/src/arch/wasm/Mir.zig
index 9c76801cf3..0fceaf6042 100644
--- a/src/arch/wasm/Mir.zig
+++ b/src/arch/wasm/Mir.zig
@@ -363,10 +363,34 @@ pub const Inst = struct {
/// Uses `tag`
i32_wrap_i64 = 0xA7,
/// Uses `tag`
+ i32_trunc_f32_s = 0xA8,
+ /// Uses `tag`
+ i32_trunc_f32_u = 0xA9,
+ /// Uses `tag`
+ i32_trunc_f64_s = 0xAA,
+ /// Uses `tag`
+ i32_trunc_f64_u = 0xAB,
+ /// Uses `tag`
i64_extend_i32_s = 0xAC,
/// Uses `tag`
i64_extend_i32_u = 0xAD,
/// Uses `tag`
+ i64_trunc_f32_s = 0xAE,
+ /// Uses `tag`
+ i64_trunc_f32_u = 0xAF,
+ /// Uses `tag`
+ i64_trunc_f64_s = 0xB0,
+ /// Uses `tag`
+ i64_trunc_f64_u = 0xB1,
+ /// Uses `tag`
+ i32_reinterpret_f32 = 0xBC,
+ /// Uses `tag`
+ i64_reinterpret_f64 = 0xBD,
+ /// Uses `tag`
+ f32_reinterpret_i32 = 0xBE,
+ /// Uses `tag`
+ f64_reinterpret_i64 = 0xBF,
+ /// Uses `tag`
i32_extend8_s = 0xC0,
/// Uses `tag`
i32_extend16_s = 0xC1,
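The enum values are the opcode bytes from the wasm spec, one-to-one, which is what lets Emit lower each of these tags as a bare emitTag. A sketch of the invariant as a test (assuming Mir.zig's existing std import and Inst namespace):

    test "trunc and reinterpret tags carry their wasm opcode byte" {
        try std.testing.expectEqual(@as(u8, 0xA8), @enumToInt(Inst.Tag.i32_trunc_f32_s));
        try std.testing.expectEqual(@as(u8, 0xBC), @enumToInt(Inst.Tag.i32_reinterpret_f32));
    }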
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 28c7cdc5ce..2cb7fc1ab7 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -635,6 +635,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.ctz => try self.airCtz(inst),
.popcount => try self.airPopcount(inst),
.tag_name => try self.airTagName(inst),
+ .error_name => try self.airErrorName(inst),
.atomic_store_unordered => try self.airAtomicStore(inst, .Unordered),
.atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic),
@@ -2852,17 +2853,16 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void {
// block results.
.mcv = MCValue{ .none = {} },
});
- const block_data = self.blocks.getPtr(inst).?;
- defer block_data.relocs.deinit(self.gpa);
+ defer self.blocks.getPtr(inst).?.relocs.deinit(self.gpa);
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Block, ty_pl.payload);
const body = self.air.extra[extra.end..][0..extra.data.body_len];
try self.genBody(body);
- for (block_data.relocs.items) |reloc| try self.performReloc(reloc);
+ for (self.blocks.getPtr(inst).?.relocs.items) |reloc| try self.performReloc(reloc);
- const result = @bitCast(MCValue, block_data.mcv);
+ const result = self.blocks.getPtr(inst).?.mcv;
return self.finishAir(inst, result, .{ .none, .none, .none });
}
@@ -3649,6 +3649,16 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
+fn airErrorName(self: *Self, inst: Air.Inst.Index) !void {
+ const un_op = self.air.instructions.items(.data)[inst].un_op;
+ const operand = try self.resolveInst(un_op);
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else {
+ _ = operand;
+ return self.fail("TODO implement airErrorName for x86_64", .{});
+ };
+ return self.finishAir(inst, result, .{ un_op, .none, .none });
+}
+
fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
// First section of indexes correspond to a set number of constant values.
const ref_int = @enumToInt(inst);
diff --git a/src/arch/x86_64/Isel.zig b/src/arch/x86_64/Isel.zig
index a446ca3e84..acae794563 100644
--- a/src/arch/x86_64/Isel.zig
+++ b/src/arch/x86_64/Isel.zig
@@ -753,7 +753,16 @@ fn dbgAdvancePCAndLine(isel: *Isel, line: u32, column: u32) InnerError!void {
const d_pc_p9 = @intCast(i64, delta_pc) - quant;
if (d_pc_p9 > 0) {
// minus one because if it's the last one, we want to leave space to change the line, which is one quantum
- try dbg_out.dbg_line.append(@intCast(u8, @divExact(d_pc_p9, quant) + 128) - quant);
+ var diff = @divExact(d_pc_p9, quant) - quant;
+ while (diff > 0) {
+ if (diff < 64) {
+ try dbg_out.dbg_line.append(@intCast(u8, diff + 128));
+ diff = 0;
+ } else {
+ try dbg_out.dbg_line.append(@intCast(u8, 64 + 128));
+ diff -= 64;
+ }
+ }
if (dbg_out.pcop_change_index.*) |pci|
dbg_out.dbg_line.items[pci] += 1;
dbg_out.pcop_change_index.* = @intCast(u32, dbg_out.dbg_line.items.len - 1);
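The old single append overflowed the @intCast(u8, ...) once the quanta count grew too large; the loop instead emits full 64-quanta steps followed by one short step, each stored as the step plus the 128 bias, mirroring the cap used above. The same chunking in isolation (appendPcAdvance is a hypothetical helper):

    fn appendPcAdvance(dbg_line: *std.ArrayList(u8), total_quanta: i64) !void {
        var diff = total_quanta;
        while (diff > 0) {
            if (diff < 64) {
                try dbg_line.append(@intCast(u8, diff + 128)); // final short step
                diff = 0;
            } else {
                try dbg_line.append(@intCast(u8, 64 + 128)); // full 64-quanta step
                diff -= 64;
            }
        }
    }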
diff --git a/src/codegen.zig b/src/codegen.zig
index e385158ba6..1d20c4bc74 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -465,9 +465,15 @@ fn lowerDeclRef(
if (decl.analysis != .complete) return error.AnalysisFail;
markDeclAlive(decl);
- // TODO handle the dependency of this symbol on the decl's vaddr.
- // If the decl changes vaddr, then this symbol needs to get regenerated.
- const vaddr = bin_file.getDeclVAddr(decl);
+ const vaddr = vaddr: {
+ if (bin_file.cast(link.File.MachO)) |macho_file| {
+ break :vaddr try macho_file.getDeclVAddrWithReloc(decl, code.items.len);
+ }
+ // TODO handle the dependency of this symbol on the decl's vaddr.
+ // If the decl changes vaddr, then this symbol needs to get regenerated.
+ break :vaddr bin_file.getDeclVAddr(decl);
+ };
+
const endian = bin_file.options.target.cpu.arch.endian();
switch (bin_file.options.target.cpu.arch.ptrBitWidth()) {
16 => mem.writeInt(u16, try code.addManyAsArray(2), @intCast(u16, vaddr), endian),
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 922e1d9c3e..e85ca6c705 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -1244,6 +1244,7 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.ctz => try airBuiltinCall(f, inst, "ctz"),
.popcount => try airBuiltinCall(f, inst, "popcount"),
.tag_name => try airTagName(f, inst),
+ .error_name => try airErrorName(f, inst),
.int_to_float,
.float_to_int,
@@ -2998,6 +2999,22 @@ fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue {
//return local;
}
+fn airErrorName(f: *Function, inst: Air.Inst.Index) !CValue {
+ if (f.liveness.isUnused(inst)) return CValue.none;
+
+ const un_op = f.air.instructions.items(.data)[inst].un_op;
+ const writer = f.object.writer();
+ const inst_ty = f.air.typeOfIndex(inst);
+ const operand = try f.resolveInst(un_op);
+ const local = try f.allocLocal(inst_ty, .Const);
+
+ try writer.writeAll(" = ");
+
+ _ = operand;
+ _ = local;
+ return f.fail("TODO: C backend: implement airErrorName", .{});
+}
+
fn toMemoryOrder(order: std.builtin.AtomicOrder) [:0]const u8 {
return switch (order) {
.Unordered => "memory_order_relaxed",
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index b1046582fa..4a3ac80b70 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -181,6 +181,9 @@ pub const Object = struct {
/// The backing memory for `type_map`. Periodically garbage collected after flush().
/// The code for doing the periodical GC is not yet implemented.
type_map_arena: std.heap.ArenaAllocator,
+ /// The LLVM global table which holds the names corresponding to Zig errors. Note that the values
+ /// are not added until flushModule, when all errors in the compilation are known.
+ error_name_table: ?*const llvm.Value,
pub const TypeMap = std.HashMapUnmanaged(
Type,
@@ -269,6 +272,7 @@ pub const Object = struct {
.decl_map = .{},
.type_map = .{},
.type_map_arena = std.heap.ArenaAllocator.init(gpa),
+ .error_name_table = null,
};
}
@@ -298,7 +302,60 @@ pub const Object = struct {
return slice.ptr;
}
+ fn genErrorNameTable(self: *Object, comp: *Compilation) !void {
+ // If self.error_name_table is null, there was no instruction that actually referenced the error table.
+ const error_name_table_ptr_global = self.error_name_table orelse return;
+
+ const mod = comp.bin_file.options.module.?;
+ const target = mod.getTarget();
+
+ const llvm_ptr_ty = self.context.intType(8).pointerType(0); // TODO: Address space
+ const llvm_usize_ty = self.context.intType(target.cpu.arch.ptrBitWidth());
+ const type_fields = [_]*const llvm.Type{
+ llvm_ptr_ty,
+ llvm_usize_ty,
+ };
+ const llvm_slice_ty = self.context.structType(&type_fields, type_fields.len, .False);
+ const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
+ const slice_alignment = slice_ty.abiAlignment(target);
+
+ const error_name_list = mod.error_name_list.items;
+ const llvm_errors = try comp.gpa.alloc(*const llvm.Value, error_name_list.len);
+ defer comp.gpa.free(llvm_errors);
+
+ llvm_errors[0] = llvm_slice_ty.getUndef();
+ for (llvm_errors[1..]) |*llvm_error, i| {
+ const name = error_name_list[1..][i];
+ const str_init = self.context.constString(name.ptr, @intCast(c_uint, name.len), .False);
+ const str_global = self.llvm_module.addGlobal(str_init.typeOf(), "");
+ str_global.setInitializer(str_init);
+ str_global.setLinkage(.Private);
+ str_global.setGlobalConstant(.True);
+ str_global.setUnnamedAddr(.True);
+ str_global.setAlignment(1);
+
+ const slice_fields = [_]*const llvm.Value{
+ str_global.constBitCast(llvm_ptr_ty),
+ llvm_usize_ty.constInt(name.len, .False),
+ };
+ llvm_error.* = llvm_slice_ty.constNamedStruct(&slice_fields, slice_fields.len);
+ }
+
+ const error_name_table_init = llvm_slice_ty.constArray(llvm_errors.ptr, @intCast(c_uint, error_name_list.len));
+
+ const error_name_table_global = self.llvm_module.addGlobal(error_name_table_init.typeOf(), "");
+ error_name_table_global.setInitializer(error_name_table_init);
+ error_name_table_global.setLinkage(.Private);
+ error_name_table_global.setGlobalConstant(.True);
+ error_name_table_global.setUnnamedAddr(.True);
+ error_name_table_global.setAlignment(slice_alignment); // TODO: Don't hardcode
+
+ const error_name_table_ptr = error_name_table_global.constBitCast(llvm_slice_ty.pointerType(0)); // TODO: Address space
+ error_name_table_ptr_global.setInitializer(error_name_table_ptr);
+ }
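The table built here is what the error_name AIR instruction indexes at runtime: one { ptr, len } slice per error value, with slot 0 left undefined because error value 0 is never used. In Zig terms the emitted data corresponds roughly to (error names are placeholders):

    // name of `err` == table[@errorToInt(err)]
    const table = [_][]const u8{
        undefined, // error value 0 is unused
        "OutOfMemory", // error value 1 (example)
        "FileNotFound", // error value 2 (example)
    };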
+
pub fn flushModule(self: *Object, comp: *Compilation) !void {
+ try self.genErrorNameTable(comp);
if (comp.verbose_llvm_ir) {
self.llvm_module.dump();
}
@@ -2031,6 +2088,7 @@ pub const FuncGen = struct {
.ctz => try self.airClzCtz(inst, "cttz"),
.popcount => try self.airPopCount(inst, "ctpop"),
.tag_name => try self.airTagName(inst),
+ .error_name => try self.airErrorName(inst),
.atomic_store_unordered => try self.airAtomicStore(inst, .Unordered),
.atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic),
@@ -2647,7 +2705,7 @@ pub const FuncGen = struct {
switch (struct_ty.zigTypeTag()) {
.Struct => {
var ptr_ty_buf: Type.Payload.Pointer = undefined;
- const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ptr_ty_buf);
+ const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ptr_ty_buf).?;
const field_ptr = self.builder.buildStructGEP(struct_llvm_val, llvm_field_index, "");
const field_ptr_ty = Type.initPayload(&ptr_ty_buf.base);
return self.load(field_ptr, field_ptr_ty);
@@ -4279,6 +4337,40 @@ pub const FuncGen = struct {
return fn_val;
}
+ fn airErrorName(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const un_op = self.air.instructions.items(.data)[inst].un_op;
+ const operand = try self.resolveInst(un_op);
+
+ const error_name_table_ptr = try self.getErrorNameTable();
+ const error_name_table = self.builder.buildLoad(error_name_table_ptr, "");
+ const indices = [_]*const llvm.Value{operand};
+ const error_name_ptr = self.builder.buildInBoundsGEP(error_name_table, &indices, indices.len, "");
+ return self.builder.buildLoad(error_name_ptr, "");
+ }
+
+ fn getErrorNameTable(self: *FuncGen) !*const llvm.Value {
+ if (self.dg.object.error_name_table) |table| {
+ return table;
+ }
+
+ const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
+ const slice_alignment = slice_ty.abiAlignment(self.dg.module.getTarget());
+ const llvm_slice_ty = try self.dg.llvmType(slice_ty);
+ const llvm_slice_ptr_ty = llvm_slice_ty.pointerType(0); // TODO: Address space
+
+ const error_name_table_global = self.dg.object.llvm_module.addGlobal(llvm_slice_ptr_ty, "__zig_err_name_table");
+ error_name_table_global.setInitializer(llvm_slice_ptr_ty.getUndef());
+ error_name_table_global.setLinkage(.Private);
+ error_name_table_global.setGlobalConstant(.True);
+ error_name_table_global.setUnnamedAddr(.True);
+ error_name_table_global.setAlignment(slice_alignment);
+
+ self.dg.object.error_name_table = error_name_table_global;
+ return error_name_table_global;
+ }
+
/// Assumes the optional is not pointer-like and payload has bits.
fn optIsNonNull(self: *FuncGen, opt_handle: *const llvm.Value, is_by_ref: bool) *const llvm.Value {
if (is_by_ref) {
@@ -4354,8 +4446,18 @@ pub const FuncGen = struct {
.Struct => {
const target = self.dg.module.getTarget();
var ty_buf: Type.Payload.Pointer = undefined;
- const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ty_buf);
- return self.builder.buildStructGEP(struct_ptr, llvm_field_index, "");
+ if (llvmFieldIndex(struct_ty, field_index, target, &ty_buf)) |llvm_field_index| {
+ return self.builder.buildStructGEP(struct_ptr, llvm_field_index, "");
+ } else {
+ // If we found no index, this is a zero-sized field at the end of the
+ // struct. Treat our struct pointer as an array of two structs and take
+ // a pointer to element `1`, which is a pointer to the end of the struct.
+ const llvm_usize = try self.dg.llvmType(Type.usize);
+ const llvm_index = llvm_usize.constInt(1, .False);
+ const indices: [1]*const llvm.Value = .{llvm_index};
+ return self.builder.buildInBoundsGEP(struct_ptr, &indices, indices.len, "");
+ }
},
.Union => return self.unionFieldPtr(inst, struct_ptr, struct_ty, field_index),
else => unreachable,
@@ -4750,32 +4852,37 @@ fn toLlvmCallConv(cc: std.builtin.CallingConvention, target: std.Target) llvm.Ca
};
}
-/// Take into account 0 bit fields.
+/// Take into account 0 bit fields. Returns null if an llvm field could not be found.
+/// This only happens when asking for the field index of a zero-sized field at the end of the struct.
fn llvmFieldIndex(
ty: Type,
field_index: u32,
target: std.Target,
ptr_pl_buf: *Type.Payload.Pointer,
-) c_uint {
+) ?c_uint {
const struct_obj = ty.castTag(.@"struct").?.data;
if (struct_obj.layout != .Packed) {
var llvm_field_index: c_uint = 0;
for (struct_obj.fields.values()) |field, i| {
- if (!field.ty.hasCodeGenBits()) continue;
-
- if (i == field_index) {
- ptr_pl_buf.* = .{
- .data = .{
- .pointee_type = field.ty,
- .@"align" = field.normalAlignment(target),
- .@"addrspace" = .generic,
- },
- };
- return llvm_field_index;
+ if (!field.ty.hasCodeGenBits())
+ continue;
+ if (field_index > i) {
+ llvm_field_index += 1;
+ continue;
}
- llvm_field_index += 1;
+
+ ptr_pl_buf.* = .{
+ .data = .{
+ .pointee_type = field.ty,
+ .@"align" = field.normalAlignment(target),
+ .@"addrspace" = .generic,
+ },
+ };
+ return llvm_field_index;
+ } else {
+ // We did not find an llvm field that corresponds to this zig field.
+ return null;
}
- unreachable;
}
// Our job here is to return the host integer field index.
@@ -4784,7 +4891,8 @@ fn llvmFieldIndex(
var running_bits: u16 = 0;
var llvm_field_index: c_uint = 0;
for (struct_obj.fields.values()) |field, i| {
- if (!field.ty.hasCodeGenBits()) continue;
+ if (!field.ty.hasCodeGenBits())
+ continue;
const field_align = field.packedAlignment();
if (field_align == 0) {
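A concrete picture of why llvmFieldIndex can return null: zero-bit fields never become LLVM struct members, so Zig and LLVM field indices drift apart, and a trailing zero-bit field has no LLVM member at all. A sketch (hypothetical type):

    const S = struct {
        a: u32, // zig field 0 -> llvm field 0
        b: void, // zig field 1 -> no llvm field (zero-bit)
        c: u64, // zig field 2 -> llvm field 1
        d: void, // zig field 3 -> no llvm field; trailing, so llvmFieldIndex
        // returns null and fieldPtr GEPs one past the struct instead
    };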
diff --git a/src/libcxx.zig b/src/libcxx.zig
index 2da680a40d..14ef2a7b04 100644
--- a/src/libcxx.zig
+++ b/src/libcxx.zig
@@ -114,8 +114,8 @@ pub fn buildLibCXX(comp: *Compilation) !void {
for (libcxx_files) |cxx_src| {
var cflags = std.ArrayList([]const u8).init(arena);
- if (target.os.tag == .windows or target.os.tag == .wasi) {
- // Filesystem stuff isn't supported on WASI and Windows.
+ if ((target.os.tag == .windows and target.abi == .msvc) or target.os.tag == .wasi) {
+ // Filesystem stuff isn't supported on WASI and Windows (MSVC).
if (std.mem.startsWith(u8, cxx_src, "src/filesystem/"))
continue;
}
diff --git a/src/link.zig b/src/link.zig
index 422d86d4b3..a4b990cf6b 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -643,7 +643,7 @@ pub const File = struct {
.coff => return @fieldParentPtr(Coff, "base", base).getDeclVAddr(decl),
.elf => return @fieldParentPtr(Elf, "base", base).getDeclVAddr(decl),
.macho => return @fieldParentPtr(MachO, "base", base).getDeclVAddr(decl),
- .plan9 => @panic("GET VADDR"),
+ .plan9 => return @fieldParentPtr(Plan9, "base", base).getDeclVAddr(decl),
.c => unreachable,
.wasm => unreachable,
.spirv => unreachable,
@@ -672,12 +672,21 @@ pub const File = struct {
// is not needed we can refactor this into having the frontend do the rename
// directly, and remove this function from link.zig.
_ = base;
- try std.fs.rename(
- cache_directory.handle,
- tmp_dir_sub_path,
- cache_directory.handle,
- o_sub_path,
- );
+ while (true) {
+ std.fs.rename(
+ cache_directory.handle,
+ tmp_dir_sub_path,
+ cache_directory.handle,
+ o_sub_path,
+ ) catch |err| switch (err) {
+ error.PathAlreadyExists => {
+ try cache_directory.handle.deleteTree(o_sub_path);
+ continue;
+ },
+ else => |e| return e,
+ };
+ break;
+ }
}
pub fn linkAsArchive(base: *File, comp: *Compilation) !void {
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 816e4e0023..0ee0879290 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -38,6 +38,7 @@ const Module = @import("../Module.zig");
const StringIndexAdapter = std.hash_map.StringIndexAdapter;
const StringIndexContext = std.hash_map.StringIndexContext;
const Trie = @import("MachO/Trie.zig");
+const Type = @import("../type.zig").Type;
pub const TextBlock = Atom;
@@ -220,7 +221,7 @@ managed_atoms: std.ArrayListUnmanaged(*Atom) = .{},
/// We store them here so that we can properly dispose of any allocated
/// memory within the atom in the incremental linker.
/// TODO consolidate this.
-decls: std.AutoArrayHashMapUnmanaged(*Module.Decl, void) = .{},
+decls: std.AutoArrayHashMapUnmanaged(*Module.Decl, ?MatchingSection) = .{},
/// Currently active Module.Decl.
/// TODO this might not be necessary if we figure out how to pass Module.Decl instance
@@ -3450,7 +3451,7 @@ pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void {
if (decl.link.macho.local_sym_index != 0) return;
try self.locals.ensureUnusedCapacity(self.base.allocator, 1);
- try self.decls.putNoClobber(self.base.allocator, decl, {});
+ try self.decls.putNoClobber(self.base.allocator, decl, null);
if (self.locals_free_list.popOrNull()) |i| {
log.debug("reusing symbol index {d} for {s}", .{ i, decl.name });
@@ -3656,19 +3657,169 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
try self.updateDeclExports(module, decl, decl_exports);
}
+fn isElemTyPointer(ty: Type) bool {
+ switch (ty.zigTypeTag()) {
+ .Fn => return false,
+ .Pointer => return true,
+ .Array => {
+ const elem_ty = ty.elemType();
+ return isElemTyPointer(elem_ty);
+ },
+ .Struct, .Union => {
+ const len = ty.structFieldCount();
+ var i: usize = 0;
+ while (i < len) : (i += 1) {
+ const field_ty = ty.structFieldType(i);
+ if (isElemTyPointer(field_ty)) return true;
+ }
+ return false;
+ },
+ else => return false,
+ }
+}
+
+fn getMatchingSectionDecl(self: *MachO, decl: *Module.Decl) !MatchingSection {
+ const code = decl.link.macho.code.items;
+ const alignment = decl.ty.abiAlignment(self.base.options.target);
+ const align_log_2 = math.log2(alignment);
+ const ty = decl.ty;
+ const zig_ty = ty.zigTypeTag();
+ const val = decl.val;
+ const mode = self.base.options.optimize_mode;
+ const match: MatchingSection = blk: {
+ // TODO finish and audit this function
+ if (val.isUndefDeep()) {
+ if (mode == .ReleaseFast or mode == .ReleaseSmall) {
+ break :blk MatchingSection{
+ .seg = self.data_segment_cmd_index.?,
+ .sect = self.bss_section_index.?,
+ };
+ }
+ break :blk (try self.getMatchingSection(.{
+ .segname = makeStaticString("__DATA"),
+ .sectname = makeStaticString("__data"),
+ .size = code.len,
+ .@"align" = align_log_2,
+ })).?;
+ }
+
+ switch (zig_ty) {
+ .Fn => {
+ break :blk MatchingSection{
+ .seg = self.text_segment_cmd_index.?,
+ .sect = self.text_section_index.?,
+ };
+ },
+ .Array => switch (val.tag()) {
+ .bytes => {
+ switch (ty.tag()) {
+ .array_u8_sentinel_0,
+ .const_slice_u8_sentinel_0,
+ .manyptr_const_u8_sentinel_0,
+ => {
+ break :blk (try self.getMatchingSection(.{
+ .segname = makeStaticString("__TEXT"),
+ .sectname = makeStaticString("__cstring"),
+ .flags = macho.S_CSTRING_LITERALS,
+ .size = code.len,
+ .@"align" = align_log_2,
+ })).?;
+ },
+ else => {
+ break :blk (try self.getMatchingSection(.{
+ .segname = makeStaticString("__TEXT"),
+ .sectname = makeStaticString("__const"),
+ .size = code.len,
+ .@"align" = align_log_2,
+ })).?;
+ },
+ }
+ },
+ .array => {
+ if (isElemTyPointer(ty)) {
+ break :blk (try self.getMatchingSection(.{
+ .segname = makeStaticString("__DATA_CONST"),
+ .sectname = makeStaticString("__const"),
+ .size = code.len,
+ .@"align" = 3, // TODO I think this should not be needed
+ })).?;
+ } else {
+ break :blk (try self.getMatchingSection(.{
+ .segname = makeStaticString("__TEXT"),
+ .sectname = makeStaticString("__const"),
+ .size = code.len,
+ .@"align" = align_log_2,
+ })).?;
+ }
+ },
+ else => {
+ break :blk (try self.getMatchingSection(.{
+ .segname = makeStaticString("__TEXT"),
+ .sectname = makeStaticString("__const"),
+ .size = code.len,
+ .@"align" = align_log_2,
+ })).?;
+ },
+ },
+ .Pointer => {
+ if (val.castTag(.variable)) |_| {
+ break :blk MatchingSection{
+ .seg = self.data_segment_cmd_index.?,
+ .sect = self.data_section_index.?,
+ };
+ } else {
+ break :blk (try self.getMatchingSection(.{
+ .segname = makeStaticString("__DATA_CONST"),
+ .sectname = makeStaticString("__const"),
+ .size = code.len,
+ .@"align" = align_log_2,
+ })).?;
+ }
+ },
+ else => {
+ if (val.castTag(.variable)) |_| {
+ break :blk MatchingSection{
+ .seg = self.data_segment_cmd_index.?,
+ .sect = self.data_section_index.?,
+ };
+ } else {
+ break :blk (try self.getMatchingSection(.{
+ .segname = makeStaticString("__TEXT"),
+ .sectname = makeStaticString("__const"),
+ .size = code.len,
+ .@"align" = align_log_2,
+ })).?;
+ }
+ },
+ }
+ };
+ const seg = self.load_commands.items[match.seg].segment;
+ const sect = seg.sections.items[match.sect];
+ log.debug(" allocating atom in '{s},{s}' ({d},{d})", .{
+ sect.segName(),
+ sect.sectName(),
+ match.seg,
+ match.sect,
+ });
+ return match;
+}
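A rough summary of the dispatch above, for orientation:

    // undef value, ReleaseFast/ReleaseSmall -> __DATA,__bss
    // undef value, otherwise                -> __DATA,__data
    // functions                             -> __TEXT,__text
    // zero-terminated byte arrays           -> __TEXT,__cstring
    // arrays containing pointers            -> __DATA_CONST,__const
    // const pointers                        -> __DATA_CONST,__const
    // mutable variables                     -> __DATA,__data
    // other constants                       -> __TEXT,__const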
+
fn placeDecl(self: *MachO, decl: *Module.Decl, code_len: usize) !*macho.nlist_64 {
const required_alignment = decl.ty.abiAlignment(self.base.options.target);
assert(decl.link.macho.local_sym_index != 0); // Caller forgot to call allocateDeclIndexes()
const symbol = &self.locals.items[decl.link.macho.local_sym_index];
+ const decl_ptr = self.decls.getPtr(decl).?;
+ if (decl_ptr.* == null) {
+ decl_ptr.* = try self.getMatchingSectionDecl(decl);
+ }
+ const match = decl_ptr.*.?;
+
if (decl.link.macho.size != 0) {
const capacity = decl.link.macho.capacity(self.*);
const need_realloc = code_len > capacity or !mem.isAlignedGeneric(u64, symbol.n_value, required_alignment);
if (need_realloc) {
- const vaddr = try self.growAtom(&decl.link.macho, code_len, required_alignment, .{
- .seg = self.text_segment_cmd_index.?,
- .sect = self.text_section_index.?,
- });
+ const vaddr = try self.growAtom(&decl.link.macho, code_len, required_alignment, match);
log.debug("growing {s} and moving from 0x{x} to 0x{x}", .{ decl.name, symbol.n_value, vaddr });
@@ -3690,10 +3841,7 @@ fn placeDecl(self: *MachO, decl: *Module.Decl, code_len: usize) !*macho.nlist_64
symbol.n_value = vaddr;
} else if (code_len < decl.link.macho.size) {
- self.shrinkAtom(&decl.link.macho, code_len, .{
- .seg = self.text_segment_cmd_index.?,
- .sect = self.text_section_index.?,
- });
+ self.shrinkAtom(&decl.link.macho, code_len, match);
}
decl.link.macho.size = code_len;
decl.link.macho.dirty = true;
@@ -3714,22 +3862,16 @@ fn placeDecl(self: *MachO, decl: *Module.Decl, code_len: usize) !*macho.nlist_64
defer self.base.allocator.free(decl_name);
const name_str_index = try self.makeString(decl_name);
- const addr = try self.allocateAtom(&decl.link.macho, code_len, required_alignment, .{
- .seg = self.text_segment_cmd_index.?,
- .sect = self.text_section_index.?,
- });
+ const addr = try self.allocateAtom(&decl.link.macho, code_len, required_alignment, match);
log.debug("allocated atom for {s} at 0x{x}", .{ decl_name, addr });
- errdefer self.freeAtom(&decl.link.macho, .{
- .seg = self.text_segment_cmd_index.?,
- .sect = self.text_section_index.?,
- });
+ errdefer self.freeAtom(&decl.link.macho, match);
symbol.* = .{
.n_strx = name_str_index,
.n_type = macho.N_SECT,
- .n_sect = @intCast(u8, self.text_section_index.?) + 1,
+ .n_sect = @intCast(u8, self.section_ordinals.getIndex(match).?) + 1,
.n_desc = 0,
.n_value = addr,
};
@@ -3912,12 +4054,11 @@ pub fn freeDecl(self: *MachO, decl: *Module.Decl) void {
if (self.llvm_object) |llvm_object| return llvm_object.freeDecl(decl);
}
log.debug("freeDecl {*}", .{decl});
- _ = self.decls.swapRemove(decl);
+ const kv = self.decls.fetchSwapRemove(decl);
+ if (kv.?.value) |match| {
+ self.freeAtom(&decl.link.macho, match);
+ }
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
- self.freeAtom(&decl.link.macho, .{
- .seg = self.text_segment_cmd_index.?,
- .sect = self.text_section_index.?,
- });
if (decl.link.macho.local_sym_index != 0) {
self.locals_free_list.append(self.base.allocator, decl.link.macho.local_sym_index) catch {};
@@ -3958,6 +4099,29 @@ pub fn getDeclVAddr(self: *MachO, decl: *const Module.Decl) u64 {
return self.locals.items[decl.link.macho.local_sym_index].n_value;
}
+pub fn getDeclVAddrWithReloc(self: *MachO, decl: *const Module.Decl, offset: u64) !u64 {
+ assert(decl.link.macho.local_sym_index != 0);
+ assert(self.active_decl != null);
+
+ const atom = &self.active_decl.?.link.macho;
+ try atom.relocs.append(self.base.allocator, .{
+ .offset = @intCast(u32, offset),
+ .target = .{ .local = decl.link.macho.local_sym_index },
+ .addend = 0,
+ .subtractor = null,
+ .pcrel = false,
+ .length = 3,
+ .@"type" = switch (self.base.options.target.cpu.arch) {
+ .aarch64 => @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_UNSIGNED),
+ .x86_64 => @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_UNSIGNED),
+ else => unreachable,
+ },
+ });
+ try atom.rebases.append(self.base.allocator, offset);
+
+ return 0;
+}
+
fn populateMissingMetadata(self: *MachO) !void {
const cpu_arch = self.base.options.target.cpu.arch;
diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig
index 4493ae4d5b..200ac5e568 100644
--- a/src/link/Plan9.zig
+++ b/src/link/Plan9.zig
@@ -203,7 +203,7 @@ fn putFn(self: *Plan9, decl: *Module.Decl, out: FnDeclOutput) !void {
self.syms.items[fn_map_res.value_ptr.sym_index] = .{
.type = .z,
// just put a giant number, no source file will have this many newlines
- .value = std.math.maxInt(u32),
+ .value = std.math.maxInt(u31),
.name = &.{ 0, 0 },
};
}
@@ -740,3 +740,26 @@ pub fn allocateDeclIndexes(self: *Plan9, decl: *Module.Decl) !void {
_ = self;
_ = decl;
}
+pub fn getDeclVAddr(self: *Plan9, decl: *const Module.Decl) u64 {
+ if (decl.ty.zigTypeTag() == .Fn) {
+ var start = self.bases.text;
+ var it_file = self.fn_decl_table.iterator();
+ while (it_file.next()) |fentry| {
+ var symidx_and_submap = fentry.value_ptr;
+ var submap_it = symidx_and_submap.functions.iterator();
+ while (submap_it.next()) |entry| {
+ if (entry.key_ptr.* == decl) return start;
+ start += entry.value_ptr.code.len;
+ }
+ }
+ unreachable;
+ } else {
+ var start = self.bases.data + self.got_len * if (!self.sixtyfour_bit) @as(u32, 4) else 8;
+ var it = self.data_decl_table.iterator();
+ while (it.next()) |kv| {
+ if (decl == kv.key_ptr.*) return start;
+ start += kv.value_ptr.len;
+ }
+ unreachable;
+ }
+}
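Plan9 keeps no per-decl symbol addresses, so the vaddr is recovered by replaying the layout; schematically:

    // fn decl:   vaddr = bases.text + sum(code.len of fn decls laid out before it)
    // data decl: vaddr = bases.data + got_len * ptr_bytes
    //                    + sum(len of data decls laid out before it)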
diff --git a/src/mingw.zig b/src/mingw.zig
index 264740c333..f555634459 100644
--- a/src/mingw.zig
+++ b/src/mingw.zig
@@ -408,7 +408,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
errdefer comp.gpa.free(lib_final_path);
const llvm = @import("codegen/llvm/bindings.zig");
- const arch_type = @import("target.zig").archToLLVM(target.cpu.arch);
+ const arch_type = target_util.archToLLVM(target.cpu.arch);
const def_final_path_z = try arena.dupeZ(u8, def_final_path);
const lib_final_path_z = try arena.dupeZ(u8, lib_final_path);
if (llvm.WriteImportLibrary(def_final_path_z.ptr, arch_type, lib_final_path_z.ptr, true)) {
diff --git a/src/print_air.zig b/src/print_air.zig
index cc4acfa279..3101c109cf 100644
--- a/src/print_air.zig
+++ b/src/print_air.zig
@@ -156,6 +156,7 @@ const Writer = struct {
.ret,
.ret_load,
.tag_name,
+ .error_name,
=> try w.writeUnOp(s, inst),
.breakpoint,
diff --git a/src/tracy.zig b/src/tracy.zig
index 033cf1bcf0..4ecae74481 100644
--- a/src/tracy.zig
+++ b/src/tracy.zig
@@ -1,9 +1,10 @@
const std = @import("std");
const builtin = @import("builtin");
+const build_options = @import("build_options");
-pub const enable = if (builtin.is_test) false else @import("build_options").enable_tracy;
-pub const enable_allocation = enable and @import("build_options").enable_tracy_allocation;
-pub const enable_callstack = enable and @import("build_options").enable_tracy_callstack;
+pub const enable = if (builtin.is_test) false else build_options.enable_tracy;
+pub const enable_allocation = enable and build_options.enable_tracy_allocation;
+pub const enable_callstack = enable and build_options.enable_tracy_callstack;
// TODO: make this configurable
const callstack_depth = 10;
diff --git a/src/translate_c.zig b/src/translate_c.zig
index 79d68ff51b..4d993e62ea 100644
--- a/src/translate_c.zig
+++ b/src/translate_c.zig
@@ -820,7 +820,7 @@ fn visitVarDecl(c: *Context, var_decl: *const clang.VarDecl, mangled_name: ?[]co
// The C language specification states that variables with static or threadlocal
// storage without an initializer are initialized to a zero value.
- // @import("std").mem.zeroes(T)
+ // std.mem.zeroes(T)
init_node = try Tag.std_mem_zeroes.create(c.arena, type_node);
}
@@ -5211,7 +5211,7 @@ const MacroSlicer = struct {
// mapped function exists in `std.zig.c_translation.Macros`
test "Macro matching" {
const helper = struct {
- const MacroFunctions = @import("std").zig.c_translation.Macros;
+ const MacroFunctions = std.zig.c_translation.Macros;
fn checkMacro(allocator: mem.Allocator, pattern_list: PatternList, source: []const u8, comptime expected_match: ?[]const u8) !void {
var tok_list = std.ArrayList(CToken).init(allocator);
defer tok_list.deinit();
diff --git a/src/type.zig b/src/type.zig
index b948093994..167248a179 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -1916,7 +1916,7 @@ pub const Type = extern union {
const fields = self.structFields();
const is_packed = if (self.castTag(.@"struct")) |payload| p: {
const struct_obj = payload.data;
- assert(struct_obj.status == .have_layout);
+ assert(struct_obj.haveLayout());
break :p struct_obj.layout == .Packed;
} else false;
@@ -2220,7 +2220,7 @@ pub const Type = extern union {
if (field_count == 0) return 0;
const struct_obj = ty.castTag(.@"struct").?.data;
- assert(struct_obj.status == .have_layout);
+ assert(struct_obj.haveLayout());
var total: u64 = 0;
for (struct_obj.fields.values()) |field| {
@@ -2922,6 +2922,15 @@ pub const Type = extern union {
}
}
+ pub fn containerLayout(ty: Type) std.builtin.TypeInfo.ContainerLayout {
+ return switch (ty.tag()) {
+ .@"struct" => ty.castTag(.@"struct").?.data.layout,
+ .@"union" => ty.castTag(.@"union").?.data.layout,
+ .union_tagged => ty.castTag(.union_tagged).?.data.layout,
+ else => unreachable,
+ };
+ }
+
/// Asserts that the type is an error union.
pub fn errorUnionPayload(self: Type) Type {
return switch (self.tag()) {
@@ -3617,6 +3626,20 @@ pub const Type = extern union {
};
}
+ /// Asserts that `ty` is an error set and not `anyerror`.
+ pub fn errorSetNames(ty: Type) []const []const u8 {
+ return switch (ty.tag()) {
+ .error_set_single => blk: {
+ // Work around coercion problems
+ const tmp: *const [1][]const u8 = &ty.castTag(.error_set_single).?.data;
+ break :blk tmp;
+ },
+ .error_set_merged => ty.castTag(.error_set_merged).?.data.keys(),
+ .error_set => ty.castTag(.error_set).?.data.names.keys(),
+ else => unreachable,
+ };
+ }
+
pub fn enumFields(ty: Type) Module.EnumFull.NameMap {
return switch (ty.tag()) {
.enum_full, .enum_nonexhaustive => ty.cast(Payload.EnumFull).?.data.fields,
@@ -3751,74 +3774,153 @@ pub const Type = extern union {
}
}
+ pub const PackedFieldOffset = struct {
+ field: usize,
+ offset: u64,
+ running_bits: u16,
+ };
+
+ pub const PackedStructOffsetIterator = struct {
+ field: usize = 0,
+ offset: u64 = 0,
+ big_align: u32 = 0,
+ running_bits: u16 = 0,
+ struct_obj: *Module.Struct,
+ target: Target,
+
+ pub fn next(it: *PackedStructOffsetIterator) ?PackedFieldOffset {
+ comptime assert(Type.packed_struct_layout_version == 1);
+ if (it.struct_obj.fields.count() <= it.field)
+ return null;
+
+ const field = it.struct_obj.fields.values()[it.field];
+ defer it.field += 1;
+ if (!field.ty.hasCodeGenBits()) {
+ return PackedFieldOffset{
+ .field = it.field,
+ .offset = it.offset,
+ .running_bits = it.running_bits,
+ };
+ }
+
+ const field_align = field.packedAlignment();
+ if (field_align == 0) {
+ defer it.running_bits += @intCast(u16, field.ty.bitSize(it.target));
+ return PackedFieldOffset{
+ .field = it.field,
+ .offset = it.offset,
+ .running_bits = it.running_bits,
+ };
+ } else {
+ it.big_align = @maximum(it.big_align, field_align);
+
+ if (it.running_bits != 0) {
+ var int_payload: Payload.Bits = .{
+ .base = .{ .tag = .int_unsigned },
+ .data = it.running_bits,
+ };
+ const int_ty: Type = .{ .ptr_otherwise = &int_payload.base };
+ const int_align = int_ty.abiAlignment(it.target);
+ it.big_align = @maximum(it.big_align, int_align);
+ it.offset = std.mem.alignForwardGeneric(u64, it.offset, int_align);
+ it.offset += int_ty.abiSize(it.target);
+ it.running_bits = 0;
+ }
+ it.offset = std.mem.alignForwardGeneric(u64, it.offset, field_align);
+ defer it.offset += field.ty.abiSize(it.target);
+ return PackedFieldOffset{
+ .field = it.field,
+ .offset = it.offset,
+ .running_bits = it.running_bits,
+ };
+ }
+ }
+ };
+
+ /// Get an iterator that iterates over all the struct fields, returning each field's
+ /// index and byte offset. Asserts that the type is a packed struct.
+ pub fn iteratePackedStructOffsets(ty: Type, target: Target) PackedStructOffsetIterator {
+ const struct_obj = ty.castTag(.@"struct").?.data;
+ assert(struct_obj.haveLayout());
+ assert(struct_obj.layout == .Packed);
+ return .{ .struct_obj = struct_obj, .target = target };
+ }
+
+ pub const FieldOffset = struct {
+ field: usize,
+ offset: u64,
+ };
+
+ pub const StructOffsetIterator = struct {
+ field: usize = 0,
+ offset: u64 = 0,
+ big_align: u32 = 0,
+ struct_obj: *Module.Struct,
+ target: Target,
+
+ pub fn next(it: *StructOffsetIterator) ?FieldOffset {
+ if (it.struct_obj.fields.count() <= it.field)
+ return null;
+
+ const field = it.struct_obj.fields.values()[it.field];
+ defer it.field += 1;
+ if (!field.ty.hasCodeGenBits())
+ return FieldOffset{ .field = it.field, .offset = it.offset };
+
+ const field_align = field.normalAlignment(it.target);
+ it.big_align = @maximum(it.big_align, field_align);
+ it.offset = std.mem.alignForwardGeneric(u64, it.offset, field_align);
+ defer it.offset += field.ty.abiSize(it.target);
+ return FieldOffset{ .field = it.field, .offset = it.offset };
+ }
+ };
+
+ /// Get an iterator that iterates over all the struct fields, returning each field's
+ /// index and byte offset. Asserts that the type is a non-packed struct.
+ pub fn iterateStructOffsets(ty: Type, target: Target) StructOffsetIterator {
+ const struct_obj = ty.castTag(.@"struct").?.data;
+ assert(struct_obj.haveLayout());
+ assert(struct_obj.layout != .Packed);
+ return .{ .struct_obj = struct_obj, .target = target };
+ }
+
/// Supports structs and unions.
/// For packed structs, it returns the byte offset of the containing integer.
pub fn structFieldOffset(ty: Type, index: usize, target: Target) u64 {
switch (ty.tag()) {
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
- assert(struct_obj.status == .have_layout);
+ assert(struct_obj.haveLayout());
const is_packed = struct_obj.layout == .Packed;
if (!is_packed) {
- var offset: u64 = 0;
- var big_align: u32 = 0;
- for (struct_obj.fields.values()) |field, i| {
- if (!field.ty.hasCodeGenBits()) continue;
-
- const field_align = field.normalAlignment(target);
- big_align = @maximum(big_align, field_align);
- offset = std.mem.alignForwardGeneric(u64, offset, field_align);
- if (i == index) return offset;
- offset += field.ty.abiSize(target);
+ var it = ty.iterateStructOffsets(target);
+ while (it.next()) |field_offset| {
+ if (index == field_offset.field)
+ return field_offset.offset;
}
- offset = std.mem.alignForwardGeneric(u64, offset, big_align);
- return offset;
- }
- comptime assert(Type.packed_struct_layout_version == 1);
- var offset: u64 = 0;
- var big_align: u32 = 0;
- var running_bits: u16 = 0;
- for (struct_obj.fields.values()) |field, i| {
- if (!field.ty.hasCodeGenBits()) continue;
-
- const field_align = field.packedAlignment();
- if (field_align == 0) {
- if (i == index) return offset;
- running_bits += @intCast(u16, field.ty.bitSize(target));
- } else {
- big_align = @maximum(big_align, field_align);
+ return std.mem.alignForwardGeneric(u64, it.offset, it.big_align);
+ }
- if (running_bits != 0) {
- var int_payload: Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = running_bits,
- };
- const int_ty: Type = .{ .ptr_otherwise = &int_payload.base };
- const int_align = int_ty.abiAlignment(target);
- big_align = @maximum(big_align, int_align);
- offset = std.mem.alignForwardGeneric(u64, offset, int_align);
- offset += int_ty.abiSize(target);
- running_bits = 0;
- }
- offset = std.mem.alignForwardGeneric(u64, offset, field_align);
- if (i == index) return offset;
- offset += field.ty.abiSize(target);
- }
+ var it = ty.iteratePackedStructOffsets(target);
+ while (it.next()) |field_offset| {
+ if (index == field_offset.field)
+ return field_offset.offset;
}
- if (running_bits != 0) {
+
+ if (it.running_bits != 0) {
var int_payload: Payload.Bits = .{
.base = .{ .tag = .int_unsigned },
- .data = running_bits,
+ .data = it.running_bits,
};
const int_ty: Type = .{ .ptr_otherwise = &int_payload.base };
const int_align = int_ty.abiAlignment(target);
- big_align = @maximum(big_align, int_align);
- offset = std.mem.alignForwardGeneric(u64, offset, int_align);
- offset += int_ty.abiSize(target);
+ it.big_align = @maximum(it.big_align, int_align);
+ it.offset = std.mem.alignForwardGeneric(u64, it.offset, int_align);
+ it.offset += int_ty.abiSize(target);
}
- offset = std.mem.alignForwardGeneric(u64, offset, big_align);
- return offset;
+ it.offset = std.mem.alignForwardGeneric(u64, it.offset, it.big_align);
+ return it.offset;
},
.@"union" => return 0,
.union_tagged => {
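The non-packed iterator implements the familiar layout rule: align the running offset up to the field's alignment, yield it, then advance by the field's size; the total struct size rounds the final offset up to the largest field alignment. A worked example (extern struct so the in-declaration-order layout is guaranteed):

    const T = extern struct { a: u8, b: u32, c: u8 };
    // a: offset 0 (align 1), running offset -> 1
    // b: 1 aligned up to 4 -> offset 4, running offset -> 8
    // c: offset 8 (align 1), running offset -> 9
    // size: 9 aligned up to big_align (4) -> 12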
diff --git a/src/value.zig b/src/value.zig
index 9da9fa6307..c4d35ad006 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -2635,9 +2635,17 @@ pub const Value = extern union {
var lhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space);
const shift = @intCast(usize, rhs.toUnsignedInt());
+
+ const result_limbs = lhs_bigint.limbs.len -| (shift / (@sizeOf(std.math.big.Limb) * 8));
+ if (result_limbs == 0) {
+ // The shift is enough to remove all the bits from the number, which means the
+ // result is zero.
+ return Value.zero;
+ }
+
const limbs = try allocator.alloc(
std.math.big.Limb,
- lhs_bigint.limbs.len - (shift / (@sizeOf(std.math.big.Limb) * 8)),
+ result_limbs,
);
var result_bigint = BigIntMutable{
.limbs = limbs,
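Worked numbers for the limb count above, assuming 64-bit limbs:

    // lhs has 2 limbs (128 bits), shift = 130:
    //   whole limbs discarded = 130 / 64 = 2
    //   result_limbs = 2 -| 2 = 0 -> every bit is shifted out, return Value.zero
    // shift = 70 instead:
    //   result_limbs = 2 -| 1 = 1 -> allocate a single limb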
diff --git a/test/behavior.zig b/test/behavior.zig
index 0c4f2debb4..0b785a4bbd 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -8,203 +8,209 @@ test {
_ = @import("behavior/bugs/679.zig");
_ = @import("behavior/bugs/6850.zig");
_ = @import("behavior/fn_in_struct_in_comptime.zig");
- _ = @import("behavior/hasfield.zig");
_ = @import("behavior/hasdecl.zig");
+ _ = @import("behavior/hasfield.zig");
_ = @import("behavior/pub_enum.zig");
_ = @import("behavior/type_info.zig");
_ = @import("behavior/type.zig");
- if (builtin.zig_backend != .stage2_arm and builtin.zig_backend != .stage2_x86_64) {
- // Tests that pass for stage1, llvm backend, C backend, wasm backend.
- _ = @import("behavior/bugs/3586.zig");
- _ = @import("behavior/basic.zig");
- _ = @import("behavior/bitcast.zig");
+ if (builtin.zig_backend != .stage2_x86_64) {
+ // Tests that pass for stage1, llvm backend, C backend, wasm backend, and arm backend.
_ = @import("behavior/bool.zig");
- _ = @import("behavior/bugs/624.zig");
- _ = @import("behavior/bugs/655.zig");
- _ = @import("behavior/bugs/704.zig");
- _ = @import("behavior/bugs/1486.zig");
- _ = @import("behavior/bugs/2692.zig");
- _ = @import("behavior/bugs/2889.zig");
- _ = @import("behavior/bugs/3046.zig");
- _ = @import("behavior/bugs/4769_a.zig");
- _ = @import("behavior/bugs/4769_b.zig");
- _ = @import("behavior/bugs/4954.zig");
- _ = @import("behavior/byval_arg_var.zig");
- _ = @import("behavior/call.zig");
- _ = @import("behavior/defer.zig");
- _ = @import("behavior/enum.zig");
- _ = @import("behavior/error.zig");
- _ = @import("behavior/generics.zig");
- _ = @import("behavior/if.zig");
- _ = @import("behavior/import.zig");
- _ = @import("behavior/incomplete_struct_param_tld.zig");
- _ = @import("behavior/inttoptr.zig");
- _ = @import("behavior/member_func.zig");
- _ = @import("behavior/null.zig");
- _ = @import("behavior/pointers.zig");
- _ = @import("behavior/ptrcast.zig");
- _ = @import("behavior/ref_var_in_if_after_if_2nd_switch_prong.zig");
- _ = @import("behavior/struct.zig");
- _ = @import("behavior/this.zig");
- _ = @import("behavior/truncate.zig");
- _ = @import("behavior/underscore.zig");
- _ = @import("behavior/usingnamespace.zig");
- _ = @import("behavior/void.zig");
- _ = @import("behavior/while.zig");
- if (builtin.zig_backend != .stage2_wasm) {
- // Tests that pass for stage1, llvm backend, C backend
+ if (builtin.zig_backend != .stage2_arm and builtin.zig_backend != .stage2_x86_64) {
+ // Tests that pass for stage1, llvm backend, C backend, wasm backend.
_ = @import("behavior/align.zig");
_ = @import("behavior/array.zig");
+ _ = @import("behavior/basic.zig");
+ _ = @import("behavior/bitcast.zig");
+ _ = @import("behavior/bugs/624.zig");
+ _ = @import("behavior/bugs/655.zig");
+ _ = @import("behavior/bugs/704.zig");
+ _ = @import("behavior/bugs/1486.zig");
+ _ = @import("behavior/bugs/2692.zig");
+ _ = @import("behavior/bugs/2889.zig");
+ _ = @import("behavior/bugs/3046.zig");
+ _ = @import("behavior/bugs/3586.zig");
_ = @import("behavior/bugs/4560.zig");
+ _ = @import("behavior/bugs/4769_a.zig");
+ _ = @import("behavior/bugs/4769_b.zig");
+ _ = @import("behavior/bugs/4954.zig");
+ _ = @import("behavior/byval_arg_var.zig");
+ _ = @import("behavior/call.zig");
_ = @import("behavior/cast.zig");
+ _ = @import("behavior/defer.zig");
+ _ = @import("behavior/enum.zig");
+ _ = @import("behavior/error.zig");
_ = @import("behavior/for.zig");
- _ = @import("behavior/int128.zig");
+ _ = @import("behavior/generics.zig");
+ _ = @import("behavior/if.zig");
+ _ = @import("behavior/import.zig");
+ _ = @import("behavior/incomplete_struct_param_tld.zig");
+ _ = @import("behavior/inttoptr.zig");
+ _ = @import("behavior/member_func.zig");
+ _ = @import("behavior/null.zig");
_ = @import("behavior/optional.zig");
- _ = @import("behavior/translate_c_macros.zig");
+ _ = @import("behavior/pointers.zig");
+ _ = @import("behavior/ptrcast.zig");
+ _ = @import("behavior/ref_var_in_if_after_if_2nd_switch_prong.zig");
+ _ = @import("behavior/src.zig");
+ _ = @import("behavior/struct.zig");
+ _ = @import("behavior/this.zig");
+ _ = @import("behavior/truncate.zig");
+ _ = @import("behavior/try.zig");
+ _ = @import("behavior/undefined.zig");
+ _ = @import("behavior/underscore.zig");
+ _ = @import("behavior/usingnamespace.zig");
+ _ = @import("behavior/void.zig");
+ _ = @import("behavior/while.zig");
- if (builtin.zig_backend != .stage2_c) {
- // Tests that pass for stage1 and the llvm backend.
- _ = @import("behavior/align_llvm.zig");
- _ = @import("behavior/alignof.zig");
- _ = @import("behavior/array_llvm.zig");
- _ = @import("behavior/atomics.zig");
- _ = @import("behavior/basic_llvm.zig");
- _ = @import("behavior/bugs/394.zig");
- _ = @import("behavior/bugs/656.zig");
- _ = @import("behavior/bugs/1277.zig");
- _ = @import("behavior/bugs/1310.zig");
- _ = @import("behavior/bugs/1381.zig");
- _ = @import("behavior/bugs/1500.zig");
- _ = @import("behavior/bugs/1741.zig");
- _ = @import("behavior/bugs/2006.zig");
- _ = @import("behavior/bugs/2578.zig");
- _ = @import("behavior/bugs/3007.zig");
- _ = @import("behavior/bugs/3112.zig");
- _ = @import("behavior/bugs/3367.zig");
- _ = @import("behavior/bugs/7250.zig");
- _ = @import("behavior/bugs/9584.zig");
- _ = @import("behavior/cast_llvm.zig");
- _ = @import("behavior/enum_llvm.zig");
- _ = @import("behavior/eval.zig");
- _ = @import("behavior/floatop.zig");
- _ = @import("behavior/fn.zig");
- _ = @import("behavior/generics_llvm.zig");
- _ = @import("behavior/math.zig");
- _ = @import("behavior/maximum_minimum.zig");
- _ = @import("behavior/namespace_depends_on_compile_var.zig");
- _ = @import("behavior/null_llvm.zig");
- _ = @import("behavior/optional_llvm.zig");
- _ = @import("behavior/popcount.zig");
- _ = @import("behavior/saturating_arithmetic.zig");
- _ = @import("behavior/sizeof_and_typeof.zig");
- _ = @import("behavior/slice.zig");
- _ = @import("behavior/struct_llvm.zig");
- _ = @import("behavior/switch.zig");
- _ = @import("behavior/undefined.zig");
- _ = @import("behavior/union.zig");
- _ = @import("behavior/widening.zig");
+ if (builtin.zig_backend != .stage2_wasm) {
+ // Tests that pass for stage1, llvm backend, C backend
+ _ = @import("behavior/cast_int.zig");
+ _ = @import("behavior/int128.zig");
+ _ = @import("behavior/translate_c_macros.zig");
- if (builtin.zig_backend != .stage1) {
- // When all comptime_memory.zig tests pass, #9646 can be closed.
- // _ = @import("behavior/comptime_memory.zig");
- _ = @import("behavior/slice_stage2.zig");
- } else {
- // Tests that only pass for the stage1 backend.
- _ = @import("behavior/align_stage1.zig");
- _ = @import("behavior/array_stage1.zig");
- if (builtin.os.tag != .wasi) {
- _ = @import("behavior/asm.zig");
- _ = @import("behavior/async_fn.zig");
- }
- _ = @import("behavior/await_struct.zig");
+ if (builtin.zig_backend != .stage2_c) {
+ // Tests that pass for stage1 and the llvm backend.
+ _ = @import("behavior/align_llvm.zig");
+ _ = @import("behavior/alignof.zig");
+ _ = @import("behavior/array_llvm.zig");
+ _ = @import("behavior/atomics.zig");
+ _ = @import("behavior/basic_llvm.zig");
_ = @import("behavior/bit_shifting.zig");
- _ = @import("behavior/bitcast_stage1.zig");
- _ = @import("behavior/bitreverse.zig");
- _ = @import("behavior/bugs/421.zig");
- _ = @import("behavior/bugs/529.zig");
- _ = @import("behavior/bugs/718.zig");
- _ = @import("behavior/bugs/726.zig");
- _ = @import("behavior/bugs/828.zig");
- _ = @import("behavior/bugs/920.zig");
- _ = @import("behavior/bugs/1025.zig");
- _ = @import("behavior/bugs/1076.zig");
- _ = @import("behavior/bugs/1120.zig");
- _ = @import("behavior/bugs/1421.zig");
- _ = @import("behavior/bugs/1442.zig");
- _ = @import("behavior/bugs/1607.zig");
+ _ = @import("behavior/bugs/394.zig");
+ _ = @import("behavior/bugs/656.zig");
+ _ = @import("behavior/bugs/1277.zig");
+ _ = @import("behavior/bugs/1310.zig");
+ _ = @import("behavior/bugs/1381.zig");
+ _ = @import("behavior/bugs/1500.zig");
_ = @import("behavior/bugs/1735.zig");
- _ = @import("behavior/bugs/1851.zig");
- _ = @import("behavior/bugs/1914.zig");
- _ = @import("behavior/bugs/2114.zig");
- _ = @import("behavior/bugs/3384.zig");
- _ = @import("behavior/bugs/3742.zig");
- _ = @import("behavior/bugs/3779.zig");
- _ = @import("behavior/bugs/4328.zig");
- _ = @import("behavior/bugs/5398.zig");
- _ = @import("behavior/bugs/5413.zig");
- _ = @import("behavior/bugs/5474.zig");
- _ = @import("behavior/bugs/5487.zig");
- _ = @import("behavior/bugs/6456.zig");
- _ = @import("behavior/bugs/6781.zig");
- _ = @import("behavior/bugs/7003.zig");
- _ = @import("behavior/bugs/7027.zig");
- _ = @import("behavior/bugs/7047.zig");
- _ = @import("behavior/bugs/10147.zig");
- _ = @import("behavior/byteswap.zig");
- _ = @import("behavior/call_stage1.zig");
- _ = @import("behavior/cast_stage1.zig");
- _ = @import("behavior/const_slice_child.zig");
- _ = @import("behavior/defer_stage1.zig");
- _ = @import("behavior/enum_stage1.zig");
- _ = @import("behavior/error_stage1.zig");
- _ = @import("behavior/eval_stage1.zig");
- _ = @import("behavior/field_parent_ptr.zig");
- _ = @import("behavior/floatop_stage1.zig");
- _ = @import("behavior/fn_stage1.zig");
- _ = @import("behavior/fn_delegation.zig");
- _ = @import("behavior/for_stage1.zig");
- _ = @import("behavior/if_stage1.zig");
- _ = @import("behavior/ir_block_deps.zig");
- _ = @import("behavior/math_stage1.zig");
+ _ = @import("behavior/bugs/1741.zig");
+ _ = @import("behavior/bugs/2006.zig");
+ _ = @import("behavior/bugs/2578.zig");
+ _ = @import("behavior/bugs/3007.zig");
+ _ = @import("behavior/bugs/3112.zig");
+ _ = @import("behavior/bugs/3367.zig");
+ _ = @import("behavior/bugs/7250.zig");
+ _ = @import("behavior/bugs/9584.zig");
+ _ = @import("behavior/cast_llvm.zig");
+ _ = @import("behavior/enum_llvm.zig");
+ _ = @import("behavior/error_llvm.zig");
+ _ = @import("behavior/eval.zig");
+ _ = @import("behavior/floatop.zig");
+ _ = @import("behavior/fn.zig");
+ _ = @import("behavior/generics_llvm.zig");
+ _ = @import("behavior/math.zig");
+ _ = @import("behavior/maximum_minimum.zig");
_ = @import("behavior/merge_error_sets.zig");
- _ = @import("behavior/misc.zig");
- _ = @import("behavior/muladd.zig");
- _ = @import("behavior/null_stage1.zig");
- _ = @import("behavior/optional_stage1.zig");
- _ = @import("behavior/pointers_stage1.zig");
- _ = @import("behavior/popcount_stage1.zig");
- _ = @import("behavior/prefetch.zig");
- _ = @import("behavior/ptrcast_stage1.zig");
- _ = @import("behavior/reflection.zig");
- _ = @import("behavior/saturating_arithmetic_stage1.zig");
- _ = @import("behavior/select.zig");
- _ = @import("behavior/shuffle.zig");
- _ = @import("behavior/sizeof_and_typeof_stage1.zig");
- _ = @import("behavior/slice_stage1.zig");
- _ = @import("behavior/struct_contains_null_ptr_itself.zig");
- _ = @import("behavior/struct_contains_slice_of_itself.zig");
- _ = @import("behavior/struct_stage1.zig");
- _ = @import("behavior/switch_prong_err_enum.zig");
- _ = @import("behavior/switch_prong_implicit_cast.zig");
- _ = @import("behavior/switch_stage1.zig");
- _ = @import("behavior/truncate_stage1.zig");
- _ = @import("behavior/try.zig");
- _ = @import("behavior/tuple.zig");
- _ = @import("behavior/type_stage1.zig");
- _ = @import("behavior/type_info_stage1.zig");
- _ = @import("behavior/typename.zig");
- _ = @import("behavior/union_stage1.zig");
- _ = @import("behavior/union_with_members.zig");
- _ = @import("behavior/var_args.zig");
- _ = @import("behavior/vector.zig");
- if (builtin.target.cpu.arch == .wasm32) {
- _ = @import("behavior/wasm.zig");
+ _ = @import("behavior/namespace_depends_on_compile_var.zig");
+ _ = @import("behavior/null_llvm.zig");
+ _ = @import("behavior/optional_llvm.zig");
+ _ = @import("behavior/popcount.zig");
+ _ = @import("behavior/saturating_arithmetic.zig");
+ _ = @import("behavior/sizeof_and_typeof.zig");
+ _ = @import("behavior/slice.zig");
+ _ = @import("behavior/struct_llvm.zig");
+ _ = @import("behavior/switch.zig");
+ _ = @import("behavior/union.zig");
+ _ = @import("behavior/widening.zig");
+
+ if (builtin.zig_backend != .stage1) {
+ // When all comptime_memory.zig tests pass, #9646 can be closed.
+ // _ = @import("behavior/comptime_memory.zig");
+ _ = @import("behavior/slice_stage2.zig");
+ } else {
+ // Tests that only pass for the stage1 backend.
+ _ = @import("behavior/align_stage1.zig");
+ _ = @import("behavior/array_stage1.zig");
+ if (builtin.os.tag != .wasi) {
+ _ = @import("behavior/asm.zig");
+ _ = @import("behavior/async_fn.zig");
+ }
+ _ = @import("behavior/await_struct.zig");
+ _ = @import("behavior/bitcast_stage1.zig");
+ _ = @import("behavior/bitreverse.zig");
+ _ = @import("behavior/bugs/421.zig");
+ _ = @import("behavior/bugs/529.zig");
+ _ = @import("behavior/bugs/718.zig");
+ _ = @import("behavior/bugs/726.zig");
+ _ = @import("behavior/bugs/828.zig");
+ _ = @import("behavior/bugs/920.zig");
+ _ = @import("behavior/bugs/1025.zig");
+ _ = @import("behavior/bugs/1076.zig");
+ _ = @import("behavior/bugs/1120.zig");
+ _ = @import("behavior/bugs/1421.zig");
+ _ = @import("behavior/bugs/1442.zig");
+ _ = @import("behavior/bugs/1607.zig");
+ _ = @import("behavior/bugs/1851.zig");
+ _ = @import("behavior/bugs/1914.zig");
+ _ = @import("behavior/bugs/2114.zig");
+ _ = @import("behavior/bugs/3384.zig");
+ _ = @import("behavior/bugs/3742.zig");
+ _ = @import("behavior/bugs/3779.zig");
+ _ = @import("behavior/bugs/4328.zig");
+ _ = @import("behavior/bugs/5398.zig");
+ _ = @import("behavior/bugs/5413.zig");
+ _ = @import("behavior/bugs/5474.zig");
+ _ = @import("behavior/bugs/5487.zig");
+ _ = @import("behavior/bugs/6456.zig");
+ _ = @import("behavior/bugs/6781.zig");
+ _ = @import("behavior/bugs/7003.zig");
+ _ = @import("behavior/bugs/7027.zig");
+ _ = @import("behavior/bugs/7047.zig");
+ _ = @import("behavior/bugs/10147.zig");
+ _ = @import("behavior/byteswap.zig");
+ _ = @import("behavior/call_stage1.zig");
+ _ = @import("behavior/cast_stage1.zig");
+ _ = @import("behavior/const_slice_child.zig");
+ _ = @import("behavior/defer_stage1.zig");
+ _ = @import("behavior/enum_stage1.zig");
+ _ = @import("behavior/error_stage1.zig");
+ _ = @import("behavior/eval_stage1.zig");
+ _ = @import("behavior/field_parent_ptr.zig");
+ _ = @import("behavior/floatop_stage1.zig");
+ _ = @import("behavior/fn_stage1.zig");
+ _ = @import("behavior/fn_delegation.zig");
+ _ = @import("behavior/for_stage1.zig");
+ _ = @import("behavior/if_stage1.zig");
+ _ = @import("behavior/ir_block_deps.zig");
+ _ = @import("behavior/math_stage1.zig");
+ _ = @import("behavior/misc.zig");
+ _ = @import("behavior/muladd.zig");
+ _ = @import("behavior/null_stage1.zig");
+ _ = @import("behavior/optional_stage1.zig");
+ _ = @import("behavior/pointers_stage1.zig");
+ _ = @import("behavior/popcount_stage1.zig");
+ _ = @import("behavior/prefetch.zig");
+ _ = @import("behavior/ptrcast_stage1.zig");
+ _ = @import("behavior/reflection.zig");
+ _ = @import("behavior/saturating_arithmetic_stage1.zig");
+ _ = @import("behavior/select.zig");
+ _ = @import("behavior/shuffle.zig");
+ _ = @import("behavior/sizeof_and_typeof_stage1.zig");
+ _ = @import("behavior/slice_stage1.zig");
+ _ = @import("behavior/struct_contains_null_ptr_itself.zig");
+ _ = @import("behavior/struct_contains_slice_of_itself.zig");
+ _ = @import("behavior/struct_stage1.zig");
+ _ = @import("behavior/switch_prong_err_enum.zig");
+ _ = @import("behavior/switch_prong_implicit_cast.zig");
+ _ = @import("behavior/switch_stage1.zig");
+ _ = @import("behavior/truncate_stage1.zig");
+ _ = @import("behavior/tuple.zig");
+ _ = @import("behavior/type_stage1.zig");
+ _ = @import("behavior/type_info_stage1.zig");
+ _ = @import("behavior/typename.zig");
+ _ = @import("behavior/union_stage1.zig");
+ _ = @import("behavior/union_with_members.zig");
+ _ = @import("behavior/var_args.zig");
+ _ = @import("behavior/vector.zig");
+ if (builtin.target.cpu.arch == .wasm32) {
+ _ = @import("behavior/wasm.zig");
+ }
+ _ = @import("behavior/while_stage1.zig");
+ _ = @import("behavior/translate_c_macros_stage1.zig");
}
- _ = @import("behavior/while_stage1.zig");
- _ = @import("behavior/src.zig");
- _ = @import("behavior/translate_c_macros_stage1.zig");
}
}
}
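Note on the gating above: builtin.zig_backend is comptime-known, so whole groups of test imports can be selected per compiler backend. A minimal sketch of the same pattern, where the test body is hypothetical:

const builtin = @import("builtin");
const std = @import("std");

test "gated by backend" {
    // Hypothetical body: skip under the stage1 backend, run everywhere else.
    if (builtin.zig_backend == .stage1) return error.SkipZigTest;
    try std.testing.expect(1 + 1 == 2);
}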
diff --git a/test/behavior/bit_shifting.zig b/test/behavior/bit_shifting.zig
index c1f84a99d4..0ac0ab965e 100644
--- a/test/behavior/bit_shifting.zig
+++ b/test/behavior/bit_shifting.zig
@@ -72,6 +72,7 @@ test "sharded table" {
try testShardedTable(u0, 0, 1);
}
+
fn testShardedTable(comptime Key: type, comptime mask_bit_count: comptime_int, comptime node_count: comptime_int) !void {
const Table = ShardedTable(Key, mask_bit_count, void);
diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig
index d1edefeb2d..c2eb49f854 100644
--- a/test/behavior/cast.zig
+++ b/test/behavior/cast.zig
@@ -2,6 +2,7 @@ const std = @import("std");
const expect = std.testing.expect;
const mem = std.mem;
const maxInt = std.math.maxInt;
+const builtin = @import("builtin");
test "int to ptr cast" {
const x = @as(usize, 13);
@@ -42,13 +43,6 @@ fn testResolveUndefWithInt(b: bool, x: i32) !void {
}
}
-test "@intCast i32 to u7" {
- var x: u128 = maxInt(u128);
- var y: i32 = 120;
- var z = x >> @intCast(u7, y);
- try expect(z == 0xff);
-}
-
test "@intCast to comptime_int" {
try expect(@intCast(comptime_int, 0) == 0);
}
@@ -252,10 +246,7 @@ test "array coersion to undefined at runtime" {
@setRuntimeSafety(true);
// TODO implement @setRuntimeSafety in stage2
- if (@import("builtin").zig_is_stage2 and
- @import("builtin").mode != .Debug and
- @import("builtin").mode != .ReleaseSafe)
- {
+ if (builtin.zig_is_stage2 and builtin.mode != .Debug and builtin.mode != .ReleaseSafe) {
return error.SkipZigTest;
}
diff --git a/test/behavior/cast_int.zig b/test/behavior/cast_int.zig
new file mode 100644
index 0000000000..c82c3ba3c0
--- /dev/null
+++ b/test/behavior/cast_int.zig
@@ -0,0 +1,10 @@
+const std = @import("std");
+const expect = std.testing.expect;
+const maxInt = std.math.maxInt;
+
+test "@intCast i32 to u7" {
+ var x: u128 = maxInt(u128);
+ var y: i32 = 120;
+ var z = x >> @intCast(u7, y);
+ try expect(z == 0xff);
+}
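Context for the relocated test: the right-hand side of a shift on a u128 must be a u7 (log2(128) = 7), which is why the runtime i32 goes through @intCast. A minimal sketch with a comptime-known shift amount, where the integer literal coerces to u7 directly and no cast is needed:

const std = @import("std");

test "comptime-known shift amount needs no cast" {
    var x: u128 = std.math.maxInt(u128);
    // Shifting the all-ones value right by 120 keeps the top 8 bits: 0xff.
    try std.testing.expect(x >> 120 == 0xff);
}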
diff --git a/test/behavior/enum_stage1.zig b/test/behavior/enum_stage1.zig
index c9658918f8..f8d9d0a1b2 100644
--- a/test/behavior/enum_stage1.zig
+++ b/test/behavior/enum_stage1.zig
@@ -1,6 +1,7 @@
-const expect = @import("std").testing.expect;
-const mem = @import("std").mem;
-const Tag = @import("std").meta.Tag;
+const std = @import("std");
+const expect = std.testing.expect;
+const mem = std.mem;
+const Tag = std.meta.Tag;
test "enum value allocation" {
const LargeEnum = enum(u32) {
diff --git a/test/behavior/error_llvm.zig b/test/behavior/error_llvm.zig
new file mode 100644
index 0000000000..edebd5f629
--- /dev/null
+++ b/test/behavior/error_llvm.zig
@@ -0,0 +1,24 @@
+const std = @import("std");
+const expect = std.testing.expect;
+const mem = std.mem;
+
+fn gimmeItBroke() anyerror {
+ return error.ItBroke;
+}
+
+test "@errorName" {
+ try expect(mem.eql(u8, @errorName(error.AnError), "AnError"));
+ try expect(mem.eql(u8, @errorName(error.ALongerErrorName), "ALongerErrorName"));
+ try expect(mem.eql(u8, @errorName(gimmeItBroke()), "ItBroke"));
+}
+
+test "@errorName sentinel length matches slice length" {
+ const name = testBuiltinErrorName(error.FooBar);
+ const length: usize = 6;
+ try expect(length == std.mem.indexOfSentinel(u8, 0, name.ptr));
+ try expect(length == name.len);
+}
+
+pub fn testBuiltinErrorName(err: anyerror) [:0]const u8 {
+ return @errorName(err);
+}
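Context for the sentinel test: @errorName returns [:0]const u8, a null-terminated slice whose len excludes the terminator, so the scanned sentinel length and the slice length must agree. A minimal sketch of the same property using a string literal, which also coerces to a sentinel slice:

const std = @import("std");

test "sentinel slice len excludes the terminator" {
    const name: [:0]const u8 = "FooBar";
    try std.testing.expect(name.len == 6);
    // Sentinel-terminated slices may be indexed at len; that element is the sentinel.
    try std.testing.expect(name[name.len] == 0);
    try std.testing.expect(std.mem.indexOfSentinel(u8, 0, name.ptr) == name.len);
}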
diff --git a/test/behavior/error_stage1.zig b/test/behavior/error_stage1.zig
index 5980b23156..2e4a6facf0 100644
--- a/test/behavior/error_stage1.zig
+++ b/test/behavior/error_stage1.zig
@@ -4,27 +4,6 @@ const expectError = std.testing.expectError;
const expectEqual = std.testing.expectEqual;
const mem = std.mem;
-fn gimmeItBroke() anyerror {
- return error.ItBroke;
-}
-
-test "@errorName" {
- try expect(mem.eql(u8, @errorName(error.AnError), "AnError"));
- try expect(mem.eql(u8, @errorName(error.ALongerErrorName), "ALongerErrorName"));
- try expect(mem.eql(u8, @errorName(gimmeItBroke()), "ItBroke"));
-}
-
-test "@errorName sentinel length matches slice length" {
- const name = testBuiltinErrorName(error.FooBar);
- const length: usize = 6;
- try expectEqual(length, std.mem.indexOfSentinel(u8, 0, name.ptr));
- try expectEqual(length, name.len);
-}
-
-pub fn testBuiltinErrorName(err: anyerror) [:0]const u8 {
- return @errorName(err);
-}
-
test "error union type " {
try testErrorUnionType();
comptime try testErrorUnionType();
diff --git a/test/behavior/import.zig b/test/behavior/import.zig
index e6c3d81111..c36aea7d2a 100644
--- a/test/behavior/import.zig
+++ b/test/behavior/import.zig
@@ -1,5 +1,6 @@
-const expect = @import("std").testing.expect;
-const expectEqual = @import("std").testing.expectEqual;
+const std = @import("std");
+const expect = std.testing.expect;
+const expectEqual = std.testing.expectEqual;
const a_namespace = @import("import/a_namespace.zig");
test "call fn via namespace lookup" {
diff --git a/test/behavior/int128.zig b/test/behavior/int128.zig
index 12367b2e9c..6bf0eca11e 100644
--- a/test/behavior/int128.zig
+++ b/test/behavior/int128.zig
@@ -2,6 +2,7 @@ const std = @import("std");
const expect = std.testing.expect;
const maxInt = std.math.maxInt;
const minInt = std.math.minInt;
+const builtin = @import("builtin");
test "uint128" {
var buff: u128 = maxInt(u128);
@@ -21,10 +22,7 @@ test "undefined 128 bit int" {
@setRuntimeSafety(true);
// TODO implement @setRuntimeSafety in stage2
- if (@import("builtin").zig_is_stage2 and
- @import("builtin").mode != .Debug and
- @import("builtin").mode != .ReleaseSafe)
- {
+ if (builtin.zig_is_stage2 and builtin.mode != .Debug and builtin.mode != .ReleaseSafe) {
return error.SkipZigTest;
}
diff --git a/test/behavior/ref_var_in_if_after_if_2nd_switch_prong.zig b/test/behavior/ref_var_in_if_after_if_2nd_switch_prong.zig
index 488bc1a232..df116c0565 100644
--- a/test/behavior/ref_var_in_if_after_if_2nd_switch_prong.zig
+++ b/test/behavior/ref_var_in_if_after_if_2nd_switch_prong.zig
@@ -1,5 +1,6 @@
-const expect = @import("std").testing.expect;
-const mem = @import("std").mem;
+const std = @import("std");
+const expect = std.testing.expect;
+const mem = std.mem;
var ok: bool = false;
test "reference a variable in an if after an if in the 2nd switch prong" {
diff --git a/test/behavior/reflection.zig b/test/behavior/reflection.zig
index e8596c2844..18ee9d5c8b 100644
--- a/test/behavior/reflection.zig
+++ b/test/behavior/reflection.zig
@@ -1,5 +1,6 @@
-const expect = @import("std").testing.expect;
-const mem = @import("std").mem;
+const std = @import("std");
+const expect = std.testing.expect;
+const mem = std.mem;
const reflection = @This();
test "reflection: function return type, var args, and param types" {
diff --git a/test/behavior/sizeof_and_typeof.zig b/test/behavior/sizeof_and_typeof.zig
index a0fe4855cb..2dbd4b3495 100644
--- a/test/behavior/sizeof_and_typeof.zig
+++ b/test/behavior/sizeof_and_typeof.zig
@@ -47,3 +47,116 @@ fn fn1(alpha: bool) void {
test "lazy @sizeOf result is checked for definedness" {
_ = fn1;
}
+
+const A = struct {
+ a: u8,
+ b: u32,
+ c: u8,
+ d: u3,
+ e: u5,
+ f: u16,
+ g: u16,
+ h: u9,
+ i: u7,
+};
+
+const P = packed struct {
+ a: u8,
+ b: u32,
+ c: u8,
+ d: u3,
+ e: u5,
+ f: u16,
+ g: u16,
+ h: u9,
+ i: u7,
+};
+
+test "@offsetOf" {
+
+ // Packed structs have fixed memory layout
+ try expect(@offsetOf(P, "a") == 0);
+ try expect(@offsetOf(P, "b") == 1);
+ try expect(@offsetOf(P, "c") == 5);
+ try expect(@offsetOf(P, "d") == 6);
+ try expect(@offsetOf(P, "e") == 6);
+ try expect(@offsetOf(P, "f") == 7);
+ try expect(@offsetOf(P, "g") == 9);
+ try expect(@offsetOf(P, "h") == 11);
+ try expect(@offsetOf(P, "i") == 12);
+
+ // Normal struct fields can be moved/padded
+ var a: A = undefined;
+ try expect(@ptrToInt(&a.a) - @ptrToInt(&a) == @offsetOf(A, "a"));
+ try expect(@ptrToInt(&a.b) - @ptrToInt(&a) == @offsetOf(A, "b"));
+ try expect(@ptrToInt(&a.c) - @ptrToInt(&a) == @offsetOf(A, "c"));
+ try expect(@ptrToInt(&a.d) - @ptrToInt(&a) == @offsetOf(A, "d"));
+ try expect(@ptrToInt(&a.e) - @ptrToInt(&a) == @offsetOf(A, "e"));
+ try expect(@ptrToInt(&a.f) - @ptrToInt(&a) == @offsetOf(A, "f"));
+ try expect(@ptrToInt(&a.g) - @ptrToInt(&a) == @offsetOf(A, "g"));
+ try expect(@ptrToInt(&a.h) - @ptrToInt(&a) == @offsetOf(A, "h"));
+ try expect(@ptrToInt(&a.i) - @ptrToInt(&a) == @offsetOf(A, "i"));
+}
+
+test "@offsetOf packed struct, array length not power of 2 or multiple of native pointer width in bytes" {
+ const p3a_len = 3;
+ const P3 = packed struct {
+ a: [p3a_len]u8,
+ b: usize,
+ };
+ try std.testing.expect(0 == @offsetOf(P3, "a"));
+ try std.testing.expect(p3a_len == @offsetOf(P3, "b"));
+
+ const p5a_len = 5;
+ const P5 = packed struct {
+ a: [p5a_len]u8,
+ b: usize,
+ };
+ try std.testing.expect(0 == @offsetOf(P5, "a"));
+ try std.testing.expect(p5a_len == @offsetOf(P5, "b"));
+
+ const p6a_len = 6;
+ const P6 = packed struct {
+ a: [p6a_len]u8,
+ b: usize,
+ };
+ try std.testing.expect(0 == @offsetOf(P6, "a"));
+ try std.testing.expect(p6a_len == @offsetOf(P6, "b"));
+
+ const p7a_len = 7;
+ const P7 = packed struct {
+ a: [p7a_len]u8,
+ b: usize,
+ };
+ try std.testing.expect(0 == @offsetOf(P7, "a"));
+ try std.testing.expect(p7a_len == @offsetOf(P7, "b"));
+
+ const p9a_len = 9;
+ const P9 = packed struct {
+ a: [p9a_len]u8,
+ b: usize,
+ };
+ try std.testing.expect(0 == @offsetOf(P9, "a"));
+ try std.testing.expect(p9a_len == @offsetOf(P9, "b"));
+
+ // 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22, 23, 25 etc. are further cases
+}
+
+test "@bitOffsetOf" {
+ // Packed structs have fixed memory layout
+ try expect(@bitOffsetOf(P, "a") == 0);
+ try expect(@bitOffsetOf(P, "b") == 8);
+ try expect(@bitOffsetOf(P, "c") == 40);
+ try expect(@bitOffsetOf(P, "d") == 48);
+ try expect(@bitOffsetOf(P, "e") == 51);
+ try expect(@bitOffsetOf(P, "f") == 56);
+ try expect(@bitOffsetOf(P, "g") == 72);
+
+ try expect(@offsetOf(A, "a") * 8 == @bitOffsetOf(A, "a"));
+ try expect(@offsetOf(A, "b") * 8 == @bitOffsetOf(A, "b"));
+ try expect(@offsetOf(A, "c") * 8 == @bitOffsetOf(A, "c"));
+ try expect(@offsetOf(A, "d") * 8 == @bitOffsetOf(A, "d"));
+ try expect(@offsetOf(A, "e") * 8 == @bitOffsetOf(A, "e"));
+ try expect(@offsetOf(A, "f") * 8 == @bitOffsetOf(A, "f"));
+ try expect(@offsetOf(A, "g") * 8 == @bitOffsetOf(A, "g"));
+}
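As the data above shows, packed struct fields have fixed bit offsets, and @offsetOf is the bit offset in whole bytes (integer division by 8), which is why d and e, sharing byte 6, report the same byte offset. A minimal sketch of that relationship, assuming the same packed-layout rules these tests rely on:

const std = @import("std");
const expect = std.testing.expect;

const Q = packed struct {
    a: u3,
    b: u5,
    c: u8,
};

test "packed byte offset is bit offset in whole bytes" {
    // a and b share byte 0; c begins at bit 8, i.e. byte 1.
    try expect(@bitOffsetOf(Q, "b") == 3);
    try expect(@offsetOf(Q, "b") == 0);
    try expect(@offsetOf(Q, "c") == @bitOffsetOf(Q, "c") / 8);
}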
diff --git a/test/behavior/sizeof_and_typeof_stage1.zig b/test/behavior/sizeof_and_typeof_stage1.zig
index 429b530a93..20cefef0e7 100644
--- a/test/behavior/sizeof_and_typeof_stage1.zig
+++ b/test/behavior/sizeof_and_typeof_stage1.zig
@@ -2,118 +2,6 @@ const std = @import("std");
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
-const A = struct {
- a: u8,
- b: u32,
- c: u8,
- d: u3,
- e: u5,
- f: u16,
- g: u16,
- h: u9,
- i: u7,
-};
-
-const P = packed struct {
- a: u8,
- b: u32,
- c: u8,
- d: u3,
- e: u5,
- f: u16,
- g: u16,
- h: u9,
- i: u7,
-};
-
-test "@offsetOf" {
- // Packed structs have fixed memory layout
- try expect(@offsetOf(P, "a") == 0);
- try expect(@offsetOf(P, "b") == 1);
- try expect(@offsetOf(P, "c") == 5);
- try expect(@offsetOf(P, "d") == 6);
- try expect(@offsetOf(P, "e") == 6);
- try expect(@offsetOf(P, "f") == 7);
- try expect(@offsetOf(P, "g") == 9);
- try expect(@offsetOf(P, "h") == 11);
- try expect(@offsetOf(P, "i") == 12);
-
- // Normal struct fields can be moved/padded
- var a: A = undefined;
- try expect(@ptrToInt(&a.a) - @ptrToInt(&a) == @offsetOf(A, "a"));
- try expect(@ptrToInt(&a.b) - @ptrToInt(&a) == @offsetOf(A, "b"));
- try expect(@ptrToInt(&a.c) - @ptrToInt(&a) == @offsetOf(A, "c"));
- try expect(@ptrToInt(&a.d) - @ptrToInt(&a) == @offsetOf(A, "d"));
- try expect(@ptrToInt(&a.e) - @ptrToInt(&a) == @offsetOf(A, "e"));
- try expect(@ptrToInt(&a.f) - @ptrToInt(&a) == @offsetOf(A, "f"));
- try expect(@ptrToInt(&a.g) - @ptrToInt(&a) == @offsetOf(A, "g"));
- try expect(@ptrToInt(&a.h) - @ptrToInt(&a) == @offsetOf(A, "h"));
- try expect(@ptrToInt(&a.i) - @ptrToInt(&a) == @offsetOf(A, "i"));
-}
-
-test "@offsetOf packed struct, array length not power of 2 or multiple of native pointer width in bytes" {
- const p3a_len = 3;
- const P3 = packed struct {
- a: [p3a_len]u8,
- b: usize,
- };
- try std.testing.expectEqual(0, @offsetOf(P3, "a"));
- try std.testing.expectEqual(p3a_len, @offsetOf(P3, "b"));
-
- const p5a_len = 5;
- const P5 = packed struct {
- a: [p5a_len]u8,
- b: usize,
- };
- try std.testing.expectEqual(0, @offsetOf(P5, "a"));
- try std.testing.expectEqual(p5a_len, @offsetOf(P5, "b"));
-
- const p6a_len = 6;
- const P6 = packed struct {
- a: [p6a_len]u8,
- b: usize,
- };
- try std.testing.expectEqual(0, @offsetOf(P6, "a"));
- try std.testing.expectEqual(p6a_len, @offsetOf(P6, "b"));
-
- const p7a_len = 7;
- const P7 = packed struct {
- a: [p7a_len]u8,
- b: usize,
- };
- try std.testing.expectEqual(0, @offsetOf(P7, "a"));
- try std.testing.expectEqual(p7a_len, @offsetOf(P7, "b"));
-
- const p9a_len = 9;
- const P9 = packed struct {
- a: [p9a_len]u8,
- b: usize,
- };
- try std.testing.expectEqual(0, @offsetOf(P9, "a"));
- try std.testing.expectEqual(p9a_len, @offsetOf(P9, "b"));
-
- // 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22, 23, 25 etc. are further cases
-}
-
-test "@bitOffsetOf" {
- // Packed structs have fixed memory layout
- try expect(@bitOffsetOf(P, "a") == 0);
- try expect(@bitOffsetOf(P, "b") == 8);
- try expect(@bitOffsetOf(P, "c") == 40);
- try expect(@bitOffsetOf(P, "d") == 48);
- try expect(@bitOffsetOf(P, "e") == 51);
- try expect(@bitOffsetOf(P, "f") == 56);
- try expect(@bitOffsetOf(P, "g") == 72);
-
- try expect(@offsetOf(A, "a") * 8 == @bitOffsetOf(A, "a"));
- try expect(@offsetOf(A, "b") * 8 == @bitOffsetOf(A, "b"));
- try expect(@offsetOf(A, "c") * 8 == @bitOffsetOf(A, "c"));
- try expect(@offsetOf(A, "d") * 8 == @bitOffsetOf(A, "d"));
- try expect(@offsetOf(A, "e") * 8 == @bitOffsetOf(A, "e"));
- try expect(@offsetOf(A, "f") * 8 == @bitOffsetOf(A, "f"));
- try expect(@offsetOf(A, "g") * 8 == @bitOffsetOf(A, "g"));
-}
-
test "@sizeOf(T) == 0 doesn't force resolving struct size" {
const S = struct {
const Foo = struct {
diff --git a/test/behavior/translate_c_macros.zig b/test/behavior/translate_c_macros.zig
index 0baaf24283..0e4841020e 100644
--- a/test/behavior/translate_c_macros.zig
+++ b/test/behavior/translate_c_macros.zig
@@ -1,5 +1,6 @@
-const expect = @import("std").testing.expect;
-const expectEqual = @import("std").testing.expectEqual;
+const std = @import("std");
+const expect = std.testing.expect;
+const expectEqual = std.testing.expectEqual;
const h = @cImport(@cInclude("behavior/translate_c_macros.h"));
diff --git a/test/behavior/translate_c_macros_stage1.zig b/test/behavior/translate_c_macros_stage1.zig
index 40fbbf5263..c380508a37 100644
--- a/test/behavior/translate_c_macros_stage1.zig
+++ b/test/behavior/translate_c_macros_stage1.zig
@@ -1,5 +1,6 @@
-const expect = @import("std").testing.expect;
-const expectEqual = @import("std").testing.expectEqual;
+const std = @import("std");
+const expect = std.testing.expect;
+const expectEqual = std.testing.expectEqual;
const h = @cImport(@cInclude("behavior/translate_c_macros.h"));
diff --git a/test/behavior/widening.zig b/test/behavior/widening.zig
index 3074e96e61..afca1ae143 100644
--- a/test/behavior/widening.zig
+++ b/test/behavior/widening.zig
@@ -1,6 +1,7 @@
const std = @import("std");
const expect = std.testing.expect;
const mem = std.mem;
+const builtin = @import("builtin");
test "integer widening" {
var a: u8 = 250;
@@ -30,8 +31,8 @@ test "float widening" {
test "float widening f16 to f128" {
// TODO https://github.com/ziglang/zig/issues/3282
- if (@import("builtin").cpu.arch == .aarch64) return error.SkipZigTest;
- if (@import("builtin").cpu.arch == .powerpc64le) return error.SkipZigTest;
+ if (builtin.cpu.arch == .aarch64) return error.SkipZigTest;
+ if (builtin.cpu.arch == .powerpc64le) return error.SkipZigTest;
var x: f16 = 12.34;
var y: f128 = x;
diff --git a/test/stage2/x86_64.zig b/test/stage2/x86_64.zig
index 878964e8a2..b0fe97e8f9 100644
--- a/test/stage2/x86_64.zig
+++ b/test/stage2/x86_64.zig
@@ -1761,6 +1761,25 @@ pub fn addCases(ctx: *TestContext) !void {
\\}
, "");
}
+
+ {
+ var case = ctx.exe("access slice element by index - slice_elem_val", target);
+ case.addCompareOutput(
+ \\var array = [_]usize{ 0, 42, 123, 34 };
+ \\var slice: []const usize = &array;
+ \\
+ \\pub fn main() void {
+ \\ assert(slice[0] == 0);
+ \\ assert(slice[1] == 42);
+ \\ assert(slice[2] == 123);
+ \\ assert(slice[3] == 34);
+ \\}
+ \\
+ \\fn assert(ok: bool) void {
+ \\ if (!ok) unreachable;
+ \\}
+ , "");
+ }
}
}
@@ -2014,26 +2033,6 @@ fn addLinuxTestCases(ctx: *TestContext) !void {
\\}
, "");
}
-
- {
- // TODO fixing this will enable zig test on macOS
- var case = ctx.exe("access slice element by index - slice_elem_val", linux_x64);
- case.addCompareOutput(
- \\var array = [_]usize{ 0, 42, 123, 34 };
- \\var slice: []const usize = &array;
- \\
- \\pub fn main() void {
- \\ assert(slice[0] == 0);
- \\ assert(slice[1] == 42);
- \\ assert(slice[2] == 123);
- \\ assert(slice[3] == 34);
- \\}
- \\
- \\fn assert(ok: bool) void {
- \\ if (!ok) unreachable;
- \\}
- , "");
- }
}
fn addMacOsTestCases(ctx: *TestContext) !void {
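The relocated case embeds its program with multiline string literals: each \\-prefixed line contributes one line to the string, with newlines between consecutive lines and none after the last. A minimal sketch of the syntax:

const std = @import("std");

test "multiline string literal" {
    const src =
        \\pub fn main() void {}
    ;
    try std.testing.expect(std.mem.eql(u8, src, "pub fn main() void {}"));
}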
diff --git a/test/standalone/install_raw_hex/build.zig b/test/standalone/install_raw_hex/build.zig
index 9a0cba7ae8..901cff11f0 100644
--- a/test/standalone/install_raw_hex/build.zig
+++ b/test/standalone/install_raw_hex/build.zig
@@ -1,9 +1,8 @@
-const Builder = @import("std").build.Builder;
const builtin = @import("builtin");
const std = @import("std");
const CheckFileStep = std.build.CheckFileStep;
-pub fn build(b: *Builder) void {
+pub fn build(b: *std.build.Builder) void {
const target = .{
.cpu_arch = .thumb,
.cpu_model = .{ .explicit = &std.Target.arm.cpu.cortex_m4 },