aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--doc/langref.html.in10
-rw-r--r--lib/std/c.zig5
-rw-r--r--lib/std/child_process.zig4
-rw-r--r--lib/std/coff.zig66
-rw-r--r--lib/std/fmt.zig2
-rw-r--r--lib/std/fmt/parse_float.zig5
-rw-r--r--lib/std/log.zig4
-rw-r--r--lib/std/meta.zig16
-rw-r--r--lib/std/net.zig3
-rw-r--r--lib/std/os.zig106
-rw-r--r--lib/std/os/bits/darwin.zig8
-rw-r--r--lib/std/os/bits/dragonfly.zig13
-rw-r--r--lib/std/os/bits/freebsd.zig8
-rw-r--r--lib/std/os/bits/linux.zig8
-rw-r--r--lib/std/os/bits/linux/x86_64.zig5
-rw-r--r--lib/std/os/linux.zig82
-rw-r--r--lib/std/os/test.zig36
-rw-r--r--lib/std/os/windows/kernel32.zig2
-rw-r--r--lib/std/process.zig8
-rw-r--r--lib/std/progress.zig6
-rw-r--r--lib/std/special/test_runner.zig2
-rw-r--r--lib/std/target.zig58
-rw-r--r--lib/std/zig/tokenizer.zig3
-rw-r--r--src-self-hosted/Module.zig217
-rw-r--r--src-self-hosted/astgen.zig100
-rw-r--r--src-self-hosted/codegen.zig341
-rw-r--r--src-self-hosted/codegen/c.zig2
-rw-r--r--src-self-hosted/link.zig22
-rw-r--r--src-self-hosted/link/Coff.zig792
-rw-r--r--src-self-hosted/link/Elf.zig16
-rw-r--r--src-self-hosted/link/MachO.zig661
-rw-r--r--src-self-hosted/link/msdos-stub.binbin0 -> 128 bytes
-rw-r--r--src-self-hosted/main.zig14
-rw-r--r--src-self-hosted/type.zig118
-rw-r--r--src-self-hosted/value.zig6
-rw-r--r--src-self-hosted/zir.zig24
-rw-r--r--src-self-hosted/zir_sema.zig24
-rw-r--r--src/ir.cpp4
-rw-r--r--test/compile_errors.zig2
39 files changed, 2335 insertions, 468 deletions
diff --git a/doc/langref.html.in b/doc/langref.html.in
index b01b543a40..10bc81e6df 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -6889,7 +6889,7 @@ fn func(y: *i32) void {
This builtin function atomically dereferences a pointer and returns the value.
</p>
<p>
- {#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
+ {#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float,
an integer or an enum.
</p>
{#header_close#}
@@ -6899,7 +6899,7 @@ fn func(y: *i32) void {
This builtin function atomically modifies memory and then returns the previous value.
</p>
<p>
- {#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
+ {#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float,
an integer or an enum.
</p>
<p>
@@ -6925,7 +6925,7 @@ fn func(y: *i32) void {
This builtin function atomically stores a value.
</p>
<p>
- {#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
+ {#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float,
an integer or an enum.
</p>
{#header_close#}
@@ -7208,7 +7208,7 @@ fn cmpxchgStrongButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_v
more efficiently in machine instructions.
</p>
<p>
- {#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
+ {#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float,
an integer or an enum.
</p>
<p>{#syntax#}@typeInfo(@TypeOf(ptr)).Pointer.alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}</p>
@@ -7237,7 +7237,7 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val
However if you need a stronger guarantee, use {#link|@cmpxchgStrong#}.
</p>
<p>
- {#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
+ {#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float,
an integer or an enum.
</p>
<p>{#syntax#}@typeInfo(@TypeOf(ptr)).Pointer.alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}</p>
diff --git a/lib/std/c.zig b/lib/std/c.zig
index b4e5fc7392..1b3f403ab5 100644
--- a/lib/std/c.zig
+++ b/lib/std/c.zig
@@ -330,3 +330,8 @@ pub const FILE = @Type(.Opaque);
pub extern "c" fn dlopen(path: [*:0]const u8, mode: c_int) ?*c_void;
pub extern "c" fn dlclose(handle: *c_void) c_int;
pub extern "c" fn dlsym(handle: ?*c_void, symbol: [*:0]const u8) ?*c_void;
+
+pub extern "c" fn sync() void;
+pub extern "c" fn syncfs(fd: c_int) c_int;
+pub extern "c" fn fsync(fd: c_int) c_int;
+pub extern "c" fn fdatasync(fd: c_int) c_int;
diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig
index b5ed2c72c6..ed6a3a739e 100644
--- a/lib/std/child_process.zig
+++ b/lib/std/child_process.zig
@@ -44,10 +44,10 @@ pub const ChildProcess = struct {
stderr_behavior: StdIo,
/// Set to change the user id when spawning the child process.
- uid: if (builtin.os.tag == .windows) void else ?u32,
+ uid: if (builtin.os.tag == .windows or builtin.os.tag == .wasi) void else ?os.uid_t,
/// Set to change the group id when spawning the child process.
- gid: if (builtin.os.tag == .windows) void else ?u32,
+ gid: if (builtin.os.tag == .windows or builtin.os.tag == .wasi) void else ?os.gid_t,
/// Set to change the current working directory when spawning the child process.
cwd: ?[]const u8,
diff --git a/lib/std/coff.zig b/lib/std/coff.zig
index cd567b3a6e..ea3a232187 100644
--- a/lib/std/coff.zig
+++ b/lib/std/coff.zig
@@ -18,11 +18,77 @@ const IMAGE_FILE_MACHINE_I386 = 0x014c;
const IMAGE_FILE_MACHINE_IA64 = 0x0200;
const IMAGE_FILE_MACHINE_AMD64 = 0x8664;
+pub const MachineType = enum(u16) {
+ Unknown = 0x0,
+ /// Matsushita AM33
+ AM33 = 0x1d3,
+ /// x64
+ X64 = 0x8664,
+ /// ARM little endian
+ ARM = 0x1c0,
+ /// ARM64 little endian
+ ARM64 = 0xaa64,
+ /// ARM Thumb-2 little endian
+ ARMNT = 0x1c4,
+ /// EFI byte code
+ EBC = 0xebc,
+ /// Intel 386 or later processors and compatible processors
+ I386 = 0x14c,
+ /// Intel Itanium processor family
+ IA64 = 0x200,
+ /// Mitsubishi M32R little endian
+ M32R = 0x9041,
+ /// MIPS16
+ MIPS16 = 0x266,
+ /// MIPS with FPU
+ MIPSFPU = 0x366,
+ /// MIPS16 with FPU
+ MIPSFPU16 = 0x466,
+ /// Power PC little endian
+ POWERPC = 0x1f0,
+ /// Power PC with floating point support
+ POWERPCFP = 0x1f1,
+ /// MIPS little endian
+ R4000 = 0x166,
+ /// RISC-V 32-bit address space
+ RISCV32 = 0x5032,
+ /// RISC-V 64-bit address space
+ RISCV64 = 0x5064,
+ /// RISC-V 128-bit address space
+ RISCV128 = 0x5128,
+ /// Hitachi SH3
+ SH3 = 0x1a2,
+ /// Hitachi SH3 DSP
+ SH3DSP = 0x1a3,
+ /// Hitachi SH4
+ SH4 = 0x1a6,
+ /// Hitachi SH5
+ SH5 = 0x1a8,
+ /// Thumb
+ Thumb = 0x1c2,
+ /// MIPS little-endian WCE v2
+ WCEMIPSV2 = 0x169,
+};
+
// OptionalHeader.magic values
// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms680339(v=vs.85).aspx
const IMAGE_NT_OPTIONAL_HDR32_MAGIC = 0x10b;
const IMAGE_NT_OPTIONAL_HDR64_MAGIC = 0x20b;
+// Image Characteristics
+pub const IMAGE_FILE_RELOCS_STRIPPED = 0x1;
+pub const IMAGE_FILE_DEBUG_STRIPPED = 0x200;
+pub const IMAGE_FILE_EXECUTABLE_IMAGE = 0x2;
+pub const IMAGE_FILE_32BIT_MACHINE = 0x100;
+pub const IMAGE_FILE_LARGE_ADDRESS_AWARE = 0x20;
+
+// Section flags
+pub const IMAGE_SCN_CNT_INITIALIZED_DATA = 0x40;
+pub const IMAGE_SCN_MEM_READ = 0x40000000;
+pub const IMAGE_SCN_CNT_CODE = 0x20;
+pub const IMAGE_SCN_MEM_EXECUTE = 0x20000000;
+pub const IMAGE_SCN_MEM_WRITE = 0x80000000;
+
const IMAGE_NUMBEROF_DIRECTORY_ENTRIES = 16;
const IMAGE_DEBUG_TYPE_CODEVIEW = 2;
const DEBUG_DIRECTORY = 6;
diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig
index 5b18c8731b..a652bd8c21 100644
--- a/lib/std/fmt.zig
+++ b/lib/std/fmt.zig
@@ -82,6 +82,8 @@ fn peekIsAlign(comptime fmt: []const u8) bool {
/// This allows user types to be formatted in a logical manner instead of dumping all fields of the type.
///
/// A user type may be a `struct`, `vector`, `union` or `enum` type.
+///
+/// To print literal curly braces, escape them by writing them twice, e.g. `{{` or `}}`.
pub fn format(
writer: anytype,
comptime fmt: []const u8,
diff --git a/lib/std/fmt/parse_float.zig b/lib/std/fmt/parse_float.zig
index e4d3c10d92..de17c60db6 100644
--- a/lib/std/fmt/parse_float.zig
+++ b/lib/std/fmt/parse_float.zig
@@ -37,7 +37,9 @@
const std = @import("../std.zig");
const ascii = std.ascii;
-const max_digits = 25;
+// The mantissa field in FloatRepr is 64 bits wide and can hold only 19 digits
+// without overflowing
+const max_digits = 19;
const f64_plus_zero: u64 = 0x0000000000000000;
const f64_minus_zero: u64 = 0x8000000000000000;
@@ -409,6 +411,7 @@ test "fmt.parseFloat" {
expect(approxEq(T, try parseFloat(T, "123142.1"), 123142.1, epsilon));
expect(approxEq(T, try parseFloat(T, "-123142.1124"), @as(T, -123142.1124), epsilon));
expect(approxEq(T, try parseFloat(T, "0.7062146892655368"), @as(T, 0.7062146892655368), epsilon));
+ expect(approxEq(T, try parseFloat(T, "2.71828182845904523536"), @as(T, 2.718281828459045), epsilon));
}
}
}
diff --git a/lib/std/log.zig b/lib/std/log.zig
index 50bdfdc068..7b677f698a 100644
--- a/lib/std/log.zig
+++ b/lib/std/log.zig
@@ -127,6 +127,10 @@ fn log(
if (@enumToInt(message_level) <= @enumToInt(level)) {
if (@hasDecl(root, "log")) {
root.log(message_level, scope, format, args);
+ } else if (std.Target.current.os.tag == .freestanding) {
+ // On freestanding one must provide a log function; we do not have
+ // any I/O configured.
+ return;
} else if (builtin.mode != .ReleaseSmall) {
const held = std.debug.getStderrMutex().acquire();
defer held.release();
diff --git a/lib/std/meta.zig b/lib/std/meta.zig
index aaa8e7ca78..73e0661498 100644
--- a/lib/std/meta.zig
+++ b/lib/std/meta.zig
@@ -705,34 +705,34 @@ pub fn Vector(comptime len: u32, comptime child: type) type {
pub fn cast(comptime DestType: type, target: anytype) DestType {
const TargetType = @TypeOf(target);
switch (@typeInfo(DestType)) {
- .Pointer => {
+ .Pointer => |dest_ptr| {
switch (@typeInfo(TargetType)) {
.Int, .ComptimeInt => {
return @intToPtr(DestType, target);
},
.Pointer => |ptr| {
- return @ptrCast(DestType, @alignCast(ptr.alignment, target));
+ return @ptrCast(DestType, @alignCast(dest_ptr.alignment, target));
},
.Optional => |opt| {
if (@typeInfo(opt.child) == .Pointer) {
- return @ptrCast(DestType, @alignCast(@alignOf(opt.child.Child), target));
+                    return @ptrCast(DestType, @alignCast(dest_ptr.alignment, target));
}
},
else => {},
}
},
- .Optional => |opt| {
- if (@typeInfo(opt.child) == .Pointer) {
+ .Optional => |dest_opt| {
+ if (@typeInfo(dest_opt.child) == .Pointer) {
switch (@typeInfo(TargetType)) {
.Int, .ComptimeInt => {
return @intToPtr(DestType, target);
},
- .Pointer => |ptr| {
- return @ptrCast(DestType, @alignCast(ptr.alignment, target));
+ .Pointer => {
+ return @ptrCast(DestType, @alignCast(@alignOf(dest_opt.child.Child), target));
},
.Optional => |target_opt| {
if (@typeInfo(target_opt.child) == .Pointer) {
- return @ptrCast(DestType, @alignCast(@alignOf(target_opt.child.Child), target));
+ return @ptrCast(DestType, @alignCast(@alignOf(dest_opt.child.Child), target));
}
},
else => {},
diff --git a/lib/std/net.zig b/lib/std/net.zig
index 5a1407c35f..45d8f07f04 100644
--- a/lib/std/net.zig
+++ b/lib/std/net.zig
@@ -1641,6 +1641,9 @@ pub const StreamServer = struct {
/// by the socket buffer limits, not by the system memory.
SystemResources,
+ /// Socket is not listening for new connections.
+ SocketNotListening,
+
ProtocolFailure,
/// Firewall rules forbid connection.
diff --git a/lib/std/os.zig b/lib/std/os.zig
index 81c059c220..181bf4930d 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -2512,13 +2512,14 @@ pub fn readlinkatZ(dirfd: fd_t, file_path: [*:0]const u8, out_buffer: []u8) Read
}
}
-pub const SetIdError = error{
- ResourceLimitReached,
+pub const SetEidError = error{
InvalidUserId,
PermissionDenied,
-} || UnexpectedError;
+};
-pub fn setuid(uid: u32) SetIdError!void {
+pub const SetIdError = error{ResourceLimitReached} || SetEidError || UnexpectedError;
+
+pub fn setuid(uid: uid_t) SetIdError!void {
switch (errno(system.setuid(uid))) {
0 => return,
EAGAIN => return error.ResourceLimitReached,
@@ -2528,7 +2529,16 @@ pub fn setuid(uid: u32) SetIdError!void {
}
}
-pub fn setreuid(ruid: u32, euid: u32) SetIdError!void {
+pub fn seteuid(uid: uid_t) SetEidError!void {
+ switch (errno(system.seteuid(uid))) {
+ 0 => return,
+ EINVAL => return error.InvalidUserId,
+ EPERM => return error.PermissionDenied,
+ else => |err| return unexpectedErrno(err),
+ }
+}
+
+pub fn setreuid(ruid: uid_t, euid: uid_t) SetIdError!void {
switch (errno(system.setreuid(ruid, euid))) {
0 => return,
EAGAIN => return error.ResourceLimitReached,
@@ -2538,7 +2548,7 @@ pub fn setreuid(ruid: u32, euid: u32) SetIdError!void {
}
}
-pub fn setgid(gid: u32) SetIdError!void {
+pub fn setgid(gid: gid_t) SetIdError!void {
switch (errno(system.setgid(gid))) {
0 => return,
EAGAIN => return error.ResourceLimitReached,
@@ -2548,7 +2558,16 @@ pub fn setgid(gid: u32) SetIdError!void {
}
}
-pub fn setregid(rgid: u32, egid: u32) SetIdError!void {
+pub fn setegid(gid: gid_t) SetEidError!void {
+    switch (errno(system.setegid(gid))) {
+ 0 => return,
+ EINVAL => return error.InvalidUserId,
+ EPERM => return error.PermissionDenied,
+ else => |err| return unexpectedErrno(err),
+ }
+}
+
+pub fn setregid(rgid: gid_t, egid: gid_t) SetIdError!void {
switch (errno(system.setregid(rgid, egid))) {
0 => return,
EAGAIN => return error.ResourceLimitReached,
@@ -2802,6 +2821,9 @@ pub const AcceptError = error{
/// by the socket buffer limits, not by the system memory.
SystemResources,
+ /// Socket is not listening for new connections.
+ SocketNotListening,
+
ProtocolFailure,
/// Firewall rules forbid connection.
@@ -2870,7 +2892,7 @@ pub fn accept(
EBADF => unreachable, // always a race condition
ECONNABORTED => return error.ConnectionAborted,
EFAULT => unreachable,
- EINVAL => unreachable,
+ EINVAL => return error.SocketNotListening,
ENOTSOCK => unreachable,
EMFILE => return error.ProcessFdQuotaExceeded,
ENFILE => return error.SystemFdQuotaExceeded,
@@ -5328,3 +5350,71 @@ pub fn signalfd(fd: fd_t, mask: *const sigset_t, flags: u32) !fd_t {
else => |err| return std.os.unexpectedErrno(err),
}
}
+
+pub const SyncError = error{
+ InputOutput,
+ NoSpaceLeft,
+ DiskQuota,
+ AccessDenied,
+} || UnexpectedError;
+
+/// Write all pending file contents and metadata modifications to all filesystems.
+pub fn sync() void {
+ system.sync();
+}
+
+/// Write all pending file contents and metadata modifications to the filesystem which contains the specified file.
+pub fn syncfs(fd: fd_t) SyncError!void {
+ const rc = system.syncfs(fd);
+ switch (errno(rc)) {
+ 0 => return,
+ EBADF, EINVAL, EROFS => unreachable,
+ EIO => return error.InputOutput,
+ ENOSPC => return error.NoSpaceLeft,
+ EDQUOT => return error.DiskQuota,
+ else => |err| return std.os.unexpectedErrno(err),
+ }
+}
+
+/// Write all pending file contents and metadata modifications for the specified file descriptor to the underlying filesystem.
+pub fn fsync(fd: fd_t) SyncError!void {
+ if (std.Target.current.os.tag == .windows) {
+ if (windows.kernel32.FlushFileBuffers(fd) != 0)
+ return;
+ switch (windows.kernel32.GetLastError()) {
+ .SUCCESS => return,
+ .INVALID_HANDLE => unreachable,
+ .ACCESS_DENIED => return error.AccessDenied, // a sync was performed but the system couldn't update the access time
+ .UNEXP_NET_ERR => return error.InputOutput,
+ else => return error.InputOutput,
+ }
+ }
+ const rc = system.fsync(fd);
+ switch (errno(rc)) {
+ 0 => return,
+ EBADF, EINVAL, EROFS => unreachable,
+ EIO => return error.InputOutput,
+ ENOSPC => return error.NoSpaceLeft,
+ EDQUOT => return error.DiskQuota,
+ else => |err| return std.os.unexpectedErrno(err),
+ }
+}
+
+/// Write all pending file contents for the specified file descriptor to the underlying filesystem, but not necessarily the metadata.
+pub fn fdatasync(fd: fd_t) SyncError!void {
+ if (std.Target.current.os.tag == .windows) {
+ return fsync(fd) catch |err| switch (err) {
+ SyncError.AccessDenied => return, // fdatasync doesn't promise that the access time was synced
+ else => return err,
+ };
+ }
+ const rc = system.fdatasync(fd);
+ switch (errno(rc)) {
+ 0 => return,
+ EBADF, EINVAL, EROFS => unreachable,
+ EIO => return error.InputOutput,
+ ENOSPC => return error.NoSpaceLeft,
+ EDQUOT => return error.DiskQuota,
+ else => |err| return std.os.unexpectedErrno(err),
+ }
+}
diff --git a/lib/std/os/bits/darwin.zig b/lib/std/os/bits/darwin.zig
index 375127f278..ce73d2a6dc 100644
--- a/lib/std/os/bits/darwin.zig
+++ b/lib/std/os/bits/darwin.zig
@@ -7,9 +7,13 @@ const std = @import("../../std.zig");
const assert = std.debug.assert;
const maxInt = std.math.maxInt;
+// See: https://opensource.apple.com/source/xnu/xnu-6153.141.1/bsd/sys/_types.h.auto.html
+// TODO: audit mode_t/pid_t, should likely be u16/i32
pub const fd_t = c_int;
pub const pid_t = c_int;
pub const mode_t = c_uint;
+pub const uid_t = u32;
+pub const gid_t = u32;
pub const in_port_t = u16;
pub const sa_family_t = u8;
@@ -79,8 +83,8 @@ pub const Stat = extern struct {
mode: u16,
nlink: u16,
ino: ino_t,
- uid: u32,
- gid: u32,
+ uid: uid_t,
+ gid: gid_t,
rdev: i32,
atimesec: isize,
atimensec: isize,
diff --git a/lib/std/os/bits/dragonfly.zig b/lib/std/os/bits/dragonfly.zig
index 8b6d6be212..1412aa5c41 100644
--- a/lib/std/os/bits/dragonfly.zig
+++ b/lib/std/os/bits/dragonfly.zig
@@ -9,10 +9,17 @@ const maxInt = std.math.maxInt;
pub fn S_ISCHR(m: u32) bool {
return m & S_IFMT == S_IFCHR;
}
+
+// See:
+// - https://gitweb.dragonflybsd.org/dragonfly.git/blob/HEAD:/include/unistd.h
+// - https://gitweb.dragonflybsd.org/dragonfly.git/blob/HEAD:/sys/sys/types.h
+// TODO: mode_t should probably be changed to a u16, audit pid_t/off_t as well
pub const fd_t = c_int;
pub const pid_t = c_int;
pub const off_t = c_long;
pub const mode_t = c_uint;
+pub const uid_t = u32;
+pub const gid_t = u32;
pub const ENOTSUP = EOPNOTSUPP;
pub const EWOULDBLOCK = EAGAIN;
@@ -151,8 +158,8 @@ pub const Stat = extern struct {
dev: c_uint,
mode: c_ushort,
padding1: u16,
- uid: c_uint,
- gid: c_uint,
+ uid: uid_t,
+ gid: gid_t,
rdev: c_uint,
atim: timespec,
mtim: timespec,
@@ -511,7 +518,7 @@ pub const siginfo_t = extern struct {
si_errno: c_int,
si_code: c_int,
si_pid: c_int,
- si_uid: c_uint,
+ si_uid: uid_t,
si_status: c_int,
si_addr: ?*c_void,
si_value: union_sigval,
diff --git a/lib/std/os/bits/freebsd.zig b/lib/std/os/bits/freebsd.zig
index 22edf4b9d1..32936f7515 100644
--- a/lib/std/os/bits/freebsd.zig
+++ b/lib/std/os/bits/freebsd.zig
@@ -6,8 +6,12 @@
const std = @import("../../std.zig");
const maxInt = std.math.maxInt;
+// See https://svnweb.freebsd.org/base/head/sys/sys/_types.h?view=co
+// TODO: audit pid_t/mode_t. They should likely be i32 and u16, respectively
pub const fd_t = c_int;
pub const pid_t = c_int;
+pub const uid_t = u32;
+pub const gid_t = u32;
pub const mode_t = c_uint;
pub const socklen_t = u32;
@@ -128,8 +132,8 @@ pub const Stat = extern struct {
mode: u16,
__pad0: u16,
- uid: u32,
- gid: u32,
+ uid: uid_t,
+ gid: gid_t,
__pad1: u32,
rdev: u64,
diff --git a/lib/std/os/bits/linux.zig b/lib/std/os/bits/linux.zig
index eff1cc7c02..6d85d06236 100644
--- a/lib/std/os/bits/linux.zig
+++ b/lib/std/os/bits/linux.zig
@@ -29,7 +29,7 @@ const is_mips = builtin.arch.isMIPS();
pub const pid_t = i32;
pub const fd_t = i32;
-pub const uid_t = i32;
+pub const uid_t = u32;
pub const gid_t = u32;
pub const clock_t = isize;
@@ -853,7 +853,7 @@ pub const signalfd_siginfo = extern struct {
errno: i32,
code: i32,
pid: u32,
- uid: u32,
+ uid: uid_t,
fd: i32,
tid: u32,
band: u32,
@@ -1491,10 +1491,10 @@ pub const Statx = extern struct {
nlink: u32,
/// User ID of owner
- uid: u32,
+ uid: uid_t,
/// Group ID of owner
- gid: u32,
+ gid: gid_t,
/// File type and mode
mode: u16,
diff --git a/lib/std/os/bits/linux/x86_64.zig b/lib/std/os/bits/linux/x86_64.zig
index 0800feeddf..0f01c40813 100644
--- a/lib/std/os/bits/linux/x86_64.zig
+++ b/lib/std/os/bits/linux/x86_64.zig
@@ -7,6 +7,7 @@
const std = @import("../../../std.zig");
const pid_t = linux.pid_t;
const uid_t = linux.uid_t;
+const gid_t = linux.gid_t;
const clock_t = linux.clock_t;
const stack_t = linux.stack_t;
const sigset_t = linux.sigset_t;
@@ -523,8 +524,8 @@ pub const Stat = extern struct {
nlink: usize,
mode: u32,
- uid: u32,
- gid: u32,
+ uid: uid_t,
+ gid: gid_t,
__pad0: u32,
rdev: u64,
size: off_t,
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index 134b117e85..8f697fb967 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -655,7 +655,7 @@ pub fn nanosleep(req: *const timespec, rem: ?*timespec) usize {
return syscall2(.nanosleep, @ptrToInt(req), @ptrToInt(rem));
}
-pub fn setuid(uid: u32) usize {
+pub fn setuid(uid: uid_t) usize {
if (@hasField(SYS, "setuid32")) {
return syscall1(.setuid32, uid);
} else {
@@ -663,7 +663,7 @@ pub fn setuid(uid: u32) usize {
}
}
-pub fn setgid(gid: u32) usize {
+pub fn setgid(gid: gid_t) usize {
if (@hasField(SYS, "setgid32")) {
return syscall1(.setgid32, gid);
} else {
@@ -671,7 +671,7 @@ pub fn setgid(gid: u32) usize {
}
}
-pub fn setreuid(ruid: u32, euid: u32) usize {
+pub fn setreuid(ruid: uid_t, euid: uid_t) usize {
if (@hasField(SYS, "setreuid32")) {
return syscall2(.setreuid32, ruid, euid);
} else {
@@ -679,7 +679,7 @@ pub fn setreuid(ruid: u32, euid: u32) usize {
}
}
-pub fn setregid(rgid: u32, egid: u32) usize {
+pub fn setregid(rgid: gid_t, egid: gid_t) usize {
if (@hasField(SYS, "setregid32")) {
return syscall2(.setregid32, rgid, egid);
} else {
@@ -687,47 +687,61 @@ pub fn setregid(rgid: u32, egid: u32) usize {
}
}
-pub fn getuid() u32 {
+pub fn getuid() uid_t {
if (@hasField(SYS, "getuid32")) {
- return @as(u32, syscall0(.getuid32));
+ return @as(uid_t, syscall0(.getuid32));
} else {
- return @as(u32, syscall0(.getuid));
+ return @as(uid_t, syscall0(.getuid));
}
}
-pub fn getgid() u32 {
+pub fn getgid() gid_t {
if (@hasField(SYS, "getgid32")) {
- return @as(u32, syscall0(.getgid32));
+ return @as(gid_t, syscall0(.getgid32));
} else {
- return @as(u32, syscall0(.getgid));
+ return @as(gid_t, syscall0(.getgid));
}
}
-pub fn geteuid() u32 {
+pub fn geteuid() uid_t {
if (@hasField(SYS, "geteuid32")) {
- return @as(u32, syscall0(.geteuid32));
+ return @as(uid_t, syscall0(.geteuid32));
} else {
- return @as(u32, syscall0(.geteuid));
+ return @as(uid_t, syscall0(.geteuid));
}
}
-pub fn getegid() u32 {
+pub fn getegid() gid_t {
if (@hasField(SYS, "getegid32")) {
- return @as(u32, syscall0(.getegid32));
+ return @as(gid_t, syscall0(.getegid32));
} else {
- return @as(u32, syscall0(.getegid));
+ return @as(gid_t, syscall0(.getegid));
}
}
-pub fn seteuid(euid: u32) usize {
- return setreuid(std.math.maxInt(u32), euid);
+pub fn seteuid(euid: uid_t) usize {
+ // We use setresuid here instead of setreuid to ensure that the saved uid
+ // is not changed. This is what musl and recent glibc versions do as well.
+ //
+ // The setresuid(2) man page says that if -1 is passed the corresponding
+ // id will not be changed. Since uid_t is unsigned, this wraps around to the
+ // max value in C.
+ comptime assert(@typeInfo(uid_t) == .Int and !@typeInfo(uid_t).Int.is_signed);
+ return setresuid(std.math.maxInt(uid_t), euid, std.math.maxInt(uid_t));
}
-pub fn setegid(egid: u32) usize {
- return setregid(std.math.maxInt(u32), egid);
+pub fn setegid(egid: gid_t) usize {
+    // We use setresgid here instead of setregid to ensure that the saved gid
+ // is not changed. This is what musl and recent glibc versions do as well.
+ //
+ // The setresgid(2) man page says that if -1 is passed the corresponding
+ // id will not be changed. Since gid_t is unsigned, this wraps around to the
+ // max value in C.
+    comptime assert(@typeInfo(gid_t) == .Int and !@typeInfo(gid_t).Int.is_signed);
+ return setresgid(std.math.maxInt(gid_t), egid, std.math.maxInt(gid_t));
}
-pub fn getresuid(ruid: *u32, euid: *u32, suid: *u32) usize {
+pub fn getresuid(ruid: *uid_t, euid: *uid_t, suid: *uid_t) usize {
if (@hasField(SYS, "getresuid32")) {
return syscall3(.getresuid32, @ptrToInt(ruid), @ptrToInt(euid), @ptrToInt(suid));
} else {
@@ -735,7 +749,7 @@ pub fn getresuid(ruid: *u32, euid: *u32, suid: *u32) usize {
}
}
-pub fn getresgid(rgid: *u32, egid: *u32, sgid: *u32) usize {
+pub fn getresgid(rgid: *gid_t, egid: *gid_t, sgid: *gid_t) usize {
if (@hasField(SYS, "getresgid32")) {
return syscall3(.getresgid32, @ptrToInt(rgid), @ptrToInt(egid), @ptrToInt(sgid));
} else {
@@ -743,7 +757,7 @@ pub fn getresgid(rgid: *u32, egid: *u32, sgid: *u32) usize {
}
}
-pub fn setresuid(ruid: u32, euid: u32, suid: u32) usize {
+pub fn setresuid(ruid: uid_t, euid: uid_t, suid: uid_t) usize {
if (@hasField(SYS, "setresuid32")) {
return syscall3(.setresuid32, ruid, euid, suid);
} else {
@@ -751,7 +765,7 @@ pub fn setresuid(ruid: u32, euid: u32, suid: u32) usize {
}
}
-pub fn setresgid(rgid: u32, egid: u32, sgid: u32) usize {
+pub fn setresgid(rgid: gid_t, egid: gid_t, sgid: gid_t) usize {
if (@hasField(SYS, "setresgid32")) {
return syscall3(.setresgid32, rgid, egid, sgid);
} else {
@@ -759,7 +773,7 @@ pub fn setresgid(rgid: u32, egid: u32, sgid: u32) usize {
}
}
-pub fn getgroups(size: usize, list: *u32) usize {
+pub fn getgroups(size: usize, list: *gid_t) usize {
if (@hasField(SYS, "getgroups32")) {
return syscall2(.getgroups32, size, @ptrToInt(list));
} else {
@@ -767,7 +781,7 @@ pub fn getgroups(size: usize, list: *u32) usize {
}
}
-pub fn setgroups(size: usize, list: *const u32) usize {
+pub fn setgroups(size: usize, list: *const gid_t) usize {
if (@hasField(SYS, "setgroups32")) {
return syscall2(.setgroups32, size, @ptrToInt(list));
} else {
@@ -1228,6 +1242,22 @@ pub fn bpf(cmd: BPF.Cmd, attr: *BPF.Attr, size: u32) usize {
return syscall3(.bpf, @enumToInt(cmd), @ptrToInt(attr), size);
}
+pub fn sync() void {
+ _ = syscall0(.sync);
+}
+
+pub fn syncfs(fd: fd_t) usize {
+ return syscall1(.syncfs, @bitCast(usize, @as(isize, fd)));
+}
+
+pub fn fsync(fd: fd_t) usize {
+ return syscall1(.fsync, @bitCast(usize, @as(isize, fd)));
+}
+
+pub fn fdatasync(fd: fd_t) usize {
+ return syscall1(.fdatasync, @bitCast(usize, @as(isize, fd)));
+}
+
test "" {
if (builtin.os.tag == .linux) {
_ = @import("linux/test.zig");
diff --git a/lib/std/os/test.zig b/lib/std/os/test.zig
index 576125e2a3..0a453d8b2e 100644
--- a/lib/std/os/test.zig
+++ b/lib/std/os/test.zig
@@ -555,3 +555,39 @@ test "signalfd" {
return error.SkipZigTest;
_ = std.os.signalfd;
}
+
+test "sync" {
+ if (builtin.os.tag != .linux)
+ return error.SkipZigTest;
+
+ var tmp = tmpDir(.{});
+ defer tmp.cleanup();
+
+ const test_out_file = "os_tmp_test";
+ const file = try tmp.dir.createFile(test_out_file, .{});
+ defer {
+ file.close();
+ tmp.dir.deleteFile(test_out_file) catch {};
+ }
+
+ os.sync();
+ try os.syncfs(file.handle);
+}
+
+test "fsync" {
+ if (builtin.os.tag != .linux and builtin.os.tag != .windows)
+ return error.SkipZigTest;
+
+ var tmp = tmpDir(.{});
+ defer tmp.cleanup();
+
+ const test_out_file = "os_tmp_test";
+ const file = try tmp.dir.createFile(test_out_file, .{});
+ defer {
+ file.close();
+ tmp.dir.deleteFile(test_out_file) catch {};
+ }
+
+ try os.fsync(file.handle);
+ try os.fdatasync(file.handle);
+}
diff --git a/lib/std/os/windows/kernel32.zig b/lib/std/os/windows/kernel32.zig
index fce9eea908..05d160485d 100644
--- a/lib/std/os/windows/kernel32.zig
+++ b/lib/std/os/windows/kernel32.zig
@@ -287,3 +287,5 @@ pub extern "kernel32" fn K32GetWsChangesEx(hProcess: HANDLE, lpWatchInfoEx: PPSA
pub extern "kernel32" fn K32InitializeProcessForWsWatch(hProcess: HANDLE) callconv(.Stdcall) BOOL;
pub extern "kernel32" fn K32QueryWorkingSet(hProcess: HANDLE, pv: PVOID, cb: DWORD) callconv(.Stdcall) BOOL;
pub extern "kernel32" fn K32QueryWorkingSetEx(hProcess: HANDLE, pv: PVOID, cb: DWORD) callconv(.Stdcall) BOOL;
+
+pub extern "kernel32" fn FlushFileBuffers(hFile: HANDLE) callconv(.Stdcall) BOOL;
diff --git a/lib/std/process.zig b/lib/std/process.zig
index 69befa2fc8..9cb571714c 100644
--- a/lib/std/process.zig
+++ b/lib/std/process.zig
@@ -578,8 +578,8 @@ fn testWindowsCmdLine(input_cmd_line: [*]const u8, expected_args: []const []cons
}
pub const UserInfo = struct {
- uid: u32,
- gid: u32,
+ uid: os.uid_t,
+ gid: os.gid_t,
};
/// POSIX function which gets a uid from username.
@@ -607,8 +607,8 @@ pub fn posixGetUserInfo(name: []const u8) !UserInfo {
var buf: [std.mem.page_size]u8 = undefined;
var name_index: usize = 0;
var state = State.Start;
- var uid: u32 = 0;
- var gid: u32 = 0;
+ var uid: os.uid_t = 0;
+ var gid: os.gid_t = 0;
while (true) {
const amt_read = try reader.read(buf[0..]);
diff --git a/lib/std/progress.zig b/lib/std/progress.zig
index 654d8cc228..82f2801fa1 100644
--- a/lib/std/progress.zig
+++ b/lib/std/progress.zig
@@ -197,7 +197,7 @@ pub const Progress = struct {
var maybe_node: ?*Node = &self.root;
while (maybe_node) |node| {
if (need_ellipse) {
- self.bufWrite(&end, "...", .{});
+ self.bufWrite(&end, "... ", .{});
}
need_ellipse = false;
if (node.name.len != 0 or node.estimated_total_items != null) {
@@ -218,7 +218,7 @@ pub const Progress = struct {
maybe_node = node.recently_updated_child;
}
if (need_ellipse) {
- self.bufWrite(&end, "...", .{});
+ self.bufWrite(&end, "... ", .{});
}
}
@@ -253,7 +253,7 @@ pub const Progress = struct {
const bytes_needed_for_esc_codes_at_end = if (std.builtin.os.tag == .windows) 0 else 11;
const max_end = self.output_buffer.len - bytes_needed_for_esc_codes_at_end;
if (end.* > max_end) {
- const suffix = "...";
+ const suffix = "... ";
self.columns_written = self.columns_written - (end.* - max_end) + suffix.len;
std.mem.copy(u8, self.output_buffer[max_end..], suffix);
end.* = max_end + suffix.len;
diff --git a/lib/std/special/test_runner.zig b/lib/std/special/test_runner.zig
index 87b011ede8..b9452b79cc 100644
--- a/lib/std/special/test_runner.zig
+++ b/lib/std/special/test_runner.zig
@@ -40,7 +40,7 @@ pub fn main() anyerror!void {
test_node.activate();
progress.refresh();
if (progress.terminal == null) {
- std.debug.print("{}/{} {}...", .{ i + 1, test_fn_list.len, test_fn.name });
+ std.debug.print("{}/{} {}... ", .{ i + 1, test_fn_list.len, test_fn.name });
}
const result = if (test_fn.async_frame_size) |size| switch (io_mode) {
.evented => blk: {
diff --git a/lib/std/target.zig b/lib/std/target.zig
index 034ab780d0..37425a9a29 100644
--- a/lib/std/target.zig
+++ b/lib/std/target.zig
@@ -468,6 +468,7 @@ pub const Target = struct {
/// TODO Get rid of this one.
unknown,
coff,
+ pe,
elf,
macho,
wasm,
@@ -771,6 +772,63 @@ pub const Target = struct {
};
}
+ pub fn toCoffMachine(arch: Arch) std.coff.MachineType {
+ return switch (arch) {
+ .avr => .Unknown,
+ .msp430 => .Unknown,
+ .arc => .Unknown,
+ .arm => .ARM,
+ .armeb => .Unknown,
+ .hexagon => .Unknown,
+ .le32 => .Unknown,
+ .mips => .Unknown,
+ .mipsel => .Unknown,
+ .powerpc => .POWERPC,
+ .r600 => .Unknown,
+ .riscv32 => .RISCV32,
+ .sparc => .Unknown,
+ .sparcel => .Unknown,
+ .tce => .Unknown,
+ .tcele => .Unknown,
+ .thumb => .Thumb,
+ .thumbeb => .Thumb,
+ .i386 => .I386,
+ .xcore => .Unknown,
+ .nvptx => .Unknown,
+ .amdil => .Unknown,
+ .hsail => .Unknown,
+ .spir => .Unknown,
+ .kalimba => .Unknown,
+ .shave => .Unknown,
+ .lanai => .Unknown,
+ .wasm32 => .Unknown,
+ .renderscript32 => .Unknown,
+ .aarch64_32 => .ARM64,
+ .aarch64 => .ARM64,
+ .aarch64_be => .Unknown,
+ .mips64 => .Unknown,
+ .mips64el => .Unknown,
+ .powerpc64 => .Unknown,
+ .powerpc64le => .Unknown,
+ .riscv64 => .RISCV64,
+ .x86_64 => .X64,
+ .nvptx64 => .Unknown,
+ .le64 => .Unknown,
+ .amdil64 => .Unknown,
+ .hsail64 => .Unknown,
+ .spir64 => .Unknown,
+ .wasm64 => .Unknown,
+ .renderscript64 => .Unknown,
+ .amdgcn => .Unknown,
+ .bpfel => .Unknown,
+ .bpfeb => .Unknown,
+ .sparcv9 => .Unknown,
+ .s390x => .Unknown,
+ .ve => .Unknown,
+ .spu_2 => .Unknown,
+ };
+ }
+
pub fn endian(arch: Arch) builtin.Endian {
return switch (arch) {
.avr,
diff --git a/lib/std/zig/tokenizer.zig b/lib/std/zig/tokenizer.zig
index 47c7d23b35..86968c73b2 100644
--- a/lib/std/zig/tokenizer.zig
+++ b/lib/std/zig/tokenizer.zig
@@ -1175,6 +1175,7 @@ pub const Tokenizer = struct {
},
.num_dot_dec => switch (c) {
'.' => {
+ result.id = .IntegerLiteral;
self.index -= 1;
state = .start;
break;
@@ -1183,7 +1184,6 @@ pub const Tokenizer = struct {
state = .float_exponent_unsigned;
},
'0'...'9' => {
- result.id = .FloatLiteral;
state = .float_fraction_dec;
},
else => {
@@ -1769,6 +1769,7 @@ test "tokenizer - number literals decimal" {
testTokenize("7", &[_]Token.Id{.IntegerLiteral});
testTokenize("8", &[_]Token.Id{.IntegerLiteral});
testTokenize("9", &[_]Token.Id{.IntegerLiteral});
+ testTokenize("1..", &[_]Token.Id{ .IntegerLiteral, .Ellipsis2 });
testTokenize("0a", &[_]Token.Id{ .Invalid, .Identifier });
testTokenize("9b", &[_]Token.Id{ .Invalid, .Identifier });
testTokenize("1z", &[_]Token.Id{ .Invalid, .Identifier });
diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig
index 24dcb541b4..16f465c9d0 100644
--- a/src-self-hosted/Module.zig
+++ b/src-self-hosted/Module.zig
@@ -125,7 +125,7 @@ pub const Decl = struct {
/// mapping them to an address in the output file.
/// Memory owned by this decl, using Module's allocator.
name: [*:0]const u8,
- /// The direct parent container of the Decl. This is either a `Scope.File` or `Scope.ZIRModule`.
+ /// The direct parent container of the Decl. This is either a `Scope.Container` or `Scope.ZIRModule`.
/// Reference to externally owned memory.
scope: *Scope,
/// The AST Node decl index or ZIR Inst index that contains this declaration.
@@ -217,9 +217,10 @@ pub const Decl = struct {
pub fn src(self: Decl) usize {
switch (self.scope.tag) {
- .file => {
- const file = @fieldParentPtr(Scope.File, "base", self.scope);
- const tree = file.contents.tree;
+ .container => {
+ const container = @fieldParentPtr(Scope.Container, "base", self.scope);
+ const tree = container.file_scope.contents.tree;
+ // TODO Container should have its own decls()
const decl_node = tree.root_node.decls()[self.src_index];
return tree.token_locs[decl_node.firstToken()].start;
},
@@ -229,7 +230,7 @@ pub const Decl = struct {
const src_decl = module.decls[self.src_index];
return src_decl.inst.src;
},
- .block => unreachable,
+ .file, .block => unreachable,
.gen_zir => unreachable,
.local_val => unreachable,
.local_ptr => unreachable,
@@ -359,6 +360,7 @@ pub const Scope = struct {
.local_ptr => return self.cast(LocalPtr).?.gen_zir.arena,
.zir_module => return &self.cast(ZIRModule).?.contents.module.arena.allocator,
.file => unreachable,
+ .container => unreachable,
}
}
@@ -368,15 +370,16 @@ pub const Scope = struct {
return switch (self.tag) {
.block => self.cast(Block).?.decl,
.gen_zir => self.cast(GenZIR).?.decl,
- .local_val => return self.cast(LocalVal).?.gen_zir.decl,
- .local_ptr => return self.cast(LocalPtr).?.gen_zir.decl,
+ .local_val => self.cast(LocalVal).?.gen_zir.decl,
+ .local_ptr => self.cast(LocalPtr).?.gen_zir.decl,
.decl => self.cast(DeclAnalysis).?.decl,
.zir_module => null,
.file => null,
+ .container => null,
};
}
- /// Asserts the scope has a parent which is a ZIRModule or File and
+ /// Asserts the scope has a parent which is a ZIRModule or Container and
/// returns it.
pub fn namespace(self: *Scope) *Scope {
switch (self.tag) {
@@ -385,7 +388,8 @@ pub const Scope = struct {
.local_val => return self.cast(LocalVal).?.gen_zir.decl.scope,
.local_ptr => return self.cast(LocalPtr).?.gen_zir.decl.scope,
.decl => return self.cast(DeclAnalysis).?.decl.scope,
- .zir_module, .file => return self,
+ .file => return &self.cast(File).?.root_container.base,
+ .zir_module, .container => return self,
}
}
@@ -399,8 +403,9 @@ pub const Scope = struct {
.local_val => unreachable,
.local_ptr => unreachable,
.decl => unreachable,
+ .file => unreachable,
.zir_module => return self.cast(ZIRModule).?.fullyQualifiedNameHash(name),
- .file => return self.cast(File).?.fullyQualifiedNameHash(name),
+ .container => return self.cast(Container).?.fullyQualifiedNameHash(name),
}
}
@@ -409,11 +414,12 @@ pub const Scope = struct {
switch (self.tag) {
.file => return self.cast(File).?.contents.tree,
.zir_module => unreachable,
- .decl => return self.cast(DeclAnalysis).?.decl.scope.cast(File).?.contents.tree,
- .block => return self.cast(Block).?.decl.scope.cast(File).?.contents.tree,
- .gen_zir => return self.cast(GenZIR).?.decl.scope.cast(File).?.contents.tree,
- .local_val => return self.cast(LocalVal).?.gen_zir.decl.scope.cast(File).?.contents.tree,
- .local_ptr => return self.cast(LocalPtr).?.gen_zir.decl.scope.cast(File).?.contents.tree,
+ .decl => return self.cast(DeclAnalysis).?.decl.scope.cast(Container).?.file_scope.contents.tree,
+ .block => return self.cast(Block).?.decl.scope.cast(Container).?.file_scope.contents.tree,
+ .gen_zir => return self.cast(GenZIR).?.decl.scope.cast(Container).?.file_scope.contents.tree,
+ .local_val => return self.cast(LocalVal).?.gen_zir.decl.scope.cast(Container).?.file_scope.contents.tree,
+ .local_ptr => return self.cast(LocalPtr).?.gen_zir.decl.scope.cast(Container).?.file_scope.contents.tree,
+ .container => return self.cast(Container).?.file_scope.contents.tree,
}
}
@@ -427,13 +433,15 @@ pub const Scope = struct {
.decl => unreachable,
.zir_module => unreachable,
.file => unreachable,
+ .container => unreachable,
};
}
- /// Asserts the scope has a parent which is a ZIRModule or File and
- /// Asserts the scope has a parent which is a ZIRModule, Container or File and
/// returns the sub_file_path field.
pub fn subFilePath(base: *Scope) []const u8 {
switch (base.tag) {
+ .container => return @fieldParentPtr(Container, "base", base).file_scope.sub_file_path,
.file => return @fieldParentPtr(File, "base", base).sub_file_path,
.zir_module => return @fieldParentPtr(ZIRModule, "base", base).sub_file_path,
.block => unreachable,
@@ -453,11 +461,13 @@ pub const Scope = struct {
.local_val => unreachable,
.local_ptr => unreachable,
.decl => unreachable,
+ .container => unreachable,
}
}
pub fn getSource(base: *Scope, module: *Module) ![:0]const u8 {
switch (base.tag) {
+ .container => return @fieldParentPtr(Container, "base", base).file_scope.getSource(module),
.file => return @fieldParentPtr(File, "base", base).getSource(module),
.zir_module => return @fieldParentPtr(ZIRModule, "base", base).getSource(module),
.gen_zir => unreachable,
@@ -471,8 +481,9 @@ pub const Scope = struct {
/// Asserts the scope is a namespace Scope and removes the Decl from the namespace.
pub fn removeDecl(base: *Scope, child: *Decl) void {
switch (base.tag) {
- .file => return @fieldParentPtr(File, "base", base).removeDecl(child),
+ .container => return @fieldParentPtr(Container, "base", base).removeDecl(child),
.zir_module => return @fieldParentPtr(ZIRModule, "base", base).removeDecl(child),
+ .file => unreachable,
.block => unreachable,
.gen_zir => unreachable,
.local_val => unreachable,
@@ -499,6 +510,7 @@ pub const Scope = struct {
.local_val => unreachable,
.local_ptr => unreachable,
.decl => unreachable,
+ .container => unreachable,
}
}
@@ -515,6 +527,8 @@ pub const Scope = struct {
zir_module,
/// .zig source code.
file,
+ /// Struct, enum or union; every .file contains one of these.
+ container,
block,
decl,
gen_zir,
@@ -522,6 +536,33 @@ pub const Scope = struct {
local_ptr,
};
+ pub const Container = struct {
+ pub const base_tag: Tag = .container;
+ base: Scope = Scope{ .tag = base_tag },
+
+ file_scope: *Scope.File,
+
+ /// Direct children of the file.
+ decls: std.AutoArrayHashMapUnmanaged(*Decl, void),
+
+ // TODO implement container types and put this in a status union
+ // ty: Type
+
+ pub fn deinit(self: *Container, gpa: *Allocator) void {
+ self.decls.deinit(gpa);
+ self.* = undefined;
+ }
+
+ pub fn removeDecl(self: *Container, child: *Decl) void {
+ _ = self.decls.remove(child);
+ }
+
+ pub fn fullyQualifiedNameHash(self: *Container, name: []const u8) NameHash {
+ // TODO container scope qualified names.
+ return std.zig.hashSrc(name);
+ }
+ };
+
pub const File = struct {
pub const base_tag: Tag = .file;
base: Scope = Scope{ .tag = base_tag },
@@ -544,8 +585,7 @@ pub const Scope = struct {
loaded_success,
},
- /// Direct children of the file.
- decls: ArrayListUnmanaged(*Decl),
+ root_container: Container,
pub fn unload(self: *File, gpa: *Allocator) void {
switch (self.status) {
@@ -569,20 +609,11 @@ pub const Scope = struct {
}
pub fn deinit(self: *File, gpa: *Allocator) void {
- self.decls.deinit(gpa);
+ self.root_container.deinit(gpa);
self.unload(gpa);
self.* = undefined;
}
- pub fn removeDecl(self: *File, child: *Decl) void {
- for (self.decls.items) |item, i| {
- if (item == child) {
- _ = self.decls.swapRemove(i);
- return;
- }
- }
- }
-
pub fn dumpSrc(self: *File, src: usize) void {
const loc = std.zig.findLineColumn(self.source.bytes, src);
std.debug.print("{}:{}:{}\n", .{ self.sub_file_path, loc.line + 1, loc.column + 1 });
@@ -604,11 +635,6 @@ pub const Scope = struct {
.bytes => |bytes| return bytes,
}
}
-
- pub fn fullyQualifiedNameHash(self: *File, name: []const u8) NameHash {
- // We don't have struct scopes yet so this is currently just a simple name hash.
- return std.zig.hashSrc(name);
- }
};
pub const ZIRModule = struct {
@@ -861,7 +887,10 @@ pub fn init(gpa: *Allocator, options: InitOptions) !Module {
.source = .{ .unloaded = {} },
.contents = .{ .not_available = {} },
.status = .never_loaded,
- .decls = .{},
+ .root_container = .{
+ .file_scope = root_scope,
+ .decls = .{},
+ },
};
break :blk &root_scope.base;
} else if (mem.endsWith(u8, options.root_pkg.root_src_path, ".zir")) {
@@ -969,7 +998,7 @@ pub fn update(self: *Module) !void {
// to force a refresh we unload now.
if (self.root_scope.cast(Scope.File)) |zig_file| {
zig_file.unload(self.gpa);
- self.analyzeRootSrcFile(zig_file) catch |err| switch (err) {
+ self.analyzeContainer(&zig_file.root_container) catch |err| switch (err) {
error.AnalysisFail => {
assert(self.totalErrorCount() != 0);
},
@@ -1237,8 +1266,8 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
const tracy = trace(@src());
defer tracy.end();
- const file_scope = decl.scope.cast(Scope.File).?;
- const tree = try self.getAstTree(file_scope);
+ const container_scope = decl.scope.cast(Scope.Container).?;
+ const tree = try self.getAstTree(container_scope);
const ast_node = tree.root_node.decls()[decl.src_index];
switch (ast_node.tag) {
.FnProto => {
@@ -1698,10 +1727,12 @@ fn getSrcModule(self: *Module, root_scope: *Scope.ZIRModule) !*zir.Module {
}
}
-fn getAstTree(self: *Module, root_scope: *Scope.File) !*ast.Tree {
+fn getAstTree(self: *Module, container_scope: *Scope.Container) !*ast.Tree {
const tracy = trace(@src());
defer tracy.end();
+ const root_scope = container_scope.file_scope;
+
switch (root_scope.status) {
.never_loaded, .unloaded_success => {
try self.failed_files.ensureCapacity(self.gpa, self.failed_files.items().len + 1);
@@ -1743,25 +1774,25 @@ fn getAstTree(self: *Module, root_scope: *Scope.File) !*ast.Tree {
}
}
-fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
+fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void {
const tracy = trace(@src());
defer tracy.end();
// We may be analyzing it for the first time, or this may be
// an incremental update. This code handles both cases.
- const tree = try self.getAstTree(root_scope);
+ const tree = try self.getAstTree(container_scope);
const decls = tree.root_node.decls();
try self.work_queue.ensureUnusedCapacity(decls.len);
- try root_scope.decls.ensureCapacity(self.gpa, decls.len);
+ try container_scope.decls.ensureCapacity(self.gpa, decls.len);
// Keep track of the decls that we expect to see in this file so that
// we know which ones have been deleted.
var deleted_decls = std.AutoArrayHashMap(*Decl, void).init(self.gpa);
defer deleted_decls.deinit();
- try deleted_decls.ensureCapacity(root_scope.decls.items.len);
- for (root_scope.decls.items) |file_decl| {
- deleted_decls.putAssumeCapacityNoClobber(file_decl, {});
+ try deleted_decls.ensureCapacity(container_scope.decls.items().len);
+ for (container_scope.decls.items()) |entry| {
+ deleted_decls.putAssumeCapacityNoClobber(entry.key, {});
}
for (decls) |src_decl, decl_i| {
@@ -1773,7 +1804,7 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
const name_loc = tree.token_locs[name_tok];
const name = tree.tokenSliceLoc(name_loc);
- const name_hash = root_scope.fullyQualifiedNameHash(name);
+ const name_hash = container_scope.fullyQualifiedNameHash(name);
const contents_hash = std.zig.hashSrc(tree.getNodeSource(src_decl));
if (self.decl_table.get(name_hash)) |decl| {
// Update the AST Node index of the decl, even if its contents are unchanged, it may
@@ -1789,6 +1820,9 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
try self.markOutdatedDecl(decl);
decl.contents_hash = contents_hash;
} else switch (self.bin_file.tag) {
+ .coff => {
+ // TODO Implement for COFF
+ },
.elf => if (decl.fn_link.elf.len != 0) {
// TODO Look into detecting when this would be unnecessary by storing enough state
// in `Decl` to notice that the line number did not change.
@@ -1801,8 +1835,8 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
}
}
} else {
- const new_decl = try self.createNewDecl(&root_scope.base, name, decl_i, name_hash, contents_hash);
- root_scope.decls.appendAssumeCapacity(new_decl);
+ const new_decl = try self.createNewDecl(&container_scope.base, name, decl_i, name_hash, contents_hash);
+ container_scope.decls.putAssumeCapacity(new_decl, {});
if (fn_proto.getExternExportInlineToken()) |maybe_export_token| {
if (tree.token_ids[maybe_export_token] == .Keyword_export) {
self.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
@@ -1812,7 +1846,7 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
} else if (src_decl.castTag(.VarDecl)) |var_decl| {
const name_loc = tree.token_locs[var_decl.name_token];
const name = tree.tokenSliceLoc(name_loc);
- const name_hash = root_scope.fullyQualifiedNameHash(name);
+ const name_hash = container_scope.fullyQualifiedNameHash(name);
const contents_hash = std.zig.hashSrc(tree.getNodeSource(src_decl));
if (self.decl_table.get(name_hash)) |decl| {
// Update the AST Node index of the decl, even if its contents are unchanged, it may
@@ -1828,8 +1862,8 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
decl.contents_hash = contents_hash;
}
} else {
- const new_decl = try self.createNewDecl(&root_scope.base, name, decl_i, name_hash, contents_hash);
- root_scope.decls.appendAssumeCapacity(new_decl);
+ const new_decl = try self.createNewDecl(&container_scope.base, name, decl_i, name_hash, contents_hash);
+ container_scope.decls.putAssumeCapacity(new_decl, {});
if (var_decl.getExternExportToken()) |maybe_export_token| {
if (tree.token_ids[maybe_export_token] == .Keyword_export) {
self.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
@@ -1841,11 +1875,11 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
const name = try std.fmt.allocPrint(self.gpa, "__comptime_{}", .{name_index});
defer self.gpa.free(name);
- const name_hash = root_scope.fullyQualifiedNameHash(name);
+ const name_hash = container_scope.fullyQualifiedNameHash(name);
const contents_hash = std.zig.hashSrc(tree.getNodeSource(src_decl));
- const new_decl = try self.createNewDecl(&root_scope.base, name, decl_i, name_hash, contents_hash);
- root_scope.decls.appendAssumeCapacity(new_decl);
+ const new_decl = try self.createNewDecl(&container_scope.base, name, decl_i, name_hash, contents_hash);
+ container_scope.decls.putAssumeCapacity(new_decl, {});
self.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
} else if (src_decl.castTag(.ContainerField)) |container_field| {
log.err("TODO: analyze container field", .{});
@@ -2047,12 +2081,14 @@ fn allocateNewDecl(
.deletion_flag = false,
.contents_hash = contents_hash,
.link = switch (self.bin_file.tag) {
+ .coff => .{ .coff = link.File.Coff.TextBlock.empty },
.elf => .{ .elf = link.File.Elf.TextBlock.empty },
.macho => .{ .macho = link.File.MachO.TextBlock.empty },
.c => .{ .c = {} },
.wasm => .{ .wasm = {} },
},
.fn_link = switch (self.bin_file.tag) {
+ .coff => .{ .coff = {} },
.elf => .{ .elf = link.File.Elf.SrcFn.empty },
.macho => .{ .macho = link.File.MachO.SrcFn.empty },
.c => .{ .c = {} },
@@ -2591,6 +2627,72 @@ pub fn analyzeIsErr(self: *Module, scope: *Scope, src: usize, operand: *Inst) In
return self.fail(scope, src, "TODO implement analysis of iserr", .{});
}
+pub fn analyzeSlice(self: *Module, scope: *Scope, src: usize, array_ptr: *Inst, start: *Inst, end_opt: ?*Inst, sentinel_opt: ?*Inst) InnerError!*Inst {
+ const ptr_child = switch (array_ptr.ty.zigTypeTag()) {
+ .Pointer => array_ptr.ty.elemType(),
+ else => return self.fail(scope, src, "expected pointer, found '{}'", .{array_ptr.ty}),
+ };
+
+ var array_type = ptr_child;
+ const elem_type = switch (ptr_child.zigTypeTag()) {
+ .Array => ptr_child.elemType(),
+ .Pointer => blk: {
+ if (ptr_child.isSinglePointer()) {
+ if (ptr_child.elemType().zigTypeTag() == .Array) {
+ array_type = ptr_child.elemType();
+ break :blk ptr_child.elemType().elemType();
+ }
+
+ return self.fail(scope, src, "slice of single-item pointer", .{});
+ }
+ break :blk ptr_child.elemType();
+ },
+ else => return self.fail(scope, src, "slice of non-array type '{}'", .{ptr_child}),
+ };
+
+ const slice_sentinel = if (sentinel_opt) |sentinel| blk: {
+ const casted = try self.coerce(scope, elem_type, sentinel);
+ break :blk try self.resolveConstValue(scope, casted);
+ } else null;
+
+ var return_ptr_size: std.builtin.TypeInfo.Pointer.Size = .Slice;
+ var return_elem_type = elem_type;
+ if (end_opt) |end| {
+ if (end.value()) |end_val| {
+ if (start.value()) |start_val| {
+ const start_u64 = start_val.toUnsignedInt();
+ const end_u64 = end_val.toUnsignedInt();
+ if (start_u64 > end_u64) {
+ return self.fail(scope, src, "out of bounds slice", .{});
+ }
+
+ const len = end_u64 - start_u64;
+ const array_sentinel = if (array_type.zigTypeTag() == .Array and end_u64 == array_type.arrayLen())
+ array_type.sentinel()
+ else
+ slice_sentinel;
+ return_elem_type = try self.arrayType(scope, len, array_sentinel, elem_type);
+ return_ptr_size = .One;
+ }
+ }
+ }
+ const return_type = try self.ptrType(
+ scope,
+ src,
+ return_elem_type,
+ if (end_opt == null) slice_sentinel else null,
+ 0, // TODO alignment
+ 0,
+ 0,
+ !ptr_child.isConstPtr(),
+ ptr_child.isAllowzeroPtr(),
+ ptr_child.isVolatilePtr(),
+ return_ptr_size,
+ );
+
+ return self.fail(scope, src, "TODO implement analysis of slice", .{});
+}
+
/// Asserts that lhs and rhs types are both numeric.
pub fn cmpNumeric(
self: *Module,
@@ -2801,6 +2903,12 @@ pub fn resolvePeerTypes(self: *Module, scope: *Scope, instructions: []*Inst) !Ty
prev_inst = next_inst;
continue;
}
+ if (next_inst.ty.zigTypeTag() == .Undefined)
+ continue;
+ if (prev_inst.ty.zigTypeTag() == .Undefined) {
+ prev_inst = next_inst;
+ continue;
+ }
if (prev_inst.ty.isInt() and
next_inst.ty.isInt() and
prev_inst.ty.isSignedInt() == next_inst.ty.isSignedInt())
@@ -3052,6 +3160,7 @@ fn failWithOwnedErrorMsg(self: *Module, scope: *Scope, src: usize, err_msg: *Err
self.failed_files.putAssumeCapacityNoClobber(scope, err_msg);
},
.file => unreachable,
+ .container => unreachable,
}
return error.AnalysisFail;
}
diff --git a/src-self-hosted/astgen.zig b/src-self-hosted/astgen.zig
index 17db584e56..2c091a86ec 100644
--- a/src-self-hosted/astgen.zig
+++ b/src-self-hosted/astgen.zig
@@ -275,16 +275,16 @@ pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerEr
.ErrorType => return rlWrap(mod, scope, rl, try errorType(mod, scope, node.castTag(.ErrorType).?)),
.For => return forExpr(mod, scope, rl, node.castTag(.For).?),
.ArrayAccess => return arrayAccess(mod, scope, rl, node.castTag(.ArrayAccess).?),
+ .Slice => return rlWrap(mod, scope, rl, try sliceExpr(mod, scope, node.castTag(.Slice).?)),
.Catch => return catchExpr(mod, scope, rl, node.castTag(.Catch).?),
.Comptime => return comptimeKeyword(mod, scope, rl, node.castTag(.Comptime).?),
+ .OrElse => return orelseExpr(mod, scope, rl, node.castTag(.OrElse).?),
.Defer => return mod.failNode(scope, node, "TODO implement astgen.expr for .Defer", .{}),
.Range => return mod.failNode(scope, node, "TODO implement astgen.expr for .Range", .{}),
- .OrElse => return mod.failNode(scope, node, "TODO implement astgen.expr for .OrElse", .{}),
.Await => return mod.failNode(scope, node, "TODO implement astgen.expr for .Await", .{}),
.Resume => return mod.failNode(scope, node, "TODO implement astgen.expr for .Resume", .{}),
.Try => return mod.failNode(scope, node, "TODO implement astgen.expr for .Try", .{}),
- .Slice => return mod.failNode(scope, node, "TODO implement astgen.expr for .Slice", .{}),
.ArrayInitializer => return mod.failNode(scope, node, "TODO implement astgen.expr for .ArrayInitializer", .{}),
.ArrayInitializerDot => return mod.failNode(scope, node, "TODO implement astgen.expr for .ArrayInitializerDot", .{}),
.StructInitializer => return mod.failNode(scope, node, "TODO implement astgen.expr for .StructInitializer", .{}),
@@ -790,13 +790,31 @@ fn errorType(mod: *Module, scope: *Scope, node: *ast.Node.OneToken) InnerError!*
}
fn catchExpr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Catch) InnerError!*zir.Inst {
+ return orelseCatchExpr(mod, scope, rl, node.lhs, node.op_token, .iserr, .unwrap_err_unsafe, node.rhs, node.payload);
+}
+
+fn orelseExpr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.SimpleInfixOp) InnerError!*zir.Inst {
+ return orelseCatchExpr(mod, scope, rl, node.lhs, node.op_token, .isnull, .unwrap_optional_unsafe, node.rhs, null);
+}
+
+fn orelseCatchExpr(
+ mod: *Module,
+ scope: *Scope,
+ rl: ResultLoc,
+ lhs: *ast.Node,
+ op_token: ast.TokenIndex,
+ cond_op: zir.Inst.Tag,
+ unwrap_op: zir.Inst.Tag,
+ rhs: *ast.Node,
+ payload_node: ?*ast.Node,
+) InnerError!*zir.Inst {
const tree = scope.tree();
- const src = tree.token_locs[node.op_token].start;
+ const src = tree.token_locs[op_token].start;
- const err_union_ptr = try expr(mod, scope, .ref, node.lhs);
- // TODO we could avoid an unnecessary copy if .iserr took a pointer
- const err_union = try addZIRUnOp(mod, scope, src, .deref, err_union_ptr);
- const cond = try addZIRUnOp(mod, scope, src, .iserr, err_union);
+ const operand_ptr = try expr(mod, scope, .ref, lhs);
+ // TODO we could avoid an unnecessary copy if .iserr, .isnull took a pointer
+ const err_union = try addZIRUnOp(mod, scope, src, .deref, operand_ptr);
+ const cond = try addZIRUnOp(mod, scope, src, cond_op, err_union);
var block_scope: Scope.GenZIR = .{
.parent = scope,
@@ -825,55 +843,55 @@ fn catchExpr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Catch)
.inferred_ptr, .bitcasted_ptr, .block_ptr => .{ .block_ptr = block },
};
- var err_scope: Scope.GenZIR = .{
+ var then_scope: Scope.GenZIR = .{
.parent = scope,
.decl = block_scope.decl,
.arena = block_scope.arena,
.instructions = .{},
};
- defer err_scope.instructions.deinit(mod.gpa);
+ defer then_scope.instructions.deinit(mod.gpa);
var err_val_scope: Scope.LocalVal = undefined;
- const err_sub_scope = blk: {
- const payload = node.payload orelse
- break :blk &err_scope.base;
+ const then_sub_scope = blk: {
+ const payload = payload_node orelse
+ break :blk &then_scope.base;
const err_name = tree.tokenSlice(payload.castTag(.Payload).?.error_symbol.firstToken());
if (mem.eql(u8, err_name, "_"))
- break :blk &err_scope.base;
+ break :blk &then_scope.base;
- const unwrapped_err_ptr = try addZIRUnOp(mod, &err_scope.base, src, .unwrap_err_code, err_union_ptr);
+ const unwrapped_err_ptr = try addZIRUnOp(mod, &then_scope.base, src, .unwrap_err_code, operand_ptr);
err_val_scope = .{
- .parent = &err_scope.base,
- .gen_zir = &err_scope,
+ .parent = &then_scope.base,
+ .gen_zir = &then_scope,
.name = err_name,
- .inst = try addZIRUnOp(mod, &err_scope.base, src, .deref, unwrapped_err_ptr),
+ .inst = try addZIRUnOp(mod, &then_scope.base, src, .deref, unwrapped_err_ptr),
};
break :blk &err_val_scope.base;
};
- _ = try addZIRInst(mod, &err_scope.base, src, zir.Inst.Break, .{
+ _ = try addZIRInst(mod, &then_scope.base, src, zir.Inst.Break, .{
.block = block,
- .operand = try expr(mod, err_sub_scope, branch_rl, node.rhs),
+ .operand = try expr(mod, then_sub_scope, branch_rl, rhs),
}, .{});
- var not_err_scope: Scope.GenZIR = .{
+ var else_scope: Scope.GenZIR = .{
.parent = scope,
.decl = block_scope.decl,
.arena = block_scope.arena,
.instructions = .{},
};
- defer not_err_scope.instructions.deinit(mod.gpa);
+ defer else_scope.instructions.deinit(mod.gpa);
- const unwrapped_payload = try addZIRUnOp(mod, &not_err_scope.base, src, .unwrap_err_unsafe, err_union_ptr);
- _ = try addZIRInst(mod, &not_err_scope.base, src, zir.Inst.Break, .{
+ const unwrapped_payload = try addZIRUnOp(mod, &else_scope.base, src, unwrap_op, operand_ptr);
+ _ = try addZIRInst(mod, &else_scope.base, src, zir.Inst.Break, .{
.block = block,
.operand = unwrapped_payload,
}, .{});
- condbr.positionals.then_body = .{ .instructions = try err_scope.arena.dupe(*zir.Inst, err_scope.instructions.items) };
- condbr.positionals.else_body = .{ .instructions = try not_err_scope.arena.dupe(*zir.Inst, not_err_scope.instructions.items) };
- return rlWrap(mod, scope, rl, &block.base);
+ condbr.positionals.then_body = .{ .instructions = try then_scope.arena.dupe(*zir.Inst, then_scope.instructions.items) };
+ condbr.positionals.else_body = .{ .instructions = try else_scope.arena.dupe(*zir.Inst, else_scope.instructions.items) };
+ return rlWrapPtr(mod, scope, rl, &block.base);
}
/// Return whether the identifier names of two tokens are equal. Resolves @"" tokens without allocating.
@@ -933,6 +951,36 @@ fn arrayAccess(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Array
return rlWrapPtr(mod, scope, rl, try addZIRInst(mod, scope, src, zir.Inst.ElemPtr, .{ .array_ptr = array_ptr, .index = index }, .{}));
}
+fn sliceExpr(mod: *Module, scope: *Scope, node: *ast.Node.Slice) InnerError!*zir.Inst {
+ const tree = scope.tree();
+ const src = tree.token_locs[node.rtoken].start;
+
+ const usize_type = try addZIRInstConst(mod, scope, src, .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.usize_type),
+ });
+
+ const array_ptr = try expr(mod, scope, .ref, node.lhs);
+ const start = try expr(mod, scope, .{ .ty = usize_type }, node.start);
+
+ if (node.end == null and node.sentinel == null) {
+ return try addZIRBinOp(mod, scope, src, .slice_start, array_ptr, start);
+ }
+
+ const end = if (node.end) |end| try expr(mod, scope, .{ .ty = usize_type }, end) else null;
+ // we could get the child type here, but it is easier to just do it in semantic analysis.
+ const sentinel = if (node.sentinel) |sentinel| try expr(mod, scope, .none, sentinel) else null;
+
+ return try addZIRInst(
+ mod,
+ scope,
+ src,
+ zir.Inst.Slice,
+ .{ .array_ptr = array_ptr, .start = start },
+ .{ .end = end, .sentinel = sentinel },
+ );
+}
+
fn deref(mod: *Module, scope: *Scope, node: *ast.Node.SimpleSuffixOp) InnerError!*zir.Inst {
const tree = scope.tree();
const src = tree.token_locs[node.rtoken].start;
diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig
index d6e3194c12..9405a5f72c 100644
--- a/src-self-hosted/codegen.zig
+++ b/src-self-hosted/codegen.zig
@@ -59,14 +59,21 @@ pub const GenerateSymbolError = error{
AnalysisFail,
};
+pub const DebugInfoOutput = union(enum) {
+ dwarf: struct {
+ dbg_line: *std.ArrayList(u8),
+ dbg_info: *std.ArrayList(u8),
+ dbg_info_type_relocs: *link.File.DbgInfoTypeRelocsTable,
+ },
+ none,
+};
+
pub fn generateSymbol(
bin_file: *link.File,
src: usize,
typed_value: TypedValue,
code: *std.ArrayList(u8),
- dbg_line: *std.ArrayList(u8),
- dbg_info: *std.ArrayList(u8),
- dbg_info_type_relocs: *link.File.DbgInfoTypeRelocsTable,
+ debug_output: DebugInfoOutput,
) GenerateSymbolError!Result {
const tracy = trace(@src());
defer tracy.end();
@@ -76,70 +83,70 @@ pub fn generateSymbol(
switch (bin_file.options.target.cpu.arch) {
.wasm32 => unreachable, // has its own code path
.wasm64 => unreachable, // has its own code path
- .arm => return Function(.arm).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- .armeb => return Function(.armeb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.aarch64 => return Function(.aarch64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.aarch64_be => return Function(.aarch64_be).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.aarch64_32 => return Function(.aarch64_32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.arc => return Function(.arc).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.avr => return Function(.avr).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.bpfel => return Function(.bpfel).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.bpfeb => return Function(.bpfeb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.hexagon => return Function(.hexagon).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.mips => return Function(.mips).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.mipsel => return Function(.mipsel).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.mips64 => return Function(.mips64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.mips64el => return Function(.mips64el).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.msp430 => return Function(.msp430).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.powerpc => return Function(.powerpc).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.powerpc64 => return Function(.powerpc64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.powerpc64le => return Function(.powerpc64le).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.r600 => return Function(.r600).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.amdgcn => return Function(.amdgcn).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.riscv32 => return Function(.riscv32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- .riscv64 => return Function(.riscv64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.sparc => return Function(.sparc).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.sparcv9 => return Function(.sparcv9).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.sparcel => return Function(.sparcel).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.s390x => return Function(.s390x).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- .spu_2 => return Function(.spu_2).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.tce => return Function(.tce).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.tcele => return Function(.tcele).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.thumb => return Function(.thumb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.thumbeb => return Function(.thumbeb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.i386 => return Function(.i386).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- .x86_64 => return Function(.x86_64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.xcore => return Function(.xcore).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.nvptx => return Function(.nvptx).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.nvptx64 => return Function(.nvptx64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.le32 => return Function(.le32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.le64 => return Function(.le64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.amdil => return Function(.amdil).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.amdil64 => return Function(.amdil64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.hsail => return Function(.hsail).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.hsail64 => return Function(.hsail64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.spir => return Function(.spir).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.spir64 => return Function(.spir64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.kalimba => return Function(.kalimba).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.shave => return Function(.shave).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.lanai => return Function(.lanai).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.renderscript32 => return Function(.renderscript32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.renderscript64 => return Function(.renderscript64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.ve => return Function(.ve).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ .arm => return Function(.arm).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ .armeb => return Function(.armeb).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.aarch64 => return Function(.aarch64).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.aarch64_be => return Function(.aarch64_be).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.aarch64_32 => return Function(.aarch64_32).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.arc => return Function(.arc).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.avr => return Function(.avr).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.bpfel => return Function(.bpfel).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.bpfeb => return Function(.bpfeb).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.hexagon => return Function(.hexagon).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.mips => return Function(.mips).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.mipsel => return Function(.mipsel).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.mips64 => return Function(.mips64).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.mips64el => return Function(.mips64el).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.msp430 => return Function(.msp430).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.powerpc => return Function(.powerpc).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.powerpc64 => return Function(.powerpc64).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.powerpc64le => return Function(.powerpc64le).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.r600 => return Function(.r600).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.amdgcn => return Function(.amdgcn).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.riscv32 => return Function(.riscv32).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ .riscv64 => return Function(.riscv64).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.sparc => return Function(.sparc).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.sparcv9 => return Function(.sparcv9).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.sparcel => return Function(.sparcel).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.s390x => return Function(.s390x).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ .spu_2 => return Function(.spu_2).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.tce => return Function(.tce).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.tcele => return Function(.tcele).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.thumb => return Function(.thumb).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.thumbeb => return Function(.thumbeb).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.i386 => return Function(.i386).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ .x86_64 => return Function(.x86_64).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.xcore => return Function(.xcore).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.nvptx => return Function(.nvptx).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.nvptx64 => return Function(.nvptx64).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.le32 => return Function(.le32).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.le64 => return Function(.le64).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.amdil => return Function(.amdil).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.amdil64 => return Function(.amdil64).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.hsail => return Function(.hsail).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.hsail64 => return Function(.hsail64).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.spir => return Function(.spir).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.spir64 => return Function(.spir64).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.kalimba => return Function(.kalimba).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.shave => return Function(.shave).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.lanai => return Function(.lanai).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.renderscript32 => return Function(.renderscript32).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.renderscript64 => return Function(.renderscript64).generateSymbol(bin_file, src, typed_value, code, debug_output),
+ //.ve => return Function(.ve).generateSymbol(bin_file, src, typed_value, code, debug_output),
else => @panic("Backend architectures that don't have good support yet are commented out, to improve compilation performance. If you are interested in one of these other backends feel free to uncomment them. Eventually these will be completed, but stage1 is slow and a memory hog."),
}
},
.Array => {
// TODO populate .debug_info for the array
if (typed_value.val.cast(Value.Payload.Bytes)) |payload| {
- if (typed_value.ty.arraySentinel()) |sentinel| {
+ if (typed_value.ty.sentinel()) |sentinel| {
try code.ensureCapacity(code.items.len + payload.data.len + 1);
code.appendSliceAssumeCapacity(payload.data);
const prev_len = code.items.len;
switch (try generateSymbol(bin_file, src, .{
.ty = typed_value.ty.elemType(),
.val = sentinel,
- }, code, dbg_line, dbg_info, dbg_info_type_relocs)) {
+ }, code, debug_output)) {
.appended => return Result{ .appended = {} },
.externally_managed => |slice| {
code.appendSliceAssumeCapacity(slice);
@@ -239,9 +246,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
target: *const std.Target,
mod_fn: *const Module.Fn,
code: *std.ArrayList(u8),
- dbg_line: *std.ArrayList(u8),
- dbg_info: *std.ArrayList(u8),
- dbg_info_type_relocs: *link.File.DbgInfoTypeRelocsTable,
+ debug_output: DebugInfoOutput,
err_msg: ?*ErrorMsg,
args: []MCValue,
ret_mcv: MCValue,
@@ -419,9 +424,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
src: usize,
typed_value: TypedValue,
code: *std.ArrayList(u8),
- dbg_line: *std.ArrayList(u8),
- dbg_info: *std.ArrayList(u8),
- dbg_info_type_relocs: *link.File.DbgInfoTypeRelocsTable,
+ debug_output: DebugInfoOutput,
) GenerateSymbolError!Result {
const module_fn = typed_value.val.cast(Value.Payload.Function).?.func;
@@ -436,8 +439,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
try branch_stack.append(.{});
const src_data: struct {lbrace_src: usize, rbrace_src: usize, source: []const u8} = blk: {
- if (module_fn.owner_decl.scope.cast(Module.Scope.File)) |scope_file| {
- const tree = scope_file.contents.tree;
+ if (module_fn.owner_decl.scope.cast(Module.Scope.Container)) |container_scope| {
+ const tree = container_scope.file_scope.contents.tree;
const fn_proto = tree.root_node.decls()[module_fn.owner_decl.src_index].castTag(.FnProto).?;
const block = fn_proto.getBodyNode().?.castTag(.Block).?;
const lbrace_src = tree.token_locs[block.lbrace].start;
@@ -457,9 +460,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.bin_file = bin_file,
.mod_fn = module_fn,
.code = code,
- .dbg_line = dbg_line,
- .dbg_info = dbg_info,
- .dbg_info_type_relocs = dbg_info_type_relocs,
+ .debug_output = debug_output,
.err_msg = null,
.args = undefined, // populated after `resolveCallingConventionValues`
.ret_mcv = undefined, // populated after `resolveCallingConventionValues`
@@ -598,35 +599,50 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
fn dbgSetPrologueEnd(self: *Self) InnerError!void {
- try self.dbg_line.append(DW.LNS_set_prologue_end);
- try self.dbgAdvancePCAndLine(self.prev_di_src);
+ switch (self.debug_output) {
+ .dwarf => |dbg_out| {
+ try dbg_out.dbg_line.append(DW.LNS_set_prologue_end);
+ try self.dbgAdvancePCAndLine(self.prev_di_src);
+ },
+ .none => {},
+ }
}
fn dbgSetEpilogueBegin(self: *Self) InnerError!void {
- try self.dbg_line.append(DW.LNS_set_epilogue_begin);
- try self.dbgAdvancePCAndLine(self.prev_di_src);
+ switch (self.debug_output) {
+ .dwarf => |dbg_out| {
+ try dbg_out.dbg_line.append(DW.LNS_set_epilogue_begin);
+ try self.dbgAdvancePCAndLine(self.prev_di_src);
+ },
+ .none => {},
+ }
}
fn dbgAdvancePCAndLine(self: *Self, src: usize) InnerError!void {
- // TODO Look into improving the performance here by adding a token-index-to-line
- // lookup table, and changing ir.Inst from storing byte offset to token. Currently
- // this involves scanning over the source code for newlines
- // (but only from the previous byte offset to the new one).
- const delta_line = std.zig.lineDelta(self.source, self.prev_di_src, src);
- const delta_pc = self.code.items.len - self.prev_di_pc;
self.prev_di_src = src;
self.prev_di_pc = self.code.items.len;
- // TODO Look into using the DWARF special opcodes to compress this data. It lets you emit
- // single-byte opcodes that add different numbers to both the PC and the line number
- // at the same time.
- try self.dbg_line.ensureCapacity(self.dbg_line.items.len + 11);
- self.dbg_line.appendAssumeCapacity(DW.LNS_advance_pc);
- leb128.writeULEB128(self.dbg_line.writer(), delta_pc) catch unreachable;
- if (delta_line != 0) {
- self.dbg_line.appendAssumeCapacity(DW.LNS_advance_line);
- leb128.writeILEB128(self.dbg_line.writer(), delta_line) catch unreachable;
+ switch (self.debug_output) {
+ .dwarf => |dbg_out| {
+ // TODO Look into improving the performance here by adding a token-index-to-line
+ // lookup table, and changing ir.Inst from storing byte offset to token. Currently
+ // this involves scanning over the source code for newlines
+ // (but only from the previous byte offset to the new one).
+ const delta_line = std.zig.lineDelta(self.source, self.prev_di_src, src);
+ const delta_pc = self.code.items.len - self.prev_di_pc;
+ // TODO Look into using the DWARF special opcodes to compress this data. It lets you emit
+ // single-byte opcodes that add different numbers to both the PC and the line number
+ // at the same time.
+ try dbg_out.dbg_line.ensureCapacity(dbg_out.dbg_line.items.len + 11);
+ dbg_out.dbg_line.appendAssumeCapacity(DW.LNS_advance_pc);
+ leb128.writeULEB128(dbg_out.dbg_line.writer(), delta_pc) catch unreachable;
+ if (delta_line != 0) {
+ dbg_out.dbg_line.appendAssumeCapacity(DW.LNS_advance_line);
+ leb128.writeILEB128(dbg_out.dbg_line.writer(), delta_line) catch unreachable;
+ }
+ dbg_out.dbg_line.appendAssumeCapacity(DW.LNS_copy);
+ },
+ .none => {},
}
- self.dbg_line.appendAssumeCapacity(DW.LNS_copy);
}
/// Asserts there is already capacity to insert into top branch inst_table.
@@ -654,18 +670,23 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
/// Adds a Type to the .debug_info at the current position. The bytes will be populated later,
/// after codegen for this symbol is done.
fn addDbgInfoTypeReloc(self: *Self, ty: Type) !void {
- assert(ty.hasCodeGenBits());
- const index = self.dbg_info.items.len;
- try self.dbg_info.resize(index + 4); // DW.AT_type, DW.FORM_ref4
-
- const gop = try self.dbg_info_type_relocs.getOrPut(self.gpa, ty);
- if (!gop.found_existing) {
- gop.entry.value = .{
- .off = undefined,
- .relocs = .{},
- };
+ switch (self.debug_output) {
+ .dwarf => |dbg_out| {
+ assert(ty.hasCodeGenBits());
+ const index = dbg_out.dbg_info.items.len;
+ try dbg_out.dbg_info.resize(index + 4); // DW.AT_type, DW.FORM_ref4
+
+ const gop = try dbg_out.dbg_info_type_relocs.getOrPut(self.gpa, ty);
+ if (!gop.found_existing) {
+ gop.entry.value = .{
+ .off = undefined,
+ .relocs = .{},
+ };
+ }
+ try gop.entry.value.relocs.append(self.gpa, @intCast(u32, index));
+ },
+ .none => {},
}
- try gop.entry.value.relocs.append(self.gpa, @intCast(u32, index));
}
fn genFuncInst(self: *Self, inst: *ir.Inst) !MCValue {
@@ -1258,14 +1279,19 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
self.registers.putAssumeCapacityNoClobber(toCanonicalReg(reg), &inst.base);
self.markRegUsed(reg);
- try self.dbg_info.ensureCapacity(self.dbg_info.items.len + 8 + name_with_null.len);
- self.dbg_info.appendAssumeCapacity(link.File.Elf.abbrev_parameter);
- self.dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT_location, DW.FORM_exprloc
- 1, // ULEB128 dwarf expression length
- reg.dwarfLocOp(),
- });
- try self.addDbgInfoTypeReloc(inst.base.ty); // DW.AT_type, DW.FORM_ref4
- self.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT_name, DW.FORM_string
+ switch (self.debug_output) {
+ .dwarf => |dbg_out| {
+ try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 8 + name_with_null.len);
+ dbg_out.dbg_info.appendAssumeCapacity(link.File.Elf.abbrev_parameter);
+ dbg_out.dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT_location, DW.FORM_exprloc
+ 1, // ULEB128 dwarf expression length
+ reg.dwarfLocOp(),
+ });
+ try self.addDbgInfoTypeReloc(inst.base.ty); // DW.AT_type, DW.FORM_ref4
+ dbg_out.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT_name, DW.FORM_string
+ },
+ .none => {},
+ }
},
else => {},
}
@@ -1302,7 +1328,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
// Due to incremental compilation, how function calls are generated depends
// on linking.
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
+ if (self.bin_file.tag == link.File.Elf.base_tag or self.bin_file.tag == link.File.Coff.base_tag) {
switch (arch) {
.x86_64 => {
for (info.args) |mc_arg, arg_i| {
@@ -1341,10 +1367,17 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
if (inst.func.cast(ir.Inst.Constant)) |func_inst| {
if (func_inst.val.cast(Value.Payload.Function)) |func_val| {
const func = func_val.func;
- const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
+
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
- const got_addr = @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
+ const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
+ const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
+ break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
+ } else if (self.bin_file.cast(link.File.Coff)) |coff_file|
+ @intCast(u32, coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * ptr_bytes)
+ else
+ unreachable;
+
// ff 14 25 xx xx xx xx call [addr]
try self.code.ensureCapacity(self.code.items.len + 7);
self.code.appendSliceAssumeCapacity(&[3]u8{ 0xff, 0x14, 0x25 });
@@ -1362,10 +1395,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
if (inst.func.cast(ir.Inst.Constant)) |func_inst| {
if (func_inst.val.cast(Value.Payload.Function)) |func_val| {
const func = func_val.func;
- const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
+
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
- const got_addr = @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
+ const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
+ const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
+ break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
+ } else if (self.bin_file.cast(link.File.Coff)) |coff_file|
+ coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * ptr_bytes
+ else
+ unreachable;
try self.genSetReg(inst.base.src, .ra, .{ .memory = got_addr });
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.jalr(.ra, 0, .ra).toU32());
@@ -1383,8 +1422,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
if (func_inst.val.cast(Value.Payload.Function)) |func_val| {
const func = func_val.func;
- const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
- const got_addr = @intCast(u16, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * 2);
+ const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
+ const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
+ break :blk @intCast(u16, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * 2);
+ } else if (self.bin_file.cast(link.File.Coff)) |coff_file|
+ @intCast(u16, coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * 2)
+ else
+ unreachable;
+
const return_type = func.owner_decl.typed_value.most_recent.typed_value.ty.fnReturnType();
// First, push the return address, then jump; if noreturn, don't bother with the first step
// TODO: implement packed struct -> u16 at comptime and move the bitcast here
@@ -1420,10 +1465,15 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
if (inst.func.cast(ir.Inst.Constant)) |func_inst| {
if (func_inst.val.cast(Value.Payload.Function)) |func_val| {
const func = func_val.func;
- const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
- const got_addr = @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
+ const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
+ const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
+ break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
+ } else if (self.bin_file.cast(link.File.Coff)) |coff_file|
+ coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * ptr_bytes
+ else
+ unreachable;
// TODO only works with leaf functions
// at the moment, which works fine for
@@ -1443,7 +1493,57 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
switch (arch) {
- .x86_64 => return self.fail(inst.base.src, "TODO implement codegen for call when linking with MachO for x86_64 arch", .{}),
+ .x86_64 => {
+ for (info.args) |mc_arg, arg_i| {
+ const arg = inst.args[arg_i];
+ const arg_mcv = try self.resolveInst(inst.args[arg_i]);
+ // Here we do not use setRegOrMem even though the logic is similar, because
+ // the function call will move the stack pointer, so the offsets are different.
+ switch (mc_arg) {
+ .none => continue,
+ .register => |reg| {
+ try self.genSetReg(arg.src, reg, arg_mcv);
+ // TODO interact with the register allocator to mark the instruction as moved.
+ },
+ .stack_offset => {
+ // Here we need to emit instructions like this:
+ // mov qword ptr [rsp + stack_offset], x
+ return self.fail(inst.base.src, "TODO implement calling with parameters in memory", .{});
+ },
+ .ptr_stack_offset => {
+ return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_stack_offset arg", .{});
+ },
+ .ptr_embedded_in_code => {
+ return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
+ },
+ .undef => unreachable,
+ .immediate => unreachable,
+ .unreach => unreachable,
+ .dead => unreachable,
+ .embedded_in_code => unreachable,
+ .memory => unreachable,
+ .compare_flags_signed => unreachable,
+ .compare_flags_unsigned => unreachable,
+ }
+ }
+
+ if (inst.func.cast(ir.Inst.Constant)) |func_inst| {
+ if (func_inst.val.cast(Value.Payload.Function)) |func_val| {
+ const func = func_val.func;
+ const got = &macho_file.sections.items[macho_file.got_section_index.?];
+ const ptr_bytes = 8;
+ const got_addr = @intCast(u32, got.addr + func.owner_decl.link.macho.offset_table_index.? * ptr_bytes);
+ // ff 14 25 xx xx xx xx call [addr]
+ try self.code.ensureCapacity(self.code.items.len + 7);
+ self.code.appendSliceAssumeCapacity(&[3]u8{ 0xff, 0x14, 0x25 });
+ mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), got_addr);
+ } else {
+ return self.fail(inst.base.src, "TODO implement calling bitcasted functions", .{});
+ }
+ } else {
+ return self.fail(inst.base.src, "TODO implement calling runtime known function pointer", .{});
+ }
+ },
.aarch64 => return self.fail(inst.base.src, "TODO implement codegen for call when linking with MachO for aarch64 arch", .{}),
else => unreachable,
}
@@ -1933,7 +2033,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
if (mem.eql(u8, inst.asm_source, "syscall")) {
try self.code.appendSlice(&[_]u8{ 0x0f, 0x05 });
- } else {
+ } else if (inst.asm_source.len != 0) {
return self.fail(inst.base.src, "TODO implement support for more x86 assembly instructions", .{});
}
@@ -2486,6 +2586,15 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
+ } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
+ const decl = payload.decl;
+ const got = &macho_file.sections.items[macho_file.got_section_index.?];
+ const got_addr = got.addr + decl.link.macho.offset_table_index.? * ptr_bytes;
+ return MCValue{ .memory = got_addr };
+ } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
+ const decl = payload.decl;
+ const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
+ return MCValue{ .memory = got_addr };
} else {
return self.fail(src, "TODO codegen non-ELF const Decl pointer", .{});
}
diff --git a/src-self-hosted/codegen/c.zig b/src-self-hosted/codegen/c.zig
index c037c55289..34ddcfbb3b 100644
--- a/src-self-hosted/codegen/c.zig
+++ b/src-self-hosted/codegen/c.zig
@@ -85,7 +85,7 @@ fn genArray(file: *C, decl: *Decl) !void {
const name = try map(file.base.allocator, mem.span(decl.name));
defer file.base.allocator.free(name);
if (tv.val.cast(Value.Payload.Bytes)) |payload|
- if (tv.ty.arraySentinel()) |sentinel|
+ if (tv.ty.sentinel()) |sentinel|
if (sentinel.toUnsignedInt() == 0)
try file.constants.writer().print("const char *const {} = \"{}\";\n", .{ name, payload.data })
else
diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig
index ecf3876582..fff69a6bbd 100644
--- a/src-self-hosted/link.zig
+++ b/src-self-hosted/link.zig
@@ -34,6 +34,7 @@ pub const File = struct {
pub const LinkBlock = union {
elf: Elf.TextBlock,
+ coff: Coff.TextBlock,
macho: MachO.TextBlock,
c: void,
wasm: void,
@@ -41,6 +42,7 @@ pub const File = struct {
pub const LinkFn = union {
elf: Elf.SrcFn,
+ coff: Coff.SrcFn,
macho: MachO.SrcFn,
c: void,
wasm: ?Wasm.FnData,
@@ -66,7 +68,7 @@ pub const File = struct {
pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: Options) !*File {
switch (options.object_format) {
.unknown => unreachable,
- .coff => return error.TODOImplementCoff,
+ .coff, .pe => return Coff.openPath(allocator, dir, sub_path, options),
.elf => return Elf.openPath(allocator, dir, sub_path, options),
.macho => return MachO.openPath(allocator, dir, sub_path, options),
.wasm => return Wasm.openPath(allocator, dir, sub_path, options),
@@ -85,7 +87,7 @@ pub const File = struct {
pub fn makeWritable(base: *File, dir: fs.Dir, sub_path: []const u8) !void {
switch (base.tag) {
- .elf, .macho => {
+ .coff, .elf, .macho => {
if (base.file != null) return;
base.file = try dir.createFile(sub_path, .{
.truncate = false,
@@ -112,6 +114,7 @@ pub const File = struct {
/// after allocateDeclIndexes for any given Decl.
pub fn updateDecl(base: *File, module: *Module, decl: *Module.Decl) !void {
switch (base.tag) {
+ .coff => return @fieldParentPtr(Coff, "base", base).updateDecl(module, decl),
.elf => return @fieldParentPtr(Elf, "base", base).updateDecl(module, decl),
.macho => return @fieldParentPtr(MachO, "base", base).updateDecl(module, decl),
.c => return @fieldParentPtr(C, "base", base).updateDecl(module, decl),
@@ -121,6 +124,7 @@ pub const File = struct {
pub fn updateDeclLineNumber(base: *File, module: *Module, decl: *Module.Decl) !void {
switch (base.tag) {
+ .coff => return @fieldParentPtr(Coff, "base", base).updateDeclLineNumber(module, decl),
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclLineNumber(module, decl),
.macho => return @fieldParentPtr(MachO, "base", base).updateDeclLineNumber(module, decl),
.c, .wasm => {},
@@ -131,6 +135,7 @@ pub const File = struct {
/// any given Decl.
pub fn allocateDeclIndexes(base: *File, decl: *Module.Decl) !void {
switch (base.tag) {
+ .coff => return @fieldParentPtr(Coff, "base", base).allocateDeclIndexes(decl),
.elf => return @fieldParentPtr(Elf, "base", base).allocateDeclIndexes(decl),
.macho => return @fieldParentPtr(MachO, "base", base).allocateDeclIndexes(decl),
.c, .wasm => {},
@@ -140,6 +145,7 @@ pub const File = struct {
pub fn deinit(base: *File) void {
if (base.file) |f| f.close();
switch (base.tag) {
+ .coff => @fieldParentPtr(Coff, "base", base).deinit(),
.elf => @fieldParentPtr(Elf, "base", base).deinit(),
.macho => @fieldParentPtr(MachO, "base", base).deinit(),
.c => @fieldParentPtr(C, "base", base).deinit(),
@@ -149,6 +155,11 @@ pub const File = struct {
pub fn destroy(base: *File) void {
switch (base.tag) {
+ .coff => {
+ const parent = @fieldParentPtr(Coff, "base", base);
+ parent.deinit();
+ base.allocator.destroy(parent);
+ },
.elf => {
const parent = @fieldParentPtr(Elf, "base", base);
parent.deinit();
@@ -177,6 +188,7 @@ pub const File = struct {
defer tracy.end();
try switch (base.tag) {
+ .coff => @fieldParentPtr(Coff, "base", base).flush(module),
.elf => @fieldParentPtr(Elf, "base", base).flush(module),
.macho => @fieldParentPtr(MachO, "base", base).flush(module),
.c => @fieldParentPtr(C, "base", base).flush(module),
@@ -186,6 +198,7 @@ pub const File = struct {
pub fn freeDecl(base: *File, decl: *Module.Decl) void {
switch (base.tag) {
+ .coff => @fieldParentPtr(Coff, "base", base).freeDecl(decl),
.elf => @fieldParentPtr(Elf, "base", base).freeDecl(decl),
.macho => @fieldParentPtr(MachO, "base", base).freeDecl(decl),
.c => unreachable,
@@ -195,6 +208,7 @@ pub const File = struct {
pub fn errorFlags(base: *File) ErrorFlags {
return switch (base.tag) {
+ .coff => @fieldParentPtr(Coff, "base", base).error_flags,
.elf => @fieldParentPtr(Elf, "base", base).error_flags,
.macho => @fieldParentPtr(MachO, "base", base).error_flags,
.c => return .{ .no_entry_point_found = false },
@@ -211,6 +225,7 @@ pub const File = struct {
exports: []const *Module.Export,
) !void {
switch (base.tag) {
+ .coff => return @fieldParentPtr(Coff, "base", base).updateDeclExports(module, decl, exports),
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclExports(module, decl, exports),
.macho => return @fieldParentPtr(MachO, "base", base).updateDeclExports(module, decl, exports),
.c => return {},
@@ -220,6 +235,7 @@ pub const File = struct {
pub fn getDeclVAddr(base: *File, decl: *const Module.Decl) u64 {
switch (base.tag) {
+ .coff => return @fieldParentPtr(Coff, "base", base).getDeclVAddr(decl),
.elf => return @fieldParentPtr(Elf, "base", base).getDeclVAddr(decl),
.macho => return @fieldParentPtr(MachO, "base", base).getDeclVAddr(decl),
.c => unreachable,
@@ -228,6 +244,7 @@ pub const File = struct {
}
pub const Tag = enum {
+ coff,
elf,
macho,
c,
@@ -239,6 +256,7 @@ pub const File = struct {
};
pub const C = @import("link/C.zig");
+ pub const Coff = @import("link/Coff.zig");
pub const Elf = @import("link/Elf.zig");
pub const MachO = @import("link/MachO.zig");
pub const Wasm = @import("link/Wasm.zig");
diff --git a/src-self-hosted/link/Coff.zig b/src-self-hosted/link/Coff.zig
new file mode 100644
index 0000000000..4d1f95e567
--- /dev/null
+++ b/src-self-hosted/link/Coff.zig
@@ -0,0 +1,792 @@
+const Coff = @This();
+
+const std = @import("std");
+const log = std.log.scoped(.link);
+const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+const fs = std.fs;
+
+const trace = @import("../tracy.zig").trace;
+const Module = @import("../Module.zig");
+const codegen = @import("../codegen.zig");
+const link = @import("../link.zig");
+
+const allocation_padding = 4 / 3;
+const minimum_text_block_size = 64 * allocation_padding;
+
+const section_alignment = 4096;
+const file_alignment = 512;
+const image_base = 0x400_000;
+const section_table_size = 2 * 40;
+comptime {
+ std.debug.assert(std.mem.isAligned(image_base, section_alignment));
+}
+
+pub const base_tag: link.File.Tag = .coff;
+
+const msdos_stub = @embedFile("msdos-stub.bin");
+
+base: link.File,
+ptr_width: enum { p32, p64 },
+error_flags: link.File.ErrorFlags = .{},
+
+text_block_free_list: std.ArrayListUnmanaged(*TextBlock) = .{},
+last_text_block: ?*TextBlock = null,
+
+/// Section table file pointer.
+section_table_offset: u32 = 0,
+/// Section data file pointer.
+section_data_offset: u32 = 0,
+/// Optional header file pointer.
+optional_header_offset: u32 = 0,
+
+/// Absolute virtual address of the offset table when the executable is loaded in memory.
+offset_table_virtual_address: u32 = 0,
+/// Current size of the offset table on disk, must be a multiple of `file_alignment`
+offset_table_size: u32 = 0,
+/// Contains absolute virtual addresses
+offset_table: std.ArrayListUnmanaged(u64) = .{},
+/// Free list of offset table indices
+offset_table_free_list: std.ArrayListUnmanaged(u32) = .{},
+
+/// Virtual address of the entry point procedure relative to `image_base`
+entry_addr: ?u32 = null,
+
+/// Absolute virtual address of the text section when the executable is loaded in memory.
+text_section_virtual_address: u32 = 0,
+/// Current size of the `.text` section on disk, must be a multiple of `file_alignment`
+text_section_size: u32 = 0,
+
+offset_table_size_dirty: bool = false,
+text_section_size_dirty: bool = false,
+/// This flag is set when the virtual size of the whole image file when loaded in memory has changed
+/// and needs to be updated in the optional header.
+size_of_image_dirty: bool = false,
+
+pub const TextBlock = struct {
+ /// Offset of the code relative to the start of the text section
+ text_offset: u32,
+ /// Used size of the text block
+ size: u32,
+ /// This field is undefined for symbols with size = 0.
+ offset_table_index: u32,
+ /// Points to the previous and next neighbors, based on the `text_offset`.
+ /// This can be used to find, for example, the capacity of this `TextBlock`.
+ prev: ?*TextBlock,
+ next: ?*TextBlock,
+
+ pub const empty = TextBlock{
+ .text_offset = 0,
+ .size = 0,
+ .offset_table_index = undefined,
+ .prev = null,
+ .next = null,
+ };
+
+ /// Returns how much room there is to grow in virtual address space.
+ fn capacity(self: TextBlock) u64 {
+ if (self.next) |next| {
+ return next.text_offset - self.text_offset;
+ }
+ // This is the last block, the capacity is only limited by the address space.
+ return std.math.maxInt(u32) - self.text_offset;
+ }
+
+ fn freeListEligible(self: TextBlock) bool {
+ // No need to keep a free list node for the last block.
+ const next = self.next orelse return false;
+ const cap = next.text_offset - self.text_offset;
+ const ideal_cap = self.size * allocation_padding;
+ if (cap <= ideal_cap) return false;
+ const surplus = cap - ideal_cap;
+ return surplus >= minimum_text_block_size;
+ }
+
+ /// Absolute virtual address of the text block when the file is loaded in memory.
+ fn getVAddr(self: TextBlock, coff: Coff) u32 {
+ return coff.text_section_virtual_address + self.text_offset;
+ }
+};
+
+pub const SrcFn = void;
+
+pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: link.Options) !*link.File {
+ assert(options.object_format == .coff);
+
+ const file = try dir.createFile(sub_path, .{ .truncate = false, .read = true, .mode = link.determineMode(options) });
+ errdefer file.close();
+
+ var coff_file = try allocator.create(Coff);
+ errdefer allocator.destroy(coff_file);
+
+ coff_file.* = openFile(allocator, file, options) catch |err| switch (err) {
+ error.IncrFailed => try createFile(allocator, file, options),
+ else => |e| return e,
+ };
+
+ return &coff_file.base;
+}
+
+/// Returns error.IncrFailed if incremental update could not be performed.
+fn openFile(allocator: *Allocator, file: fs.File, options: link.Options) !Coff {
+ switch (options.output_mode) {
+ .Exe => {},
+ .Obj => return error.IncrFailed,
+ .Lib => return error.IncrFailed,
+ }
+ var self: Coff = .{
+ .base = .{
+ .file = file,
+ .tag = .coff,
+ .options = options,
+ .allocator = allocator,
+ },
+ .ptr_width = switch (options.target.cpu.arch.ptrBitWidth()) {
+ 32 => .p32,
+ 64 => .p64,
+ else => return error.UnsupportedCOFFArchitecture,
+ },
+ };
+ errdefer self.deinit();
+
+ // TODO implement reading the PE/COFF file
+ return error.IncrFailed;
+}
+
+/// Truncates the existing file contents and overwrites the contents.
+/// Returns an error if `file` is not already open with +read +write +seek abilities.
+fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !Coff {
+ // TODO Write object specific relocations, COFF symbol table, then enable object file output.
+ switch (options.output_mode) {
+ .Exe => {},
+ .Obj => return error.TODOImplementWritingObjFiles,
+ .Lib => return error.TODOImplementWritingLibFiles,
+ }
+ var self: Coff = .{
+ .base = .{
+ .tag = .coff,
+ .options = options,
+ .allocator = allocator,
+ .file = file,
+ },
+ .ptr_width = switch (options.target.cpu.arch.ptrBitWidth()) {
+ 32 => .p32,
+ 64 => .p64,
+ else => return error.UnsupportedCOFFArchitecture,
+ },
+ };
+ errdefer self.deinit();
+
+ var coff_file_header_offset: u32 = 0;
+ if (options.output_mode == .Exe) {
+ // Write the MS-DOS stub and the PE signature
+ try self.base.file.?.pwriteAll(msdos_stub ++ "PE\x00\x00", 0);
+ coff_file_header_offset = msdos_stub.len + 4;
+ }
+
+ // COFF file header
+ const data_directory_count = 0;
+ var hdr_data: [112 + data_directory_count * 8 + section_table_size]u8 = undefined;
+ var index: usize = 0;
+
+ const machine = self.base.options.target.cpu.arch.toCoffMachine();
+ if (machine == .Unknown) {
+ return error.UnsupportedCOFFArchitecture;
+ }
+ std.mem.writeIntLittle(u16, hdr_data[0..2], @enumToInt(machine));
+ index += 2;
+
+ // Number of sections (we only use .got, .text)
+ std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 2);
+ index += 2;
+ // TimeDateStamp (u32), PointerToSymbolTable (u32), NumberOfSymbols (u32)
+ std.mem.set(u8, hdr_data[index..][0..12], 0);
+ index += 12;
+
+ const optional_header_size = switch (options.output_mode) {
+ .Exe => data_directory_count * 8 + switch (self.ptr_width) {
+ .p32 => @as(u16, 96),
+ .p64 => 112,
+ },
+ else => 0,
+ };
+
+ const section_table_offset = coff_file_header_offset + 20 + optional_header_size;
+ const default_offset_table_size = file_alignment;
+ const default_size_of_code = 0;
+
+ self.section_data_offset = std.mem.alignForwardGeneric(u32, section_table_offset + section_table_size, file_alignment);
+ const section_data_relative_virtual_address = std.mem.alignForwardGeneric(u32, section_table_offset + section_table_size, section_alignment);
+ self.offset_table_virtual_address = image_base + section_data_relative_virtual_address;
+ self.offset_table_size = default_offset_table_size;
+ self.section_table_offset = section_table_offset;
+ self.text_section_virtual_address = image_base + section_data_relative_virtual_address + section_alignment;
+ self.text_section_size = default_size_of_code;
+
+ // Size of file when loaded in memory
+ const size_of_image = std.mem.alignForwardGeneric(u32, self.text_section_virtual_address - image_base + default_size_of_code, section_alignment);
+
+ std.mem.writeIntLittle(u16, hdr_data[index..][0..2], optional_header_size);
+ index += 2;
+
+ // Characteristics
+ var characteristics: u16 = std.coff.IMAGE_FILE_DEBUG_STRIPPED | std.coff.IMAGE_FILE_RELOCS_STRIPPED; // TODO Remove debug info stripped flag when necessary
+ if (options.output_mode == .Exe) {
+ characteristics |= std.coff.IMAGE_FILE_EXECUTABLE_IMAGE;
+ }
+ switch (self.ptr_width) {
+ .p32 => characteristics |= std.coff.IMAGE_FILE_32BIT_MACHINE,
+ .p64 => characteristics |= std.coff.IMAGE_FILE_LARGE_ADDRESS_AWARE,
+ }
+ std.mem.writeIntLittle(u16, hdr_data[index..][0..2], characteristics);
+ index += 2;
+
+ assert(index == 20);
+ try self.base.file.?.pwriteAll(hdr_data[0..index], coff_file_header_offset);
+
+ if (options.output_mode == .Exe) {
+ self.optional_header_offset = coff_file_header_offset + 20;
+ // Optional header
+ index = 0;
+ std.mem.writeIntLittle(u16, hdr_data[0..2], switch (self.ptr_width) {
+ .p32 => @as(u16, 0x10b),
+ .p64 => 0x20b,
+ });
+ index += 2;
+
+ // Linker version (u8 + u8)
+ std.mem.set(u8, hdr_data[index..][0..2], 0);
+ index += 2;
+
+ // SizeOfCode (UNUSED, u32), SizeOfInitializedData (u32), SizeOfUninitializedData (u32), AddressOfEntryPoint (u32), BaseOfCode (UNUSED, u32)
+ std.mem.set(u8, hdr_data[index..][0..20], 0);
+ index += 20;
+
+ if (self.ptr_width == .p32) {
+ // Base of data relative to the image base (UNUSED)
+ std.mem.set(u8, hdr_data[index..][0..4], 0);
+ index += 4;
+
+ // Image base address
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], image_base);
+ index += 4;
+ } else {
+ // Image base address
+ std.mem.writeIntLittle(u64, hdr_data[index..][0..8], image_base);
+ index += 8;
+ }
+
+ // Section alignment
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], section_alignment);
+ index += 4;
+ // File alignment
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], file_alignment);
+ index += 4;
+ // Required OS version, 6.0 is vista
+ std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 6);
+ index += 2;
+ std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 0);
+ index += 2;
+ // Image version
+ std.mem.set(u8, hdr_data[index..][0..4], 0);
+ index += 4;
+ // Required subsystem version, same as OS version
+ std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 6);
+ index += 2;
+ std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 0);
+ index += 2;
+ // Reserved zeroes (u32)
+ std.mem.set(u8, hdr_data[index..][0..4], 0);
+ index += 4;
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], size_of_image);
+ index += 4;
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.section_data_offset);
+ index += 4;
+ // CheckSum (u32)
+ std.mem.set(u8, hdr_data[index..][0..4], 0);
+ index += 4;
+ // Subsystem, TODO: Let users specify the subsystem, always CUI for now
+ std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 3);
+ index += 2;
+ // DLL characteristics
+ std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 0x0);
+ index += 2;
+
+ switch (self.ptr_width) {
+ .p32 => {
+ // Size of stack reserve + commit
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x1_000_000);
+ index += 4;
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x1_000);
+ index += 4;
+ // Size of heap reserve + commit
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x100_000);
+ index += 4;
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x1_000);
+ index += 4;
+ },
+ .p64 => {
+ // Size of stack reserve + commit
+ std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x1_000_000);
+ index += 8;
+ std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x1_000);
+ index += 8;
+ // Size of heap reserve + commit
+ std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x100_000);
+ index += 8;
+ std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x1_000);
+ index += 8;
+ },
+ }
+
+ // Reserved zeroes
+ std.mem.set(u8, hdr_data[index..][0..4], 0);
+ index += 4;
+
+ // Number of data directories
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], data_directory_count);
+ index += 4;
+ // Initialize data directories to zero
+ std.mem.set(u8, hdr_data[index..][0 .. data_directory_count * 8], 0);
+ index += data_directory_count * 8;
+
+ assert(index == optional_header_size);
+ }
+
+ // Write section table.
+ // First, the .got section
+ hdr_data[index..][0..8].* = ".got\x00\x00\x00\x00".*;
+ index += 8;
+ if (options.output_mode == .Exe) {
+ // Virtual size (u32)
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], default_offset_table_size);
+ index += 4;
+ // Virtual address (u32)
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.offset_table_virtual_address - image_base);
+ index += 4;
+ } else {
+ std.mem.set(u8, hdr_data[index..][0..8], 0);
+ index += 8;
+ }
+ // Size of raw data (u32)
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], default_offset_table_size);
+ index += 4;
+ // File pointer to the start of the section
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.section_data_offset);
+ index += 4;
+ // Pointer to relocations (u32), PointerToLinenumbers (u32), NumberOfRelocations (u16), NumberOfLinenumbers (u16)
+ std.mem.set(u8, hdr_data[index..][0..12], 0);
+ index += 12;
+ // Section flags
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], std.coff.IMAGE_SCN_CNT_INITIALIZED_DATA | std.coff.IMAGE_SCN_MEM_READ);
+ index += 4;
+ // Then, the .text section
+ hdr_data[index..][0..8].* = ".text\x00\x00\x00".*;
+ index += 8;
+ if (options.output_mode == .Exe) {
+ // Virtual size (u32)
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], default_size_of_code);
+ index += 4;
+ // Virtual address (u32)
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.text_section_virtual_address - image_base);
+ index += 4;
+ } else {
+ std.mem.set(u8, hdr_data[index..][0..8], 0);
+ index += 8;
+ }
+ // Size of raw data (u32)
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], default_size_of_code);
+ index += 4;
+ // File pointer to the start of the section
+ std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.section_data_offset + default_offset_table_size);
+ index += 4;
+ // Pointer to relocations (u32), PointerToLinenumbers (u32), NumberOfRelocations (u16), NumberOfLinenumbers (u16)
+ std.mem.set(u8, hdr_data[index..][0..12], 0);
+ index += 12;
+ // Section flags
+ std.mem.writeIntLittle(
+ u32,
+ hdr_data[index..][0..4],
+ std.coff.IMAGE_SCN_CNT_CODE | std.coff.IMAGE_SCN_MEM_EXECUTE | std.coff.IMAGE_SCN_MEM_READ | std.coff.IMAGE_SCN_MEM_WRITE,
+ );
+ index += 4;
+
+ assert(index == optional_header_size + section_table_size);
+ try self.base.file.?.pwriteAll(hdr_data[0..index], self.optional_header_offset);
+ try self.base.file.?.setEndPos(self.section_data_offset + default_offset_table_size + default_size_of_code);
+
+ return self;
+}
+
+pub fn allocateDeclIndexes(self: *Coff, decl: *Module.Decl) !void {
+ try self.offset_table.ensureCapacity(self.base.allocator, self.offset_table.items.len + 1);
+
+ if (self.offset_table_free_list.popOrNull()) |i| {
+ decl.link.coff.offset_table_index = i;
+ } else {
+ decl.link.coff.offset_table_index = @intCast(u32, self.offset_table.items.len);
+ _ = self.offset_table.addOneAssumeCapacity();
+
+ const entry_size = self.base.options.target.cpu.arch.ptrBitWidth() / 8;
+ if (self.offset_table.items.len > self.offset_table_size / entry_size) {
+ self.offset_table_size_dirty = true;
+ }
+ }
+
+ self.offset_table.items[decl.link.coff.offset_table_index] = 0;
+}
+
+fn allocateTextBlock(self: *Coff, text_block: *TextBlock, new_block_size: u64, alignment: u64) !u64 {
+ const new_block_min_capacity = new_block_size * allocation_padding;
+
+ // We use these to indicate our intention to update metadata, placing the new block,
+ // and possibly removing a free list node.
+ // It would be simpler to do it inside the for loop below, but that would cause a
+ // problem if an error was returned later in the function. So this action
+ // is actually carried out at the end of the function, when errors are no longer possible.
+ var block_placement: ?*TextBlock = null;
+ var free_list_removal: ?usize = null;
+
+ const vaddr = blk: {
+ var i: usize = 0;
+ while (i < self.text_block_free_list.items.len) {
+ const free_block = self.text_block_free_list.items[i];
+
+ const next_block_text_offset = free_block.text_offset + free_block.capacity();
+ const new_block_text_offset = std.mem.alignForwardGeneric(u64, free_block.getVAddr(self.*) + free_block.size, alignment) - self.text_section_virtual_address;
+ if (new_block_text_offset < next_block_text_offset and next_block_text_offset - new_block_text_offset >= new_block_min_capacity) {
+ block_placement = free_block;
+
+ const remaining_capacity = next_block_text_offset - new_block_text_offset - new_block_min_capacity;
+ if (remaining_capacity < minimum_text_block_size) {
+ free_list_removal = i;
+ }
+
+ break :blk new_block_text_offset + self.text_section_virtual_address;
+ } else {
+ if (!free_block.freeListEligible()) {
+ _ = self.text_block_free_list.swapRemove(i);
+ } else {
+ i += 1;
+ }
+ continue;
+ }
+ } else if (self.last_text_block) |last| {
+ const new_block_vaddr = std.mem.alignForwardGeneric(u64, last.getVAddr(self.*) + last.size, alignment);
+ block_placement = last;
+ break :blk new_block_vaddr;
+ } else {
+ break :blk self.text_section_virtual_address;
+ }
+ };
+
+ const expand_text_section = block_placement == null or block_placement.?.next == null;
+ if (expand_text_section) {
+ const needed_size = @intCast(u32, std.mem.alignForwardGeneric(u64, vaddr + new_block_size - self.text_section_virtual_address, file_alignment));
+ if (needed_size > self.text_section_size) {
+ const current_text_section_virtual_size = std.mem.alignForwardGeneric(u32, self.text_section_size, section_alignment);
+ const new_text_section_virtual_size = std.mem.alignForwardGeneric(u32, needed_size, section_alignment);
+ if (current_text_section_virtual_size != new_text_section_virtual_size) {
+ self.size_of_image_dirty = true;
+ // Write new virtual size
+ var buf: [4]u8 = undefined;
+ std.mem.writeIntLittle(u32, &buf, new_text_section_virtual_size);
+ try self.base.file.?.pwriteAll(&buf, self.section_table_offset + 40 + 8);
+ }
+
+ self.text_section_size = needed_size;
+ self.text_section_size_dirty = true;
+ }
+ self.last_text_block = text_block;
+ }
+ text_block.text_offset = @intCast(u32, vaddr - self.text_section_virtual_address);
+ text_block.size = @intCast(u32, new_block_size);
+
+ // This function can also reallocate a text block.
+ // In this case we need to "unplug" it from its previous location before
+ // plugging it in to its new location.
+ if (text_block.prev) |prev| {
+ prev.next = text_block.next;
+ }
+ if (text_block.next) |next| {
+ next.prev = text_block.prev;
+ }
+
+ if (block_placement) |big_block| {
+ text_block.prev = big_block;
+ text_block.next = big_block.next;
+ big_block.next = text_block;
+ } else {
+ text_block.prev = null;
+ text_block.next = null;
+ }
+ if (free_list_removal) |i| {
+ _ = self.text_block_free_list.swapRemove(i);
+ }
+ return vaddr;
+}
+
+fn growTextBlock(self: *Coff, text_block: *TextBlock, new_block_size: u64, alignment: u64) !u64 {
+ const block_vaddr = text_block.getVAddr(self.*);
+ const align_ok = std.mem.alignBackwardGeneric(u64, block_vaddr, alignment) == block_vaddr;
+ const need_realloc = !align_ok or new_block_size > text_block.capacity();
+ if (!need_realloc) return @as(u64, block_vaddr);
+ return self.allocateTextBlock(text_block, new_block_size, alignment);
+}
+
+fn shrinkTextBlock(self: *Coff, text_block: *TextBlock, new_block_size: u64) void {
+ text_block.size = @intCast(u32, new_block_size);
+ if (text_block.capacity() - text_block.size >= minimum_text_block_size) {
+ self.text_block_free_list.append(self.base.allocator, text_block) catch {};
+ }
+}
+
+fn freeTextBlock(self: *Coff, text_block: *TextBlock) void {
+ var already_have_free_list_node = false;
+ {
+ var i: usize = 0;
+ // TODO turn text_block_free_list into a hash map
+ while (i < self.text_block_free_list.items.len) {
+ if (self.text_block_free_list.items[i] == text_block) {
+ _ = self.text_block_free_list.swapRemove(i);
+ continue;
+ }
+ if (self.text_block_free_list.items[i] == text_block.prev) {
+ already_have_free_list_node = true;
+ }
+ i += 1;
+ }
+ }
+ if (self.last_text_block == text_block) {
+ self.last_text_block = text_block.prev;
+ }
+ if (text_block.prev) |prev| {
+ prev.next = text_block.next;
+
+ if (!already_have_free_list_node and prev.freeListEligible()) {
+ // The free list is heuristics, it doesn't have to be perfect, so we can
+ // ignore the OOM here.
+ self.text_block_free_list.append(self.base.allocator, prev) catch {};
+ }
+ }
+
+ if (text_block.next) |next| {
+ next.prev = text_block.prev;
+ }
+}
+
+fn writeOffsetTableEntry(self: *Coff, index: usize) !void {
+ const entry_size = self.base.options.target.cpu.arch.ptrBitWidth() / 8;
+ const endian = self.base.options.target.cpu.arch.endian();
+
+ const offset_table_start = self.section_data_offset;
+ if (self.offset_table_size_dirty) {
+ const current_raw_size = self.offset_table_size;
+ const new_raw_size = self.offset_table_size * 2;
+ log.debug("growing offset table from raw size {} to {}\n", .{ current_raw_size, new_raw_size });
+
+ // Move the text section to a new place in the executable
+ const current_text_section_start = self.section_data_offset + current_raw_size;
+ const new_text_section_start = self.section_data_offset + new_raw_size;
+
+ const amt = try self.base.file.?.copyRangeAll(current_text_section_start, self.base.file.?, new_text_section_start, self.text_section_size);
+ if (amt != self.text_section_size) return error.InputOutput;
+
+ // Write the new raw size in the .got header
+ var buf: [8]u8 = undefined;
+ std.mem.writeIntLittle(u32, buf[0..4], new_raw_size);
+ try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 16);
+ // Write the new .text section file offset in the .text section header
+ std.mem.writeIntLittle(u32, buf[0..4], new_text_section_start);
+ try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 40 + 20);
+
+ const current_virtual_size = std.mem.alignForwardGeneric(u32, self.offset_table_size, section_alignment);
+ const new_virtual_size = std.mem.alignForwardGeneric(u32, new_raw_size, section_alignment);
+ // If we had to move in the virtual address space, we need to fix the VAs in the offset table, as well as the virtual address of the `.text` section
+ // and the virtual size of the `.got` section
+
+ if (new_virtual_size != current_virtual_size) {
+ log.debug("growing offset table from virtual size {} to {}\n", .{ current_virtual_size, new_virtual_size });
+ self.size_of_image_dirty = true;
+ const va_offset = new_virtual_size - current_virtual_size;
+
+ // Write .got virtual size
+ std.mem.writeIntLittle(u32, buf[0..4], new_virtual_size);
+ try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 8);
+
+ // Write .text new virtual address
+ self.text_section_virtual_address = self.text_section_virtual_address + va_offset;
+ std.mem.writeIntLittle(u32, buf[0..4], self.text_section_virtual_address - image_base);
+ try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 40 + 12);
+
+ // Fix the VAs in the offset table
+ for (self.offset_table.items) |*va, idx| {
+ if (va.* != 0) {
+ va.* += va_offset;
+
+ switch (entry_size) {
+ 4 => {
+ std.mem.writeInt(u32, buf[0..4], @intCast(u32, va.*), endian);
+ try self.base.file.?.pwriteAll(buf[0..4], offset_table_start + idx * entry_size);
+ },
+ 8 => {
+ std.mem.writeInt(u64, &buf, va.*, endian);
+ try self.base.file.?.pwriteAll(&buf, offset_table_start + idx * entry_size);
+ },
+ else => unreachable,
+ }
+ }
+ }
+ }
+ self.offset_table_size = new_raw_size;
+ self.offset_table_size_dirty = false;
+ }
+ // Write the new entry
+ switch (entry_size) {
+ 4 => {
+ var buf: [4]u8 = undefined;
+ std.mem.writeInt(u32, &buf, @intCast(u32, self.offset_table.items[index]), endian);
+ try self.base.file.?.pwriteAll(&buf, offset_table_start + index * entry_size);
+ },
+ 8 => {
+ var buf: [8]u8 = undefined;
+ std.mem.writeInt(u64, &buf, self.offset_table.items[index], endian);
+ try self.base.file.?.pwriteAll(&buf, offset_table_start + index * entry_size);
+ },
+ else => unreachable,
+ }
+}
+
+pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void {
+ // TODO COFF/PE debug information
+ // TODO Implement exports
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ var code_buffer = std.ArrayList(u8).init(self.base.allocator);
+ defer code_buffer.deinit();
+
+ const typed_value = decl.typed_value.most_recent.typed_value;
+ const res = try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, .none);
+ const code = switch (res) {
+ .externally_managed => |x| x,
+ .appended => code_buffer.items,
+ .fail => |em| {
+ decl.analysis = .codegen_failure;
+ try module.failed_decls.put(module.gpa, decl, em);
+ return;
+ },
+ };
+
+ const required_alignment = typed_value.ty.abiAlignment(self.base.options.target);
+ const curr_size = decl.link.coff.size;
+ if (curr_size != 0) {
+ const capacity = decl.link.coff.capacity();
+ const need_realloc = code.len > capacity or
+ !std.mem.isAlignedGeneric(u32, decl.link.coff.text_offset, required_alignment);
+ if (need_realloc) {
+ const curr_vaddr = self.getDeclVAddr(decl);
+ const vaddr = try self.growTextBlock(&decl.link.coff, code.len, required_alignment);
+ log.debug("growing {} from 0x{x} to 0x{x}\n", .{ std.mem.spanZ(decl.name), curr_vaddr, vaddr });
+ if (vaddr != curr_vaddr) {
+ log.debug(" (writing new offset table entry)\n", .{});
+ self.offset_table.items[decl.link.coff.offset_table_index] = vaddr;
+ try self.writeOffsetTableEntry(decl.link.coff.offset_table_index);
+ }
+ } else if (code.len < curr_size) {
+ self.shrinkTextBlock(&decl.link.coff, code.len);
+ }
+ } else {
+ const vaddr = try self.allocateTextBlock(&decl.link.coff, code.len, required_alignment);
+ log.debug("allocated text block for {} at 0x{x} (size: {Bi})\n", .{ std.mem.spanZ(decl.name), vaddr, code.len });
+ errdefer self.freeTextBlock(&decl.link.coff);
+ self.offset_table.items[decl.link.coff.offset_table_index] = vaddr;
+ try self.writeOffsetTableEntry(decl.link.coff.offset_table_index);
+ }
+
+ // Write the code into the file
+ try self.base.file.?.pwriteAll(code, self.section_data_offset + self.offset_table_size + decl.link.coff.text_offset);
+
+ // Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
+ const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
+ return self.updateDeclExports(module, decl, decl_exports);
+}
+
+pub fn freeDecl(self: *Coff, decl: *Module.Decl) void {
+ // Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
+ self.freeTextBlock(&decl.link.coff);
+ self.offset_table_free_list.append(self.base.allocator, decl.link.coff.offset_table_index) catch {};
+}
+
+pub fn updateDeclExports(self: *Coff, module: *Module, decl: *const Module.Decl, exports: []const *Module.Export) !void {
+ for (exports) |exp| {
+ if (exp.options.section) |section_name| {
+ if (!std.mem.eql(u8, section_name, ".text")) {
+ try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1);
+ module.failed_exports.putAssumeCapacityNoClobber(
+ exp,
+ try Module.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: ExportOptions.section", .{}),
+ );
+ continue;
+ }
+ }
+ if (std.mem.eql(u8, exp.options.name, "_start")) {
+ self.entry_addr = decl.link.coff.getVAddr(self.*) - image_base;
+ } else {
+ try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1);
+ module.failed_exports.putAssumeCapacityNoClobber(
+ exp,
+ try Module.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: Exports other than '_start'", .{}),
+ );
+ continue;
+ }
+ }
+}
+
+pub fn flush(self: *Coff, module: *Module) !void {
+ if (self.text_section_size_dirty) {
+ // Write the new raw size in the .text header
+ var buf: [4]u8 = undefined;
+ std.mem.writeIntLittle(u32, &buf, self.text_section_size);
+ try self.base.file.?.pwriteAll(&buf, self.section_table_offset + 40 + 16);
+ try self.base.file.?.setEndPos(self.section_data_offset + self.offset_table_size + self.text_section_size);
+ self.text_section_size_dirty = false;
+ }
+
+ if (self.base.options.output_mode == .Exe and self.size_of_image_dirty) {
+ const new_size_of_image = std.mem.alignForwardGeneric(u32, self.text_section_virtual_address - image_base + self.text_section_size, section_alignment);
+ var buf: [4]u8 = undefined;
+ std.mem.writeIntLittle(u32, &buf, new_size_of_image);
+ try self.base.file.?.pwriteAll(&buf, self.optional_header_offset + 56);
+ self.size_of_image_dirty = false;
+ }
+
+ if (self.entry_addr == null and self.base.options.output_mode == .Exe) {
+ log.debug("flushing. no_entry_point_found = true\n", .{});
+ self.error_flags.no_entry_point_found = true;
+ } else {
+ log.debug("flushing. no_entry_point_found = false\n", .{});
+ self.error_flags.no_entry_point_found = false;
+
+ if (self.base.options.output_mode == .Exe) {
+ // Write AddressOfEntryPoint
+ var buf: [4]u8 = undefined;
+ std.mem.writeIntLittle(u32, &buf, self.entry_addr.?);
+ try self.base.file.?.pwriteAll(&buf, self.optional_header_offset + 16);
+ }
+ }
+}
+
+pub fn getDeclVAddr(self: *Coff, decl: *const Module.Decl) u64 {
+ return self.text_section_virtual_address + decl.link.coff.text_offset;
+}
+
+pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl: *Module.Decl) !void {
+ // TODO Implement this
+}
+
+pub fn deinit(self: *Coff) void {
+ self.text_block_free_list.deinit(self.base.allocator);
+ self.offset_table.deinit(self.base.allocator);
+ self.offset_table_free_list.deinit(self.base.allocator);
+}
diff --git a/src-self-hosted/link/Elf.zig b/src-self-hosted/link/Elf.zig
index 69f1260d20..e5acde947c 100644
--- a/src-self-hosted/link/Elf.zig
+++ b/src-self-hosted/link/Elf.zig
@@ -1656,8 +1656,8 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
try dbg_line_buffer.ensureCapacity(26);
const line_off: u28 = blk: {
- if (decl.scope.cast(Module.Scope.File)) |scope_file| {
- const tree = scope_file.contents.tree;
+ if (decl.scope.cast(Module.Scope.Container)) |container_scope| {
+ const tree = container_scope.file_scope.contents.tree;
const file_ast_decls = tree.root_node.decls();
// TODO Look into improving the performance here by adding a token-index-to-line
// lookup table. Currently this involves scanning over the source code for newlines.
@@ -1735,7 +1735,13 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
} else {
// TODO implement .debug_info for global variables
}
- const res = try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, &dbg_line_buffer, &dbg_info_buffer, &dbg_info_type_relocs);
+ const res = try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, .{
+ .dwarf = .{
+ .dbg_line = &dbg_line_buffer,
+ .dbg_info = &dbg_info_buffer,
+ .dbg_info_type_relocs = &dbg_info_type_relocs,
+ },
+ });
const code = switch (res) {
.externally_managed => |x| x,
.appended => code_buffer.items,
@@ -2157,8 +2163,8 @@ pub fn updateDeclLineNumber(self: *Elf, module: *Module, decl: *const Module.Dec
const tracy = trace(@src());
defer tracy.end();
- const scope_file = decl.scope.cast(Module.Scope.File).?;
- const tree = scope_file.contents.tree;
+ const container_scope = decl.scope.cast(Module.Scope.Container).?;
+ const tree = container_scope.file_scope.contents.tree;
const file_ast_decls = tree.root_node.decls();
// TODO Look into improving the performance here by adding a token-index-to-line
// lookup table. Currently this involves scanning over the source code for newlines.
diff --git a/src-self-hosted/link/MachO.zig b/src-self-hosted/link/MachO.zig
index 047e62f950..27d0488f25 100644
--- a/src-self-hosted/link/MachO.zig
+++ b/src-self-hosted/link/MachO.zig
@@ -18,36 +18,66 @@ const File = link.File;
pub const base_tag: File.Tag = File.Tag.macho;
+const LoadCommand = union(enum) {
+ Segment: macho.segment_command_64,
+ LinkeditData: macho.linkedit_data_command,
+ Symtab: macho.symtab_command,
+ Dysymtab: macho.dysymtab_command,
+
+ pub fn cmdsize(self: LoadCommand) u32 {
+ return switch (self) {
+ .Segment => |x| x.cmdsize,
+ .LinkeditData => |x| x.cmdsize,
+ .Symtab => |x| x.cmdsize,
+ .Dysymtab => |x| x.cmdsize,
+ };
+ }
+};
+
base: File,
-/// List of all load command headers that are in the file.
-/// We use it to track number and size of all commands needed by the header.
-commands: std.ArrayListUnmanaged(macho.load_command) = std.ArrayListUnmanaged(macho.load_command){},
-command_file_offset: ?u64 = null,
+/// Table of all load commands
+load_commands: std.ArrayListUnmanaged(LoadCommand) = .{},
+segment_cmd_index: ?u16 = null,
+symtab_cmd_index: ?u16 = null,
+dysymtab_cmd_index: ?u16 = null,
+data_in_code_cmd_index: ?u16 = null,
-/// Stored in native-endian format, depending on target endianness needs to be bswapped on read/write.
-/// Same order as in the file.
-segments: std.ArrayListUnmanaged(macho.segment_command_64) = std.ArrayListUnmanaged(macho.segment_command_64){},
-/// Section (headers) *always* follow segment (load commands) directly!
-sections: std.ArrayListUnmanaged(macho.section_64) = std.ArrayListUnmanaged(macho.section_64){},
+/// Table of all sections
+sections: std.ArrayListUnmanaged(macho.section_64) = .{},
-/// Offset (index) into __TEXT segment load command.
-text_segment_offset: ?u64 = null,
-/// Offset (index) into __LINKEDIT segment load command.
-linkedit_segment_offset: ?u664 = null,
+/// __TEXT segment sections
+text_section_index: ?u16 = null,
+cstring_section_index: ?u16 = null,
+const_text_section_index: ?u16 = null,
+stubs_section_index: ?u16 = null,
+stub_helper_section_index: ?u16 = null,
+
+/// __DATA segment sections
+got_section_index: ?u16 = null,
+const_data_section_index: ?u16 = null,
-/// Entry point load command
-entry_point_cmd: ?macho.entry_point_command = null,
entry_addr: ?u64 = null,
-/// The first 4GB of process' memory is reserved for the null (__PAGEZERO) segment.
-/// This is also the start address for our binary.
-vm_start_address: u64 = 0x100000000,
+/// Table of all symbols used.
+/// Internally references string table for names (which are optional).
+symbol_table: std.ArrayListUnmanaged(macho.nlist_64) = .{},
+
+/// Table of symbol names aka the string table.
+string_table: std.ArrayListUnmanaged(u8) = .{},
-seg_table_dirty: bool = false,
+/// Table of symbol vaddr values. The values is the absolute vaddr value.
+/// If the vaddr of the executable __TEXT segment vaddr changes, the entire offset
+/// table needs to be rewritten.
+offset_table: std.ArrayListUnmanaged(u64) = .{},
error_flags: File.ErrorFlags = File.ErrorFlags{},
+cmd_table_dirty: bool = false,
+
+/// Pointer to the last allocated text block
+last_text_block: ?*TextBlock = null,
+
/// `alloc_num / alloc_den` is the factor of padding when allocating.
const alloc_num = 4;
const alloc_den = 3;
@@ -67,7 +97,23 @@ const LIB_SYSTEM_NAME: [*:0]const u8 = "System";
const LIB_SYSTEM_PATH: [*:0]const u8 = DEFAULT_LIB_SEARCH_PATH ++ "/libSystem.B.dylib";
pub const TextBlock = struct {
- pub const empty = TextBlock{};
+ /// Index into the symbol table
+ symbol_table_index: ?u32,
+ /// Index into offset table
+ offset_table_index: ?u32,
+ /// Size of this text block
+ size: u64,
+ /// Points to the previous and next neighbours
+ prev: ?*TextBlock,
+ next: ?*TextBlock,
+
+ pub const empty = TextBlock{
+ .symbol_table_index = null,
+ .offset_table_index = null,
+ .size = 0,
+ .prev = null,
+ .next = null,
+ };
};
pub const SrcFn = struct {
@@ -117,6 +163,12 @@ fn openFile(allocator: *Allocator, file: fs.File, options: link.Options) !MachO
/// Truncates the existing file contents and overwrites the contents.
/// Returns an error if `file` is not already open with +read +write +seek abilities.
fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !MachO {
+ switch (options.output_mode) {
+ .Exe => {},
+ .Obj => {},
+ .Lib => return error.TODOImplementWritingLibFiles,
+ }
+
var self: MachO = .{
.base = .{
.file = file,
@@ -127,104 +179,15 @@ fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !Mach
};
errdefer self.deinit();
- switch (options.output_mode) {
- .Exe => {
- // The first segment command for executables is always a __PAGEZERO segment.
- const pagezero = .{
- .cmd = macho.LC_SEGMENT_64,
- .cmdsize = commandSize(@sizeOf(macho.segment_command_64)),
- .segname = makeString("__PAGEZERO"),
- .vmaddr = 0,
- .vmsize = self.vm_start_address,
- .fileoff = 0,
- .filesize = 0,
- .maxprot = macho.VM_PROT_NONE,
- .initprot = macho.VM_PROT_NONE,
- .nsects = 0,
- .flags = 0,
- };
- try self.commands.append(allocator, .{
- .cmd = pagezero.cmd,
- .cmdsize = pagezero.cmdsize,
- });
- try self.segments.append(allocator, pagezero);
- },
- .Obj => return error.TODOImplementWritingObjFiles,
- .Lib => return error.TODOImplementWritingLibFiles,
- }
-
try self.populateMissingMetadata();
return self;
}
-fn writeMachOHeader(self: *MachO) !void {
- var hdr: macho.mach_header_64 = undefined;
- hdr.magic = macho.MH_MAGIC_64;
-
- const CpuInfo = struct {
- cpu_type: macho.cpu_type_t,
- cpu_subtype: macho.cpu_subtype_t,
- };
-
- const cpu_info: CpuInfo = switch (self.base.options.target.cpu.arch) {
- .aarch64 => .{
- .cpu_type = macho.CPU_TYPE_ARM64,
- .cpu_subtype = macho.CPU_SUBTYPE_ARM_ALL,
- },
- .x86_64 => .{
- .cpu_type = macho.CPU_TYPE_X86_64,
- .cpu_subtype = macho.CPU_SUBTYPE_X86_64_ALL,
- },
- else => return error.UnsupportedMachOArchitecture,
- };
- hdr.cputype = cpu_info.cpu_type;
- hdr.cpusubtype = cpu_info.cpu_subtype;
-
- const filetype: u32 = switch (self.base.options.output_mode) {
- .Exe => macho.MH_EXECUTE,
- .Obj => macho.MH_OBJECT,
- .Lib => switch (self.base.options.link_mode) {
- .Static => return error.TODOStaticLibMachOType,
- .Dynamic => macho.MH_DYLIB,
- },
- };
- hdr.filetype = filetype;
-
- const ncmds = try math.cast(u32, self.commands.items.len);
- hdr.ncmds = ncmds;
-
- var sizeof_cmds: u32 = 0;
- for (self.commands.items) |cmd| {
- sizeof_cmds += cmd.cmdsize;
- }
- hdr.sizeofcmds = sizeof_cmds;
-
- // TODO should these be set to something else?
- hdr.flags = 0;
- hdr.reserved = 0;
-
- try self.base.file.?.pwriteAll(@ptrCast([*]const u8, &hdr)[0..@sizeOf(macho.mach_header_64)], 0);
-}
-
pub fn flush(self: *MachO, module: *Module) !void {
- // Save segments first
- {
- const buf = try self.base.allocator.alloc(macho.segment_command_64, self.segments.items.len);
- defer self.base.allocator.free(buf);
-
- self.command_file_offset = @sizeOf(macho.mach_header_64);
-
- for (buf) |*seg, i| {
- seg.* = self.segments.items[i];
- self.command_file_offset.? += self.segments.items[i].cmdsize;
- }
-
- try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), @sizeOf(macho.mach_header_64));
- }
-
switch (self.base.options.output_mode) {
.Exe => {
+ var last_cmd_offset: usize = @sizeOf(macho.mach_header_64);
{
// Specify path to dynamic linker dyld
const cmdsize = commandSize(@sizeOf(macho.dylinker_command) + mem.lenZ(DEFAULT_DYLD_PATH));
@@ -235,18 +198,14 @@ pub fn flush(self: *MachO, module: *Module) !void {
.name = @sizeOf(macho.dylinker_command),
},
};
- try self.commands.append(self.base.allocator, .{
- .cmd = macho.LC_LOAD_DYLINKER,
- .cmdsize = cmdsize,
- });
- try self.base.file.?.pwriteAll(mem.sliceAsBytes(load_dylinker[0..1]), self.command_file_offset.?);
+ try self.base.file.?.pwriteAll(mem.sliceAsBytes(load_dylinker[0..1]), last_cmd_offset);
- const file_offset = self.command_file_offset.? + @sizeOf(macho.dylinker_command);
+ const file_offset = last_cmd_offset + @sizeOf(macho.dylinker_command);
try self.addPadding(cmdsize - @sizeOf(macho.dylinker_command), file_offset);
try self.base.file.?.pwriteAll(mem.spanZ(DEFAULT_DYLD_PATH), file_offset);
- self.command_file_offset.? += cmdsize;
+ last_cmd_offset += cmdsize;
}
{
@@ -268,21 +227,44 @@ pub fn flush(self: *MachO, module: *Module) !void {
.dylib = dylib,
},
};
- try self.commands.append(self.base.allocator, .{
- .cmd = macho.LC_LOAD_DYLIB,
- .cmdsize = cmdsize,
- });
- try self.base.file.?.pwriteAll(mem.sliceAsBytes(load_dylib[0..1]), self.command_file_offset.?);
+ try self.base.file.?.pwriteAll(mem.sliceAsBytes(load_dylib[0..1]), last_cmd_offset);
- const file_offset = self.command_file_offset.? + @sizeOf(macho.dylib_command);
+ const file_offset = last_cmd_offset + @sizeOf(macho.dylib_command);
try self.addPadding(cmdsize - @sizeOf(macho.dylib_command), file_offset);
try self.base.file.?.pwriteAll(mem.spanZ(LIB_SYSTEM_PATH), file_offset);
- self.command_file_offset.? += cmdsize;
+ last_cmd_offset += cmdsize;
+ }
+ },
+ .Obj => {
+ {
+ const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab;
+ symtab.nsyms = @intCast(u32, self.symbol_table.items.len);
+ const allocated_size = self.allocatedSize(symtab.stroff);
+ const needed_size = self.string_table.items.len;
+ log.debug("allocated_size = 0x{x}, needed_size = 0x{x}\n", .{ allocated_size, needed_size });
+
+ if (needed_size > allocated_size) {
+ symtab.strsize = 0;
+ symtab.stroff = @intCast(u32, self.findFreeSpace(needed_size, 1));
+ }
+ symtab.strsize = @intCast(u32, needed_size);
+
+ log.debug("writing string table from 0x{x} to 0x{x}\n", .{ symtab.stroff, symtab.stroff + symtab.strsize });
+
+ try self.base.file.?.pwriteAll(self.string_table.items, symtab.stroff);
+ }
+
+ var last_cmd_offset: usize = @sizeOf(macho.mach_header_64);
+ for (self.load_commands.items) |cmd| {
+ const cmd_to_write = [1]@TypeOf(cmd){cmd};
+ try self.base.file.?.pwriteAll(mem.sliceAsBytes(cmd_to_write[0..1]), last_cmd_offset);
+ last_cmd_offset += cmd.cmdsize();
}
+ const off = @sizeOf(macho.mach_header_64) + @sizeOf(macho.segment_command_64);
+ try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.sections.items), off);
},
- .Obj => return error.TODOImplementWritingObjFiles,
.Lib => return error.TODOImplementWritingLibFiles,
}
@@ -297,14 +279,87 @@ pub fn flush(self: *MachO, module: *Module) !void {
}
pub fn deinit(self: *MachO) void {
- self.commands.deinit(self.base.allocator);
- self.segments.deinit(self.base.allocator);
+ self.offset_table.deinit(self.base.allocator);
+ self.string_table.deinit(self.base.allocator);
+ self.symbol_table.deinit(self.base.allocator);
self.sections.deinit(self.base.allocator);
+ self.load_commands.deinit(self.base.allocator);
+}
+
+pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void {
+ if (decl.link.macho.symbol_table_index) |_| return;
+
+ try self.symbol_table.ensureCapacity(self.base.allocator, self.symbol_table.items.len + 1);
+ try self.offset_table.ensureCapacity(self.base.allocator, self.offset_table.items.len + 1);
+
+ log.debug("allocating symbol index {} for {}\n", .{ self.symbol_table.items.len, decl.name });
+ decl.link.macho.symbol_table_index = @intCast(u32, self.symbol_table.items.len);
+ _ = self.symbol_table.addOneAssumeCapacity();
+
+ decl.link.macho.offset_table_index = @intCast(u32, self.offset_table.items.len);
+ _ = self.offset_table.addOneAssumeCapacity();
+
+ self.symbol_table.items[decl.link.macho.symbol_table_index.?] = .{
+ .n_strx = 0,
+ .n_type = 0,
+ .n_sect = 0,
+ .n_desc = 0,
+ .n_value = 0,
+ };
+ self.offset_table.items[decl.link.macho.offset_table_index.?] = 0;
}
-pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void {}
+pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ var code_buffer = std.ArrayList(u8).init(self.base.allocator);
+ defer code_buffer.deinit();
-pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {}
+ const typed_value = decl.typed_value.most_recent.typed_value;
+ const res = try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, .none);
+
+ const code = switch (res) {
+ .externally_managed => |x| x,
+ .appended => code_buffer.items,
+ .fail => |em| {
+ decl.analysis = .codegen_failure;
+ try module.failed_decls.put(module.gpa, decl, em);
+ return;
+ },
+ };
+ log.debug("generated code {}\n", .{code});
+
+ const required_alignment = typed_value.ty.abiAlignment(self.base.options.target);
+ const symbol = &self.symbol_table.items[decl.link.macho.symbol_table_index.?];
+
+ const decl_name = mem.spanZ(decl.name);
+ const name_str_index = try self.makeString(decl_name);
+ const addr = try self.allocateTextBlock(&decl.link.macho, code.len, required_alignment);
+ log.debug("allocated text block for {} at 0x{x}\n", .{ decl_name, addr });
+ log.debug("updated text section {}\n", .{self.sections.items[self.text_section_index.?]});
+
+ symbol.* = .{
+ .n_strx = name_str_index,
+ .n_type = macho.N_SECT,
+ .n_sect = @intCast(u8, self.text_section_index.?) + 1,
+ .n_desc = 0,
+ .n_value = addr,
+ };
+ self.offset_table.items[decl.link.macho.offset_table_index.?] = addr;
+
+ try self.writeSymbol(decl.link.macho.symbol_table_index.?);
+
+ const text_section = self.sections.items[self.text_section_index.?];
+ const section_offset = symbol.n_value - text_section.addr;
+ const file_offset = text_section.offset + section_offset;
+ log.debug("file_offset 0x{x}\n", .{file_offset});
+ try self.base.file.?.pwriteAll(code, file_offset);
+
+ // Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
+ const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
+ return self.updateDeclExports(module, decl, decl_exports);
+}
pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl: *const Module.Decl) !void {}
@@ -313,51 +368,191 @@ pub fn updateDeclExports(
module: *Module,
decl: *const Module.Decl,
exports: []const *Module.Export,
-) !void {}
+) !void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ if (decl.link.macho.symbol_table_index == null) return;
+
+ var decl_sym = self.symbol_table.items[decl.link.macho.symbol_table_index.?];
+ // TODO implement
+ if (exports.len == 0) return;
+
+ const exp = exports[0];
+ self.entry_addr = decl_sym.n_value;
+ decl_sym.n_type |= macho.N_EXT;
+ exp.link.sym_index = 0;
+}
pub fn freeDecl(self: *MachO, decl: *Module.Decl) void {}
pub fn getDeclVAddr(self: *MachO, decl: *const Module.Decl) u64 {
- @panic("TODO implement getDeclVAddr for MachO");
+ return self.symbol_table.items[decl.link.macho.symbol_table_index.?].n_value;
}
pub fn populateMissingMetadata(self: *MachO) !void {
- if (self.text_segment_offset == null) {
- self.text_segment_offset = @intCast(u64, self.segments.items.len);
- const file_size = alignSize(u64, self.base.options.program_code_size_hint, 0x1000);
- log.debug("vmsize/filesize = {}", .{file_size});
- const file_offset = 0;
- const vm_address = self.vm_start_address; // the end of __PAGEZERO segment in VM
- const protection = macho.VM_PROT_READ | macho.VM_PROT_EXECUTE;
- const cmdsize = commandSize(@sizeOf(macho.segment_command_64));
- const text_segment = .{
- .cmd = macho.LC_SEGMENT_64,
- .cmdsize = cmdsize,
- .segname = makeString("__TEXT"),
- .vmaddr = vm_address,
- .vmsize = file_size,
- .fileoff = 0, // __TEXT segment *always* starts at 0 file offset
- .filesize = 0, //file_size,
- .maxprot = protection,
- .initprot = protection,
- .nsects = 0,
- .flags = 0,
- };
- try self.commands.append(self.base.allocator, .{
- .cmd = macho.LC_SEGMENT_64,
- .cmdsize = cmdsize,
+ if (self.segment_cmd_index == null) {
+ self.segment_cmd_index = @intCast(u16, self.load_commands.items.len);
+ try self.load_commands.append(self.base.allocator, .{
+ .Segment = .{
+ .cmd = macho.LC_SEGMENT_64,
+ .cmdsize = @sizeOf(macho.segment_command_64),
+ .segname = makeStaticString(""),
+ .vmaddr = 0,
+ .vmsize = 0,
+ .fileoff = 0,
+ .filesize = 0,
+ .maxprot = 0,
+ .initprot = 0,
+ .nsects = 0,
+ .flags = 0,
+ },
+ });
+ self.cmd_table_dirty = true;
+ }
+ if (self.symtab_cmd_index == null) {
+ self.symtab_cmd_index = @intCast(u16, self.load_commands.items.len);
+ try self.load_commands.append(self.base.allocator, .{
+ .Symtab = .{
+ .cmd = macho.LC_SYMTAB,
+ .cmdsize = @sizeOf(macho.symtab_command),
+ .symoff = 0,
+ .nsyms = 0,
+ .stroff = 0,
+ .strsize = 0,
+ },
+ });
+ self.cmd_table_dirty = true;
+ }
+ if (self.text_section_index == null) {
+ self.text_section_index = @intCast(u16, self.sections.items.len);
+ const segment = &self.load_commands.items[self.segment_cmd_index.?].Segment;
+ segment.cmdsize += @sizeOf(macho.section_64);
+ segment.nsects += 1;
+
+ const file_size = self.base.options.program_code_size_hint;
+ const off = @intCast(u32, self.findFreeSpace(file_size, 1));
+ const flags = macho.S_REGULAR | macho.S_ATTR_PURE_INSTRUCTIONS | macho.S_ATTR_SOME_INSTRUCTIONS;
+
+ log.debug("found __text section free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
+
+ try self.sections.append(self.base.allocator, .{
+ .sectname = makeStaticString("__text"),
+ .segname = makeStaticString("__TEXT"),
+ .addr = 0,
+ .size = file_size,
+ .offset = off,
+ .@"align" = 0x1000,
+ .reloff = 0,
+ .nreloc = 0,
+ .flags = flags,
+ .reserved1 = 0,
+ .reserved2 = 0,
+ .reserved3 = 0,
});
- try self.segments.append(self.base.allocator, text_segment);
+
+ segment.vmsize += file_size;
+ segment.filesize += file_size;
+ segment.fileoff = off;
+
+ log.debug("initial text section {}\n", .{self.sections.items[self.text_section_index.?]});
+ }
+ {
+ const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab;
+ if (symtab.symoff == 0) {
+ const p_align = @sizeOf(macho.nlist_64);
+ const nsyms = self.base.options.symbol_count_hint;
+ const file_size = p_align * nsyms;
+ const off = @intCast(u32, self.findFreeSpace(file_size, p_align));
+ log.debug("found symbol table free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
+ symtab.symoff = off;
+ symtab.nsyms = @intCast(u32, nsyms);
+ }
+ if (symtab.stroff == 0) {
+ try self.string_table.append(self.base.allocator, 0);
+ const file_size = @intCast(u32, self.string_table.items.len);
+ const off = @intCast(u32, self.findFreeSpace(file_size, 1));
+ log.debug("found string table free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
+ symtab.stroff = off;
+ symtab.strsize = file_size;
+ }
+ }
+}
+
+fn allocateTextBlock(self: *MachO, text_block: *TextBlock, new_block_size: u64, alignment: u64) !u64 {
+ const segment = &self.load_commands.items[self.segment_cmd_index.?].Segment;
+ const text_section = &self.sections.items[self.text_section_index.?];
+ const new_block_ideal_capacity = new_block_size * alloc_num / alloc_den;
+
+ var block_placement: ?*TextBlock = null;
+ const addr = blk: {
+ if (self.last_text_block) |last| {
+ const last_symbol = self.symbol_table.items[last.symbol_table_index.?];
+ const ideal_capacity = last.size * alloc_num / alloc_den;
+ const ideal_capacity_end_addr = last_symbol.n_value + ideal_capacity;
+ const new_start_addr = mem.alignForwardGeneric(u64, ideal_capacity_end_addr, alignment);
+ block_placement = last;
+ break :blk new_start_addr;
+ } else {
+ break :blk text_section.addr;
+ }
+ };
+ log.debug("computed symbol address 0x{x}\n", .{addr});
+
+ const expand_text_section = block_placement == null or block_placement.?.next == null;
+ if (expand_text_section) {
+ const text_capacity = self.allocatedSize(text_section.offset);
+ const needed_size = (addr + new_block_size) - text_section.addr;
+ log.debug("text capacity 0x{x}, needed size 0x{x}\n", .{ text_capacity, needed_size });
+
+ if (needed_size > text_capacity) {
+ // TODO handle growth
+ }
+
+ self.last_text_block = text_block;
+ text_section.size = needed_size;
+ segment.vmsize = needed_size;
+ segment.filesize = needed_size;
+ if (alignment < text_section.@"align") {
+ text_section.@"align" = @intCast(u32, alignment);
+ }
+ }
+ text_block.size = new_block_size;
+
+ if (text_block.prev) |prev| {
+ prev.next = text_block.next;
+ }
+ if (text_block.next) |next| {
+ next.prev = text_block.prev;
+ }
+
+ if (block_placement) |big_block| {
+ text_block.prev = big_block;
+ text_block.next = big_block.next;
+ big_block.next = text_block;
+ } else {
+ text_block.prev = null;
+ text_block.next = null;
}
+
+ return addr;
}
-fn makeString(comptime bytes: []const u8) [16]u8 {
+fn makeStaticString(comptime bytes: []const u8) [16]u8 {
var buf = [_]u8{0} ** 16;
- if (bytes.len > buf.len) @compileError("MachO segment/section name too long");
+ if (bytes.len > buf.len) @compileError("string too long; max 16 bytes");
mem.copy(u8, buf[0..], bytes);
return buf;
}
+fn makeString(self: *MachO, bytes: []const u8) !u32 {
+ try self.string_table.ensureCapacity(self.base.allocator, self.string_table.items.len + bytes.len + 1);
+ const result = self.string_table.items.len;
+ self.string_table.appendSliceAssumeCapacity(bytes);
+ self.string_table.appendAssumeCapacity(0);
+ return @intCast(u32, result);
+}
+
fn alignSize(comptime Int: type, min_size: anytype, alignment: Int) Int {
const size = @intCast(Int, min_size);
if (size % alignment == 0) return size;
@@ -370,7 +565,7 @@ fn commandSize(min_size: anytype) u32 {
return alignSize(u32, min_size, @sizeOf(u64));
}
-fn addPadding(self: *MachO, size: u32, file_offset: u64) !void {
+fn addPadding(self: *MachO, size: u64, file_offset: u64) !void {
if (size == 0) return;
const buf = try self.base.allocator.alloc(u8, size);
@@ -380,3 +575,151 @@ fn addPadding(self: *MachO, size: u32, file_offset: u64) !void {
try self.base.file.?.pwriteAll(buf, file_offset);
}
+
+fn detectAllocCollision(self: *MachO, start: u64, size: u64) ?u64 {
+ const hdr_size: u64 = @sizeOf(macho.mach_header_64);
+ if (start < hdr_size)
+ return hdr_size;
+
+ const end = start + satMul(size, alloc_num) / alloc_den;
+
+ {
+ const off = @sizeOf(macho.mach_header_64);
+ var tight_size: u64 = 0;
+ for (self.load_commands.items) |cmd| {
+ tight_size += cmd.cmdsize();
+ }
+ const increased_size = satMul(tight_size, alloc_num) / alloc_den;
+ const test_end = off + increased_size;
+ if (end > off and start < test_end) {
+ return test_end;
+ }
+ }
+
+ for (self.sections.items) |section| {
+ const increased_size = satMul(section.size, alloc_num) / alloc_den;
+ const test_end = section.offset + increased_size;
+ if (end > section.offset and start < test_end) {
+ return test_end;
+ }
+ }
+
+ if (self.symtab_cmd_index) |symtab_index| {
+ const symtab = self.load_commands.items[symtab_index].Symtab;
+ {
+ const tight_size = @sizeOf(macho.nlist_64) * symtab.nsyms;
+ const increased_size = satMul(tight_size, alloc_num) / alloc_den;
+ const test_end = symtab.symoff + increased_size;
+ if (end > symtab.symoff and start < test_end) {
+ return test_end;
+ }
+ }
+ {
+ const increased_size = satMul(symtab.strsize, alloc_num) / alloc_den;
+ const test_end = symtab.stroff + increased_size;
+ if (end > symtab.stroff and start < test_end) {
+ return test_end;
+ }
+ }
+ }
+
+ return null;
+}
+
+fn allocatedSize(self: *MachO, start: u64) u64 {
+ if (start == 0)
+ return 0;
+ var min_pos: u64 = std.math.maxInt(u64);
+ {
+ const off = @sizeOf(macho.mach_header_64);
+ if (off > start and off < min_pos) min_pos = off;
+ }
+ for (self.sections.items) |section| {
+ if (section.offset <= start) continue;
+ if (section.offset < min_pos) min_pos = section.offset;
+ }
+ if (self.symtab_cmd_index) |symtab_index| {
+ const symtab = self.load_commands.items[symtab_index].Symtab;
+ if (symtab.symoff > start and symtab.symoff < min_pos) min_pos = symtab.symoff;
+ if (symtab.stroff > start and symtab.stroff < min_pos) min_pos = symtab.stroff;
+ }
+ return min_pos - start;
+}
+
+fn findFreeSpace(self: *MachO, object_size: u64, min_alignment: u16) u64 {
+ var start: u64 = 0;
+ while (self.detectAllocCollision(start, object_size)) |item_end| {
+ start = mem.alignForwardGeneric(u64, item_end, min_alignment);
+ }
+ return start;
+}
+
+fn writeSymbol(self: *MachO, index: usize) !void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab;
+ var sym = [1]macho.nlist_64{self.symbol_table.items[index]};
+ const off = symtab.symoff + @sizeOf(macho.nlist_64) * index;
+ log.debug("writing symbol {} at 0x{x}\n", .{ sym[0], off });
+ try self.base.file.?.pwriteAll(mem.sliceAsBytes(sym[0..1]), off);
+}
+
+/// Writes Mach-O file header.
+/// Should be invoked last as it needs up-to-date values of ncmds and sizeof_cmds bookkeeping
+/// variables.
+fn writeMachOHeader(self: *MachO) !void {
+ var hdr: macho.mach_header_64 = undefined;
+ hdr.magic = macho.MH_MAGIC_64;
+
+ const CpuInfo = struct {
+ cpu_type: macho.cpu_type_t,
+ cpu_subtype: macho.cpu_subtype_t,
+ };
+
+ const cpu_info: CpuInfo = switch (self.base.options.target.cpu.arch) {
+ .aarch64 => .{
+ .cpu_type = macho.CPU_TYPE_ARM64,
+ .cpu_subtype = macho.CPU_SUBTYPE_ARM_ALL,
+ },
+ .x86_64 => .{
+ .cpu_type = macho.CPU_TYPE_X86_64,
+ .cpu_subtype = macho.CPU_SUBTYPE_X86_64_ALL,
+ },
+ else => return error.UnsupportedMachOArchitecture,
+ };
+ hdr.cputype = cpu_info.cpu_type;
+ hdr.cpusubtype = cpu_info.cpu_subtype;
+
+ const filetype: u32 = switch (self.base.options.output_mode) {
+ .Exe => macho.MH_EXECUTE,
+ .Obj => macho.MH_OBJECT,
+ .Lib => switch (self.base.options.link_mode) {
+ .Static => return error.TODOStaticLibMachOType,
+ .Dynamic => macho.MH_DYLIB,
+ },
+ };
+ hdr.filetype = filetype;
+ hdr.ncmds = @intCast(u32, self.load_commands.items.len);
+
+ var sizeofcmds: u32 = 0;
+ for (self.load_commands.items) |cmd| {
+ sizeofcmds += cmd.cmdsize();
+ }
+
+ hdr.sizeofcmds = sizeofcmds;
+
+ // TODO should these be set to something else?
+ hdr.flags = 0;
+ hdr.reserved = 0;
+
+ log.debug("writing Mach-O header {}\n", .{hdr});
+
+ try self.base.file.?.pwriteAll(@ptrCast([*]const u8, &hdr)[0..@sizeOf(macho.mach_header_64)], 0);
+}
+
+/// Saturating multiplication
+fn satMul(a: anytype, b: anytype) @TypeOf(a, b) {
+ const T = @TypeOf(a, b);
+ return std.math.mul(T, a, b) catch std.math.maxInt(T);
+}
diff --git a/src-self-hosted/link/msdos-stub.bin b/src-self-hosted/link/msdos-stub.bin
new file mode 100644
index 0000000000..96ad91198f
--- /dev/null
+++ b/src-self-hosted/link/msdos-stub.bin
Binary files differ
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index 8c5c034238..5af4460ade 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -153,8 +153,8 @@ const usage_build_generic =
\\ elf Executable and Linking Format
\\ c Compile to C source code
\\ wasm WebAssembly
+ \\ pe Portable Executable (Windows)
\\ coff (planned) Common Object File Format (Windows)
- \\ pe (planned) Portable Executable (Windows)
\\ macho (planned) macOS relocatables
\\ hex (planned) Intel IHEX
\\ raw (planned) Dump machine code directly
@@ -451,7 +451,7 @@ fn buildOutputType(
} else if (mem.eql(u8, ofmt, "coff")) {
break :blk .coff;
} else if (mem.eql(u8, ofmt, "pe")) {
- break :blk .coff;
+ break :blk .pe;
} else if (mem.eql(u8, ofmt, "macho")) {
break :blk .macho;
} else if (mem.eql(u8, ofmt, "wasm")) {
@@ -524,17 +524,19 @@ fn buildOutputType(
try stderr.print("\nUnable to parse command: {}\n", .{@errorName(err)});
continue;
}) |line| {
- if (mem.eql(u8, line, "update")) {
+ const actual_line = mem.trimRight(u8, line, "\r\n ");
+
+ if (mem.eql(u8, actual_line, "update")) {
if (output_mode == .Exe) {
try module.makeBinFileWritable();
}
try updateModule(gpa, &module, zir_out_path);
- } else if (mem.eql(u8, line, "exit")) {
+ } else if (mem.eql(u8, actual_line, "exit")) {
break;
- } else if (mem.eql(u8, line, "help")) {
+ } else if (mem.eql(u8, actual_line, "help")) {
try stderr.writeAll(repl_help);
} else {
- try stderr.print("unknown command: {}\n", .{line});
+ try stderr.print("unknown command: {}\n", .{actual_line});
}
} else {
break;
diff --git a/src-self-hosted/type.zig b/src-self-hosted/type.zig
index a9a1acf44b..4966395512 100644
--- a/src-self-hosted/type.zig
+++ b/src-self-hosted/type.zig
@@ -163,7 +163,7 @@ pub const Type = extern union {
// Hot path for common case:
if (a.castPointer()) |a_payload| {
if (b.castPointer()) |b_payload| {
- return eql(a_payload.pointee_type, b_payload.pointee_type);
+ return a.tag() == b.tag() and eql(a_payload.pointee_type, b_payload.pointee_type);
}
}
const is_slice_a = isSlice(a);
@@ -189,10 +189,10 @@ pub const Type = extern union {
.Array => {
if (a.arrayLen() != b.arrayLen())
return false;
- if (a.elemType().eql(b.elemType()))
+ if (!a.elemType().eql(b.elemType()))
return false;
- const sentinel_a = a.arraySentinel();
- const sentinel_b = b.arraySentinel();
+ const sentinel_a = a.sentinel();
+ const sentinel_b = b.sentinel();
if (sentinel_a) |sa| {
if (sentinel_b) |sb| {
return sa.eql(sb);
@@ -501,9 +501,9 @@ pub const Type = extern union {
.noreturn,
=> return out_stream.writeAll(@tagName(t)),
- .enum_literal => return out_stream.writeAll("@TypeOf(.EnumLiteral)"),
- .@"null" => return out_stream.writeAll("@TypeOf(null)"),
- .@"undefined" => return out_stream.writeAll("@TypeOf(undefined)"),
+ .enum_literal => return out_stream.writeAll("@Type(.EnumLiteral)"),
+ .@"null" => return out_stream.writeAll("@Type(.Null)"),
+ .@"undefined" => return out_stream.writeAll("@Type(.Undefined)"),
.@"anyframe" => return out_stream.writeAll("anyframe"),
.anyerror_void_error_union => return out_stream.writeAll("anyerror!void"),
@@ -630,8 +630,8 @@ pub const Type = extern union {
const payload = @fieldParentPtr(Payload.Pointer, "base", ty.ptr_otherwise);
if (payload.sentinel) |some| switch (payload.size) {
.One, .C => unreachable,
- .Many => try out_stream.writeAll("[*:{}]"),
- .Slice => try out_stream.writeAll("[:{}]"),
+ .Many => try out_stream.print("[*:{}]", .{some}),
+ .Slice => try out_stream.print("[:{}]", .{some}),
} else switch (payload.size) {
.One => try out_stream.writeAll("*"),
.Many => try out_stream.writeAll("[*]"),
@@ -1341,6 +1341,81 @@ pub const Type = extern union {
};
}
+ pub fn isAllowzeroPtr(self: Type) bool {
+ return switch (self.tag()) {
+ .u8,
+ .i8,
+ .u16,
+ .i16,
+ .u32,
+ .i32,
+ .u64,
+ .i64,
+ .usize,
+ .isize,
+ .c_short,
+ .c_ushort,
+ .c_int,
+ .c_uint,
+ .c_long,
+ .c_ulong,
+ .c_longlong,
+ .c_ulonglong,
+ .c_longdouble,
+ .f16,
+ .f32,
+ .f64,
+ .f128,
+ .c_void,
+ .bool,
+ .void,
+ .type,
+ .anyerror,
+ .comptime_int,
+ .comptime_float,
+ .noreturn,
+ .@"null",
+ .@"undefined",
+ .array,
+ .array_sentinel,
+ .array_u8,
+ .array_u8_sentinel_0,
+ .fn_noreturn_no_args,
+ .fn_void_no_args,
+ .fn_naked_noreturn_no_args,
+ .fn_ccc_void_no_args,
+ .function,
+ .int_unsigned,
+ .int_signed,
+ .single_mut_pointer,
+ .single_const_pointer,
+ .many_const_pointer,
+ .many_mut_pointer,
+ .c_const_pointer,
+ .c_mut_pointer,
+ .const_slice,
+ .mut_slice,
+ .single_const_pointer_to_comptime_int,
+ .const_slice_u8,
+ .optional,
+ .optional_single_mut_pointer,
+ .optional_single_const_pointer,
+ .enum_literal,
+ .error_union,
+ .@"anyframe",
+ .anyframe_T,
+ .anyerror_void_error_union,
+ .error_set,
+ .error_set_single,
+ => false,
+
+ .pointer => {
+ const payload = @fieldParentPtr(Payload.Pointer, "base", self.ptr_otherwise);
+ return payload.@"allowzero";
+ },
+ };
+ }
+
/// Asserts that the type is an optional
pub fn isPtrLikeOptional(self: Type) bool {
switch (self.tag()) {
@@ -1585,8 +1660,8 @@ pub const Type = extern union {
};
}
- /// Asserts the type is an array or vector.
- pub fn arraySentinel(self: Type) ?Value {
+ /// Asserts the type is an array, pointer or vector.
+ pub fn sentinel(self: Type) ?Value {
return switch (self.tag()) {
.u8,
.i8,
@@ -1626,16 +1701,8 @@ pub const Type = extern union {
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
.function,
- .pointer,
- .single_const_pointer,
- .single_mut_pointer,
- .many_const_pointer,
- .many_mut_pointer,
- .c_const_pointer,
- .c_mut_pointer,
.const_slice,
.mut_slice,
- .single_const_pointer_to_comptime_int,
.const_slice_u8,
.int_unsigned,
.int_signed,
@@ -1651,7 +1718,18 @@ pub const Type = extern union {
.error_set_single,
=> unreachable,
- .array, .array_u8 => return null,
+ .single_const_pointer,
+ .single_mut_pointer,
+ .many_const_pointer,
+ .many_mut_pointer,
+ .c_const_pointer,
+ .c_mut_pointer,
+ .single_const_pointer_to_comptime_int,
+ .array,
+ .array_u8,
+ => return null,
+
+ .pointer => return self.cast(Payload.Pointer).?.sentinel,
.array_sentinel => return self.cast(Payload.ArraySentinel).?.sentinel,
.array_u8_sentinel_0 => return Value.initTag(.zero),
};
diff --git a/src-self-hosted/value.zig b/src-self-hosted/value.zig
index bfd205f4d9..b65aa06bea 100644
--- a/src-self-hosted/value.zig
+++ b/src-self-hosted/value.zig
@@ -301,15 +301,15 @@ pub const Value = extern union {
.comptime_int_type => return out_stream.writeAll("comptime_int"),
.comptime_float_type => return out_stream.writeAll("comptime_float"),
.noreturn_type => return out_stream.writeAll("noreturn"),
- .null_type => return out_stream.writeAll("@TypeOf(null)"),
- .undefined_type => return out_stream.writeAll("@TypeOf(undefined)"),
+ .null_type => return out_stream.writeAll("@Type(.Null)"),
+ .undefined_type => return out_stream.writeAll("@Type(.Undefined)"),
.fn_noreturn_no_args_type => return out_stream.writeAll("fn() noreturn"),
.fn_void_no_args_type => return out_stream.writeAll("fn() void"),
.fn_naked_noreturn_no_args_type => return out_stream.writeAll("fn() callconv(.Naked) noreturn"),
.fn_ccc_void_no_args_type => return out_stream.writeAll("fn() callconv(.C) void"),
.single_const_pointer_to_comptime_int_type => return out_stream.writeAll("*const comptime_int"),
.const_slice_u8_type => return out_stream.writeAll("[]const u8"),
- .enum_literal_type => return out_stream.writeAll("@TypeOf(.EnumLiteral)"),
+ .enum_literal_type => return out_stream.writeAll("@Type(.EnumLiteral)"),
.anyframe_type => return out_stream.writeAll("anyframe"),
.null_value => return out_stream.writeAll("null"),
diff --git a/src-self-hosted/zir.zig b/src-self-hosted/zir.zig
index 04d3393626..b6d7fab4c5 100644
--- a/src-self-hosted/zir.zig
+++ b/src-self-hosted/zir.zig
@@ -231,6 +231,10 @@ pub const Inst = struct {
const_slice_type,
/// Create a pointer type with attributes
ptr_type,
+ /// Slice operation `array_ptr[start..end:sentinel]`
+ slice,
+ /// Slice operation with just start `lhs[rhs..]`
+ slice_start,
/// Write a value to a pointer. For loading, see `deref`.
store,
/// String Literal. Makes an anonymous Decl and then takes a pointer to it.
@@ -343,6 +347,7 @@ pub const Inst = struct {
.xor,
.error_union_type,
.merge_error_sets,
+ .slice_start,
=> BinOp,
.block,
@@ -380,6 +385,7 @@ pub const Inst = struct {
.ptr_type => PtrType,
.enum_literal => EnumLiteral,
.error_set => ErrorSet,
+ .slice => Slice,
};
}
@@ -481,6 +487,8 @@ pub const Inst = struct {
.error_union_type,
.bitnot,
.error_set,
+ .slice,
+ .slice_start,
=> false,
.@"break",
@@ -961,6 +969,20 @@ pub const Inst = struct {
},
kw_args: struct {},
};
+
+ pub const Slice = struct {
+ pub const base_tag = Tag.slice;
+ base: Inst,
+
+ positionals: struct {
+ array_ptr: *Inst,
+ start: *Inst,
+ },
+ kw_args: struct {
+ end: ?*Inst = null,
+ sentinel: ?*Inst = null,
+ },
+ };
};
pub const ErrorMsg = struct {
@@ -2574,7 +2596,7 @@ const EmitZIR = struct {
var len_pl = Value.Payload.Int_u64{ .int = ty.arrayLen() };
const len = Value.initPayload(&len_pl.base);
- const inst = if (ty.arraySentinel()) |sentinel| blk: {
+ const inst = if (ty.sentinel()) |sentinel| blk: {
const inst = try self.arena.allocator.create(Inst.ArrayTypeSentinel);
inst.* = .{
.base = .{
diff --git a/src-self-hosted/zir_sema.zig b/src-self-hosted/zir_sema.zig
index 676b662077..c99da39c04 100644
--- a/src-self-hosted/zir_sema.zig
+++ b/src-self-hosted/zir_sema.zig
@@ -132,6 +132,8 @@ pub fn analyzeInst(mod: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!
.error_union_type => return analyzeInstErrorUnionType(mod, scope, old_inst.castTag(.error_union_type).?),
.anyframe_type => return analyzeInstAnyframeType(mod, scope, old_inst.castTag(.anyframe_type).?),
.error_set => return analyzeInstErrorSet(mod, scope, old_inst.castTag(.error_set).?),
+ .slice => return analyzeInstSlice(mod, scope, old_inst.castTag(.slice).?),
+ .slice_start => return analyzeInstSliceStart(mod, scope, old_inst.castTag(.slice_start).?),
}
}
@@ -1172,6 +1174,22 @@ fn analyzeInstElemPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.ElemPtr) Inne
return mod.fail(scope, inst.base.src, "TODO implement more analyze elemptr", .{});
}
+fn analyzeInstSlice(mod: *Module, scope: *Scope, inst: *zir.Inst.Slice) InnerError!*Inst {
+ const array_ptr = try resolveInst(mod, scope, inst.positionals.array_ptr);
+ const start = try resolveInst(mod, scope, inst.positionals.start);
+ const end = if (inst.kw_args.end) |end| try resolveInst(mod, scope, end) else null;
+ const sentinel = if (inst.kw_args.sentinel) |sentinel| try resolveInst(mod, scope, sentinel) else null;
+
+ return mod.analyzeSlice(scope, inst.base.src, array_ptr, start, end, sentinel);
+}
+
+fn analyzeInstSliceStart(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst {
+ const array_ptr = try resolveInst(mod, scope, inst.positionals.lhs);
+ const start = try resolveInst(mod, scope, inst.positionals.rhs);
+
+ return mod.analyzeSlice(scope, inst.base.src, array_ptr, start, null, null);
+}
+
fn analyzeInstShl(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) InnerError!*Inst {
return mod.fail(scope, inst.base.src, "TODO implement analyzeInstShl", .{});
}
@@ -1239,6 +1257,12 @@ fn analyzeInstArithmetic(mod: *Module, scope: *Scope, inst: *zir.Inst.BinOp) Inn
if (casted_lhs.value()) |lhs_val| {
if (casted_rhs.value()) |rhs_val| {
+ if (lhs_val.isUndef() or rhs_val.isUndef()) {
+ return mod.constInst(scope, inst.base.src, .{
+ .ty = resolved_type,
+ .val = Value.initTag(.undef),
+ });
+ }
return analyzeInstComptimeOp(mod, scope, scalar_type, inst, lhs_val, rhs_val);
}
}
diff --git a/src/ir.cpp b/src/ir.cpp
index cdca38379d..803b97891f 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -26594,7 +26594,7 @@ static IrInstGen *ir_analyze_instruction_cmpxchg(IrAnalyze *ira, IrInstSrcCmpxch
if (operand_type->id == ZigTypeIdFloat) {
ir_add_error(ira, &instruction->type_value->child->base,
- buf_sprintf("expected integer, enum or pointer type, found '%s'", buf_ptr(&operand_type->name)));
+ buf_sprintf("expected bool, integer, enum or pointer type, found '%s'", buf_ptr(&operand_type->name)));
return ira->codegen->invalid_inst_gen;
}
@@ -30249,7 +30249,7 @@ static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstGen *op) {
return ira->codegen->builtin_types.entry_invalid;
if (operand_ptr_type == nullptr) {
ir_add_error(ira, &op->base,
- buf_sprintf("expected integer, float, enum or pointer type, found '%s'",
+ buf_sprintf("expected bool, integer, float, enum or pointer type, found '%s'",
buf_ptr(&operand_type->name)));
return ira->codegen->builtin_types.entry_invalid;
}
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index d6a0b34911..31f2b57dc8 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -899,7 +899,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ _ = @cmpxchgWeak(f32, &x, 1, 2, .SeqCst, .SeqCst);
\\}
, &[_][]const u8{
- "tmp.zig:3:22: error: expected integer, enum or pointer type, found 'f32'",
+ "tmp.zig:3:22: error: expected bool, integer, enum or pointer type, found 'f32'",
});
cases.add("atomicrmw with float op not .Xchg, .Add or .Sub",