 lib/std/crypto/scrypt.zig                                     |  20
 lib/std/fs.zig                                                |   6
 lib/std/os/linux/io_uring.zig                                 | 196
 lib/std/os/test.zig                                           |   6
 src/AstGen.zig                                                |  11
 src/Module.zig                                                |   2
 src/Sema.zig                                                  |  73
 src/TypedValue.zig                                            |  11
 src/Zir.zig                                                   |   5
 src/type.zig                                                  |  20
 src/value.zig                                                 |  54
 test/behavior/basic.zig                                       |  33
 test/behavior/struct.zig                                      |  22
 test/cases/compile_errors/invalid_store_to_comptime_field.zig |  20
 14 files changed, 339 insertions(+), 140 deletions(-)
diff --git a/lib/std/crypto/scrypt.zig b/lib/std/crypto/scrypt.zig
index 31c8b754ff..e8cb6bab7b 100644
--- a/lib/std/crypto/scrypt.zig
+++ b/lib/std/crypto/scrypt.zig
@@ -73,11 +73,11 @@ fn salsaXor(tmp: *align(16) [16]u32, in: []align(16) const u32, out: []align(16)
}
fn blockMix(tmp: *align(16) [16]u32, in: []align(16) const u32, out: []align(16) u32, r: u30) void {
- blockCopy(tmp, in[(2 * r - 1) * 16 ..], 1);
+ blockCopy(tmp, @alignCast(16, in[(2 * r - 1) * 16 ..]), 1);
var i: usize = 0;
while (i < 2 * r) : (i += 2) {
- salsaXor(tmp, in[i * 16 ..], out[i * 8 ..]);
- salsaXor(tmp, in[i * 16 + 16 ..], out[i * 8 + r * 16 ..]);
+ salsaXor(tmp, @alignCast(16, in[i * 16 ..]), @alignCast(16, out[i * 8 ..]));
+ salsaXor(tmp, @alignCast(16, in[i * 16 + 16 ..]), @alignCast(16, out[i * 8 + r * 16 ..]));
}
}
@@ -87,8 +87,8 @@ fn integerify(b: []align(16) const u32, r: u30) u64 {
}
fn smix(b: []align(16) u8, r: u30, n: usize, v: []align(16) u32, xy: []align(16) u32) void {
- var x = xy[0 .. 32 * r];
- var y = xy[32 * r ..];
+ var x = @alignCast(16, xy[0 .. 32 * r]);
+ var y = @alignCast(16, xy[32 * r ..]);
for (x) |*v1, j| {
v1.* = mem.readIntSliceLittle(u32, b[4 * j ..]);
@@ -97,21 +97,21 @@ fn smix(b: []align(16) u8, r: u30, n: usize, v: []align(16) u32, xy: []align(16)
var tmp: [16]u32 align(16) = undefined;
var i: usize = 0;
while (i < n) : (i += 2) {
- blockCopy(v[i * (32 * r) ..], x, 2 * r);
+ blockCopy(@alignCast(16, v[i * (32 * r) ..]), x, 2 * r);
blockMix(&tmp, x, y, r);
- blockCopy(v[(i + 1) * (32 * r) ..], y, 2 * r);
+ blockCopy(@alignCast(16, v[(i + 1) * (32 * r) ..]), y, 2 * r);
blockMix(&tmp, y, x, r);
}
i = 0;
while (i < n) : (i += 2) {
var j = @intCast(usize, integerify(x, r) & (n - 1));
- blockXor(x, v[j * (32 * r) ..], 2 * r);
+ blockXor(x, @alignCast(16, v[j * (32 * r) ..]), 2 * r);
blockMix(&tmp, x, y, r);
j = @intCast(usize, integerify(y, r) & (n - 1));
- blockXor(y, v[j * (32 * r) ..], 2 * r);
+ blockXor(y, @alignCast(16, v[j * (32 * r) ..]), 2 * r);
blockMix(&tmp, y, x, r);
}
@@ -201,7 +201,7 @@ pub fn kdf(
try pwhash.pbkdf2(dk, password, salt, 1, HmacSha256);
var i: u32 = 0;
while (i < params.p) : (i += 1) {
- smix(dk[i * 128 * params.r ..], params.r, n, v, xy);
+ smix(@alignCast(16, dk[i * 128 * params.r ..]), params.r, n, v, xy);
}
try pwhash.pbkdf2(derived_key, password, dk, 1, HmacSha256);
}
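
Note: the scrypt changes all follow one pattern. Slicing an align(16) slice at a
runtime start offset decays the result to the element alignment in the type
system, even when the offset is in fact a multiple of 16 bytes, so the callee's
align(16) requirement must be re-asserted. A minimal sketch of the pattern
(hypothetical consume/demo functions, two-argument @alignCast as in Zig of this
era):

    fn consume(block: []align(16) const u32) void {
        _ = block;
    }

    fn demo(xy: []align(16) u32, r: u30) void {
        // xy[32 * r ..] has a runtime start offset, so its type only carries
        // the element alignment (align(4)); the offset is 32 * r u32s, i.e.
        // 128 * r bytes, a multiple of 16, so align(16) is sound to re-assert.
        consume(@alignCast(16, xy[32 * r ..]));
    }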
diff --git a/lib/std/fs.zig b/lib/std/fs.zig
index 31354a2782..4a21af806e 100644
--- a/lib/std/fs.zig
+++ b/lib/std/fs.zig
@@ -887,10 +887,8 @@ pub const Dir = struct {
}
pub fn deinit(self: *Walker) void {
- while (self.stack.popOrNull()) |*item| {
- if (self.stack.items.len != 0) {
- item.iter.dir.close();
- }
+ for (self.stack.items) |*item| {
+ item.iter.dir.close();
}
self.stack.deinit();
self.name_buffer.deinit();
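
Note: the fix iterates the remaining stack directly instead of popping, so
every still-open iterator directory is closed, e.g. when a walk is abandoned
midway. A minimal usage sketch (not from the commit):

    const std = @import("std");

    fn countEntries(dir: std.fs.Dir, allocator: std.mem.Allocator) !usize {
        var walker = try dir.walk(allocator);
        defer walker.deinit(); // closes all directory handles left on the stack
        var n: usize = 0;
        while (try walker.next()) |_| n += 1;
        return n;
    }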
diff --git a/lib/std/os/linux/io_uring.zig b/lib/std/os/linux/io_uring.zig
index 31c416c8a1..3672a94c69 100644
--- a/lib/std/os/linux/io_uring.zig
+++ b/lib/std/os/linux/io_uring.zig
@@ -7,10 +7,6 @@ const os = std.os;
const linux = os.linux;
const testing = std.testing;
-const io_uring_params = linux.io_uring_params;
-const io_uring_sqe = linux.io_uring_sqe;
-const io_uring_cqe = linux.io_uring_cqe;
-
pub const IO_Uring = struct {
fd: os.fd_t = -1,
sq: SubmissionQueue,
@@ -18,24 +14,24 @@ pub const IO_Uring = struct {
flags: u32,
features: u32,
- /// A friendly way to setup an io_uring, with default io_uring_params.
+ /// A friendly way to set up an io_uring, with default linux.io_uring_params.
/// `entries` must be a power of two between 1 and 4096, although the kernel will make the final
/// call on how many entries the submission and completion queues will ultimately have,
/// see https://github.com/torvalds/linux/blob/v5.8/fs/io_uring.c#L8027-L8050.
/// Matches the interface of io_uring_queue_init() in liburing.
pub fn init(entries: u13, flags: u32) !IO_Uring {
- var params = mem.zeroInit(io_uring_params, .{
+ var params = mem.zeroInit(linux.io_uring_params, .{
.flags = flags,
.sq_thread_idle = 1000,
});
return try IO_Uring.init_params(entries, &params);
}
- /// A powerful way to setup an io_uring, if you want to tweak io_uring_params such as submission
+ /// A powerful way to set up an io_uring, if you want to tweak linux.io_uring_params such as submission
/// queue thread cpu affinity or thread idle timeout (the kernel and our default is 1 second).
/// `params` is passed by reference because the kernel needs to modify the parameters.
/// Matches the interface of io_uring_queue_init_params() in liburing.
- pub fn init_params(entries: u13, p: *io_uring_params) !IO_Uring {
+ pub fn init_params(entries: u13, p: *linux.io_uring_params) !IO_Uring {
if (entries == 0) return error.EntriesZero;
if (!std.math.isPowerOfTwo(entries)) return error.EntriesNotPowerOfTwo;
@@ -53,7 +49,7 @@ pub const IO_Uring = struct {
.FAULT => return error.ParamsOutsideAccessibleAddressSpace,
// The resv array contains non-zero data, p.flags contains an unsupported flag,
// entries out of bounds, IORING_SETUP_SQ_AFF was specified without IORING_SETUP_SQPOLL,
- // or IORING_SETUP_CQSIZE was specified but io_uring_params.cq_entries was invalid:
+ // or IORING_SETUP_CQSIZE was specified but linux.io_uring_params.cq_entries was invalid:
.INVAL => return error.ArgumentsInvalid,
.MFILE => return error.ProcessFdQuotaExceeded,
.NFILE => return error.SystemFdQuotaExceeded,
@@ -135,7 +131,7 @@ pub const IO_Uring = struct {
/// and the null return in liburing is more a C idiom than anything else, for lack of a better
/// alternative. In Zig, we have first-class error handling... so let's use it.
/// Matches the implementation of io_uring_get_sqe() in liburing.
- pub fn get_sqe(self: *IO_Uring) !*io_uring_sqe {
+ pub fn get_sqe(self: *IO_Uring) !*linux.io_uring_sqe {
const head = @atomicLoad(u32, self.sq.head, .Acquire);
// Remember that these head and tail offsets wrap around every four billion operations.
// We must therefore use wrapping addition and subtraction to avoid a runtime crash.
@@ -268,7 +264,7 @@ pub const IO_Uring = struct {
/// Faster, because we can now amortize the atomic store release to `cq.head` across the batch.
/// See https://github.com/axboe/liburing/issues/103#issuecomment-686665007.
/// Matches the implementation of io_uring_peek_batch_cqe() in liburing, but supports waiting.
- pub fn copy_cqes(self: *IO_Uring, cqes: []io_uring_cqe, wait_nr: u32) !u32 {
+ pub fn copy_cqes(self: *IO_Uring, cqes: []linux.io_uring_cqe, wait_nr: u32) !u32 {
const count = self.copy_cqes_ready(cqes, wait_nr);
if (count > 0) return count;
if (self.cq_ring_needs_flush() or wait_nr > 0) {
@@ -278,7 +274,7 @@ pub const IO_Uring = struct {
return 0;
}
- fn copy_cqes_ready(self: *IO_Uring, cqes: []io_uring_cqe, wait_nr: u32) u32 {
+ fn copy_cqes_ready(self: *IO_Uring, cqes: []linux.io_uring_cqe, wait_nr: u32) u32 {
_ = wait_nr;
const ready = self.cq_ready();
const count = std.math.min(cqes.len, ready);
@@ -298,8 +294,8 @@ pub const IO_Uring = struct {
/// Returns a copy of an I/O completion, waiting for it if necessary, and advancing the CQ ring.
/// A convenience method for `copy_cqes()` for when you don't need to batch or peek.
- pub fn copy_cqe(ring: *IO_Uring) !io_uring_cqe {
- var cqes: [1]io_uring_cqe = undefined;
+ pub fn copy_cqe(ring: *IO_Uring) !linux.io_uring_cqe {
+ var cqes: [1]linux.io_uring_cqe = undefined;
while (true) {
const count = try ring.copy_cqes(&cqes, 1);
if (count > 0) return cqes[0];
@@ -316,7 +312,7 @@ pub const IO_Uring = struct {
/// Must be called exactly once after a zero-copy CQE has been processed by your application.
/// Not idempotent, calling more than once will result in other CQEs being lost.
/// Matches the implementation of cqe_seen() in liburing.
- pub fn cqe_seen(self: *IO_Uring, cqe: *io_uring_cqe) void {
+ pub fn cqe_seen(self: *IO_Uring, cqe: *linux.io_uring_cqe) void {
_ = cqe;
self.cq_advance(1);
}
@@ -339,7 +335,7 @@ pub const IO_Uring = struct {
/// apply to the write, since the fsync may complete before the write is issued to the disk.
/// You should preferably use `link_with_next_sqe()` on a write's SQE to link it with an fsync,
/// or else insert a full write barrier using `drain_previous_sqes()` when queueing an fsync.
- pub fn fsync(self: *IO_Uring, user_data: u64, fd: os.fd_t, flags: u32) !*io_uring_sqe {
+ pub fn fsync(self: *IO_Uring, user_data: u64, fd: os.fd_t, flags: u32) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_fsync(sqe, fd, flags);
sqe.user_data = user_data;
@@ -351,7 +347,7 @@ pub const IO_Uring = struct {
/// A no-op is more useful than may appear at first glance.
/// For example, you could call `drain_previous_sqes()` on the returned SQE, to use the no-op to
/// know when the ring is idle before acting on a kill signal.
- pub fn nop(self: *IO_Uring, user_data: u64) !*io_uring_sqe {
+ pub fn nop(self: *IO_Uring, user_data: u64) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_nop(sqe);
sqe.user_data = user_data;
@@ -387,7 +383,7 @@ pub const IO_Uring = struct {
fd: os.fd_t,
buffer: ReadBuffer,
offset: u64,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
switch (buffer) {
.buffer => |slice| io_uring_prep_read(sqe, fd, slice, offset),
@@ -410,7 +406,7 @@ pub const IO_Uring = struct {
fd: os.fd_t,
buffer: []const u8,
offset: u64,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_write(sqe, fd, buffer, offset);
sqe.user_data = user_data;
@@ -429,7 +425,7 @@ pub const IO_Uring = struct {
buffer: *os.iovec,
offset: u64,
buffer_index: u16,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_read_fixed(sqe, fd, buffer, offset, buffer_index);
sqe.user_data = user_data;
@@ -446,7 +442,7 @@ pub const IO_Uring = struct {
fd: os.fd_t,
iovecs: []const os.iovec_const,
offset: u64,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_writev(sqe, fd, iovecs, offset);
sqe.user_data = user_data;
@@ -465,7 +461,7 @@ pub const IO_Uring = struct {
buffer: *os.iovec,
offset: u64,
buffer_index: u16,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_write_fixed(sqe, fd, buffer, offset, buffer_index);
sqe.user_data = user_data;
@@ -481,7 +477,7 @@ pub const IO_Uring = struct {
addr: *os.sockaddr,
addrlen: *os.socklen_t,
flags: u32,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_accept(sqe, fd, addr, addrlen, flags);
sqe.user_data = user_data;
@@ -496,7 +492,7 @@ pub const IO_Uring = struct {
fd: os.fd_t,
addr: *const os.sockaddr,
addrlen: os.socklen_t,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_connect(sqe, fd, addr, addrlen);
sqe.user_data = user_data;
@@ -512,7 +508,7 @@ pub const IO_Uring = struct {
fd: os.fd_t,
op: u32,
ev: ?*linux.epoll_event,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_epoll_ctl(sqe, epfd, fd, op, ev);
sqe.user_data = user_data;
@@ -541,7 +537,7 @@ pub const IO_Uring = struct {
fd: os.fd_t,
buffer: RecvBuffer,
flags: u32,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
switch (buffer) {
.buffer => |slice| io_uring_prep_recv(sqe, fd, slice, flags),
@@ -564,7 +560,7 @@ pub const IO_Uring = struct {
fd: os.fd_t,
buffer: []const u8,
flags: u32,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_send(sqe, fd, buffer, flags);
sqe.user_data = user_data;
@@ -579,7 +575,7 @@ pub const IO_Uring = struct {
fd: os.fd_t,
msg: *os.msghdr,
flags: u32,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_recvmsg(sqe, fd, msg, flags);
sqe.user_data = user_data;
@@ -594,7 +590,7 @@ pub const IO_Uring = struct {
fd: os.fd_t,
msg: *const os.msghdr_const,
flags: u32,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_sendmsg(sqe, fd, msg, flags);
sqe.user_data = user_data;
@@ -610,7 +606,7 @@ pub const IO_Uring = struct {
path: [*:0]const u8,
flags: u32,
mode: os.mode_t,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_openat(sqe, fd, path, flags, mode);
sqe.user_data = user_data;
@@ -619,7 +615,7 @@ pub const IO_Uring = struct {
/// Queues (but does not submit) an SQE to perform a `close(2)`.
/// Returns a pointer to the SQE.
- pub fn close(self: *IO_Uring, user_data: u64, fd: os.fd_t) !*io_uring_sqe {
+ pub fn close(self: *IO_Uring, user_data: u64, fd: os.fd_t) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_close(sqe, fd);
sqe.user_data = user_data;
@@ -645,7 +641,7 @@ pub const IO_Uring = struct {
ts: *const os.linux.kernel_timespec,
count: u32,
flags: u32,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_timeout(sqe, ts, count, flags);
sqe.user_data = user_data;
@@ -665,7 +661,7 @@ pub const IO_Uring = struct {
user_data: u64,
timeout_user_data: u64,
flags: u32,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_timeout_remove(sqe, timeout_user_data, flags);
sqe.user_data = user_data;
@@ -693,7 +689,7 @@ pub const IO_Uring = struct {
user_data: u64,
ts: *const os.linux.kernel_timespec,
flags: u32,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_link_timeout(sqe, ts, flags);
sqe.user_data = user_data;
@@ -707,7 +703,7 @@ pub const IO_Uring = struct {
user_data: u64,
fd: os.fd_t,
poll_mask: u32,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_poll_add(sqe, fd, poll_mask);
sqe.user_data = user_data;
@@ -720,7 +716,7 @@ pub const IO_Uring = struct {
self: *IO_Uring,
user_data: u64,
target_user_data: u64,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_poll_remove(sqe, target_user_data);
sqe.user_data = user_data;
@@ -736,7 +732,7 @@ pub const IO_Uring = struct {
new_user_data: u64,
poll_mask: u32,
flags: u32,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_poll_update(sqe, old_user_data, new_user_data, poll_mask, flags);
sqe.user_data = user_data;
@@ -752,7 +748,7 @@ pub const IO_Uring = struct {
mode: i32,
offset: u64,
len: u64,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_fallocate(sqe, fd, mode, offset, len);
sqe.user_data = user_data;
@@ -769,7 +765,7 @@ pub const IO_Uring = struct {
flags: u32,
mask: u32,
buf: *linux.Statx,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_statx(sqe, fd, path, flags, mask, buf);
sqe.user_data = user_data;
@@ -789,7 +785,7 @@ pub const IO_Uring = struct {
user_data: u64,
cancel_user_data: u64,
flags: u32,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_cancel(sqe, cancel_user_data, flags);
sqe.user_data = user_data;
@@ -805,7 +801,7 @@ pub const IO_Uring = struct {
user_data: u64,
sockfd: os.socket_t,
how: u32,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_shutdown(sqe, sockfd, how);
sqe.user_data = user_data;
@@ -822,7 +818,7 @@ pub const IO_Uring = struct {
new_dir_fd: os.fd_t,
new_path: [*:0]const u8,
flags: u32,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_renameat(sqe, old_dir_fd, old_path, new_dir_fd, new_path, flags);
sqe.user_data = user_data;
@@ -837,7 +833,7 @@ pub const IO_Uring = struct {
dir_fd: os.fd_t,
path: [*:0]const u8,
flags: u32,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_unlinkat(sqe, dir_fd, path, flags);
sqe.user_data = user_data;
@@ -852,7 +848,7 @@ pub const IO_Uring = struct {
dir_fd: os.fd_t,
path: [*:0]const u8,
mode: os.mode_t,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_mkdirat(sqe, dir_fd, path, mode);
sqe.user_data = user_data;
@@ -867,7 +863,7 @@ pub const IO_Uring = struct {
target: [*:0]const u8,
new_dir_fd: os.fd_t,
link_path: [*:0]const u8,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_symlinkat(sqe, target, new_dir_fd, link_path);
sqe.user_data = user_data;
@@ -884,7 +880,7 @@ pub const IO_Uring = struct {
new_dir_fd: os.fd_t,
new_path: [*:0]const u8,
flags: u32,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_linkat(sqe, old_dir_fd, old_path, new_dir_fd, new_path, flags);
sqe.user_data = user_data;
@@ -905,7 +901,7 @@ pub const IO_Uring = struct {
buffer_size: usize,
group_id: usize,
buffer_id: usize,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_provide_buffers(sqe, buffers, buffers_count, buffer_size, group_id, buffer_id);
sqe.user_data = user_data;
@@ -919,7 +915,7 @@ pub const IO_Uring = struct {
user_data: u64,
buffers_count: usize,
group_id: usize,
- ) !*io_uring_sqe {
+ ) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_remove_buffers(sqe, buffers_count, group_id);
sqe.user_data = user_data;
@@ -1083,7 +1079,7 @@ pub const SubmissionQueue = struct {
flags: *u32,
dropped: *u32,
array: []u32,
- sqes: []io_uring_sqe,
+ sqes: []linux.io_uring_sqe,
mmap: []align(mem.page_size) u8,
mmap_sqes: []align(mem.page_size) u8,
@@ -1094,12 +1090,12 @@ pub const SubmissionQueue = struct {
sqe_head: u32 = 0,
sqe_tail: u32 = 0,
- pub fn init(fd: os.fd_t, p: io_uring_params) !SubmissionQueue {
+ pub fn init(fd: os.fd_t, p: linux.io_uring_params) !SubmissionQueue {
assert(fd >= 0);
assert((p.features & linux.IORING_FEAT_SINGLE_MMAP) != 0);
const size = std.math.max(
p.sq_off.array + p.sq_entries * @sizeOf(u32),
- p.cq_off.cqes + p.cq_entries * @sizeOf(io_uring_cqe),
+ p.cq_off.cqes + p.cq_entries * @sizeOf(linux.io_uring_cqe),
);
const mmap = try os.mmap(
null,
@@ -1113,8 +1109,8 @@ pub const SubmissionQueue = struct {
assert(mmap.len == size);
// The motivation for the `sqes` and `array` indirection is to make it possible for the
- // application to preallocate static io_uring_sqe entries and then replay them when needed.
- const size_sqes = p.sq_entries * @sizeOf(io_uring_sqe);
+ // application to preallocate static linux.io_uring_sqe entries and then replay them when needed.
+ const size_sqes = p.sq_entries * @sizeOf(linux.io_uring_sqe);
const mmap_sqes = try os.mmap(
null,
size_sqes,
@@ -1127,7 +1123,7 @@ pub const SubmissionQueue = struct {
assert(mmap_sqes.len == size_sqes);
const array = @ptrCast([*]u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.array]));
- const sqes = @ptrCast([*]io_uring_sqe, @alignCast(@alignOf(io_uring_sqe), &mmap_sqes[0]));
+ const sqes = @ptrCast([*]linux.io_uring_sqe, @alignCast(@alignOf(linux.io_uring_sqe), &mmap_sqes[0]));
// We expect the kernel copies p.sq_entries to the u32 pointed to by p.sq_off.ring_entries,
// see https://github.com/torvalds/linux/blob/v5.8/fs/io_uring.c#L7843-L7844.
assert(
@@ -1158,15 +1154,15 @@ pub const CompletionQueue = struct {
tail: *u32,
mask: u32,
overflow: *u32,
- cqes: []io_uring_cqe,
+ cqes: []linux.io_uring_cqe,
- pub fn init(fd: os.fd_t, p: io_uring_params, sq: SubmissionQueue) !CompletionQueue {
+ pub fn init(fd: os.fd_t, p: linux.io_uring_params, sq: SubmissionQueue) !CompletionQueue {
assert(fd >= 0);
assert((p.features & linux.IORING_FEAT_SINGLE_MMAP) != 0);
const mmap = sq.mmap;
const cqes = @ptrCast(
- [*]io_uring_cqe,
- @alignCast(@alignOf(io_uring_cqe), &mmap[p.cq_off.cqes]),
+ [*]linux.io_uring_cqe,
+ @alignCast(@alignOf(linux.io_uring_cqe), &mmap[p.cq_off.cqes]),
);
assert(p.cq_entries ==
@ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.ring_entries])).*);
@@ -1186,7 +1182,7 @@ pub const CompletionQueue = struct {
}
};
-pub fn io_uring_prep_nop(sqe: *io_uring_sqe) void {
+pub fn io_uring_prep_nop(sqe: *linux.io_uring_sqe) void {
sqe.* = .{
.opcode = .NOP,
.flags = 0,
@@ -1204,7 +1200,7 @@ pub fn io_uring_prep_nop(sqe: *io_uring_sqe) void {
};
}
-pub fn io_uring_prep_fsync(sqe: *io_uring_sqe, fd: os.fd_t, flags: u32) void {
+pub fn io_uring_prep_fsync(sqe: *linux.io_uring_sqe, fd: os.fd_t, flags: u32) void {
sqe.* = .{
.opcode = .FSYNC,
.flags = 0,
@@ -1224,7 +1220,7 @@ pub fn io_uring_prep_fsync(sqe: *io_uring_sqe, fd: os.fd_t, flags: u32) void {
pub fn io_uring_prep_rw(
op: linux.IORING_OP,
- sqe: *io_uring_sqe,
+ sqe: *linux.io_uring_sqe,
fd: os.fd_t,
addr: u64,
len: usize,
@@ -1247,16 +1243,16 @@ pub fn io_uring_prep_rw(
};
}
-pub fn io_uring_prep_read(sqe: *io_uring_sqe, fd: os.fd_t, buffer: []u8, offset: u64) void {
+pub fn io_uring_prep_read(sqe: *linux.io_uring_sqe, fd: os.fd_t, buffer: []u8, offset: u64) void {
io_uring_prep_rw(.READ, sqe, fd, @ptrToInt(buffer.ptr), buffer.len, offset);
}
-pub fn io_uring_prep_write(sqe: *io_uring_sqe, fd: os.fd_t, buffer: []const u8, offset: u64) void {
+pub fn io_uring_prep_write(sqe: *linux.io_uring_sqe, fd: os.fd_t, buffer: []const u8, offset: u64) void {
io_uring_prep_rw(.WRITE, sqe, fd, @ptrToInt(buffer.ptr), buffer.len, offset);
}
pub fn io_uring_prep_readv(
- sqe: *io_uring_sqe,
+ sqe: *linux.io_uring_sqe,
fd: os.fd_t,
iovecs: []const os.iovec,
offset: u64,
@@ -1265,7 +1261,7 @@ pub fn io_uring_prep_readv(
}
pub fn io_uring_prep_writev(
- sqe: *io_uring_sqe,
+ sqe: *linux.io_uring_sqe,
fd: os.fd_t,
iovecs: []const os.iovec_const,
offset: u64,
@@ -1273,12 +1269,12 @@ pub fn io_uring_prep_writev(
io_uring_prep_rw(.WRITEV, sqe, fd, @ptrToInt(iovecs.ptr), iovecs.len, offset);
}
-pub fn io_uring_prep_read_fixed(sqe: *io_uring_sqe, fd: os.fd_t, buffer: *os.iovec, offset: u64, buffer_index: u16) void {
+pub fn io_uring_prep_read_fixed(sqe: *linux.io_uring_sqe, fd: os.fd_t, buffer: *os.iovec, offset: u64, buffer_index: u16) void {
io_uring_prep_rw(.READ_FIXED, sqe, fd, @ptrToInt(buffer.iov_base), buffer.iov_len, offset);
sqe.buf_index = buffer_index;
}
-pub fn io_uring_prep_write_fixed(sqe: *io_uring_sqe, fd: os.fd_t, buffer: *os.iovec, offset: u64, buffer_index: u16) void {
+pub fn io_uring_prep_write_fixed(sqe: *linux.io_uring_sqe, fd: os.fd_t, buffer: *os.iovec, offset: u64, buffer_index: u16) void {
io_uring_prep_rw(.WRITE_FIXED, sqe, fd, @ptrToInt(buffer.iov_base), buffer.iov_len, offset);
sqe.buf_index = buffer_index;
}
@@ -1294,7 +1290,7 @@ pub inline fn __io_uring_prep_poll_mask(poll_mask: u32) u32 {
}
pub fn io_uring_prep_accept(
- sqe: *io_uring_sqe,
+ sqe: *linux.io_uring_sqe,
fd: os.fd_t,
addr: *os.sockaddr,
addrlen: *os.socklen_t,
@@ -1307,7 +1303,7 @@ pub fn io_uring_prep_accept(
}
pub fn io_uring_prep_connect(
- sqe: *io_uring_sqe,
+ sqe: *linux.io_uring_sqe,
fd: os.fd_t,
addr: *const os.sockaddr,
addrlen: os.socklen_t,
@@ -1317,7 +1313,7 @@ pub fn io_uring_prep_connect(
}
pub fn io_uring_prep_epoll_ctl(
- sqe: *io_uring_sqe,
+ sqe: *linux.io_uring_sqe,
epfd: os.fd_t,
fd: os.fd_t,
op: u32,
@@ -1326,18 +1322,18 @@ pub fn io_uring_prep_epoll_ctl(
io_uring_prep_rw(.EPOLL_CTL, sqe, epfd, @ptrToInt(ev), op, @intCast(u64, fd));
}
-pub fn io_uring_prep_recv(sqe: *io_uring_sqe, fd: os.fd_t, buffer: []u8, flags: u32) void {
+pub fn io_uring_prep_recv(sqe: *linux.io_uring_sqe, fd: os.fd_t, buffer: []u8, flags: u32) void {
io_uring_prep_rw(.RECV, sqe, fd, @ptrToInt(buffer.ptr), buffer.len, 0);
sqe.rw_flags = flags;
}
-pub fn io_uring_prep_send(sqe: *io_uring_sqe, fd: os.fd_t, buffer: []const u8, flags: u32) void {
+pub fn io_uring_prep_send(sqe: *linux.io_uring_sqe, fd: os.fd_t, buffer: []const u8, flags: u32) void {
io_uring_prep_rw(.SEND, sqe, fd, @ptrToInt(buffer.ptr), buffer.len, 0);
sqe.rw_flags = flags;
}
pub fn io_uring_prep_recvmsg(
- sqe: *io_uring_sqe,
+ sqe: *linux.io_uring_sqe,
fd: os.fd_t,
msg: *os.msghdr,
flags: u32,
@@ -1347,7 +1343,7 @@ pub fn io_uring_prep_recvmsg(
}
pub fn io_uring_prep_sendmsg(
- sqe: *io_uring_sqe,
+ sqe: *linux.io_uring_sqe,
fd: os.fd_t,
msg: *const os.msghdr_const,
flags: u32,
@@ -1357,7 +1353,7 @@ pub fn io_uring_prep_sendmsg(
}
pub fn io_uring_prep_openat(
- sqe: *io_uring_sqe,
+ sqe: *linux.io_uring_sqe,
fd: os.fd_t,
path: [*:0]const u8,
flags: u32,
@@ -1367,7 +1363,7 @@ pub fn io_uring_prep_openat(
sqe.rw_flags = flags;
}
-pub fn io_uring_prep_close(sqe: *io_uring_sqe, fd: os.fd_t) void {
+pub fn io_uring_prep_close(sqe: *linux.io_uring_sqe, fd: os.fd_t) void {
sqe.* = .{
.opcode = .CLOSE,
.flags = 0,
@@ -1386,7 +1382,7 @@ pub fn io_uring_prep_close(sqe: *io_uring_sqe, fd: os.fd_t) void {
}
pub fn io_uring_prep_timeout(
- sqe: *io_uring_sqe,
+ sqe: *linux.io_uring_sqe,
ts: *const os.linux.kernel_timespec,
count: u32,
flags: u32,
@@ -1395,7 +1391,7 @@ pub fn io_uring_prep_timeout(
sqe.rw_flags = flags;
}
-pub fn io_uring_prep_timeout_remove(sqe: *io_uring_sqe, timeout_user_data: u64, flags: u32) void {
+pub fn io_uring_prep_timeout_remove(sqe: *linux.io_uring_sqe, timeout_user_data: u64, flags: u32) void {
sqe.* = .{
.opcode = .TIMEOUT_REMOVE,
.flags = 0,
@@ -1414,7 +1410,7 @@ pub fn io_uring_prep_timeout_remove(sqe: *io_uring_sqe, timeout_user_data: u64,
}
pub fn io_uring_prep_link_timeout(
- sqe: *io_uring_sqe,
+ sqe: *linux.io_uring_sqe,
ts: *const os.linux.kernel_timespec,
flags: u32,
) void {
@@ -1423,7 +1419,7 @@ pub fn io_uring_prep_link_timeout(
}
pub fn io_uring_prep_poll_add(
- sqe: *io_uring_sqe,
+ sqe: *linux.io_uring_sqe,
fd: os.fd_t,
poll_mask: u32,
) void {
@@ -1432,14 +1428,14 @@ pub fn io_uring_prep_poll_add(
}
pub fn io_uring_prep_poll_remove(
- sqe: *io_uring_sqe,
+ sqe: *linux.io_uring_sqe,
target_user_data: u64,
) void {
io_uring_prep_rw(.POLL_REMOVE, sqe, -1, target_user_data, 0, 0);
}
pub fn io_uring_prep_poll_update(
- sqe: *io_uring_sqe,
+ sqe: *linux.io_uring_sqe,
old_user_data: u64,
new_user_data: u64,
poll_mask: u32,
@@ -1450,7 +1446,7 @@ pub fn io_uring_prep_poll_update(
}
pub fn io_uring_prep_fallocate(
- sqe: *io_uring_sqe,
+ sqe: *linux.io_uring_sqe,
fd: os.fd_t,
mode: i32,
offset: u64,
@@ -1474,7 +1470,7 @@ pub fn io_uring_prep_fallocate(
}
pub fn io_uring_prep_statx(
- sqe: *io_uring_sqe,
+ sqe: *linux.io_uring_sqe,
fd: os.fd_t,
path: [*:0]const u8,
flags: u32,
@@ -1486,7 +1482,7 @@ pub fn io_uring_prep_statx(
}
pub fn io_uring_prep_cancel(
- sqe: *io_uring_sqe,
+ sqe: *linux.io_uring_sqe,
cancel_user_data: u64,
flags: u32,
) void {
@@ -1495,7 +1491,7 @@ pub fn io_uring_prep_cancel(
}
pub fn io_uring_prep_shutdown(
- sqe: *io_uring_sqe,
+ sqe: *linux.io_uring_sqe,
sockfd: os.socket_t,
how: u32,
) void {
@@ -1503,7 +1499,7 @@ pub fn io_uring_prep_shutdown(
}
pub fn io_uring_prep_renameat(
- sqe: *io_uring_sqe,
+ sqe: *linux.io_uring_sqe,
old_dir_fd: os.fd_t,
old_path: [*:0]const u8,
new_dir_fd: os.fd_t,
@@ -1523,7 +1519,7 @@ pub fn io_uring_prep_renameat(
}
pub fn io_uring_prep_unlinkat(
- sqe: *io_uring_sqe,
+ sqe: *linux.io_uring_sqe,
dir_fd: os.fd_t,
path: [*:0]const u8,
flags: u32,
@@ -1533,7 +1529,7 @@ pub fn io_uring_prep_unlinkat(
}
pub fn io_uring_prep_mkdirat(
- sqe: *io_uring_sqe,
+ sqe: *linux.io_uring_sqe,
dir_fd: os.fd_t,
path: [*:0]const u8,
mode: os.mode_t,
@@ -1542,7 +1538,7 @@ pub fn io_uring_prep_mkdirat(
}
pub fn io_uring_prep_symlinkat(
- sqe: *io_uring_sqe,
+ sqe: *linux.io_uring_sqe,
target: [*:0]const u8,
new_dir_fd: os.fd_t,
link_path: [*:0]const u8,
@@ -1558,7 +1554,7 @@ pub fn io_uring_prep_symlinkat(
}
pub fn io_uring_prep_linkat(
- sqe: *io_uring_sqe,
+ sqe: *linux.io_uring_sqe,
old_dir_fd: os.fd_t,
old_path: [*:0]const u8,
new_dir_fd: os.fd_t,
@@ -1578,7 +1574,7 @@ pub fn io_uring_prep_linkat(
}
pub fn io_uring_prep_provide_buffers(
- sqe: *io_uring_sqe,
+ sqe: *linux.io_uring_sqe,
buffers: [*]u8,
num: usize,
buffer_len: usize,
@@ -1591,7 +1587,7 @@ pub fn io_uring_prep_provide_buffers(
}
pub fn io_uring_prep_remove_buffers(
- sqe: *io_uring_sqe,
+ sqe: *linux.io_uring_sqe,
num: usize,
group_id: usize,
) void {
@@ -1602,9 +1598,9 @@ pub fn io_uring_prep_remove_buffers(
test "structs/offsets/entries" {
if (builtin.os.tag != .linux) return error.SkipZigTest;
- try testing.expectEqual(@as(usize, 120), @sizeOf(io_uring_params));
- try testing.expectEqual(@as(usize, 64), @sizeOf(io_uring_sqe));
- try testing.expectEqual(@as(usize, 16), @sizeOf(io_uring_cqe));
+ try testing.expectEqual(@as(usize, 120), @sizeOf(linux.io_uring_params));
+ try testing.expectEqual(@as(usize, 64), @sizeOf(linux.io_uring_sqe));
+ try testing.expectEqual(@as(usize, 16), @sizeOf(linux.io_uring_cqe));
try testing.expectEqual(0, linux.IORING_OFF_SQ_RING);
try testing.expectEqual(0x8000000, linux.IORING_OFF_CQ_RING);
@@ -1628,7 +1624,7 @@ test "nop" {
}
const sqe = try ring.nop(0xaaaaaaaa);
- try testing.expectEqual(io_uring_sqe{
+ try testing.expectEqual(linux.io_uring_sqe{
.opcode = .NOP,
.flags = 0,
.ioprio = 0,
@@ -1658,7 +1654,7 @@ test "nop" {
try testing.expectEqual(@as(u32, 0), ring.cq.head.*);
try testing.expectEqual(@as(u32, 0), ring.sq_ready());
- try testing.expectEqual(io_uring_cqe{
+ try testing.expectEqual(linux.io_uring_cqe{
.user_data = 0xaaaaaaaa,
.res = 0,
.flags = 0,
@@ -1669,7 +1665,7 @@ test "nop" {
const sqe_barrier = try ring.nop(0xbbbbbbbb);
sqe_barrier.flags |= linux.IOSQE_IO_DRAIN;
try testing.expectEqual(@as(u32, 1), try ring.submit());
- try testing.expectEqual(io_uring_cqe{
+ try testing.expectEqual(linux.io_uring_cqe{
.user_data = 0xbbbbbbbb,
.res = 0,
.flags = 0,
@@ -1909,7 +1905,7 @@ test "openat" {
const flags: u32 = os.O.CLOEXEC | os.O.RDWR | os.O.CREAT;
const mode: os.mode_t = 0o666;
const sqe_openat = try ring.openat(0x33333333, linux.AT.FDCWD, path, flags, mode);
- try testing.expectEqual(io_uring_sqe{
+ try testing.expectEqual(linux.io_uring_sqe{
.opcode = .OPENAT,
.flags = 0,
.ioprio = 0,
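
Note: the io_uring changes are purely a namespacing cleanup: the local
io_uring_params/io_uring_sqe/io_uring_cqe aliases are dropped in favor of
spelling out linux.*. Call sites are unaffected; a minimal sketch of typical
use, built only from functions touched above:

    const std = @import("std");
    const linux = std.os.linux;

    fn pingRing() !void {
        var ring = try linux.IO_Uring.init(16, 0);
        defer ring.deinit();

        _ = try ring.nop(0xaaaaaaaa); // queue an SQE
        _ = try ring.submit(); // submit the queued SQEs to the kernel

        const cqe: linux.io_uring_cqe = try ring.copy_cqe();
        std.debug.assert(cqe.user_data == 0xaaaaaaaa);
    }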
diff --git a/lib/std/os/test.zig b/lib/std/os/test.zig
index 975d5a64eb..44f8b16b9e 100644
--- a/lib/std/os/test.zig
+++ b/lib/std/os/test.zig
@@ -766,8 +766,10 @@ test "sigaction" {
}
};
+ const actual_handler = if (builtin.zig_backend == .stage1) S.handler else &S.handler;
+
var sa = os.Sigaction{
- .handler = .{ .sigaction = S.handler },
+ .handler = .{ .sigaction = actual_handler },
.mask = os.empty_sigset,
.flags = os.SA.SIGINFO | os.SA.RESETHAND,
};
@@ -776,7 +778,7 @@ test "sigaction" {
try os.sigaction(os.SIG.USR1, &sa, null);
// Check that we can read it back correctly.
try os.sigaction(os.SIG.USR1, null, &old_sa);
- try testing.expectEqual(S.handler, old_sa.handler.sigaction.?);
+ try testing.expectEqual(actual_handler, old_sa.handler.sigaction.?);
try testing.expect((old_sa.flags & os.SA.SIGINFO) != 0);
// Invoke the handler.
try os.raise(os.SIG.USR1);
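
Note: the sigaction fix tracks stage2's function-pointer semantics: naming a
function now yields the function itself, and a handler field of pointer type
needs an explicit &. A reduced sketch of the distinction (simplified handler
signature, assuming the stage2 backend):

    fn handler(sig: c_int) callconv(.C) void {
        _ = sig;
    }

    comptime {
        // stage1: `handler` itself coerced to the pointer type.
        // stage2: the address must be taken explicitly with `&`.
        const ptr: *const fn (c_int) callconv(.C) void = &handler;
        _ = ptr;
    }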
diff --git a/src/AstGen.zig b/src/AstGen.zig
index ab5befa4ba..3566610bb6 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -291,8 +291,8 @@ pub const ResultLoc = union(enum) {
}
};
-pub const align_rl: ResultLoc = .{ .ty = .u16_type };
-pub const coerced_align_rl: ResultLoc = .{ .coerced_ty = .u16_type };
+pub const align_rl: ResultLoc = .{ .ty = .u29_type };
+pub const coerced_align_rl: ResultLoc = .{ .coerced_ty = .u29_type };
pub const bool_rl: ResultLoc = .{ .ty = .bool_type };
pub const type_rl: ResultLoc = .{ .ty = .type_type };
pub const coerced_type_rl: ResultLoc = .{ .coerced_ty = .type_type };
@@ -8077,6 +8077,7 @@ const primitives = std.ComptimeStringMap(Zir.Inst.Ref, .{
.{ "true", .bool_true },
.{ "type", .type_type },
.{ "u16", .u16_type },
+ .{ "u29", .u29_type },
.{ "u32", .u32_type },
.{ "u64", .u64_type },
.{ "u128", .u128_type },
@@ -8749,6 +8750,7 @@ fn nodeImpliesMoreThanOnePossibleValue(tree: *const Ast, start_node: Ast.Node.In
.isize_type,
.type_type,
.u16_type,
+ .u29_type,
.u32_type,
.u64_type,
.u128_type,
@@ -8988,6 +8990,7 @@ fn nodeImpliesComptimeOnly(tree: *const Ast, start_node: Ast.Node.Index) bool {
.i8_type,
.isize_type,
.u16_type,
+ .u29_type,
.u32_type,
.u64_type,
.u128_type,
@@ -9063,6 +9066,7 @@ fn rvalue(
as_ty | @enumToInt(Zir.Inst.Ref.u8_type),
as_ty | @enumToInt(Zir.Inst.Ref.i8_type),
as_ty | @enumToInt(Zir.Inst.Ref.u16_type),
+ as_ty | @enumToInt(Zir.Inst.Ref.u29_type),
as_ty | @enumToInt(Zir.Inst.Ref.i16_type),
as_ty | @enumToInt(Zir.Inst.Ref.u32_type),
as_ty | @enumToInt(Zir.Inst.Ref.i32_type),
@@ -9875,6 +9879,9 @@ const GenZir = struct {
errdefer as_scope.unstack();
as_scope.rl_ptr = try as_scope.addBin(.coerce_result_ptr, dest_type, result_ptr);
+ // `rl_ty_inst` needs to be set in case the stores to `rl_ptr` are eliminated.
+ as_scope.rl_ty_inst = dest_type;
+
return as_scope;
}
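
Note: alignment expressions are now type-checked against u29 instead of u16,
lifting the previous 1 << 15 ceiling; the largest power of two representable in
a u29 is 1 << 28. A trivial check of the new bound (not from the commit):

    const std = @import("std");

    comptime {
        const max_align: u29 = 1 << 28; // largest power of two that fits in u29
        std.debug.assert(std.math.isPowerOfTwo(max_align));
    }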
diff --git a/src/Module.zig b/src/Module.zig
index f8d662ae1f..12d311046a 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -4016,7 +4016,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
try wip_captures.finalize();
const src: LazySrcLoc = .{ .node_offset = 0 };
const decl_tv = try sema.resolveInstValue(&block_scope, src, result_ref);
- const decl_align: u16 = blk: {
+ const decl_align: u32 = blk: {
const align_ref = decl.zirAlignRef();
if (align_ref == .none) break :blk 0;
break :blk try sema.resolveAlign(&block_scope, src, align_ref);
diff --git a/src/Sema.zig b/src/Sema.zig
index 593b299833..381f922093 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -1739,9 +1739,9 @@ pub fn resolveAlign(
block: *Block,
src: LazySrcLoc,
zir_ref: Zir.Inst.Ref,
-) !u16 {
- const alignment_big = try sema.resolveInt(block, src, zir_ref, Type.initTag(.u16));
- const alignment = @intCast(u16, alignment_big); // We coerce to u16 in the prev line.
+) !u32 {
+ const alignment_big = try sema.resolveInt(block, src, zir_ref, Type.initTag(.u29));
+ const alignment = @intCast(u32, alignment_big); // We coerce to u29 in the prev line.
if (alignment == 0) return sema.fail(block, src, "alignment must be >= 1", .{});
if (!std.math.isPowerOfTwo(alignment)) {
return sema.fail(block, src, "alignment value {d} is not a power of two", .{
@@ -1875,7 +1875,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
const dummy_ptr = try trash_block.addTy(.alloc, sema.typeOf(ptr));
const dummy_operand = try trash_block.addBitCast(pointee_ty, .void_value);
- try sema.storePtr(&trash_block, src, dummy_ptr, dummy_operand);
+ try sema.storePtr2(&trash_block, src, dummy_ptr, src, dummy_operand, src, .bitcast);
{
const air_tags = sema.air_instructions.items(.tag);
@@ -2526,7 +2526,7 @@ fn zirRetPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const src: LazySrcLoc = .{ .node_offset = inst_data };
try sema.requireFunctionBlock(block, src);
- if (block.is_comptime) {
+ if (block.is_comptime or try sema.typeRequiresComptime(block, src, sema.fn_ret_ty)) {
const fn_ret_ty = try sema.resolveTypeFields(block, src, sema.fn_ret_ty);
return sema.analyzeComptimeAlloc(block, fn_ret_ty, 0, src);
}
@@ -2663,7 +2663,7 @@ fn zirAllocExtended(
break :blk try sema.resolveType(block, ty_src, type_ref);
} else undefined;
- const alignment: u16 = if (small.has_align) blk: {
+ const alignment: u32 = if (small.has_align) blk: {
const align_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
const alignment = try sema.resolveAlign(block, align_src, align_ref);
@@ -3616,7 +3616,7 @@ fn zirValidateArrayInit(
const air_tags = sema.air_instructions.items(.tag);
const air_datas = sema.air_instructions.items(.data);
- for (instrs) |elem_ptr, i| {
+ outer: for (instrs) |elem_ptr, i| {
const elem_ptr_data = sema.code.instructions.items(.data)[elem_ptr].pl_node;
const elem_src: LazySrcLoc = .{ .node_offset = elem_ptr_data.src_node };
@@ -3630,6 +3630,10 @@ fn zirValidateArrayInit(
// of the for loop.
var block_index = block.instructions.items.len - 1;
while (block.instructions.items[block_index] != elem_ptr_air_inst) {
+ if (block_index == 0) {
+ array_is_comptime = true;
+ continue :outer;
+ }
block_index -= 1;
}
first_block_index = @minimum(first_block_index, block_index);
@@ -3672,6 +3676,13 @@ fn zirValidateArrayInit(
}
if (array_is_comptime) {
+ if (try sema.resolveDefinedValue(block, init_src, array_ptr)) |ptr_val| {
+ if (ptr_val.tag() == .comptime_field_ptr) {
+ // This store was validated by the individual elem ptrs.
+ return;
+ }
+ }
+
// Our task is to delete all the `elem_ptr` and `store` instructions, and insert
// instead a single `store` to the array_ptr with a comptime struct value.
// Also to populate the sentinel value, if any.
@@ -14199,7 +14210,7 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
.size = ptr_size,
.mutable = !is_const_val.toBool(),
.@"volatile" = is_volatile_val.toBool(),
- .@"align" = @intCast(u16, alignment_val.toUnsignedInt(target)), // TODO: Validate this value.
+ .@"align" = @intCast(u29, alignment_val.toUnsignedInt(target)), // TODO: Validate this value.
.@"addrspace" = address_space_val.toEnum(std.builtin.AddressSpace),
.pointee_type = try child_ty.copy(sema.arena),
.@"allowzero" = is_allowzero_val.toBool(),
@@ -16973,7 +16984,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const body = sema.code.extra[extra_index..][0..body_len];
extra_index += body.len;
- const val = try sema.resolveGenericBody(block, align_src, body, inst, Type.u16);
+ const val = try sema.resolveGenericBody(block, align_src, body, inst, Type.u29);
if (val.tag() == .generic_poison) {
break :blk null;
}
@@ -18462,14 +18473,11 @@ fn structFieldPtrByIndex(
const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, ptr_ty_data);
if (field.is_comptime) {
- var anon_decl = try block.startAnonDecl(field_src);
- defer anon_decl.deinit();
- const decl = try anon_decl.finish(
- try field.ty.copy(anon_decl.arena()),
- try field.default_val.copy(anon_decl.arena()),
- ptr_ty_data.@"align",
- );
- return sema.analyzeDeclRef(decl);
+ const val = try Value.Tag.comptime_field_ptr.create(sema.arena, .{
+ .field_ty = try field.ty.copy(sema.arena),
+ .field_val = try field.default_val.copy(sema.arena),
+ });
+ return sema.addConstant(ptr_field_ty, val);
}
if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| {
@@ -20187,6 +20195,13 @@ fn storePtr2(
// TODO handle if the element type requires comptime
+ if (air_tag == .bitcast) {
+ // `air_tag == .bitcast` is used as a special case for `zirCoerceResultPtr`
+ // to avoid calling `requireRuntimeBlock` for the dummy block.
+ _ = try block.addBinOp(.store, ptr, operand);
+ return;
+ }
+
try sema.requireRuntimeBlock(block, runtime_src);
try sema.queueFullTypeResolution(elem_ty);
_ = try block.addBinOp(air_tag, ptr, operand);
@@ -20240,6 +20255,14 @@ fn storePtrVal(
const bitcasted_val = try sema.bitCastVal(block, src, operand_val, operand_ty, mut_kit.ty, 0);
+ if (mut_kit.decl_ref_mut.runtime_index == std.math.maxInt(u32)) {
+ // Special case for comptime field ptr.
+ if (!mut_kit.val.eql(bitcasted_val, mut_kit.ty, sema.mod)) {
+ return sema.fail(block, src, "value stored in comptime field does not match the default value of the field", .{});
+ }
+ return;
+ }
+
const arena = mut_kit.beginArena(sema.mod);
defer mut_kit.finishArena(sema.mod);
@@ -20289,6 +20312,19 @@ fn beginComptimePtrMutation(
.ty = decl.ty,
};
},
+ .comptime_field_ptr => {
+ const payload = ptr_val.castTag(.comptime_field_ptr).?.data;
+ const duped = try sema.arena.create(Value);
+ duped.* = payload.field_val;
+ return ComptimePtrMutationKit{
+ .decl_ref_mut = .{
+ .decl_index = @intToEnum(Module.Decl.Index, 0),
+ .runtime_index = std.math.maxInt(u32),
+ },
+ .val = duped,
+ .ty = payload.field_ty,
+ };
+ },
.elem_ptr => {
const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
var parent = try beginComptimePtrMutation(sema, block, src, elem_ptr.array_ptr);
@@ -23850,6 +23886,7 @@ pub fn typeHasOnePossibleValue(
.i8,
.u16,
.i16,
+ .u29,
.u32,
.i32,
.u64,
@@ -24143,6 +24180,7 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref {
.u8 => return .u8_type,
.i8 => return .i8_type,
.u16 => return .u16_type,
+ .u29 => return .u29_type,
.i16 => return .i16_type,
.u32 => return .u32_type,
.i32 => return .i32_type,
@@ -24513,6 +24551,7 @@ pub fn typeRequiresComptime(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Typ
.i8,
.u16,
.i16,
+ .u29,
.u32,
.i32,
.u64,
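
Note: taken together, the Sema changes replace the anonymous-Decl approach for
comptime fields with a first-class comptime_field_ptr value, and storePtrVal
rejects any store that disagrees with the field's default. A reduced sketch of
the resulting semantics, mirroring the tests added below:

    const S = struct {
        comptime a: u32 = 1,
    };

    test "store to a comptime field must match its default" {
        var s: S = .{};
        s.a = 1; // accepted: the stored value equals the default
        // s.a = 2; // error: value stored in comptime field does not
        //          // match the default value of the field
    }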
diff --git a/src/TypedValue.zig b/src/TypedValue.zig
index 9f69e4c8bd..4b3bc23231 100644
--- a/src/TypedValue.zig
+++ b/src/TypedValue.zig
@@ -79,6 +79,7 @@ pub fn print(
.i8_type => return writer.writeAll("i8"),
.u16_type => return writer.writeAll("u16"),
.i16_type => return writer.writeAll("i16"),
+ .u29_type => return writer.writeAll("u29"),
.u32_type => return writer.writeAll("u32"),
.i32_type => return writer.writeAll("i32"),
.u64_type => return writer.writeAll("u64"),
@@ -264,6 +265,16 @@ pub fn print(
.val = decl.val,
}, writer, level - 1, mod);
},
+ .comptime_field_ptr => {
+ const payload = val.castTag(.comptime_field_ptr).?.data;
+ if (level == 0) {
+ return writer.writeAll("(comptime field ptr)");
+ }
+ return print(.{
+ .ty = payload.field_ty,
+ .val = payload.field_val,
+ }, writer, level - 1, mod);
+ },
.elem_ptr => {
const elem_ptr = val.castTag(.elem_ptr).?.data;
try writer.writeAll("&");
diff --git a/src/Zir.zig b/src/Zir.zig
index f09f2015e0..9f1737d19d 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -1961,6 +1961,7 @@ pub const Inst = struct {
i8_type,
u16_type,
i16_type,
+ u29_type,
u32_type,
i32_type,
u64_type,
@@ -2072,6 +2073,10 @@ pub const Inst = struct {
.ty = Type.initTag(.type),
.val = Value.initTag(.i16_type),
},
+ .u29_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.u29_type),
+ },
.u32_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.u32_type),
diff --git a/src/type.zig b/src/type.zig
index ee669df620..14c613a947 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -36,6 +36,7 @@ pub const Type = extern union {
.i8,
.u16,
.i16,
+ .u29,
.u32,
.i32,
.u64,
@@ -568,6 +569,7 @@ pub const Type = extern union {
.i8,
.u16,
.i16,
+ .u29,
.u32,
.i32,
.u64,
@@ -979,6 +981,7 @@ pub const Type = extern union {
.i8,
.u16,
.i16,
+ .u29,
.u32,
.i32,
.u64,
@@ -1261,6 +1264,7 @@ pub const Type = extern union {
.i8,
.u16,
.i16,
+ .u29,
.u32,
.i32,
.u64,
@@ -1551,6 +1555,7 @@ pub const Type = extern union {
.i8,
.u16,
.i16,
+ .u29,
.u32,
.i32,
.u64,
@@ -1935,6 +1940,7 @@ pub const Type = extern union {
.i8,
.u16,
.i16,
+ .u29,
.u32,
.i32,
.u64,
@@ -2235,6 +2241,7 @@ pub const Type = extern union {
.u8 => return Value.initTag(.u8_type),
.i8 => return Value.initTag(.i8_type),
.u16 => return Value.initTag(.u16_type),
+ .u29 => return Value.initTag(.u29_type),
.i16 => return Value.initTag(.i16_type),
.u32 => return Value.initTag(.u32_type),
.i32 => return Value.initTag(.i32_type),
@@ -2312,6 +2319,7 @@ pub const Type = extern union {
.i8,
.u16,
.i16,
+ .u29,
.u32,
.i32,
.u64,
@@ -2560,6 +2568,7 @@ pub const Type = extern union {
.i8,
.u16,
.i16,
+ .u29,
.u32,
.i32,
.u64,
@@ -2953,6 +2962,7 @@ pub const Type = extern union {
.vector => return AbiAlignmentAdvanced{ .scalar = 16 },
.i16, .u16 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(16, target) },
+ .u29 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(29, target) },
.i32, .u32 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(32, target) },
.i64, .u64 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(64, target) },
.u128, .i128 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(128, target) },
@@ -3416,6 +3426,7 @@ pub const Type = extern union {
},
.i16, .u16 => return AbiSizeAdvanced{ .scalar = intAbiSize(16, target) },
+ .u29 => return AbiSizeAdvanced{ .scalar = intAbiSize(29, target) },
.i32, .u32 => return AbiSizeAdvanced{ .scalar = intAbiSize(32, target) },
.i64, .u64 => return AbiSizeAdvanced{ .scalar = intAbiSize(64, target) },
.u128, .i128 => return AbiSizeAdvanced{ .scalar = intAbiSize(128, target) },
@@ -3569,6 +3580,7 @@ pub const Type = extern union {
.bool, .u1 => 1,
.u8, .i8 => 8,
.i16, .u16, .f16 => 16,
+ .u29 => 29,
.i32, .u32, .f32 => 32,
.i64, .u64, .f64 => 64,
.f80 => 80,
@@ -4524,6 +4536,7 @@ pub const Type = extern union {
.u1,
.u8,
.u16,
+ .u29,
.u32,
.u64,
.u128,
@@ -4550,6 +4563,7 @@ pub const Type = extern union {
.i8 => return .{ .signedness = .signed, .bits = 8 },
.u16 => return .{ .signedness = .unsigned, .bits = 16 },
.i16 => return .{ .signedness = .signed, .bits = 16 },
+ .u29 => return .{ .signedness = .unsigned, .bits = 29 },
.u32 => return .{ .signedness = .unsigned, .bits = 32 },
.i32 => return .{ .signedness = .signed, .bits = 32 },
.u64 => return .{ .signedness = .unsigned, .bits = 64 },
@@ -4814,6 +4828,7 @@ pub const Type = extern union {
.i8,
.u16,
.i16,
+ .u29,
.u32,
.i32,
.u64,
@@ -4856,6 +4871,7 @@ pub const Type = extern union {
.i8,
.u16,
.i16,
+ .u29,
.u32,
.i32,
.u64,
@@ -5072,6 +5088,7 @@ pub const Type = extern union {
.i8,
.u16,
.i16,
+ .u29,
.u32,
.i32,
.u64,
@@ -5816,6 +5833,7 @@ pub const Type = extern union {
i8,
u16,
i16,
+ u29,
u32,
i32,
u64,
@@ -5939,6 +5957,7 @@ pub const Type = extern union {
.i8,
.u16,
.i16,
+ .u29,
.u32,
.i32,
.u64,
@@ -6302,6 +6321,7 @@ pub const Type = extern union {
pub const @"u1" = initTag(.u1);
pub const @"u8" = initTag(.u8);
pub const @"u16" = initTag(.u16);
+ pub const @"u29" = initTag(.u29);
pub const @"u32" = initTag(.u32);
pub const @"u64" = initTag(.u64);
diff --git a/src/value.zig b/src/value.zig
index a80d788894..21fe52e706 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -30,6 +30,7 @@ pub const Value = extern union {
i8_type,
u16_type,
i16_type,
+ u29_type,
u32_type,
i32_type,
u64_type,
@@ -120,6 +121,8 @@ pub const Value = extern union {
/// This Tag will never be seen by machine codegen backends. It is changed into a
/// `decl_ref` when a comptime variable goes out of scope.
decl_ref_mut,
+ /// Behaves like `decl_ref_mut` but validates that the stored value matches the field value.
+ comptime_field_ptr,
/// Pointer to a specific element of an array, vector or slice.
elem_ptr,
/// Pointer to a specific field of a struct or union.
@@ -194,6 +197,7 @@ pub const Value = extern union {
.i8_type,
.u16_type,
.i16_type,
+ .u29_type,
.u32_type,
.i32_type,
.u64_type,
@@ -316,6 +320,7 @@ pub const Value = extern union {
.aggregate => Payload.Aggregate,
.@"union" => Payload.Union,
.bound_fn => Payload.BoundFn,
+ .comptime_field_ptr => Payload.ComptimeFieldPtr,
};
}
@@ -394,6 +399,7 @@ pub const Value = extern union {
.i8_type,
.u16_type,
.i16_type,
+ .u29_type,
.u32_type,
.i32_type,
.u64_type,
@@ -506,6 +512,18 @@ pub const Value = extern union {
};
return Value{ .ptr_otherwise = &new_payload.base };
},
+ .comptime_field_ptr => {
+ const payload = self.cast(Payload.ComptimeFieldPtr).?;
+ const new_payload = try arena.create(Payload.ComptimeFieldPtr);
+ new_payload.* = .{
+ .base = payload.base,
+ .data = .{
+ .field_val = try payload.data.field_val.copy(arena),
+ .field_ty = try payload.data.field_ty.copy(arena),
+ },
+ };
+ return Value{ .ptr_otherwise = &new_payload.base };
+ },
.elem_ptr => {
const payload = self.castTag(.elem_ptr).?;
const new_payload = try arena.create(Payload.ElemPtr);
@@ -645,6 +663,7 @@ pub const Value = extern union {
.u8_type => return out_stream.writeAll("u8"),
.i8_type => return out_stream.writeAll("i8"),
.u16_type => return out_stream.writeAll("u16"),
+ .u29_type => return out_stream.writeAll("u29"),
.i16_type => return out_stream.writeAll("i16"),
.u32_type => return out_stream.writeAll("u32"),
.i32_type => return out_stream.writeAll("i32"),
@@ -754,6 +773,9 @@ pub const Value = extern union {
const decl_index = val.castTag(.decl_ref).?.data;
return out_stream.print("(decl_ref {d})", .{decl_index});
},
+ .comptime_field_ptr => {
+ return out_stream.writeAll("(comptime_field_ptr)");
+ },
.elem_ptr => {
const elem_ptr = val.castTag(.elem_ptr).?.data;
try out_stream.print("&[{}] ", .{elem_ptr.index});
@@ -882,6 +904,7 @@ pub const Value = extern union {
.i8_type => Type.initTag(.i8),
.u16_type => Type.initTag(.u16),
.i16_type => Type.initTag(.i16),
+ .u29_type => Type.initTag(.u29),
.u32_type => Type.initTag(.u32),
.i32_type => Type.initTag(.i32),
.u64_type => Type.initTag(.u64),
@@ -1706,6 +1729,7 @@ pub const Value = extern union {
.int_big_negative => return self.castTag(.int_big_negative).?.asBigInt().bitCountTwosComp(),
.decl_ref_mut,
+ .comptime_field_ptr,
.extern_fn,
.decl_ref,
.function,
@@ -1770,6 +1794,7 @@ pub const Value = extern union {
.bool_true,
.decl_ref,
.decl_ref_mut,
+ .comptime_field_ptr,
.extern_fn,
.function,
.variable,
@@ -2362,7 +2387,7 @@ pub const Value = extern union {
pub fn isComptimeMutablePtr(val: Value) bool {
return switch (val.tag()) {
- .decl_ref_mut => true,
+ .decl_ref_mut, .comptime_field_ptr => true,
.elem_ptr => isComptimeMutablePtr(val.castTag(.elem_ptr).?.data.array_ptr),
.field_ptr => isComptimeMutablePtr(val.castTag(.field_ptr).?.data.container_ptr),
.eu_payload_ptr => isComptimeMutablePtr(val.castTag(.eu_payload_ptr).?.data.container_ptr),
@@ -2426,6 +2451,9 @@ pub const Value = extern union {
const decl: Module.Decl.Index = ptr_val.pointerDecl().?;
std.hash.autoHash(hasher, decl);
},
+ .comptime_field_ptr => {
+ std.hash.autoHash(hasher, Value.Tag.comptime_field_ptr);
+ },
.elem_ptr => {
const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
@@ -2471,7 +2499,7 @@ pub const Value = extern union {
return switch (val.tag()) {
.slice => val.castTag(.slice).?.data.ptr,
// TODO this should require being a slice tag, and not allow decl_ref, field_ptr, etc.
- .decl_ref, .decl_ref_mut, .field_ptr, .elem_ptr => val,
+ .decl_ref, .decl_ref_mut, .field_ptr, .elem_ptr, .comptime_field_ptr => val,
else => unreachable,
};
}
@@ -2497,6 +2525,14 @@ pub const Value = extern union {
return 1;
}
},
+ .comptime_field_ptr => {
+ const payload = val.castTag(.comptime_field_ptr).?.data;
+ if (payload.field_ty.zigTypeTag() == .Array) {
+ return payload.field_ty.arrayLen();
+ } else {
+ return 1;
+ }
+ },
else => unreachable,
};
}
@@ -2587,6 +2623,7 @@ pub const Value = extern union {
.decl_ref => return mod.declPtr(val.castTag(.decl_ref).?.data).val.elemValueAdvanced(mod, index, arena, buffer),
.decl_ref_mut => return mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val.elemValueAdvanced(mod, index, arena, buffer),
+ .comptime_field_ptr => return val.castTag(.comptime_field_ptr).?.data.field_val.elemValueAdvanced(mod, index, arena, buffer),
.elem_ptr => {
const data = val.castTag(.elem_ptr).?.data;
return data.array_ptr.elemValueAdvanced(mod, index + data.index, arena, buffer);
@@ -2623,6 +2660,7 @@ pub const Value = extern union {
.decl_ref => sliceArray(mod.declPtr(val.castTag(.decl_ref).?.data).val, mod, arena, start, end),
.decl_ref_mut => sliceArray(mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val, mod, arena, start, end),
+ .comptime_field_ptr => sliceArray(val.castTag(.comptime_field_ptr).?.data.field_val, mod, arena, start, end),
.elem_ptr => blk: {
const elem_ptr = val.castTag(.elem_ptr).?.data;
break :blk sliceArray(elem_ptr.array_ptr, mod, arena, start + elem_ptr.index, end + elem_ptr.index);
@@ -4742,6 +4780,14 @@ pub const Value = extern union {
},
};
+ pub const ComptimeFieldPtr = struct {
+ base: Payload,
+ data: struct {
+ field_val: Value,
+ field_ty: Type,
+ },
+ };
+
pub const ElemPtr = struct {
pub const base_tag = Tag.elem_ptr;
@@ -4864,7 +4910,7 @@ pub const Value = extern union {
/// `Module.resolvePeerTypes`.
stored_inst_list: std.ArrayListUnmanaged(Air.Inst.Ref) = .{},
/// 0 means ABI-aligned.
- alignment: u16,
+ alignment: u32,
},
};
@@ -4875,7 +4921,7 @@ pub const Value = extern union {
data: struct {
decl_index: Module.Decl.Index,
/// 0 means ABI-aligned.
- alignment: u16,
+ alignment: u32,
},
};
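
Note: because comptime_field_ptr carries the field's type and default value, a
pointer to a comptime field stays usable without materializing an anonymous
Decl. A rough sketch of the intended behavior (hypothetical test, based on the
value.zig handlers above):

    const std = @import("std");

    const S = struct {
        comptime a: [2]u32 = [2]u32{ 1, 2 },
    };

    test "a pointer to a comptime field sees the default value" {
        var s: S = .{};
        const p = &s.a; // modeled internally as a comptime_field_ptr
        try std.testing.expect(p.len == 2);
        try std.testing.expect(p[0] == 1);
    }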
diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig
index 3129091091..a69df862c1 100644
--- a/test/behavior/basic.zig
+++ b/test/behavior/basic.zig
@@ -1,5 +1,6 @@
const std = @import("std");
const builtin = @import("builtin");
+const assert = std.debug.assert;
const mem = std.mem;
const expect = std.testing.expect;
const expectEqualStrings = std.testing.expectEqualStrings;
@@ -1053,3 +1054,35 @@ test "const alloc with comptime known initializer is made comptime known" {
if (u.a == 0) @compileError("bad");
}
}
+
+comptime {
+ // coerce result ptr outside a function
+ const S = struct { a: comptime_int };
+ var s: S = undefined;
+ s = S{ .a = 1 };
+ assert(s.a == 1);
+}
+
+test "switch inside @as gets correct type" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ var a: u32 = 0;
+ var b: [2]u32 = undefined;
+ b[0] = @as(u32, switch (a) {
+ 1 => 1,
+ else => 0,
+ });
+}
+
+test "inline call of function with a switch inside the return statement" {
+ const S = struct {
+ inline fn foo(x: anytype) @TypeOf(x) {
+ return switch (x) {
+ 1 => 1,
+ else => unreachable,
+ };
+ }
+ };
+ try expect(S.foo(1) == 1);
+}
diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig
index 5cbb8e973e..624f1609d4 100644
--- a/test/behavior/struct.zig
+++ b/test/behavior/struct.zig
@@ -1336,3 +1336,25 @@ test "packed struct field access via pointer" {
try S.doTheTest();
comptime try S.doTheTest();
}
+
+test "store to comptime field" {
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
+
+ {
+ const S = struct {
+ comptime a: [2]u32 = [2]u32{ 1, 2 },
+ };
+ var s: S = .{};
+ s.a = [2]u32{ 1, 2 };
+ s.a[0] = 1;
+ }
+ {
+ const T = struct { a: u32, b: u32 };
+ const S = struct {
+ comptime a: T = T{ .a = 1, .b = 2 },
+ };
+ var s: S = .{};
+ s.a = T{ .a = 1, .b = 2 };
+ s.a.a = 1;
+ }
+}
diff --git a/test/cases/compile_errors/invalid_store_to_comptime_field.zig b/test/cases/compile_errors/invalid_store_to_comptime_field.zig
new file mode 100644
index 0000000000..3bbd9bbaa8
--- /dev/null
+++ b/test/cases/compile_errors/invalid_store_to_comptime_field.zig
@@ -0,0 +1,20 @@
+pub export fn entry() void {
+ const S = struct {
+ comptime a: [2]u32 = [2]u32{ 1, 2 },
+ };
+ var s: S = .{};
+ s.a = [2]u32{ 2, 2 };
+}
+pub export fn entry1() void {
+ const T = struct { a: u32, b: u32 };
+ const S = struct {
+ comptime a: T = T{ .a = 1, .b = 2 },
+ };
+ var s: S = .{};
+ s.a = T{ .a = 2, .b = 2 };
+}
+// error
+// backend=stage2,llvm
+//
+// :6:19: error: value stored in comptime field does not match the default value of the field
+// :14:19: error: value stored in comptime field does not match the default value of the field