Diffstat (limited to 'lib/std')
-rw-r--r--  lib/std/fs.zig                  |  10
-rw-r--r--  lib/std/heap.zig                |   6
-rw-r--r--  lib/std/heap/sbrk_allocator.zig | 161
-rw-r--r--  lib/std/os/plan9.zig            | 152
-rw-r--r--  lib/std/os/plan9/errno.zig      |   8
-rw-r--r--  lib/std/start.zig               |  30
6 files changed, 321 insertions, 46 deletions
diff --git a/lib/std/fs.zig b/lib/std/fs.zig
index 1ec16483f3..e5c2d67d67 100644
--- a/lib/std/fs.zig
+++ b/lib/std/fs.zig
@@ -39,7 +39,7 @@ pub const Watch = @import("fs/watch.zig").Watch;
/// fit into a UTF-8 encoded array of this length.
/// The byte count includes room for a null sentinel byte.
pub const MAX_PATH_BYTES = switch (builtin.os.tag) {
- .linux, .macos, .ios, .freebsd, .openbsd, .netbsd, .dragonfly, .haiku, .solaris => os.PATH_MAX,
+ .linux, .macos, .ios, .freebsd, .openbsd, .netbsd, .dragonfly, .haiku, .solaris, .plan9 => os.PATH_MAX,
// Each UTF-16LE character may be expanded to 3 UTF-8 bytes.
// If it would require 4 UTF-8 bytes, then there would be a surrogate
// pair in the UTF-16LE, and we (over)account 3 bytes for it that way.
@@ -1160,7 +1160,9 @@ pub const Dir = struct {
return self.openFileW(path_w.span(), flags);
}
- var os_flags: u32 = os.O.CLOEXEC;
+ var os_flags: u32 = 0;
+ if (@hasDecl(os.O, "CLOEXEC")) os_flags = os.O.CLOEXEC;
+
// Use the O locking flags if the os supports them to acquire the lock
// atomically.
const has_flock_open_flags = @hasDecl(os.O, "EXLOCK");
@@ -1180,7 +1182,7 @@ pub const Dir = struct {
if (@hasDecl(os.O, "LARGEFILE")) {
os_flags |= os.O.LARGEFILE;
}
- if (!flags.allow_ctty) {
+ if (@hasDecl(os.O, "NOCTTY") and !flags.allow_ctty) {
os_flags |= os.O.NOCTTY;
}
os_flags |= switch (flags.mode) {
@@ -1196,7 +1198,7 @@ pub const Dir = struct {
// WASI doesn't have os.flock so we intentionally check the OS prior to the inner if block,
// since it is not comptime-known and we need to avoid an undefined symbol in Wasm.
- if (builtin.target.os.tag != .wasi) {
+ if (@hasDecl(os.system, "LOCK") and builtin.target.os.tag != .wasi) {
if (!has_flock_open_flags and flags.lock != .none) {
// TODO: integrate async I/O
const lock_nonblocking = if (flags.lock_nonblocking) os.LOCK.NB else @as(i32, 0);
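The fs.zig hunks above replace hard-coded flag usage with @hasDecl feature tests, so targets like plan9 that lack CLOEXEC, NOCTTY, or flock still compile cleanly. A minimal sketch of the pattern (portableOpenFlags is a hypothetical helper, not part of this change):

    const std = @import("std");
    const os = std.os;

    fn portableOpenFlags(read_only: bool) u32 {
        var flags: u32 = 0;
        // Flags the target OS does not declare are skipped at comptime;
        // the untaken branch is never analyzed.
        if (@hasDecl(os.O, "CLOEXEC")) flags |= os.O.CLOEXEC;
        if (@hasDecl(os.O, "NOCTTY")) flags |= os.O.NOCTTY;
        flags |= if (read_only) os.O.RDONLY else os.O.RDWR;
        return flags;
    }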
diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index 8501b386b9..44460253f4 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -21,6 +21,7 @@ pub const WasmAllocator = @import("heap/WasmAllocator.zig");
pub const WasmPageAllocator = @import("heap/WasmPageAllocator.zig");
pub const PageAllocator = @import("heap/PageAllocator.zig");
pub const ThreadSafeAllocator = @import("heap/ThreadSafeAllocator.zig");
+pub const SbrkAllocator = @import("heap/sbrk_allocator.zig").SbrkAllocator;
const memory_pool = @import("heap/memory_pool.zig");
pub const MemoryPool = memory_pool.MemoryPool;
@@ -228,6 +229,11 @@ pub const page_allocator = if (builtin.target.isWasm())
.ptr = undefined,
.vtable = &WasmPageAllocator.vtable,
}
+else if (builtin.target.os.tag == .plan9)
+ Allocator{
+ .ptr = undefined,
+ .vtable = &SbrkAllocator(std.os.plan9.sbrk).vtable,
+ }
else if (builtin.target.os.tag == .freestanding)
root.os.heap.page_allocator
else
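Because page_allocator is selected at comptime from the target tag, callers keep using std.heap.page_allocator unchanged; on a plan9 build it now routes into the sbrk-backed allocator. A minimal usage sketch:

    const std = @import("std");

    pub fn main() !void {
        // Resolves to SbrkAllocator(std.os.plan9.sbrk) on .plan9 targets,
        // and keeps its previous meaning everywhere else.
        const gpa = std.heap.page_allocator;
        const buf = try gpa.alloc(u8, 4096);
        defer gpa.free(buf);
    }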
diff --git a/lib/std/heap/sbrk_allocator.zig b/lib/std/heap/sbrk_allocator.zig
new file mode 100644
index 0000000000..3ccc2dddf7
--- /dev/null
+++ b/lib/std/heap/sbrk_allocator.zig
@@ -0,0 +1,161 @@
+const std = @import("../std.zig");
+const builtin = @import("builtin");
+const math = std.math;
+const Allocator = std.mem.Allocator;
+const mem = std.mem;
+const assert = std.debug.assert;
+
+pub fn SbrkAllocator(comptime sbrk: *const fn (n: usize) usize) type {
+ return struct {
+ pub const vtable = Allocator.VTable{
+ .alloc = alloc,
+ .resize = resize,
+ .free = free,
+ };
+
+ pub const Error = Allocator.Error;
+
+ const max_usize = math.maxInt(usize);
+ const ushift = math.Log2Int(usize);
+ const bigpage_size = 64 * 1024;
+ const pages_per_bigpage = bigpage_size / mem.page_size;
+ const bigpage_count = max_usize / bigpage_size;
+
+ /// Because free-list pointers are stored inside the slots, the minimum
+ /// slot must hold a usize plus one byte (size class 4 on 64-bit targets).
+ const min_class = math.log2(math.ceilPowerOfTwoAssert(usize, 1 + @sizeOf(usize)));
+ const size_class_count = math.log2(bigpage_size) - min_class;
+ /// 0 - 1 bigpage
+ /// 1 - 2 bigpages
+ /// 2 - 4 bigpages
+ /// etc.
+ const big_size_class_count = math.log2(bigpage_count);
+
+ var next_addrs = [1]usize{0} ** size_class_count;
+ /// For each size class, points to the freed pointer.
+ var frees = [1]usize{0} ** size_class_count;
+ /// For each big size class, points to the freed pointer.
+ var big_frees = [1]usize{0} ** big_size_class_count;
+
+ // TODO don't do the naive locking strategy
+ var lock: std.Thread.Mutex = .{};
+ fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, return_address: usize) ?[*]u8 {
+ _ = ctx;
+ _ = return_address;
+ lock.lock();
+ defer lock.unlock();
+ // Make room for the freelist next pointer.
+ const alignment = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_align));
+ const actual_len = @max(len +| @sizeOf(usize), alignment);
+ const slot_size = math.ceilPowerOfTwo(usize, actual_len) catch return null;
+ const class = math.log2(slot_size) - min_class;
+ if (class < size_class_count) {
+ const addr = a: {
+ const top_free_ptr = frees[class];
+ if (top_free_ptr != 0) {
+ const node = @as(*usize, @ptrFromInt(top_free_ptr + (slot_size - @sizeOf(usize))));
+ frees[class] = node.*;
+ break :a top_free_ptr;
+ }
+
+ const next_addr = next_addrs[class];
+ if (next_addr % mem.page_size == 0) {
+ const addr = allocBigPages(1);
+ if (addr == 0) return null;
+ //std.debug.print("allocated fresh slot_size={d} class={d} addr=0x{x}\n", .{
+ // slot_size, class, addr,
+ //});
+ next_addrs[class] = addr + slot_size;
+ break :a addr;
+ } else {
+ next_addrs[class] = next_addr + slot_size;
+ break :a next_addr;
+ }
+ };
+ return @as([*]u8, @ptrFromInt(addr));
+ }
+ const bigpages_needed = bigPagesNeeded(actual_len);
+ const addr = allocBigPages(bigpages_needed);
+ return @as([*]u8, @ptrFromInt(addr));
+ }
+
+ fn resize(
+ ctx: *anyopaque,
+ buf: []u8,
+ log2_buf_align: u8,
+ new_len: usize,
+ return_address: usize,
+ ) bool {
+ _ = ctx;
+ _ = return_address;
+ lock.lock();
+ defer lock.unlock();
+ // We don't want to move anything from one size class to another, but we
+ // can recover bytes in between powers of two.
+ const buf_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_buf_align));
+ const old_actual_len = @max(buf.len + @sizeOf(usize), buf_align);
+ const new_actual_len = @max(new_len +| @sizeOf(usize), buf_align);
+ const old_small_slot_size = math.ceilPowerOfTwoAssert(usize, old_actual_len);
+ const old_small_class = math.log2(old_small_slot_size) - min_class;
+ if (old_small_class < size_class_count) {
+ const new_small_slot_size = math.ceilPowerOfTwo(usize, new_actual_len) catch return false;
+ return old_small_slot_size == new_small_slot_size;
+ } else {
+ const old_bigpages_needed = bigPagesNeeded(old_actual_len);
+ const old_big_slot_pages = math.ceilPowerOfTwoAssert(usize, old_bigpages_needed);
+ const new_bigpages_needed = bigPagesNeeded(new_actual_len);
+ const new_big_slot_pages = math.ceilPowerOfTwo(usize, new_bigpages_needed) catch return false;
+ return old_big_slot_pages == new_big_slot_pages;
+ }
+ }
+
+ fn free(
+ ctx: *anyopaque,
+ buf: []u8,
+ log2_buf_align: u8,
+ return_address: usize,
+ ) void {
+ _ = ctx;
+ _ = return_address;
+ lock.lock();
+ defer lock.unlock();
+ const buf_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_buf_align));
+ const actual_len = @max(buf.len + @sizeOf(usize), buf_align);
+ const slot_size = math.ceilPowerOfTwoAssert(usize, actual_len);
+ const class = math.log2(slot_size) - min_class;
+ const addr = @intFromPtr(buf.ptr);
+ if (class < size_class_count) {
+ const node = @as(*usize, @ptrFromInt(addr + (slot_size - @sizeOf(usize))));
+ node.* = frees[class];
+ frees[class] = addr;
+ } else {
+ const bigpages_needed = bigPagesNeeded(actual_len);
+ const pow2_pages = math.ceilPowerOfTwoAssert(usize, bigpages_needed);
+ const big_slot_size_bytes = pow2_pages * bigpage_size;
+ const node = @as(*usize, @ptrFromInt(addr + (big_slot_size_bytes - @sizeOf(usize))));
+ const big_class = math.log2(pow2_pages);
+ node.* = big_frees[big_class];
+ big_frees[big_class] = addr;
+ }
+ }
+
+ inline fn bigPagesNeeded(byte_count: usize) usize {
+ return (byte_count + (bigpage_size + (@sizeOf(usize) - 1))) / bigpage_size;
+ }
+
+ fn allocBigPages(n: usize) usize {
+ const pow2_pages = math.ceilPowerOfTwoAssert(usize, n);
+ const slot_size_bytes = pow2_pages * bigpage_size;
+ const class = math.log2(pow2_pages);
+
+ const top_free_ptr = big_frees[class];
+ if (top_free_ptr != 0) {
+ const node = @as(*usize, @ptrFromInt(top_free_ptr + (slot_size_bytes - @sizeOf(usize))));
+ big_frees[class] = node.*;
+ return top_free_ptr;
+ }
+ return sbrk(pow2_pages * pages_per_bigpage * mem.page_size);
+ }
+ };
+}
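SbrkAllocator accepts any fn (usize) usize that returns the base address of a newly grown region, or 0 on failure, so the allocator can be exercised without a plan9 kernel. A sketch with a hypothetical bump-pointer stand-in (fakeSbrk and its backing buffer are illustrative, not part of the commit):

    const std = @import("std");

    // Hands out successive chunks of a static buffer, returning 0 on
    // exhaustion just like the plan9 sbrk wrapper does.
    var backing: [1 << 20]u8 align(std.mem.page_size) = undefined;
    var used: usize = 0;

    fn fakeSbrk(n: usize) usize {
        if (used + n > backing.len) return 0;
        const addr = @intFromPtr(&backing) + used;
        used += n;
        return addr;
    }

    const test_allocator = std.mem.Allocator{
        .ptr = undefined,
        .vtable = &std.heap.SbrkAllocator(fakeSbrk).vtable,
    };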
diff --git a/lib/std/os/plan9.zig b/lib/std/os/plan9.zig
index 3e1137c7ce..9334171221 100644
--- a/lib/std/os/plan9.zig
+++ b/lib/std/os/plan9.zig
@@ -1,6 +1,12 @@
const std = @import("../std.zig");
const builtin = @import("builtin");
+pub const fd_t = i32;
+
+pub const STDIN_FILENO = 0;
+pub const STDOUT_FILENO = 1;
+pub const STDERR_FILENO = 2;
+pub const PATH_MAX = 1023;
pub const syscall_bits = switch (builtin.cpu.arch) {
.x86_64 => @import("plan9/x86_64.zig"),
else => @compileError("more plan9 syscall implementations needed (requires more inline asm in stage2)"),
@@ -12,6 +18,43 @@ pub fn getErrno(r: usize) E {
const int = if (signed_r > -4096 and signed_r < 0) -signed_r else 0;
return @as(E, @enumFromInt(int));
}
+// The maximum number of bytes that can be in the errstr buffer.
+pub const ERRMAX = 128;
+var errstr_buf: [ERRMAX]u8 = undefined;
+/// Returns the error string for the most recent failed syscall.
+pub fn errstr() []const u8 {
+ _ = syscall_bits.syscall2(.ERRSTR, @intFromPtr(&errstr_buf), ERRMAX);
+ return std.mem.span(@as([*:0]u8, @ptrCast(&errstr_buf)));
+}
+pub const Plink = anyopaque;
+pub const Tos = extern struct {
+ /// Per process profiling
+ prof: extern struct {
+ /// known to be 0(ptr)
+ pp: *Plink,
+ /// known to be 4(ptr)
+ next: *Plink,
+ last: *Plink,
+ first: *Plink,
+ pid: u32,
+ what: u32,
+ },
+ /// cycle clock frequency if there is one, 0 otherwise
+ cyclefreq: u64,
+ /// cycles spent in kernel
+ kcycles: i64,
+ /// cycles spent in process (kernel + user)
+ pcycles: i64,
+ /// might as well put the pid here
+ pid: u32,
+ clock: u32,
+ // top of stack is here
+};
+
+pub var tos: *Tos = undefined; // set in start.zig
+pub fn getpid() u32 {
+ return tos.pid;
+}
pub const SIG = struct {
/// hangup
pub const HUP = 1;
@@ -57,7 +100,8 @@ pub const SIG = struct {
};
pub const sigset_t = c_long;
pub const empty_sigset = 0;
-pub const siginfo_t = c_long; // TODO plan9 doesn't have sigaction_fn. Sigaction is not a union, but we incude it here to be compatible.
+pub const siginfo_t = c_long;
+// TODO plan9 doesn't have sigaction_fn. Sigaction is not a union, but we include it here to be compatible.
pub const Sigaction = extern struct {
pub const handler_fn = *const fn (c_int) callconv(.C) void;
pub const sigaction_fn = *const fn (c_int, *const siginfo_t, ?*const anyopaque) callconv(.C) void;
@@ -69,6 +113,9 @@ pub const Sigaction = extern struct {
mask: sigset_t,
flags: c_int,
};
+pub const AT = struct {
+ pub const FDCWD = -100; // we just make up a constant; FDCWD and openat don't actually exist in plan9
+};
// TODO implement sigaction
// right now it is just a shim to allow using start.zig code
pub fn sigaction(sig: u6, noalias act: ?*const Sigaction, noalias oact: ?*Sigaction) usize {
@@ -132,20 +179,48 @@ pub const SYS = enum(usize) {
_NSEC = 53,
};
-pub fn pwrite(fd: usize, buf: [*]const u8, count: usize, offset: usize) usize {
- return syscall_bits.syscall4(.PWRITE, fd, @intFromPtr(buf), count, offset);
+pub fn write(fd: i32, buf: [*]const u8, count: usize) usize {
+ return syscall_bits.syscall4(.PWRITE, @bitCast(@as(isize, fd)), @intFromPtr(buf), count, @bitCast(@as(isize, -1)));
+}
+pub fn pwrite(fd: i32, buf: [*]const u8, count: usize, offset: isize) usize {
+ return syscall_bits.syscall4(.PWRITE, @bitCast(@as(isize, fd)), @intFromPtr(buf), count, @bitCast(offset));
+}
+
+pub fn read(fd: i32, buf: [*]u8, count: usize) usize {
+ return syscall_bits.syscall4(.PREAD, @bitCast(@as(isize, fd)), @intFromPtr(buf), count, @bitCast(@as(isize, -1)));
+}
+pub fn pread(fd: i32, buf: [*]u8, count: usize, offset: isize) usize {
+ return syscall_bits.syscall4(.PREAD, @bitCast(@as(isize, fd)), @intFromPtr(buf), count, @bitCast(offset));
+}
+
+pub fn open(path: [*:0]const u8, flags: u32) usize {
+ return syscall_bits.syscall2(.OPEN, @intFromPtr(path), @bitCast(@as(isize, flags)));
}
-pub fn pread(fd: usize, buf: [*]const u8, count: usize, offset: usize) usize {
- return syscall_bits.syscall4(.PREAD, fd, @intFromPtr(buf), count, offset);
+pub fn openat(dirfd: i32, path: [*:0]const u8, flags: u32, _: mode_t) usize {
+ // we skip perms because only create supports perms
+ if (dirfd == AT.FDCWD) { // openat(AT_FDCWD, ...) == open(...)
+ return open(path, flags);
+ }
+ var dir_path_buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
+ var total_path_buf: [std.fs.MAX_PATH_BYTES + 1]u8 = undefined;
+ const rc = fd2path(dirfd, &dir_path_buf, std.fs.MAX_PATH_BYTES);
+ if (rc != 0) return rc;
+ var fba = std.heap.FixedBufferAllocator.init(&total_path_buf);
+ var alloc = fba.allocator();
+ const dir_path = std.mem.span(@as([*:0]u8, @ptrCast(&dir_path_buf)));
+ const total_path = std.fs.path.join(alloc, &.{ dir_path, std.mem.span(path) }) catch unreachable; // the allocation shouldn't fail because it should not exceed MAX_PATH_BYTES
+ fba.reset();
+ const total_path_z = alloc.dupeZ(u8, total_path) catch unreachable; // should not exceed MAX_PATH_BYTES + 1
+ return open(total_path_z.ptr, flags);
}
-pub fn open(path: [*:0]const u8, omode: OpenMode) usize {
- return syscall_bits.syscall2(.OPEN, @intFromPtr(path), @intFromEnum(omode));
+pub fn fd2path(fd: i32, buf: [*]u8, nbuf: usize) usize {
+ return syscall_bits.syscall3(.FD2PATH, @bitCast(@as(isize, fd)), @intFromPtr(buf), nbuf);
}
-pub fn create(path: [*:0]const u8, omode: OpenMode, perms: usize) usize {
- return syscall_bits.syscall3(.CREATE, @intFromPtr(path), @intFromEnum(omode), perms);
+pub fn create(path: [*:0]const u8, omode: mode_t, perms: usize) usize {
+ return syscall_bits.syscall3(.CREATE, @intFromPtr(path), @bitCast(@as(isize, omode)), perms);
}
pub fn exit(status: u8) noreturn {
@@ -163,16 +238,53 @@ pub fn exits(status: ?[*:0]const u8) noreturn {
unreachable;
}
-pub fn close(fd: usize) usize {
- return syscall_bits.syscall1(.CLOSE, fd);
+pub fn close(fd: i32) usize {
+ return syscall_bits.syscall1(.CLOSE, @bitCast(@as(isize, fd)));
}
-pub const OpenMode = enum(usize) {
- OREAD = 0, //* open for read
- OWRITE = 1, //* write
- ORDWR = 2, //* read and write
- OEXEC = 3, //* execute, == read but check execute permission
- OTRUNC = 16, //* or'ed in (except for exec), truncate file first
- OCEXEC = 32, //* or'ed in (per file descriptor), close on exec
- ORCLOSE = 64, //* or'ed in, remove on close
- OEXCL = 0x1000, //* or'ed in, exclusive create
+pub const mode_t = i32;
+pub const O = struct {
+ pub const READ = 0; // open for read
+ pub const RDONLY = 0;
+ pub const WRITE = 1; // write
+ pub const WRONLY = 1;
+ pub const RDWR = 2; // read and write
+ pub const EXEC = 3; // execute, == read but check execute permission
+ pub const TRUNC = 16; // or'ed in (except for exec), truncate file first
+ pub const CEXEC = 32; // or'ed in (per file descriptor), close on exec
+ pub const RCLOSE = 64; // or'ed in, remove on close
+ pub const EXCL = 0x1000; // or'ed in, exclusive create
};
+
+pub const ExecData = struct {
+ pub extern const etext: anyopaque;
+ pub extern const edata: anyopaque;
+ pub extern const end: anyopaque;
+};
+
+/// Brk sets the system's idea of the lowest bss location not
+/// used by the program (called the break) to addr rounded up to
+/// the next multiple of 8 bytes. Locations not less than addr
+/// and below the stack pointer may cause a memory violation if
+/// accessed. (From 9front's brk(2) manual.)
+pub fn brk_(addr: usize) i32 {
+ // Truncate and bitcast so the kernel's -1 error return maps to -1 here
+ // instead of tripping the @intCast safety check on a huge unsigned value.
+ return @bitCast(@as(u32, @truncate(syscall_bits.syscall1(.BRK_, addr))));
+}
+var bloc: usize = 0;
+var bloc_max: usize = 0;
+
+pub fn sbrk(n: usize) usize {
+ if (bloc == 0) {
+ // we are at the start
+ bloc = @intFromPtr(&ExecData.end);
+ bloc_max = @intFromPtr(&ExecData.end);
+ }
+ var bl = std.mem.alignForward(usize, bloc, std.mem.page_size);
+ const n_aligned = std.mem.alignForward(usize, n, std.mem.page_size);
+ if (bl + n_aligned > bloc_max) {
+ // we need to allocate
+ if (brk_(bl + n_aligned) < 0) return 0;
+ bloc_max = bl + n_aligned;
+ }
+ bloc = bloc + n_aligned;
+ return bl;
+}
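Plan 9 has no separate read/write syscalls, so the wrappers above issue PREAD/PWRITE with an offset of -1, which the kernel treats as "use the current channel offset". A minimal sketch of the wrappers on a plan9 target (greet is a hypothetical function; the error test mirrors the range check in getErrno):

    const plan9 = @import("std").os.plan9;

    fn greet() void {
        const rc = plan9.open("/dev/cons", plan9.O.WRITE);
        const signed: isize = @bitCast(rc);
        if (signed > -4096 and signed < 0) return; // failed, same test as getErrno
        const fd: i32 = @intCast(rc);
        const msg: []const u8 = "hello from zig on plan9\n";
        _ = plan9.write(fd, msg.ptr, msg.len);
        _ = plan9.close(fd);
    }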
diff --git a/lib/std/os/plan9/errno.zig b/lib/std/os/plan9/errno.zig
index 94197beca3..47a232e67c 100644
--- a/lib/std/os/plan9/errno.zig
+++ b/lib/std/os/plan9/errno.zig
@@ -73,4 +73,12 @@ pub const E = enum(u16) {
// These added in 1003.1b-1993
CANCELED = 61,
INPROGRESS = 62,
+
+ // We just add these to be compatible with std.os, which uses them.
+ // They should never get used.
+ DQUOT,
+ CONNRESET,
+ OVERFLOW,
+ LOOP,
+ TXTBSY,
};
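These numeric codes exist only so std.os can reference them; plan9's native error channel is the per-process error string returned by the ERRSTR syscall (see errstr in plan9.zig above). A hypothetical reporting helper:

    const plan9 = @import("std").os.plan9;

    // On failure, surface the kernel's error string rather than the
    // (mostly synthetic) numeric E value.
    fn reportIfFailed(rc: usize) void {
        const signed: isize = @bitCast(rc);
        if (signed > -4096 and signed < 0) {
            const msg = plan9.errstr();
            _ = plan9.write(plan9.STDERR_FILENO, msg.ptr, msg.len);
        }
    }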
diff --git a/lib/std/start.zig b/lib/std/start.zig
index f730b0dd90..d2099ca803 100644
--- a/lib/std/start.zig
+++ b/lib/std/start.zig
@@ -166,28 +166,7 @@ fn exit2(code: usize) noreturn {
else => @compileError("TODO"),
},
// exits(0)
- .plan9 => switch (builtin.cpu.arch) {
- .x86_64 => {
- asm volatile (
- \\push $0
- \\push $0
- \\syscall
- :
- : [syscall_number] "{rbp}" (8),
- : "rcx", "r11", "memory"
- );
- },
- // TODO once we get stack setting with assembly on
- // arm, exit with 0 instead of stack garbage
- .aarch64 => {
- asm volatile ("svc #0"
- :
- : [exit] "{x0}" (0x08),
- : "memory", "cc"
- );
- },
- else => @compileError("TODO"),
- },
+ .plan9 => std.os.plan9.exits(null),
.windows => {
ExitProcess(@as(u32, @truncate(code)));
},
@@ -254,6 +233,13 @@ fn EfiMain(handle: uefi.Handle, system_table: *uefi.tables.SystemTable) callconv
}
fn _start() callconv(.Naked) noreturn {
+ // TODO set Top of Stack on non-x86_64 plan9
+ if (native_os == .plan9 and native_arch == .x86_64) {
+ // from /sys/src/libc/amd64/main9.s
+ std.os.plan9.tos = asm volatile (""
+ : [tos] "={rax}" (-> *std.os.plan9.Tos),
+ );
+ }
asm volatile (switch (native_arch) {
.x86_64 =>
\\ xorl %%ebp, %%ebp