author     Andrew Kelley <andrew@ziglang.org>   2025-02-07 06:21:51 -0800
committer  GitHub <noreply@github.com>          2025-02-07 06:21:51 -0800
commit     6a6e72fff820fb641aa1b00700f6835430dae72e (patch)
tree       ea70863e08ba9167cfe954287691cce98716d918 /lib/std
parent     8ad0732954df80f0f9a0248525c2bded7e82ba27 (diff)
parent     b8f5cfed457726a77082b7ffe6672b6066c0a66e (diff)
download   zig-6a6e72fff820fb641aa1b00700f6835430dae72e.tar.gz
           zig-6a6e72fff820fb641aa1b00700f6835430dae72e.zip
Merge pull request #20511 from archbirdplus
runtime page size detection
rework GeneralPurposeAllocator to reduce active mapping count
Allocator VTable API update
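
The first item replaces the comptime constant `std.mem.page_size` with comptime bounds plus a runtime query. Below is a minimal sketch of how call sites look after this change, assuming the `std.heap` API introduced in this diff (`page_size_min`, `page_size_max`, `pageSize`):

const std = @import("std");

pub fn main() void {
    // Comptime bounds on the target's page size; pointers returned by
    // mmap/VirtualAlloc are aligned to at least page_size_min.
    const min = std.heap.page_size_min;
    const max = std.heap.page_size_max;

    // Runtime page size: comptime-known when min == max, otherwise resolved
    // through std.options.queryPageSize (defaultQueryPageSize by default).
    const size = std.heap.pageSize();
    std.debug.assert(size >= min and size <= max);
    std.debug.print("page size: {d} (min {d}, max {d})\n", .{ size, min, max });
}

Slice and pointer types that previously used `align(std.mem.page_size)` switch to the comptime-known minimum, `align(std.heap.page_size_min)`, as the changes below show.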
Diffstat (limited to 'lib/std')
-rw-r--r--  lib/std/Build/Fuzz/WebServer.zig | 2
-rw-r--r--  lib/std/Thread.zig | 6
-rw-r--r--  lib/std/array_list.zig | 37
-rw-r--r--  lib/std/c.zig | 57
-rw-r--r--  lib/std/c/solaris.zig | 4
-rw-r--r--  lib/std/crypto/tlcsprng.zig | 10
-rw-r--r--  lib/std/debug.zig | 16
-rw-r--r--  lib/std/debug/Dwarf.zig | 10
-rw-r--r--  lib/std/debug/Info.zig | 1
-rw-r--r--  lib/std/debug/MemoryAccessor.zig | 9
-rw-r--r--  lib/std/debug/SelfInfo.zig | 6
-rw-r--r--  lib/std/dynamic_library.zig | 12
-rw-r--r--  lib/std/fifo.zig | 2
-rw-r--r--  lib/std/hash_map.zig | 20
-rw-r--r--  lib/std/heap.zig | 921
-rw-r--r--  lib/std/heap/FixedBufferAllocator.zig | 230
-rw-r--r--  lib/std/heap/PageAllocator.zig | 180
-rw-r--r--  lib/std/heap/ThreadSafeAllocator.zig | 22
-rw-r--r--  lib/std/heap/WasmAllocator.zig | 32
-rw-r--r--  lib/std/heap/arena_allocator.zig | 42
-rw-r--r--  lib/std/heap/debug_allocator.zig | 1410
-rw-r--r--  lib/std/heap/general_purpose_allocator.zig | 1500
-rw-r--r--  lib/std/heap/log_to_writer_allocator.zig | 118
-rw-r--r--  lib/std/heap/logging_allocator.zig | 133
-rw-r--r--  lib/std/heap/sbrk_allocator.zig | 7
-rw-r--r--  lib/std/mem.zig | 145
-rw-r--r--  lib/std/mem/Allocator.zig | 230
-rw-r--r--  lib/std/os/linux.zig | 22
-rw-r--r--  lib/std/os/linux/IoUring.zig | 15
-rw-r--r--  lib/std/os/linux/tls.zig | 19
-rw-r--r--  lib/std/os/plan9.zig | 4
-rw-r--r--  lib/std/os/windows.zig | 12
-rw-r--r--  lib/std/os/windows/kernel32.zig | 16
-rw-r--r--  lib/std/posix.zig | 54
-rw-r--r--  lib/std/process.zig | 2
-rw-r--r--  lib/std/start.zig | 2
-rw-r--r--  lib/std/std.zig | 7
-rw-r--r--  lib/std/testing.zig | 24
-rw-r--r--  lib/std/testing/FailingAllocator.zig | 161
-rw-r--r--  lib/std/testing/failing_allocator.zig | 142
-rw-r--r--  lib/std/zip.zig | 2
41 files changed, 2979 insertions, 2665 deletions
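
Relating to the "Allocator VTable API update" item in the merge message: the callbacks now take a `std.mem.Alignment` instead of a log2 `u8`, and the table gains a `remap` hook alongside `alloc`/`resize`/`free`. The following is a hedged sketch of a pass-through allocator under that interface; `Forwarding` is a hypothetical name used only for illustration, and the raw* calls mirror the StackFallbackAllocator changes in the diff below.

const std = @import("std");
const Allocator = std.mem.Allocator;

/// Hypothetical forwarding allocator illustrating the updated four-function
/// VTable (alloc/resize/remap/free) and the std.mem.Alignment parameter.
const Forwarding = struct {
    backing: Allocator,

    pub fn allocator(self: *Forwarding) Allocator {
        return .{
            .ptr = self,
            .vtable = &.{
                .alloc = alloc,
                .resize = resize,
                .remap = remap,
                .free = free,
            },
        };
    }

    fn alloc(ctx: *anyopaque, len: usize, alignment: std.mem.Alignment, ret_addr: usize) ?[*]u8 {
        const self: *Forwarding = @ptrCast(@alignCast(ctx));
        return self.backing.rawAlloc(len, alignment, ret_addr);
    }

    fn resize(ctx: *anyopaque, memory: []u8, alignment: std.mem.Alignment, new_len: usize, ret_addr: usize) bool {
        const self: *Forwarding = @ptrCast(@alignCast(ctx));
        return self.backing.rawResize(memory, alignment, new_len, ret_addr);
    }

    fn remap(ctx: *anyopaque, memory: []u8, alignment: std.mem.Alignment, new_len: usize, ret_addr: usize) ?[*]u8 {
        const self: *Forwarding = @ptrCast(@alignCast(ctx));
        return self.backing.rawRemap(memory, alignment, new_len, ret_addr);
    }

    fn free(ctx: *anyopaque, memory: []u8, alignment: std.mem.Alignment, ret_addr: usize) void {
        const self: *Forwarding = @ptrCast(@alignCast(ctx));
        self.backing.rawFree(memory, alignment, ret_addr);
    }
};

test Forwarding {
    var fwd: Forwarding = .{ .backing = std.testing.allocator };
    const gpa = fwd.allocator();
    const bytes = try gpa.alloc(u8, 16);
    defer gpa.free(bytes);
}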
diff --git a/lib/std/Build/Fuzz/WebServer.zig b/lib/std/Build/Fuzz/WebServer.zig
index ac4336476e..87cd7a1a1d 100644
--- a/lib/std/Build/Fuzz/WebServer.zig
+++ b/lib/std/Build/Fuzz/WebServer.zig
@@ -41,7 +41,7 @@ const fuzzer_arch_os_abi = "wasm32-freestanding";
const fuzzer_cpu_features = "baseline+atomics+bulk_memory+multivalue+mutable_globals+nontrapping_fptoint+reference_types+sign_ext";
const CoverageMap = struct {
- mapped_memory: []align(std.mem.page_size) const u8,
+ mapped_memory: []align(std.heap.page_size_min) const u8,
coverage: Coverage,
source_locations: []Coverage.SourceLocation,
/// Elements are indexes into `source_locations` pointing to the unit tests that are being fuzz tested.
diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig
index 4e691de98c..6dcb956184 100644
--- a/lib/std/Thread.zig
+++ b/lib/std/Thread.zig
@@ -769,7 +769,7 @@ const PosixThreadImpl = struct {
// Use the same set of parameters used by the libc-less impl.
const stack_size = @max(config.stack_size, 16 * 1024);
assert(c.pthread_attr_setstacksize(&attr, stack_size) == .SUCCESS);
- assert(c.pthread_attr_setguardsize(&attr, std.mem.page_size) == .SUCCESS);
+ assert(c.pthread_attr_setguardsize(&attr, std.heap.pageSize()) == .SUCCESS);
var handle: c.pthread_t = undefined;
switch (c.pthread_create(
@@ -1155,7 +1155,7 @@ const LinuxThreadImpl = struct {
completion: Completion = Completion.init(.running),
child_tid: std.atomic.Value(i32) = std.atomic.Value(i32).init(1),
parent_tid: i32 = undefined,
- mapped: []align(std.mem.page_size) u8,
+ mapped: []align(std.heap.page_size_min) u8,
/// Calls `munmap(mapped.ptr, mapped.len)` then `exit(1)` without touching the stack (which lives in `mapped.ptr`).
/// Ported over from musl libc's pthread detached implementation:
@@ -1362,7 +1362,7 @@ const LinuxThreadImpl = struct {
};
fn spawn(config: SpawnConfig, comptime f: anytype, args: anytype) !Impl {
- const page_size = std.mem.page_size;
+ const page_size = std.heap.pageSize();
const Args = @TypeOf(args);
const Instance = struct {
fn_args: Args,
diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig
index 5eb527e742..55b6e3cc40 100644
--- a/lib/std/array_list.zig
+++ b/lib/std/array_list.zig
@@ -105,21 +105,19 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
return result;
}
- /// The caller owns the returned memory. Empties this ArrayList,
- /// Its capacity is cleared, making deinit() safe but unnecessary to call.
+ /// The caller owns the returned memory. Empties this ArrayList.
+ /// Its capacity is cleared, making `deinit` safe but unnecessary to call.
pub fn toOwnedSlice(self: *Self) Allocator.Error!Slice {
const allocator = self.allocator;
const old_memory = self.allocatedSlice();
- if (allocator.resize(old_memory, self.items.len)) {
- const result = self.items;
+ if (allocator.remap(old_memory, self.items.len)) |new_items| {
self.* = init(allocator);
- return result;
+ return new_items;
}
const new_memory = try allocator.alignedAlloc(T, alignment, self.items.len);
@memcpy(new_memory, self.items);
- @memset(self.items, undefined);
self.clearAndFree();
return new_memory;
}
@@ -185,8 +183,9 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
// extra capacity.
const new_capacity = growCapacity(self.capacity, new_len);
const old_memory = self.allocatedSlice();
- if (self.allocator.resize(old_memory, new_capacity)) {
- self.capacity = new_capacity;
+ if (self.allocator.remap(old_memory, new_capacity)) |new_memory| {
+ self.items.ptr = new_memory.ptr;
+ self.capacity = new_memory.len;
return addManyAtAssumeCapacity(self, index, count);
}
@@ -468,8 +467,9 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
// the allocator implementation would pointlessly copy our
// extra capacity.
const old_memory = self.allocatedSlice();
- if (self.allocator.resize(old_memory, new_capacity)) {
- self.capacity = new_capacity;
+ if (self.allocator.remap(old_memory, new_capacity)) |new_memory| {
+ self.items.ptr = new_memory.ptr;
+ self.capacity = new_memory.len;
} else {
const new_memory = try self.allocator.alignedAlloc(T, alignment, new_capacity);
@memcpy(new_memory[0..self.items.len], self.items);
@@ -707,15 +707,13 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Its capacity is cleared, making deinit() safe but unnecessary to call.
pub fn toOwnedSlice(self: *Self, allocator: Allocator) Allocator.Error!Slice {
const old_memory = self.allocatedSlice();
- if (allocator.resize(old_memory, self.items.len)) {
- const result = self.items;
+ if (allocator.remap(old_memory, self.items.len)) |new_items| {
self.* = .empty;
- return result;
+ return new_items;
}
const new_memory = try allocator.alignedAlloc(T, alignment, self.items.len);
@memcpy(new_memory, self.items);
- @memset(self.items, undefined);
self.clearAndFree(allocator);
return new_memory;
}
@@ -1031,9 +1029,9 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
}
const old_memory = self.allocatedSlice();
- if (allocator.resize(old_memory, new_len)) {
- self.capacity = new_len;
- self.items.len = new_len;
+ if (allocator.remap(old_memory, new_len)) |new_items| {
+ self.capacity = new_items.len;
+ self.items = new_items;
return;
}
@@ -1099,8 +1097,9 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
// the allocator implementation would pointlessly copy our
// extra capacity.
const old_memory = self.allocatedSlice();
- if (allocator.resize(old_memory, new_capacity)) {
- self.capacity = new_capacity;
+ if (allocator.remap(old_memory, new_capacity)) |new_memory| {
+ self.items.ptr = new_memory.ptr;
+ self.capacity = new_memory.len;
} else {
const new_memory = try allocator.alignedAlloc(T, alignment, new_capacity);
@memcpy(new_memory[0..self.items.len], self.items);
diff --git a/lib/std/c.zig b/lib/std/c.zig
index 85bec8adc0..d3418c7d8a 100644
--- a/lib/std/c.zig
+++ b/lib/std/c.zig
@@ -3,7 +3,7 @@ const builtin = @import("builtin");
const c = @This();
const maxInt = std.math.maxInt;
const assert = std.debug.assert;
-const page_size = std.mem.page_size;
+const page_size = std.heap.page_size_min;
const native_abi = builtin.abi;
const native_arch = builtin.cpu.arch;
const native_os = builtin.os.tag;
@@ -2227,6 +2227,39 @@ pub const SC = switch (native_os) {
.linux => linux.SC,
else => void,
};
+
+pub const _SC = switch (native_os) {
+ .driverkit, .ios, .macos, .tvos, .visionos, .watchos => enum(c_int) {
+ PAGESIZE = 29,
+ },
+ .dragonfly => enum(c_int) {
+ PAGESIZE = 47,
+ },
+ .freebsd => enum(c_int) {
+ PAGESIZE = 47,
+ },
+ .fuchsia => enum(c_int) {
+ PAGESIZE = 30,
+ },
+ .haiku => enum(c_int) {
+ PAGESIZE = 27,
+ },
+ .linux => enum(c_int) {
+ PAGESIZE = 30,
+ },
+ .netbsd => enum(c_int) {
+ PAGESIZE = 28,
+ },
+ .openbsd => enum(c_int) {
+ PAGESIZE = 28,
+ },
+ .solaris, .illumos => enum(c_int) {
+ PAGESIZE = 11,
+ NPROCESSORS_ONLN = 15,
+ },
+ else => void,
+};
+
pub const SEEK = switch (native_os) {
.linux => linux.SEEK,
.emscripten => emscripten.SEEK,
@@ -7834,6 +7867,11 @@ pub const MAP = switch (native_os) {
else => void,
};
+pub const MREMAP = switch (native_os) {
+ .linux => linux.MREMAP,
+ else => void,
+};
+
/// Used by libc to communicate failure. Not actually part of the underlying syscall.
pub const MAP_FAILED: *anyopaque = @ptrFromInt(maxInt(usize));
@@ -9232,7 +9270,7 @@ pub extern "c" fn getpwnam(name: [*:0]const u8) ?*passwd;
pub extern "c" fn getpwuid(uid: uid_t) ?*passwd;
pub extern "c" fn getrlimit64(resource: rlimit_resource, rlim: *rlimit) c_int;
pub extern "c" fn lseek64(fd: fd_t, offset: i64, whence: c_int) i64;
-pub extern "c" fn mmap64(addr: ?*align(std.mem.page_size) anyopaque, len: usize, prot: c_uint, flags: c_uint, fd: fd_t, offset: i64) *anyopaque;
+pub extern "c" fn mmap64(addr: ?*align(page_size) anyopaque, len: usize, prot: c_uint, flags: c_uint, fd: fd_t, offset: i64) *anyopaque;
pub extern "c" fn open64(path: [*:0]const u8, oflag: O, ...) c_int;
pub extern "c" fn openat64(fd: c_int, path: [*:0]const u8, oflag: O, ...) c_int;
pub extern "c" fn pread64(fd: fd_t, buf: [*]u8, nbyte: usize, offset: i64) isize;
@@ -9324,13 +9362,13 @@ pub extern "c" fn signalfd(fd: fd_t, mask: *const sigset_t, flags: u32) c_int;
pub extern "c" fn prlimit(pid: pid_t, resource: rlimit_resource, new_limit: *const rlimit, old_limit: *rlimit) c_int;
pub extern "c" fn mincore(
- addr: *align(std.mem.page_size) anyopaque,
+ addr: *align(page_size) anyopaque,
length: usize,
vec: [*]u8,
) c_int;
pub extern "c" fn madvise(
- addr: *align(std.mem.page_size) anyopaque,
+ addr: *align(page_size) anyopaque,
length: usize,
advice: u32,
) c_int;
@@ -9428,6 +9466,10 @@ pub const posix_memalign = switch (native_os) {
.dragonfly, .netbsd, .freebsd, .solaris, .openbsd, .linux, .macos, .ios, .tvos, .watchos, .visionos => private.posix_memalign,
else => {},
};
+pub const sysconf = switch (native_os) {
+ .solaris => solaris.sysconf,
+ else => private.sysconf,
+};
pub const sf_hdtr = switch (native_os) {
.freebsd, .macos, .ios, .tvos, .watchos, .visionos => extern struct {
@@ -9471,6 +9513,7 @@ pub extern "c" fn write(fd: fd_t, buf: [*]const u8, nbyte: usize) isize;
pub extern "c" fn pwrite(fd: fd_t, buf: [*]const u8, nbyte: usize, offset: off_t) isize;
pub extern "c" fn mmap(addr: ?*align(page_size) anyopaque, len: usize, prot: c_uint, flags: MAP, fd: fd_t, offset: off_t) *anyopaque;
pub extern "c" fn munmap(addr: *align(page_size) const anyopaque, len: usize) c_int;
+pub extern "c" fn mremap(addr: ?*align(page_size) const anyopaque, old_len: usize, new_len: usize, flags: MREMAP, ...) *anyopaque;
pub extern "c" fn mprotect(addr: *align(page_size) anyopaque, len: usize, prot: c_uint) c_int;
pub extern "c" fn link(oldpath: [*:0]const u8, newpath: [*:0]const u8) c_int;
pub extern "c" fn linkat(oldfd: fd_t, oldpath: [*:0]const u8, newfd: fd_t, newpath: [*:0]const u8, flags: c_int) c_int;
@@ -9823,7 +9866,6 @@ pub const SCM = solaris.SCM;
pub const SETCONTEXT = solaris.SETCONTEXT;
pub const SETUSTACK = solaris.GETUSTACK;
pub const SFD = solaris.SFD;
-pub const _SC = solaris._SC;
pub const cmsghdr = solaris.cmsghdr;
pub const ctid_t = solaris.ctid_t;
pub const file_obj = solaris.file_obj;
@@ -9840,7 +9882,6 @@ pub const priority = solaris.priority;
pub const procfs = solaris.procfs;
pub const projid_t = solaris.projid_t;
pub const signalfd_siginfo = solaris.signalfd_siginfo;
-pub const sysconf = solaris.sysconf;
pub const taskid_t = solaris.taskid_t;
pub const zoneid_t = solaris.zoneid_t;
@@ -9997,6 +10038,7 @@ pub const host_t = darwin.host_t;
pub const ipc_space_t = darwin.ipc_space_t;
pub const ipc_space_port_t = darwin.ipc_space_port_t;
pub const kern_return_t = darwin.kern_return_t;
+pub const vm_size_t = darwin.vm_size_t;
pub const kevent64 = darwin.kevent64;
pub const kevent64_s = darwin.kevent64_s;
pub const mach_absolute_time = darwin.mach_absolute_time;
@@ -10168,6 +10210,7 @@ const private = struct {
extern "c" fn socket(domain: c_uint, sock_type: c_uint, protocol: c_uint) c_int;
extern "c" fn stat(noalias path: [*:0]const u8, noalias buf: *Stat) c_int;
extern "c" fn sigaltstack(ss: ?*stack_t, old_ss: ?*stack_t) c_int;
+ extern "c" fn sysconf(sc: c_int) c_long;
extern "c" fn pthread_setname_np(thread: pthread_t, name: [*:0]const u8) c_int;
extern "c" fn getcontext(ucp: *ucontext_t) c_int;
@@ -10202,7 +10245,7 @@ const private = struct {
extern "c" fn __getrusage50(who: c_int, usage: *rusage) c_int;
extern "c" fn __gettimeofday50(noalias tv: ?*timeval, noalias tz: ?*timezone) c_int;
extern "c" fn __libc_thr_yield() c_int;
- extern "c" fn __msync13(addr: *align(std.mem.page_size) const anyopaque, len: usize, flags: c_int) c_int;
+ extern "c" fn __msync13(addr: *align(page_size) const anyopaque, len: usize, flags: c_int) c_int;
extern "c" fn __nanosleep50(rqtp: *const timespec, rmtp: ?*timespec) c_int;
extern "c" fn __sigaction14(sig: c_int, noalias act: ?*const Sigaction, noalias oact: ?*Sigaction) c_int;
extern "c" fn __sigfillset14(set: ?*sigset_t) void;
diff --git a/lib/std/c/solaris.zig b/lib/std/c/solaris.zig
index c84478e2c0..df7dbca16a 100644
--- a/lib/std/c/solaris.zig
+++ b/lib/std/c/solaris.zig
@@ -154,10 +154,6 @@ pub const AF_SUN = struct {
pub const NOPLM = 0x00000004;
};
-pub const _SC = struct {
- pub const NPROCESSORS_ONLN = 15;
-};
-
pub const procfs = struct {
pub const misc_header = extern struct {
size: u32,
diff --git a/lib/std/crypto/tlcsprng.zig b/lib/std/crypto/tlcsprng.zig
index bad4df2ea8..a1d9beb9b5 100644
--- a/lib/std/crypto/tlcsprng.zig
+++ b/lib/std/crypto/tlcsprng.zig
@@ -42,7 +42,7 @@ var install_atfork_handler = std.once(struct {
}
}.do);
-threadlocal var wipe_mem: []align(mem.page_size) u8 = &[_]u8{};
+threadlocal var wipe_mem: []align(std.heap.page_size_min) u8 = &[_]u8{};
fn tlsCsprngFill(_: *anyopaque, buffer: []u8) void {
if (os_has_arc4random) {
@@ -77,7 +77,7 @@ fn tlsCsprngFill(_: *anyopaque, buffer: []u8) void {
} else {
// Use a static thread-local buffer.
const S = struct {
- threadlocal var buf: Context align(mem.page_size) = .{
+ threadlocal var buf: Context align(std.heap.page_size_min) = .{
.init_state = .uninitialized,
.rng = undefined,
};
@@ -85,7 +85,7 @@ fn tlsCsprngFill(_: *anyopaque, buffer: []u8) void {
wipe_mem = mem.asBytes(&S.buf);
}
}
- const ctx = @as(*Context, @ptrCast(wipe_mem.ptr));
+ const ctx: *Context = @ptrCast(wipe_mem.ptr);
switch (ctx.init_state) {
.uninitialized => {
@@ -141,7 +141,7 @@ fn childAtForkHandler() callconv(.c) void {
}
fn fillWithCsprng(buffer: []u8) void {
- const ctx = @as(*Context, @ptrCast(wipe_mem.ptr));
+ const ctx: *Context = @ptrCast(wipe_mem.ptr);
return ctx.rng.fill(buffer);
}
@@ -157,7 +157,7 @@ fn initAndFill(buffer: []u8) void {
// the `std.options.cryptoRandomSeed` function is provided.
std.options.cryptoRandomSeed(&seed);
- const ctx = @as(*Context, @ptrCast(wipe_mem.ptr));
+ const ctx: *Context = @ptrCast(wipe_mem.ptr);
ctx.rng = Rng.init(seed);
std.crypto.secureZero(u8, &seed);
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index 643dcf731a..9deca6de49 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -1134,7 +1134,7 @@ fn printLineFromFileAnyOs(out_stream: anytype, source_location: SourceLocation)
defer f.close();
// TODO fstat and make sure that the file has the correct size
- var buf: [mem.page_size]u8 = undefined;
+ var buf: [4096]u8 = undefined;
var amt_read = try f.read(buf[0..]);
const line_start = seek: {
var current_line_start: usize = 0;
@@ -1237,7 +1237,7 @@ test printLineFromFileAnyOs {
const overlap = 10;
var writer = file.writer();
- try writer.writeByteNTimes('a', mem.page_size - overlap);
+ try writer.writeByteNTimes('a', std.heap.page_size_min - overlap);
try writer.writeByte('\n');
try writer.writeByteNTimes('a', overlap);
@@ -1252,10 +1252,10 @@ test printLineFromFileAnyOs {
defer allocator.free(path);
var writer = file.writer();
- try writer.writeByteNTimes('a', mem.page_size);
+ try writer.writeByteNTimes('a', std.heap.page_size_max);
try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
- try expectEqualStrings(("a" ** mem.page_size) ++ "\n", output.items);
+ try expectEqualStrings(("a" ** std.heap.page_size_max) ++ "\n", output.items);
output.clearRetainingCapacity();
}
{
@@ -1265,18 +1265,18 @@ test printLineFromFileAnyOs {
defer allocator.free(path);
var writer = file.writer();
- try writer.writeByteNTimes('a', 3 * mem.page_size);
+ try writer.writeByteNTimes('a', 3 * std.heap.page_size_max);
try expectError(error.EndOfFile, printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 2, .column = 0 }));
try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
- try expectEqualStrings(("a" ** (3 * mem.page_size)) ++ "\n", output.items);
+ try expectEqualStrings(("a" ** (3 * std.heap.page_size_max)) ++ "\n", output.items);
output.clearRetainingCapacity();
try writer.writeAll("a\na");
try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
- try expectEqualStrings(("a" ** (3 * mem.page_size)) ++ "a\n", output.items);
+ try expectEqualStrings(("a" ** (3 * std.heap.page_size_max)) ++ "a\n", output.items);
output.clearRetainingCapacity();
try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 2, .column = 0 });
@@ -1290,7 +1290,7 @@ test printLineFromFileAnyOs {
defer allocator.free(path);
var writer = file.writer();
- const real_file_start = 3 * mem.page_size;
+ const real_file_start = 3 * std.heap.page_size_min;
try writer.writeByteNTimes('\n', real_file_start);
try writer.writeAll("abc\ndef");
diff --git a/lib/std/debug/Dwarf.zig b/lib/std/debug/Dwarf.zig
index 3469deaae9..b72ddcac47 100644
--- a/lib/std/debug/Dwarf.zig
+++ b/lib/std/debug/Dwarf.zig
@@ -2120,8 +2120,8 @@ fn pcRelBase(field_ptr: usize, pc_rel_offset: i64) !usize {
pub const ElfModule = struct {
base_address: usize,
dwarf: Dwarf,
- mapped_memory: []align(std.mem.page_size) const u8,
- external_mapped_memory: ?[]align(std.mem.page_size) const u8,
+ mapped_memory: []align(std.heap.page_size_min) const u8,
+ external_mapped_memory: ?[]align(std.heap.page_size_min) const u8,
pub fn deinit(self: *@This(), allocator: Allocator) void {
self.dwarf.deinit(allocator);
@@ -2167,11 +2167,11 @@ pub const ElfModule = struct {
/// sections from an external file.
pub fn load(
gpa: Allocator,
- mapped_mem: []align(std.mem.page_size) const u8,
+ mapped_mem: []align(std.heap.page_size_min) const u8,
build_id: ?[]const u8,
expected_crc: ?u32,
parent_sections: *Dwarf.SectionArray,
- parent_mapped_mem: ?[]align(std.mem.page_size) const u8,
+ parent_mapped_mem: ?[]align(std.heap.page_size_min) const u8,
elf_filename: ?[]const u8,
) LoadError!Dwarf.ElfModule {
if (expected_crc) |crc| if (crc != std.hash.crc.Crc32.hash(mapped_mem)) return error.InvalidDebugInfo;
@@ -2423,7 +2423,7 @@ pub const ElfModule = struct {
build_id: ?[]const u8,
expected_crc: ?u32,
parent_sections: *Dwarf.SectionArray,
- parent_mapped_mem: ?[]align(std.mem.page_size) const u8,
+ parent_mapped_mem: ?[]align(std.heap.page_size_min) const u8,
) LoadError!Dwarf.ElfModule {
const elf_file = elf_file_path.root_dir.handle.openFile(elf_file_path.sub_path, .{}) catch |err| switch (err) {
error.FileNotFound => return missing(),
diff --git a/lib/std/debug/Info.zig b/lib/std/debug/Info.zig
index 0a07d9ba15..c809547f73 100644
--- a/lib/std/debug/Info.zig
+++ b/lib/std/debug/Info.zig
@@ -10,7 +10,6 @@ const std = @import("../std.zig");
const Allocator = std.mem.Allocator;
const Path = std.Build.Cache.Path;
const Dwarf = std.debug.Dwarf;
-const page_size = std.mem.page_size;
const assert = std.debug.assert;
const Coverage = std.debug.Coverage;
const SourceLocation = std.debug.Coverage.SourceLocation;
diff --git a/lib/std/debug/MemoryAccessor.zig b/lib/std/debug/MemoryAccessor.zig
index 5f57ad5853..7857656554 100644
--- a/lib/std/debug/MemoryAccessor.zig
+++ b/lib/std/debug/MemoryAccessor.zig
@@ -7,7 +7,7 @@ const native_os = builtin.os.tag;
const std = @import("../std.zig");
const posix = std.posix;
const File = std.fs.File;
-const page_size = std.mem.page_size;
+const page_size_min = std.heap.page_size_min;
const MemoryAccessor = @This();
@@ -93,9 +93,10 @@ pub fn isValidMemory(address: usize) bool {
// We are unable to determine validity of memory for freestanding targets
if (native_os == .freestanding or native_os == .other or native_os == .uefi) return true;
- const aligned_address = address & ~@as(usize, @intCast((page_size - 1)));
+ const page_size = std.heap.pageSize();
+ const aligned_address = address & ~(page_size - 1);
if (aligned_address == 0) return false;
- const aligned_memory = @as([*]align(page_size) u8, @ptrFromInt(aligned_address))[0..page_size];
+ const aligned_memory = @as([*]align(page_size_min) u8, @ptrFromInt(aligned_address))[0..page_size];
if (native_os == .windows) {
const windows = std.os.windows;
@@ -104,7 +105,7 @@ pub fn isValidMemory(address: usize) bool {
// The only error this function can throw is ERROR_INVALID_PARAMETER.
// supply an address that invalid i'll be thrown.
- const rc = windows.VirtualQuery(aligned_memory, &memory_info, aligned_memory.len) catch {
+ const rc = windows.VirtualQuery(@ptrCast(aligned_memory), &memory_info, aligned_memory.len) catch {
return false;
};
diff --git a/lib/std/debug/SelfInfo.zig b/lib/std/debug/SelfInfo.zig
index a2cea70d37..b51a8f18d2 100644
--- a/lib/std/debug/SelfInfo.zig
+++ b/lib/std/debug/SelfInfo.zig
@@ -504,7 +504,7 @@ pub const Module = switch (native_os) {
.macos, .ios, .watchos, .tvos, .visionos => struct {
base_address: usize,
vmaddr_slide: usize,
- mapped_memory: []align(mem.page_size) const u8,
+ mapped_memory: []align(std.heap.page_size_min) const u8,
symbols: []const MachoSymbol,
strings: [:0]const u8,
ofiles: OFileTable,
@@ -1046,7 +1046,7 @@ pub fn readElfDebugInfo(
build_id: ?[]const u8,
expected_crc: ?u32,
parent_sections: *Dwarf.SectionArray,
- parent_mapped_mem: ?[]align(mem.page_size) const u8,
+ parent_mapped_mem: ?[]align(std.heap.page_size_min) const u8,
) !Dwarf.ElfModule {
nosuspend {
const elf_file = (if (elf_filename) |filename| blk: {
@@ -1088,7 +1088,7 @@ const MachoSymbol = struct {
/// Takes ownership of file, even on error.
/// TODO it's weird to take ownership even on error, rework this code.
-fn mapWholeFile(file: File) ![]align(mem.page_size) const u8 {
+fn mapWholeFile(file: File) ![]align(std.heap.page_size_min) const u8 {
nosuspend {
defer file.close();
diff --git a/lib/std/dynamic_library.zig b/lib/std/dynamic_library.zig
index 8f07db68da..b31fa5ea4d 100644
--- a/lib/std/dynamic_library.zig
+++ b/lib/std/dynamic_library.zig
@@ -143,7 +143,7 @@ pub const ElfDynLib = struct {
hashtab: [*]posix.Elf_Symndx,
versym: ?[*]elf.Versym,
verdef: ?*elf.Verdef,
- memory: []align(mem.page_size) u8,
+ memory: []align(std.heap.page_size_min) u8,
pub const Error = ElfDynLibError;
@@ -219,11 +219,13 @@ pub const ElfDynLib = struct {
const stat = try file.stat();
const size = std.math.cast(usize, stat.size) orelse return error.FileTooBig;
+ const page_size = std.heap.pageSize();
+
// This one is to read the ELF info. We do more mmapping later
// corresponding to the actual LOAD sections.
const file_bytes = try posix.mmap(
null,
- mem.alignForward(usize, size, mem.page_size),
+ mem.alignForward(usize, size, page_size),
posix.PROT.READ,
.{ .TYPE = .PRIVATE },
fd,
@@ -284,10 +286,10 @@ pub const ElfDynLib = struct {
elf.PT_LOAD => {
// The VirtAddr may not be page-aligned; in such case there will be
// extra nonsense mapped before/after the VirtAddr,MemSiz
- const aligned_addr = (base + ph.p_vaddr) & ~(@as(usize, mem.page_size) - 1);
+ const aligned_addr = (base + ph.p_vaddr) & ~(@as(usize, page_size) - 1);
const extra_bytes = (base + ph.p_vaddr) - aligned_addr;
- const extended_memsz = mem.alignForward(usize, ph.p_memsz + extra_bytes, mem.page_size);
- const ptr = @as([*]align(mem.page_size) u8, @ptrFromInt(aligned_addr));
+ const extended_memsz = mem.alignForward(usize, ph.p_memsz + extra_bytes, page_size);
+ const ptr = @as([*]align(std.heap.page_size_min) u8, @ptrFromInt(aligned_addr));
const prot = elfToMmapProt(ph.p_flags);
if ((ph.p_flags & elf.PF_W) == 0) {
// If it does not need write access, it can be mapped from the fd.
diff --git a/lib/std/fifo.zig b/lib/std/fifo.zig
index b07e870f04..fa5357cd6c 100644
--- a/lib/std/fifo.zig
+++ b/lib/std/fifo.zig
@@ -91,7 +91,7 @@ pub fn LinearFifo(
mem.copyForwards(T, self.buf[0..self.count], self.buf[self.head..][0..self.count]);
self.head = 0;
} else {
- var tmp: [mem.page_size / 2 / @sizeOf(T)]T = undefined;
+ var tmp: [4096 / 2 / @sizeOf(T)]T = undefined;
while (self.head != 0) {
const n = @min(self.head, tmp.len);
diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig
index 270cd1b273..1cb4bfe010 100644
--- a/lib/std/hash_map.zig
+++ b/lib/std/hash_map.zig
@@ -413,10 +413,15 @@ pub fn HashMap(
/// If there is an `Entry` with a matching key, it is deleted from
/// the hash map, and this function returns true. Otherwise this
/// function returns false.
+ ///
+ /// TODO: answer the question in these doc comments, does this
+ /// increase the unused capacity by one?
pub fn remove(self: *Self, key: K) bool {
return self.unmanaged.removeContext(key, self.ctx);
}
+ /// TODO: answer the question in these doc comments, does this
+ /// increase the unused capacity by one?
pub fn removeAdapted(self: *Self, key: anytype, ctx: anytype) bool {
return self.unmanaged.removeAdapted(key, ctx);
}
@@ -424,6 +429,9 @@ pub fn HashMap(
/// Delete the entry with key pointed to by key_ptr from the hash map.
/// key_ptr is assumed to be a valid pointer to a key that is present
/// in the hash map.
+ ///
+ /// TODO: answer the question in these doc comments, does this
+ /// increase the unused capacity by one?
pub fn removeByPtr(self: *Self, key_ptr: *K) void {
self.unmanaged.removeByPtr(key_ptr);
}
@@ -1225,14 +1233,23 @@ pub fn HashMapUnmanaged(
/// If there is an `Entry` with a matching key, it is deleted from
/// the hash map, and this function returns true. Otherwise this
/// function returns false.
+ ///
+ /// TODO: answer the question in these doc comments, does this
+ /// increase the unused capacity by one?
pub fn remove(self: *Self, key: K) bool {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call removeContext instead.");
return self.removeContext(key, undefined);
}
+
+ /// TODO: answer the question in these doc comments, does this
+ /// increase the unused capacity by one?
pub fn removeContext(self: *Self, key: K, ctx: Context) bool {
return self.removeAdapted(key, ctx);
}
+
+ /// TODO: answer the question in these doc comments, does this
+ /// increase the unused capacity by one?
pub fn removeAdapted(self: *Self, key: anytype, ctx: anytype) bool {
if (self.getIndex(key, ctx)) |idx| {
self.removeByIndex(idx);
@@ -1245,6 +1262,9 @@ pub fn HashMapUnmanaged(
/// Delete the entry with key pointed to by key_ptr from the hash map.
/// key_ptr is assumed to be a valid pointer to a key that is present
/// in the hash map.
+ ///
+ /// TODO: answer the question in these doc comments, does this
+ /// increase the unused capacity by one?
pub fn removeByPtr(self: *Self, key_ptr: *K) void {
// TODO: replace with pointer subtraction once supported by zig
// if @sizeOf(K) == 0 then there is at most one item in the hash
diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index df72786f0f..d1874c5b00 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -8,19 +8,20 @@ const c = std.c;
const Allocator = std.mem.Allocator;
const windows = std.os.windows;
-pub const LoggingAllocator = @import("heap/logging_allocator.zig").LoggingAllocator;
-pub const loggingAllocator = @import("heap/logging_allocator.zig").loggingAllocator;
-pub const ScopedLoggingAllocator = @import("heap/logging_allocator.zig").ScopedLoggingAllocator;
-pub const LogToWriterAllocator = @import("heap/log_to_writer_allocator.zig").LogToWriterAllocator;
-pub const logToWriterAllocator = @import("heap/log_to_writer_allocator.zig").logToWriterAllocator;
pub const ArenaAllocator = @import("heap/arena_allocator.zig").ArenaAllocator;
-pub const GeneralPurposeAllocatorConfig = @import("heap/general_purpose_allocator.zig").Config;
-pub const GeneralPurposeAllocator = @import("heap/general_purpose_allocator.zig").GeneralPurposeAllocator;
-pub const Check = @import("heap/general_purpose_allocator.zig").Check;
pub const WasmAllocator = @import("heap/WasmAllocator.zig");
pub const PageAllocator = @import("heap/PageAllocator.zig");
pub const ThreadSafeAllocator = @import("heap/ThreadSafeAllocator.zig");
pub const SbrkAllocator = @import("heap/sbrk_allocator.zig").SbrkAllocator;
+pub const FixedBufferAllocator = @import("heap/FixedBufferAllocator.zig");
+
+pub const DebugAllocatorConfig = @import("heap/debug_allocator.zig").Config;
+pub const DebugAllocator = @import("heap/debug_allocator.zig").DebugAllocator;
+pub const Check = enum { ok, leak };
+/// Deprecated; to be removed after 0.14.0 is tagged.
+pub const GeneralPurposeAllocatorConfig = DebugAllocatorConfig;
+/// Deprecated; to be removed after 0.14.0 is tagged.
+pub const GeneralPurposeAllocator = DebugAllocator;
const memory_pool = @import("heap/memory_pool.zig");
pub const MemoryPool = memory_pool.MemoryPool;
@@ -29,7 +30,97 @@ pub const MemoryPoolExtra = memory_pool.MemoryPoolExtra;
pub const MemoryPoolOptions = memory_pool.Options;
/// TODO Utilize this on Windows.
-pub var next_mmap_addr_hint: ?[*]align(mem.page_size) u8 = null;
+pub var next_mmap_addr_hint: ?[*]align(page_size_min) u8 = null;
+
+/// comptime-known minimum page size of the target.
+///
+/// All pointers from `mmap` or `VirtualAlloc` are aligned to at least
+/// `page_size_min`, but their actual alignment may be bigger.
+///
+/// This value can be overridden via `std.options.page_size_min`.
+///
+/// On many systems, the actual page size can only be determined at runtime
+/// with `pageSize`.
+pub const page_size_min: usize = std.options.page_size_min orelse (page_size_min_default orelse if (builtin.os.tag == .freestanding or builtin.os.tag == .other)
+ @compileError("freestanding/other page_size_min must provided with std.options.page_size_min")
+else
+ @compileError(@tagName(builtin.cpu.arch) ++ "-" ++ @tagName(builtin.os.tag) ++ " has unknown page_size_min; populate std.options.page_size_min"));
+
+/// comptime-known maximum page size of the target.
+///
+/// Targeting a system with a larger page size may require overriding
+/// `std.options.page_size_max`, as well as providing a corresponding linker
+/// option.
+///
+/// The actual page size can only be determined at runtime with `pageSize`.
+pub const page_size_max: usize = std.options.page_size_max orelse (page_size_max_default orelse if (builtin.os.tag == .freestanding or builtin.os.tag == .other)
+ @compileError("freestanding/other page_size_max must provided with std.options.page_size_max")
+else
+ @compileError(@tagName(builtin.cpu.arch) ++ "-" ++ @tagName(builtin.os.tag) ++ " has unknown page_size_max; populate std.options.page_size_max"));
+
+/// If the page size is comptime-known, return value is comptime.
+/// Otherwise, calls `std.options.queryPageSize` which by default queries the
+/// host operating system at runtime.
+pub inline fn pageSize() usize {
+ if (page_size_min == page_size_max) return page_size_min;
+ return std.options.queryPageSize();
+}
+
+test pageSize {
+ assert(std.math.isPowerOfTwo(pageSize()));
+}
+
+/// The default implementation of `std.options.queryPageSize`.
+/// Asserts that the page size is within `page_size_min` and `page_size_max`
+pub fn defaultQueryPageSize() usize {
+ const global = struct {
+ var cached_result: std.atomic.Value(usize) = .init(0);
+ };
+ var size = global.cached_result.load(.unordered);
+ if (size > 0) return size;
+ size = switch (builtin.os.tag) {
+ .linux => if (builtin.link_libc) @intCast(std.c.sysconf(@intFromEnum(std.c._SC.PAGESIZE))) else std.os.linux.getauxval(std.elf.AT_PAGESZ),
+ .driverkit, .ios, .macos, .tvos, .visionos, .watchos => blk: {
+ const task_port = std.c.mach_task_self();
+ // mach_task_self may fail "if there are any resource failures or other errors".
+ if (task_port == std.c.TASK_NULL)
+ break :blk 0;
+ var info_count = std.c.TASK_VM_INFO_COUNT;
+ var vm_info: std.c.task_vm_info_data_t = undefined;
+ vm_info.page_size = 0;
+ _ = std.c.task_info(
+ task_port,
+ std.c.TASK_VM_INFO,
+ @as(std.c.task_info_t, @ptrCast(&vm_info)),
+ &info_count,
+ );
+ assert(vm_info.page_size != 0);
+ break :blk @intCast(vm_info.page_size);
+ },
+ .windows => blk: {
+ var info: std.os.windows.SYSTEM_INFO = undefined;
+ std.os.windows.kernel32.GetSystemInfo(&info);
+ break :blk info.dwPageSize;
+ },
+ else => if (builtin.link_libc)
+ @intCast(std.c.sysconf(@intFromEnum(std.c._SC.PAGESIZE)))
+ else if (builtin.os.tag == .freestanding or builtin.os.tag == .other)
+ @compileError("unsupported target: freestanding/other")
+ else
+ @compileError("pageSize on " ++ @tagName(builtin.cpu.arch) ++ "-" ++ @tagName(builtin.os.tag) ++ " is not supported without linking libc, using the default implementation"),
+ };
+
+ assert(size >= page_size_min);
+ assert(size <= page_size_max);
+ global.cached_result.store(size, .unordered);
+
+ return size;
+}
+
+test defaultQueryPageSize {
+ if (builtin.cpu.arch.isWasm()) return error.SkipZigTest;
+ assert(std.math.isPowerOfTwo(defaultQueryPageSize()));
+}
const CAllocator = struct {
comptime {
@@ -38,6 +129,13 @@ const CAllocator = struct {
}
}
+ const vtable: Allocator.VTable = .{
+ .alloc = alloc,
+ .resize = resize,
+ .remap = remap,
+ .free = free,
+ };
+
pub const supports_malloc_size = @TypeOf(malloc_size) != void;
pub const malloc_size = if (@TypeOf(c.malloc_size) != void)
c.malloc_size
@@ -53,29 +151,29 @@ const CAllocator = struct {
};
fn getHeader(ptr: [*]u8) *[*]u8 {
- return @as(*[*]u8, @ptrFromInt(@intFromPtr(ptr) - @sizeOf(usize)));
+ return @alignCast(@ptrCast(ptr - @sizeOf(usize)));
}
- fn alignedAlloc(len: usize, log2_align: u8) ?[*]u8 {
- const alignment = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_align));
+ fn alignedAlloc(len: usize, alignment: mem.Alignment) ?[*]u8 {
+ const alignment_bytes = alignment.toByteUnits();
if (supports_posix_memalign) {
// The posix_memalign only accepts alignment values that are a
// multiple of the pointer size
- const eff_alignment = @max(alignment, @sizeOf(usize));
+ const effective_alignment = @max(alignment_bytes, @sizeOf(usize));
var aligned_ptr: ?*anyopaque = undefined;
- if (c.posix_memalign(&aligned_ptr, eff_alignment, len) != 0)
+ if (c.posix_memalign(&aligned_ptr, effective_alignment, len) != 0)
return null;
- return @as([*]u8, @ptrCast(aligned_ptr));
+ return @ptrCast(aligned_ptr);
}
// Thin wrapper around regular malloc, overallocate to account for
// alignment padding and store the original malloc()'ed pointer before
// the aligned address.
- const unaligned_ptr = @as([*]u8, @ptrCast(c.malloc(len + alignment - 1 + @sizeOf(usize)) orelse return null));
+ const unaligned_ptr = @as([*]u8, @ptrCast(c.malloc(len + alignment_bytes - 1 + @sizeOf(usize)) orelse return null));
const unaligned_addr = @intFromPtr(unaligned_ptr);
- const aligned_addr = mem.alignForward(usize, unaligned_addr + @sizeOf(usize), alignment);
+ const aligned_addr = mem.alignForward(usize, unaligned_addr + @sizeOf(usize), alignment_bytes);
const aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr);
getHeader(aligned_ptr).* = unaligned_ptr;
@@ -104,22 +202,22 @@ const CAllocator = struct {
fn alloc(
_: *anyopaque,
len: usize,
- log2_align: u8,
+ alignment: mem.Alignment,
return_address: usize,
) ?[*]u8 {
_ = return_address;
assert(len > 0);
- return alignedAlloc(len, log2_align);
+ return alignedAlloc(len, alignment);
}
fn resize(
_: *anyopaque,
buf: []u8,
- log2_buf_align: u8,
+ alignment: mem.Alignment,
new_len: usize,
return_address: usize,
) bool {
- _ = log2_buf_align;
+ _ = alignment;
_ = return_address;
if (new_len <= buf.len) {
return true;
@@ -133,13 +231,25 @@ const CAllocator = struct {
return false;
}
+ fn remap(
+ context: *anyopaque,
+ memory: []u8,
+ alignment: mem.Alignment,
+ new_len: usize,
+ return_address: usize,
+ ) ?[*]u8 {
+ // realloc would potentially return a new allocation that does not
+ // respect the original alignment.
+ return if (resize(context, memory, alignment, new_len, return_address)) memory.ptr else null;
+ }
+
fn free(
_: *anyopaque,
buf: []u8,
- log2_buf_align: u8,
+ alignment: mem.Alignment,
return_address: usize,
) void {
- _ = log2_buf_align;
+ _ = alignment;
_ = return_address;
alignedFree(buf.ptr);
}
@@ -148,78 +258,83 @@ const CAllocator = struct {
/// Supports the full Allocator interface, including alignment, and exploiting
/// `malloc_usable_size` if available. For an allocator that directly calls
/// `malloc`/`free`, see `raw_c_allocator`.
-pub const c_allocator = Allocator{
+pub const c_allocator: Allocator = .{
.ptr = undefined,
- .vtable = &c_allocator_vtable,
-};
-const c_allocator_vtable = Allocator.VTable{
- .alloc = CAllocator.alloc,
- .resize = CAllocator.resize,
- .free = CAllocator.free,
+ .vtable = &CAllocator.vtable,
};
-/// Asserts allocations are within `@alignOf(std.c.max_align_t)` and directly calls
-/// `malloc`/`free`. Does not attempt to utilize `malloc_usable_size`.
+/// Asserts allocations are within `@alignOf(std.c.max_align_t)` and directly
+/// calls `malloc`/`free`. Does not attempt to utilize `malloc_usable_size`.
/// This allocator is safe to use as the backing allocator with
-/// `ArenaAllocator` for example and is more optimal in such a case
-/// than `c_allocator`.
-pub const raw_c_allocator = Allocator{
+/// `ArenaAllocator` for example and is more optimal in such a case than
+/// `c_allocator`.
+pub const raw_c_allocator: Allocator = .{
.ptr = undefined,
.vtable = &raw_c_allocator_vtable,
};
-const raw_c_allocator_vtable = Allocator.VTable{
+const raw_c_allocator_vtable: Allocator.VTable = .{
.alloc = rawCAlloc,
.resize = rawCResize,
+ .remap = rawCRemap,
.free = rawCFree,
};
fn rawCAlloc(
- _: *anyopaque,
+ context: *anyopaque,
len: usize,
- log2_ptr_align: u8,
- ret_addr: usize,
+ alignment: mem.Alignment,
+ return_address: usize,
) ?[*]u8 {
- _ = ret_addr;
- assert(log2_ptr_align <= comptime std.math.log2_int(usize, @alignOf(std.c.max_align_t)));
+ _ = context;
+ _ = return_address;
+ assert(alignment.compare(.lte, comptime .fromByteUnits(@alignOf(std.c.max_align_t))));
// Note that this pointer cannot be aligncasted to max_align_t because if
// len is < max_align_t then the alignment can be smaller. For example, if
// max_align_t is 16, but the user requests 8 bytes, there is no built-in
// type in C that is size 8 and has 16 byte alignment, so the alignment may
// be 8 bytes rather than 16. Similarly if only 1 byte is requested, malloc
// is allowed to return a 1-byte aligned pointer.
- return @as(?[*]u8, @ptrCast(c.malloc(len)));
+ return @ptrCast(c.malloc(len));
}
fn rawCResize(
- _: *anyopaque,
- buf: []u8,
- log2_old_align: u8,
+ context: *anyopaque,
+ memory: []u8,
+ alignment: mem.Alignment,
new_len: usize,
- ret_addr: usize,
+ return_address: usize,
) bool {
- _ = log2_old_align;
- _ = ret_addr;
-
- if (new_len <= buf.len)
- return true;
-
- if (CAllocator.supports_malloc_size) {
- const full_len = CAllocator.malloc_size(buf.ptr);
- if (new_len <= full_len) return true;
- }
-
+ _ = context;
+ _ = memory;
+ _ = alignment;
+ _ = new_len;
+ _ = return_address;
return false;
}
+fn rawCRemap(
+ context: *anyopaque,
+ memory: []u8,
+ alignment: mem.Alignment,
+ new_len: usize,
+ return_address: usize,
+) ?[*]u8 {
+ _ = context;
+ _ = alignment;
+ _ = return_address;
+ return @ptrCast(c.realloc(memory.ptr, new_len));
+}
+
fn rawCFree(
- _: *anyopaque,
- buf: []u8,
- log2_old_align: u8,
- ret_addr: usize,
+ context: *anyopaque,
+ memory: []u8,
+ alignment: mem.Alignment,
+ return_address: usize,
) void {
- _ = log2_old_align;
- _ = ret_addr;
- c.free(buf.ptr);
+ _ = context;
+ _ = alignment;
+ _ = return_address;
+ c.free(memory.ptr);
}
/// On operating systems that support memory mapping, this allocator makes a
@@ -253,252 +368,6 @@ pub const wasm_allocator: Allocator = .{
.vtable = &WasmAllocator.vtable,
};
-/// Verifies that the adjusted length will still map to the full length
-pub fn alignPageAllocLen(full_len: usize, len: usize) usize {
- const aligned_len = mem.alignAllocLen(full_len, len);
- assert(mem.alignForward(usize, aligned_len, mem.page_size) == full_len);
- return aligned_len;
-}
-
-pub const HeapAllocator = switch (builtin.os.tag) {
- .windows => struct {
- heap_handle: ?HeapHandle,
-
- const HeapHandle = windows.HANDLE;
-
- pub fn init() HeapAllocator {
- return HeapAllocator{
- .heap_handle = null,
- };
- }
-
- pub fn allocator(self: *HeapAllocator) Allocator {
- return .{
- .ptr = self,
- .vtable = &.{
- .alloc = alloc,
- .resize = resize,
- .free = free,
- },
- };
- }
-
- pub fn deinit(self: *HeapAllocator) void {
- if (self.heap_handle) |heap_handle| {
- windows.HeapDestroy(heap_handle);
- }
- }
-
- fn getRecordPtr(buf: []u8) *align(1) usize {
- return @as(*align(1) usize, @ptrFromInt(@intFromPtr(buf.ptr) + buf.len));
- }
-
- fn alloc(
- ctx: *anyopaque,
- n: usize,
- log2_ptr_align: u8,
- return_address: usize,
- ) ?[*]u8 {
- _ = return_address;
- const self: *HeapAllocator = @ptrCast(@alignCast(ctx));
-
- const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
- const amt = n + ptr_align - 1 + @sizeOf(usize);
- const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, .seq_cst);
- const heap_handle = optional_heap_handle orelse blk: {
- const options = if (builtin.single_threaded) windows.HEAP_NO_SERIALIZE else 0;
- const hh = windows.kernel32.HeapCreate(options, amt, 0) orelse return null;
- const other_hh = @cmpxchgStrong(?HeapHandle, &self.heap_handle, null, hh, .seq_cst, .seq_cst) orelse break :blk hh;
- windows.HeapDestroy(hh);
- break :blk other_hh.?; // can't be null because of the cmpxchg
- };
- const ptr = windows.kernel32.HeapAlloc(heap_handle, 0, amt) orelse return null;
- const root_addr = @intFromPtr(ptr);
- const aligned_addr = mem.alignForward(usize, root_addr, ptr_align);
- const buf = @as([*]u8, @ptrFromInt(aligned_addr))[0..n];
- getRecordPtr(buf).* = root_addr;
- return buf.ptr;
- }
-
- fn resize(
- ctx: *anyopaque,
- buf: []u8,
- log2_buf_align: u8,
- new_size: usize,
- return_address: usize,
- ) bool {
- _ = log2_buf_align;
- _ = return_address;
- const self: *HeapAllocator = @ptrCast(@alignCast(ctx));
-
- const root_addr = getRecordPtr(buf).*;
- const align_offset = @intFromPtr(buf.ptr) - root_addr;
- const amt = align_offset + new_size + @sizeOf(usize);
- const new_ptr = windows.kernel32.HeapReAlloc(
- self.heap_handle.?,
- windows.HEAP_REALLOC_IN_PLACE_ONLY,
- @as(*anyopaque, @ptrFromInt(root_addr)),
- amt,
- ) orelse return false;
- assert(new_ptr == @as(*anyopaque, @ptrFromInt(root_addr)));
- getRecordPtr(buf.ptr[0..new_size]).* = root_addr;
- return true;
- }
-
- fn free(
- ctx: *anyopaque,
- buf: []u8,
- log2_buf_align: u8,
- return_address: usize,
- ) void {
- _ = log2_buf_align;
- _ = return_address;
- const self: *HeapAllocator = @ptrCast(@alignCast(ctx));
- windows.HeapFree(self.heap_handle.?, 0, @as(*anyopaque, @ptrFromInt(getRecordPtr(buf).*)));
- }
- },
- else => @compileError("Unsupported OS"),
-};
-
-fn sliceContainsPtr(container: []u8, ptr: [*]u8) bool {
- return @intFromPtr(ptr) >= @intFromPtr(container.ptr) and
- @intFromPtr(ptr) < (@intFromPtr(container.ptr) + container.len);
-}
-
-fn sliceContainsSlice(container: []u8, slice: []u8) bool {
- return @intFromPtr(slice.ptr) >= @intFromPtr(container.ptr) and
- (@intFromPtr(slice.ptr) + slice.len) <= (@intFromPtr(container.ptr) + container.len);
-}
-
-pub const FixedBufferAllocator = struct {
- end_index: usize,
- buffer: []u8,
-
- pub fn init(buffer: []u8) FixedBufferAllocator {
- return FixedBufferAllocator{
- .buffer = buffer,
- .end_index = 0,
- };
- }
-
- /// *WARNING* using this at the same time as the interface returned by `threadSafeAllocator` is not thread safe
- pub fn allocator(self: *FixedBufferAllocator) Allocator {
- return .{
- .ptr = self,
- .vtable = &.{
- .alloc = alloc,
- .resize = resize,
- .free = free,
- },
- };
- }
-
- /// Provides a lock free thread safe `Allocator` interface to the underlying `FixedBufferAllocator`
- /// *WARNING* using this at the same time as the interface returned by `allocator` is not thread safe
- pub fn threadSafeAllocator(self: *FixedBufferAllocator) Allocator {
- return .{
- .ptr = self,
- .vtable = &.{
- .alloc = threadSafeAlloc,
- .resize = Allocator.noResize,
- .free = Allocator.noFree,
- },
- };
- }
-
- pub fn ownsPtr(self: *FixedBufferAllocator, ptr: [*]u8) bool {
- return sliceContainsPtr(self.buffer, ptr);
- }
-
- pub fn ownsSlice(self: *FixedBufferAllocator, slice: []u8) bool {
- return sliceContainsSlice(self.buffer, slice);
- }
-
- /// NOTE: this will not work in all cases, if the last allocation had an adjusted_index
- /// then we won't be able to determine what the last allocation was. This is because
- /// the alignForward operation done in alloc is not reversible.
- pub fn isLastAllocation(self: *FixedBufferAllocator, buf: []u8) bool {
- return buf.ptr + buf.len == self.buffer.ptr + self.end_index;
- }
-
- fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
- const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
- _ = ra;
- const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
- const adjust_off = mem.alignPointerOffset(self.buffer.ptr + self.end_index, ptr_align) orelse return null;
- const adjusted_index = self.end_index + adjust_off;
- const new_end_index = adjusted_index + n;
- if (new_end_index > self.buffer.len) return null;
- self.end_index = new_end_index;
- return self.buffer.ptr + adjusted_index;
- }
-
- fn resize(
- ctx: *anyopaque,
- buf: []u8,
- log2_buf_align: u8,
- new_size: usize,
- return_address: usize,
- ) bool {
- const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
- _ = log2_buf_align;
- _ = return_address;
- assert(@inComptime() or self.ownsSlice(buf));
-
- if (!self.isLastAllocation(buf)) {
- if (new_size > buf.len) return false;
- return true;
- }
-
- if (new_size <= buf.len) {
- const sub = buf.len - new_size;
- self.end_index -= sub;
- return true;
- }
-
- const add = new_size - buf.len;
- if (add + self.end_index > self.buffer.len) return false;
-
- self.end_index += add;
- return true;
- }
-
- fn free(
- ctx: *anyopaque,
- buf: []u8,
- log2_buf_align: u8,
- return_address: usize,
- ) void {
- const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
- _ = log2_buf_align;
- _ = return_address;
- assert(@inComptime() or self.ownsSlice(buf));
-
- if (self.isLastAllocation(buf)) {
- self.end_index -= buf.len;
- }
- }
-
- fn threadSafeAlloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
- const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
- _ = ra;
- const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
- var end_index = @atomicLoad(usize, &self.end_index, .seq_cst);
- while (true) {
- const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse return null;
- const adjusted_index = end_index + adjust_off;
- const new_end_index = adjusted_index + n;
- if (new_end_index > self.buffer.len) return null;
- end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, .seq_cst, .seq_cst) orelse
- return self.buffer[adjusted_index..new_end_index].ptr;
- }
- }
-
- pub fn reset(self: *FixedBufferAllocator) void {
- self.end_index = 0;
- }
-};
-
/// Returns a `StackFallbackAllocator` allocating using either a
/// `FixedBufferAllocator` on an array of size `size` and falling back to
/// `fallback_allocator` if that fails.
@@ -537,6 +406,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
.vtable = &.{
.alloc = alloc,
.resize = resize,
+ .remap = remap,
.free = free,
},
};
@@ -551,40 +421,55 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
fn alloc(
ctx: *anyopaque,
len: usize,
- log2_ptr_align: u8,
+ alignment: mem.Alignment,
ra: usize,
) ?[*]u8 {
const self: *Self = @ptrCast(@alignCast(ctx));
- return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, log2_ptr_align, ra) orelse
- return self.fallback_allocator.rawAlloc(len, log2_ptr_align, ra);
+ return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, alignment, ra) orelse
+ return self.fallback_allocator.rawAlloc(len, alignment, ra);
}
fn resize(
ctx: *anyopaque,
buf: []u8,
- log2_buf_align: u8,
+ alignment: mem.Alignment,
new_len: usize,
ra: usize,
) bool {
const self: *Self = @ptrCast(@alignCast(ctx));
if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
- return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, log2_buf_align, new_len, ra);
+ return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, alignment, new_len, ra);
} else {
- return self.fallback_allocator.rawResize(buf, log2_buf_align, new_len, ra);
+ return self.fallback_allocator.rawResize(buf, alignment, new_len, ra);
+ }
+ }
+
+ fn remap(
+ context: *anyopaque,
+ memory: []u8,
+ alignment: mem.Alignment,
+ new_len: usize,
+ return_address: usize,
+ ) ?[*]u8 {
+ const self: *Self = @ptrCast(@alignCast(context));
+ if (self.fixed_buffer_allocator.ownsPtr(memory.ptr)) {
+ return FixedBufferAllocator.remap(&self.fixed_buffer_allocator, memory, alignment, new_len, return_address);
+ } else {
+ return self.fallback_allocator.rawRemap(memory, alignment, new_len, return_address);
}
}
fn free(
ctx: *anyopaque,
buf: []u8,
- log2_buf_align: u8,
+ alignment: mem.Alignment,
ra: usize,
) void {
const self: *Self = @ptrCast(@alignCast(ctx));
if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
- return FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, log2_buf_align, ra);
+ return FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, alignment, ra);
} else {
- return self.fallback_allocator.rawFree(buf, log2_buf_align, ra);
+ return self.fallback_allocator.rawFree(buf, alignment, ra);
}
}
};
@@ -605,7 +490,7 @@ test "raw_c_allocator" {
}
}
-test "PageAllocator" {
+test PageAllocator {
const allocator = page_allocator;
try testAllocator(allocator);
try testAllocatorAligned(allocator);
@@ -615,35 +500,19 @@ test "PageAllocator" {
}
if (builtin.os.tag == .windows) {
- const slice = try allocator.alignedAlloc(u8, mem.page_size, 128);
+ const slice = try allocator.alignedAlloc(u8, page_size_min, 128);
slice[0] = 0x12;
slice[127] = 0x34;
allocator.free(slice);
}
{
- var buf = try allocator.alloc(u8, mem.page_size + 1);
+ var buf = try allocator.alloc(u8, pageSize() + 1);
defer allocator.free(buf);
buf = try allocator.realloc(buf, 1); // shrink past the page boundary
}
}
-test "HeapAllocator" {
- if (builtin.os.tag == .windows) {
- // https://github.com/ziglang/zig/issues/13702
- if (builtin.cpu.arch == .aarch64) return error.SkipZigTest;
-
- var heap_allocator = HeapAllocator.init();
- defer heap_allocator.deinit();
- const allocator = heap_allocator.allocator();
-
- try testAllocator(allocator);
- try testAllocatorAligned(allocator);
- try testAllocatorLargeAlignment(allocator);
- try testAllocatorAlignedShrink(allocator);
- }
-}
-
-test "ArenaAllocator" {
+test ArenaAllocator {
var arena_allocator = ArenaAllocator.init(page_allocator);
defer arena_allocator.deinit();
const allocator = arena_allocator.allocator();
@@ -654,38 +523,6 @@ test "ArenaAllocator" {
try testAllocatorAlignedShrink(allocator);
}
-var test_fixed_buffer_allocator_memory: [800000 * @sizeOf(u64)]u8 = undefined;
-test "FixedBufferAllocator" {
- var fixed_buffer_allocator = mem.validationWrap(FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]));
- const allocator = fixed_buffer_allocator.allocator();
-
- try testAllocator(allocator);
- try testAllocatorAligned(allocator);
- try testAllocatorLargeAlignment(allocator);
- try testAllocatorAlignedShrink(allocator);
-}
-
-test "FixedBufferAllocator.reset" {
- var buf: [8]u8 align(@alignOf(u64)) = undefined;
- var fba = FixedBufferAllocator.init(buf[0..]);
- const allocator = fba.allocator();
-
- const X = 0xeeeeeeeeeeeeeeee;
- const Y = 0xffffffffffffffff;
-
- const x = try allocator.create(u64);
- x.* = X;
- try testing.expectError(error.OutOfMemory, allocator.create(u64));
-
- fba.reset();
- const y = try allocator.create(u64);
- y.* = Y;
-
- // we expect Y to have overwritten X.
- try testing.expect(x.* == y.*);
- try testing.expect(y.* == Y);
-}
-
test "StackFallbackAllocator" {
{
var stack_allocator = stackFallback(4096, std.testing.allocator);
@@ -705,46 +542,6 @@ test "StackFallbackAllocator" {
}
}
-test "FixedBufferAllocator Reuse memory on realloc" {
- var small_fixed_buffer: [10]u8 = undefined;
- // check if we re-use the memory
- {
- var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
- const allocator = fixed_buffer_allocator.allocator();
-
- const slice0 = try allocator.alloc(u8, 5);
- try testing.expect(slice0.len == 5);
- const slice1 = try allocator.realloc(slice0, 10);
- try testing.expect(slice1.ptr == slice0.ptr);
- try testing.expect(slice1.len == 10);
- try testing.expectError(error.OutOfMemory, allocator.realloc(slice1, 11));
- }
- // check that we don't re-use the memory if it's not the most recent block
- {
- var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
- const allocator = fixed_buffer_allocator.allocator();
-
- var slice0 = try allocator.alloc(u8, 2);
- slice0[0] = 1;
- slice0[1] = 2;
- const slice1 = try allocator.alloc(u8, 2);
- const slice2 = try allocator.realloc(slice0, 4);
- try testing.expect(slice0.ptr != slice2.ptr);
- try testing.expect(slice1.ptr != slice2.ptr);
- try testing.expect(slice2[0] == 1);
- try testing.expect(slice2[1] == 2);
- }
-}
-
-test "Thread safe FixedBufferAllocator" {
- var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
-
- try testAllocator(fixed_buffer_allocator.threadSafeAllocator());
- try testAllocatorAligned(fixed_buffer_allocator.threadSafeAllocator());
- try testAllocatorLargeAlignment(fixed_buffer_allocator.threadSafeAllocator());
- try testAllocatorAlignedShrink(fixed_buffer_allocator.threadSafeAllocator());
-}
-
/// This one should not try alignments that exceed what C malloc can handle.
pub fn testAllocator(base_allocator: mem.Allocator) !void {
var validationAllocator = mem.validationWrap(base_allocator);
@@ -824,7 +621,7 @@ pub fn testAllocatorLargeAlignment(base_allocator: mem.Allocator) !void {
var validationAllocator = mem.validationWrap(base_allocator);
const allocator = validationAllocator.allocator();
- const large_align: usize = mem.page_size / 2;
+ const large_align: usize = page_size_min / 2;
var align_mask: usize = undefined;
align_mask = @shlWithOverflow(~@as(usize, 0), @as(Allocator.Log2Align, @ctz(large_align)))[0];
@@ -857,7 +654,7 @@ pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void {
var fib = FixedBufferAllocator.init(&debug_buffer);
const debug_allocator = fib.allocator();
- const alloc_size = mem.page_size * 2 + 50;
+ const alloc_size = pageSize() * 2 + 50;
var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
defer allocator.free(slice);
@@ -866,7 +663,7 @@ pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void {
// which is 16 pages, hence the 32. This test may require to increase
// the size of the allocations feeding the `allocator` parameter if they
// fail, because of this high over-alignment we want to have.
- while (@intFromPtr(slice.ptr) == mem.alignForward(usize, @intFromPtr(slice.ptr), mem.page_size * 32)) {
+ while (@intFromPtr(slice.ptr) == mem.alignForward(usize, @intFromPtr(slice.ptr), pageSize() * 32)) {
try stuff_to_free.append(slice);
slice = try allocator.alignedAlloc(u8, 16, alloc_size);
}
@@ -881,13 +678,303 @@ pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void {
try testing.expect(slice[60] == 0x34);
}
+const page_size_min_default: ?usize = switch (builtin.os.tag) {
+ .driverkit, .ios, .macos, .tvos, .visionos, .watchos => switch (builtin.cpu.arch) {
+ .x86_64 => 4 << 10,
+ .aarch64 => 16 << 10,
+ else => null,
+ },
+ .windows => switch (builtin.cpu.arch) {
+ // -- <https://devblogs.microsoft.com/oldnewthing/20210510-00/?p=105200>
+ .x86, .x86_64 => 4 << 10,
+ // SuperH => 4 << 10,
+ .mips, .mipsel, .mips64, .mips64el => 4 << 10,
+ .powerpc, .powerpcle, .powerpc64, .powerpc64le => 4 << 10,
+ // DEC Alpha => 8 << 10,
+ // Itanium => 8 << 10,
+ .thumb, .thumbeb, .arm, .armeb, .aarch64, .aarch64_be => 4 << 10,
+ else => null,
+ },
+ .wasi => switch (builtin.cpu.arch) {
+ .wasm32, .wasm64 => 64 << 10,
+ else => null,
+ },
+ // https://github.com/tianocore/edk2/blob/b158dad150bf02879668f72ce306445250838201/MdePkg/Include/Uefi/UefiBaseType.h#L180-L187
+ .uefi => 4 << 10,
+ .freebsd => switch (builtin.cpu.arch) {
+ // FreeBSD/sys/*
+ .x86, .x86_64 => 4 << 10,
+ .thumb, .thumbeb, .arm, .armeb => 4 << 10,
+ .aarch64, .aarch64_be => 4 << 10,
+ .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
+ .riscv32, .riscv64 => 4 << 10,
+ else => null,
+ },
+ .netbsd => switch (builtin.cpu.arch) {
+ // NetBSD/sys/arch/*
+ .x86, .x86_64 => 4 << 10,
+ .thumb, .thumbeb, .arm, .armeb => 4 << 10,
+ .aarch64, .aarch64_be => 4 << 10,
+ .mips, .mipsel, .mips64, .mips64el => 4 << 10,
+ .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
+ .sparc => 4 << 10,
+ .sparc64 => 8 << 10,
+ .riscv32, .riscv64 => 4 << 10,
+ // Sun-2
+ .m68k => 2 << 10,
+ else => null,
+ },
+ .dragonfly => switch (builtin.cpu.arch) {
+ .x86, .x86_64 => 4 << 10,
+ else => null,
+ },
+ .openbsd => switch (builtin.cpu.arch) {
+ // OpenBSD/sys/arch/*
+ .x86, .x86_64 => 4 << 10,
+ .thumb, .thumbeb, .arm, .armeb, .aarch64, .aarch64_be => 4 << 10,
+ .mips64, .mips64el => 4 << 10,
+ .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
+ .riscv64 => 4 << 10,
+ .sparc64 => 8 << 10,
+ else => null,
+ },
+ .solaris, .illumos => switch (builtin.cpu.arch) {
+ // src/uts/*/sys/machparam.h
+ .x86, .x86_64 => 4 << 10,
+ .sparc, .sparc64 => 8 << 10,
+ else => null,
+ },
+ .fuchsia => switch (builtin.cpu.arch) {
+ // fuchsia/kernel/arch/*/include/arch/defines.h
+ .x86_64 => 4 << 10,
+ .aarch64, .aarch64_be => 4 << 10,
+ .riscv64 => 4 << 10,
+ else => null,
+ },
+ // https://github.com/SerenityOS/serenity/blob/62b938b798dc009605b5df8a71145942fc53808b/Kernel/API/POSIX/sys/limits.h#L11-L13
+ .serenity => 4 << 10,
+ .haiku => switch (builtin.cpu.arch) {
+ // haiku/headers/posix/arch/*/limits.h
+ .thumb, .thumbeb, .arm, .armeb => 4 << 10,
+ .aarch64, .aarch64_be => 4 << 10,
+ .m68k => 4 << 10,
+ .mips, .mipsel, .mips64, .mips64el => 4 << 10,
+ .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
+ .riscv64 => 4 << 10,
+ .sparc64 => 8 << 10,
+ .x86, .x86_64 => 4 << 10,
+ else => null,
+ },
+ .hurd => switch (builtin.cpu.arch) {
+ // gnumach/*/include/mach/*/vm_param.h
+ .x86, .x86_64 => 4 << 10,
+ .aarch64 => null,
+ else => null,
+ },
+ .plan9 => switch (builtin.cpu.arch) {
+ // 9front/sys/src/9/*/mem.h
+ .x86, .x86_64 => 4 << 10,
+ .thumb, .thumbeb, .arm, .armeb => 4 << 10,
+ .aarch64, .aarch64_be => 4 << 10,
+ .mips, .mipsel, .mips64, .mips64el => 4 << 10,
+ .powerpc, .powerpcle, .powerpc64, .powerpc64le => 4 << 10,
+ .sparc => 4 << 10,
+ else => null,
+ },
+ .ps3 => switch (builtin.cpu.arch) {
+ // cell/SDK_doc/en/html/C_and_C++_standard_libraries/stdlib.html
+ .powerpc64 => 1 << 20, // 1 MiB
+ else => null,
+ },
+ .ps4 => switch (builtin.cpu.arch) {
+ // https://github.com/ps4dev/ps4sdk/blob/4df9d001b66ae4ec07d9a51b62d1e4c5e270eecc/include/machine/param.h#L95
+ .x86, .x86_64 => 4 << 10,
+ else => null,
+ },
+ .ps5 => switch (builtin.cpu.arch) {
+ // https://github.com/PS5Dev/PS5SDK/blob/a2e03a2a0231a3a3397fa6cd087a01ca6d04f273/include/machine/param.h#L95
+ .x86, .x86_64 => 16 << 10,
+ else => null,
+ },
+ // system/lib/libc/musl/arch/emscripten/bits/limits.h
+ .emscripten => 64 << 10,
+ .linux => switch (builtin.cpu.arch) {
+ // Linux/arch/*/Kconfig
+ .arc => 4 << 10,
+ .thumb, .thumbeb, .arm, .armeb => 4 << 10,
+ .aarch64, .aarch64_be => 4 << 10,
+ .csky => 4 << 10,
+ .hexagon => 4 << 10,
+ .loongarch32, .loongarch64 => 4 << 10,
+ .m68k => 4 << 10,
+ .mips, .mipsel, .mips64, .mips64el => 4 << 10,
+ .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
+ .riscv32, .riscv64 => 4 << 10,
+ .s390x => 4 << 10,
+ .sparc => 4 << 10,
+ .sparc64 => 8 << 10,
+ .x86, .x86_64 => 4 << 10,
+ .xtensa => 4 << 10,
+ else => null,
+ },
+ .freestanding => switch (builtin.cpu.arch) {
+ .wasm32, .wasm64 => 64 << 10,
+ else => null,
+ },
+ else => null,
+};
+
+const page_size_max_default: ?usize = switch (builtin.os.tag) {
+ .driverkit, .ios, .macos, .tvos, .visionos, .watchos => switch (builtin.cpu.arch) {
+ .x86_64 => 4 << 10,
+ .aarch64 => 16 << 10,
+ else => null,
+ },
+ .windows => switch (builtin.cpu.arch) {
+ // -- <https://devblogs.microsoft.com/oldnewthing/20210510-00/?p=105200>
+ .x86, .x86_64 => 4 << 10,
+ // SuperH => 4 << 10,
+ .mips, .mipsel, .mips64, .mips64el => 4 << 10,
+ .powerpc, .powerpcle, .powerpc64, .powerpc64le => 4 << 10,
+ // DEC Alpha => 8 << 10,
+ // Itanium => 8 << 10,
+ .thumb, .thumbeb, .arm, .armeb, .aarch64, .aarch64_be => 4 << 10,
+ else => null,
+ },
+ .wasi => switch (builtin.cpu.arch) {
+ .wasm32, .wasm64 => 64 << 10,
+ else => null,
+ },
+ // https://github.com/tianocore/edk2/blob/b158dad150bf02879668f72ce306445250838201/MdePkg/Include/Uefi/UefiBaseType.h#L180-L187
+ .uefi => 4 << 10,
+ .freebsd => switch (builtin.cpu.arch) {
+ // FreeBSD/sys/*
+ .x86, .x86_64 => 4 << 10,
+ .thumb, .thumbeb, .arm, .armeb => 4 << 10,
+ .aarch64, .aarch64_be => 4 << 10,
+ .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
+ .riscv32, .riscv64 => 4 << 10,
+ else => null,
+ },
+ .netbsd => switch (builtin.cpu.arch) {
+ // NetBSD/sys/arch/*
+ .x86, .x86_64 => 4 << 10,
+ .thumb, .thumbeb, .arm, .armeb => 4 << 10,
+ .aarch64, .aarch64_be => 64 << 10,
+ .mips, .mipsel, .mips64, .mips64el => 16 << 10,
+ .powerpc, .powerpc64, .powerpc64le, .powerpcle => 16 << 10,
+ .sparc => 8 << 10,
+ .sparc64 => 8 << 10,
+ .riscv32, .riscv64 => 4 << 10,
+ .m68k => 8 << 10,
+ else => null,
+ },
+ .dragonfly => switch (builtin.cpu.arch) {
+ .x86, .x86_64 => 4 << 10,
+ else => null,
+ },
+ .openbsd => switch (builtin.cpu.arch) {
+ // OpenBSD/sys/arch/*
+ .x86, .x86_64 => 4 << 10,
+ .thumb, .thumbeb, .arm, .armeb, .aarch64, .aarch64_be => 4 << 10,
+ .mips64, .mips64el => 16 << 10,
+ .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
+ .riscv64 => 4 << 10,
+ .sparc64 => 8 << 10,
+ else => null,
+ },
+ .solaris, .illumos => switch (builtin.cpu.arch) {
+ // src/uts/*/sys/machparam.h
+ .x86, .x86_64 => 4 << 10,
+ .sparc, .sparc64 => 8 << 10,
+ else => null,
+ },
+ .fuchsia => switch (builtin.cpu.arch) {
+ // fuchsia/kernel/arch/*/include/arch/defines.h
+ .x86_64 => 4 << 10,
+ .aarch64, .aarch64_be => 4 << 10,
+ .riscv64 => 4 << 10,
+ else => null,
+ },
+ // https://github.com/SerenityOS/serenity/blob/62b938b798dc009605b5df8a71145942fc53808b/Kernel/API/POSIX/sys/limits.h#L11-L13
+ .serenity => 4 << 10,
+ .haiku => switch (builtin.cpu.arch) {
+ // haiku/headers/posix/arch/*/limits.h
+ .thumb, .thumbeb, .arm, .armeb => 4 << 10,
+ .aarch64, .aarch64_be => 4 << 10,
+ .m68k => 4 << 10,
+ .mips, .mipsel, .mips64, .mips64el => 4 << 10,
+ .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
+ .riscv64 => 4 << 10,
+ .sparc64 => 8 << 10,
+ .x86, .x86_64 => 4 << 10,
+ else => null,
+ },
+ .hurd => switch (builtin.cpu.arch) {
+ // gnumach/*/include/mach/*/vm_param.h
+ .x86, .x86_64 => 4 << 10,
+ .aarch64 => null,
+ else => null,
+ },
+ .plan9 => switch (builtin.cpu.arch) {
+ // 9front/sys/src/9/*/mem.h
+ .x86, .x86_64 => 4 << 10,
+ .thumb, .thumbeb, .arm, .armeb => 4 << 10,
+ .aarch64, .aarch64_be => 64 << 10,
+ .mips, .mipsel, .mips64, .mips64el => 16 << 10,
+ .powerpc, .powerpcle, .powerpc64, .powerpc64le => 4 << 10,
+ .sparc => 4 << 10,
+ else => null,
+ },
+ .ps3 => switch (builtin.cpu.arch) {
+ // cell/SDK_doc/en/html/C_and_C++_standard_libraries/stdlib.html
+ .powerpc64 => 1 << 20, // 1 MiB
+ else => null,
+ },
+ .ps4 => switch (builtin.cpu.arch) {
+ // https://github.com/ps4dev/ps4sdk/blob/4df9d001b66ae4ec07d9a51b62d1e4c5e270eecc/include/machine/param.h#L95
+ .x86, .x86_64 => 4 << 10,
+ else => null,
+ },
+ .ps5 => switch (builtin.cpu.arch) {
+ // https://github.com/PS5Dev/PS5SDK/blob/a2e03a2a0231a3a3397fa6cd087a01ca6d04f273/include/machine/param.h#L95
+ .x86, .x86_64 => 16 << 10,
+ else => null,
+ },
+ // system/lib/libc/musl/arch/emscripten/bits/limits.h
+ .emscripten => 64 << 10,
+ .linux => switch (builtin.cpu.arch) {
+ // Linux/arch/*/Kconfig
+ .arc => 16 << 10,
+ .thumb, .thumbeb, .arm, .armeb => 4 << 10,
+ .aarch64, .aarch64_be => 64 << 10,
+ .csky => 4 << 10,
+ .hexagon => 256 << 10,
+ .loongarch32, .loongarch64 => 64 << 10,
+ .m68k => 8 << 10,
+ .mips, .mipsel, .mips64, .mips64el => 64 << 10,
+ .powerpc, .powerpc64, .powerpc64le, .powerpcle => 256 << 10,
+ .riscv32, .riscv64 => 4 << 10,
+ .s390x => 4 << 10,
+ .sparc => 4 << 10,
+ .sparc64 => 8 << 10,
+ .x86, .x86_64 => 4 << 10,
+ .xtensa => 4 << 10,
+ else => null,
+ },
+ .freestanding => switch (builtin.cpu.arch) {
+ .wasm32, .wasm64 => 64 << 10,
+ else => null,
+ },
+ else => null,
+};
+
test {
- _ = LoggingAllocator;
- _ = LogToWriterAllocator;
- _ = ScopedLoggingAllocator;
_ = @import("heap/memory_pool.zig");
_ = ArenaAllocator;
_ = GeneralPurposeAllocator;
+ _ = FixedBufferAllocator;
+ _ = ThreadSafeAllocator;
if (builtin.target.isWasm()) {
_ = WasmAllocator;
}
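
The two tables above pin down comptime lower and upper bounds for the page size; the actual value is now queried at runtime via `std.heap.pageSize()`, as the call-site changes elsewhere in this diff show. A minimal sketch of how the three values relate, assuming the post-patch `std.heap` names:

```zig
const std = @import("std");

test "runtime page size stays within the comptime bounds" {
    // Comptime bounds, derived from tables like the ones above.
    const min = std.heap.page_size_min;
    const max = std.heap.page_size_max;
    // Runtime query of the actual page size.
    const actual = std.heap.pageSize();

    try std.testing.expect(std.math.isPowerOfTwo(actual));
    try std.testing.expect(actual >= min and actual <= max);
}
```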
diff --git a/lib/std/heap/FixedBufferAllocator.zig b/lib/std/heap/FixedBufferAllocator.zig
new file mode 100644
index 0000000000..0951dd3bcc
--- /dev/null
+++ b/lib/std/heap/FixedBufferAllocator.zig
@@ -0,0 +1,230 @@
+const std = @import("../std.zig");
+const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+const mem = std.mem;
+
+const FixedBufferAllocator = @This();
+
+end_index: usize,
+buffer: []u8,
+
+pub fn init(buffer: []u8) FixedBufferAllocator {
+ return .{
+ .buffer = buffer,
+ .end_index = 0,
+ };
+}
+
+/// Using this at the same time as the interface returned by `threadSafeAllocator` is not thread safe.
+pub fn allocator(self: *FixedBufferAllocator) Allocator {
+ return .{
+ .ptr = self,
+ .vtable = &.{
+ .alloc = alloc,
+ .resize = resize,
+ .remap = remap,
+ .free = free,
+ },
+ };
+}
+
+/// Provides a lock-free, thread-safe `Allocator` interface to the underlying `FixedBufferAllocator`.
+///
+/// Using this at the same time as the interface returned by `allocator` is not thread safe.
+pub fn threadSafeAllocator(self: *FixedBufferAllocator) Allocator {
+ return .{
+ .ptr = self,
+ .vtable = &.{
+ .alloc = threadSafeAlloc,
+ .resize = Allocator.noResize,
+ .remap = Allocator.noRemap,
+ .free = Allocator.noFree,
+ },
+ };
+}
+
+pub fn ownsPtr(self: *FixedBufferAllocator, ptr: [*]u8) bool {
+ return sliceContainsPtr(self.buffer, ptr);
+}
+
+pub fn ownsSlice(self: *FixedBufferAllocator, slice: []u8) bool {
+ return sliceContainsSlice(self.buffer, slice);
+}
+
+/// This can produce false negatives when the last allocation had an
+/// adjusted_index. In that case we cannot determine what the last
+/// allocation was, because the alignForward operation done in alloc is
+/// not reversible.
+pub fn isLastAllocation(self: *FixedBufferAllocator, buf: []u8) bool {
+ return buf.ptr + buf.len == self.buffer.ptr + self.end_index;
+}
+
+pub fn alloc(ctx: *anyopaque, n: usize, alignment: mem.Alignment, ra: usize) ?[*]u8 {
+ const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
+ _ = ra;
+ const ptr_align = alignment.toByteUnits();
+ const adjust_off = mem.alignPointerOffset(self.buffer.ptr + self.end_index, ptr_align) orelse return null;
+ const adjusted_index = self.end_index + adjust_off;
+ const new_end_index = adjusted_index + n;
+ if (new_end_index > self.buffer.len) return null;
+ self.end_index = new_end_index;
+ return self.buffer.ptr + adjusted_index;
+}
+
+pub fn resize(
+ ctx: *anyopaque,
+ buf: []u8,
+ alignment: mem.Alignment,
+ new_size: usize,
+ return_address: usize,
+) bool {
+ const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
+ _ = alignment;
+ _ = return_address;
+ assert(@inComptime() or self.ownsSlice(buf));
+
+ if (!self.isLastAllocation(buf)) {
+ if (new_size > buf.len) return false;
+ return true;
+ }
+
+ if (new_size <= buf.len) {
+ const sub = buf.len - new_size;
+ self.end_index -= sub;
+ return true;
+ }
+
+ const add = new_size - buf.len;
+ if (add + self.end_index > self.buffer.len) return false;
+
+ self.end_index += add;
+ return true;
+}
+
+pub fn remap(
+ context: *anyopaque,
+ memory: []u8,
+ alignment: mem.Alignment,
+ new_len: usize,
+ return_address: usize,
+) ?[*]u8 {
+ return if (resize(context, memory, alignment, new_len, return_address)) memory.ptr else null;
+}
+
+pub fn free(
+ ctx: *anyopaque,
+ buf: []u8,
+ alignment: mem.Alignment,
+ return_address: usize,
+) void {
+ const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
+ _ = alignment;
+ _ = return_address;
+ assert(@inComptime() or self.ownsSlice(buf));
+
+ if (self.isLastAllocation(buf)) {
+ self.end_index -= buf.len;
+ }
+}
+
+fn threadSafeAlloc(ctx: *anyopaque, n: usize, alignment: mem.Alignment, ra: usize) ?[*]u8 {
+ const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
+ _ = ra;
+ const ptr_align = alignment.toByteUnits();
+ var end_index = @atomicLoad(usize, &self.end_index, .seq_cst);
+ while (true) {
+ const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse return null;
+ const adjusted_index = end_index + adjust_off;
+ const new_end_index = adjusted_index + n;
+ if (new_end_index > self.buffer.len) return null;
+ end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, .seq_cst, .seq_cst) orelse
+ return self.buffer[adjusted_index..new_end_index].ptr;
+ }
+}
+
+pub fn reset(self: *FixedBufferAllocator) void {
+ self.end_index = 0;
+}
+
+fn sliceContainsPtr(container: []u8, ptr: [*]u8) bool {
+ return @intFromPtr(ptr) >= @intFromPtr(container.ptr) and
+ @intFromPtr(ptr) < (@intFromPtr(container.ptr) + container.len);
+}
+
+fn sliceContainsSlice(container: []u8, slice: []u8) bool {
+ return @intFromPtr(slice.ptr) >= @intFromPtr(container.ptr) and
+ (@intFromPtr(slice.ptr) + slice.len) <= (@intFromPtr(container.ptr) + container.len);
+}
+
+var test_fixed_buffer_allocator_memory: [800000 * @sizeOf(u64)]u8 = undefined;
+
+test FixedBufferAllocator {
+ var fixed_buffer_allocator = mem.validationWrap(FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]));
+ const a = fixed_buffer_allocator.allocator();
+
+ try std.heap.testAllocator(a);
+ try std.heap.testAllocatorAligned(a);
+ try std.heap.testAllocatorLargeAlignment(a);
+ try std.heap.testAllocatorAlignedShrink(a);
+}
+
+test reset {
+ var buf: [8]u8 align(@alignOf(u64)) = undefined;
+ var fba = FixedBufferAllocator.init(buf[0..]);
+ const a = fba.allocator();
+
+ const X = 0xeeeeeeeeeeeeeeee;
+ const Y = 0xffffffffffffffff;
+
+ const x = try a.create(u64);
+ x.* = X;
+ try std.testing.expectError(error.OutOfMemory, a.create(u64));
+
+ fba.reset();
+ const y = try a.create(u64);
+ y.* = Y;
+
+ // we expect Y to have overwritten X.
+ try std.testing.expect(x.* == y.*);
+ try std.testing.expect(y.* == Y);
+}
+
+test "reuse memory on realloc" {
+ var small_fixed_buffer: [10]u8 = undefined;
+ // check if we re-use the memory
+ {
+ var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
+ const a = fixed_buffer_allocator.allocator();
+
+ const slice0 = try a.alloc(u8, 5);
+ try std.testing.expect(slice0.len == 5);
+ const slice1 = try a.realloc(slice0, 10);
+ try std.testing.expect(slice1.ptr == slice0.ptr);
+ try std.testing.expect(slice1.len == 10);
+ try std.testing.expectError(error.OutOfMemory, a.realloc(slice1, 11));
+ }
+ // check that we don't re-use the memory if it's not the most recent block
+ {
+ var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
+ const a = fixed_buffer_allocator.allocator();
+
+ var slice0 = try a.alloc(u8, 2);
+ slice0[0] = 1;
+ slice0[1] = 2;
+ const slice1 = try a.alloc(u8, 2);
+ const slice2 = try a.realloc(slice0, 4);
+ try std.testing.expect(slice0.ptr != slice2.ptr);
+ try std.testing.expect(slice1.ptr != slice2.ptr);
+ try std.testing.expect(slice2[0] == 1);
+ try std.testing.expect(slice2[1] == 2);
+ }
+}
+
+test "thread safe version" {
+ var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
+
+ try std.heap.testAllocator(fixed_buffer_allocator.threadSafeAllocator());
+ try std.heap.testAllocatorAligned(fixed_buffer_allocator.threadSafeAllocator());
+ try std.heap.testAllocatorLargeAlignment(fixed_buffer_allocator.threadSafeAllocator());
+ try std.heap.testAllocatorAlignedShrink(fixed_buffer_allocator.threadSafeAllocator());
+}
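
Not part of the patch, but for orientation: the extracted `FixedBufferAllocator` is typically paired with `reset` to reuse one buffer across iterations. A small usage sketch:

```zig
const std = @import("std");

test "FixedBufferAllocator reuse via reset" {
    var buf: [1024]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&buf);
    const a = fba.allocator();

    for (0..3) |i| {
        // Each iteration starts from an empty buffer again.
        defer fba.reset();
        const nums = try a.alloc(u32, 8);
        const v: u32 = @intCast(i);
        @memset(nums, v);
        try std.testing.expectEqual(v, nums[0]);
    }
}
```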
diff --git a/lib/std/heap/PageAllocator.zig b/lib/std/heap/PageAllocator.zig
index 4188c25528..433e0f1218 100644
--- a/lib/std/heap/PageAllocator.zig
+++ b/lib/std/heap/PageAllocator.zig
@@ -7,107 +7,183 @@ const assert = std.debug.assert;
const native_os = builtin.os.tag;
const windows = std.os.windows;
const posix = std.posix;
+const page_size_min = std.heap.page_size_min;
-pub const vtable = Allocator.VTable{
+pub const vtable: Allocator.VTable = .{
.alloc = alloc,
.resize = resize,
+ .remap = remap,
.free = free,
};
-fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
+fn alloc(context: *anyopaque, n: usize, alignment: mem.Alignment, ra: usize) ?[*]u8 {
+ _ = context;
_ = ra;
- _ = log2_align;
assert(n > 0);
- if (n > maxInt(usize) - (mem.page_size - 1)) return null;
+
+ const page_size = std.heap.pageSize();
+ if (n >= maxInt(usize) - page_size) return null;
+ const alignment_bytes = alignment.toByteUnits();
if (native_os == .windows) {
+        // According to the official documentation, VirtualAlloc aligns to a page
+        // boundary; empirically, however, it reserves pages on a 64K boundary.
+        // Since it is very likely the requested alignment will be honored,
+        // this logic first tries a call with exactly the size requested
+        // before falling back to the loop below.
+ // https://devblogs.microsoft.com/oldnewthing/?p=42223
const addr = windows.VirtualAlloc(
null,
-
// VirtualAlloc will round the length to a multiple of page size.
- // VirtualAlloc docs: If the lpAddress parameter is NULL, this value is rounded up to the next page boundary
+ // "If the lpAddress parameter is NULL, this value is rounded up to
+ // the next page boundary".
n,
-
windows.MEM_COMMIT | windows.MEM_RESERVE,
windows.PAGE_READWRITE,
) catch return null;
- return @ptrCast(addr);
+
+ if (mem.isAligned(@intFromPtr(addr), alignment_bytes))
+ return @ptrCast(addr);
+
+ // Fallback: reserve a range of memory large enough to find a
+ // sufficiently aligned address, then free the entire range and
+ // immediately allocate the desired subset. Another thread may have won
+ // the race to map the target range, in which case a retry is needed.
+ windows.VirtualFree(addr, 0, windows.MEM_RELEASE);
+
+ const overalloc_len = n + alignment_bytes - page_size;
+ const aligned_len = mem.alignForward(usize, n, page_size);
+
+ while (true) {
+ const reserved_addr = windows.VirtualAlloc(
+ null,
+ overalloc_len,
+ windows.MEM_RESERVE,
+ windows.PAGE_NOACCESS,
+ ) catch return null;
+ const aligned_addr = mem.alignForward(usize, @intFromPtr(reserved_addr), alignment_bytes);
+ windows.VirtualFree(reserved_addr, 0, windows.MEM_RELEASE);
+ const ptr = windows.VirtualAlloc(
+ @ptrFromInt(aligned_addr),
+ aligned_len,
+ windows.MEM_COMMIT | windows.MEM_RESERVE,
+ windows.PAGE_READWRITE,
+ ) catch continue;
+ return @ptrCast(ptr);
+ }
}
- const aligned_len = mem.alignForward(usize, n, mem.page_size);
+ const aligned_len = mem.alignForward(usize, n, page_size);
+ const max_drop_len = alignment_bytes - @min(alignment_bytes, page_size);
+ const overalloc_len = if (max_drop_len <= aligned_len - n)
+ aligned_len
+ else
+ mem.alignForward(usize, aligned_len + max_drop_len, page_size);
const hint = @atomicLoad(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, .unordered);
const slice = posix.mmap(
hint,
- aligned_len,
+ overalloc_len,
posix.PROT.READ | posix.PROT.WRITE,
.{ .TYPE = .PRIVATE, .ANONYMOUS = true },
-1,
0,
) catch return null;
- assert(mem.isAligned(@intFromPtr(slice.ptr), mem.page_size));
- const new_hint: [*]align(mem.page_size) u8 = @alignCast(slice.ptr + aligned_len);
+ const result_ptr = mem.alignPointer(slice.ptr, alignment_bytes) orelse return null;
+    // Unmap the extra bytes that were only requested in order to guarantee
+    // that the range of memory we were provided contained a properly aligned
+    // address somewhere. The extra bytes could be at the beginning, the end, or both.
+ const drop_len = result_ptr - slice.ptr;
+ if (drop_len != 0) posix.munmap(slice[0..drop_len]);
+ const remaining_len = overalloc_len - drop_len;
+ if (remaining_len > aligned_len) posix.munmap(@alignCast(result_ptr[aligned_len..remaining_len]));
+ const new_hint: [*]align(page_size_min) u8 = @alignCast(result_ptr + aligned_len);
_ = @cmpxchgStrong(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, hint, new_hint, .monotonic, .monotonic);
- return slice.ptr;
+ return result_ptr;
}
fn resize(
- _: *anyopaque,
- buf_unaligned: []u8,
- log2_buf_align: u8,
- new_size: usize,
+ context: *anyopaque,
+ memory: []u8,
+ alignment: mem.Alignment,
+ new_len: usize,
return_address: usize,
) bool {
- _ = log2_buf_align;
+ _ = context;
+ _ = alignment;
_ = return_address;
- const new_size_aligned = mem.alignForward(usize, new_size, mem.page_size);
+ return realloc(memory, new_len, false) != null;
+}
+
+pub fn remap(
+ context: *anyopaque,
+ memory: []u8,
+ alignment: mem.Alignment,
+ new_len: usize,
+ return_address: usize,
+) ?[*]u8 {
+ _ = context;
+ _ = alignment;
+ _ = return_address;
+ return realloc(memory, new_len, true);
+}
+
+fn free(context: *anyopaque, slice: []u8, alignment: mem.Alignment, return_address: usize) void {
+ _ = context;
+ _ = alignment;
+ _ = return_address;
+
+ if (native_os == .windows) {
+ windows.VirtualFree(slice.ptr, 0, windows.MEM_RELEASE);
+ } else {
+ const buf_aligned_len = mem.alignForward(usize, slice.len, std.heap.pageSize());
+ posix.munmap(@alignCast(slice.ptr[0..buf_aligned_len]));
+ }
+}
+
+fn realloc(uncasted_memory: []u8, new_len: usize, may_move: bool) ?[*]u8 {
+ const memory: []align(std.heap.page_size_min) u8 = @alignCast(uncasted_memory);
+ const page_size = std.heap.pageSize();
+ const new_size_aligned = mem.alignForward(usize, new_len, page_size);
if (native_os == .windows) {
- if (new_size <= buf_unaligned.len) {
- const base_addr = @intFromPtr(buf_unaligned.ptr);
- const old_addr_end = base_addr + buf_unaligned.len;
- const new_addr_end = mem.alignForward(usize, base_addr + new_size, mem.page_size);
+ if (new_len <= memory.len) {
+ const base_addr = @intFromPtr(memory.ptr);
+ const old_addr_end = base_addr + memory.len;
+ const new_addr_end = mem.alignForward(usize, base_addr + new_len, page_size);
if (old_addr_end > new_addr_end) {
- // For shrinking that is not releasing, we will only
- // decommit the pages not needed anymore.
+ // For shrinking that is not releasing, we will only decommit
+ // the pages not needed anymore.
windows.VirtualFree(
- @as(*anyopaque, @ptrFromInt(new_addr_end)),
+ @ptrFromInt(new_addr_end),
old_addr_end - new_addr_end,
windows.MEM_DECOMMIT,
);
}
- return true;
+ return memory.ptr;
}
- const old_size_aligned = mem.alignForward(usize, buf_unaligned.len, mem.page_size);
+ const old_size_aligned = mem.alignForward(usize, memory.len, page_size);
if (new_size_aligned <= old_size_aligned) {
- return true;
+ return memory.ptr;
}
- return false;
+ return null;
}
- const buf_aligned_len = mem.alignForward(usize, buf_unaligned.len, mem.page_size);
- if (new_size_aligned == buf_aligned_len)
- return true;
+ const page_aligned_len = mem.alignForward(usize, memory.len, page_size);
+ if (new_size_aligned == page_aligned_len)
+ return memory.ptr;
- if (new_size_aligned < buf_aligned_len) {
- const ptr = buf_unaligned.ptr + new_size_aligned;
- // TODO: if the next_mmap_addr_hint is within the unmapped range, update it
- posix.munmap(@alignCast(ptr[0 .. buf_aligned_len - new_size_aligned]));
- return true;
+ if (posix.MREMAP != void) {
+ // TODO: if the next_mmap_addr_hint is within the remapped range, update it
+ const new_memory = posix.mremap(memory.ptr, memory.len, new_len, .{ .MAYMOVE = may_move }, null) catch return null;
+ return new_memory.ptr;
}
- // TODO: call mremap
- // TODO: if the next_mmap_addr_hint is within the remapped range, update it
- return false;
-}
-
-fn free(_: *anyopaque, slice: []u8, log2_buf_align: u8, return_address: usize) void {
- _ = log2_buf_align;
- _ = return_address;
-
- if (native_os == .windows) {
- windows.VirtualFree(slice.ptr, 0, windows.MEM_RELEASE);
- } else {
- const buf_aligned_len = mem.alignForward(usize, slice.len, mem.page_size);
- posix.munmap(@alignCast(slice.ptr[0..buf_aligned_len]));
+ if (new_size_aligned < page_aligned_len) {
+ const ptr = memory.ptr + new_size_aligned;
+ // TODO: if the next_mmap_addr_hint is within the unmapped range, update it
+ posix.munmap(@alignCast(ptr[0 .. page_aligned_len - new_size_aligned]));
+ return memory.ptr;
}
+
+ return null;
}
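
To make the POSIX branch of `alloc` above concrete, here is the over-allocation arithmetic worked through with hypothetical numbers (4 KiB pages, a 10 000-byte request, 64 KiB alignment); the values are illustrative, not taken from the patch:

```zig
const std = @import("std");

test "over-allocation sizing used by PageAllocator.alloc" {
    const page_size: usize = 4 << 10; // assumed runtime page size
    const n: usize = 10_000; // requested length
    const alignment_bytes: usize = 64 << 10; // requested alignment

    const aligned_len = std.mem.alignForward(usize, n, page_size);
    // Worst case, mmap returns an address just past an aligned one, so up to
    // `alignment - page_size` leading bytes may have to be dropped.
    const max_drop_len = alignment_bytes - @min(alignment_bytes, page_size);
    const overalloc_len = if (max_drop_len <= aligned_len - n)
        aligned_len
    else
        std.mem.alignForward(usize, aligned_len + max_drop_len, page_size);

    try std.testing.expectEqual(@as(usize, 12 << 10), aligned_len); // 3 pages
    try std.testing.expectEqual(@as(usize, 60 << 10), max_drop_len);
    try std.testing.expectEqual(@as(usize, 72 << 10), overalloc_len); // 18 pages mapped, then trimmed
}
```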
diff --git a/lib/std/heap/ThreadSafeAllocator.zig b/lib/std/heap/ThreadSafeAllocator.zig
index 12bb095b30..dc8bf89017 100644
--- a/lib/std/heap/ThreadSafeAllocator.zig
+++ b/lib/std/heap/ThreadSafeAllocator.zig
@@ -9,35 +9,45 @@ pub fn allocator(self: *ThreadSafeAllocator) Allocator {
.vtable = &.{
.alloc = alloc,
.resize = resize,
+ .remap = remap,
.free = free,
},
};
}
-fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
+fn alloc(ctx: *anyopaque, n: usize, alignment: std.mem.Alignment, ra: usize) ?[*]u8 {
const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx));
self.mutex.lock();
defer self.mutex.unlock();
- return self.child_allocator.rawAlloc(n, log2_ptr_align, ra);
+ return self.child_allocator.rawAlloc(n, alignment, ra);
}
-fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool {
+fn resize(ctx: *anyopaque, buf: []u8, alignment: std.mem.Alignment, new_len: usize, ret_addr: usize) bool {
const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx));
self.mutex.lock();
defer self.mutex.unlock();
- return self.child_allocator.rawResize(buf, log2_buf_align, new_len, ret_addr);
+ return self.child_allocator.rawResize(buf, alignment, new_len, ret_addr);
}
-fn free(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, ret_addr: usize) void {
+fn remap(context: *anyopaque, memory: []u8, alignment: std.mem.Alignment, new_len: usize, return_address: usize) ?[*]u8 {
+ const self: *ThreadSafeAllocator = @ptrCast(@alignCast(context));
+
+ self.mutex.lock();
+ defer self.mutex.unlock();
+
+ return self.child_allocator.rawRemap(memory, alignment, new_len, return_address);
+}
+
+fn free(ctx: *anyopaque, buf: []u8, alignment: std.mem.Alignment, ret_addr: usize) void {
const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx));
self.mutex.lock();
defer self.mutex.unlock();
- return self.child_allocator.rawFree(buf, log2_buf_align, ret_addr);
+ return self.child_allocator.rawFree(buf, alignment, ret_addr);
}
const std = @import("../std.zig");
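
For context (not in the diff): `ThreadSafeAllocator` is meant to be layered over a non-thread-safe child allocator, serializing every vtable call with its mutex. A sketch, assuming the struct keeps its `child_allocator` field with a defaulted mutex:

```zig
const std = @import("std");

test "ThreadSafeAllocator over a FixedBufferAllocator" {
    var buf: [4096]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&buf);

    // Every alloc/resize/remap/free goes through the wrapper's mutex.
    var tsa: std.heap.ThreadSafeAllocator = .{ .child_allocator = fba.allocator() };
    const a = tsa.allocator();

    const bytes = try a.alloc(u8, 128);
    defer a.free(bytes);
    try std.testing.expectEqual(@as(usize, 128), bytes.len);
}
```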
diff --git a/lib/std/heap/WasmAllocator.zig b/lib/std/heap/WasmAllocator.zig
index fea6ae5f52..e30ac5ab01 100644
--- a/lib/std/heap/WasmAllocator.zig
+++ b/lib/std/heap/WasmAllocator.zig
@@ -20,6 +20,7 @@ comptime {
pub const vtable: Allocator.VTable = .{
.alloc = alloc,
.resize = resize,
+ .remap = remap,
.free = free,
};
@@ -40,18 +41,17 @@ const size_class_count = math.log2(bigpage_size) - min_class;
/// etc.
const big_size_class_count = math.log2(bigpage_count);
-var next_addrs = [1]usize{0} ** size_class_count;
+var next_addrs: [size_class_count]usize = @splat(0);
/// For each size class, points to the freed pointer.
-var frees = [1]usize{0} ** size_class_count;
+var frees: [size_class_count]usize = @splat(0);
/// For each big size class, points to the freed pointer.
-var big_frees = [1]usize{0} ** big_size_class_count;
+var big_frees: [big_size_class_count]usize = @splat(0);
-fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, return_address: usize) ?[*]u8 {
+fn alloc(ctx: *anyopaque, len: usize, alignment: mem.Alignment, return_address: usize) ?[*]u8 {
_ = ctx;
_ = return_address;
// Make room for the freelist next pointer.
- const alignment = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_align));
- const actual_len = @max(len +| @sizeOf(usize), alignment);
+ const actual_len = @max(len +| @sizeOf(usize), alignment.toByteUnits());
const slot_size = math.ceilPowerOfTwo(usize, actual_len) catch return null;
const class = math.log2(slot_size) - min_class;
if (class < size_class_count) {
@@ -86,7 +86,7 @@ fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, return_address: usize) ?[*
fn resize(
ctx: *anyopaque,
buf: []u8,
- log2_buf_align: u8,
+ alignment: mem.Alignment,
new_len: usize,
return_address: usize,
) bool {
@@ -94,7 +94,7 @@ fn resize(
_ = return_address;
// We don't want to move anything from one size class to another, but we
// can recover bytes in between powers of two.
- const buf_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_buf_align));
+ const buf_align = alignment.toByteUnits();
const old_actual_len = @max(buf.len + @sizeOf(usize), buf_align);
const new_actual_len = @max(new_len +| @sizeOf(usize), buf_align);
const old_small_slot_size = math.ceilPowerOfTwoAssert(usize, old_actual_len);
@@ -111,15 +111,25 @@ fn resize(
}
}
+fn remap(
+ context: *anyopaque,
+ memory: []u8,
+ alignment: mem.Alignment,
+ new_len: usize,
+ return_address: usize,
+) ?[*]u8 {
+ return if (resize(context, memory, alignment, new_len, return_address)) memory.ptr else null;
+}
+
fn free(
ctx: *anyopaque,
buf: []u8,
- log2_buf_align: u8,
+ alignment: mem.Alignment,
return_address: usize,
) void {
_ = ctx;
_ = return_address;
- const buf_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_buf_align));
+ const buf_align = alignment.toByteUnits();
const actual_len = @max(buf.len + @sizeOf(usize), buf_align);
const slot_size = math.ceilPowerOfTwoAssert(usize, actual_len);
const class = math.log2(slot_size) - min_class;
@@ -160,7 +170,7 @@ fn allocBigPages(n: usize) usize {
return @as(usize, @intCast(page_index)) * wasm.page_size;
}
-const test_ally = Allocator{
+const test_ally: Allocator = .{
.ptr = undefined,
.vtable = &vtable,
};
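
A sketch (not in the patch) of the size-class selection that `alloc` above performs with the new `mem.Alignment` parameter: room is reserved for the freelist next pointer, alignment is honored, and the result is rounded up to the next power of two:

```zig
const std = @import("std");

test "size class rounding as in WasmAllocator.alloc" {
    const len: usize = 100;
    const alignment: std.mem.Alignment = .fromByteUnits(8);

    // Make room for the freelist next pointer, honoring the requested alignment.
    const actual_len = @max(len +| @sizeOf(usize), alignment.toByteUnits());
    // Every small allocation lands in the next power-of-two slot size.
    const slot_size = try std.math.ceilPowerOfTwo(usize, actual_len);

    try std.testing.expectEqual(@as(usize, 128), slot_size);
}
```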
diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig
index 3cff6b439f..15d9044479 100644
--- a/lib/std/heap/arena_allocator.zig
+++ b/lib/std/heap/arena_allocator.zig
@@ -29,12 +29,14 @@ pub const ArenaAllocator = struct {
.vtable = &.{
.alloc = alloc,
.resize = resize,
+ .remap = remap,
.free = free,
},
};
}
const BufNode = std.SinglyLinkedList(usize).Node;
+ const BufNode_alignment: mem.Alignment = .fromByteUnits(@alignOf(BufNode));
pub fn init(child_allocator: Allocator) ArenaAllocator {
return (State{}).promote(child_allocator);
@@ -47,9 +49,8 @@ pub const ArenaAllocator = struct {
while (it) |node| {
// this has to occur before the free because the free frees node
const next_it = node.next;
- const align_bits = std.math.log2_int(usize, @alignOf(BufNode));
const alloc_buf = @as([*]u8, @ptrCast(node))[0..node.data];
- self.child_allocator.rawFree(alloc_buf, align_bits, @returnAddress());
+ self.child_allocator.rawFree(alloc_buf, BufNode_alignment, @returnAddress());
it = next_it;
}
}
@@ -120,7 +121,6 @@ pub const ArenaAllocator = struct {
return true;
}
const total_size = requested_capacity + @sizeOf(BufNode);
- const align_bits = std.math.log2_int(usize, @alignOf(BufNode));
// Free all nodes except for the last one
var it = self.state.buffer_list.first;
const maybe_first_node = while (it) |node| {
@@ -129,7 +129,7 @@ pub const ArenaAllocator = struct {
if (next_it == null)
break node;
const alloc_buf = @as([*]u8, @ptrCast(node))[0..node.data];
- self.child_allocator.rawFree(alloc_buf, align_bits, @returnAddress());
+ self.child_allocator.rawFree(alloc_buf, BufNode_alignment, @returnAddress());
it = next_it;
} else null;
std.debug.assert(maybe_first_node == null or maybe_first_node.?.next == null);
@@ -141,16 +141,16 @@ pub const ArenaAllocator = struct {
if (first_node.data == total_size)
return true;
const first_alloc_buf = @as([*]u8, @ptrCast(first_node))[0..first_node.data];
- if (self.child_allocator.rawResize(first_alloc_buf, align_bits, total_size, @returnAddress())) {
+ if (self.child_allocator.rawResize(first_alloc_buf, BufNode_alignment, total_size, @returnAddress())) {
// successful resize
first_node.data = total_size;
} else {
// manual realloc
- const new_ptr = self.child_allocator.rawAlloc(total_size, align_bits, @returnAddress()) orelse {
+ const new_ptr = self.child_allocator.rawAlloc(total_size, BufNode_alignment, @returnAddress()) orelse {
// we failed to preheat the arena properly, signal this to the user.
return false;
};
- self.child_allocator.rawFree(first_alloc_buf, align_bits, @returnAddress());
+ self.child_allocator.rawFree(first_alloc_buf, BufNode_alignment, @returnAddress());
const node: *BufNode = @ptrCast(@alignCast(new_ptr));
node.* = .{ .data = total_size };
self.state.buffer_list.first = node;
@@ -163,8 +163,7 @@ pub const ArenaAllocator = struct {
const actual_min_size = minimum_size + (@sizeOf(BufNode) + 16);
const big_enough_len = prev_len + actual_min_size;
const len = big_enough_len + big_enough_len / 2;
- const log2_align = comptime std.math.log2_int(usize, @alignOf(BufNode));
- const ptr = self.child_allocator.rawAlloc(len, log2_align, @returnAddress()) orelse
+ const ptr = self.child_allocator.rawAlloc(len, BufNode_alignment, @returnAddress()) orelse
return null;
const buf_node: *BufNode = @ptrCast(@alignCast(ptr));
buf_node.* = .{ .data = len };
@@ -173,11 +172,11 @@ pub const ArenaAllocator = struct {
return buf_node;
}
- fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
+ fn alloc(ctx: *anyopaque, n: usize, alignment: mem.Alignment, ra: usize) ?[*]u8 {
const self: *ArenaAllocator = @ptrCast(@alignCast(ctx));
_ = ra;
- const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
+ const ptr_align = alignment.toByteUnits();
var cur_node = if (self.state.buffer_list.first) |first_node|
first_node
else
@@ -197,8 +196,7 @@ pub const ArenaAllocator = struct {
}
const bigger_buf_size = @sizeOf(BufNode) + new_end_index;
- const log2_align = comptime std.math.log2_int(usize, @alignOf(BufNode));
- if (self.child_allocator.rawResize(cur_alloc_buf, log2_align, bigger_buf_size, @returnAddress())) {
+ if (self.child_allocator.rawResize(cur_alloc_buf, BufNode_alignment, bigger_buf_size, @returnAddress())) {
cur_node.data = bigger_buf_size;
} else {
// Allocate a new node if that's not possible
@@ -207,9 +205,9 @@ pub const ArenaAllocator = struct {
}
}
- fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool {
+ fn resize(ctx: *anyopaque, buf: []u8, alignment: mem.Alignment, new_len: usize, ret_addr: usize) bool {
const self: *ArenaAllocator = @ptrCast(@alignCast(ctx));
- _ = log2_buf_align;
+ _ = alignment;
_ = ret_addr;
const cur_node = self.state.buffer_list.first orelse return false;
@@ -231,8 +229,18 @@ pub const ArenaAllocator = struct {
}
}
- fn free(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, ret_addr: usize) void {
- _ = log2_buf_align;
+ fn remap(
+ context: *anyopaque,
+ memory: []u8,
+ alignment: mem.Alignment,
+ new_len: usize,
+ return_address: usize,
+ ) ?[*]u8 {
+ return if (resize(context, memory, alignment, new_len, return_address)) memory.ptr else null;
+ }
+
+ fn free(ctx: *anyopaque, buf: []u8, alignment: mem.Alignment, ret_addr: usize) void {
+ _ = alignment;
_ = ret_addr;
const self: *ArenaAllocator = @ptrCast(@alignCast(ctx));
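
The arena changes above are representative of the VTable API update in this merge: raw allocator calls now take a `std.mem.Alignment` instead of a log2 byte count. A hedged sketch of the new calling convention for code that goes through the raw interface directly:

```zig
const std = @import("std");

test "raw allocator calls with std.mem.Alignment" {
    const gpa = std.testing.allocator;

    // Precompute the alignment once, as the arena now does with BufNode_alignment.
    const alignment: std.mem.Alignment = .fromByteUnits(@alignOf(u64));

    const ptr = gpa.rawAlloc(64, alignment, @returnAddress()) orelse return error.OutOfMemory;
    defer gpa.rawFree(ptr[0..64], alignment, @returnAddress());

    try std.testing.expect(std.mem.isAligned(@intFromPtr(ptr), @alignOf(u64)));
}
```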
diff --git a/lib/std/heap/debug_allocator.zig b/lib/std/heap/debug_allocator.zig
new file mode 100644
index 0000000000..8abf6133bf
--- /dev/null
+++ b/lib/std/heap/debug_allocator.zig
@@ -0,0 +1,1410 @@
+//! An allocator that is intended to be used in Debug mode.
+//!
+//! ## Features
+//!
+//! * Captures stack traces on allocation, free, and optionally resize.
+//! * Double free detection, which prints all three traces (first alloc, first
+//! free, second free).
+//! * Leak detection, with stack traces.
+//! * Never reuses memory addresses, making it easier for Zig to detect
+//!   branches on undefined values in case of dangling pointers. This relies on
+//!   the backing allocator also not reusing addresses.
+//! * Uses a minimum backing allocation size to avoid operating system errors
+//! from having too many active memory mappings.
+//! * When a page of memory is no longer needed, it is released from resident
+//!   memory as soon as possible, so that any later use causes a page fault.
+//! * Cross platform. Operates based on a backing allocator which makes it work
+//! everywhere, even freestanding.
+//! * Compile-time configuration.
+//!
+//! These features make the allocator quite slow and wasteful. For example,
+//! when allocating a single byte, the efficiency is less than 1%: more than
+//! 100 bytes of overhead are required to manage the allocation of that one
+//! byte. The efficiency improves with larger allocations.
+//!
+//! ## Basic Design
+//!
+//! Allocations are divided into two categories, small and large.
+//!
+//! Small allocations are divided into buckets based on `page_size`:
+//!
+//! ```
+//! index obj_size
+//! 0 1
+//! 1 2
+//! 2 4
+//! 3 8
+//! 4 16
+//! 5 32
+//! 6 64
+//! 7 128
+//! 8 256
+//! 9 512
+//! 10 1024
+//! 11 2048
+//! ...
+//! ```
+//!
+//! This goes on for `small_bucket_count` indexes.
+//!
+//! Allocations are grouped into an object size based on max(len, alignment),
+//! rounded up to the next power of two.
+//!
+//! The main allocator state has an array of all the "current" buckets for each
+//! size class. Each slot in the array can be null, meaning the bucket for that
+//! size class is not allocated. When the first object is allocated for a given
+//! size class, it makes one `page_size` allocation from the backing allocator.
+//! This allocation is divided into "slots" - one per allocated object, leaving
+//! room for the allocation metadata (starting with `BucketHeader`), which is
+//! located at the very end of the "page".
+//!
+//! The allocation metadata includes "used bits" - 1 bit per slot representing
+//! whether the slot is used. Allocations always take the next available slot
+//! from the current bucket, setting the corresponding used bit, as well as
+//! incrementing `allocated_count`.
+//!
+//! Frees recover the allocation metadata based on the address, length, and
+//! alignment, relying on the backing allocation's large alignment, combined
+//! with the fact that allocations are never moved from small to large, or vice
+//! versa.
+//!
+//! When a bucket is full, a new one is allocated, containing a pointer to the
+//! previous one. This singly-linked list is iterated during leak detection.
+//!
+//! Resizing and remapping work the same on small allocations: if the size
+//! class would not change, then the operation succeeds, and the address is
+//! unchanged. Otherwise, the request is rejected.
+//!
+//! Large objects are allocated directly using the backing allocator. Metadata
+//! is stored separately in a `std.HashMap` using the backing allocator.
+//!
+//! Resizing and remapping are forwarded directly to the backing allocator,
+//! except where such operations would change the category from large to small.
+
+const std = @import("std");
+const builtin = @import("builtin");
+const log = std.log.scoped(.gpa);
+const math = std.math;
+const assert = std.debug.assert;
+const mem = std.mem;
+const Allocator = std.mem.Allocator;
+const StackTrace = std.builtin.StackTrace;
+
+const default_page_size: usize = @max(std.heap.page_size_max, switch (builtin.os.tag) {
+ .windows => 64 * 1024, // Makes `std.heap.PageAllocator` take the happy path.
+ .wasi => 64 * 1024, // Max alignment supported by `std.heap.WasmAllocator`.
+ else => 128 * 1024, // Avoids too many active mappings when `page_size_max` is low.
+});
+
+const Log2USize = std.math.Log2Int(usize);
+
+const default_sys_stack_trace_frames: usize = if (std.debug.sys_can_stack_trace) 6 else 0;
+const default_stack_trace_frames: usize = switch (builtin.mode) {
+ .Debug => default_sys_stack_trace_frames,
+ else => 0,
+};
+
+pub const Config = struct {
+ /// Number of stack frames to capture.
+ stack_trace_frames: usize = default_stack_trace_frames,
+
+ /// If true, the allocator will have two fields:
+ /// * `total_requested_bytes` which tracks the total allocated bytes of memory requested.
+ /// * `requested_memory_limit` which causes allocations to return `error.OutOfMemory`
+ /// when the `total_requested_bytes` exceeds this limit.
+ /// If false, these fields will be `void`.
+ enable_memory_limit: bool = false,
+
+ /// Whether to enable safety checks.
+ safety: bool = std.debug.runtime_safety,
+
+ /// Whether the allocator may be used simultaneously from multiple threads.
+ thread_safe: bool = !builtin.single_threaded,
+
+    /// The type of mutex to use for thread safety.
+    /// When specified, the mutex type must have the same shape as `std.Thread.Mutex` and
+    /// `DummyMutex`, and have no required fields. Specifying this field causes
+    /// the `thread_safe` field to be ignored.
+    ///
+    /// When null (default):
+    /// * the mutex type defaults to `std.Thread.Mutex` when thread_safe is enabled.
+    /// * the mutex type defaults to `DummyMutex` otherwise.
+ MutexType: ?type = null,
+
+ /// This is a temporary debugging trick you can use to turn segfaults into more helpful
+ /// logged error messages with stack trace details. The downside is that every allocation
+ /// will be leaked, unless used with retain_metadata!
+ never_unmap: bool = false,
+
+ /// This is a temporary debugging aid that retains metadata about allocations indefinitely.
+ /// This allows a greater range of double frees to be reported. All metadata is freed when
+ /// deinit is called. When used with never_unmap, deliberately leaked memory is also freed
+ /// during deinit. Currently should be used with never_unmap to avoid segfaults.
+ /// TODO https://github.com/ziglang/zig/issues/4298 will allow use without never_unmap
+ retain_metadata: bool = false,
+
+ /// Enables emitting info messages with the size and address of every allocation.
+ verbose_log: bool = false,
+
+    /// Tells whether the backing allocator returns already-zeroed memory.
+ backing_allocator_zeroes: bool = true,
+
+ /// When resizing an allocation, refresh the stack trace with the resize
+ /// callsite. Comes with a performance penalty.
+ resize_stack_traces: bool = false,
+
+ /// Magic value that distinguishes allocations owned by this allocator from
+ /// other regions of memory.
+ canary: usize = @truncate(0x9232a6ff85dff10f),
+
+ /// The size of allocations requested from the backing allocator for
+ /// subdividing into slots for small allocations.
+ ///
+ /// Must be a power of two.
+ page_size: usize = default_page_size,
+};
+
+/// Default initialization of this struct is deprecated; use `.init` instead.
+pub fn DebugAllocator(comptime config: Config) type {
+ return struct {
+ backing_allocator: Allocator = std.heap.page_allocator,
+ /// Tracks the active bucket, which is the one that has free slots in it.
+ buckets: [small_bucket_count]?*BucketHeader = [1]?*BucketHeader{null} ** small_bucket_count,
+ large_allocations: LargeAllocTable = .empty,
+ total_requested_bytes: @TypeOf(total_requested_bytes_init) = total_requested_bytes_init,
+ requested_memory_limit: @TypeOf(requested_memory_limit_init) = requested_memory_limit_init,
+ mutex: @TypeOf(mutex_init) = mutex_init,
+
+ const Self = @This();
+
+ pub const init: Self = .{};
+
+ /// These can be derived from size_class_index but the calculation is nontrivial.
+ const slot_counts: [small_bucket_count]SlotIndex = init: {
+ @setEvalBranchQuota(10000);
+ var result: [small_bucket_count]SlotIndex = undefined;
+ for (&result, 0..) |*elem, i| elem.* = calculateSlotCount(i);
+ break :init result;
+ };
+
+ comptime {
+ assert(math.isPowerOfTwo(page_size));
+ }
+
+ const page_size = config.page_size;
+ const page_align: mem.Alignment = .fromByteUnits(page_size);
+ /// Integer type for pointing to slots in a small allocation
+ const SlotIndex = std.meta.Int(.unsigned, math.log2(page_size) + 1);
+
+ const total_requested_bytes_init = if (config.enable_memory_limit) @as(usize, 0) else {};
+ const requested_memory_limit_init = if (config.enable_memory_limit) @as(usize, math.maxInt(usize)) else {};
+
+ const mutex_init = if (config.MutexType) |T|
+ T{}
+ else if (config.thread_safe)
+ std.Thread.Mutex{}
+ else
+ DummyMutex{};
+
+ const DummyMutex = struct {
+ inline fn lock(_: *DummyMutex) void {}
+ inline fn unlock(_: *DummyMutex) void {}
+ };
+
+ const stack_n = config.stack_trace_frames;
+ const one_trace_size = @sizeOf(usize) * stack_n;
+ const traces_per_slot = 2;
+
+ pub const Error = mem.Allocator.Error;
+
+        /// Avoids creating buckets that would only be able to store a small
+        /// number of slots. A value of 1 means the minimum slot count is 2.
+ const minimum_slots_per_bucket_log2 = 1;
+ const small_bucket_count = math.log2(page_size) - minimum_slots_per_bucket_log2;
+ const largest_bucket_object_size = 1 << (small_bucket_count - 1);
+ const LargestSizeClassInt = std.math.IntFittingRange(0, largest_bucket_object_size);
+
+ const bucketCompare = struct {
+ fn compare(a: *BucketHeader, b: *BucketHeader) std.math.Order {
+ return std.math.order(@intFromPtr(a.page), @intFromPtr(b.page));
+ }
+ }.compare;
+
+ const LargeAlloc = struct {
+ bytes: []u8,
+ requested_size: if (config.enable_memory_limit) usize else void,
+ stack_addresses: [trace_n][stack_n]usize,
+ freed: if (config.retain_metadata) bool else void,
+ alignment: if (config.never_unmap and config.retain_metadata) mem.Alignment else void,
+
+ const trace_n = if (config.retain_metadata) traces_per_slot else 1;
+
+ fn dumpStackTrace(self: *LargeAlloc, trace_kind: TraceKind) void {
+ std.debug.dumpStackTrace(self.getStackTrace(trace_kind));
+ }
+
+ fn getStackTrace(self: *LargeAlloc, trace_kind: TraceKind) std.builtin.StackTrace {
+ assert(@intFromEnum(trace_kind) < trace_n);
+ const stack_addresses = &self.stack_addresses[@intFromEnum(trace_kind)];
+ var len: usize = 0;
+ while (len < stack_n and stack_addresses[len] != 0) {
+ len += 1;
+ }
+ return .{
+ .instruction_addresses = stack_addresses,
+ .index = len,
+ };
+ }
+
+ fn captureStackTrace(self: *LargeAlloc, ret_addr: usize, trace_kind: TraceKind) void {
+ assert(@intFromEnum(trace_kind) < trace_n);
+ const stack_addresses = &self.stack_addresses[@intFromEnum(trace_kind)];
+ collectStackTrace(ret_addr, stack_addresses);
+ }
+ };
+ const LargeAllocTable = std.AutoHashMapUnmanaged(usize, LargeAlloc);
+
+ /// Bucket: In memory, in order:
+ /// * BucketHeader
+ /// * bucket_used_bits: [N]usize, // 1 bit for every slot
+ /// -- below only exists when config.safety is true --
+ /// * requested_sizes: [N]LargestSizeClassInt // 1 int for every slot
+ /// * log2_ptr_aligns: [N]u8 // 1 byte for every slot
+ /// -- above only exists when config.safety is true --
+ /// * stack_trace_addresses: [N]usize, // traces_per_slot for every allocation
+ const BucketHeader = struct {
+ allocated_count: SlotIndex,
+ freed_count: SlotIndex,
+ prev: ?*BucketHeader,
+ canary: usize = config.canary,
+
+ fn fromPage(page_addr: usize, slot_count: usize) *BucketHeader {
+ const unaligned = page_addr + page_size - bucketSize(slot_count);
+ return @ptrFromInt(unaligned & ~(@as(usize, @alignOf(BucketHeader)) - 1));
+ }
+
+ fn usedBits(bucket: *BucketHeader, index: usize) *usize {
+ const ptr: [*]u8 = @ptrCast(bucket);
+ const bits: [*]usize = @alignCast(@ptrCast(ptr + @sizeOf(BucketHeader)));
+ return &bits[index];
+ }
+
+ fn requestedSizes(bucket: *BucketHeader, slot_count: usize) []LargestSizeClassInt {
+ if (!config.safety) @compileError("requested size is only stored when safety is enabled");
+ const start_ptr = @as([*]u8, @ptrCast(bucket)) + bucketRequestedSizesStart(slot_count);
+ const sizes = @as([*]LargestSizeClassInt, @ptrCast(@alignCast(start_ptr)));
+ return sizes[0..slot_count];
+ }
+
+ fn log2PtrAligns(bucket: *BucketHeader, slot_count: usize) []mem.Alignment {
+                if (!config.safety) @compileError("pointer alignment is only stored when safety is enabled");
+ const aligns_ptr = @as([*]u8, @ptrCast(bucket)) + bucketAlignsStart(slot_count);
+ return @ptrCast(aligns_ptr[0..slot_count]);
+ }
+
+ fn stackTracePtr(
+ bucket: *BucketHeader,
+ slot_count: usize,
+ slot_index: SlotIndex,
+ trace_kind: TraceKind,
+ ) *[stack_n]usize {
+ const start_ptr = @as([*]u8, @ptrCast(bucket)) + bucketStackFramesStart(slot_count);
+ const addr = start_ptr + one_trace_size * traces_per_slot * slot_index +
+ @intFromEnum(trace_kind) * @as(usize, one_trace_size);
+ return @ptrCast(@alignCast(addr));
+ }
+
+ fn captureStackTrace(
+ bucket: *BucketHeader,
+ ret_addr: usize,
+ slot_count: usize,
+ slot_index: SlotIndex,
+ trace_kind: TraceKind,
+ ) void {
+                // The addresses are zeroed first; the trace length is later
+                // recovered by scanning for the first zero address.
+ const stack_addresses = bucket.stackTracePtr(slot_count, slot_index, trace_kind);
+ collectStackTrace(ret_addr, stack_addresses);
+ }
+ };
+
+ pub fn allocator(self: *Self) Allocator {
+ return .{
+ .ptr = self,
+ .vtable = &.{
+ .alloc = alloc,
+ .resize = resize,
+ .remap = remap,
+ .free = free,
+ },
+ };
+ }
+
+ fn bucketStackTrace(
+ bucket: *BucketHeader,
+ slot_count: usize,
+ slot_index: SlotIndex,
+ trace_kind: TraceKind,
+ ) StackTrace {
+ const stack_addresses = bucket.stackTracePtr(slot_count, slot_index, trace_kind);
+ var len: usize = 0;
+ while (len < stack_n and stack_addresses[len] != 0) {
+ len += 1;
+ }
+ return .{
+ .instruction_addresses = stack_addresses,
+ .index = len,
+ };
+ }
+
+ fn bucketRequestedSizesStart(slot_count: usize) usize {
+ if (!config.safety) @compileError("requested sizes are not stored unless safety is enabled");
+ return mem.alignForward(
+ usize,
+ @sizeOf(BucketHeader) + usedBitsSize(slot_count),
+ @alignOf(LargestSizeClassInt),
+ );
+ }
+
+ fn bucketAlignsStart(slot_count: usize) usize {
+ if (!config.safety) @compileError("requested sizes are not stored unless safety is enabled");
+ return bucketRequestedSizesStart(slot_count) + (@sizeOf(LargestSizeClassInt) * slot_count);
+ }
+
+ fn bucketStackFramesStart(slot_count: usize) usize {
+ const unaligned_start = if (config.safety)
+ bucketAlignsStart(slot_count) + slot_count
+ else
+ @sizeOf(BucketHeader) + usedBitsSize(slot_count);
+ return mem.alignForward(usize, unaligned_start, @alignOf(usize));
+ }
+
+ fn bucketSize(slot_count: usize) usize {
+ return bucketStackFramesStart(slot_count) + one_trace_size * traces_per_slot * slot_count;
+ }
+
+ /// This is executed only at compile-time to prepopulate a lookup table.
+ fn calculateSlotCount(size_class_index: usize) SlotIndex {
+ const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
+ var lower: usize = 1 << minimum_slots_per_bucket_log2;
+ var upper: usize = (page_size - bucketSize(lower)) / size_class;
+ while (upper > lower) {
+ const proposed: usize = lower + (upper - lower) / 2;
+ if (proposed == lower) return lower;
+ const slots_end = proposed * size_class;
+ const header_begin = mem.alignForward(usize, slots_end, @alignOf(BucketHeader));
+ const end = header_begin + bucketSize(proposed);
+ if (end > page_size) {
+ upper = proposed - 1;
+ } else {
+ lower = proposed;
+ }
+ }
+ const slots_end = lower * size_class;
+ const header_begin = mem.alignForward(usize, slots_end, @alignOf(BucketHeader));
+ const end = header_begin + bucketSize(lower);
+ assert(end <= page_size);
+ return lower;
+ }
+
+ fn usedBitsCount(slot_count: usize) usize {
+ return (slot_count + (@bitSizeOf(usize) - 1)) / @bitSizeOf(usize);
+ }
+
+ fn usedBitsSize(slot_count: usize) usize {
+ return usedBitsCount(slot_count) * @sizeOf(usize);
+ }
+
+ fn detectLeaksInBucket(bucket: *BucketHeader, size_class_index: usize, used_bits_count: usize) bool {
+ const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
+ const slot_count = slot_counts[size_class_index];
+ var leaks = false;
+ for (0..used_bits_count) |used_bits_byte| {
+ const used_int = bucket.usedBits(used_bits_byte).*;
+ if (used_int != 0) {
+ for (0..@bitSizeOf(usize)) |bit_index_usize| {
+ const bit_index: Log2USize = @intCast(bit_index_usize);
+ const is_used = @as(u1, @truncate(used_int >> bit_index)) != 0;
+ if (is_used) {
+ const slot_index: SlotIndex = @intCast(used_bits_byte * @bitSizeOf(usize) + bit_index);
+ const stack_trace = bucketStackTrace(bucket, slot_count, slot_index, .alloc);
+ const page_addr = @intFromPtr(bucket) & ~(page_size - 1);
+ const addr = page_addr + slot_index * size_class;
+ log.err("memory address 0x{x} leaked: {}", .{ addr, stack_trace });
+ leaks = true;
+ }
+ }
+ }
+ }
+ return leaks;
+ }
+
+ /// Emits log messages for leaks and then returns whether there were any leaks.
+ pub fn detectLeaks(self: *Self) bool {
+ var leaks = false;
+
+ for (self.buckets, 0..) |init_optional_bucket, size_class_index| {
+ var optional_bucket = init_optional_bucket;
+ const slot_count = slot_counts[size_class_index];
+ const used_bits_count = usedBitsCount(slot_count);
+ while (optional_bucket) |bucket| {
+ leaks = detectLeaksInBucket(bucket, size_class_index, used_bits_count) or leaks;
+ optional_bucket = bucket.prev;
+ }
+ }
+
+ var it = self.large_allocations.valueIterator();
+ while (it.next()) |large_alloc| {
+ if (config.retain_metadata and large_alloc.freed) continue;
+ const stack_trace = large_alloc.getStackTrace(.alloc);
+ log.err("memory address 0x{x} leaked: {}", .{
+ @intFromPtr(large_alloc.bytes.ptr), stack_trace,
+ });
+ leaks = true;
+ }
+ return leaks;
+ }
+
+ fn freeRetainedMetadata(self: *Self) void {
+ comptime assert(config.retain_metadata);
+ if (config.never_unmap) {
+ // free large allocations that were intentionally leaked by never_unmap
+ var it = self.large_allocations.iterator();
+ while (it.next()) |large| {
+ if (large.value_ptr.freed) {
+ self.backing_allocator.rawFree(large.value_ptr.bytes, large.value_ptr.alignment, @returnAddress());
+ }
+ }
+ }
+ }
+
+ pub fn flushRetainedMetadata(self: *Self) void {
+ comptime assert(config.retain_metadata);
+ self.freeRetainedMetadata();
+ // also remove entries from large_allocations
+ var it = self.large_allocations.iterator();
+ while (it.next()) |large| {
+ if (large.value_ptr.freed) {
+ _ = self.large_allocations.remove(@intFromPtr(large.value_ptr.bytes.ptr));
+ }
+ }
+ }
+
+ /// Returns `std.heap.Check.leak` if there were leaks; `std.heap.Check.ok` otherwise.
+ pub fn deinit(self: *Self) std.heap.Check {
+ const leaks = if (config.safety) self.detectLeaks() else false;
+ if (config.retain_metadata) self.freeRetainedMetadata();
+ self.large_allocations.deinit(self.backing_allocator);
+ self.* = undefined;
+ return if (leaks) .leak else .ok;
+ }
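
For orientation (not part of the file): the intended lifecycle pairs `.init` with the `Check`-returning `deinit`. Assuming the surrounding heap.zig changes export this type as `std.heap.DebugAllocator` (not shown in this excerpt), usage looks roughly like:

```zig
const std = @import("std");

test "DebugAllocator lifecycle with leak check" {
    var da: std.heap.DebugAllocator(.{}) = .init;
    // .leak here would mean something was not freed before deinit.
    defer std.debug.assert(da.deinit() == .ok);
    const a = da.allocator();

    const slice = try a.alloc(u8, 32);
    a.free(slice);
}
```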
+
+ fn collectStackTrace(first_trace_addr: usize, addresses: *[stack_n]usize) void {
+ if (stack_n == 0) return;
+ @memset(addresses, 0);
+ var stack_trace: StackTrace = .{
+ .instruction_addresses = addresses,
+ .index = 0,
+ };
+ std.debug.captureStackTrace(first_trace_addr, &stack_trace);
+ }
+
+ fn reportDoubleFree(ret_addr: usize, alloc_stack_trace: StackTrace, free_stack_trace: StackTrace) void {
+ var addresses: [stack_n]usize = @splat(0);
+ var second_free_stack_trace: StackTrace = .{
+ .instruction_addresses = &addresses,
+ .index = 0,
+ };
+ std.debug.captureStackTrace(ret_addr, &second_free_stack_trace);
+ log.err("Double free detected. Allocation: {} First free: {} Second free: {}", .{
+ alloc_stack_trace, free_stack_trace, second_free_stack_trace,
+ });
+ }
+
+ /// This function assumes the object is in the large object storage regardless
+ /// of the parameters.
+ fn resizeLarge(
+ self: *Self,
+ old_mem: []u8,
+ alignment: mem.Alignment,
+ new_size: usize,
+ ret_addr: usize,
+ may_move: bool,
+ ) ?[*]u8 {
+ if (config.retain_metadata and may_move) {
+ // Before looking up the entry (since this could invalidate
+ // it), we must reserve space for the new entry in case the
+ // allocation is relocated.
+ self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1) catch return null;
+ }
+
+ const entry = self.large_allocations.getEntry(@intFromPtr(old_mem.ptr)) orelse {
+ if (config.safety) {
+ @panic("Invalid free");
+ } else {
+ unreachable;
+ }
+ };
+
+ if (config.retain_metadata and entry.value_ptr.freed) {
+ if (config.safety) {
+ reportDoubleFree(ret_addr, entry.value_ptr.getStackTrace(.alloc), entry.value_ptr.getStackTrace(.free));
+ @panic("Unrecoverable double free");
+ } else {
+ unreachable;
+ }
+ }
+
+ if (config.safety and old_mem.len != entry.value_ptr.bytes.len) {
+ var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
+ var free_stack_trace: StackTrace = .{
+ .instruction_addresses = &addresses,
+ .index = 0,
+ };
+ std.debug.captureStackTrace(ret_addr, &free_stack_trace);
+ log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {} Free: {}", .{
+ entry.value_ptr.bytes.len,
+ old_mem.len,
+ entry.value_ptr.getStackTrace(.alloc),
+ free_stack_trace,
+ });
+ }
+
+ // If this would move the allocation into a small size class,
+ // refuse the request, because it would require creating small
+ // allocation metadata.
+ const new_size_class_index: usize = @max(@bitSizeOf(usize) - @clz(new_size - 1), @intFromEnum(alignment));
+ if (new_size_class_index < self.buckets.len) return null;
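+ // Illustrative arithmetic (assuming 64-bit usize and a small alignment): for
+ // new_size = 300, @clz(299) is 55, so new_size_class_index = 64 - 55 = 9 (the
+ // 512-byte class); any index below self.buckets.len is refused by the check above.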
+
+ // Do memory limit accounting with requested sizes rather than what
+ // backing_allocator returns, because if we want to return
+ // error.OutOfMemory we have to leave the allocation untouched, and
+ // that is impossible to guarantee after calling
+ // backing_allocator.rawResize or rawRemap.
+ const prev_req_bytes = self.total_requested_bytes;
+ if (config.enable_memory_limit) {
+ const new_req_bytes = prev_req_bytes + new_size - entry.value_ptr.requested_size;
+ if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) {
+ return null;
+ }
+ self.total_requested_bytes = new_req_bytes;
+ }
+
+ const opt_resized_ptr = if (may_move)
+ self.backing_allocator.rawRemap(old_mem, alignment, new_size, ret_addr)
+ else if (self.backing_allocator.rawResize(old_mem, alignment, new_size, ret_addr))
+ old_mem.ptr
+ else
+ null;
+
+ const resized_ptr = opt_resized_ptr orelse {
+ if (config.enable_memory_limit) {
+ self.total_requested_bytes = prev_req_bytes;
+ }
+ return null;
+ };
+
+ if (config.enable_memory_limit) {
+ entry.value_ptr.requested_size = new_size;
+ }
+
+ if (config.verbose_log) {
+ log.info("large resize {d} bytes at {*} to {d} at {*}", .{
+ old_mem.len, old_mem.ptr, new_size, resized_ptr,
+ });
+ }
+ entry.value_ptr.bytes = resized_ptr[0..new_size];
+ if (config.resize_stack_traces)
+ entry.value_ptr.captureStackTrace(ret_addr, .alloc);
+
+ // Update the key of the hash map if the memory was relocated.
+ if (resized_ptr != old_mem.ptr) {
+ const large_alloc = entry.value_ptr.*;
+ if (config.retain_metadata) {
+ entry.value_ptr.freed = true;
+ entry.value_ptr.captureStackTrace(ret_addr, .free);
+ } else {
+ self.large_allocations.removeByPtr(entry.key_ptr);
+ }
+
+ const gop = self.large_allocations.getOrPutAssumeCapacity(@intFromPtr(resized_ptr));
+ if (config.retain_metadata and !config.never_unmap) {
+ // Backing allocator may be reusing memory that we're retaining metadata for
+ assert(!gop.found_existing or gop.value_ptr.freed);
+ } else {
+ assert(!gop.found_existing); // This would mean the kernel double-mapped pages.
+ }
+ gop.value_ptr.* = large_alloc;
+ }
+
+ return resized_ptr;
+ }
+
+ /// This function assumes the object is in the large object storage regardless
+ /// of the parameters.
+ fn freeLarge(
+ self: *Self,
+ old_mem: []u8,
+ alignment: mem.Alignment,
+ ret_addr: usize,
+ ) void {
+ const entry = self.large_allocations.getEntry(@intFromPtr(old_mem.ptr)) orelse {
+ if (config.safety) {
+ @panic("Invalid free");
+ } else {
+ unreachable;
+ }
+ };
+
+ if (config.retain_metadata and entry.value_ptr.freed) {
+ if (config.safety) {
+ reportDoubleFree(ret_addr, entry.value_ptr.getStackTrace(.alloc), entry.value_ptr.getStackTrace(.free));
+ return;
+ } else {
+ unreachable;
+ }
+ }
+
+ if (config.safety and old_mem.len != entry.value_ptr.bytes.len) {
+ var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
+ var free_stack_trace = StackTrace{
+ .instruction_addresses = &addresses,
+ .index = 0,
+ };
+ std.debug.captureStackTrace(ret_addr, &free_stack_trace);
+ log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {} Free: {}", .{
+ entry.value_ptr.bytes.len,
+ old_mem.len,
+ entry.value_ptr.getStackTrace(.alloc),
+ free_stack_trace,
+ });
+ }
+
+ if (!config.never_unmap) {
+ self.backing_allocator.rawFree(old_mem, alignment, ret_addr);
+ }
+
+ if (config.enable_memory_limit) {
+ self.total_requested_bytes -= entry.value_ptr.requested_size;
+ }
+
+ if (config.verbose_log) {
+ log.info("large free {d} bytes at {*}", .{ old_mem.len, old_mem.ptr });
+ }
+
+ if (!config.retain_metadata) {
+ assert(self.large_allocations.remove(@intFromPtr(old_mem.ptr)));
+ } else {
+ entry.value_ptr.freed = true;
+ entry.value_ptr.captureStackTrace(ret_addr, .free);
+ }
+ }
+
+ fn alloc(context: *anyopaque, len: usize, alignment: mem.Alignment, ret_addr: usize) ?[*]u8 {
+ const self: *Self = @ptrCast(@alignCast(context));
+ self.mutex.lock();
+ defer self.mutex.unlock();
+
+ if (config.enable_memory_limit) {
+ const new_req_bytes = self.total_requested_bytes + len;
+ if (new_req_bytes > self.requested_memory_limit) return null;
+ self.total_requested_bytes = new_req_bytes;
+ }
+
+ const size_class_index: usize = @max(@bitSizeOf(usize) - @clz(len - 1), @intFromEnum(alignment));
+ if (size_class_index >= self.buckets.len) {
+ @branchHint(.unlikely);
+ self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1) catch return null;
+ const ptr = self.backing_allocator.rawAlloc(len, alignment, ret_addr) orelse return null;
+ const slice = ptr[0..len];
+
+ const gop = self.large_allocations.getOrPutAssumeCapacity(@intFromPtr(slice.ptr));
+ if (config.retain_metadata and !config.never_unmap) {
+ // Backing allocator may be reusing memory that we're retaining metadata for
+ assert(!gop.found_existing or gop.value_ptr.freed);
+ } else {
+ assert(!gop.found_existing); // This would mean the kernel double-mapped pages.
+ }
+ gop.value_ptr.bytes = slice;
+ if (config.enable_memory_limit)
+ gop.value_ptr.requested_size = len;
+ gop.value_ptr.captureStackTrace(ret_addr, .alloc);
+ if (config.retain_metadata) {
+ gop.value_ptr.freed = false;
+ if (config.never_unmap) {
+ gop.value_ptr.alignment = alignment;
+ }
+ }
+
+ if (config.verbose_log) {
+ log.info("large alloc {d} bytes at {*}", .{ slice.len, slice.ptr });
+ }
+ return slice.ptr;
+ }
+
+ const slot_count = slot_counts[size_class_index];
+
+ if (self.buckets[size_class_index]) |bucket| {
+ @branchHint(.likely);
+ const slot_index = bucket.allocated_count;
+ if (slot_index < slot_count) {
+ @branchHint(.likely);
+ bucket.allocated_count = slot_index + 1;
+ const used_bits_byte = bucket.usedBits(slot_index / @bitSizeOf(usize));
+ const used_bit_index: Log2USize = @intCast(slot_index % @bitSizeOf(usize));
+ used_bits_byte.* |= (@as(usize, 1) << used_bit_index);
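+ // Illustrative (64-bit usize): slot_index 70 lands in word 70 / 64 = 1, bit 70 % 64 = 6.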
+ const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
+ if (config.stack_trace_frames > 0) {
+ bucket.captureStackTrace(ret_addr, slot_count, slot_index, .alloc);
+ }
+ if (config.safety) {
+ bucket.requestedSizes(slot_count)[slot_index] = @intCast(len);
+ bucket.log2PtrAligns(slot_count)[slot_index] = alignment;
+ }
+ const page_addr = @intFromPtr(bucket) & ~(page_size - 1);
+ const addr = page_addr + slot_index * size_class;
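+ // Illustrative: for the 64-byte size class, slot_index 3 resolves to page_addr + 3 * 64.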
+ if (config.verbose_log) {
+ log.info("small alloc {d} bytes at 0x{x}", .{ len, addr });
+ }
+ return @ptrFromInt(addr);
+ }
+ }
+
+ const page = self.backing_allocator.rawAlloc(page_size, page_align, @returnAddress()) orelse
+ return null;
+ const bucket: *BucketHeader = .fromPage(@intFromPtr(page), slot_count);
+ bucket.* = .{
+ .allocated_count = 1,
+ .freed_count = 0,
+ .prev = self.buckets[size_class_index],
+ };
+ self.buckets[size_class_index] = bucket;
+
+ if (!config.backing_allocator_zeroes) {
+ @memset(@as([*]usize, @as(*[1]usize, bucket.usedBits(0)))[0..usedBitsCount(slot_count)], 0);
+ if (config.safety) @memset(bucket.requestedSizes(slot_count), 0);
+ }
+
+ bucket.usedBits(0).* = 0b1;
+
+ if (config.stack_trace_frames > 0) {
+ bucket.captureStackTrace(ret_addr, slot_count, 0, .alloc);
+ }
+
+ if (config.safety) {
+ bucket.requestedSizes(slot_count)[0] = @intCast(len);
+ bucket.log2PtrAligns(slot_count)[0] = alignment;
+ }
+
+ if (config.verbose_log) {
+ log.info("small alloc {d} bytes at 0x{x}", .{ len, @intFromPtr(page) });
+ }
+
+ return page;
+ }
+
+ fn resize(
+ context: *anyopaque,
+ memory: []u8,
+ alignment: mem.Alignment,
+ new_len: usize,
+ return_address: usize,
+ ) bool {
+ const self: *Self = @ptrCast(@alignCast(context));
+ self.mutex.lock();
+ defer self.mutex.unlock();
+
+ const size_class_index: usize = @max(@bitSizeOf(usize) - @clz(memory.len - 1), @intFromEnum(alignment));
+ if (size_class_index >= self.buckets.len) {
+ return self.resizeLarge(memory, alignment, new_len, return_address, false) != null;
+ } else {
+ return resizeSmall(self, memory, alignment, new_len, return_address, size_class_index);
+ }
+ }
+
+ fn remap(
+ context: *anyopaque,
+ memory: []u8,
+ alignment: mem.Alignment,
+ new_len: usize,
+ return_address: usize,
+ ) ?[*]u8 {
+ const self: *Self = @ptrCast(@alignCast(context));
+ self.mutex.lock();
+ defer self.mutex.unlock();
+
+ const size_class_index: usize = @max(@bitSizeOf(usize) - @clz(memory.len - 1), @intFromEnum(alignment));
+ if (size_class_index >= self.buckets.len) {
+ return self.resizeLarge(memory, alignment, new_len, return_address, true);
+ } else {
+ return if (resizeSmall(self, memory, alignment, new_len, return_address, size_class_index)) memory.ptr else null;
+ }
+ }
+
+ fn free(
+ context: *anyopaque,
+ old_memory: []u8,
+ alignment: mem.Alignment,
+ return_address: usize,
+ ) void {
+ const self: *Self = @ptrCast(@alignCast(context));
+ self.mutex.lock();
+ defer self.mutex.unlock();
+
+ assert(old_memory.len != 0);
+
+ const size_class_index: usize = @max(@bitSizeOf(usize) - @clz(old_memory.len - 1), @intFromEnum(alignment));
+ if (size_class_index >= self.buckets.len) {
+ @branchHint(.unlikely);
+ self.freeLarge(old_memory, alignment, return_address);
+ return;
+ }
+
+ const slot_count = slot_counts[size_class_index];
+ const freed_addr = @intFromPtr(old_memory.ptr);
+ const page_addr = freed_addr & ~(page_size - 1);
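+ // Illustrative: with a 4096-byte page size, freed_addr 0x12345 masks down to page_addr 0x12000.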
+ const bucket: *BucketHeader = .fromPage(page_addr, slot_count);
+ if (bucket.canary != config.canary) @panic("Invalid free");
+ const page_offset = freed_addr - page_addr;
+ const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
+ const slot_index: SlotIndex = @intCast(page_offset / size_class);
+ const used_byte_index = slot_index / @bitSizeOf(usize);
+ const used_bit_index: Log2USize = @intCast(slot_index % @bitSizeOf(usize));
+ const used_byte = bucket.usedBits(used_byte_index);
+ const is_used = @as(u1, @truncate(used_byte.* >> used_bit_index)) != 0;
+ if (!is_used) {
+ if (config.safety) {
+ reportDoubleFree(
+ return_address,
+ bucketStackTrace(bucket, slot_count, slot_index, .alloc),
+ bucketStackTrace(bucket, slot_count, slot_index, .free),
+ );
+ // Recoverable since this is a free.
+ return;
+ } else {
+ unreachable;
+ }
+ }
+
+ // Definitely an in-use small alloc now.
+ if (config.safety) {
+ const requested_size = bucket.requestedSizes(slot_count)[slot_index];
+ if (requested_size == 0) @panic("Invalid free");
+ const slot_alignment = bucket.log2PtrAligns(slot_count)[slot_index];
+ if (old_memory.len != requested_size or alignment != slot_alignment) {
+ var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
+ var free_stack_trace: StackTrace = .{
+ .instruction_addresses = &addresses,
+ .index = 0,
+ };
+ std.debug.captureStackTrace(return_address, &free_stack_trace);
+ if (old_memory.len != requested_size) {
+ log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {} Free: {}", .{
+ requested_size,
+ old_memory.len,
+ bucketStackTrace(bucket, slot_count, slot_index, .alloc),
+ free_stack_trace,
+ });
+ }
+ if (alignment != slot_alignment) {
+ log.err("Allocation alignment {d} does not match free alignment {d}. Allocation: {} Free: {}", .{
+ slot_alignment.toByteUnits(),
+ alignment.toByteUnits(),
+ bucketStackTrace(bucket, slot_count, slot_index, .alloc),
+ free_stack_trace,
+ });
+ }
+ }
+ }
+
+ if (config.enable_memory_limit) {
+ self.total_requested_bytes -= old_memory.len;
+ }
+
+ if (config.stack_trace_frames > 0) {
+ // Capture stack trace to be the "first free", in case a double free happens.
+ bucket.captureStackTrace(return_address, slot_count, slot_index, .free);
+ }
+
+ used_byte.* &= ~(@as(usize, 1) << used_bit_index);
+ if (config.safety) {
+ bucket.requestedSizes(slot_count)[slot_index] = 0;
+ }
+ bucket.freed_count += 1;
+ if (bucket.freed_count == bucket.allocated_count) {
+ if (self.buckets[size_class_index] == bucket) {
+ self.buckets[size_class_index] = null;
+ }
+ if (!config.never_unmap) {
+ const page: [*]align(page_size) u8 = @ptrFromInt(page_addr);
+ self.backing_allocator.rawFree(page[0..page_size], page_align, @returnAddress());
+ }
+ }
+ if (config.verbose_log) {
+ log.info("small free {d} bytes at {*}", .{ old_memory.len, old_memory.ptr });
+ }
+ }
+
+ fn resizeSmall(
+ self: *Self,
+ memory: []u8,
+ alignment: mem.Alignment,
+ new_len: usize,
+ return_address: usize,
+ size_class_index: usize,
+ ) bool {
+ const new_size_class_index: usize = @max(@bitSizeOf(usize) - @clz(new_len - 1), @intFromEnum(alignment));
+ if (!config.safety) return new_size_class_index == size_class_index;
+ const slot_count = slot_counts[size_class_index];
+ const memory_addr = @intFromPtr(memory.ptr);
+ const page_addr = memory_addr & ~(page_size - 1);
+ const bucket: *BucketHeader = .fromPage(page_addr, slot_count);
+ if (bucket.canary != config.canary) @panic("Invalid free");
+ const page_offset = memory_addr - page_addr;
+ const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
+ const slot_index: SlotIndex = @intCast(page_offset / size_class);
+ const used_byte_index = slot_index / @bitSizeOf(usize);
+ const used_bit_index: Log2USize = @intCast(slot_index % @bitSizeOf(usize));
+ const used_byte = bucket.usedBits(used_byte_index);
+ const is_used = @as(u1, @truncate(used_byte.* >> used_bit_index)) != 0;
+ if (!is_used) {
+ reportDoubleFree(
+ return_address,
+ bucketStackTrace(bucket, slot_count, slot_index, .alloc),
+ bucketStackTrace(bucket, slot_count, slot_index, .free),
+ );
+ // Recoverable since this is a free.
+ return false;
+ }
+
+ // Definitely an in-use small alloc now.
+ const requested_size = bucket.requestedSizes(slot_count)[slot_index];
+ if (requested_size == 0) @panic("Invalid free");
+ const slot_alignment = bucket.log2PtrAligns(slot_count)[slot_index];
+ if (memory.len != requested_size or alignment != slot_alignment) {
+ var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
+ var free_stack_trace: StackTrace = .{
+ .instruction_addresses = &addresses,
+ .index = 0,
+ };
+ std.debug.captureStackTrace(return_address, &free_stack_trace);
+ if (memory.len != requested_size) {
+ log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {} Free: {}", .{
+ requested_size,
+ memory.len,
+ bucketStackTrace(bucket, slot_count, slot_index, .alloc),
+ free_stack_trace,
+ });
+ }
+ if (alignment != slot_alignment) {
+ log.err("Allocation alignment {d} does not match free alignment {d}. Allocation: {} Free: {}", .{
+ slot_alignment.toByteUnits(),
+ alignment.toByteUnits(),
+ bucketStackTrace(bucket, slot_count, slot_index, .alloc),
+ free_stack_trace,
+ });
+ }
+ }
+
+ if (new_size_class_index != size_class_index) return false;
+
+ const prev_req_bytes = self.total_requested_bytes;
+ if (config.enable_memory_limit) {
+ const new_req_bytes = prev_req_bytes - memory.len + new_len;
+ if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) {
+ return false;
+ }
+ self.total_requested_bytes = new_req_bytes;
+ }
+
+ if (memory.len > new_len) @memset(memory[new_len..], undefined);
+ if (config.verbose_log)
+ log.info("small resize {d} bytes at {*} to {d}", .{ memory.len, memory.ptr, new_len });
+
+ if (config.safety)
+ bucket.requestedSizes(slot_count)[slot_index] = @intCast(new_len);
+
+ if (config.resize_stack_traces)
+ bucket.captureStackTrace(return_address, slot_count, slot_index, .alloc);
+
+ return true;
+ }
+ };
+}
+
+const TraceKind = enum {
+ alloc,
+ free,
+};
+
+const test_config = Config{};
+
+test "small allocations - free in same order" {
+ var gpa = DebugAllocator(test_config){};
+ defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
+ const allocator = gpa.allocator();
+
+ var list = std.ArrayList(*u64).init(std.testing.allocator);
+ defer list.deinit();
+
+ var i: usize = 0;
+ while (i < 513) : (i += 1) {
+ const ptr = try allocator.create(u64);
+ try list.append(ptr);
+ }
+
+ for (list.items) |ptr| {
+ allocator.destroy(ptr);
+ }
+}
+
+test "small allocations - free in reverse order" {
+ var gpa = DebugAllocator(test_config){};
+ defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
+ const allocator = gpa.allocator();
+
+ var list = std.ArrayList(*u64).init(std.testing.allocator);
+ defer list.deinit();
+
+ var i: usize = 0;
+ while (i < 513) : (i += 1) {
+ const ptr = try allocator.create(u64);
+ try list.append(ptr);
+ }
+
+ while (list.popOrNull()) |ptr| {
+ allocator.destroy(ptr);
+ }
+}
+
+test "large allocations" {
+ var gpa = DebugAllocator(test_config){};
+ defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
+ const allocator = gpa.allocator();
+
+ const ptr1 = try allocator.alloc(u64, 42768);
+ const ptr2 = try allocator.alloc(u64, 52768);
+ allocator.free(ptr1);
+ const ptr3 = try allocator.alloc(u64, 62768);
+ allocator.free(ptr3);
+ allocator.free(ptr2);
+}
+
+test "very large allocation" {
+ var gpa = DebugAllocator(test_config){};
+ defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
+ const allocator = gpa.allocator();
+
+ try std.testing.expectError(error.OutOfMemory, allocator.alloc(u8, math.maxInt(usize)));
+}
+
+test "realloc" {
+ var gpa = DebugAllocator(test_config){};
+ defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
+ const allocator = gpa.allocator();
+
+ var slice = try allocator.alignedAlloc(u8, @alignOf(u32), 1);
+ defer allocator.free(slice);
+ slice[0] = 0x12;
+
+ // This reallocation should keep its pointer address.
+ const old_slice = slice;
+ slice = try allocator.realloc(slice, 2);
+ try std.testing.expect(old_slice.ptr == slice.ptr);
+ try std.testing.expect(slice[0] == 0x12);
+ slice[1] = 0x34;
+
+ // This requires upgrading to a larger size class
+ slice = try allocator.realloc(slice, 17);
+ try std.testing.expect(slice[0] == 0x12);
+ try std.testing.expect(slice[1] == 0x34);
+}
+
+test "shrink" {
+ var gpa: DebugAllocator(test_config) = .{};
+ defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
+ const allocator = gpa.allocator();
+
+ var slice = try allocator.alloc(u8, 20);
+ defer allocator.free(slice);
+
+ @memset(slice, 0x11);
+
+ try std.testing.expect(allocator.resize(slice, 17));
+ slice = slice[0..17];
+
+ for (slice) |b| {
+ try std.testing.expect(b == 0x11);
+ }
+
+ // Does not cross size class boundaries when shrinking.
+ try std.testing.expect(!allocator.resize(slice, 16));
+}
+
+test "large object - grow" {
+ if (builtin.target.isWasm()) {
+ // Not expected to pass on targets that do not have memory mapping.
+ return error.SkipZigTest;
+ }
+ var gpa: DebugAllocator(test_config) = .{};
+ defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
+ const allocator = gpa.allocator();
+
+ var slice1 = try allocator.alloc(u8, default_page_size * 2 - 20);
+ defer allocator.free(slice1);
+
+ const old = slice1;
+ slice1 = try allocator.realloc(slice1, default_page_size * 2 - 10);
+ try std.testing.expect(slice1.ptr == old.ptr);
+
+ slice1 = try allocator.realloc(slice1, default_page_size * 2);
+ try std.testing.expect(slice1.ptr == old.ptr);
+
+ slice1 = try allocator.realloc(slice1, default_page_size * 2 + 1);
+}
+
+test "realloc small object to large object" {
+ var gpa = DebugAllocator(test_config){};
+ defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
+ const allocator = gpa.allocator();
+
+ var slice = try allocator.alloc(u8, 70);
+ defer allocator.free(slice);
+ slice[0] = 0x12;
+ slice[60] = 0x34;
+
+ // This requires upgrading to a large object
+ const large_object_size = default_page_size * 2 + 50;
+ slice = try allocator.realloc(slice, large_object_size);
+ try std.testing.expect(slice[0] == 0x12);
+ try std.testing.expect(slice[60] == 0x34);
+}
+
+test "shrink large object to large object" {
+ var gpa: DebugAllocator(test_config) = .{};
+ defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
+ const allocator = gpa.allocator();
+
+ var slice = try allocator.alloc(u8, default_page_size * 2 + 50);
+ defer allocator.free(slice);
+ slice[0] = 0x12;
+ slice[60] = 0x34;
+
+ if (!allocator.resize(slice, default_page_size * 2 + 1)) return;
+ slice = slice.ptr[0 .. default_page_size * 2 + 1];
+ try std.testing.expect(slice[0] == 0x12);
+ try std.testing.expect(slice[60] == 0x34);
+
+ try std.testing.expect(allocator.resize(slice, default_page_size * 2 + 1));
+ slice = slice[0 .. default_page_size * 2 + 1];
+ try std.testing.expect(slice[0] == 0x12);
+ try std.testing.expect(slice[60] == 0x34);
+
+ slice = try allocator.realloc(slice, default_page_size * 2);
+ try std.testing.expect(slice[0] == 0x12);
+ try std.testing.expect(slice[60] == 0x34);
+}
+
+test "shrink large object to large object with larger alignment" {
+ if (!builtin.link_libc and builtin.os.tag == .wasi) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/22731
+
+ var gpa = DebugAllocator(test_config){};
+ defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
+ const allocator = gpa.allocator();
+
+ var debug_buffer: [1000]u8 = undefined;
+ var fba = std.heap.FixedBufferAllocator.init(&debug_buffer);
+ const debug_allocator = fba.allocator();
+
+ const alloc_size = default_page_size * 2 + 50;
+ var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
+ defer allocator.free(slice);
+
+ const big_alignment: usize = default_page_size * 2;
+ // This loop allocates until we find a page that is not aligned to the big
+ // alignment. After the loop we shrink the allocation but request the larger
+ // alignment, which we know will force it to realloc.
+ var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
+ while (mem.isAligned(@intFromPtr(slice.ptr), big_alignment)) {
+ try stuff_to_free.append(slice);
+ slice = try allocator.alignedAlloc(u8, 16, alloc_size);
+ }
+ while (stuff_to_free.popOrNull()) |item| {
+ allocator.free(item);
+ }
+ slice[0] = 0x12;
+ slice[60] = 0x34;
+
+ slice = try allocator.reallocAdvanced(slice, big_alignment, alloc_size / 2);
+ try std.testing.expect(slice[0] == 0x12);
+ try std.testing.expect(slice[60] == 0x34);
+}
+
+test "realloc large object to small object" {
+ var gpa = DebugAllocator(test_config){};
+ defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
+ const allocator = gpa.allocator();
+
+ var slice = try allocator.alloc(u8, default_page_size * 2 + 50);
+ defer allocator.free(slice);
+ slice[0] = 0x12;
+ slice[16] = 0x34;
+
+ slice = try allocator.realloc(slice, 19);
+ try std.testing.expect(slice[0] == 0x12);
+ try std.testing.expect(slice[16] == 0x34);
+}
+
+test "overridable mutexes" {
+ var gpa = DebugAllocator(.{ .MutexType = std.Thread.Mutex }){
+ .backing_allocator = std.testing.allocator,
+ .mutex = std.Thread.Mutex{},
+ };
+ defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
+ const allocator = gpa.allocator();
+
+ const ptr = try allocator.create(i32);
+ defer allocator.destroy(ptr);
+}
+
+test "non-page-allocator backing allocator" {
+ var gpa: DebugAllocator(.{
+ .backing_allocator_zeroes = false,
+ }) = .{
+ .backing_allocator = std.testing.allocator,
+ };
+ defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
+ const allocator = gpa.allocator();
+
+ const ptr = try allocator.create(i32);
+ defer allocator.destroy(ptr);
+}
+
+test "realloc large object to larger alignment" {
+ if (!builtin.link_libc and builtin.os.tag == .wasi) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/22731
+
+ var gpa = DebugAllocator(test_config){};
+ defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
+ const allocator = gpa.allocator();
+
+ var debug_buffer: [1000]u8 = undefined;
+ var fba = std.heap.FixedBufferAllocator.init(&debug_buffer);
+ const debug_allocator = fba.allocator();
+
+ var slice = try allocator.alignedAlloc(u8, 16, default_page_size * 2 + 50);
+ defer allocator.free(slice);
+
+ const big_alignment: usize = default_page_size * 2;
+ // This loop allocates until we find a page that is not aligned to the big alignment.
+ var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
+ while (mem.isAligned(@intFromPtr(slice.ptr), big_alignment)) {
+ try stuff_to_free.append(slice);
+ slice = try allocator.alignedAlloc(u8, 16, default_page_size * 2 + 50);
+ }
+ while (stuff_to_free.popOrNull()) |item| {
+ allocator.free(item);
+ }
+ slice[0] = 0x12;
+ slice[16] = 0x34;
+
+ slice = try allocator.reallocAdvanced(slice, 32, default_page_size * 2 + 100);
+ try std.testing.expect(slice[0] == 0x12);
+ try std.testing.expect(slice[16] == 0x34);
+
+ slice = try allocator.reallocAdvanced(slice, 32, default_page_size * 2 + 25);
+ try std.testing.expect(slice[0] == 0x12);
+ try std.testing.expect(slice[16] == 0x34);
+
+ slice = try allocator.reallocAdvanced(slice, big_alignment, default_page_size * 2 + 100);
+ try std.testing.expect(slice[0] == 0x12);
+ try std.testing.expect(slice[16] == 0x34);
+}
+
+test "large object rejects shrinking to small" {
+ if (builtin.target.isWasm()) {
+ // Not expected to pass on targets that do not have memory mapping.
+ return error.SkipZigTest;
+ }
+
+ var failing_allocator = std.testing.FailingAllocator.init(std.heap.page_allocator, .{ .fail_index = 3 });
+ var gpa: DebugAllocator(.{}) = .{
+ .backing_allocator = failing_allocator.allocator(),
+ };
+ defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
+ const allocator = gpa.allocator();
+
+ var slice = try allocator.alloc(u8, default_page_size * 2 + 50);
+ defer allocator.free(slice);
+ slice[0] = 0x12;
+ slice[3] = 0x34;
+
+ try std.testing.expect(!allocator.resize(slice, 4));
+ try std.testing.expect(slice[0] == 0x12);
+ try std.testing.expect(slice[3] == 0x34);
+}
+
+test "objects of size 1024 and 2048" {
+ var gpa = DebugAllocator(test_config){};
+ defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
+ const allocator = gpa.allocator();
+
+ const slice = try allocator.alloc(u8, 1025);
+ const slice2 = try allocator.alloc(u8, 3000);
+
+ allocator.free(slice);
+ allocator.free(slice2);
+}
+
+test "setting a memory cap" {
+ var gpa = DebugAllocator(.{ .enable_memory_limit = true }){};
+ defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
+ const allocator = gpa.allocator();
+
+ gpa.requested_memory_limit = 1010;
+
+ const small = try allocator.create(i32);
+ try std.testing.expect(gpa.total_requested_bytes == 4);
+
+ const big = try allocator.alloc(u8, 1000);
+ try std.testing.expect(gpa.total_requested_bytes == 1004);
+
+ try std.testing.expectError(error.OutOfMemory, allocator.create(u64));
+
+ allocator.destroy(small);
+ try std.testing.expect(gpa.total_requested_bytes == 1000);
+
+ allocator.free(big);
+ try std.testing.expect(gpa.total_requested_bytes == 0);
+
+ const exact = try allocator.alloc(u8, 1010);
+ try std.testing.expect(gpa.total_requested_bytes == 1010);
+ allocator.free(exact);
+}
+
+test "large allocations count requested size not backing size" {
+ var gpa: DebugAllocator(.{ .enable_memory_limit = true }) = .{};
+ const allocator = gpa.allocator();
+
+ var buf = try allocator.alignedAlloc(u8, 1, default_page_size + 1);
+ try std.testing.expectEqual(default_page_size + 1, gpa.total_requested_bytes);
+ buf = try allocator.realloc(buf, 1);
+ try std.testing.expectEqual(1, gpa.total_requested_bytes);
+ buf = try allocator.realloc(buf, 2);
+ try std.testing.expectEqual(2, gpa.total_requested_bytes);
+}
+
+test "retain metadata and never unmap" {
+ var gpa = std.heap.DebugAllocator(.{
+ .safety = true,
+ .never_unmap = true,
+ .retain_metadata = true,
+ }){};
+ defer std.debug.assert(gpa.deinit() == .ok);
+ const allocator = gpa.allocator();
+
+ const alloc = try allocator.alloc(u8, 8);
+ allocator.free(alloc);
+
+ const alloc2 = try allocator.alloc(u8, 8);
+ allocator.free(alloc2);
+}
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
deleted file mode 100644
index c23f8dcd79..0000000000
--- a/lib/std/heap/general_purpose_allocator.zig
+++ /dev/null
@@ -1,1500 +0,0 @@
-//! # General Purpose Allocator
-//!
-//! ## Design Priorities
-//!
-//! ### `OptimizationMode.debug` and `OptimizationMode.release_safe`:
-//!
-//! * Detect double free, and emit stack trace of:
-//! - Where it was first allocated
-//! - Where it was freed the first time
-//! - Where it was freed the second time
-//!
-//! * Detect leaks and emit stack trace of:
-//! - Where it was allocated
-//!
-//! * When a page of memory is no longer needed, return it to the OS as soon as
-//!   possible, so that any later use of it causes a page fault.
-//!
-//! * Do not re-use memory slots, so that memory safety is upheld. For small
-//! allocations, this is handled here; for larger ones it is handled in the
-//! backing allocator (by default `std.heap.page_allocator`).
-//!
-//! * Make pointer math errors unlikely to harm memory from
-//! unrelated allocations.
-//!
-//! * It's OK for these mechanisms to cost some extra overhead bytes.
-//!
-//! * It's OK for these mechanisms to incur some performance cost.
-//!
-//! * Rogue memory writes should not harm the allocator's state.
-//!
-//! * Cross platform. Operates based on a backing allocator which makes it work
-//! everywhere, even freestanding.
-//!
-//! * Compile-time configuration.
-//!
-//! ### `OptimizationMode.release_fast` (note: not much work has gone into this use case yet):
-//!
-//! * Low fragmentation is the primary concern.
-//! * Worst-case latency is the secondary concern.
-//! * Average-case latency comes next.
-//! * Finally, keeping freed memory unmapped, and making pointer math errors
-//!   unlikely to harm memory from unrelated allocations, are nice-to-haves.
-//!
-//! ### `OptimizationMode.release_small` (note: not much work has gone into this use case yet):
-//!
-//! * Small binary code size of the executable is the primary concern.
-//! * Next, defer to the `.release_fast` priority list.
-//!
-//! ## Basic Design:
-//!
-//! Small allocations are divided into buckets:
-//!
-//! ```
-//! index obj_size
-//! 0 1
-//! 1 2
-//! 2 4
-//! 3 8
-//! 4 16
-//! 5 32
-//! 6 64
-//! 7 128
-//! 8 256
-//! 9 512
-//! 10 1024
-//! 11 2048
-//! ```
-//!
-//! The main allocator state has an array of all the "current" buckets for each
-//! size class. Each slot in the array can be null, meaning the bucket for that
-//! size class is not allocated. When the first object is allocated for a given
-//! size class, it allocates 1 page of memory from the OS. This page is
-//! divided into "slots" - one per allocated object. Along with the page of memory
-//! for object slots, as many pages as necessary are allocated to store the
-//! BucketHeader, followed by "used bits", and two stack traces for each slot
-//! (allocation trace and free trace).
-//!
-//! The "used bits" are 1 bit per slot representing whether the slot is used.
-//! Allocations use the data to iterate to find a free slot. Frees assert that the
-//! corresponding bit is 1 and set it to 0.
-//!
-//! Buckets have prev and next pointers. When there is only one bucket for a given
-//! size class, both prev and next point to itself. When all slots of a bucket are
-//! used, a new bucket is allocated, and enters the doubly linked list. The main
-//! allocator state tracks the "current" bucket for each size class. Leak detection
-//! currently only checks the current bucket.
-//!
-//! Resizing detects if the size class is unchanged or smaller, in which case the same
-//! pointer is returned unmodified. If a larger size class is required,
-//! `error.OutOfMemory` is returned.
-//!
-//! Large objects are allocated directly using the backing allocator and their metadata is stored
-//! in a `std.HashMap` using the backing allocator.
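-//!
-//! For example (illustrative): a 20-byte allocation rounds up to the 32-byte size
-//! class (index 5). With a 4096-byte page, that bucket holds 4096 / 32 = 128 slots,
-//! so its header is followed by 128 used bits and two stack traces per slot.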
-
-const std = @import("std");
-const builtin = @import("builtin");
-const log = std.log.scoped(.gpa);
-const math = std.math;
-const assert = std.debug.assert;
-const mem = std.mem;
-const Allocator = std.mem.Allocator;
-const page_size = std.mem.page_size;
-const StackTrace = std.builtin.StackTrace;
-
-/// Integer type for pointing to slots in a small allocation
-const SlotIndex = std.meta.Int(.unsigned, math.log2(page_size) + 1);
-
-const default_test_stack_trace_frames: usize = if (builtin.is_test) 10 else 6;
-const default_sys_stack_trace_frames: usize = if (std.debug.sys_can_stack_trace) default_test_stack_trace_frames else 0;
-const default_stack_trace_frames: usize = switch (builtin.mode) {
- .Debug => default_sys_stack_trace_frames,
- else => 0,
-};
-
-pub const Config = struct {
- /// Number of stack frames to capture.
- stack_trace_frames: usize = default_stack_trace_frames,
-
- /// If true, the allocator will have two fields:
- /// * `total_requested_bytes` which tracks the total allocated bytes of memory requested.
- /// * `requested_memory_limit` which causes allocations to return `error.OutOfMemory`
- /// when the `total_requested_bytes` exceeds this limit.
- /// If false, these fields will be `void`.
- enable_memory_limit: bool = false,
-
- /// Whether to enable safety checks.
- safety: bool = std.debug.runtime_safety,
-
- /// Whether the allocator may be used simultaneously from multiple threads.
- thread_safe: bool = !builtin.single_threaded,
-
- /// What type of mutex you'd like to use, for thread safety.
- /// when specified, the mutex type must have the same shape as `std.Thread.Mutex` and
- /// `DummyMutex`, and have no required fields. Specifying this field causes
- /// the `thread_safe` field to be ignored.
- ///
- /// when null (default):
- /// * the mutex type defaults to `std.Thread.Mutex` when thread_safe is enabled.
- /// * the mutex type defaults to `DummyMutex` otherwise.
- MutexType: ?type = null,
-
- /// This is a temporary debugging trick you can use to turn segfaults into more helpful
- /// logged error messages with stack trace details. The downside is that every allocation
- /// will be leaked, unless used with retain_metadata!
- never_unmap: bool = false,
-
- /// This is a temporary debugging aid that retains metadata about allocations indefinitely.
- /// This allows a greater range of double frees to be reported. All metadata is freed when
- /// deinit is called. When used with never_unmap, deliberately leaked memory is also freed
- /// during deinit. Currently should be used with never_unmap to avoid segfaults.
- /// TODO https://github.com/ziglang/zig/issues/4298 will allow use without never_unmap
- retain_metadata: bool = false,
-
- /// Enables emitting info messages with the size and address of every allocation.
- verbose_log: bool = false,
-};
-
-pub const Check = enum { ok, leak };
-
-/// Default initialization of this struct is deprecated; use `.init` instead.
-pub fn GeneralPurposeAllocator(comptime config: Config) type {
- return struct {
- backing_allocator: Allocator = std.heap.page_allocator,
- buckets: [small_bucket_count]Buckets = [1]Buckets{Buckets{}} ** small_bucket_count,
- cur_buckets: [small_bucket_count]?*BucketHeader = [1]?*BucketHeader{null} ** small_bucket_count,
- large_allocations: LargeAllocTable = .{},
- empty_buckets: if (config.retain_metadata) Buckets else void =
- if (config.retain_metadata) Buckets{} else {},
- bucket_node_pool: std.heap.MemoryPool(Buckets.Node) = std.heap.MemoryPool(Buckets.Node).init(std.heap.page_allocator),
-
- total_requested_bytes: @TypeOf(total_requested_bytes_init) = total_requested_bytes_init,
- requested_memory_limit: @TypeOf(requested_memory_limit_init) = requested_memory_limit_init,
-
- mutex: @TypeOf(mutex_init) = mutex_init,
-
- const Self = @This();
-
- /// The initial state of a `GeneralPurposeAllocator`, containing no allocations and backed by the system page allocator.
- pub const init: Self = .{
- .backing_allocator = std.heap.page_allocator,
- .buckets = [1]Buckets{.{}} ** small_bucket_count,
- .cur_buckets = [1]?*BucketHeader{null} ** small_bucket_count,
- .large_allocations = .{},
- .empty_buckets = if (config.retain_metadata) .{} else {},
- .bucket_node_pool = .init(std.heap.page_allocator),
- };
-
- const total_requested_bytes_init = if (config.enable_memory_limit) @as(usize, 0) else {};
- const requested_memory_limit_init = if (config.enable_memory_limit) @as(usize, math.maxInt(usize)) else {};
-
- const mutex_init = if (config.MutexType) |T|
- T{}
- else if (config.thread_safe)
- std.Thread.Mutex{}
- else
- DummyMutex{};
-
- const DummyMutex = struct {
- fn lock(_: *DummyMutex) void {}
- fn unlock(_: *DummyMutex) void {}
- };
-
- const stack_n = config.stack_trace_frames;
- const one_trace_size = @sizeOf(usize) * stack_n;
- const traces_per_slot = 2;
-
- pub const Error = mem.Allocator.Error;
-
- const small_bucket_count = math.log2(page_size);
- const largest_bucket_object_size = 1 << (small_bucket_count - 1);
- const LargestSizeClassInt = std.math.IntFittingRange(0, largest_bucket_object_size);
-
- const bucketCompare = struct {
- fn compare(a: *BucketHeader, b: *BucketHeader) std.math.Order {
- return std.math.order(@intFromPtr(a.page), @intFromPtr(b.page));
- }
- }.compare;
- const Buckets = std.Treap(*BucketHeader, bucketCompare);
-
- const LargeAlloc = struct {
- bytes: []u8,
- requested_size: if (config.enable_memory_limit) usize else void,
- stack_addresses: [trace_n][stack_n]usize,
- freed: if (config.retain_metadata) bool else void,
- log2_ptr_align: if (config.never_unmap and config.retain_metadata) u8 else void,
-
- const trace_n = if (config.retain_metadata) traces_per_slot else 1;
-
- fn dumpStackTrace(self: *LargeAlloc, trace_kind: TraceKind) void {
- std.debug.dumpStackTrace(self.getStackTrace(trace_kind));
- }
-
- fn getStackTrace(self: *LargeAlloc, trace_kind: TraceKind) std.builtin.StackTrace {
- assert(@intFromEnum(trace_kind) < trace_n);
- const stack_addresses = &self.stack_addresses[@intFromEnum(trace_kind)];
- var len: usize = 0;
- while (len < stack_n and stack_addresses[len] != 0) {
- len += 1;
- }
- return .{
- .instruction_addresses = stack_addresses,
- .index = len,
- };
- }
-
- fn captureStackTrace(self: *LargeAlloc, ret_addr: usize, trace_kind: TraceKind) void {
- assert(@intFromEnum(trace_kind) < trace_n);
- const stack_addresses = &self.stack_addresses[@intFromEnum(trace_kind)];
- collectStackTrace(ret_addr, stack_addresses);
- }
- };
- const LargeAllocTable = std.AutoHashMapUnmanaged(usize, LargeAlloc);
-
- // Bucket: In memory, in order:
- // * BucketHeader
- // * bucket_used_bits: [N]u8, // 1 bit for every slot; 1 byte for every 8 slots
- // -- below only exists when config.safety is true --
- // * requested_sizes: [N]LargestSizeClassInt // 1 int for every slot
- // * log2_ptr_aligns: [N]u8 // 1 byte for every slot
- // -- above only exists when config.safety is true --
- // * stack_trace_addresses: [N]usize, // traces_per_slot for every allocation
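- //
- // Illustrative layout (4096-byte page, size_class = 64): N = 64 slots, so the header is
- // followed by 8 bytes of used bits, then (when config.safety is enabled) 64 requested-size
- // entries and 64 alignment bytes, then 2 * 64 stack traces.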
-
- const BucketHeader = struct {
- page: [*]align(page_size) u8,
- alloc_cursor: SlotIndex,
- used_count: SlotIndex,
-
- fn usedBits(bucket: *BucketHeader, index: usize) *u8 {
- return @as(*u8, @ptrFromInt(@intFromPtr(bucket) + @sizeOf(BucketHeader) + index));
- }
-
- fn requestedSizes(bucket: *BucketHeader, size_class: usize) []LargestSizeClassInt {
- if (!config.safety) @compileError("requested size is only stored when safety is enabled");
- const start_ptr = @as([*]u8, @ptrCast(bucket)) + bucketRequestedSizesStart(size_class);
- const sizes = @as([*]LargestSizeClassInt, @ptrCast(@alignCast(start_ptr)));
- const slot_count = @divExact(page_size, size_class);
- return sizes[0..slot_count];
- }
-
- fn log2PtrAligns(bucket: *BucketHeader, size_class: usize) []u8 {
- if (!config.safety) @compileError("requested size is only stored when safety is enabled");
- const aligns_ptr = @as([*]u8, @ptrCast(bucket)) + bucketAlignsStart(size_class);
- const slot_count = @divExact(page_size, size_class);
- return aligns_ptr[0..slot_count];
- }
-
- fn stackTracePtr(
- bucket: *BucketHeader,
- size_class: usize,
- slot_index: SlotIndex,
- trace_kind: TraceKind,
- ) *[stack_n]usize {
- const start_ptr = @as([*]u8, @ptrCast(bucket)) + bucketStackFramesStart(size_class);
- const addr = start_ptr + one_trace_size * traces_per_slot * slot_index +
- @intFromEnum(trace_kind) * @as(usize, one_trace_size);
- return @ptrCast(@alignCast(addr));
- }
-
- fn captureStackTrace(
- bucket: *BucketHeader,
- ret_addr: usize,
- size_class: usize,
- slot_index: SlotIndex,
- trace_kind: TraceKind,
- ) void {
- // Initialize them to 0. When determining the count we must look
- // for non-zero addresses.
- const stack_addresses = bucket.stackTracePtr(size_class, slot_index, trace_kind);
- collectStackTrace(ret_addr, stack_addresses);
- }
-
- /// Only valid for buckets within `empty_buckets`, and relies on the `alloc_cursor`
- /// of empty buckets being set to `slot_count` when they are added to `empty_buckets`
- fn emptyBucketSizeClass(bucket: *BucketHeader) usize {
- return @divExact(page_size, bucket.alloc_cursor);
- }
- };
-
- pub fn allocator(self: *Self) Allocator {
- return .{
- .ptr = self,
- .vtable = &.{
- .alloc = alloc,
- .resize = resize,
- .free = free,
- },
- };
- }
-
- fn bucketStackTrace(
- bucket: *BucketHeader,
- size_class: usize,
- slot_index: SlotIndex,
- trace_kind: TraceKind,
- ) StackTrace {
- const stack_addresses = bucket.stackTracePtr(size_class, slot_index, trace_kind);
- var len: usize = 0;
- while (len < stack_n and stack_addresses[len] != 0) {
- len += 1;
- }
- return StackTrace{
- .instruction_addresses = stack_addresses,
- .index = len,
- };
- }
-
- fn bucketRequestedSizesStart(size_class: usize) usize {
- if (!config.safety) @compileError("requested sizes are not stored unless safety is enabled");
- return mem.alignForward(
- usize,
- @sizeOf(BucketHeader) + usedBitsCount(size_class),
- @alignOf(LargestSizeClassInt),
- );
- }
-
- fn bucketAlignsStart(size_class: usize) usize {
- if (!config.safety) @compileError("requested sizes are not stored unless safety is enabled");
- const slot_count = @divExact(page_size, size_class);
- return bucketRequestedSizesStart(size_class) + (@sizeOf(LargestSizeClassInt) * slot_count);
- }
-
- fn bucketStackFramesStart(size_class: usize) usize {
- const unaligned_start = if (config.safety) blk: {
- const slot_count = @divExact(page_size, size_class);
- break :blk bucketAlignsStart(size_class) + slot_count;
- } else @sizeOf(BucketHeader) + usedBitsCount(size_class);
- return mem.alignForward(
- usize,
- unaligned_start,
- @alignOf(usize),
- );
- }
-
- fn bucketSize(size_class: usize) usize {
- const slot_count = @divExact(page_size, size_class);
- return bucketStackFramesStart(size_class) + one_trace_size * traces_per_slot * slot_count;
- }
-
- fn usedBitsCount(size_class: usize) usize {
- const slot_count = @divExact(page_size, size_class);
- if (slot_count < 8) return 1;
- return @divExact(slot_count, 8);
- }
-
- fn detectLeaksInBucket(
- bucket: *BucketHeader,
- size_class: usize,
- used_bits_count: usize,
- ) bool {
- var leaks = false;
- var used_bits_byte: usize = 0;
- while (used_bits_byte < used_bits_count) : (used_bits_byte += 1) {
- const used_byte = bucket.usedBits(used_bits_byte).*;
- if (used_byte != 0) {
- var bit_index: u3 = 0;
- while (true) : (bit_index += 1) {
- const is_used = @as(u1, @truncate(used_byte >> bit_index)) != 0;
- if (is_used) {
- const slot_index = @as(SlotIndex, @intCast(used_bits_byte * 8 + bit_index));
- const stack_trace = bucketStackTrace(bucket, size_class, slot_index, .alloc);
- const addr = bucket.page + slot_index * size_class;
- log.err("memory address 0x{x} leaked: {}", .{
- @intFromPtr(addr), stack_trace,
- });
- leaks = true;
- }
- if (bit_index == math.maxInt(u3))
- break;
- }
- }
- }
- return leaks;
- }
-
- /// Emits log messages for leaks and then returns whether there were any leaks.
- pub fn detectLeaks(self: *Self) bool {
- var leaks = false;
-
- for (&self.buckets, 0..) |*buckets, bucket_i| {
- if (buckets.root == null) continue;
- const size_class = @as(usize, 1) << @as(math.Log2Int(usize), @intCast(bucket_i));
- const used_bits_count = usedBitsCount(size_class);
- var it = buckets.inorderIterator();
- while (it.next()) |node| {
- const bucket = node.key;
- leaks = detectLeaksInBucket(bucket, size_class, used_bits_count) or leaks;
- }
- }
- var it = self.large_allocations.valueIterator();
- while (it.next()) |large_alloc| {
- if (config.retain_metadata and large_alloc.freed) continue;
- const stack_trace = large_alloc.getStackTrace(.alloc);
- log.err("memory address 0x{x} leaked: {}", .{
- @intFromPtr(large_alloc.bytes.ptr), stack_trace,
- });
- leaks = true;
- }
- return leaks;
- }
-
- fn freeBucket(self: *Self, bucket: *BucketHeader, size_class: usize) void {
- const bucket_size = bucketSize(size_class);
- const bucket_slice = @as([*]align(@alignOf(BucketHeader)) u8, @ptrCast(bucket))[0..bucket_size];
- self.backing_allocator.free(bucket_slice);
- }
-
- fn freeRetainedMetadata(self: *Self) void {
- if (config.retain_metadata) {
- if (config.never_unmap) {
- // free large allocations that were intentionally leaked by never_unmap
- var it = self.large_allocations.iterator();
- while (it.next()) |large| {
- if (large.value_ptr.freed) {
- self.backing_allocator.rawFree(large.value_ptr.bytes, large.value_ptr.log2_ptr_align, @returnAddress());
- }
- }
- }
- // free retained metadata for small allocations
- while (self.empty_buckets.getMin()) |node| {
- // remove the node from the tree before destroying it
- var entry = self.empty_buckets.getEntryForExisting(node);
- entry.set(null);
-
- var bucket = node.key;
- if (config.never_unmap) {
- // free page that was intentionally leaked by never_unmap
- self.backing_allocator.free(bucket.page[0..page_size]);
- }
- // alloc_cursor was set to slot count when bucket added to empty_buckets
- self.freeBucket(bucket, bucket.emptyBucketSizeClass());
- self.bucket_node_pool.destroy(node);
- }
- self.empty_buckets.root = null;
- }
- }
-
- pub fn flushRetainedMetadata(self: *Self) void {
- if (!config.retain_metadata) {
- @compileError("'flushRetainedMetadata' requires 'config.retain_metadata = true'");
- }
- self.freeRetainedMetadata();
- // also remove entries from large_allocations
- var it = self.large_allocations.iterator();
- while (it.next()) |large| {
- if (large.value_ptr.freed) {
- _ = self.large_allocations.remove(@intFromPtr(large.value_ptr.bytes.ptr));
- }
- }
- }
-
- /// Returns `Check.leak` if there were leaks; `Check.ok` otherwise.
- pub fn deinit(self: *Self) Check {
- const leaks = if (config.safety) self.detectLeaks() else false;
- if (config.retain_metadata) {
- self.freeRetainedMetadata();
- }
- self.large_allocations.deinit(self.backing_allocator);
- self.bucket_node_pool.deinit();
- self.* = undefined;
- return @as(Check, @enumFromInt(@intFromBool(leaks)));
- }
-
- fn collectStackTrace(first_trace_addr: usize, addresses: *[stack_n]usize) void {
- if (stack_n == 0) return;
- @memset(addresses, 0);
- var stack_trace = StackTrace{
- .instruction_addresses = addresses,
- .index = 0,
- };
- std.debug.captureStackTrace(first_trace_addr, &stack_trace);
- }
-
- fn reportDoubleFree(ret_addr: usize, alloc_stack_trace: StackTrace, free_stack_trace: StackTrace) void {
- var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
- var second_free_stack_trace = StackTrace{
- .instruction_addresses = &addresses,
- .index = 0,
- };
- std.debug.captureStackTrace(ret_addr, &second_free_stack_trace);
- log.err("Double free detected. Allocation: {} First free: {} Second free: {}", .{
- alloc_stack_trace, free_stack_trace, second_free_stack_trace,
- });
- }
-
- const Slot = struct {
- bucket: *BucketHeader,
- slot_index: usize,
- ptr: [*]u8,
- };
-
- fn allocSlot(self: *Self, size_class: usize, trace_addr: usize) Error!Slot {
- const bucket_index = math.log2(size_class);
- var buckets = &self.buckets[bucket_index];
- const slot_count = @divExact(page_size, size_class);
- if (self.cur_buckets[bucket_index] == null or self.cur_buckets[bucket_index].?.alloc_cursor == slot_count) {
- const new_bucket = try self.createBucket(size_class);
- errdefer self.freeBucket(new_bucket, size_class);
- const node = try self.bucket_node_pool.create();
- node.key = new_bucket;
- var entry = buckets.getEntryFor(new_bucket);
- std.debug.assert(entry.node == null);
- entry.set(node);
- self.cur_buckets[bucket_index] = node.key;
- }
- const bucket = self.cur_buckets[bucket_index].?;
-
- const slot_index = bucket.alloc_cursor;
- bucket.alloc_cursor += 1;
-
- const used_bits_byte = bucket.usedBits(slot_index / 8);
- const used_bit_index: u3 = @as(u3, @intCast(slot_index % 8)); // TODO cast should be unnecessary
- used_bits_byte.* |= (@as(u8, 1) << used_bit_index);
- bucket.used_count += 1;
- bucket.captureStackTrace(trace_addr, size_class, slot_index, .alloc);
- return .{
- .bucket = bucket,
- .slot_index = slot_index,
- .ptr = bucket.page + slot_index * size_class,
- };
- }
-
- fn searchBucket(
- buckets: *Buckets,
- addr: usize,
- current_bucket: ?*BucketHeader,
- ) ?*BucketHeader {
- const search_page: [*]align(page_size) u8 = @ptrFromInt(mem.alignBackward(usize, addr, page_size));
- if (current_bucket != null and current_bucket.?.page == search_page) {
- return current_bucket;
- }
- var search_header: BucketHeader = undefined;
- search_header.page = search_page;
- const entry = buckets.getEntryFor(&search_header);
- return if (entry.node) |node| node.key else null;
- }
-
- /// This function assumes the object is in the large object storage regardless
- /// of the parameters.
- fn resizeLarge(
- self: *Self,
- old_mem: []u8,
- log2_old_align: u8,
- new_size: usize,
- ret_addr: usize,
- ) bool {
- const entry = self.large_allocations.getEntry(@intFromPtr(old_mem.ptr)) orelse {
- if (config.safety) {
- @panic("Invalid free");
- } else {
- unreachable;
- }
- };
-
- if (config.retain_metadata and entry.value_ptr.freed) {
- if (config.safety) {
- reportDoubleFree(ret_addr, entry.value_ptr.getStackTrace(.alloc), entry.value_ptr.getStackTrace(.free));
- @panic("Unrecoverable double free");
- } else {
- unreachable;
- }
- }
-
- if (config.safety and old_mem.len != entry.value_ptr.bytes.len) {
- var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
- var free_stack_trace = StackTrace{
- .instruction_addresses = &addresses,
- .index = 0,
- };
- std.debug.captureStackTrace(ret_addr, &free_stack_trace);
- log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {} Free: {}", .{
- entry.value_ptr.bytes.len,
- old_mem.len,
- entry.value_ptr.getStackTrace(.alloc),
- free_stack_trace,
- });
- }
-
- // Do memory limit accounting with requested sizes rather than what
- // backing_allocator returns because if we want to return
- // error.OutOfMemory, we have to leave allocation untouched, and
- // that is impossible to guarantee after calling
- // backing_allocator.rawResize.
- const prev_req_bytes = self.total_requested_bytes;
- if (config.enable_memory_limit) {
- const new_req_bytes = prev_req_bytes + new_size - entry.value_ptr.requested_size;
- if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) {
- return false;
- }
- self.total_requested_bytes = new_req_bytes;
- }
-
- if (!self.backing_allocator.rawResize(old_mem, log2_old_align, new_size, ret_addr)) {
- if (config.enable_memory_limit) {
- self.total_requested_bytes = prev_req_bytes;
- }
- return false;
- }
-
- if (config.enable_memory_limit) {
- entry.value_ptr.requested_size = new_size;
- }
-
- if (config.verbose_log) {
- log.info("large resize {d} bytes at {*} to {d}", .{
- old_mem.len, old_mem.ptr, new_size,
- });
- }
- entry.value_ptr.bytes = old_mem.ptr[0..new_size];
- entry.value_ptr.captureStackTrace(ret_addr, .alloc);
- return true;
- }
-
- /// This function assumes the object is in the large object storage regardless
- /// of the parameters.
- fn freeLarge(
- self: *Self,
- old_mem: []u8,
- log2_old_align: u8,
- ret_addr: usize,
- ) void {
- const entry = self.large_allocations.getEntry(@intFromPtr(old_mem.ptr)) orelse {
- if (config.safety) {
- @panic("Invalid free");
- } else {
- unreachable;
- }
- };
-
- if (config.retain_metadata and entry.value_ptr.freed) {
- if (config.safety) {
- reportDoubleFree(ret_addr, entry.value_ptr.getStackTrace(.alloc), entry.value_ptr.getStackTrace(.free));
- return;
- } else {
- unreachable;
- }
- }
-
- if (config.safety and old_mem.len != entry.value_ptr.bytes.len) {
- var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
- var free_stack_trace = StackTrace{
- .instruction_addresses = &addresses,
- .index = 0,
- };
- std.debug.captureStackTrace(ret_addr, &free_stack_trace);
- log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {} Free: {}", .{
- entry.value_ptr.bytes.len,
- old_mem.len,
- entry.value_ptr.getStackTrace(.alloc),
- free_stack_trace,
- });
- }
-
- if (!config.never_unmap) {
- self.backing_allocator.rawFree(old_mem, log2_old_align, ret_addr);
- }
-
- if (config.enable_memory_limit) {
- self.total_requested_bytes -= entry.value_ptr.requested_size;
- }
-
- if (config.verbose_log) {
- log.info("large free {d} bytes at {*}", .{ old_mem.len, old_mem.ptr });
- }
-
- if (!config.retain_metadata) {
- assert(self.large_allocations.remove(@intFromPtr(old_mem.ptr)));
- } else {
- entry.value_ptr.freed = true;
- entry.value_ptr.captureStackTrace(ret_addr, .free);
- }
- }
-
- pub fn setRequestedMemoryLimit(self: *Self, limit: usize) void {
- self.requested_memory_limit = limit;
- }
-
- fn resize(
- ctx: *anyopaque,
- old_mem: []u8,
- log2_old_align_u8: u8,
- new_size: usize,
- ret_addr: usize,
- ) bool {
- const self: *Self = @ptrCast(@alignCast(ctx));
- const log2_old_align = @as(Allocator.Log2Align, @intCast(log2_old_align_u8));
- self.mutex.lock();
- defer self.mutex.unlock();
-
- assert(old_mem.len != 0);
-
- const aligned_size = @max(old_mem.len, @as(usize, 1) << log2_old_align);
- if (aligned_size > largest_bucket_object_size) {
- return self.resizeLarge(old_mem, log2_old_align, new_size, ret_addr);
- }
- const size_class_hint = math.ceilPowerOfTwoAssert(usize, aligned_size);
-
- var bucket_index = math.log2(size_class_hint);
- var size_class: usize = size_class_hint;
- const bucket = while (bucket_index < small_bucket_count) : (bucket_index += 1) {
- if (searchBucket(&self.buckets[bucket_index], @intFromPtr(old_mem.ptr), self.cur_buckets[bucket_index])) |bucket| {
- break bucket;
- }
- size_class *= 2;
- } else blk: {
- if (config.retain_metadata) {
- if (!self.large_allocations.contains(@intFromPtr(old_mem.ptr))) {
- // object not in active buckets or a large allocation, so search empty buckets
- if (searchBucket(&self.empty_buckets, @intFromPtr(old_mem.ptr), null)) |bucket| {
- size_class = bucket.emptyBucketSizeClass();
- // bucket is empty so is_used below will always be false and we exit there
- break :blk bucket;
- } else {
- @panic("Invalid free");
- }
- }
- }
- return self.resizeLarge(old_mem, log2_old_align, new_size, ret_addr);
- };
- const byte_offset = @intFromPtr(old_mem.ptr) - @intFromPtr(bucket.page);
- const slot_index = @as(SlotIndex, @intCast(byte_offset / size_class));
- const used_byte_index = slot_index / 8;
- const used_bit_index = @as(u3, @intCast(slot_index % 8));
- const used_byte = bucket.usedBits(used_byte_index);
- const is_used = @as(u1, @truncate(used_byte.* >> used_bit_index)) != 0;
- if (!is_used) {
- if (config.safety) {
- reportDoubleFree(ret_addr, bucketStackTrace(bucket, size_class, slot_index, .alloc), bucketStackTrace(bucket, size_class, slot_index, .free));
- @panic("Unrecoverable double free");
- } else {
- unreachable;
- }
- }
-
- // Definitely an in-use small alloc now.
- if (config.safety) {
- const requested_size = bucket.requestedSizes(size_class)[slot_index];
- if (requested_size == 0) @panic("Invalid free");
- const log2_ptr_align = bucket.log2PtrAligns(size_class)[slot_index];
- if (old_mem.len != requested_size or log2_old_align != log2_ptr_align) {
- var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
- var free_stack_trace = StackTrace{
- .instruction_addresses = &addresses,
- .index = 0,
- };
- std.debug.captureStackTrace(ret_addr, &free_stack_trace);
- if (old_mem.len != requested_size) {
- log.err("Allocation size {d} bytes does not match resize size {d}. Allocation: {} Resize: {}", .{
- requested_size,
- old_mem.len,
- bucketStackTrace(bucket, size_class, slot_index, .alloc),
- free_stack_trace,
- });
- }
- if (log2_old_align != log2_ptr_align) {
- log.err("Allocation alignment {d} does not match resize alignment {d}. Allocation: {} Resize: {}", .{
- @as(usize, 1) << @as(math.Log2Int(usize), @intCast(log2_ptr_align)),
- @as(usize, 1) << @as(math.Log2Int(usize), @intCast(log2_old_align)),
- bucketStackTrace(bucket, size_class, slot_index, .alloc),
- free_stack_trace,
- });
- }
- }
- }
- const prev_req_bytes = self.total_requested_bytes;
- if (config.enable_memory_limit) {
- const new_req_bytes = prev_req_bytes + new_size - old_mem.len;
- if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) {
- return false;
- }
- self.total_requested_bytes = new_req_bytes;
- }
-
- const new_aligned_size = @max(new_size, @as(usize, 1) << log2_old_align);
- const new_size_class = math.ceilPowerOfTwoAssert(usize, new_aligned_size);
- if (new_size_class <= size_class) {
- if (old_mem.len > new_size) {
- @memset(old_mem[new_size..], undefined);
- }
- if (config.verbose_log) {
- log.info("small resize {d} bytes at {*} to {d}", .{
- old_mem.len, old_mem.ptr, new_size,
- });
- }
- if (config.safety) {
- bucket.requestedSizes(size_class)[slot_index] = @intCast(new_size);
- }
- return true;
- }
-
- if (config.enable_memory_limit) {
- self.total_requested_bytes = prev_req_bytes;
- }
- return false;
- }
-
- fn free(
- ctx: *anyopaque,
- old_mem: []u8,
- log2_old_align_u8: u8,
- ret_addr: usize,
- ) void {
- const self: *Self = @ptrCast(@alignCast(ctx));
- const log2_old_align = @as(Allocator.Log2Align, @intCast(log2_old_align_u8));
- self.mutex.lock();
- defer self.mutex.unlock();
-
- assert(old_mem.len != 0);
-
- const aligned_size = @max(old_mem.len, @as(usize, 1) << log2_old_align);
- if (aligned_size > largest_bucket_object_size) {
- self.freeLarge(old_mem, log2_old_align, ret_addr);
- return;
- }
- const size_class_hint = math.ceilPowerOfTwoAssert(usize, aligned_size);
-
- var bucket_index = math.log2(size_class_hint);
- var size_class: usize = size_class_hint;
- const bucket = while (bucket_index < small_bucket_count) : (bucket_index += 1) {
- if (searchBucket(&self.buckets[bucket_index], @intFromPtr(old_mem.ptr), self.cur_buckets[bucket_index])) |bucket| {
- break bucket;
- }
- size_class *= 2;
- } else blk: {
- if (config.retain_metadata) {
- if (!self.large_allocations.contains(@intFromPtr(old_mem.ptr))) {
- // object not in active buckets or a large allocation, so search empty buckets
- if (searchBucket(&self.empty_buckets, @intFromPtr(old_mem.ptr), null)) |bucket| {
- size_class = bucket.emptyBucketSizeClass();
- // bucket is empty so is_used below will always be false and we exit there
- break :blk bucket;
- } else {
- @panic("Invalid free");
- }
- }
- }
- self.freeLarge(old_mem, log2_old_align, ret_addr);
- return;
- };
- const byte_offset = @intFromPtr(old_mem.ptr) - @intFromPtr(bucket.page);
- const slot_index = @as(SlotIndex, @intCast(byte_offset / size_class));
- const used_byte_index = slot_index / 8;
- const used_bit_index = @as(u3, @intCast(slot_index % 8));
- const used_byte = bucket.usedBits(used_byte_index);
- const is_used = @as(u1, @truncate(used_byte.* >> used_bit_index)) != 0;
- if (!is_used) {
- if (config.safety) {
- reportDoubleFree(ret_addr, bucketStackTrace(bucket, size_class, slot_index, .alloc), bucketStackTrace(bucket, size_class, slot_index, .free));
- // A double free is recoverable here (unlike in resize), so just return.
- return;
- } else {
- unreachable;
- }
- }
-
- // Definitely an in-use small alloc now.
- if (config.safety) {
- const requested_size = bucket.requestedSizes(size_class)[slot_index];
- if (requested_size == 0) @panic("Invalid free");
- const log2_ptr_align = bucket.log2PtrAligns(size_class)[slot_index];
- if (old_mem.len != requested_size or log2_old_align != log2_ptr_align) {
- var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
- var free_stack_trace = StackTrace{
- .instruction_addresses = &addresses,
- .index = 0,
- };
- std.debug.captureStackTrace(ret_addr, &free_stack_trace);
- if (old_mem.len != requested_size) {
- log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {} Free: {}", .{
- requested_size,
- old_mem.len,
- bucketStackTrace(bucket, size_class, slot_index, .alloc),
- free_stack_trace,
- });
- }
- if (log2_old_align != log2_ptr_align) {
- log.err("Allocation alignment {d} does not match free alignment {d}. Allocation: {} Free: {}", .{
- @as(usize, 1) << @as(math.Log2Int(usize), @intCast(log2_ptr_align)),
- @as(usize, 1) << @as(math.Log2Int(usize), @intCast(log2_old_align)),
- bucketStackTrace(bucket, size_class, slot_index, .alloc),
- free_stack_trace,
- });
- }
- }
- }
-
- if (config.enable_memory_limit) {
- self.total_requested_bytes -= old_mem.len;
- }
-
- // Capture stack trace to be the "first free", in case a double free happens.
- bucket.captureStackTrace(ret_addr, size_class, slot_index, .free);
-
- used_byte.* &= ~(@as(u8, 1) << used_bit_index);
- bucket.used_count -= 1;
- if (config.safety) {
- bucket.requestedSizes(size_class)[slot_index] = 0;
- }
- if (bucket.used_count == 0) {
- var entry = self.buckets[bucket_index].getEntryFor(bucket);
- // save the node for destruction/insertion into empty_buckets
- const node = entry.node.?;
- entry.set(null);
- if (self.cur_buckets[bucket_index] == bucket) {
- self.cur_buckets[bucket_index] = null;
- }
- if (!config.never_unmap) {
- self.backing_allocator.free(bucket.page[0..page_size]);
- }
- if (!config.retain_metadata) {
- self.freeBucket(bucket, size_class);
- self.bucket_node_pool.destroy(node);
- } else {
- // move alloc_cursor to end so we can tell size_class later
- const slot_count = @divExact(page_size, size_class);
- bucket.alloc_cursor = @as(SlotIndex, @truncate(slot_count));
- var empty_entry = self.empty_buckets.getEntryFor(node.key);
- empty_entry.set(node);
- }
- } else {
- @memset(old_mem, undefined);
- }
- if (config.verbose_log) {
- log.info("small free {d} bytes at {*}", .{ old_mem.len, old_mem.ptr });
- }
- }
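For readers following the small-allocation bookkeeping above, the used-bits handling reduces to a plain bitmap over slots. A minimal standalone sketch, with a made-up size_class and offset chosen only for illustration:

const std = @import("std");

test "bucket used-bits arithmetic sketch" {
    // Assume 64-byte slots; free() above derives the slot from the byte offset.
    const size_class: usize = 64;
    const byte_offset: usize = 3 * size_class; // third slot in the bucket page
    const slot_index = byte_offset / size_class;
    const used_byte_index = slot_index / 8;
    const used_bit_index: u3 = @intCast(slot_index % 8);

    var used_bits = [_]u8{0} ** 8;
    // alloc marks the slot as used ...
    used_bits[used_byte_index] |= @as(u8, 1) << used_bit_index;
    try std.testing.expect((used_bits[used_byte_index] >> used_bit_index) & 1 == 1);
    // ... and free clears it again, exactly as in the code above.
    used_bits[used_byte_index] &= ~(@as(u8, 1) << used_bit_index);
    try std.testing.expect((used_bits[used_byte_index] >> used_bit_index) & 1 == 0);
}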
-
- // Returns true if an allocation of `size` bytes is within the specified
- // limit when enable_memory_limit is true; otherwise always returns true.
- fn isAllocationAllowed(self: *Self, size: usize) bool {
- if (config.enable_memory_limit) {
- const new_req_bytes = self.total_requested_bytes + size;
- if (new_req_bytes > self.requested_memory_limit)
- return false;
- self.total_requested_bytes = new_req_bytes;
- }
-
- return true;
- }
-
- fn alloc(ctx: *anyopaque, len: usize, log2_ptr_align: u8, ret_addr: usize) ?[*]u8 {
- const self: *Self = @ptrCast(@alignCast(ctx));
- self.mutex.lock();
- defer self.mutex.unlock();
- if (!self.isAllocationAllowed(len)) return null;
- return allocInner(self, len, @as(Allocator.Log2Align, @intCast(log2_ptr_align)), ret_addr) catch return null;
- }
-
- fn allocInner(
- self: *Self,
- len: usize,
- log2_ptr_align: Allocator.Log2Align,
- ret_addr: usize,
- ) Allocator.Error![*]u8 {
- const new_aligned_size = @max(len, @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align)));
- if (new_aligned_size > largest_bucket_object_size) {
- try self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1);
- const ptr = self.backing_allocator.rawAlloc(len, log2_ptr_align, ret_addr) orelse
- return error.OutOfMemory;
- const slice = ptr[0..len];
-
- const gop = self.large_allocations.getOrPutAssumeCapacity(@intFromPtr(slice.ptr));
- if (config.retain_metadata and !config.never_unmap) {
- // Backing allocator may be reusing memory that we're retaining metadata for
- assert(!gop.found_existing or gop.value_ptr.freed);
- } else {
- assert(!gop.found_existing); // This would mean the kernel double-mapped pages.
- }
- gop.value_ptr.bytes = slice;
- if (config.enable_memory_limit)
- gop.value_ptr.requested_size = len;
- gop.value_ptr.captureStackTrace(ret_addr, .alloc);
- if (config.retain_metadata) {
- gop.value_ptr.freed = false;
- if (config.never_unmap) {
- gop.value_ptr.log2_ptr_align = log2_ptr_align;
- }
- }
-
- if (config.verbose_log) {
- log.info("large alloc {d} bytes at {*}", .{ slice.len, slice.ptr });
- }
- return slice.ptr;
- }
-
- const new_size_class = math.ceilPowerOfTwoAssert(usize, new_aligned_size);
- const slot = try self.allocSlot(new_size_class, ret_addr);
- if (config.safety) {
- slot.bucket.requestedSizes(new_size_class)[slot.slot_index] = @intCast(len);
- slot.bucket.log2PtrAligns(new_size_class)[slot.slot_index] = log2_ptr_align;
- }
- if (config.verbose_log) {
- log.info("small alloc {d} bytes at {*}", .{ len, slot.ptr });
- }
- return slot.ptr;
- }
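The small/large dispatch in allocInner hinges on rounding the aligned length up to a power of two; a tiny sketch of that step, with numbers chosen only for illustration:

const std = @import("std");

test "size class rounding sketch" {
    // len = 70 with 8-byte alignment: aligned size 70, size class 128.
    const aligned_size = @max(@as(usize, 70), @as(usize, 1) << 3);
    const size_class = std.math.ceilPowerOfTwoAssert(usize, aligned_size);
    try std.testing.expectEqual(@as(usize, 128), size_class);
}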
-
- fn createBucket(self: *Self, size_class: usize) Error!*BucketHeader {
- const page = try self.backing_allocator.alignedAlloc(u8, page_size, page_size);
- errdefer self.backing_allocator.free(page);
-
- const bucket_size = bucketSize(size_class);
- const bucket_bytes = try self.backing_allocator.alignedAlloc(u8, @alignOf(BucketHeader), bucket_size);
- const ptr = @as(*BucketHeader, @ptrCast(bucket_bytes.ptr));
- ptr.* = BucketHeader{
- .page = page.ptr,
- .alloc_cursor = 0,
- .used_count = 0,
- };
- // Set the used bits to all zeroes
- @memset(@as([*]u8, @as(*[1]u8, ptr.usedBits(0)))[0..usedBitsCount(size_class)], 0);
- if (config.safety) {
- // Set the requested sizes to zeroes
- @memset(mem.sliceAsBytes(ptr.requestedSizes(size_class)), 0);
- }
- return ptr;
- }
- };
-}
-
-const TraceKind = enum {
- alloc,
- free,
-};
-
-const test_config = Config{};
-
-test "small allocations - free in same order" {
- var gpa = GeneralPurposeAllocator(test_config){};
- defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
- const allocator = gpa.allocator();
-
- var list = std.ArrayList(*u64).init(std.testing.allocator);
- defer list.deinit();
-
- var i: usize = 0;
- while (i < 513) : (i += 1) {
- const ptr = try allocator.create(u64);
- try list.append(ptr);
- }
-
- for (list.items) |ptr| {
- allocator.destroy(ptr);
- }
-}
-
-test "small allocations - free in reverse order" {
- var gpa = GeneralPurposeAllocator(test_config){};
- defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
- const allocator = gpa.allocator();
-
- var list = std.ArrayList(*u64).init(std.testing.allocator);
- defer list.deinit();
-
- var i: usize = 0;
- while (i < 513) : (i += 1) {
- const ptr = try allocator.create(u64);
- try list.append(ptr);
- }
-
- while (list.popOrNull()) |ptr| {
- allocator.destroy(ptr);
- }
-}
-
-test "large allocations" {
- var gpa = GeneralPurposeAllocator(test_config){};
- defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
- const allocator = gpa.allocator();
-
- const ptr1 = try allocator.alloc(u64, 42768);
- const ptr2 = try allocator.alloc(u64, 52768);
- allocator.free(ptr1);
- const ptr3 = try allocator.alloc(u64, 62768);
- allocator.free(ptr3);
- allocator.free(ptr2);
-}
-
-test "very large allocation" {
- var gpa = GeneralPurposeAllocator(test_config){};
- defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
- const allocator = gpa.allocator();
-
- try std.testing.expectError(error.OutOfMemory, allocator.alloc(u8, math.maxInt(usize)));
-}
-
-test "realloc" {
- var gpa = GeneralPurposeAllocator(test_config){};
- defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
- const allocator = gpa.allocator();
-
- var slice = try allocator.alignedAlloc(u8, @alignOf(u32), 1);
- defer allocator.free(slice);
- slice[0] = 0x12;
-
- // This reallocation should keep its pointer address.
- const old_slice = slice;
- slice = try allocator.realloc(slice, 2);
- try std.testing.expect(old_slice.ptr == slice.ptr);
- try std.testing.expect(slice[0] == 0x12);
- slice[1] = 0x34;
-
- // This requires upgrading to a larger size class
- slice = try allocator.realloc(slice, 17);
- try std.testing.expect(slice[0] == 0x12);
- try std.testing.expect(slice[1] == 0x34);
-}
-
-test "shrink" {
- var gpa = GeneralPurposeAllocator(test_config){};
- defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
- const allocator = gpa.allocator();
-
- var slice = try allocator.alloc(u8, 20);
- defer allocator.free(slice);
-
- @memset(slice, 0x11);
-
- try std.testing.expect(allocator.resize(slice, 17));
- slice = slice[0..17];
-
- for (slice) |b| {
- try std.testing.expect(b == 0x11);
- }
-
- try std.testing.expect(allocator.resize(slice, 16));
- slice = slice[0..16];
-
- for (slice) |b| {
- try std.testing.expect(b == 0x11);
- }
-}
-
-test "large object - grow" {
- if (builtin.target.isWasm()) {
- // Not expected to pass on targets that do not have memory mapping.
- return error.SkipZigTest;
- }
- var gpa: GeneralPurposeAllocator(test_config) = .{};
- defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
- const allocator = gpa.allocator();
-
- var slice1 = try allocator.alloc(u8, page_size * 2 - 20);
- defer allocator.free(slice1);
-
- const old = slice1;
- slice1 = try allocator.realloc(slice1, page_size * 2 - 10);
- try std.testing.expect(slice1.ptr == old.ptr);
-
- slice1 = try allocator.realloc(slice1, page_size * 2);
- try std.testing.expect(slice1.ptr == old.ptr);
-
- slice1 = try allocator.realloc(slice1, page_size * 2 + 1);
-}
-
-test "realloc small object to large object" {
- var gpa = GeneralPurposeAllocator(test_config){};
- defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
- const allocator = gpa.allocator();
-
- var slice = try allocator.alloc(u8, 70);
- defer allocator.free(slice);
- slice[0] = 0x12;
- slice[60] = 0x34;
-
- // This requires upgrading to a large object
- const large_object_size = page_size * 2 + 50;
- slice = try allocator.realloc(slice, large_object_size);
- try std.testing.expect(slice[0] == 0x12);
- try std.testing.expect(slice[60] == 0x34);
-}
-
-test "shrink large object to large object" {
- var gpa = GeneralPurposeAllocator(test_config){};
- defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
- const allocator = gpa.allocator();
-
- var slice = try allocator.alloc(u8, page_size * 2 + 50);
- defer allocator.free(slice);
- slice[0] = 0x12;
- slice[60] = 0x34;
-
- if (!allocator.resize(slice, page_size * 2 + 1)) return;
- slice = slice.ptr[0 .. page_size * 2 + 1];
- try std.testing.expect(slice[0] == 0x12);
- try std.testing.expect(slice[60] == 0x34);
-
- try std.testing.expect(allocator.resize(slice, page_size * 2 + 1));
- slice = slice[0 .. page_size * 2 + 1];
- try std.testing.expect(slice[0] == 0x12);
- try std.testing.expect(slice[60] == 0x34);
-
- slice = try allocator.realloc(slice, page_size * 2);
- try std.testing.expect(slice[0] == 0x12);
- try std.testing.expect(slice[60] == 0x34);
-}
-
-test "shrink large object to large object with larger alignment" {
- if (!builtin.link_libc and builtin.os.tag == .wasi) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/22731
-
- var gpa = GeneralPurposeAllocator(test_config){};
- defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
- const allocator = gpa.allocator();
-
- var debug_buffer: [1000]u8 = undefined;
- var fba = std.heap.FixedBufferAllocator.init(&debug_buffer);
- const debug_allocator = fba.allocator();
-
- const alloc_size = page_size * 2 + 50;
- var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
- defer allocator.free(slice);
-
- const big_alignment: usize = switch (builtin.os.tag) {
- .windows => page_size * 32, // Windows aligns to 64K.
- else => page_size * 2,
- };
- // This loop allocates until we find a page that is not aligned to the big
- // alignment. Then we shrink the allocation after the loop, but increase the
- // alignment to the higher one, which we know will force it to be reallocated.
- var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
- while (mem.isAligned(@intFromPtr(slice.ptr), big_alignment)) {
- try stuff_to_free.append(slice);
- slice = try allocator.alignedAlloc(u8, 16, alloc_size);
- }
- while (stuff_to_free.popOrNull()) |item| {
- allocator.free(item);
- }
- slice[0] = 0x12;
- slice[60] = 0x34;
-
- slice = try allocator.reallocAdvanced(slice, big_alignment, alloc_size / 2);
- try std.testing.expect(slice[0] == 0x12);
- try std.testing.expect(slice[60] == 0x34);
-}
-
-test "realloc large object to small object" {
- var gpa = GeneralPurposeAllocator(test_config){};
- defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
- const allocator = gpa.allocator();
-
- var slice = try allocator.alloc(u8, page_size * 2 + 50);
- defer allocator.free(slice);
- slice[0] = 0x12;
- slice[16] = 0x34;
-
- slice = try allocator.realloc(slice, 19);
- try std.testing.expect(slice[0] == 0x12);
- try std.testing.expect(slice[16] == 0x34);
-}
-
-test "overridable mutexes" {
- var gpa = GeneralPurposeAllocator(.{ .MutexType = std.Thread.Mutex }){
- .backing_allocator = std.testing.allocator,
- .mutex = std.Thread.Mutex{},
- };
- defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
- const allocator = gpa.allocator();
-
- const ptr = try allocator.create(i32);
- defer allocator.destroy(ptr);
-}
-
-test "non-page-allocator backing allocator" {
- var gpa = GeneralPurposeAllocator(.{}){ .backing_allocator = std.testing.allocator };
- defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
- const allocator = gpa.allocator();
-
- const ptr = try allocator.create(i32);
- defer allocator.destroy(ptr);
-}
-
-test "realloc large object to larger alignment" {
- if (!builtin.link_libc and builtin.os.tag == .wasi) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/22731
-
- var gpa = GeneralPurposeAllocator(test_config){};
- defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
- const allocator = gpa.allocator();
-
- var debug_buffer: [1000]u8 = undefined;
- var fba = std.heap.FixedBufferAllocator.init(&debug_buffer);
- const debug_allocator = fba.allocator();
-
- var slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50);
- defer allocator.free(slice);
-
- const big_alignment: usize = switch (builtin.os.tag) {
- .windows => page_size * 32, // Windows aligns to 64K.
- else => page_size * 2,
- };
- // This loop allocates until we find a page that is not aligned to the big alignment.
- var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
- while (mem.isAligned(@intFromPtr(slice.ptr), big_alignment)) {
- try stuff_to_free.append(slice);
- slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50);
- }
- while (stuff_to_free.popOrNull()) |item| {
- allocator.free(item);
- }
- slice[0] = 0x12;
- slice[16] = 0x34;
-
- slice = try allocator.reallocAdvanced(slice, 32, page_size * 2 + 100);
- try std.testing.expect(slice[0] == 0x12);
- try std.testing.expect(slice[16] == 0x34);
-
- slice = try allocator.reallocAdvanced(slice, 32, page_size * 2 + 25);
- try std.testing.expect(slice[0] == 0x12);
- try std.testing.expect(slice[16] == 0x34);
-
- slice = try allocator.reallocAdvanced(slice, big_alignment, page_size * 2 + 100);
- try std.testing.expect(slice[0] == 0x12);
- try std.testing.expect(slice[16] == 0x34);
-}
-
-test "large object shrinks to small but allocation fails during shrink" {
- if (builtin.target.isWasm()) {
- // Not expected to pass on targets that do not have memory mapping.
- return error.SkipZigTest;
- }
-
- var failing_allocator = std.testing.FailingAllocator.init(std.heap.page_allocator, .{ .fail_index = 3 });
- var gpa = GeneralPurposeAllocator(.{}){ .backing_allocator = failing_allocator.allocator() };
- defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
- const allocator = gpa.allocator();
-
- var slice = try allocator.alloc(u8, page_size * 2 + 50);
- defer allocator.free(slice);
- slice[0] = 0x12;
- slice[3] = 0x34;
-
- // Next allocation will fail in the backing allocator of the GeneralPurposeAllocator
-
- try std.testing.expect(allocator.resize(slice, 4));
- slice = slice[0..4];
- try std.testing.expect(slice[0] == 0x12);
- try std.testing.expect(slice[3] == 0x34);
-}
-
-test "objects of size 1024 and 2048" {
- var gpa = GeneralPurposeAllocator(test_config){};
- defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
- const allocator = gpa.allocator();
-
- const slice = try allocator.alloc(u8, 1025);
- const slice2 = try allocator.alloc(u8, 3000);
-
- allocator.free(slice);
- allocator.free(slice2);
-}
-
-test "setting a memory cap" {
- var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
- defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
- const allocator = gpa.allocator();
-
- gpa.setRequestedMemoryLimit(1010);
-
- const small = try allocator.create(i32);
- try std.testing.expect(gpa.total_requested_bytes == 4);
-
- const big = try allocator.alloc(u8, 1000);
- try std.testing.expect(gpa.total_requested_bytes == 1004);
-
- try std.testing.expectError(error.OutOfMemory, allocator.create(u64));
-
- allocator.destroy(small);
- try std.testing.expect(gpa.total_requested_bytes == 1000);
-
- allocator.free(big);
- try std.testing.expect(gpa.total_requested_bytes == 0);
-
- const exact = try allocator.alloc(u8, 1010);
- try std.testing.expect(gpa.total_requested_bytes == 1010);
- allocator.free(exact);
-}
-
-test "double frees" {
- // use a GPA to back a GPA to check for leaks of the latter's metadata
- var backing_gpa = GeneralPurposeAllocator(.{ .safety = true }){};
- defer std.testing.expect(backing_gpa.deinit() == .ok) catch @panic("leak");
-
- const GPA = GeneralPurposeAllocator(.{ .safety = true, .never_unmap = true, .retain_metadata = true });
- var gpa = GPA{ .backing_allocator = backing_gpa.allocator() };
- defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
- const allocator = gpa.allocator();
-
- // detect a small allocation double free, even though bucket is emptied
- const index: usize = 6;
- const size_class: usize = @as(usize, 1) << 6;
- const small = try allocator.alloc(u8, size_class);
- try std.testing.expect(GPA.searchBucket(&gpa.buckets[index], @intFromPtr(small.ptr), gpa.cur_buckets[index]) != null);
- allocator.free(small);
- try std.testing.expect(GPA.searchBucket(&gpa.buckets[index], @intFromPtr(small.ptr), gpa.cur_buckets[index]) == null);
- try std.testing.expect(GPA.searchBucket(&gpa.empty_buckets, @intFromPtr(small.ptr), null) != null);
-
- // detect a large allocation double free
- const large = try allocator.alloc(u8, 2 * page_size);
- try std.testing.expect(gpa.large_allocations.contains(@intFromPtr(large.ptr)));
- try std.testing.expectEqual(gpa.large_allocations.getEntry(@intFromPtr(large.ptr)).?.value_ptr.bytes, large);
- allocator.free(large);
- try std.testing.expect(gpa.large_allocations.contains(@intFromPtr(large.ptr)));
- try std.testing.expect(gpa.large_allocations.getEntry(@intFromPtr(large.ptr)).?.value_ptr.freed);
-
- const normal_small = try allocator.alloc(u8, size_class);
- defer allocator.free(normal_small);
- const normal_large = try allocator.alloc(u8, 2 * page_size);
- defer allocator.free(normal_large);
-
- // check that flushing retained metadata doesn't disturb live allocations
- gpa.flushRetainedMetadata();
- try std.testing.expect(gpa.empty_buckets.root == null);
- try std.testing.expect(GPA.searchBucket(&gpa.buckets[index], @intFromPtr(normal_small.ptr), gpa.cur_buckets[index]) != null);
- try std.testing.expect(gpa.large_allocations.contains(@intFromPtr(normal_large.ptr)));
- try std.testing.expect(!gpa.large_allocations.contains(@intFromPtr(large.ptr)));
-}
-
-test "empty bucket size class" {
- const GPA = GeneralPurposeAllocator(.{ .safety = true, .never_unmap = true, .retain_metadata = true });
- var gpa = GPA{};
- defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
- const allocator = gpa.allocator();
-
- // allocate and free to create an empty bucket
- const size_class: usize = @as(usize, 1) << 6;
- const small = try allocator.alloc(u8, size_class);
- allocator.free(small);
-
- // the metadata tracking system relies on alloc_cursor of empty buckets
- // being set to the slot count so that we can get back the size class.
- const empty_bucket = GPA.searchBucket(&gpa.empty_buckets, @intFromPtr(small.ptr), null).?;
- try std.testing.expect(empty_bucket.emptyBucketSizeClass() == size_class);
-}
-
-test "bug 9995 fix, large allocs count requested size not backing size" {
- // with AtLeast, buffer likely to be larger than requested, especially when shrinking
- var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
- const allocator = gpa.allocator();
-
- var buf = try allocator.alignedAlloc(u8, 1, page_size + 1);
- try std.testing.expect(gpa.total_requested_bytes == page_size + 1);
- buf = try allocator.realloc(buf, 1);
- try std.testing.expect(gpa.total_requested_bytes == 1);
- buf = try allocator.realloc(buf, 2);
- try std.testing.expect(gpa.total_requested_bytes == 2);
-}
-
-test "retain metadata and never unmap" {
- var gpa = std.heap.GeneralPurposeAllocator(.{
- .safety = true,
- .never_unmap = true,
- .retain_metadata = true,
- }){};
- defer std.debug.assert(gpa.deinit() == .ok);
- const allocator = gpa.allocator();
-
- const alloc = try allocator.alloc(u8, 8);
- allocator.free(alloc);
-
- const alloc2 = try allocator.alloc(u8, 8);
- allocator.free(alloc2);
-}
diff --git a/lib/std/heap/log_to_writer_allocator.zig b/lib/std/heap/log_to_writer_allocator.zig
deleted file mode 100644
index b5c86c9beb..0000000000
--- a/lib/std/heap/log_to_writer_allocator.zig
+++ /dev/null
@@ -1,118 +0,0 @@
-const std = @import("../std.zig");
-const Allocator = std.mem.Allocator;
-
-/// This allocator is used in front of another allocator and logs to the provided writer
-/// on every call to the allocator. Writer errors are ignored.
-pub fn LogToWriterAllocator(comptime Writer: type) type {
- return struct {
- parent_allocator: Allocator,
- writer: Writer,
-
- const Self = @This();
-
- pub fn init(parent_allocator: Allocator, writer: Writer) Self {
- return Self{
- .parent_allocator = parent_allocator,
- .writer = writer,
- };
- }
-
- pub fn allocator(self: *Self) Allocator {
- return .{
- .ptr = self,
- .vtable = &.{
- .alloc = alloc,
- .resize = resize,
- .free = free,
- },
- };
- }
-
- fn alloc(
- ctx: *anyopaque,
- len: usize,
- log2_ptr_align: u8,
- ra: usize,
- ) ?[*]u8 {
- const self: *Self = @ptrCast(@alignCast(ctx));
- self.writer.print("alloc : {}", .{len}) catch {};
- const result = self.parent_allocator.rawAlloc(len, log2_ptr_align, ra);
- if (result != null) {
- self.writer.print(" success!\n", .{}) catch {};
- } else {
- self.writer.print(" failure!\n", .{}) catch {};
- }
- return result;
- }
-
- fn resize(
- ctx: *anyopaque,
- buf: []u8,
- log2_buf_align: u8,
- new_len: usize,
- ra: usize,
- ) bool {
- const self: *Self = @ptrCast(@alignCast(ctx));
- if (new_len <= buf.len) {
- self.writer.print("shrink: {} to {}\n", .{ buf.len, new_len }) catch {};
- } else {
- self.writer.print("expand: {} to {}", .{ buf.len, new_len }) catch {};
- }
-
- if (self.parent_allocator.rawResize(buf, log2_buf_align, new_len, ra)) {
- if (new_len > buf.len) {
- self.writer.print(" success!\n", .{}) catch {};
- }
- return true;
- }
-
- std.debug.assert(new_len > buf.len);
- self.writer.print(" failure!\n", .{}) catch {};
- return false;
- }
-
- fn free(
- ctx: *anyopaque,
- buf: []u8,
- log2_buf_align: u8,
- ra: usize,
- ) void {
- const self: *Self = @ptrCast(@alignCast(ctx));
- self.writer.print("free : {}\n", .{buf.len}) catch {};
- self.parent_allocator.rawFree(buf, log2_buf_align, ra);
- }
- };
-}
-
-/// This allocator is used in front of another allocator and logs to the provided writer
-/// on every call to the allocator. Writer errors are ignored.
-pub fn logToWriterAllocator(
- parent_allocator: Allocator,
- writer: anytype,
-) LogToWriterAllocator(@TypeOf(writer)) {
- return LogToWriterAllocator(@TypeOf(writer)).init(parent_allocator, writer);
-}
-
-test "LogToWriterAllocator" {
- var log_buf: [255]u8 = undefined;
- var fbs = std.io.fixedBufferStream(&log_buf);
-
- var allocator_buf: [10]u8 = undefined;
- var fixedBufferAllocator = std.mem.validationWrap(std.heap.FixedBufferAllocator.init(&allocator_buf));
- var allocator_state = logToWriterAllocator(fixedBufferAllocator.allocator(), fbs.writer());
- const allocator = allocator_state.allocator();
-
- var a = try allocator.alloc(u8, 10);
- try std.testing.expect(allocator.resize(a, 5));
- a = a[0..5];
- try std.testing.expect(!allocator.resize(a, 20));
- allocator.free(a);
-
- try std.testing.expectEqualSlices(u8,
- \\alloc : 10 success!
- \\shrink: 10 to 5
- \\expand: 5 to 20 failure!
- \\free : 5
- \\
- , fbs.getWritten());
-}
diff --git a/lib/std/heap/logging_allocator.zig b/lib/std/heap/logging_allocator.zig
deleted file mode 100644
index 706f2ac544..0000000000
--- a/lib/std/heap/logging_allocator.zig
+++ /dev/null
@@ -1,133 +0,0 @@
-const std = @import("../std.zig");
-const Allocator = std.mem.Allocator;
-
-/// This allocator is used in front of another allocator and logs to `std.log`
-/// on every call to the allocator.
-/// For logging to a `std.io.Writer` see `std.heap.LogToWriterAllocator`
-pub fn LoggingAllocator(
- comptime success_log_level: std.log.Level,
- comptime failure_log_level: std.log.Level,
-) type {
- return ScopedLoggingAllocator(.default, success_log_level, failure_log_level);
-}
-
-/// This allocator is used in front of another allocator and logs to `std.log`
-/// with the given scope on every call to the allocator.
-/// For logging to a `std.io.Writer` see `std.heap.LogToWriterAllocator`
-pub fn ScopedLoggingAllocator(
- comptime scope: @Type(.enum_literal),
- comptime success_log_level: std.log.Level,
- comptime failure_log_level: std.log.Level,
-) type {
- const log = std.log.scoped(scope);
-
- return struct {
- parent_allocator: Allocator,
-
- const Self = @This();
-
- pub fn init(parent_allocator: Allocator) Self {
- return .{
- .parent_allocator = parent_allocator,
- };
- }
-
- pub fn allocator(self: *Self) Allocator {
- return .{
- .ptr = self,
- .vtable = &.{
- .alloc = alloc,
- .resize = resize,
- .free = free,
- },
- };
- }
-
- // This function is required as the `std.log.log` function is not public
- inline fn logHelper(comptime log_level: std.log.Level, comptime format: []const u8, args: anytype) void {
- switch (log_level) {
- .err => log.err(format, args),
- .warn => log.warn(format, args),
- .info => log.info(format, args),
- .debug => log.debug(format, args),
- }
- }
-
- fn alloc(
- ctx: *anyopaque,
- len: usize,
- log2_ptr_align: u8,
- ra: usize,
- ) ?[*]u8 {
- const self: *Self = @ptrCast(@alignCast(ctx));
- const result = self.parent_allocator.rawAlloc(len, log2_ptr_align, ra);
- if (result != null) {
- logHelper(
- success_log_level,
- "alloc - success - len: {}, ptr_align: {}",
- .{ len, log2_ptr_align },
- );
- } else {
- logHelper(
- failure_log_level,
- "alloc - failure: OutOfMemory - len: {}, ptr_align: {}",
- .{ len, log2_ptr_align },
- );
- }
- return result;
- }
-
- fn resize(
- ctx: *anyopaque,
- buf: []u8,
- log2_buf_align: u8,
- new_len: usize,
- ra: usize,
- ) bool {
- const self: *Self = @ptrCast(@alignCast(ctx));
- if (self.parent_allocator.rawResize(buf, log2_buf_align, new_len, ra)) {
- if (new_len <= buf.len) {
- logHelper(
- success_log_level,
- "shrink - success - {} to {}, buf_align: {}",
- .{ buf.len, new_len, log2_buf_align },
- );
- } else {
- logHelper(
- success_log_level,
- "expand - success - {} to {}, buf_align: {}",
- .{ buf.len, new_len, log2_buf_align },
- );
- }
-
- return true;
- }
-
- std.debug.assert(new_len > buf.len);
- logHelper(
- failure_log_level,
- "expand - failure - {} to {}, buf_align: {}",
- .{ buf.len, new_len, log2_buf_align },
- );
- return false;
- }
-
- fn free(
- ctx: *anyopaque,
- buf: []u8,
- log2_buf_align: u8,
- ra: usize,
- ) void {
- const self: *Self = @ptrCast(@alignCast(ctx));
- self.parent_allocator.rawFree(buf, log2_buf_align, ra);
- logHelper(success_log_level, "free - len: {}", .{buf.len});
- }
- };
-}
-
-/// This allocator is used in front of another allocator and logs to `std.log`
-/// on every call to the allocator.
-/// For logging to a `std.io.Writer` see `std.heap.LogToWriterAllocator`
-pub fn loggingAllocator(parent_allocator: Allocator) LoggingAllocator(.debug, .err) {
- return LoggingAllocator(.debug, .err).init(parent_allocator);
-}
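Before its removal, typical usage of this wrapper looked roughly like the following. This is a sketch against the old API shown above, not part of the commit:

const std = @import("std");

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();

    // Wrap the parent allocator; every alloc/resize/free is reported via std.log.
    var logging = std.heap.loggingAllocator(gpa.allocator());
    const allocator = logging.allocator();

    const buf = try allocator.alloc(u8, 16);
    defer allocator.free(buf);
}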
diff --git a/lib/std/heap/sbrk_allocator.zig b/lib/std/heap/sbrk_allocator.zig
index 08933fed52..7cae161bea 100644
--- a/lib/std/heap/sbrk_allocator.zig
+++ b/lib/std/heap/sbrk_allocator.zig
@@ -3,6 +3,7 @@ const builtin = @import("builtin");
const math = std.math;
const Allocator = std.mem.Allocator;
const mem = std.mem;
+const heap = std.heap;
const assert = std.debug.assert;
pub fn SbrkAllocator(comptime sbrk: *const fn (n: usize) usize) type {
@@ -18,7 +19,7 @@ pub fn SbrkAllocator(comptime sbrk: *const fn (n: usize) usize) type {
const max_usize = math.maxInt(usize);
const ushift = math.Log2Int(usize);
const bigpage_size = 64 * 1024;
- const pages_per_bigpage = bigpage_size / mem.page_size;
+ const pages_per_bigpage = bigpage_size / heap.pageSize();
const bigpage_count = max_usize / bigpage_size;
/// Because of storing free list pointers, the minimum size class is 3.
@@ -58,7 +59,7 @@ pub fn SbrkAllocator(comptime sbrk: *const fn (n: usize) usize) type {
}
const next_addr = next_addrs[class];
- if (next_addr % mem.page_size == 0) {
+ if (next_addr % heap.pageSize() == 0) {
const addr = allocBigPages(1);
if (addr == 0) return null;
//std.debug.print("allocated fresh slot_size={d} class={d} addr=0x{x}\n", .{
@@ -153,7 +154,7 @@ pub fn SbrkAllocator(comptime sbrk: *const fn (n: usize) usize) type {
big_frees[class] = node.*;
return top_free_ptr;
}
- return sbrk(pow2_pages * pages_per_bigpage * mem.page_size);
+ return sbrk(pow2_pages * pages_per_bigpage * heap.pageSize());
}
};
}
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index 9e3f4f3936..0f56762c27 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -8,26 +8,6 @@ const testing = std.testing;
const Endian = std.builtin.Endian;
const native_endian = builtin.cpu.arch.endian();
-/// Compile time known minimum page size.
-/// https://github.com/ziglang/zig/issues/4082
-pub const page_size = switch (builtin.cpu.arch) {
- .wasm32, .wasm64 => 64 * 1024,
- .aarch64 => switch (builtin.os.tag) {
- .macos, .ios, .watchos, .tvos, .visionos => 16 * 1024,
- else => 4 * 1024,
- },
- .sparc64 => 8 * 1024,
- .loongarch32, .loongarch64 => switch (builtin.os.tag) {
- // Linux default KConfig value is 16KiB
- .linux => 16 * 1024,
- // FIXME:
- // There is no other OS supported yet. Use the same value
- // as Linux for now.
- else => 16 * 1024,
- },
- else => 4 * 1024,
-};
-
/// The standard library currently thoroughly depends on byte size
/// being 8 bits. (see the use of u8 throughout allocation code as
/// the "byte" type.) Code which depends on this can reference this
@@ -38,6 +18,60 @@ pub const byte_size_in_bits = 8;
pub const Allocator = @import("mem/Allocator.zig");
+/// Stored as a power-of-two.
+pub const Alignment = enum(math.Log2Int(usize)) {
+ @"1" = 0,
+ @"2" = 1,
+ @"4" = 2,
+ @"8" = 3,
+ @"16" = 4,
+ @"32" = 5,
+ @"64" = 6,
+ _,
+
+ pub fn toByteUnits(a: Alignment) usize {
+ return @as(usize, 1) << @intFromEnum(a);
+ }
+
+ pub fn fromByteUnits(n: usize) Alignment {
+ assert(std.math.isPowerOfTwo(n));
+ return @enumFromInt(@ctz(n));
+ }
+
+ pub fn order(lhs: Alignment, rhs: Alignment) std.math.Order {
+ return std.math.order(@intFromEnum(lhs), @intFromEnum(rhs));
+ }
+
+ pub fn compare(lhs: Alignment, op: std.math.CompareOperator, rhs: Alignment) bool {
+ return std.math.compare(@intFromEnum(lhs), op, @intFromEnum(rhs));
+ }
+
+ pub fn max(lhs: Alignment, rhs: Alignment) Alignment {
+ return @enumFromInt(@max(@intFromEnum(lhs), @intFromEnum(rhs)));
+ }
+
+ pub fn min(lhs: Alignment, rhs: Alignment) Alignment {
+ return @enumFromInt(@min(@intFromEnum(lhs), @intFromEnum(rhs)));
+ }
+
+ /// Return next address with this alignment.
+ pub fn forward(a: Alignment, address: usize) usize {
+ const x = (@as(usize, 1) << @intFromEnum(a)) - 1;
+ return (address + x) & ~x;
+ }
+
+ /// Return previous address with this alignment.
+ pub fn backward(a: Alignment, address: usize) usize {
+ const x = (@as(usize, 1) << @intFromEnum(a)) - 1;
+ return address & ~x;
+ }
+
+ /// Return whether address is aligned to this amount.
+ pub fn check(a: Alignment, address: usize) bool {
+ return @ctz(address) >= @intFromEnum(a);
+ }
+};
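A minimal sketch of how the new Alignment helpers compose, assuming the API added just above; the values are chosen only for illustration:

const std = @import("std");

test "Alignment sketch" {
    const Alignment = std.mem.Alignment;
    const a = Alignment.fromByteUnits(16);
    try std.testing.expectEqual(@as(usize, 16), a.toByteUnits());
    try std.testing.expectEqual(@as(usize, 32), a.forward(17)); // next 16-aligned address
    try std.testing.expectEqual(@as(usize, 16), a.backward(17)); // previous 16-aligned address
    try std.testing.expect(a.check(64)); // 64 is 16-aligned
    try std.testing.expect(!a.check(8)); // 8 is not
}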
+
/// Detects and asserts if the std.mem.Allocator interface is violated by the caller
/// or the allocator.
pub fn ValidationAllocator(comptime T: type) type {
@@ -58,6 +92,7 @@ pub fn ValidationAllocator(comptime T: type) type {
.vtable = &.{
.alloc = alloc,
.resize = resize,
+ .remap = remap,
.free = free,
},
};
@@ -71,41 +106,54 @@ pub fn ValidationAllocator(comptime T: type) type {
pub fn alloc(
ctx: *anyopaque,
n: usize,
- log2_ptr_align: u8,
+ alignment: mem.Alignment,
ret_addr: usize,
) ?[*]u8 {
assert(n > 0);
const self: *Self = @ptrCast(@alignCast(ctx));
const underlying = self.getUnderlyingAllocatorPtr();
- const result = underlying.rawAlloc(n, log2_ptr_align, ret_addr) orelse
+ const result = underlying.rawAlloc(n, alignment, ret_addr) orelse
return null;
- assert(mem.isAlignedLog2(@intFromPtr(result), log2_ptr_align));
+ assert(alignment.check(@intFromPtr(result)));
return result;
}
pub fn resize(
ctx: *anyopaque,
buf: []u8,
- log2_buf_align: u8,
+ alignment: Alignment,
new_len: usize,
ret_addr: usize,
) bool {
const self: *Self = @ptrCast(@alignCast(ctx));
assert(buf.len > 0);
const underlying = self.getUnderlyingAllocatorPtr();
- return underlying.rawResize(buf, log2_buf_align, new_len, ret_addr);
+ return underlying.rawResize(buf, alignment, new_len, ret_addr);
+ }
+
+ pub fn remap(
+ ctx: *anyopaque,
+ buf: []u8,
+ alignment: Alignment,
+ new_len: usize,
+ ret_addr: usize,
+ ) ?[*]u8 {
+ const self: *Self = @ptrCast(@alignCast(ctx));
+ assert(buf.len > 0);
+ const underlying = self.getUnderlyingAllocatorPtr();
+ return underlying.rawRemap(buf, alignment, new_len, ret_addr);
}
pub fn free(
ctx: *anyopaque,
buf: []u8,
- log2_buf_align: u8,
+ alignment: Alignment,
ret_addr: usize,
) void {
const self: *Self = @ptrCast(@alignCast(ctx));
assert(buf.len > 0);
const underlying = self.getUnderlyingAllocatorPtr();
- underlying.rawFree(buf, log2_buf_align, ret_addr);
+ underlying.rawFree(buf, alignment, ret_addr);
}
pub fn reset(self: *Self) void {
@@ -133,27 +181,9 @@ pub fn alignAllocLen(full_len: usize, alloc_len: usize, len_align: u29) usize {
return adjusted;
}
-const fail_allocator = Allocator{
- .ptr = undefined,
- .vtable = &failAllocator_vtable,
-};
-
-const failAllocator_vtable = Allocator.VTable{
- .alloc = failAllocatorAlloc,
- .resize = Allocator.noResize,
- .free = Allocator.noFree,
-};
-
-fn failAllocatorAlloc(_: *anyopaque, n: usize, log2_alignment: u8, ra: usize) ?[*]u8 {
- _ = n;
- _ = log2_alignment;
- _ = ra;
- return null;
-}
-
test "Allocator basics" {
- try testing.expectError(error.OutOfMemory, fail_allocator.alloc(u8, 1));
- try testing.expectError(error.OutOfMemory, fail_allocator.allocSentinel(u8, 1, 0));
+ try testing.expectError(error.OutOfMemory, testing.failing_allocator.alloc(u8, 1));
+ try testing.expectError(error.OutOfMemory, testing.failing_allocator.allocSentinel(u8, 1, 0));
}
test "Allocator.resize" {
@@ -1068,16 +1098,18 @@ pub fn indexOfSentinel(comptime T: type, comptime sentinel: T, p: [*:sentinel]co
// as we don't read into a new page. This should be the case for most architectures
// which use paged memory, however should be confirmed before adding a new arch below.
.aarch64, .x86, .x86_64 => if (std.simd.suggestVectorLength(T)) |block_len| {
+ const page_size = std.heap.pageSize();
const block_size = @sizeOf(T) * block_len;
const Block = @Vector(block_len, T);
const mask: Block = @splat(sentinel);
- comptime std.debug.assert(std.mem.page_size % block_size == 0);
+ comptime assert(std.heap.page_size_max % @sizeOf(Block) == 0);
+ assert(page_size % @sizeOf(Block) == 0);
// First block may be unaligned
const start_addr = @intFromPtr(&p[i]);
- const offset_in_page = start_addr & (std.mem.page_size - 1);
- if (offset_in_page <= std.mem.page_size - block_size) {
+ const offset_in_page = start_addr & (page_size - 1);
+ if (offset_in_page <= page_size - @sizeOf(Block)) {
// Will not read past the end of a page, full block.
const block: Block = p[i..][0..block_len].*;
const matches = block == mask;
@@ -1097,7 +1129,7 @@ pub fn indexOfSentinel(comptime T: type, comptime sentinel: T, p: [*:sentinel]co
}
}
- std.debug.assert(std.mem.isAligned(@intFromPtr(&p[i]), block_size));
+ assert(std.mem.isAligned(@intFromPtr(&p[i]), block_size));
while (true) {
const block: *const Block = @ptrCast(@alignCast(p[i..][0..block_len]));
const matches = block.* == mask;
@@ -1120,23 +1152,24 @@ pub fn indexOfSentinel(comptime T: type, comptime sentinel: T, p: [*:sentinel]co
test "indexOfSentinel vector paths" {
const Types = [_]type{ u8, u16, u32, u64 };
const allocator = std.testing.allocator;
+ const page_size = std.heap.pageSize();
inline for (Types) |T| {
const block_len = std.simd.suggestVectorLength(T) orelse continue;
// Allocate three pages so we guarantee a page-crossing address with a full page after
- const memory = try allocator.alloc(T, 3 * std.mem.page_size / @sizeOf(T));
+ const memory = try allocator.alloc(T, 3 * page_size / @sizeOf(T));
defer allocator.free(memory);
@memset(memory, 0xaa);
// Find starting page-alignment = 0
var start: usize = 0;
const start_addr = @intFromPtr(&memory);
- start += (std.mem.alignForward(usize, start_addr, std.mem.page_size) - start_addr) / @sizeOf(T);
- try testing.expect(start < std.mem.page_size / @sizeOf(T));
+ start += (std.mem.alignForward(usize, start_addr, page_size) - start_addr) / @sizeOf(T);
+ try testing.expect(start < page_size / @sizeOf(T));
// Validate all sub-block alignments
- const search_len = std.mem.page_size / @sizeOf(T);
+ const search_len = page_size / @sizeOf(T);
memory[start + search_len] = 0;
for (0..block_len) |offset| {
try testing.expectEqual(search_len - offset, indexOfSentinel(T, 0, @ptrCast(&memory[start + offset])));
@@ -1144,7 +1177,7 @@ test "indexOfSentinel vector paths" {
memory[start + search_len] = 0xaa;
// Validate page boundary crossing
- const start_page_boundary = start + (std.mem.page_size / @sizeOf(T));
+ const start_page_boundary = start + (page_size / @sizeOf(T));
memory[start_page_boundary + block_len] = 0;
for (0..block_len) |offset| {
try testing.expectEqual(2 * block_len - offset, indexOfSentinel(T, 0, @ptrCast(&memory[start_page_boundary - block_len + offset])));
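The page-boundary guard in indexOfSentinel above comes down to modular arithmetic on the start address; a sketch with made-up numbers (4096-byte pages, 16-byte vector blocks):

const std = @import("std");

test "page-boundary guard sketch" {
    const page_size: usize = 4096;
    const block_size: usize = 16;
    // An address 8 bytes before a page boundary: reading a full block there
    // would spill into the next page, so the unaligned fast path must be skipped.
    const start_addr: usize = 0x10000 - 8;
    const offset_in_page = start_addr & (page_size - 1);
    try std.testing.expect(!(offset_in_page <= page_size - block_size));
}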
diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig
index bc3ef028b9..1ad9533116 100644
--- a/lib/std/mem/Allocator.zig
+++ b/lib/std/mem/Allocator.zig
@@ -6,29 +6,34 @@ const math = std.math;
const mem = std.mem;
const Allocator = @This();
const builtin = @import("builtin");
+const Alignment = std.mem.Alignment;
pub const Error = error{OutOfMemory};
pub const Log2Align = math.Log2Int(usize);
/// The type erased pointer to the allocator implementation.
-/// Any comparison of this field may result in illegal behavior, since it may be set to
-/// `undefined` in cases where the allocator implementation does not have any associated
-/// state.
+///
+/// Any comparison of this field may result in illegal behavior, since it may
+/// be set to `undefined` in cases where the allocator implementation does not
+/// have any associated state.
ptr: *anyopaque,
vtable: *const VTable,
pub const VTable = struct {
- /// Attempt to allocate exactly `len` bytes aligned to `1 << ptr_align`.
+ /// Return a pointer to `len` bytes with specified `alignment`, or return
+ /// `null` indicating the allocation failed.
///
/// `ret_addr` is optionally provided as the first return address of the
/// allocation call stack. If the value is `0` it means no return address
/// has been provided.
- alloc: *const fn (ctx: *anyopaque, len: usize, ptr_align: u8, ret_addr: usize) ?[*]u8,
+ alloc: *const fn (*anyopaque, len: usize, alignment: Alignment, ret_addr: usize) ?[*]u8,
- /// Attempt to expand or shrink memory in place. `buf.len` must equal the
- /// length requested from the most recent successful call to `alloc` or
- /// `resize`. `buf_align` must equal the same value that was passed as the
- /// `ptr_align` parameter to the original `alloc` call.
+ /// Attempt to expand or shrink memory in place.
+ ///
+ /// `memory.len` must equal the length requested from the most recent
+ /// successful call to `alloc`, `resize`, or `remap`. `alignment` must
+ /// equal the same value that was passed as the `alignment` parameter to
+ /// the original `alloc` call.
///
/// A result of `true` indicates the resize was successful and the
/// allocation now has the same address but a size of `new_len`. `false`
@@ -40,72 +45,113 @@ pub const VTable = struct {
/// `ret_addr` is optionally provided as the first return address of the
/// allocation call stack. If the value is `0` it means no return address
/// has been provided.
- resize: *const fn (ctx: *anyopaque, buf: []u8, buf_align: u8, new_len: usize, ret_addr: usize) bool,
+ resize: *const fn (*anyopaque, memory: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) bool,
- /// Free and invalidate a buffer.
+ /// Attempt to expand or shrink memory, allowing relocation.
+ ///
+ /// `memory.len` must equal the length requested from the most recent
+ /// successful call to `alloc`, `resize`, or `remap`. `alignment` must
+ /// equal the same value that was passed as the `alignment` parameter to
+ /// the original `alloc` call.
+ ///
+ /// A non-`null` return value indicates the resize was successful. The
+ /// allocation may have same address, or may have been relocated. In either
+ /// case, the allocation now has size of `new_len`. A `null` return value
+ /// indicates that the resize would be equivalent to allocating new memory,
+ /// copying the bytes from the old memory, and then freeing the old memory.
+ /// In such case, it is more efficient for the caller to perform the copy.
+ ///
+ /// `new_len` must be greater than zero.
///
- /// `buf.len` must equal the most recent length returned by `alloc` or
- /// given to a successful `resize` call.
+ /// `ret_addr` is optionally provided as the first return address of the
+ /// allocation call stack. If the value is `0` it means no return address
+ /// has been provided.
+ remap: *const fn (*anyopaque, memory: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) ?[*]u8,
+
+ /// Free and invalidate a region of memory.
///
- /// `buf_align` must equal the same value that was passed as the
- /// `ptr_align` parameter to the original `alloc` call.
+ /// `memory.len` must equal the length requested from the most recent
+ /// successful call to `alloc`, `resize`, or `remap`. `alignment` must
+ /// equal the same value that was passed as the `alignment` parameter to
+ /// the original `alloc` call.
///
/// `ret_addr` is optionally provided as the first return address of the
/// allocation call stack. If the value is `0` it means no return address
/// has been provided.
- free: *const fn (ctx: *anyopaque, buf: []u8, buf_align: u8, ret_addr: usize) void,
+ free: *const fn (*anyopaque, memory: []u8, alignment: Alignment, ret_addr: usize) void,
};
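To illustrate the updated vtable shape, including the new remap member, here is a minimal always-failing allocator written against these signatures. It is a sketch mirroring the fail_allocator this commit removes from std.mem; the real std.testing.failing_allocator is more featureful.

const std = @import("std");
const Allocator = std.mem.Allocator;
const Alignment = std.mem.Alignment;

fn failAlloc(_: *anyopaque, _: usize, _: Alignment, _: usize) ?[*]u8 {
    return null; // always report failure, surfaced as OutOfMemory to callers
}

const fail_allocator: Allocator = .{
    .ptr = undefined,
    .vtable = &.{
        .alloc = failAlloc,
        .resize = Allocator.noResize,
        .remap = Allocator.noRemap,
        .free = Allocator.noFree,
    },
};

test "failing vtable sketch" {
    try std.testing.expectError(error.OutOfMemory, fail_allocator.alloc(u8, 1));
}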
pub fn noResize(
self: *anyopaque,
- buf: []u8,
- log2_buf_align: u8,
+ memory: []u8,
+ alignment: Alignment,
new_len: usize,
ret_addr: usize,
) bool {
_ = self;
- _ = buf;
- _ = log2_buf_align;
+ _ = memory;
+ _ = alignment;
_ = new_len;
_ = ret_addr;
return false;
}
+pub fn noRemap(
+ self: *anyopaque,
+ memory: []u8,
+ alignment: Alignment,
+ new_len: usize,
+ ret_addr: usize,
+) ?[*]u8 {
+ _ = self;
+ _ = memory;
+ _ = alignment;
+ _ = new_len;
+ _ = ret_addr;
+ return null;
+}
+
pub fn noFree(
self: *anyopaque,
- buf: []u8,
- log2_buf_align: u8,
+ memory: []u8,
+ alignment: Alignment,
ret_addr: usize,
) void {
_ = self;
- _ = buf;
- _ = log2_buf_align;
+ _ = memory;
+ _ = alignment;
_ = ret_addr;
}
/// This function is not intended to be called except from within the
-/// implementation of an Allocator
-pub inline fn rawAlloc(self: Allocator, len: usize, ptr_align: u8, ret_addr: usize) ?[*]u8 {
- return self.vtable.alloc(self.ptr, len, ptr_align, ret_addr);
+/// implementation of an `Allocator`.
+pub inline fn rawAlloc(a: Allocator, len: usize, alignment: Alignment, ret_addr: usize) ?[*]u8 {
+ return a.vtable.alloc(a.ptr, len, alignment, ret_addr);
}
/// This function is not intended to be called except from within the
-/// implementation of an Allocator
-pub inline fn rawResize(self: Allocator, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool {
- return self.vtable.resize(self.ptr, buf, log2_buf_align, new_len, ret_addr);
+/// implementation of an `Allocator`.
+pub inline fn rawResize(a: Allocator, memory: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) bool {
+ return a.vtable.resize(a.ptr, memory, alignment, new_len, ret_addr);
}
/// This function is not intended to be called except from within the
-/// implementation of an Allocator
-pub inline fn rawFree(self: Allocator, buf: []u8, log2_buf_align: u8, ret_addr: usize) void {
- return self.vtable.free(self.ptr, buf, log2_buf_align, ret_addr);
+/// implementation of an `Allocator`.
+pub inline fn rawRemap(a: Allocator, memory: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) ?[*]u8 {
+ return a.vtable.remap(a.ptr, memory, alignment, new_len, ret_addr);
+}
+
+/// This function is not intended to be called except from within the
+/// implementation of an `Allocator`.
+pub inline fn rawFree(a: Allocator, memory: []u8, alignment: Alignment, ret_addr: usize) void {
+ return a.vtable.free(a.ptr, memory, alignment, ret_addr);
}
/// Returns a pointer to undefined memory.
/// Call `destroy` with the result to free the memory.
-pub fn create(self: Allocator, comptime T: type) Error!*T {
+pub fn create(a: Allocator, comptime T: type) Error!*T {
if (@sizeOf(T) == 0) return @as(*T, @ptrFromInt(math.maxInt(usize)));
- const ptr: *T = @ptrCast(try self.allocBytesWithAlignment(@alignOf(T), @sizeOf(T), @returnAddress()));
+ const ptr: *T = @ptrCast(try a.allocBytesWithAlignment(@alignOf(T), @sizeOf(T), @returnAddress()));
return ptr;
}
@@ -117,7 +163,7 @@ pub fn destroy(self: Allocator, ptr: anytype) void {
const T = info.child;
if (@sizeOf(T) == 0) return;
const non_const_ptr = @as([*]u8, @ptrCast(@constCast(ptr)));
- self.rawFree(non_const_ptr[0..@sizeOf(T)], log2a(info.alignment), @returnAddress());
+ self.rawFree(non_const_ptr[0..@sizeOf(T)], .fromByteUnits(info.alignment), @returnAddress());
}
/// Allocates an array of `n` items of type `T` and sets all the
@@ -215,46 +261,92 @@ fn allocWithSizeAndAlignment(self: Allocator, comptime size: usize, comptime ali
}
fn allocBytesWithAlignment(self: Allocator, comptime alignment: u29, byte_count: usize, return_address: usize) Error![*]align(alignment) u8 {
- // The Zig Allocator interface is not intended to solve alignments beyond
- // the minimum OS page size. For these use cases, the caller must use OS
- // APIs directly.
- comptime assert(alignment <= mem.page_size);
-
if (byte_count == 0) {
const ptr = comptime std.mem.alignBackward(usize, math.maxInt(usize), alignment);
return @as([*]align(alignment) u8, @ptrFromInt(ptr));
}
- const byte_ptr = self.rawAlloc(byte_count, log2a(alignment), return_address) orelse return Error.OutOfMemory;
- // TODO: https://github.com/ziglang/zig/issues/4298
+ const byte_ptr = self.rawAlloc(byte_count, .fromByteUnits(alignment), return_address) orelse return Error.OutOfMemory;
@memset(byte_ptr[0..byte_count], undefined);
- return @as([*]align(alignment) u8, @alignCast(byte_ptr));
+ return @alignCast(byte_ptr);
}
-/// Requests to modify the size of an allocation. It is guaranteed to not move
-/// the pointer, however the allocator implementation may refuse the resize
-/// request by returning `false`.
-pub fn resize(self: Allocator, old_mem: anytype, new_n: usize) bool {
- const Slice = @typeInfo(@TypeOf(old_mem)).pointer;
+/// Request to modify the size of an allocation.
+///
+/// It is guaranteed to not move the pointer, however the allocator
+/// implementation may refuse the resize request by returning `false`.
+///
+/// `allocation` may be an empty slice, in which case a new allocation is made.
+///
+/// `new_len` may be zero, in which case the allocation is freed.
+pub fn resize(self: Allocator, allocation: anytype, new_len: usize) bool {
+ const Slice = @typeInfo(@TypeOf(allocation)).pointer;
const T = Slice.child;
- if (new_n == 0) {
- self.free(old_mem);
+ const alignment = Slice.alignment;
+ if (new_len == 0) {
+ self.free(allocation);
return true;
}
- if (old_mem.len == 0) {
+ if (allocation.len == 0) {
return false;
}
- const old_byte_slice = mem.sliceAsBytes(old_mem);
+ const old_memory = mem.sliceAsBytes(allocation);
+ // I would like to use saturating multiplication here, but LLVM cannot lower it
+ // on WebAssembly: https://github.com/ziglang/zig/issues/9660
+ //const new_len_bytes = new_len *| @sizeOf(T);
+ const new_len_bytes = math.mul(usize, @sizeOf(T), new_len) catch return false;
+ return self.rawResize(old_memory, .fromByteUnits(alignment), new_len_bytes, @returnAddress());
+}
+
+/// Request to modify the size of an allocation, allowing relocation.
+///
+/// A non-`null` return value indicates the resize was successful. The
+/// allocation may have same address, or may have been relocated. In either
+/// case, the allocation now has size of `new_len`. A `null` return value
+/// indicates that the resize would be equivalent to allocating new memory,
+/// copying the bytes from the old memory, and then freeing the old memory.
+/// In such case, it is more efficient for the caller to perform those
+/// operations.
+///
+/// `allocation` may be an empty slice, in which case a new allocation is made.
+///
+/// `new_len` may be zero, in which case the allocation is freed.
+pub fn remap(self: Allocator, allocation: anytype, new_len: usize) t: {
+ const Slice = @typeInfo(@TypeOf(allocation)).pointer;
+ break :t ?[]align(Slice.alignment) Slice.child;
+} {
+ const Slice = @typeInfo(@TypeOf(allocation)).pointer;
+ const T = Slice.child;
+ const alignment = Slice.alignment;
+ if (new_len == 0) {
+ self.free(allocation);
+ return allocation[0..0];
+ }
+ if (allocation.len == 0) {
+ return null;
+ }
+ const old_memory = mem.sliceAsBytes(allocation);
// I would like to use saturating multiplication here, but LLVM cannot lower it
// on WebAssembly: https://github.com/ziglang/zig/issues/9660
- //const new_byte_count = new_n *| @sizeOf(T);
- const new_byte_count = math.mul(usize, @sizeOf(T), new_n) catch return false;
- return self.rawResize(old_byte_slice, log2a(Slice.alignment), new_byte_count, @returnAddress());
+ //const new_len_bytes = new_len *| @sizeOf(T);
+ const new_len_bytes = math.mul(usize, @sizeOf(T), new_len) catch return null;
+ const new_ptr = self.rawRemap(old_memory, .fromByteUnits(alignment), new_len_bytes, @returnAddress()) orelse return null;
+ const new_memory: []align(alignment) u8 = @alignCast(new_ptr[0..new_len_bytes]);
+ return mem.bytesAsSlice(T, new_memory);
}
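From the caller's side, resize, remap, and realloc differ mainly in whether the pointer may move and who performs the copy. A sketch using the page allocator; whether resize or remap succeed depends on the allocator implementation:

const std = @import("std");

test "resize vs remap vs realloc sketch" {
    const allocator = std.heap.page_allocator;

    var buf = try allocator.alloc(u8, 64);
    defer allocator.free(buf);

    // resize: in place only; the allocator may refuse.
    if (allocator.resize(buf, 32)) buf = buf[0..32];

    // remap: may relocate; null means the caller should alloc/copy/free itself.
    if (allocator.remap(buf, 48)) |relocated| buf = relocated;

    // realloc: always produces the requested size unless out of memory.
    buf = try allocator.realloc(buf, 128);
}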
/// This function requests a new byte size for an existing allocation, which
/// can be larger, smaller, or the same size as the old memory allocation.
+///
/// If `new_n` is 0, this is the same as `free` and it always succeeds.
+///
+/// `old_mem` may have length zero, which makes a new allocation.
+///
+/// This function only fails on out-of-memory conditions, unlike:
+/// * `remap` which returns `null` when the `Allocator` implementation cannot
+/// do the realloc more efficiently than the caller
+/// * `resize` which returns `false` when the `Allocator` implementation cannot
+/// change the size without relocating the allocation.
pub fn realloc(self: Allocator, old_mem: anytype, new_n: usize) t: {
const Slice = @typeInfo(@TypeOf(old_mem)).pointer;
break :t Error![]align(Slice.alignment) Slice.child;
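
By contrast, `realloc` performs the relocate-or-copy fallback internally and fails
only on out-of-memory conditions. A minimal test-style sketch (sizes are arbitrary):

    const std = @import("std");

    test "realloc grows in place or relocates as needed" {
        const gpa = std.testing.allocator;
        var buf = try gpa.alloc(u8, 64);
        defer gpa.free(buf);
        buf = try gpa.realloc(buf, 256);
        try std.testing.expect(buf.len == 256);
    }
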
@@ -285,18 +377,17 @@ pub fn reallocAdvanced(
const old_byte_slice = mem.sliceAsBytes(old_mem);
const byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
// Note: can't set shrunk memory to undefined as memory shouldn't be modified on realloc failure
- if (self.rawResize(old_byte_slice, log2a(Slice.alignment), byte_count, return_address)) {
- const new_bytes: []align(Slice.alignment) u8 = @alignCast(old_byte_slice.ptr[0..byte_count]);
+ if (self.rawRemap(old_byte_slice, .fromByteUnits(Slice.alignment), byte_count, return_address)) |p| {
+ const new_bytes: []align(Slice.alignment) u8 = @alignCast(p[0..byte_count]);
return mem.bytesAsSlice(T, new_bytes);
}
- const new_mem = self.rawAlloc(byte_count, log2a(Slice.alignment), return_address) orelse
+ const new_mem = self.rawAlloc(byte_count, .fromByteUnits(Slice.alignment), return_address) orelse
return error.OutOfMemory;
const copy_len = @min(byte_count, old_byte_slice.len);
@memcpy(new_mem[0..copy_len], old_byte_slice[0..copy_len]);
- // TODO https://github.com/ziglang/zig/issues/4298
@memset(old_byte_slice, undefined);
- self.rawFree(old_byte_slice, log2a(Slice.alignment), return_address);
+ self.rawFree(old_byte_slice, .fromByteUnits(Slice.alignment), return_address);
const new_bytes: []align(Slice.alignment) u8 = @alignCast(new_mem[0..byte_count]);
return mem.bytesAsSlice(T, new_bytes);
@@ -311,9 +402,8 @@ pub fn free(self: Allocator, memory: anytype) void {
const bytes_len = bytes.len + if (Slice.sentinel() != null) @sizeOf(Slice.child) else 0;
if (bytes_len == 0) return;
const non_const_ptr = @constCast(bytes.ptr);
- // TODO: https://github.com/ziglang/zig/issues/4298
@memset(non_const_ptr[0..bytes_len], undefined);
- self.rawFree(non_const_ptr[0..bytes_len], log2a(Slice.alignment), @returnAddress());
+ self.rawFree(non_const_ptr[0..bytes_len], .fromByteUnits(Slice.alignment), @returnAddress());
}
/// Copies `m` to newly allocated memory. Caller owns the memory.
@@ -330,17 +420,3 @@ pub fn dupeZ(allocator: Allocator, comptime T: type, m: []const T) Error![:0]T {
new_buf[m.len] = 0;
return new_buf[0..m.len :0];
}
-
-/// TODO replace callsites with `@log2` after this proposal is implemented:
-/// https://github.com/ziglang/zig/issues/13642
-inline fn log2a(x: anytype) switch (@typeInfo(@TypeOf(x))) {
- .int => math.Log2Int(@TypeOf(x)),
- .comptime_int => comptime_int,
- else => @compileError("int please"),
-} {
- switch (@typeInfo(@TypeOf(x))) {
- .int => return math.log2_int(@TypeOf(x), x),
- .comptime_int => return math.log2(x),
- else => @compileError("bad"),
- }
-}
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index 365fb9f05f..1183408694 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -305,6 +305,13 @@ pub const MAP = switch (native_arch) {
else => @compileError("missing std.os.linux.MAP constants for this architecture"),
};
+pub const MREMAP = packed struct(u32) {
+ MAYMOVE: bool = false,
+ FIXED: bool = false,
+ DONTUNMAP: bool = false,
+ _: u29 = 0,
+};
+
pub const O = switch (native_arch) {
.x86_64 => packed struct(u32) {
ACCMODE: ACCMODE = .RDONLY,
@@ -892,10 +899,6 @@ pub fn umount2(special: [*:0]const u8, flags: u32) usize {
pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: MAP, fd: i32, offset: i64) usize {
if (@hasField(SYS, "mmap2")) {
- // Make sure the offset is also specified in multiples of page size
- if ((offset & (MMAP2_UNIT - 1)) != 0)
- return @bitCast(-@as(isize, @intFromEnum(E.INVAL)));
-
return syscall6(
.mmap2,
@intFromPtr(address),
@@ -934,6 +937,17 @@ pub fn mprotect(address: [*]const u8, length: usize, protection: usize) usize {
return syscall3(.mprotect, @intFromPtr(address), length, protection);
}
+pub fn mremap(old_addr: ?[*]const u8, old_len: usize, new_len: usize, flags: MREMAP, new_addr: ?[*]const u8) usize {
+ return syscall5(
+ .mremap,
+ @intFromPtr(old_addr),
+ old_len,
+ new_len,
+ @as(u32, @bitCast(flags)),
+ @intFromPtr(new_addr),
+ );
+}
+
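
A comptime sanity check of the flag layout that the wrapper above bit-casts for the
kernel; the expected numeric values are assumed here to match the MREMAP_MAYMOVE,
MREMAP_FIXED, and MREMAP_DONTUNMAP constants from the Linux headers.

    const std = @import("std");

    comptime {
        const MREMAP = std.os.linux.MREMAP;
        // Bits 0, 1, 2 of the packed struct map to MAYMOVE, FIXED, DONTUNMAP.
        std.debug.assert(@as(u32, @bitCast(MREMAP{ .MAYMOVE = true })) == 0x1);
        std.debug.assert(@as(u32, @bitCast(MREMAP{ .FIXED = true })) == 0x2);
        std.debug.assert(@as(u32, @bitCast(MREMAP{ .DONTUNMAP = true })) == 0x4);
    }
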
pub const MSF = struct {
pub const ASYNC = 1;
pub const INVALIDATE = 2;
diff --git a/lib/std/os/linux/IoUring.zig b/lib/std/os/linux/IoUring.zig
index 731877e5ae..2e43709075 100644
--- a/lib/std/os/linux/IoUring.zig
+++ b/lib/std/os/linux/IoUring.zig
@@ -8,6 +8,7 @@ const posix = std.posix;
const linux = std.os.linux;
const testing = std.testing;
const is_linux = builtin.os.tag == .linux;
+const page_size_min = std.heap.page_size_min;
fd: posix.fd_t = -1,
sq: SubmissionQueue,
@@ -1341,8 +1342,8 @@ pub const SubmissionQueue = struct {
dropped: *u32,
array: []u32,
sqes: []linux.io_uring_sqe,
- mmap: []align(mem.page_size) u8,
- mmap_sqes: []align(mem.page_size) u8,
+ mmap: []align(page_size_min) u8,
+ mmap_sqes: []align(page_size_min) u8,
// We use `sqe_head` and `sqe_tail` in the same way as liburing:
// We increment `sqe_tail` (but not `tail`) for each call to `get_sqe()`.
@@ -1460,7 +1461,7 @@ pub const BufferGroup = struct {
/// Pointer to the memory shared by the kernel.
/// `buffers_count` of `io_uring_buf` structures are shared by the kernel.
/// First `io_uring_buf` is overlaid by `io_uring_buf_ring` struct.
- br: *align(mem.page_size) linux.io_uring_buf_ring,
+ br: *align(page_size_min) linux.io_uring_buf_ring,
/// Contiguous block of memory of size (buffers_count * buffer_size).
buffers: []u8,
/// Size of each buffer in buffers.
@@ -1555,7 +1556,7 @@ pub const BufferGroup = struct {
/// `fd` is IO_Uring.fd for which the provided buffer ring is being registered.
/// `entries` is the number of entries requested in the buffer ring, must be power of 2.
/// `group_id` is the chosen buffer group ID, unique in IO_Uring.
-pub fn setup_buf_ring(fd: posix.fd_t, entries: u16, group_id: u16) !*align(mem.page_size) linux.io_uring_buf_ring {
+pub fn setup_buf_ring(fd: posix.fd_t, entries: u16, group_id: u16) !*align(page_size_min) linux.io_uring_buf_ring {
if (entries == 0 or entries > 1 << 15) return error.EntriesNotInRange;
if (!std.math.isPowerOfTwo(entries)) return error.EntriesNotPowerOfTwo;
@@ -1571,7 +1572,7 @@ pub fn setup_buf_ring(fd: posix.fd_t, entries: u16, group_id: u16) !*align(mem.p
errdefer posix.munmap(mmap);
assert(mmap.len == mmap_size);
- const br: *align(mem.page_size) linux.io_uring_buf_ring = @ptrCast(mmap.ptr);
+ const br: *align(page_size_min) linux.io_uring_buf_ring = @ptrCast(mmap.ptr);
try register_buf_ring(fd, @intFromPtr(br), entries, group_id);
return br;
}
@@ -1613,9 +1614,9 @@ fn handle_register_buf_ring_result(res: usize) !void {
}
// Unregisters a previously registered shared buffer ring, returned from io_uring_setup_buf_ring.
-pub fn free_buf_ring(fd: posix.fd_t, br: *align(mem.page_size) linux.io_uring_buf_ring, entries: u32, group_id: u16) void {
+pub fn free_buf_ring(fd: posix.fd_t, br: *align(page_size_min) linux.io_uring_buf_ring, entries: u32, group_id: u16) void {
unregister_buf_ring(fd, group_id) catch {};
- var mmap: []align(mem.page_size) u8 = undefined;
+ var mmap: []align(page_size_min) u8 = undefined;
mmap.ptr = @ptrCast(br);
mmap.len = entries * @sizeOf(linux.io_uring_buf);
posix.munmap(mmap);
diff --git a/lib/std/os/linux/tls.zig b/lib/std/os/linux/tls.zig
index d1292e86dd..3180b04d2b 100644
--- a/lib/std/os/linux/tls.zig
+++ b/lib/std/os/linux/tls.zig
@@ -17,6 +17,7 @@ const assert = std.debug.assert;
const native_arch = @import("builtin").cpu.arch;
const linux = std.os.linux;
const posix = std.posix;
+const page_size_min = std.heap.page_size_min;
/// Represents an ELF TLS variant.
///
@@ -484,13 +485,13 @@ pub fn prepareArea(area: []u8) usize {
};
}
-// The main motivation for the size chosen here is that this is how much ends up being requested for
-// the thread-local variables of the `std.crypto.random` implementation. I'm not sure why it ends up
-// being so much; the struct itself is only 64 bytes. I think it has to do with being page-aligned
-// and LLVM or LLD is not smart enough to lay out the TLS data in a space-conserving way. Anyway, I
-// think it's fine because it's less than 3 pages of memory, and putting it in the ELF like this is
-// equivalent to moving the `mmap` call below into the kernel, avoiding syscall overhead.
-var main_thread_area_buffer: [0x2100]u8 align(mem.page_size) = undefined;
+/// The main motivation for the size chosen here is that this is how much ends up being requested for
+/// the thread-local variables of the `std.crypto.random` implementation. I'm not sure why it ends up
+/// being so much; the struct itself is only 64 bytes. I think it has to do with being page-aligned
+/// and LLVM or LLD is not smart enough to lay out the TLS data in a space-conserving way. Anyway, I
+/// think it's fine because it's less than 3 pages of memory, and putting it in the ELF like this is
+/// equivalent to moving the `mmap` call below into the kernel, avoiding syscall overhead.
+var main_thread_area_buffer: [0x2100]u8 align(page_size_min) = undefined;
/// Computes the layout of the static TLS area, allocates the area, initializes all of its fields,
/// and assigns the architecture-specific value to the TP register.
@@ -503,7 +504,7 @@ pub fn initStatic(phdrs: []elf.Phdr) void {
const area = blk: {
// Fast path for the common case where the TLS data is really small, avoid an allocation and
// use our local buffer.
- if (area_desc.alignment <= mem.page_size and area_desc.size <= main_thread_area_buffer.len) {
+ if (area_desc.alignment <= page_size_min and area_desc.size <= main_thread_area_buffer.len) {
break :blk main_thread_area_buffer[0..area_desc.size];
}
@@ -517,7 +518,7 @@ pub fn initStatic(phdrs: []elf.Phdr) void {
);
if (@as(isize, @bitCast(begin_addr)) < 0) @trap();
- const area_ptr: [*]align(mem.page_size) u8 = @ptrFromInt(begin_addr);
+ const area_ptr: [*]align(page_size_min) u8 = @ptrFromInt(begin_addr);
// Make sure the slice is correctly aligned.
const begin_aligned_addr = alignForward(begin_addr, area_desc.alignment);
diff --git a/lib/std/os/plan9.zig b/lib/std/os/plan9.zig
index 90f45ef7fb..1882eda476 100644
--- a/lib/std/os/plan9.zig
+++ b/lib/std/os/plan9.zig
@@ -367,8 +367,8 @@ pub fn sbrk(n: usize) usize {
bloc = @intFromPtr(&ExecData.end);
bloc_max = @intFromPtr(&ExecData.end);
}
- const bl = std.mem.alignForward(usize, bloc, std.mem.page_size);
- const n_aligned = std.mem.alignForward(usize, n, std.mem.page_size);
+ const bl = std.mem.alignForward(usize, bloc, std.heap.pageSize());
+ const n_aligned = std.mem.alignForward(usize, n, std.heap.pageSize());
if (bl + n_aligned > bloc_max) {
// we need to allocate
if (brk_(bl + n_aligned) < 0) return 0;
diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig
index d3db40bac2..563b24cf83 100644
--- a/lib/std/os/windows.zig
+++ b/lib/std/os/windows.zig
@@ -2016,18 +2016,6 @@ pub fn InitOnceExecuteOnce(InitOnce: *INIT_ONCE, InitFn: INIT_ONCE_FN, Parameter
assert(kernel32.InitOnceExecuteOnce(InitOnce, InitFn, Parameter, Context) != 0);
}
-pub fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: *anyopaque) void {
- assert(kernel32.HeapFree(hHeap, dwFlags, lpMem) != 0);
-}
-
-pub fn HeapDestroy(hHeap: HANDLE) void {
- assert(kernel32.HeapDestroy(hHeap) != 0);
-}
-
-pub fn LocalFree(hMem: HLOCAL) void {
- assert(kernel32.LocalFree(hMem) == null);
-}
-
pub const SetFileTimeError = error{Unexpected};
pub fn SetFileTime(
diff --git a/lib/std/os/windows/kernel32.zig b/lib/std/os/windows/kernel32.zig
index f2da7957a8..6110859608 100644
--- a/lib/std/os/windows/kernel32.zig
+++ b/lib/std/os/windows/kernel32.zig
@@ -42,6 +42,7 @@ const WCHAR = windows.WCHAR;
const WIN32_FIND_DATAW = windows.WIN32_FIND_DATAW;
const Win32Error = windows.Win32Error;
const WORD = windows.WORD;
+const SYSTEM_INFO = windows.SYSTEM_INFO;
// I/O - Filesystem
@@ -527,11 +528,6 @@ pub extern "kernel32" fn HeapCreate(
dwMaximumSize: SIZE_T,
) callconv(.winapi) ?HANDLE;
-// TODO: Wrapper around RtlDestroyHeap (BOOLEAN -> BOOL).
-pub extern "kernel32" fn HeapDestroy(
- hHeap: HANDLE,
-) callconv(.winapi) BOOL;
-
// TODO: Forwarder to RtlReAllocateHeap.
pub extern "kernel32" fn HeapReAlloc(
hHeap: HANDLE,
@@ -584,10 +580,6 @@ pub extern "kernel32" fn VirtualQuery(
dwLength: SIZE_T,
) callconv(.winapi) SIZE_T;
-pub extern "kernel32" fn LocalFree(
- hMem: HLOCAL,
-) callconv(.winapi) ?HLOCAL;
-
// TODO: Getter for peb.ProcessHeap
pub extern "kernel32" fn GetProcessHeap() callconv(.winapi) ?HANDLE;
@@ -667,6 +659,6 @@ pub extern "kernel32" fn SetLastError(
// TODO:
// Wrapper around KUSER_SHARED_DATA.SystemTime.
// Much better to use NtQuerySystemTime or NtQuerySystemTimePrecise for guaranteed 0.1ns precision.
-pub extern "kernel32" fn GetSystemTimeAsFileTime(
- lpSystemTimeAsFileTime: *FILETIME,
-) callconv(.winapi) void;
+pub extern "kernel32" fn GetSystemTimeAsFileTime(lpSystemTimeAsFileTime: *FILETIME) callconv(.winapi) void;
+
+pub extern "kernel32" fn GetSystemInfo(lpSystemInfo: *SYSTEM_INFO) callconv(.winapi) void;
diff --git a/lib/std/posix.zig b/lib/std/posix.zig
index c9e67f7d38..a1409705bf 100644
--- a/lib/std/posix.zig
+++ b/lib/std/posix.zig
@@ -24,6 +24,7 @@ const maxInt = std.math.maxInt;
const cast = std.math.cast;
const assert = std.debug.assert;
const native_os = builtin.os.tag;
+const page_size_min = std.heap.page_size_min;
test {
_ = @import("posix/test.zig");
@@ -82,6 +83,7 @@ pub const MAP = system.MAP;
pub const MAX_ADDR_LEN = system.MAX_ADDR_LEN;
pub const MFD = system.MFD;
pub const MMAP2_UNIT = system.MMAP2_UNIT;
+pub const MREMAP = system.MREMAP;
pub const MSF = system.MSF;
pub const MSG = system.MSG;
pub const NAME_MAX = system.NAME_MAX;
@@ -4694,7 +4696,7 @@ pub const MProtectError = error{
OutOfMemory,
} || UnexpectedError;
-pub fn mprotect(memory: []align(mem.page_size) u8, protection: u32) MProtectError!void {
+pub fn mprotect(memory: []align(page_size_min) u8, protection: u32) MProtectError!void {
if (native_os == .windows) {
const win_prot: windows.DWORD = switch (@as(u3, @truncate(protection))) {
0b000 => windows.PAGE_NOACCESS,
@@ -4759,21 +4761,21 @@ pub const MMapError = error{
/// * SIGSEGV - Attempted write into a region mapped as read-only.
/// * SIGBUS - Attempted access to a portion of the buffer that does not correspond to the file
pub fn mmap(
- ptr: ?[*]align(mem.page_size) u8,
+ ptr: ?[*]align(page_size_min) u8,
length: usize,
prot: u32,
flags: system.MAP,
fd: fd_t,
offset: u64,
-) MMapError![]align(mem.page_size) u8 {
+) MMapError![]align(page_size_min) u8 {
const mmap_sym = if (lfs64_abi) system.mmap64 else system.mmap;
const rc = mmap_sym(ptr, length, prot, @bitCast(flags), fd, @bitCast(offset));
const err: E = if (builtin.link_libc) blk: {
- if (rc != std.c.MAP_FAILED) return @as([*]align(mem.page_size) u8, @ptrCast(@alignCast(rc)))[0..length];
+ if (rc != std.c.MAP_FAILED) return @as([*]align(page_size_min) u8, @ptrCast(@alignCast(rc)))[0..length];
break :blk @enumFromInt(system._errno().*);
} else blk: {
const err = errno(rc);
- if (err == .SUCCESS) return @as([*]align(mem.page_size) u8, @ptrFromInt(rc))[0..length];
+ if (err == .SUCCESS) return @as([*]align(page_size_min) u8, @ptrFromInt(rc))[0..length];
break :blk err;
};
switch (err) {
@@ -4799,7 +4801,7 @@ pub fn mmap(
/// Zig's munmap function does not, for two reasons:
/// * It violates the Zig principle that resource deallocation must succeed.
/// * The Windows function, VirtualFree, has this restriction.
-pub fn munmap(memory: []align(mem.page_size) const u8) void {
+pub fn munmap(memory: []align(page_size_min) const u8) void {
switch (errno(system.munmap(memory.ptr, memory.len))) {
.SUCCESS => return,
.INVAL => unreachable, // Invalid parameters.
@@ -4808,12 +4810,46 @@ pub fn munmap(memory: []align(mem.page_size) const u8) void {
}
}
+pub const MRemapError = error{
+ LockedMemoryLimitExceeded,
+ /// Either a bug in the calling code, or the operating system abused the
+ /// EINVAL error code.
+ InvalidSyscallParameters,
+ OutOfMemory,
+} || UnexpectedError;
+
+pub fn mremap(
+ old_address: ?[*]align(page_size_min) u8,
+ old_len: usize,
+ new_len: usize,
+ flags: system.MREMAP,
+ new_address: ?[*]align(page_size_min) u8,
+) MRemapError![]align(page_size_min) u8 {
+ const rc = system.mremap(old_address, old_len, new_len, flags, new_address);
+ const err: E = if (builtin.link_libc) blk: {
+ if (rc != std.c.MAP_FAILED) return @as([*]align(page_size_min) u8, @ptrCast(@alignCast(rc)))[0..new_len];
+ break :blk @enumFromInt(system._errno().*);
+ } else blk: {
+ const err = errno(rc);
+ if (err == .SUCCESS) return @as([*]align(page_size_min) u8, @ptrFromInt(rc))[0..new_len];
+ break :blk err;
+ };
+ switch (err) {
+ .SUCCESS => unreachable,
+ .AGAIN => return error.LockedMemoryLimitExceeded,
+ .INVAL => return error.InvalidSyscallParameters,
+ .NOMEM => return error.OutOfMemory,
+ .FAULT => unreachable,
+ else => return unexpectedErrno(err),
+ }
+}
+
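
A Linux-only sketch of growing a private anonymous mapping through the new wrapper,
letting the kernel relocate it when it cannot grow in place; the helper name and the
caller-supplied sizes are illustrative.

    const std = @import("std");
    const posix = std.posix;

    fn growMapping(
        old: []align(std.heap.page_size_min) u8,
        new_len: usize,
    ) ![]align(std.heap.page_size_min) u8 {
        // MAYMOVE permits the kernel to pick a new address if needed.
        return posix.mremap(old.ptr, old.len, new_len, .{ .MAYMOVE = true }, null);
    }
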
pub const MSyncError = error{
UnmappedMemory,
PermissionDenied,
} || UnexpectedError;
-pub fn msync(memory: []align(mem.page_size) u8, flags: i32) MSyncError!void {
+pub fn msync(memory: []align(page_size_min) u8, flags: i32) MSyncError!void {
switch (errno(system.msync(memory.ptr, memory.len, flags))) {
.SUCCESS => return,
.PERM => return error.PermissionDenied,
@@ -7135,7 +7171,7 @@ pub const MincoreError = error{
} || UnexpectedError;
/// Determine whether pages are resident in memory.
-pub fn mincore(ptr: [*]align(mem.page_size) u8, length: usize, vec: [*]u8) MincoreError!void {
+pub fn mincore(ptr: [*]align(page_size_min) u8, length: usize, vec: [*]u8) MincoreError!void {
return switch (errno(system.mincore(ptr, length, vec))) {
.SUCCESS => {},
.AGAIN => error.SystemResources,
@@ -7181,7 +7217,7 @@ pub const MadviseError = error{
/// Give advice about use of memory.
/// This syscall is optional and is sometimes configured to be disabled.
-pub fn madvise(ptr: [*]align(mem.page_size) u8, length: usize, advice: u32) MadviseError!void {
+pub fn madvise(ptr: [*]align(page_size_min) u8, length: usize, advice: u32) MadviseError!void {
switch (errno(system.madvise(ptr, length, advice))) {
.SUCCESS => return,
.PERM => return error.PermissionDenied,
diff --git a/lib/std/process.zig b/lib/std/process.zig
index bc798e68e0..dd08e88af2 100644
--- a/lib/std/process.zig
+++ b/lib/std/process.zig
@@ -1560,7 +1560,7 @@ pub fn posixGetUserInfo(name: []const u8) !UserInfo {
ReadGroupId,
};
- var buf: [std.mem.page_size]u8 = undefined;
+ var buf: [std.heap.page_size_min]u8 = undefined;
var name_index: usize = 0;
var state = State.Start;
var uid: posix.uid_t = 0;
diff --git a/lib/std/start.zig b/lib/std/start.zig
index 8b0d6183cf..a91df35700 100644
--- a/lib/std/start.zig
+++ b/lib/std/start.zig
@@ -576,7 +576,7 @@ fn expandStackSize(phdrs: []elf.Phdr) void {
switch (phdr.p_type) {
elf.PT_GNU_STACK => {
if (phdr.p_memsz == 0) break;
- assert(phdr.p_memsz % std.mem.page_size == 0);
+ assert(phdr.p_memsz % std.heap.page_size_min == 0);
// Silently fail if we are unable to get limits.
const limits = std.posix.getrlimit(.STACK) catch break;
diff --git a/lib/std/std.zig b/lib/std/std.zig
index 5c997aebaf..558710015c 100644
--- a/lib/std/std.zig
+++ b/lib/std/std.zig
@@ -119,6 +119,13 @@ pub const Options = struct {
args: anytype,
) void = log.defaultLog,
+ /// Overrides `std.heap.page_size_min`.
+ page_size_min: ?usize = null,
+ /// Overrides `std.heap.page_size_max`.
+ page_size_max: ?usize = null,
+ /// Overrides default implementation for determining OS page size at runtime.
+ queryPageSize: fn () usize = heap.defaultQueryPageSize,
+
fmt_max_depth: usize = fmt.default_max_depth,
cryptoRandomSeed: fn (buffer: []u8) void = @import("crypto/tlcsprng.zig").defaultRandomSeed,
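
For example, a root source file that knows its target's page geometry can pin the
bounds via `std_options`; the 16 KiB values below are illustrative, not a
recommendation.

    const std = @import("std");

    pub const std_options: std.Options = .{
        // Override the defaults for a target whose page size is known to be 16 KiB.
        .page_size_min = 16 * 1024,
        .page_size_max = 16 * 1024,
    };
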
diff --git a/lib/std/testing.zig b/lib/std/testing.zig
index 1c109c48e2..288c6cdc89 100644
--- a/lib/std/testing.zig
+++ b/lib/std/testing.zig
@@ -7,21 +7,27 @@ const math = std.math;
/// Initialized on startup. Read-only after that.
pub var random_seed: u32 = 0;
-pub const FailingAllocator = @import("testing/failing_allocator.zig").FailingAllocator;
+pub const FailingAllocator = @import("testing/FailingAllocator.zig");
+pub const failing_allocator = failing_allocator_instance.allocator();
+var failing_allocator_instance = FailingAllocator.init(base_allocator_instance.allocator(), .{
+ .fail_index = 0,
+});
+var base_allocator_instance = std.heap.FixedBufferAllocator.init("");
/// This should only be used in temporary test programs.
pub const allocator = allocator_instance.allocator();
-pub var allocator_instance: std.heap.GeneralPurposeAllocator(.{}) = b: {
- if (!builtin.is_test)
- @compileError("Cannot use testing allocator outside of test block");
+pub var allocator_instance: std.heap.GeneralPurposeAllocator(.{
+ .stack_trace_frames = if (std.debug.sys_can_stack_trace) 10 else 0,
+ .resize_stack_traces = true,
+ // A unique value so that when a default-constructed
+    // GeneralPurposeAllocator is incorrectly passed to the testing allocator,
+    // or vice versa, a panic occurs.
+ .canary = @truncate(0x2731e675c3a701ba),
+}) = b: {
+ if (!builtin.is_test) @compileError("testing allocator used when not testing");
break :b .init;
};
-pub const failing_allocator = failing_allocator_instance.allocator();
-pub var failing_allocator_instance = FailingAllocator.init(base_allocator_instance.allocator(), .{ .fail_index = 0 });
-
-pub var base_allocator_instance = std.heap.FixedBufferAllocator.init("");
-
/// TODO https://github.com/ziglang/zig/issues/5738
pub var log_level = std.log.Level.warn;
diff --git a/lib/std/testing/FailingAllocator.zig b/lib/std/testing/FailingAllocator.zig
new file mode 100644
index 0000000000..c7767ae02f
--- /dev/null
+++ b/lib/std/testing/FailingAllocator.zig
@@ -0,0 +1,161 @@
+//! Allocator that fails after N allocations, useful for making sure out of
+//! memory conditions are handled correctly.
+//!
+//! To use this, first initialize it and get an allocator with
+//!
+//! `var failing_allocator_instance = FailingAllocator.init(<allocator>, <config>);`
+//! `const failing_allocator = failing_allocator_instance.allocator();`
+//!
+//! Then use `failing_allocator` anywhere you would have used a
+//! different allocator.
+const std = @import("../std.zig");
+const mem = std.mem;
+const FailingAllocator = @This();
+
+alloc_index: usize,
+resize_index: usize,
+internal_allocator: mem.Allocator,
+allocated_bytes: usize,
+freed_bytes: usize,
+allocations: usize,
+deallocations: usize,
+stack_addresses: [num_stack_frames]usize,
+has_induced_failure: bool,
+fail_index: usize,
+resize_fail_index: usize,
+
+const num_stack_frames = if (std.debug.sys_can_stack_trace) 16 else 0;
+
+pub const Config = struct {
+ /// The number of successful allocations you can expect from this allocator.
+ /// The next allocation will fail. For example, with `fail_index` equal to
+ /// 2, the following test will pass:
+ ///
+ /// var a = try failing_alloc.create(i32);
+ /// var b = try failing_alloc.create(i32);
+ /// testing.expectError(error.OutOfMemory, failing_alloc.create(i32));
+ fail_index: usize = std.math.maxInt(usize),
+
+ /// Number of successful resizes to expect from this allocator. The next resize will fail.
+ resize_fail_index: usize = std.math.maxInt(usize),
+};
+
+pub fn init(internal_allocator: mem.Allocator, config: Config) FailingAllocator {
+ return FailingAllocator{
+ .internal_allocator = internal_allocator,
+ .alloc_index = 0,
+ .resize_index = 0,
+ .allocated_bytes = 0,
+ .freed_bytes = 0,
+ .allocations = 0,
+ .deallocations = 0,
+ .stack_addresses = undefined,
+ .has_induced_failure = false,
+ .fail_index = config.fail_index,
+ .resize_fail_index = config.resize_fail_index,
+ };
+}
+
+pub fn allocator(self: *FailingAllocator) mem.Allocator {
+ return .{
+ .ptr = self,
+ .vtable = &.{
+ .alloc = alloc,
+ .resize = resize,
+ .remap = remap,
+ .free = free,
+ },
+ };
+}
+
+fn alloc(
+ ctx: *anyopaque,
+ len: usize,
+ alignment: mem.Alignment,
+ return_address: usize,
+) ?[*]u8 {
+ const self: *FailingAllocator = @ptrCast(@alignCast(ctx));
+ if (self.alloc_index == self.fail_index) {
+ if (!self.has_induced_failure) {
+ @memset(&self.stack_addresses, 0);
+ var stack_trace = std.builtin.StackTrace{
+ .instruction_addresses = &self.stack_addresses,
+ .index = 0,
+ };
+ std.debug.captureStackTrace(return_address, &stack_trace);
+ self.has_induced_failure = true;
+ }
+ return null;
+ }
+ const result = self.internal_allocator.rawAlloc(len, alignment, return_address) orelse
+ return null;
+ self.allocated_bytes += len;
+ self.allocations += 1;
+ self.alloc_index += 1;
+ return result;
+}
+
+fn resize(
+ ctx: *anyopaque,
+ memory: []u8,
+ alignment: mem.Alignment,
+ new_len: usize,
+ ra: usize,
+) bool {
+ const self: *FailingAllocator = @ptrCast(@alignCast(ctx));
+ if (self.resize_index == self.resize_fail_index)
+ return false;
+ if (!self.internal_allocator.rawResize(memory, alignment, new_len, ra))
+ return false;
+ if (new_len < memory.len) {
+ self.freed_bytes += memory.len - new_len;
+ } else {
+ self.allocated_bytes += new_len - memory.len;
+ }
+ self.resize_index += 1;
+ return true;
+}
+
+fn remap(
+ ctx: *anyopaque,
+ memory: []u8,
+ alignment: mem.Alignment,
+ new_len: usize,
+ ra: usize,
+) ?[*]u8 {
+ const self: *FailingAllocator = @ptrCast(@alignCast(ctx));
+ if (self.resize_index == self.resize_fail_index) return null;
+ const new_ptr = self.internal_allocator.rawRemap(memory, alignment, new_len, ra) orelse return null;
+ if (new_len < memory.len) {
+ self.freed_bytes += memory.len - new_len;
+ } else {
+ self.allocated_bytes += new_len - memory.len;
+ }
+ self.resize_index += 1;
+ return new_ptr;
+}
+
+fn free(
+ ctx: *anyopaque,
+ old_mem: []u8,
+ alignment: mem.Alignment,
+ ra: usize,
+) void {
+ const self: *FailingAllocator = @ptrCast(@alignCast(ctx));
+ self.internal_allocator.rawFree(old_mem, alignment, ra);
+ self.deallocations += 1;
+ self.freed_bytes += old_mem.len;
+}
+
+/// Only valid once `has_induced_failure == true`
+pub fn getStackTrace(self: *FailingAllocator) std.builtin.StackTrace {
+ std.debug.assert(self.has_induced_failure);
+ var len: usize = 0;
+ while (len < self.stack_addresses.len and self.stack_addresses[len] != 0) {
+ len += 1;
+ }
+ return .{
+ .instruction_addresses = &self.stack_addresses,
+ .index = len,
+ };
+}
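
A test-style sketch of the usage described in the doc comment at the top of this
file, with `fail_index` set to 2 as in the `Config` example; the allocated types
are arbitrary.

    const std = @import("std");

    test "FailingAllocator induces OOM on the configured allocation" {
        var failing = std.testing.FailingAllocator.init(std.testing.allocator, .{ .fail_index = 2 });
        const gpa = failing.allocator();
        const a = try gpa.create(i32);
        defer gpa.destroy(a);
        const b = try gpa.create(i32);
        defer gpa.destroy(b);
        // The third allocation hits `fail_index` and reports OutOfMemory.
        try std.testing.expectError(error.OutOfMemory, gpa.create(i32));
    }
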
diff --git a/lib/std/testing/failing_allocator.zig b/lib/std/testing/failing_allocator.zig
deleted file mode 100644
index 3a83f313bf..0000000000
--- a/lib/std/testing/failing_allocator.zig
+++ /dev/null
@@ -1,142 +0,0 @@
-const std = @import("../std.zig");
-const mem = std.mem;
-
-pub const Config = struct {
- /// The number of successful allocations you can expect from this allocator.
- /// The next allocation will fail. For example, with `fail_index` equal to
- /// 2, the following test will pass:
- ///
- /// var a = try failing_alloc.create(i32);
- /// var b = try failing_alloc.create(i32);
- /// testing.expectError(error.OutOfMemory, failing_alloc.create(i32));
- fail_index: usize = std.math.maxInt(usize),
-
- /// Number of successful resizes to expect from this allocator. The next resize will fail.
- resize_fail_index: usize = std.math.maxInt(usize),
-};
-
-/// Allocator that fails after N allocations, useful for making sure out of
-/// memory conditions are handled correctly.
-///
-/// To use this, first initialize it and get an allocator with
-///
-/// `const failing_allocator = &FailingAllocator.init(<allocator>,
-/// <config>).allocator;`
-///
-/// Then use `failing_allocator` anywhere you would have used a
-/// different allocator.
-pub const FailingAllocator = struct {
- alloc_index: usize,
- resize_index: usize,
- internal_allocator: mem.Allocator,
- allocated_bytes: usize,
- freed_bytes: usize,
- allocations: usize,
- deallocations: usize,
- stack_addresses: [num_stack_frames]usize,
- has_induced_failure: bool,
- fail_index: usize,
- resize_fail_index: usize,
-
- const num_stack_frames = if (std.debug.sys_can_stack_trace) 16 else 0;
-
- pub fn init(internal_allocator: mem.Allocator, config: Config) FailingAllocator {
- return FailingAllocator{
- .internal_allocator = internal_allocator,
- .alloc_index = 0,
- .resize_index = 0,
- .allocated_bytes = 0,
- .freed_bytes = 0,
- .allocations = 0,
- .deallocations = 0,
- .stack_addresses = undefined,
- .has_induced_failure = false,
- .fail_index = config.fail_index,
- .resize_fail_index = config.resize_fail_index,
- };
- }
-
- pub fn allocator(self: *FailingAllocator) mem.Allocator {
- return .{
- .ptr = self,
- .vtable = &.{
- .alloc = alloc,
- .resize = resize,
- .free = free,
- },
- };
- }
-
- fn alloc(
- ctx: *anyopaque,
- len: usize,
- log2_ptr_align: u8,
- return_address: usize,
- ) ?[*]u8 {
- const self: *FailingAllocator = @ptrCast(@alignCast(ctx));
- if (self.alloc_index == self.fail_index) {
- if (!self.has_induced_failure) {
- @memset(&self.stack_addresses, 0);
- var stack_trace = std.builtin.StackTrace{
- .instruction_addresses = &self.stack_addresses,
- .index = 0,
- };
- std.debug.captureStackTrace(return_address, &stack_trace);
- self.has_induced_failure = true;
- }
- return null;
- }
- const result = self.internal_allocator.rawAlloc(len, log2_ptr_align, return_address) orelse
- return null;
- self.allocated_bytes += len;
- self.allocations += 1;
- self.alloc_index += 1;
- return result;
- }
-
- fn resize(
- ctx: *anyopaque,
- old_mem: []u8,
- log2_old_align: u8,
- new_len: usize,
- ra: usize,
- ) bool {
- const self: *FailingAllocator = @ptrCast(@alignCast(ctx));
- if (self.resize_index == self.resize_fail_index)
- return false;
- if (!self.internal_allocator.rawResize(old_mem, log2_old_align, new_len, ra))
- return false;
- if (new_len < old_mem.len) {
- self.freed_bytes += old_mem.len - new_len;
- } else {
- self.allocated_bytes += new_len - old_mem.len;
- }
- self.resize_index += 1;
- return true;
- }
-
- fn free(
- ctx: *anyopaque,
- old_mem: []u8,
- log2_old_align: u8,
- ra: usize,
- ) void {
- const self: *FailingAllocator = @ptrCast(@alignCast(ctx));
- self.internal_allocator.rawFree(old_mem, log2_old_align, ra);
- self.deallocations += 1;
- self.freed_bytes += old_mem.len;
- }
-
- /// Only valid once `has_induced_failure == true`
- pub fn getStackTrace(self: *FailingAllocator) std.builtin.StackTrace {
- std.debug.assert(self.has_induced_failure);
- var len: usize = 0;
- while (len < self.stack_addresses.len and self.stack_addresses[len] != 0) {
- len += 1;
- }
- return .{
- .instruction_addresses = &self.stack_addresses,
- .index = len,
- };
- }
-};
diff --git a/lib/std/zip.zig b/lib/std/zip.zig
index 0717694c49..c149584fd5 100644
--- a/lib/std/zip.zig
+++ b/lib/std/zip.zig
@@ -162,7 +162,7 @@ pub fn decompress(
var total_uncompressed: u64 = 0;
switch (method) {
.store => {
- var buf: [std.mem.page_size]u8 = undefined;
+ var buf: [4096]u8 = undefined;
while (true) {
const len = try reader.read(&buf);
if (len == 0) break;