author    Andrew Kelley <andrew@ziglang.org>  2025-04-11 17:55:25 -0700
committer Andrew Kelley <andrew@ziglang.org>  2025-04-13 02:20:32 -0400
commit    f32a5d349d2c359a2a1f627aa70e1a7a6f6330ea
tree      05a0fe7de04e6aa91775e954f78dd1a478d7e675  /lib/std/heap
parent    ec2888858102035f296c01df5aacbd255c35d06f
std: eradicate u29 and embrace std.mem.Alignment
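
The change replaces raw u29 alignment values with the std.mem.Alignment enum across the allocator interfaces. A minimal sketch of the migration, using only the methods this diff itself exercises (.of, .fromByteUnits, .toByteUnits, .max, .check, and the .@"N" enum literals):

const std = @import("std");
const Alignment = std.mem.Alignment;

test "Alignment replaces raw u29 values" {
    // From a type's natural alignment (replaces passing @alignOf(T)).
    const from_type: Alignment = .of(u32);
    // From a byte count, which must be a power of two.
    const from_bytes: Alignment = .fromByteUnits(@alignOf(u32));
    try std.testing.expectEqual(from_type, from_bytes);

    // Enum-literal spelling for a fixed power of two (replaces a bare 16).
    const sixteen: Alignment = .@"16";
    try std.testing.expectEqual(@as(usize, 16), sixteen.toByteUnits());

    // max() picks the stricter of two alignments.
    try std.testing.expectEqual(sixteen, Alignment.@"4".max(.@"16"));

    // check() replaces std.mem.isAligned on a raw address.
    try std.testing.expect(sixteen.check(0x1000));
}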
Diffstat (limited to 'lib/std/heap')
 lib/std/heap/WasmAllocator.zig   |  2
 lib/std/heap/arena_allocator.zig | 15
 lib/std/heap/debug_allocator.zig | 12
 lib/std/heap/memory_pool.zig     | 23
 4 files changed, 27 insertions(+), 25 deletions(-)
diff --git a/lib/std/heap/WasmAllocator.zig b/lib/std/heap/WasmAllocator.zig
index b511a216f7..2045d7c303 100644
--- a/lib/std/heap/WasmAllocator.zig
+++ b/lib/std/heap/WasmAllocator.zig
@@ -218,7 +218,7 @@ test "very large allocation" {
}
test "realloc" {
- var slice = try test_ally.alignedAlloc(u8, @alignOf(u32), 1);
+ var slice = try test_ally.alignedAlloc(u8, .of(u32), 1);
defer test_ally.free(slice);
slice[0] = 0x12;
diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig
index c472ae80c5..e3a76a06c1 100644
--- a/lib/std/heap/arena_allocator.zig
+++ b/lib/std/heap/arena_allocator.zig
@@ -2,6 +2,7 @@ const std = @import("../std.zig");
const assert = std.debug.assert;
const mem = std.mem;
const Allocator = std.mem.Allocator;
+const Alignment = std.mem.Alignment;
/// This allocator takes an existing allocator, wraps it, and provides an interface where
/// you can allocate and then free it all together. Calls to free an individual item only
@@ -41,7 +42,7 @@ pub const ArenaAllocator = struct {
data: usize,
node: std.SinglyLinkedList.Node = .{},
};
- const BufNode_alignment: mem.Alignment = .fromByteUnits(@alignOf(BufNode));
+ const BufNode_alignment: Alignment = .fromByteUnits(@alignOf(BufNode));
pub fn init(child_allocator: Allocator) ArenaAllocator {
return (State{}).promote(child_allocator);
@@ -181,7 +182,7 @@ pub const ArenaAllocator = struct {
return buf_node;
}
- fn alloc(ctx: *anyopaque, n: usize, alignment: mem.Alignment, ra: usize) ?[*]u8 {
+ fn alloc(ctx: *anyopaque, n: usize, alignment: Alignment, ra: usize) ?[*]u8 {
const self: *ArenaAllocator = @ptrCast(@alignCast(ctx));
_ = ra;
@@ -214,7 +215,7 @@ pub const ArenaAllocator = struct {
}
}
- fn resize(ctx: *anyopaque, buf: []u8, alignment: mem.Alignment, new_len: usize, ret_addr: usize) bool {
+ fn resize(ctx: *anyopaque, buf: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) bool {
const self: *ArenaAllocator = @ptrCast(@alignCast(ctx));
_ = alignment;
_ = ret_addr;
@@ -242,14 +243,14 @@ pub const ArenaAllocator = struct {
fn remap(
context: *anyopaque,
memory: []u8,
- alignment: mem.Alignment,
+ alignment: Alignment,
new_len: usize,
return_address: usize,
) ?[*]u8 {
return if (resize(context, memory, alignment, new_len, return_address)) memory.ptr else null;
}
- fn free(ctx: *anyopaque, buf: []u8, alignment: mem.Alignment, ret_addr: usize) void {
+ fn free(ctx: *anyopaque, buf: []u8, alignment: Alignment, ret_addr: usize) void {
_ = alignment;
_ = ret_addr;
@@ -279,9 +280,9 @@ test "reset with preheating" {
const total_size: usize = random.intRangeAtMost(usize, 256, 16384);
while (alloced_bytes < total_size) {
const size = random.intRangeAtMost(usize, 16, 256);
- const alignment = 32;
+ const alignment: Alignment = .@"32";
const slice = try arena_allocator.allocator().alignedAlloc(u8, alignment, size);
- try std.testing.expect(std.mem.isAligned(@intFromPtr(slice.ptr), alignment));
+ try std.testing.expect(alignment.check(@intFromPtr(slice.ptr)));
try std.testing.expectEqual(size, slice.len);
alloced_bytes += slice.len;
}
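
As the updated arena test shows, Allocator.alignedAlloc now takes an Alignment value directly, and Alignment.check subsumes std.mem.isAligned. A self-contained sketch of the same pattern (the buffer size is chosen arbitrarily):

const std = @import("std");
const Alignment = std.mem.Alignment;

test "alignedAlloc with an Alignment value" {
    var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
    defer arena.deinit();

    // Request 64 bytes with 32-byte alignment, then verify the address.
    const alignment: Alignment = .@"32";
    const slice = try arena.allocator().alignedAlloc(u8, alignment, 64);
    try std.testing.expect(alignment.check(@intFromPtr(slice.ptr)));
    try std.testing.expectEqual(@as(usize, 64), slice.len);
}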
diff --git a/lib/std/heap/debug_allocator.zig b/lib/std/heap/debug_allocator.zig
index a6b2676b1d..3243f1b1bd 100644
--- a/lib/std/heap/debug_allocator.zig
+++ b/lib/std/heap/debug_allocator.zig
@@ -1120,7 +1120,7 @@ test "realloc" {
defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
const allocator = gpa.allocator();
- var slice = try allocator.alignedAlloc(u8, @alignOf(u32), 1);
+ var slice = try allocator.alignedAlloc(u8, .of(u32), 1);
defer allocator.free(slice);
slice[0] = 0x12;
@@ -1234,7 +1234,7 @@ test "shrink large object to large object with larger alignment" {
const debug_allocator = fba.allocator();
const alloc_size = default_page_size * 2 + 50;
- var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
+ var slice = try allocator.alignedAlloc(u8, .@"16", alloc_size);
defer allocator.free(slice);
const big_alignment: usize = default_page_size * 2;
@@ -1244,7 +1244,7 @@ test "shrink large object to large object with larger alignment" {
var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
while (mem.isAligned(@intFromPtr(slice.ptr), big_alignment)) {
try stuff_to_free.append(slice);
- slice = try allocator.alignedAlloc(u8, 16, alloc_size);
+ slice = try allocator.alignedAlloc(u8, .@"16", alloc_size);
}
while (stuff_to_free.pop()) |item| {
allocator.free(item);
@@ -1308,7 +1308,7 @@ test "realloc large object to larger alignment" {
var fba = std.heap.FixedBufferAllocator.init(&debug_buffer);
const debug_allocator = fba.allocator();
- var slice = try allocator.alignedAlloc(u8, 16, default_page_size * 2 + 50);
+ var slice = try allocator.alignedAlloc(u8, .@"16", default_page_size * 2 + 50);
defer allocator.free(slice);
const big_alignment: usize = default_page_size * 2;
@@ -1316,7 +1316,7 @@ test "realloc large object to larger alignment" {
var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
while (mem.isAligned(@intFromPtr(slice.ptr), big_alignment)) {
try stuff_to_free.append(slice);
- slice = try allocator.alignedAlloc(u8, 16, default_page_size * 2 + 50);
+ slice = try allocator.alignedAlloc(u8, .@"16", default_page_size * 2 + 50);
}
while (stuff_to_free.pop()) |item| {
allocator.free(item);
@@ -1402,7 +1402,7 @@ test "large allocations count requested size not backing size" {
var gpa: DebugAllocator(.{ .enable_memory_limit = true }) = .{};
const allocator = gpa.allocator();
- var buf = try allocator.alignedAlloc(u8, 1, default_page_size + 1);
+ var buf = try allocator.alignedAlloc(u8, .@"1", default_page_size + 1);
try std.testing.expectEqual(default_page_size + 1, gpa.total_requested_bytes);
buf = try allocator.realloc(buf, 1);
try std.testing.expectEqual(1, gpa.total_requested_bytes);
diff --git a/lib/std/heap/memory_pool.zig b/lib/std/heap/memory_pool.zig
index 6b9abfd8dc..2b201f2b54 100644
--- a/lib/std/heap/memory_pool.zig
+++ b/lib/std/heap/memory_pool.zig
@@ -1,4 +1,5 @@
const std = @import("../std.zig");
+const Alignment = std.mem.Alignment;
const debug_mode = @import("builtin").mode == .Debug;
@@ -8,14 +9,14 @@ pub const MemoryPoolError = error{OutOfMemory};
/// Use this when you need to allocate a lot of objects of the same type,
/// because it outperforms general-purpose allocators.
pub fn MemoryPool(comptime Item: type) type {
- return MemoryPoolAligned(Item, @alignOf(Item));
+ return MemoryPoolAligned(Item, .of(Item));
}
/// A memory pool that can allocate objects of a single type very quickly.
/// Use this when you need to allocate a lot of objects of the same type,
/// because it outperforms general-purpose allocators.
-pub fn MemoryPoolAligned(comptime Item: type, comptime alignment: u29) type {
- if (@alignOf(Item) == alignment) {
+pub fn MemoryPoolAligned(comptime Item: type, comptime alignment: Alignment) type {
+ if (@alignOf(Item) == comptime alignment.toByteUnits()) {
return MemoryPoolExtra(Item, .{});
} else {
return MemoryPoolExtra(Item, .{ .alignment = alignment });
@@ -24,7 +25,7 @@ pub fn MemoryPoolAligned(comptime Item: type, comptime alignment: u29) type {
pub const Options = struct {
/// The alignment of the memory pool items. Use `null` for natural alignment.
- alignment: ?u29 = null,
+ alignment: ?Alignment = null,
/// If `true`, the memory pool can allocate additional items after an initial setup.
/// If `false`, the memory pool will not allocate further after a call to `initPreheated`.
@@ -43,17 +44,17 @@ pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type
pub const item_size = @max(@sizeOf(Node), @sizeOf(Item));
// This needs to be kept in sync with Node.
- const node_alignment = @alignOf(*anyopaque);
+ const node_alignment: Alignment = .of(*anyopaque);
/// Alignment of the memory pool items. This is not necessarily the same
/// as `@alignOf(Item)` because the pool also uses the items for internal bookkeeping.
- pub const item_alignment = @max(node_alignment, pool_options.alignment orelse @alignOf(Item));
+ pub const item_alignment: Alignment = node_alignment.max(pool_options.alignment orelse .of(Item));
const Node = struct {
- next: ?*align(item_alignment) @This(),
+ next: ?*align(item_alignment.toByteUnits()) @This(),
};
- const NodePtr = *align(item_alignment) Node;
- const ItemPtr = *align(item_alignment) Item;
+ const NodePtr = *align(item_alignment.toByteUnits()) Node;
+ const ItemPtr = *align(item_alignment.toByteUnits()) Item;
arena: std.heap.ArenaAllocator,
free_list: ?NodePtr = null,
@@ -143,7 +144,7 @@ pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type
pool.free_list = node;
}
- fn allocNew(pool: *Pool) MemoryPoolError!*align(item_alignment) [item_size]u8 {
+ fn allocNew(pool: *Pool) MemoryPoolError!*align(item_alignment.toByteUnits()) [item_size]u8 {
const mem = try pool.arena.allocator().alignedAlloc(u8, item_alignment, item_size);
return mem[0..item_size]; // coerce slice to array pointer
}
@@ -213,7 +214,7 @@ test "greater than pointer manual alignment" {
data: u64,
};
- var pool = MemoryPoolAligned(Foo, 16).init(std.testing.allocator);
+ var pool = MemoryPoolAligned(Foo, .@"16").init(std.testing.allocator);
defer pool.deinit();
const foo: *align(16) Foo = try pool.create();
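
The test above exercises the new MemoryPoolAligned signature end to end. A self-contained sketch of the same usage (Foo and the stored value are illustrative):

const std = @import("std");

test "MemoryPoolAligned takes an Alignment" {
    const Foo = struct { data: u64 };

    // The alignment parameter is now an Alignment enum literal, not a u29.
    var pool = std.heap.MemoryPoolAligned(Foo, .@"16").init(std.testing.allocator);
    defer pool.deinit();

    // create() hands back a pointer carrying the requested alignment.
    const foo: *align(16) Foo = try pool.create();
    foo.data = 42;
    pool.destroy(foo);
}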