| Field | Value | Date |
|---|---|---|
| author | Andrew Kelley <andrew@ziglang.org> | 2023-06-24 16:58:19 -0700 |
| committer | GitHub <noreply@github.com> | 2023-06-24 16:58:19 -0700 |
| commit | 146b79af153bbd5dafda0ba12a040385c7fc58f8 (patch) | |
| tree | 67e3db8b444d65c667e314770fc983a7fc8ba293 /lib/std/heap | |
| parent | 13853bef0df3c90633021850cc6d6abaeea03282 (diff) | |
| parent | 21ac0beb436f49fe49c6982a872f2dc48e4bea5e (diff) | |
Merge pull request #16163 from mlugg/feat/builtins-infer-dest-ty
Infer destination type of cast builtins using result type
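For readers skimming the diff, here is a minimal sketch of the builtin change this PR applies across `lib/std/heap` (the values and variable names are illustrative, not taken from the diff): the destination type no longer appears as a first argument to `@intCast`, `@ptrCast`, and friends; it is inferred from the result location, either a type annotation or a wrapping `@as()`.

```zig
const std = @import("std");

// Old (two-argument) style:
//     const small = @intCast(u8, big);
//     const bytes = @ptrCast([*]const u8, &value);
// New style: the destination type comes from the result location,
// either a type annotation or a wrapping @as().
pub fn main() void {
    const big: usize = 200;
    const small: u8 = @intCast(big); // result type u8 drives the cast
    const also_small = @as(u8, @intCast(big)); // equivalent spelling via @as
    std.debug.print("{} {}\n", .{ small, also_small });
}
```

The practical effect in this diff is mostly mechanical: the redundant type argument disappears wherever the surrounding declaration already names the type.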
Diffstat (limited to 'lib/std/heap')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | lib/std/heap/PageAllocator.zig | 13 |
| -rw-r--r-- | lib/std/heap/ThreadSafeAllocator.zig | 6 |
| -rw-r--r-- | lib/std/heap/WasmAllocator.zig | 20 |
| -rw-r--r-- | lib/std/heap/WasmPageAllocator.zig | 12 |
| -rw-r--r-- | lib/std/heap/arena_allocator.zig | 24 |
| -rw-r--r-- | lib/std/heap/general_purpose_allocator.zig | 56 |
| -rw-r--r-- | lib/std/heap/log_to_writer_allocator.zig | 6 |
| -rw-r--r-- | lib/std/heap/logging_allocator.zig | 6 |
| -rw-r--r-- | lib/std/heap/memory_pool.zig | 8 |
9 files changed, 75 insertions, 76 deletions
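Most of the hunks below follow one recurring pattern: an allocator's `*anyopaque` context pointer is recovered with `@ptrCast`/`@alignCast`, and the annotated result type now supplies both the pointer type and the alignment that previously had to be spelled out. A hedged sketch of that pattern (the `Self` struct and the test are illustrative, not code from this commit):

```zig
const std = @import("std");

const Self = struct { count: usize };

// Old two-argument form:
//     const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
// New form: the annotated result type *Self supplies both the pointer type
// and the alignment that @alignCast must check.
fn alloc(ctx: *anyopaque, len: usize, log2_ptr_align: u8, ret_addr: usize) ?[*]u8 {
    _ = len;
    _ = log2_ptr_align;
    _ = ret_addr;
    const self: *Self = @ptrCast(@alignCast(ctx));
    self.count += 1;
    return null;
}

test "recover a typed context pointer" {
    var state = Self{ .count = 0 };
    _ = alloc(&state, 16, 3, 0);
    try std.testing.expectEqual(@as(usize, 1), state.count);
}
```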
diff --git a/lib/std/heap/PageAllocator.zig b/lib/std/heap/PageAllocator.zig
index 12a0bdcf30..3e92aa5eec 100644
--- a/lib/std/heap/PageAllocator.zig
+++ b/lib/std/heap/PageAllocator.zig
@@ -27,7 +27,7 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
             w.MEM_COMMIT | w.MEM_RESERVE,
             w.PAGE_READWRITE,
         ) catch return null;
-        return @ptrCast([*]align(mem.page_size) u8, @alignCast(mem.page_size, addr));
+        return @ptrCast(addr);
     }
 
     const hint = @atomicLoad(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, .Unordered);
@@ -40,7 +40,7 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
         0,
     ) catch return null;
     assert(mem.isAligned(@intFromPtr(slice.ptr), mem.page_size));
-    const new_hint = @alignCast(mem.page_size, slice.ptr + aligned_len);
+    const new_hint: [*]align(mem.page_size) u8 = @alignCast(slice.ptr + aligned_len);
     _ = @cmpxchgStrong(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, hint, new_hint, .Monotonic, .Monotonic);
     return slice.ptr;
 }
@@ -66,7 +66,7 @@ fn resize(
                 // For shrinking that is not releasing, we will only
                 // decommit the pages not needed anymore.
                 w.VirtualFree(
-                    @ptrFromInt(*anyopaque, new_addr_end),
+                    @as(*anyopaque, @ptrFromInt(new_addr_end)),
                     old_addr_end - new_addr_end,
                     w.MEM_DECOMMIT,
                 );
@@ -85,9 +85,9 @@ fn resize(
         return true;
 
     if (new_size_aligned < buf_aligned_len) {
-        const ptr = @alignCast(mem.page_size, buf_unaligned.ptr + new_size_aligned);
+        const ptr = buf_unaligned.ptr + new_size_aligned;
         // TODO: if the next_mmap_addr_hint is within the unmapped range, update it
-        os.munmap(ptr[0 .. buf_aligned_len - new_size_aligned]);
+        os.munmap(@alignCast(ptr[0 .. buf_aligned_len - new_size_aligned]));
         return true;
     }
 
@@ -104,7 +104,6 @@ fn free(_: *anyopaque, slice: []u8, log2_buf_align: u8, return_address: usize) v
         os.windows.VirtualFree(slice.ptr, 0, os.windows.MEM_RELEASE);
     } else {
         const buf_aligned_len = mem.alignForward(usize, slice.len, mem.page_size);
-        const ptr = @alignCast(mem.page_size, slice.ptr);
-        os.munmap(ptr[0..buf_aligned_len]);
+        os.munmap(@alignCast(slice.ptr[0..buf_aligned_len]));
     }
 }
diff --git a/lib/std/heap/ThreadSafeAllocator.zig b/lib/std/heap/ThreadSafeAllocator.zig
index fe10eb2fdb..12bb095b30 100644
--- a/lib/std/heap/ThreadSafeAllocator.zig
+++ b/lib/std/heap/ThreadSafeAllocator.zig
@@ -15,7 +15,7 @@ pub fn allocator(self: *ThreadSafeAllocator) Allocator {
 }
 
 fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
-    const self = @ptrCast(*ThreadSafeAllocator, @alignCast(@alignOf(ThreadSafeAllocator), ctx));
+    const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx));
     self.mutex.lock();
     defer self.mutex.unlock();
@@ -23,7 +23,7 @@ fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
 }
 
 fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool {
-    const self = @ptrCast(*ThreadSafeAllocator, @alignCast(@alignOf(ThreadSafeAllocator), ctx));
+    const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx));
     self.mutex.lock();
     defer self.mutex.unlock();
@@ -32,7 +32,7 @@ fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_ad
 }
 
 fn free(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, ret_addr: usize) void {
-    const self = @ptrCast(*ThreadSafeAllocator, @alignCast(@alignOf(ThreadSafeAllocator), ctx));
+    const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx));
     self.mutex.lock();
     defer self.mutex.unlock();
diff --git a/lib/std/heap/WasmAllocator.zig b/lib/std/heap/WasmAllocator.zig
index e3e436fd2b..60051b688a 100644
--- a/lib/std/heap/WasmAllocator.zig
+++ b/lib/std/heap/WasmAllocator.zig
@@ -47,7 +47,7 @@ fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, return_address: usize) ?[*
     _ = ctx;
     _ = return_address;
     // Make room for the freelist next pointer.
-    const alignment = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_align);
+    const alignment = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_align));
     const actual_len = @max(len +| @sizeOf(usize), alignment);
     const slot_size = math.ceilPowerOfTwo(usize, actual_len) catch return null;
     const class = math.log2(slot_size) - min_class;
@@ -55,7 +55,7 @@ fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, return_address: usize) ?[*
         const addr = a: {
             const top_free_ptr = frees[class];
             if (top_free_ptr != 0) {
-                const node = @ptrFromInt(*usize, top_free_ptr + (slot_size - @sizeOf(usize)));
+                const node = @as(*usize, @ptrFromInt(top_free_ptr + (slot_size - @sizeOf(usize))));
                 frees[class] = node.*;
                 break :a top_free_ptr;
             }
@@ -74,11 +74,11 @@ fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, return_address: usize) ?[*
                 break :a next_addr;
             }
         };
-        return @ptrFromInt([*]u8, addr);
+        return @as([*]u8, @ptrFromInt(addr));
     }
     const bigpages_needed = bigPagesNeeded(actual_len);
     const addr = allocBigPages(bigpages_needed);
-    return @ptrFromInt([*]u8, addr);
+    return @as([*]u8, @ptrFromInt(addr));
 }
 
 fn resize(
@@ -92,7 +92,7 @@ fn resize(
     _ = return_address;
     // We don't want to move anything from one size class to another, but we
     // can recover bytes in between powers of two.
-    const buf_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_buf_align);
+    const buf_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_buf_align));
     const old_actual_len = @max(buf.len + @sizeOf(usize), buf_align);
     const new_actual_len = @max(new_len +| @sizeOf(usize), buf_align);
     const old_small_slot_size = math.ceilPowerOfTwoAssert(usize, old_actual_len);
@@ -117,20 +117,20 @@ fn free(
 ) void {
     _ = ctx;
     _ = return_address;
-    const buf_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_buf_align);
+    const buf_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_buf_align));
     const actual_len = @max(buf.len + @sizeOf(usize), buf_align);
     const slot_size = math.ceilPowerOfTwoAssert(usize, actual_len);
     const class = math.log2(slot_size) - min_class;
     const addr = @intFromPtr(buf.ptr);
     if (class < size_class_count) {
-        const node = @ptrFromInt(*usize, addr + (slot_size - @sizeOf(usize)));
+        const node = @as(*usize, @ptrFromInt(addr + (slot_size - @sizeOf(usize))));
         node.* = frees[class];
         frees[class] = addr;
     } else {
         const bigpages_needed = bigPagesNeeded(actual_len);
         const pow2_pages = math.ceilPowerOfTwoAssert(usize, bigpages_needed);
         const big_slot_size_bytes = pow2_pages * bigpage_size;
-        const node = @ptrFromInt(*usize, addr + (big_slot_size_bytes - @sizeOf(usize)));
+        const node = @as(*usize, @ptrFromInt(addr + (big_slot_size_bytes - @sizeOf(usize))));
         const big_class = math.log2(pow2_pages);
         node.* = big_frees[big_class];
         big_frees[big_class] = addr;
@@ -148,14 +148,14 @@ fn allocBigPages(n: usize) usize {
 
     const top_free_ptr = big_frees[class];
     if (top_free_ptr != 0) {
-        const node = @ptrFromInt(*usize, top_free_ptr + (slot_size_bytes - @sizeOf(usize)));
+        const node = @as(*usize, @ptrFromInt(top_free_ptr + (slot_size_bytes - @sizeOf(usize))));
         big_frees[class] = node.*;
         return top_free_ptr;
     }
 
     const page_index = @wasmMemoryGrow(0, pow2_pages * pages_per_bigpage);
     if (page_index <= 0) return 0;
-    const addr = @intCast(u32, page_index) * wasm.page_size;
+    const addr = @as(u32, @intCast(page_index)) * wasm.page_size;
     return addr;
 }
diff --git a/lib/std/heap/WasmPageAllocator.zig b/lib/std/heap/WasmPageAllocator.zig
index c77164ee2d..8f484c52f6 100644
--- a/lib/std/heap/WasmPageAllocator.zig
+++ b/lib/std/heap/WasmPageAllocator.zig
@@ -40,7 +40,7 @@ const FreeBlock = struct {
 
     fn getBit(self: FreeBlock, idx: usize) PageStatus {
         const bit_offset = 0;
-        return @enumFromInt(PageStatus, Io.get(mem.sliceAsBytes(self.data), idx, bit_offset));
+        return @as(PageStatus, @enumFromInt(Io.get(mem.sliceAsBytes(self.data), idx, bit_offset)));
     }
 
     fn setBits(self: FreeBlock, start_idx: usize, len: usize, val: PageStatus) void {
@@ -63,7 +63,7 @@ const FreeBlock = struct {
     fn useRecycled(self: FreeBlock, num_pages: usize, log2_align: u8) usize {
         @setCold(true);
         for (self.data, 0..) |segment, i| {
-            const spills_into_next = @bitCast(i128, segment) < 0;
+            const spills_into_next = @as(i128, @bitCast(segment)) < 0;
             const has_enough_bits = @popCount(segment) >= num_pages;
 
             if (!spills_into_next and !has_enough_bits) continue;
@@ -109,7 +109,7 @@ fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, ra: usize) ?[*]u8 {
     if (len > maxInt(usize) - (mem.page_size - 1)) return null;
     const page_count = nPages(len);
     const page_idx = allocPages(page_count, log2_align) catch return null;
-    return @ptrFromInt([*]u8, page_idx * mem.page_size);
+    return @as([*]u8, @ptrFromInt(page_idx * mem.page_size));
 }
 
 fn allocPages(page_count: usize, log2_align: u8) !usize {
@@ -129,7 +129,7 @@ fn allocPages(page_count: usize, log2_align: u8) !usize {
     const next_page_addr = next_page_idx * mem.page_size;
     const aligned_addr = mem.alignForwardLog2(next_page_addr, log2_align);
     const drop_page_count = @divExact(aligned_addr - next_page_addr, mem.page_size);
-    const result = @wasmMemoryGrow(0, @intCast(u32, drop_page_count + page_count));
+    const result = @wasmMemoryGrow(0, @as(u32, @intCast(drop_page_count + page_count)));
     if (result <= 0) return error.OutOfMemory;
     assert(result == next_page_idx);
@@ -137,7 +137,7 @@ fn allocPages(page_count: usize, log2_align: u8) !usize {
     if (drop_page_count > 0) {
         freePages(next_page_idx, aligned_page_idx);
     }
-    return @intCast(usize, aligned_page_idx);
+    return @as(usize, @intCast(aligned_page_idx));
 }
 
 fn freePages(start: usize, end: usize) void {
@@ -151,7 +151,7 @@ fn freePages(start: usize, end: usize) void {
         // TODO: would it be better if we use the first page instead?
         new_end -= 1;
 
-        extended.data = @ptrFromInt([*]u128, new_end * mem.page_size)[0 .. mem.page_size / @sizeOf(u128)];
+        extended.data = @as([*]u128, @ptrFromInt(new_end * mem.page_size))[0 .. mem.page_size / @sizeOf(u128)];
         // Since this is the first page being freed and we consume it, assume *nothing* is free.
         @memset(extended.data, PageStatus.none_free);
     }
diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig
index a8d6641d8d..d547987f63 100644
--- a/lib/std/heap/arena_allocator.zig
+++ b/lib/std/heap/arena_allocator.zig
@@ -48,7 +48,7 @@ pub const ArenaAllocator = struct {
             // this has to occur before the free because the free frees node
             const next_it = node.next;
             const align_bits = std.math.log2_int(usize, @alignOf(BufNode));
-            const alloc_buf = @ptrCast([*]u8, node)[0..node.data];
+            const alloc_buf = @as([*]u8, @ptrCast(node))[0..node.data];
             self.child_allocator.rawFree(alloc_buf, align_bits, @returnAddress());
             it = next_it;
         }
@@ -128,7 +128,7 @@ pub const ArenaAllocator = struct {
                 const next_it = node.next;
                 if (next_it == null)
                     break node;
-                const alloc_buf = @ptrCast([*]u8, node)[0..node.data];
+                const alloc_buf = @as([*]u8, @ptrCast(node))[0..node.data];
                 self.child_allocator.rawFree(alloc_buf, align_bits, @returnAddress());
                 it = next_it;
             } else null;
@@ -140,7 +140,7 @@ pub const ArenaAllocator = struct {
             // perfect, no need to invoke the child_allocator
             if (first_node.data == total_size)
                 return true;
-            const first_alloc_buf = @ptrCast([*]u8, first_node)[0..first_node.data];
+            const first_alloc_buf = @as([*]u8, @ptrCast(first_node))[0..first_node.data];
             if (self.child_allocator.rawResize(first_alloc_buf, align_bits, total_size, @returnAddress())) {
                 // successful resize
                 first_node.data = total_size;
@@ -151,7 +151,7 @@ pub const ArenaAllocator = struct {
                     return false;
                 };
                 self.child_allocator.rawFree(first_alloc_buf, align_bits, @returnAddress());
-                const node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), new_ptr));
+                const node: *BufNode = @ptrCast(@alignCast(new_ptr));
                 node.* = .{ .data = total_size };
                 self.state.buffer_list.first = node;
             }
@@ -166,7 +166,7 @@ pub const ArenaAllocator = struct {
        const log2_align = comptime std.math.log2_int(usize, @alignOf(BufNode));
        const ptr = self.child_allocator.rawAlloc(len, log2_align, @returnAddress()) orelse
            return null;
-        const buf_node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), ptr));
+        const buf_node: *BufNode = @ptrCast(@alignCast(ptr));
        buf_node.* = .{ .data = len };
        self.state.buffer_list.prepend(buf_node);
        self.state.end_index = 0;
@@ -174,16 +174,16 @@ pub const ArenaAllocator = struct {
    }
 
    fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
-        const self = @ptrCast(*ArenaAllocator, @alignCast(@alignOf(ArenaAllocator), ctx));
+        const self: *ArenaAllocator = @ptrCast(@alignCast(ctx));
        _ = ra;
 
-        const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align);
+        const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
        var cur_node = if (self.state.buffer_list.first) |first_node|
            first_node
        else
            (self.createNode(0, n + ptr_align) orelse return null);
        while (true) {
-            const cur_alloc_buf = @ptrCast([*]u8, cur_node)[0..cur_node.data];
+            const cur_alloc_buf = @as([*]u8, @ptrCast(cur_node))[0..cur_node.data];
            const cur_buf = cur_alloc_buf[@sizeOf(BufNode)..];
            const addr = @intFromPtr(cur_buf.ptr) + self.state.end_index;
            const adjusted_addr = mem.alignForward(usize, addr, ptr_align);
@@ -208,12 +208,12 @@ pub const ArenaAllocator = struct {
    }
 
    fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool {
-        const self = @ptrCast(*ArenaAllocator, @alignCast(@alignOf(ArenaAllocator), ctx));
+        const self: *ArenaAllocator = @ptrCast(@alignCast(ctx));
        _ = log2_buf_align;
        _ = ret_addr;
 
        const cur_node = self.state.buffer_list.first orelse return false;
-        const cur_buf = @ptrCast([*]u8, cur_node)[@sizeOf(BufNode)..cur_node.data];
+        const cur_buf = @as([*]u8, @ptrCast(cur_node))[@sizeOf(BufNode)..cur_node.data];
        if (@intFromPtr(cur_buf.ptr) + self.state.end_index != @intFromPtr(buf.ptr) + buf.len) {
            // It's not the most recent allocation, so it cannot be expanded,
            // but it's fine if they want to make it smaller.
@@ -235,10 +235,10 @@ pub const ArenaAllocator = struct {
        _ = log2_buf_align;
        _ = ret_addr;
 
-        const self = @ptrCast(*ArenaAllocator, @alignCast(@alignOf(ArenaAllocator), ctx));
+        const self: *ArenaAllocator = @ptrCast(@alignCast(ctx));
 
        const cur_node = self.state.buffer_list.first orelse return;
-        const cur_buf = @ptrCast([*]u8, cur_node)[@sizeOf(BufNode)..cur_node.data];
+        const cur_buf = @as([*]u8, @ptrCast(cur_node))[@sizeOf(BufNode)..cur_node.data];
 
        if (@intFromPtr(cur_buf.ptr) + self.state.end_index == @intFromPtr(buf.ptr) + buf.len) {
            self.state.end_index -= buf.len;
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index 98375c850e..11f7d9dd27 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -250,7 +250,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
            used_count: SlotIndex,
 
            fn usedBits(bucket: *BucketHeader, index: usize) *u8 {
-                return @ptrFromInt(*u8, @intFromPtr(bucket) + @sizeOf(BucketHeader) + index);
+                return @as(*u8, @ptrFromInt(@intFromPtr(bucket) + @sizeOf(BucketHeader) + index));
            }
 
            fn stackTracePtr(
@@ -259,10 +259,10 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                slot_index: SlotIndex,
                trace_kind: TraceKind,
            ) *[stack_n]usize {
-                const start_ptr = @ptrCast([*]u8, bucket) + bucketStackFramesStart(size_class);
+                const start_ptr = @as([*]u8, @ptrCast(bucket)) + bucketStackFramesStart(size_class);
                const addr = start_ptr + one_trace_size * traces_per_slot * slot_index +
                    @intFromEnum(trace_kind) * @as(usize, one_trace_size);
-                return @ptrCast(*[stack_n]usize, @alignCast(@alignOf(usize), addr));
+                return @ptrCast(@alignCast(addr));
            }
 
            fn captureStackTrace(
@@ -338,9 +338,9 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                if (used_byte != 0) {
                    var bit_index: u3 = 0;
                    while (true) : (bit_index += 1) {
-                        const is_used = @truncate(u1, used_byte >> bit_index) != 0;
+                        const is_used = @as(u1, @truncate(used_byte >> bit_index)) != 0;
                        if (is_used) {
-                            const slot_index = @intCast(SlotIndex, used_bits_byte * 8 + bit_index);
+                            const slot_index = @as(SlotIndex, @intCast(used_bits_byte * 8 + bit_index));
                            const stack_trace = bucketStackTrace(bucket, size_class, slot_index, .alloc);
                            const addr = bucket.page + slot_index * size_class;
                            log.err("memory address 0x{x} leaked: {}", .{
@@ -361,7 +361,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
            var leaks = false;
            for (self.buckets, 0..) |optional_bucket, bucket_i| {
                const first_bucket = optional_bucket orelse continue;
-                const size_class = @as(usize, 1) << @intCast(math.Log2Int(usize), bucket_i);
+                const size_class = @as(usize, 1) << @as(math.Log2Int(usize), @intCast(bucket_i));
                const used_bits_count = usedBitsCount(size_class);
                var bucket = first_bucket;
                while (true) {
@@ -385,7 +385,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
 
        fn freeBucket(self: *Self, bucket: *BucketHeader, size_class: usize) void {
            const bucket_size = bucketSize(size_class);
-            const bucket_slice = @ptrCast([*]align(@alignOf(BucketHeader)) u8, bucket)[0..bucket_size];
+            const bucket_slice = @as([*]align(@alignOf(BucketHeader)) u8, @ptrCast(bucket))[0..bucket_size];
            self.backing_allocator.free(bucket_slice);
        }
 
@@ -444,7 +444,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                self.small_allocations.deinit(self.backing_allocator);
            }
            self.* = undefined;
-            return @enumFromInt(Check, @intFromBool(leaks));
+            return @as(Check, @enumFromInt(@intFromBool(leaks)));
        }
 
        fn collectStackTrace(first_trace_addr: usize, addresses: *[stack_n]usize) void {
@@ -496,7 +496,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
            bucket.alloc_cursor += 1;
            var used_bits_byte = bucket.usedBits(slot_index / 8);
-            const used_bit_index: u3 = @intCast(u3, slot_index % 8); // TODO cast should be unnecessary
+            const used_bit_index: u3 = @as(u3, @intCast(slot_index % 8)); // TODO cast should be unnecessary
            used_bits_byte.* |= (@as(u8, 1) << used_bit_index);
            bucket.used_count += 1;
            bucket.captureStackTrace(trace_addr, size_class, slot_index, .alloc);
@@ -667,8 +667,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
            new_size: usize,
            ret_addr: usize,
        ) bool {
-            const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
-            const log2_old_align = @intCast(Allocator.Log2Align, log2_old_align_u8);
+            const self: *Self = @ptrCast(@alignCast(ctx));
+            const log2_old_align = @as(Allocator.Log2Align, @intCast(log2_old_align_u8));
            self.mutex.lock();
            defer self.mutex.unlock();
@@ -704,11 +704,11 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                return self.resizeLarge(old_mem, log2_old_align, new_size, ret_addr);
            };
            const byte_offset = @intFromPtr(old_mem.ptr) - @intFromPtr(bucket.page);
-            const slot_index = @intCast(SlotIndex, byte_offset / size_class);
+            const slot_index = @as(SlotIndex, @intCast(byte_offset / size_class));
            const used_byte_index = slot_index / 8;
-            const used_bit_index = @intCast(u3, slot_index % 8);
+            const used_bit_index = @as(u3, @intCast(slot_index % 8));
            const used_byte = bucket.usedBits(used_byte_index);
-            const is_used = @truncate(u1, used_byte.* >> used_bit_index) != 0;
+            const is_used = @as(u1, @truncate(used_byte.* >> used_bit_index)) != 0;
            if (!is_used) {
                if (config.safety) {
                    reportDoubleFree(ret_addr, bucketStackTrace(bucket, size_class, slot_index, .alloc), bucketStackTrace(bucket, size_class, slot_index, .free));
@@ -739,8 +739,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                }
                if (log2_old_align != entry.value_ptr.log2_ptr_align) {
                    log.err("Allocation alignment {d} does not match resize alignment {d}. Allocation: {} Resize: {}", .{
-                        @as(usize, 1) << @intCast(math.Log2Int(usize), entry.value_ptr.log2_ptr_align),
-                        @as(usize, 1) << @intCast(math.Log2Int(usize), log2_old_align),
+                        @as(usize, 1) << @as(math.Log2Int(usize), @intCast(entry.value_ptr.log2_ptr_align)),
+                        @as(usize, 1) << @as(math.Log2Int(usize), @intCast(log2_old_align)),
                        bucketStackTrace(bucket, size_class, slot_index, .alloc),
                        free_stack_trace,
                    });
@@ -786,8 +786,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
            log2_old_align_u8: u8,
            ret_addr: usize,
        ) void {
-            const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
-            const log2_old_align = @intCast(Allocator.Log2Align, log2_old_align_u8);
+            const self: *Self = @ptrCast(@alignCast(ctx));
+            const log2_old_align = @as(Allocator.Log2Align, @intCast(log2_old_align_u8));
            self.mutex.lock();
            defer self.mutex.unlock();
@@ -825,11 +825,11 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                return;
            };
            const byte_offset = @intFromPtr(old_mem.ptr) - @intFromPtr(bucket.page);
-            const slot_index = @intCast(SlotIndex, byte_offset / size_class);
+            const slot_index = @as(SlotIndex, @intCast(byte_offset / size_class));
            const used_byte_index = slot_index / 8;
-            const used_bit_index = @intCast(u3, slot_index % 8);
+            const used_bit_index = @as(u3, @intCast(slot_index % 8));
            const used_byte = bucket.usedBits(used_byte_index);
-            const is_used = @truncate(u1, used_byte.* >> used_bit_index) != 0;
+            const is_used = @as(u1, @truncate(used_byte.* >> used_bit_index)) != 0;
            if (!is_used) {
                if (config.safety) {
                    reportDoubleFree(ret_addr, bucketStackTrace(bucket, size_class, slot_index, .alloc), bucketStackTrace(bucket, size_class, slot_index, .free));
@@ -861,8 +861,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                }
                if (log2_old_align != entry.value_ptr.log2_ptr_align) {
                    log.err("Allocation alignment {d} does not match free alignment {d}. Allocation: {} Free: {}", .{
-                        @as(usize, 1) << @intCast(math.Log2Int(usize), entry.value_ptr.log2_ptr_align),
-                        @as(usize, 1) << @intCast(math.Log2Int(usize), log2_old_align),
+                        @as(usize, 1) << @as(math.Log2Int(usize), @intCast(entry.value_ptr.log2_ptr_align)),
+                        @as(usize, 1) << @as(math.Log2Int(usize), @intCast(log2_old_align)),
                        bucketStackTrace(bucket, size_class, slot_index, .alloc),
                        free_stack_trace,
                    });
@@ -896,7 +896,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
            } else {
                // move alloc_cursor to end so we can tell size_class later
                const slot_count = @divExact(page_size, size_class);
-                bucket.alloc_cursor = @truncate(SlotIndex, slot_count);
+                bucket.alloc_cursor = @as(SlotIndex, @truncate(slot_count));
                if (self.empty_buckets) |prev_bucket| {
                    // empty_buckets is ordered newest to oldest through prev so that if
                    // config.never_unmap is false and backing_allocator reuses freed memory
@@ -936,11 +936,11 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
        }
 
        fn alloc(ctx: *anyopaque, len: usize, log2_ptr_align: u8, ret_addr: usize) ?[*]u8 {
-            const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+            const self: *Self = @ptrCast(@alignCast(ctx));
            self.mutex.lock();
            defer self.mutex.unlock();
            if (!self.isAllocationAllowed(len)) return null;
-            return allocInner(self, len, @intCast(Allocator.Log2Align, log2_ptr_align), ret_addr) catch return null;
+            return allocInner(self, len, @as(Allocator.Log2Align, @intCast(log2_ptr_align)), ret_addr) catch return null;
        }
 
        fn allocInner(
@@ -949,7 +949,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
            log2_ptr_align: Allocator.Log2Align,
            ret_addr: usize,
        ) Allocator.Error![*]u8 {
-            const new_aligned_size = @max(len, @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align));
+            const new_aligned_size = @max(len, @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align)));
            if (new_aligned_size > largest_bucket_object_size) {
                try self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1);
                const ptr = self.backing_allocator.rawAlloc(len, log2_ptr_align, ret_addr) orelse
@@ -1002,7 +1002,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
            const bucket_size = bucketSize(size_class);
            const bucket_bytes = try self.backing_allocator.alignedAlloc(u8, @alignOf(BucketHeader), bucket_size);
-            const ptr = @ptrCast(*BucketHeader, bucket_bytes.ptr);
+            const ptr = @as(*BucketHeader, @ptrCast(bucket_bytes.ptr));
            ptr.* = BucketHeader{
                .prev = ptr,
                .next = ptr,
diff --git a/lib/std/heap/log_to_writer_allocator.zig b/lib/std/heap/log_to_writer_allocator.zig
index b2d83c416b..b5c86c9beb 100644
--- a/lib/std/heap/log_to_writer_allocator.zig
+++ b/lib/std/heap/log_to_writer_allocator.zig
@@ -34,7 +34,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
            log2_ptr_align: u8,
            ra: usize,
        ) ?[*]u8 {
-            const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+            const self: *Self = @ptrCast(@alignCast(ctx));
            self.writer.print("alloc : {}", .{len}) catch {};
            const result = self.parent_allocator.rawAlloc(len, log2_ptr_align, ra);
            if (result != null) {
@@ -52,7 +52,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
            new_len: usize,
            ra: usize,
        ) bool {
-            const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+            const self: *Self = @ptrCast(@alignCast(ctx));
            if (new_len <= buf.len) {
                self.writer.print("shrink: {} to {}\n", .{ buf.len, new_len }) catch {};
            } else {
@@ -77,7 +77,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
            log2_buf_align: u8,
            ra: usize,
        ) void {
-            const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+            const self: *Self = @ptrCast(@alignCast(ctx));
            self.writer.print("free : {}\n", .{buf.len}) catch {};
            self.parent_allocator.rawFree(buf, log2_buf_align, ra);
        }
diff --git a/lib/std/heap/logging_allocator.zig b/lib/std/heap/logging_allocator.zig
index 0d32b5405e..6924a284e3 100644
--- a/lib/std/heap/logging_allocator.zig
+++ b/lib/std/heap/logging_allocator.zig
@@ -59,7 +59,7 @@ pub fn ScopedLoggingAllocator(
            log2_ptr_align: u8,
            ra: usize,
        ) ?[*]u8 {
-            const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+            const self: *Self = @ptrCast(@alignCast(ctx));
            const result = self.parent_allocator.rawAlloc(len, log2_ptr_align, ra);
            if (result != null) {
                logHelper(
@@ -84,7 +84,7 @@ pub fn ScopedLoggingAllocator(
            new_len: usize,
            ra: usize,
        ) bool {
-            const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+            const self: *Self = @ptrCast(@alignCast(ctx));
            if (self.parent_allocator.rawResize(buf, log2_buf_align, new_len, ra)) {
                if (new_len <= buf.len) {
                    logHelper(
@@ -118,7 +118,7 @@ pub fn ScopedLoggingAllocator(
            log2_buf_align: u8,
            ra: usize,
        ) void {
-            const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+            const self: *Self = @ptrCast(@alignCast(ctx));
            self.parent_allocator.rawFree(buf, log2_buf_align, ra);
            logHelper(success_log_level, "free - len: {}", .{buf.len});
        }
diff --git a/lib/std/heap/memory_pool.zig b/lib/std/heap/memory_pool.zig
index 3fc7dfbfca..b56a15d006 100644
--- a/lib/std/heap/memory_pool.zig
+++ b/lib/std/heap/memory_pool.zig
@@ -70,7 +70,7 @@ pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type
        var i: usize = 0;
        while (i < initial_size) : (i += 1) {
            const raw_mem = try pool.allocNew();
-            const free_node = @ptrCast(NodePtr, raw_mem);
+            const free_node = @as(NodePtr, @ptrCast(raw_mem));
            free_node.* = Node{
                .next = pool.free_list,
            };
@@ -106,11 +106,11 @@ pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type
            pool.free_list = item.next;
            break :blk item;
        } else if (pool_options.growable)
-            @ptrCast(NodePtr, try pool.allocNew())
+            @as(NodePtr, @ptrCast(try pool.allocNew()))
        else
            return error.OutOfMemory;
 
-        const ptr = @ptrCast(ItemPtr, node);
+        const ptr = @as(ItemPtr, @ptrCast(node));
        ptr.* = undefined;
        return ptr;
    }
@@ -120,7 +120,7 @@ pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type
 
    pub fn destroy(pool: *Pool, ptr: ItemPtr) void {
        ptr.* = undefined;
-        const node = @ptrCast(NodePtr, ptr);
+        const node = @as(NodePtr, @ptrCast(ptr));
        node.* = Node{
            .next = pool.free_list,
        };
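The alignment-only counterpart behaves the same way: `@alignCast` no longer takes the alignment as its first argument but picks it up from the result location, as seen in the PageAllocator hunks above. A small hypothetical sketch, not code from this commit:

```zig
const std = @import("std");
const mem = std.mem;

// Old: const aligned = @alignCast(mem.page_size, ptr);
// New: the result type carries the alignment; @alignCast only inserts the
// (safety-checked) alignment assertion.
fn asPageAligned(bytes: []u8) []align(mem.page_size) u8 {
    // The return type supplies the target alignment here; passing a buffer
    // that is not page-aligned trips a safety check at runtime.
    return @alignCast(bytes);
}

test "page-aligned view of a page allocation" {
    const buf = try std.heap.page_allocator.alloc(u8, 16);
    defer std.heap.page_allocator.free(buf);
    _ = asPageAligned(buf);
}
```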
