Are you linking libc? In this case, {#syntax#}std.heap.c_allocator{#endsyntax#} is likely
the right choice, at least for your main allocator.
@@ -10114,7 +10114,7 @@ pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
- const allocator = &arena.allocator;
+ const allocator = arena.getAllocator();
const ptr = try allocator.create(i32);
std.debug.print("ptr={*}\n", .{ptr});
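For orientation: the change this patch applies everywhere is that {#syntax#}std.mem.Allocator{#endsyntax#}
is no longer reached as a struct field taken by pointer ({#syntax#}&arena.allocator{#endsyntax#}) but is
returned as a value by a method (named {#syntax#}getAllocator(){#endsyntax#} throughout this patch). A
minimal sketch of old vs. new usage, assuming {#syntax#}std.testing.allocator{#endsyntax#} as the backing
allocator (hypothetical test, not part of this patch):

    const std = @import("std");

    test "Allocator is now a value, not a field pointer" {
        var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
        defer arena.deinit();
        // Old: const allocator = &arena.allocator;  // *Allocator, a field pointer
        // New: an Allocator value that carries a type-erased pointer
        // back into the arena.
        const allocator = arena.getAllocator();
        const ptr = try allocator.create(i32);
        ptr.* = 42;
        // No explicit destroy needed; arena.deinit() frees everything.
    }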
@@ -10281,7 +10281,7 @@ test "string literal to constant slice" {
For example, the function's documentation may say "caller owns the returned memory", in which case
the code that calls the function must have a plan for when to free that memory. Probably in this situation,
- the function will accept an {#syntax#}*Allocator{#endsyntax#} parameter.
+ the function will accept an {#syntax#}Allocator{#endsyntax#} parameter.
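As a concrete illustration of that convention, a hedged sketch ({#syntax#}dupeGreeting{#endsyntax#} is a
hypothetical function, not from this patch): the callee allocates with the caller-supplied
{#syntax#}Allocator{#endsyntax#} and its documentation makes the caller responsible for freeing the result.

    const std = @import("std");

    /// Caller owns the returned memory.
    fn dupeGreeting(allocator: std.mem.Allocator, name: []const u8) ![]u8 {
        return std.fmt.allocPrint(allocator, "hello, {s}", .{name});
    }

    test "caller frees what the callee allocates" {
        const gpa = std.testing.allocator;
        const greeting = try dupeGreeting(gpa, "zig");
        // The doc comment above is the contract: the caller must free this.
        defer gpa.free(greeting);
        try std.testing.expect(std.mem.eql(u8, greeting, "hello, zig"));
    }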
Sometimes the lifetime of a pointer may be more complicated. For example, the
@@ -10820,7 +10820,7 @@ const std = @import("std");
pub fn main() !void {
var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
- const gpa = &general_purpose_allocator.allocator;
+ const gpa = general_purpose_allocator.getAllocator();
const args = try std.process.argsAlloc(gpa);
defer std.process.argsFree(gpa, args);
@@ -10842,7 +10842,7 @@ const PreopenList = std.fs.wasi.PreopenList;
pub fn main() !void {
var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
- const gpa = &general_purpose_allocator.allocator;
+ const gpa = general_purpose_allocator.getAllocator();
var preopens = PreopenList.init(gpa);
defer preopens.deinit();
diff --git a/lib/std/event/loop.zig b/lib/std/event/loop.zig
index 413b23cd48..191ed4de94 100644
--- a/lib/std/event/loop.zig
+++ b/lib/std/event/loop.zig
@@ -173,12 +173,12 @@ pub const Loop = struct {
// We need at least one of these in case the fs thread wants to use onNextTick
const extra_thread_count = thread_count - 1;
const resume_node_count = std.math.max(extra_thread_count, 1);
- self.eventfd_resume_nodes = try self.arena.allocator.alloc(
+ self.eventfd_resume_nodes = try self.arena.getAllocator().alloc(
std.atomic.Stack(ResumeNode.EventFd).Node,
resume_node_count,
);
- self.extra_threads = try self.arena.allocator.alloc(Thread, extra_thread_count);
+ self.extra_threads = try self.arena.getAllocator().alloc(Thread, extra_thread_count);
try self.initOsData(extra_thread_count);
errdefer self.deinitOsData();
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 5cc7f8ef65..2e58c8c5d9 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -98,7 +98,7 @@ pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir {
var astgen: AstGen = .{
.gpa = gpa,
- .arena = &arena.allocator,
+ .arena = arena.getAllocator(),
.tree = &tree,
};
defer astgen.deinit(gpa);
@@ -1939,6 +1939,7 @@ fn blockExprStmts(gz: *GenZir, parent_scope: *Scope, statements: []const Ast.Nod
var block_arena = std.heap.ArenaAllocator.init(gz.astgen.gpa);
defer block_arena.deinit();
+ const block_arena_allocator = block_arena.getAllocator();
var noreturn_src_node: Ast.Node.Index = 0;
var scope = parent_scope;
@@ -1959,13 +1960,13 @@ fn blockExprStmts(gz: *GenZir, parent_scope: *Scope, statements: []const Ast.Nod
}
switch (node_tags[statement]) {
// zig fmt: off
- .global_var_decl => scope = try varDecl(gz, scope, statement, &block_arena.allocator, tree.globalVarDecl(statement)),
- .local_var_decl => scope = try varDecl(gz, scope, statement, &block_arena.allocator, tree.localVarDecl(statement)),
- .simple_var_decl => scope = try varDecl(gz, scope, statement, &block_arena.allocator, tree.simpleVarDecl(statement)),
- .aligned_var_decl => scope = try varDecl(gz, scope, statement, &block_arena.allocator, tree.alignedVarDecl(statement)),
+ .global_var_decl => scope = try varDecl(gz, scope, statement, block_arena_allocator, tree.globalVarDecl(statement)),
+ .local_var_decl => scope = try varDecl(gz, scope, statement, block_arena_allocator, tree.localVarDecl(statement)),
+ .simple_var_decl => scope = try varDecl(gz, scope, statement, block_arena_allocator, tree.simpleVarDecl(statement)),
+ .aligned_var_decl => scope = try varDecl(gz, scope, statement, block_arena_allocator, tree.alignedVarDecl(statement)),
- .@"defer" => scope = try makeDeferScope(gz.astgen, scope, statement, &block_arena.allocator, .defer_normal),
- .@"errdefer" => scope = try makeDeferScope(gz.astgen, scope, statement, &block_arena.allocator, .defer_error),
+ .@"defer" => scope = try makeDeferScope(gz.astgen, scope, statement, block_arena_allocator, .defer_normal),
+ .@"errdefer" => scope = try makeDeferScope(gz.astgen, scope, statement, block_arena_allocator, .defer_error),
.assign => try assign(gz, scope, statement),
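Since the allocator is now obtained by a method call rather than a field access, call sites that
consult one arena repeatedly hoist the result into a local, as `block_arena_allocator` does above.
A minimal sketch of the same idiom under the names this patch uses (`sumSquares` is hypothetical):

    const std = @import("std");

    fn sumSquares(gpa: std.mem.Allocator, n: usize) !u64 {
        var arena = std.heap.ArenaAllocator.init(gpa);
        defer arena.deinit();
        // Hoisted once and reused below -- mirrors block_arena_allocator.
        const allocator = arena.getAllocator();
        const squares = try allocator.alloc(u64, n);
        for (squares) |*s, i| s.* = @as(u64, i * i);
        var total: u64 = 0;
        for (squares) |s| total += s;
        return total;
    }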
diff --git a/src/Compilation.zig b/src/Compilation.zig
index f6ee58b5ef..7c855862fd 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -412,28 +412,29 @@ pub const AllErrors = struct {
errors: *std.ArrayList(Message),
module_err_msg: Module.ErrorMsg,
) !void {
- const notes = try arena.allocator.alloc(Message, module_err_msg.notes.len);
+ const allocator = arena.getAllocator();
+ const notes = try allocator.alloc(Message, module_err_msg.notes.len);
for (notes) |*note, i| {
const module_note = module_err_msg.notes[i];
const source = try module_note.src_loc.file_scope.getSource(module.gpa);
const byte_offset = try module_note.src_loc.byteOffset(module.gpa);
const loc = std.zig.findLineColumn(source, byte_offset);
- const file_path = try module_note.src_loc.file_scope.fullPath(&arena.allocator);
+ const file_path = try module_note.src_loc.file_scope.fullPath(allocator);
note.* = .{
.src = .{
.src_path = file_path,
- .msg = try arena.allocator.dupe(u8, module_note.msg),
+ .msg = try allocator.dupe(u8, module_note.msg),
.byte_offset = byte_offset,
.line = @intCast(u32, loc.line),
.column = @intCast(u32, loc.column),
- .source_line = try arena.allocator.dupe(u8, loc.source_line),
+ .source_line = try allocator.dupe(u8, loc.source_line),
},
};
}
if (module_err_msg.src_loc.lazy == .entire_file) {
try errors.append(.{
.plain = .{
- .msg = try arena.allocator.dupe(u8, module_err_msg.msg),
+ .msg = try allocator.dupe(u8, module_err_msg.msg),
},
});
return;
@@ -441,16 +442,16 @@ pub const AllErrors = struct {
const source = try module_err_msg.src_loc.file_scope.getSource(module.gpa);
const byte_offset = try module_err_msg.src_loc.byteOffset(module.gpa);
const loc = std.zig.findLineColumn(source, byte_offset);
- const file_path = try module_err_msg.src_loc.file_scope.fullPath(&arena.allocator);
+ const file_path = try module_err_msg.src_loc.file_scope.fullPath(allocator);
try errors.append(.{
.src = .{
.src_path = file_path,
- .msg = try arena.allocator.dupe(u8, module_err_msg.msg),
+ .msg = try allocator.dupe(u8, module_err_msg.msg),
.byte_offset = byte_offset,
.line = @intCast(u32, loc.line),
.column = @intCast(u32, loc.column),
.notes = notes,
- .source_line = try arena.allocator.dupe(u8, loc.source_line),
+ .source_line = try allocator.dupe(u8, loc.source_line),
},
});
}
@@ -548,11 +549,12 @@ pub const AllErrors = struct {
msg: []const u8,
optional_children: ?AllErrors,
) !void {
- const duped_msg = try arena.allocator.dupe(u8, msg);
+ const allocator = arena.getAllocator();
+ const duped_msg = try allocator.dupe(u8, msg);
if (optional_children) |*children| {
try errors.append(.{ .plain = .{
.msg = duped_msg,
- .notes = try dupeList(children.list, &arena.allocator),
+ .notes = try dupeList(children.list, allocator),
} });
} else {
try errors.append(.{ .plain = .{ .msg = duped_msg } });
@@ -786,7 +788,7 @@ fn addPackageTableToCacheHash(
seen_table: *std.AutoHashMap(*Package, void),
hash_type: union(enum) { path_bytes, files: *Cache.Manifest },
) (error{OutOfMemory} || std.os.GetCwdError)!void {
- const allocator = &arena.allocator;
+ const allocator = arena.getAllocator();
const packages = try allocator.alloc(Package.Table.KV, pkg_table.count());
{
@@ -850,7 +852,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
// initialization and then is freed in deinit().
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
errdefer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
// We put the `Compilation` itself in the arena. Freeing the arena will free the module.
// It's initialized later after we prepare the initialization options.
@@ -1208,7 +1210,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
{
var local_arena = std.heap.ArenaAllocator.init(gpa);
defer local_arena.deinit();
- var seen_table = std.AutoHashMap(*Package, void).init(&local_arena.allocator);
+ var seen_table = std.AutoHashMap(*Package, void).init(local_arena.getAllocator());
try addPackageTableToCacheHash(&hash, &local_arena, main_pkg.table, &seen_table, .path_bytes);
}
hash.add(valgrind);
@@ -2011,6 +2013,7 @@ pub fn totalErrorCount(self: *Compilation) usize {
pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
var arena = std.heap.ArenaAllocator.init(self.gpa);
errdefer arena.deinit();
+ const arena_allocator = arena.getAllocator();
var errors = std.ArrayList(AllErrors.Message).init(self.gpa);
defer errors.deinit();
@@ -2024,8 +2027,8 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
// C error reporting bubbling up.
try errors.append(.{
.src = .{
- .src_path = try arena.allocator.dupe(u8, c_object.src.src_path),
- .msg = try std.fmt.allocPrint(&arena.allocator, "unable to build C object: {s}", .{
+ .src_path = try arena_allocator.dupe(u8, c_object.src.src_path),
+ .msg = try std.fmt.allocPrint(arena_allocator, "unable to build C object: {s}", .{
err_msg.msg,
}),
.byte_offset = 0,
@@ -2050,7 +2053,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
// must have completed successfully.
const tree = try entry.key_ptr.*.getTree(module.gpa);
assert(tree.errors.len == 0);
- try AllErrors.addZir(&arena.allocator, &errors, entry.key_ptr.*);
+ try AllErrors.addZir(arena_allocator, &errors, entry.key_ptr.*);
}
}
}
@@ -2089,7 +2092,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
if (errors.items.len == 0 and self.link_error_flags.no_entry_point_found) {
try errors.append(.{
.plain = .{
- .msg = try std.fmt.allocPrint(&arena.allocator, "no entry point found", .{}),
+ .msg = try std.fmt.allocPrint(arena_allocator, "no entry point found", .{}),
},
});
}
@@ -2121,7 +2124,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
assert(errors.items.len == self.totalErrorCount());
return AllErrors{
- .list = try arena.allocator.dupe(AllErrors.Message, errors.items),
+ .list = try arena_allocator.dupe(AllErrors.Message, errors.items),
.arena = arena.state,
};
}
@@ -2292,7 +2295,7 @@ fn processOneJob(comp: *Compilation, job: Job, main_progress_node: *std.Progress
var tmp_arena = std.heap.ArenaAllocator.init(gpa);
defer tmp_arena.deinit();
- const sema_arena = &tmp_arena.allocator;
+ const sema_arena = tmp_arena.getAllocator();
const sema_frame = tracy.namedFrame("sema");
var sema_frame_ended = false;
@@ -2387,7 +2390,7 @@ fn processOneJob(comp: *Compilation, job: Job, main_progress_node: *std.Progress
.decl = decl,
.fwd_decl = fwd_decl.toManaged(gpa),
.typedefs = c_codegen.TypedefMap.init(gpa),
- .typedefs_arena = &typedefs_arena.allocator,
+ .typedefs_arena = typedefs_arena.getAllocator(),
};
defer dg.fwd_decl.deinit();
defer dg.typedefs.deinit();
@@ -2841,7 +2844,7 @@ pub fn cImport(comp: *Compilation, c_src: []const u8) !CImportResult {
const digest = if (!actual_hit) digest: {
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const tmp_digest = man.hash.peek();
const tmp_dir_sub_path = try std.fs.path.join(arena, &[_][]const u8{ "o", &tmp_digest });
@@ -3096,7 +3099,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const c_source_basename = std.fs.path.basename(c_object.src.src_path);
@@ -4417,7 +4420,7 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
// Here we use the legacy stage1 C++ compiler to compile Zig code.
const mod = comp.bin_file.options.module.?;
@@ -4454,7 +4457,7 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node
_ = try man.addFile(main_zig_file, null);
{
- var seen_table = std.AutoHashMap(*Package, void).init(&arena_allocator.allocator);
+ var seen_table = std.AutoHashMap(*Package, void).init(arena_allocator.getAllocator());
try addPackageTableToCacheHash(&man.hash, &arena_allocator, mod.main_pkg.table, &seen_table, .{ .files = &man });
}
man.hash.add(comp.bin_file.options.valgrind);
diff --git a/src/DepTokenizer.zig b/src/DepTokenizer.zig
index 0fd26532f0..e99bfc7464 100644
--- a/src/DepTokenizer.zig
+++ b/src/DepTokenizer.zig
@@ -878,7 +878,7 @@ test "error prereq - continuation expecting end-of-line" {
// - tokenize input, emit textual representation, and compare to expect
fn depTokenizer(input: []const u8, expect: []const u8) !void {
var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator);
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
defer arena_allocator.deinit();
var it: Tokenizer = .{ .bytes = input };
diff --git a/src/Module.zig b/src/Module.zig
index a40dcd1419..d016418d8d 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -517,7 +517,7 @@ pub const Decl = struct {
pub fn finalizeNewArena(decl: *Decl, arena: *std.heap.ArenaAllocator) !void {
assert(decl.value_arena == null);
- const arena_state = try arena.allocator.create(std.heap.ArenaAllocator.State);
+ const arena_state = try arena.getAllocator().create(std.heap.ArenaAllocator.State);
arena_state.* = arena.state;
decl.value_arena = arena_state;
}
@@ -3159,10 +3159,11 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
const gpa = mod.gpa;
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer new_decl_arena.deinit();
+ const new_decl_arena_allocator = new_decl_arena.getAllocator();
- const struct_obj = try new_decl_arena.allocator.create(Module.Struct);
- const struct_ty = try Type.Tag.@"struct".create(&new_decl_arena.allocator, struct_obj);
- const struct_val = try Value.Tag.ty.create(&new_decl_arena.allocator, struct_ty);
+ const struct_obj = try new_decl_arena_allocator.create(Module.Struct);
+ const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj);
+ const struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty);
const ty_ty = comptime Type.initTag(.type);
struct_obj.* = .{
.owner_decl = undefined, // set below
@@ -3202,12 +3203,13 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
var sema_arena = std.heap.ArenaAllocator.init(gpa);
defer sema_arena.deinit();
+ const sema_arena_allocator = sema_arena.getAllocator();
var sema: Sema = .{
.mod = mod,
.gpa = gpa,
- .arena = &sema_arena.allocator,
- .perm_arena = &new_decl_arena.allocator,
+ .arena = sema_arena_allocator,
+ .perm_arena = new_decl_arena_allocator,
.code = file.zir,
.owner_decl = new_decl,
.func = null,
@@ -3216,7 +3218,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
};
defer sema.deinit();
- var wip_captures = try WipCaptureScope.init(gpa, &new_decl_arena.allocator, null);
+ var wip_captures = try WipCaptureScope.init(gpa, new_decl_arena_allocator, null);
defer wip_captures.deinit();
var block_scope: Sema.Block = .{
@@ -3265,15 +3267,17 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
// We need the memory for the Type to go into the arena for the Decl
var decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer decl_arena.deinit();
+ const decl_arena_allocator = decl_arena.getAllocator();
var analysis_arena = std.heap.ArenaAllocator.init(gpa);
defer analysis_arena.deinit();
+ const analysis_arena_allocator = analysis_arena.getAllocator();
var sema: Sema = .{
.mod = mod,
.gpa = gpa,
- .arena = &analysis_arena.allocator,
- .perm_arena = &decl_arena.allocator,
+ .arena = analysis_arena_allocator,
+ .perm_arena = decl_arena_allocator,
.code = zir,
.owner_decl = decl,
.func = null,
@@ -3296,7 +3300,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
}
log.debug("semaDecl {*} ({s})", .{ decl, decl.name });
- var wip_captures = try WipCaptureScope.init(gpa, &decl_arena.allocator, decl.src_scope);
+ var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope);
defer wip_captures.deinit();
var block_scope: Sema.Block = .{
@@ -3356,7 +3360,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
// not the struct itself.
try sema.resolveTypeLayout(&block_scope, src, decl_tv.ty);
- const decl_arena_state = try decl_arena.allocator.create(std.heap.ArenaAllocator.State);
+ const decl_arena_state = try decl_arena_allocator.create(std.heap.ArenaAllocator.State);
if (decl.is_usingnamespace) {
const ty_ty = Type.initTag(.type);
@@ -3370,7 +3374,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
}
decl.ty = ty_ty;
- decl.val = try Value.Tag.ty.create(&decl_arena.allocator, ty);
+ decl.val = try Value.Tag.ty.create(decl_arena_allocator, ty);
decl.align_val = Value.initTag(.null_value);
decl.linksection_val = Value.initTag(.null_value);
decl.has_tv = true;
@@ -3400,10 +3404,10 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
decl.clearValues(gpa);
}
- decl.ty = try decl_tv.ty.copy(&decl_arena.allocator);
- decl.val = try decl_tv.val.copy(&decl_arena.allocator);
- decl.align_val = try align_val.copy(&decl_arena.allocator);
- decl.linksection_val = try linksection_val.copy(&decl_arena.allocator);
+ decl.ty = try decl_tv.ty.copy(decl_arena_allocator);
+ decl.val = try decl_tv.val.copy(decl_arena_allocator);
+ decl.align_val = try align_val.copy(decl_arena_allocator);
+ decl.linksection_val = try linksection_val.copy(decl_arena_allocator);
decl.@"addrspace" = address_space;
decl.has_tv = true;
decl.owns_tv = owns_tv;
@@ -3453,7 +3457,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
decl.owns_tv = true;
queue_linker_work = true;
- const copied_init = try variable.init.copy(&decl_arena.allocator);
+ const copied_init = try variable.init.copy(decl_arena_allocator);
variable.init = copied_init;
}
},
@@ -3476,10 +3480,10 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
},
}
- decl.ty = try decl_tv.ty.copy(&decl_arena.allocator);
- decl.val = try decl_tv.val.copy(&decl_arena.allocator);
- decl.align_val = try align_val.copy(&decl_arena.allocator);
- decl.linksection_val = try linksection_val.copy(&decl_arena.allocator);
+ decl.ty = try decl_tv.ty.copy(decl_arena_allocator);
+ decl.val = try decl_tv.val.copy(decl_arena_allocator);
+ decl.align_val = try align_val.copy(decl_arena_allocator);
+ decl.linksection_val = try linksection_val.copy(decl_arena_allocator);
decl.@"addrspace" = address_space;
decl.has_tv = true;
decl_arena_state.* = decl_arena.state;
@@ -4128,12 +4132,13 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn, arena: Allocator) Sem
// Use the Decl's arena for captured values.
var decl_arena = decl.value_arena.?.promote(gpa);
defer decl.value_arena.?.* = decl_arena.state;
+ const decl_arena_allocator = decl_arena.getAllocator();
var sema: Sema = .{
.mod = mod,
.gpa = gpa,
.arena = arena,
- .perm_arena = &decl_arena.allocator,
+ .perm_arena = decl_arena_allocator,
.code = decl.getFileScope().zir,
.owner_decl = decl,
.func = func,
@@ -4147,7 +4152,7 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn, arena: Allocator) Sem
try sema.air_extra.ensureTotalCapacity(gpa, reserved_count);
sema.air_extra.items.len += reserved_count;
- var wip_captures = try WipCaptureScope.init(gpa, &decl_arena.allocator, decl.src_scope);
+ var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope);
defer wip_captures.deinit();
var inner_block: Sema.Block = .{
@@ -4751,7 +4756,7 @@ pub fn populateTestFunctions(mod: *Module) !void {
// decl reference it as a slice.
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer new_decl_arena.deinit();
- const arena = &new_decl_arena.allocator;
+ const arena = new_decl_arena.getAllocator();
const test_fn_vals = try arena.alloc(Value, mod.test_functions.count());
const array_decl = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{
@@ -4770,10 +4775,10 @@ pub fn populateTestFunctions(mod: *Module) !void {
const test_name_decl = n: {
var name_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer name_decl_arena.deinit();
- const bytes = try name_decl_arena.allocator.dupe(u8, test_name_slice);
+ const bytes = try name_decl_arena.getAllocator().dupe(u8, test_name_slice);
const test_name_decl = try mod.createAnonymousDeclFromDecl(array_decl, array_decl.src_namespace, null, .{
- .ty = try Type.Tag.array_u8.create(&name_decl_arena.allocator, bytes.len),
- .val = try Value.Tag.bytes.create(&name_decl_arena.allocator, bytes),
+ .ty = try Type.Tag.array_u8.create(name_decl_arena.getAllocator(), bytes.len),
+ .val = try Value.Tag.bytes.create(name_decl_arena.getAllocator(), bytes),
});
try test_name_decl.finalizeNewArena(&name_decl_arena);
break :n test_name_decl;
@@ -4802,7 +4807,7 @@ pub fn populateTestFunctions(mod: *Module) !void {
{
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer new_decl_arena.deinit();
- const arena = &new_decl_arena.allocator;
+ const arena = new_decl_arena.getAllocator();
// This copy accesses the old Decl Type/Value so it must be done before `clearValues`.
const new_ty = try Type.Tag.const_slice.create(arena, try tmp_test_fn_ty.copy(arena));
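The promote/demote dance above (`decl.value_arena.?.promote(gpa)`, then storing `arena.state` back
in a `defer`) is how a `Decl` keeps only the compact `std.heap.ArenaAllocator.State` and rehydrates
a full arena on demand. A sketch of the idiom in isolation (`Holder` is hypothetical; assumes the
State field defaults of this era's std):

    const std = @import("std");

    const Holder = struct {
        arena_state: std.heap.ArenaAllocator.State = .{},

        fn append(h: *Holder, gpa: std.mem.Allocator) !void {
            // Promote the stored state into a usable ArenaAllocator...
            var arena = h.arena_state.promote(gpa);
            // ...and write the (possibly grown) state back when done.
            defer h.arena_state = arena.state;
            _ = try arena.getAllocator().alloc(u8, 64);
        }
    };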
diff --git a/src/Sema.zig b/src/Sema.zig
index ce0c5c8ed7..9e0aa2f75e 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -418,7 +418,7 @@ pub const Block = struct {
finished: bool,
pub fn arena(wad: *WipAnonDecl) Allocator {
- return &wad.new_decl_arena.allocator;
+ return wad.new_decl_arena.getAllocator();
}
pub fn deinit(wad: *WipAnonDecl) void {
@@ -1594,10 +1594,11 @@ fn zirStructDecl(
var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
errdefer new_decl_arena.deinit();
+ const new_decl_arena_allocator = new_decl_arena.getAllocator();
- const struct_obj = try new_decl_arena.allocator.create(Module.Struct);
- const struct_ty = try Type.Tag.@"struct".create(&new_decl_arena.allocator, struct_obj);
- const struct_val = try Value.Tag.ty.create(&new_decl_arena.allocator, struct_ty);
+ const struct_obj = try new_decl_arena_allocator.create(Module.Struct);
+ const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj);
+ const struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty);
const type_name = try sema.createTypeName(block, small.name_strategy);
const new_decl = try sema.mod.createAnonymousDeclNamed(block, .{
.ty = Type.type,
@@ -1698,15 +1699,16 @@ fn zirEnumDecl(
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer new_decl_arena.deinit();
+ const new_decl_arena_allocator = new_decl_arena.getAllocator();
- const enum_obj = try new_decl_arena.allocator.create(Module.EnumFull);
- const enum_ty_payload = try new_decl_arena.allocator.create(Type.Payload.EnumFull);
+ const enum_obj = try new_decl_arena_allocator.create(Module.EnumFull);
+ const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumFull);
enum_ty_payload.* = .{
.base = .{ .tag = if (small.nonexhaustive) .enum_nonexhaustive else .enum_full },
.data = enum_obj,
};
const enum_ty = Type.initPayload(&enum_ty_payload.base);
- const enum_val = try Value.Tag.ty.create(&new_decl_arena.allocator, enum_ty);
+ const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty);
const type_name = try sema.createTypeName(block, small.name_strategy);
const new_decl = try mod.createAnonymousDeclNamed(block, .{
.ty = Type.type,
@@ -1790,17 +1792,17 @@ fn zirEnumDecl(
break :blk try sema.resolveType(block, src, tag_type_ref);
}
const bits = std.math.log2_int_ceil(usize, fields_len);
- break :blk try Type.Tag.int_unsigned.create(&new_decl_arena.allocator, bits);
+ break :blk try Type.Tag.int_unsigned.create(new_decl_arena_allocator, bits);
};
enum_obj.tag_ty = tag_ty;
}
- try enum_obj.fields.ensureTotalCapacity(&new_decl_arena.allocator, fields_len);
+ try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
const any_values = for (sema.code.extra[body_end..][0..bit_bags_count]) |bag| {
if (bag != 0) break true;
} else false;
if (any_values) {
- try enum_obj.values.ensureTotalCapacityContext(&new_decl_arena.allocator, fields_len, .{
+ try enum_obj.values.ensureTotalCapacityContext(new_decl_arena_allocator, fields_len, .{
.ty = enum_obj.tag_ty,
});
}
@@ -1820,7 +1822,7 @@ fn zirEnumDecl(
extra_index += 1;
// This string needs to outlive the ZIR code.
- const field_name = try new_decl_arena.allocator.dupe(u8, field_name_zir);
+ const field_name = try new_decl_arena_allocator.dupe(u8, field_name_zir);
const gop = enum_obj.fields.getOrPutAssumeCapacity(field_name);
if (gop.found_existing) {
@@ -1843,12 +1845,12 @@ fn zirEnumDecl(
// that points to this default value expression rather than the struct.
// But only resolve the source location if we need to emit a compile error.
const tag_val = (try sema.resolveInstConst(block, src, tag_val_ref)).val;
- const copied_tag_val = try tag_val.copy(&new_decl_arena.allocator);
+ const copied_tag_val = try tag_val.copy(new_decl_arena_allocator);
enum_obj.values.putAssumeCapacityNoClobberContext(copied_tag_val, {}, .{
.ty = enum_obj.tag_ty,
});
} else if (any_values) {
- const tag_val = try Value.Tag.int_u64.create(&new_decl_arena.allocator, field_i);
+ const tag_val = try Value.Tag.int_u64.create(new_decl_arena_allocator, field_i);
enum_obj.values.putAssumeCapacityNoClobberContext(tag_val, {}, .{ .ty = enum_obj.tag_ty });
}
}
@@ -1887,16 +1889,17 @@ fn zirUnionDecl(
var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
errdefer new_decl_arena.deinit();
+ const new_decl_arena_allocator = new_decl_arena.getAllocator();
- const union_obj = try new_decl_arena.allocator.create(Module.Union);
+ const union_obj = try new_decl_arena_allocator.create(Module.Union);
const type_tag: Type.Tag = if (small.has_tag_type or small.auto_enum_tag) .union_tagged else .@"union";
- const union_payload = try new_decl_arena.allocator.create(Type.Payload.Union);
+ const union_payload = try new_decl_arena_allocator.create(Type.Payload.Union);
union_payload.* = .{
.base = .{ .tag = type_tag },
.data = union_obj,
};
const union_ty = Type.initPayload(&union_payload.base);
- const union_val = try Value.Tag.ty.create(&new_decl_arena.allocator, union_ty);
+ const union_val = try Value.Tag.ty.create(new_decl_arena_allocator, union_ty);
const type_name = try sema.createTypeName(block, small.name_strategy);
const new_decl = try sema.mod.createAnonymousDeclNamed(block, .{
.ty = Type.type,
@@ -1955,15 +1958,16 @@ fn zirOpaqueDecl(
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer new_decl_arena.deinit();
+ const new_decl_arena_allocator = new_decl_arena.getAllocator();
- const opaque_obj = try new_decl_arena.allocator.create(Module.Opaque);
- const opaque_ty_payload = try new_decl_arena.allocator.create(Type.Payload.Opaque);
+ const opaque_obj = try new_decl_arena_allocator.create(Module.Opaque);
+ const opaque_ty_payload = try new_decl_arena_allocator.create(Type.Payload.Opaque);
opaque_ty_payload.* = .{
.base = .{ .tag = .@"opaque" },
.data = opaque_obj,
};
const opaque_ty = Type.initPayload(&opaque_ty_payload.base);
- const opaque_val = try Value.Tag.ty.create(&new_decl_arena.allocator, opaque_ty);
+ const opaque_val = try Value.Tag.ty.create(new_decl_arena_allocator, opaque_ty);
const type_name = try sema.createTypeName(block, small.name_strategy);
const new_decl = try mod.createAnonymousDeclNamed(block, .{
.ty = Type.type,
@@ -2008,10 +2012,11 @@ fn zirErrorSetDecl(
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer new_decl_arena.deinit();
+ const new_decl_arena_allocator = new_decl_arena.getAllocator();
- const error_set = try new_decl_arena.allocator.create(Module.ErrorSet);
- const error_set_ty = try Type.Tag.error_set.create(&new_decl_arena.allocator, error_set);
- const error_set_val = try Value.Tag.ty.create(&new_decl_arena.allocator, error_set_ty);
+ const error_set = try new_decl_arena_allocator.create(Module.ErrorSet);
+ const error_set_ty = try Type.Tag.error_set.create(new_decl_arena_allocator, error_set);
+ const error_set_val = try Value.Tag.ty.create(new_decl_arena_allocator, error_set_ty);
const type_name = try sema.createTypeName(block, name_strategy);
const new_decl = try sema.mod.createAnonymousDeclNamed(block, .{
.ty = Type.type,
@@ -2019,9 +2024,9 @@ fn zirErrorSetDecl(
}, type_name);
new_decl.owns_tv = true;
errdefer sema.mod.abortAnonDecl(new_decl);
- const names = try new_decl_arena.allocator.alloc([]const u8, fields.len);
+ const names = try new_decl_arena_allocator.alloc([]const u8, fields.len);
for (fields) |str_index, i| {
- names[i] = try new_decl_arena.allocator.dupe(u8, sema.code.nullTerminatedString(str_index));
+ names[i] = try new_decl_arena_allocator.dupe(u8, sema.code.nullTerminatedString(str_index));
}
error_set.* = .{
.owner_decl = new_decl,
@@ -3935,7 +3940,7 @@ fn analyzeCall(
{
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
errdefer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
for (memoized_call_key.args) |*arg| {
arg.* = try arg.*.copy(arena);
@@ -4069,6 +4074,7 @@ fn analyzeCall(
var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
errdefer new_decl_arena.deinit();
+ const new_decl_arena_allocator = new_decl_arena.getAllocator();
// Re-run the block that creates the function, with the comptime parameters
// pre-populated inside `inst_map`. This causes `param_comptime` and
@@ -4078,13 +4084,13 @@ fn analyzeCall(
.mod = mod,
.gpa = gpa,
.arena = sema.arena,
- .perm_arena = &new_decl_arena.allocator,
+ .perm_arena = new_decl_arena_allocator,
.code = fn_zir,
.owner_decl = new_decl,
.func = null,
.fn_ret_ty = Type.void,
.owner_func = null,
- .comptime_args = try new_decl_arena.allocator.alloc(TypedValue, uncasted_args.len),
+ .comptime_args = try new_decl_arena_allocator.alloc(TypedValue, uncasted_args.len),
.comptime_args_fn_inst = module_fn.zir_body_inst,
.preallocated_new_func = new_module_func,
};
@@ -4168,7 +4174,7 @@ fn analyzeCall(
else => continue,
}
const arg = child_sema.inst_map.get(inst).?;
- const copied_arg_ty = try child_sema.typeOf(arg).copy(&new_decl_arena.allocator);
+ const copied_arg_ty = try child_sema.typeOf(arg).copy(new_decl_arena_allocator);
if (child_sema.resolveMaybeUndefValAllowVariables(
&child_block,
.unneeded,
@@ -4176,7 +4182,7 @@ fn analyzeCall(
) catch unreachable) |arg_val| {
child_sema.comptime_args[arg_i] = .{
.ty = copied_arg_ty,
- .val = try arg_val.copy(&new_decl_arena.allocator),
+ .val = try arg_val.copy(new_decl_arena_allocator),
};
} else {
child_sema.comptime_args[arg_i] = .{
@@ -4191,8 +4197,8 @@ fn analyzeCall(
try wip_captures.finalize();
// Populate the Decl ty/val with the function and its type.
- new_decl.ty = try child_sema.typeOf(new_func_inst).copy(&new_decl_arena.allocator);
- new_decl.val = try Value.Tag.function.create(&new_decl_arena.allocator, new_func);
+ new_decl.ty = try child_sema.typeOf(new_func_inst).copy(new_decl_arena_allocator);
+ new_decl.val = try Value.Tag.function.create(new_decl_arena_allocator, new_func);
new_decl.analysis = .complete;
log.debug("generic function '{s}' instantiated with type {}", .{
@@ -6047,8 +6053,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
defer arena.deinit();
const target = sema.mod.getTarget();
- const min_int = try operand_ty.minInt(&arena.allocator, target);
- const max_int = try operand_ty.maxInt(&arena.allocator, target);
+ const min_int = try operand_ty.minInt(arena.getAllocator(), target);
+ const max_int = try operand_ty.maxInt(arena.getAllocator(), target);
if (try range_set.spans(min_int, max_int, operand_ty)) {
if (special_prong == .@"else") {
return sema.fail(
@@ -12795,7 +12801,7 @@ const ComptimePtrMutationKit = struct {
fn beginArena(self: *ComptimePtrMutationKit, gpa: Allocator) Allocator {
self.decl_arena = self.decl_ref_mut.decl.value_arena.?.promote(gpa);
- return &self.decl_arena.allocator;
+ return self.decl_arena.getAllocator();
}
fn finishArena(self: *ComptimePtrMutationKit) void {
@@ -14287,6 +14293,7 @@ fn semaStructFields(
var decl_arena = decl.value_arena.?.promote(gpa);
defer decl.value_arena.?.* = decl_arena.state;
+ const decl_arena_allocator = decl_arena.getAllocator();
var analysis_arena = std.heap.ArenaAllocator.init(gpa);
defer analysis_arena.deinit();
@@ -14294,8 +14301,8 @@ fn semaStructFields(
var sema: Sema = .{
.mod = mod,
.gpa = gpa,
- .arena = &analysis_arena.allocator,
- .perm_arena = &decl_arena.allocator,
+ .arena = analysis_arena.getAllocator(),
+ .perm_arena = decl_arena_allocator,
.code = zir,
.owner_decl = decl,
.func = null,
@@ -14304,7 +14311,7 @@ fn semaStructFields(
};
defer sema.deinit();
- var wip_captures = try WipCaptureScope.init(gpa, &decl_arena.allocator, decl.src_scope);
+ var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope);
defer wip_captures.deinit();
var block_scope: Block = .{
@@ -14328,7 +14335,7 @@ fn semaStructFields(
try wip_captures.finalize();
- try struct_obj.fields.ensureTotalCapacity(&decl_arena.allocator, fields_len);
+ try struct_obj.fields.ensureTotalCapacity(decl_arena_allocator, fields_len);
const bits_per_field = 4;
const fields_per_u32 = 32 / bits_per_field;
@@ -14359,7 +14366,7 @@ fn semaStructFields(
extra_index += 1;
// This string needs to outlive the ZIR code.
- const field_name = try decl_arena.allocator.dupe(u8, field_name_zir);
+ const field_name = try decl_arena_allocator.dupe(u8, field_name_zir);
const field_ty: Type = if (field_type_ref == .none)
Type.initTag(.noreturn)
else
@@ -14371,7 +14378,7 @@ fn semaStructFields(
const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name);
assert(!gop.found_existing);
gop.value_ptr.* = .{
- .ty = try field_ty.copy(&decl_arena.allocator),
+ .ty = try field_ty.copy(decl_arena_allocator),
.abi_align = Value.initTag(.abi_align_default),
.default_val = Value.initTag(.unreachable_value),
.is_comptime = is_comptime,
@@ -14385,7 +14392,7 @@ fn semaStructFields(
// that points to this alignment expression rather than the struct.
// But only resolve the source location if we need to emit a compile error.
const abi_align_val = (try sema.resolveInstConst(&block_scope, src, align_ref)).val;
- gop.value_ptr.abi_align = try abi_align_val.copy(&decl_arena.allocator);
+ gop.value_ptr.abi_align = try abi_align_val.copy(decl_arena_allocator);
}
if (has_default) {
const default_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
@@ -14396,7 +14403,7 @@ fn semaStructFields(
// But only resolve the source location if we need to emit a compile error.
const default_val = (try sema.resolveMaybeUndefVal(&block_scope, src, default_inst)) orelse
return sema.failWithNeededComptime(&block_scope, src);
- gop.value_ptr.default_val = try default_val.copy(&decl_arena.allocator);
+ gop.value_ptr.default_val = try default_val.copy(decl_arena_allocator);
}
}
}
@@ -14454,6 +14461,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
var decl_arena = union_obj.owner_decl.value_arena.?.promote(gpa);
defer union_obj.owner_decl.value_arena.?.* = decl_arena.state;
+ const decl_arena_allocator = decl_arena.getAllocator();
var analysis_arena = std.heap.ArenaAllocator.init(gpa);
defer analysis_arena.deinit();
@@ -14461,8 +14469,8 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
var sema: Sema = .{
.mod = mod,
.gpa = gpa,
- .arena = &analysis_arena.allocator,
- .perm_arena = &decl_arena.allocator,
+ .arena = analysis_arena.getAllocator(),
+ .perm_arena = decl_arena_allocator,
.code = zir,
.owner_decl = decl,
.func = null,
@@ -14471,7 +14479,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
};
defer sema.deinit();
- var wip_captures = try WipCaptureScope.init(gpa, &decl_arena.allocator, decl.src_scope);
+ var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope);
defer wip_captures.deinit();
var block_scope: Block = .{
@@ -14495,7 +14503,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
try wip_captures.finalize();
- try union_obj.fields.ensureTotalCapacity(&decl_arena.allocator, fields_len);
+ try union_obj.fields.ensureTotalCapacity(decl_arena_allocator, fields_len);
var int_tag_ty: Type = undefined;
var enum_field_names: ?*Module.EnumNumbered.NameMap = null;
@@ -14571,7 +14579,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
}
// This string needs to outlive the ZIR code.
- const field_name = try decl_arena.allocator.dupe(u8, field_name_zir);
+ const field_name = try decl_arena_allocator.dupe(u8, field_name_zir);
if (enum_field_names) |set| {
set.putAssumeCapacity(field_name, {});
}
@@ -14589,7 +14597,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
const gop = union_obj.fields.getOrPutAssumeCapacity(field_name);
assert(!gop.found_existing);
gop.value_ptr.* = .{
- .ty = try field_ty.copy(&decl_arena.allocator),
+ .ty = try field_ty.copy(decl_arena_allocator),
.abi_align = Value.initTag(.abi_align_default),
};
@@ -14598,7 +14606,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
// that points to this alignment expression rather than the struct.
// But only resolve the source location if we need to emit a compile error.
const abi_align_val = (try sema.resolveInstConst(&block_scope, src, align_ref)).val;
- gop.value_ptr.abi_align = try abi_align_val.copy(&decl_arena.allocator);
+ gop.value_ptr.abi_align = try abi_align_val.copy(decl_arena_allocator);
} else {
gop.value_ptr.abi_align = Value.initTag(.abi_align_default);
}
@@ -14615,15 +14623,16 @@ fn generateUnionTagTypeNumbered(
var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
errdefer new_decl_arena.deinit();
+ const new_decl_arena_allocator = new_decl_arena.getAllocator();
- const enum_obj = try new_decl_arena.allocator.create(Module.EnumNumbered);
- const enum_ty_payload = try new_decl_arena.allocator.create(Type.Payload.EnumNumbered);
+ const enum_obj = try new_decl_arena_allocator.create(Module.EnumNumbered);
+ const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumNumbered);
enum_ty_payload.* = .{
.base = .{ .tag = .enum_numbered },
.data = enum_obj,
};
const enum_ty = Type.initPayload(&enum_ty_payload.base);
- const enum_val = try Value.Tag.ty.create(&new_decl_arena.allocator, enum_ty);
+ const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty);
// TODO better type name
const new_decl = try mod.createAnonymousDecl(block, .{
.ty = Type.type,
@@ -14640,8 +14649,8 @@ fn generateUnionTagTypeNumbered(
.node_offset = 0,
};
// Here we pre-allocate the maps using the decl arena.
- try enum_obj.fields.ensureTotalCapacity(&new_decl_arena.allocator, fields_len);
- try enum_obj.values.ensureTotalCapacityContext(&new_decl_arena.allocator, fields_len, .{ .ty = int_ty });
+ try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
+ try enum_obj.values.ensureTotalCapacityContext(new_decl_arena_allocator, fields_len, .{ .ty = int_ty });
try new_decl.finalizeNewArena(&new_decl_arena);
return enum_ty;
}
@@ -14651,15 +14660,16 @@ fn generateUnionTagTypeSimple(sema: *Sema, block: *Block, fields_len: u32) !Type
var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
errdefer new_decl_arena.deinit();
+ const new_decl_arena_allocator = new_decl_arena.getAllocator();
- const enum_obj = try new_decl_arena.allocator.create(Module.EnumSimple);
- const enum_ty_payload = try new_decl_arena.allocator.create(Type.Payload.EnumSimple);
+ const enum_obj = try new_decl_arena_allocator.create(Module.EnumSimple);
+ const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumSimple);
enum_ty_payload.* = .{
.base = .{ .tag = .enum_simple },
.data = enum_obj,
};
const enum_ty = Type.initPayload(&enum_ty_payload.base);
- const enum_val = try Value.Tag.ty.create(&new_decl_arena.allocator, enum_ty);
+ const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty);
// TODO better type name
const new_decl = try mod.createAnonymousDecl(block, .{
.ty = Type.type,
@@ -14674,7 +14684,7 @@ fn generateUnionTagTypeSimple(sema: *Sema, block: *Block, fields_len: u32) !Type
.node_offset = 0,
};
// Here we pre-allocate the maps using the decl arena.
- try enum_obj.fields.ensureTotalCapacity(&new_decl_arena.allocator, fields_len);
+ try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
try new_decl.finalizeNewArena(&new_decl_arena);
return enum_ty;
}
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 43776dea67..142bf1a146 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -390,6 +390,7 @@ pub const DeclGen = struct {
// Fall back to generic implementation.
var arena = std.heap.ArenaAllocator.init(dg.module.gpa);
defer arena.deinit();
+ const arena_allocator = arena.getAllocator();
try writer.writeAll("{");
var index: usize = 0;
@@ -397,7 +398,7 @@ pub const DeclGen = struct {
const elem_ty = ty.elemType();
while (index < len) : (index += 1) {
if (index != 0) try writer.writeAll(",");
- const elem_val = try val.elemValue(&arena.allocator, index);
+ const elem_val = try val.elemValue(arena_allocator, index);
try dg.renderValue(writer, elem_ty, elem_val);
}
if (ty.sentinel()) |sentinel_val| {
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index e326b2a677..31d3461846 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -331,7 +331,7 @@ pub const Object = struct {
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const mod = comp.bin_file.options.module.?;
const cache_dir = mod.zig_cache_artifact_directory;
@@ -779,7 +779,7 @@ pub const DeclGen = struct {
// The Type memory is ephemeral; since we want to store a longer-lived
// reference, we need to copy it here.
- gop.key_ptr.* = try t.copy(&dg.object.type_map_arena.allocator);
+ gop.key_ptr.* = try t.copy(dg.object.type_map_arena.getAllocator());
const opaque_obj = t.castTag(.@"opaque").?.data;
const name = try opaque_obj.getFullyQualifiedName(gpa);
@@ -837,7 +837,7 @@ pub const DeclGen = struct {
// The Type memory is ephemeral; since we want to store a longer-lived
// reference, we need to copy it here.
- gop.key_ptr.* = try t.copy(&dg.object.type_map_arena.allocator);
+ gop.key_ptr.* = try t.copy(dg.object.type_map_arena.getAllocator());
const struct_obj = t.castTag(.@"struct").?.data;
@@ -871,7 +871,7 @@ pub const DeclGen = struct {
// The Type memory is ephemeral; since we want to store a longer-lived
// reference, we need to copy it here.
- gop.key_ptr.* = try t.copy(&dg.object.type_map_arena.allocator);
+ gop.key_ptr.* = try t.copy(dg.object.type_map_arena.getAllocator());
const union_obj = t.cast(Type.Payload.Union).?.data;
const target = dg.module.getTarget();
@@ -2485,7 +2485,7 @@ pub const FuncGen = struct {
var arena_allocator = std.heap.ArenaAllocator.init(self.gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const llvm_params_len = args.len;
const llvm_param_types = try arena.alloc(*const llvm.Type, llvm_params_len);
diff --git a/src/crash_report.zig b/src/crash_report.zig
index f11f95fe0c..92c37d2ac8 100644
--- a/src/crash_report.zig
+++ b/src/crash_report.zig
@@ -85,7 +85,7 @@ fn dumpStatusReport() !void {
const anal = zir_state orelse return;
// Note: We have the panic mutex here, so we can safely use the global crash heap.
var fba = std.heap.FixedBufferAllocator.init(&crash_heap);
- const allocator = &fba.allocator;
+ const allocator = fba.getAllocator();
const stderr = io.getStdErr().writer();
const block: *Sema.Block = anal.block;
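`FixedBufferAllocator` gets the same interface change, as the crash_report hunk shows. A minimal
sketch (hypothetical buffer size, not from this patch):

    const std = @import("std");

    test "fixed buffer allocator, post-change interface" {
        var buf: [1024]u8 = undefined;
        var fba = std.heap.FixedBufferAllocator.init(&buf);
        const allocator = fba.getAllocator();
        // Allocations come straight out of `buf` with nothing to deinit,
        // which is why the crash handler can use one safely mid-panic.
        const bytes = try allocator.alloc(u8, 128);
        try std.testing.expect(bytes.len == 128);
    }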
diff --git a/src/glibc.zig b/src/glibc.zig
index e6e67e4f49..c3f2da599e 100644
--- a/src/glibc.zig
+++ b/src/glibc.zig
@@ -65,7 +65,7 @@ pub fn loadMetaData(gpa: Allocator, zig_lib_dir: std.fs.Dir) LoadMetaDataError!*
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
errdefer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
var all_versions = std.ArrayListUnmanaged(std.builtin.Version){};
var all_functions = std.ArrayListUnmanaged(Fn){};
@@ -256,7 +256,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
const gpa = comp.gpa;
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
switch (crt_file) {
.crti_o => {
@@ -711,7 +711,7 @@ pub fn buildSharedObjects(comp: *Compilation) !void {
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const target = comp.getTarget();
const target_version = target.os.version_range.linux.glibc;
diff --git a/src/libcxx.zig b/src/libcxx.zig
index 9986c922ba..908df3ca25 100644
--- a/src/libcxx.zig
+++ b/src/libcxx.zig
@@ -89,7 +89,7 @@ pub fn buildLibCXX(comp: *Compilation) !void {
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const root_name = "c++";
const output_mode = .Lib;
@@ -236,7 +236,7 @@ pub fn buildLibCXXABI(comp: *Compilation) !void {
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const root_name = "c++abi";
const output_mode = .Lib;
diff --git a/src/libtsan.zig b/src/libtsan.zig
index 57f1f8c78e..47089cc779 100644
--- a/src/libtsan.zig
+++ b/src/libtsan.zig
@@ -15,7 +15,7 @@ pub fn buildTsan(comp: *Compilation) !void {
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const root_name = "tsan";
const output_mode = .Lib;
diff --git a/src/libunwind.zig b/src/libunwind.zig
index 50c329c6d6..dabd8631b9 100644
--- a/src/libunwind.zig
+++ b/src/libunwind.zig
@@ -17,7 +17,7 @@ pub fn buildStaticLib(comp: *Compilation) !void {
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const root_name = "unwind";
const output_mode = .Lib;
diff --git a/src/link.zig b/src/link.zig
index b57be64d42..0b191ca8da 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -628,7 +628,7 @@ pub const File = struct {
var arena_allocator = std.heap.ArenaAllocator.init(base.allocator);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const directory = base.options.emit.?.directory; // Just an alias to make it shorter to type.
diff --git a/src/link/C.zig b/src/link/C.zig
index cbd36ebab5..6bdace3fca 100644
--- a/src/link/C.zig
+++ b/src/link/C.zig
@@ -128,7 +128,7 @@ pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, livenes
.decl = decl,
.fwd_decl = fwd_decl.toManaged(module.gpa),
.typedefs = typedefs.promote(module.gpa),
- .typedefs_arena = &self.arena.allocator,
+ .typedefs_arena = self.arena.getAllocator(),
},
.code = code.toManaged(module.gpa),
.indent_writer = undefined, // set later so we can get a pointer to object.code
@@ -193,7 +193,7 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
.decl = decl,
.fwd_decl = fwd_decl.toManaged(module.gpa),
.typedefs = typedefs.promote(module.gpa),
- .typedefs_arena = &self.arena.allocator,
+ .typedefs_arena = self.arena.getAllocator(),
},
.code = code.toManaged(module.gpa),
.indent_writer = undefined, // set later so we can get a pointer to object.code
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index d5e3e6caa3..00bddfe578 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -877,7 +877,7 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 6670f1a8b6..200ca48898 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -1243,7 +1243,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index bd26b64ad2..5e0e766483 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -412,7 +412,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation) !void {
var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.
@@ -5379,7 +5379,7 @@ fn snapshotState(self: *MachO) !void {
var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const out_file = try emit.directory.handle.createFile("snapshots.json", .{
.truncate = self.cold_start,
diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig
index c4c42940b8..bc7e4d71a4 100644
--- a/src/link/Plan9.zig
+++ b/src/link/Plan9.zig
@@ -168,7 +168,7 @@ fn putFn(self: *Plan9, decl: *Module.Decl, out: FnDeclOutput) !void {
try fn_map_res.value_ptr.functions.put(gpa, decl, out);
} else {
const file = decl.getFileScope();
- const arena = &self.path_arena.allocator;
+ const arena = self.path_arena.getAllocator();
// each file gets a symbol
fn_map_res.value_ptr.* = .{
.sym_index = blk: {
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index a8606ac27f..7ffd067596 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -950,7 +950,7 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation) !void {
var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.
diff --git a/src/link/tapi.zig b/src/link/tapi.zig
index 84257de388..fe5ef2af9c 100644
--- a/src/link/tapi.zig
+++ b/src/link/tapi.zig
@@ -120,7 +120,7 @@ pub const LibStub = struct {
err: {
log.debug("trying to parse as []TbdV4", .{});
const inner = lib_stub.yaml.parse([]TbdV4) catch break :err;
- var out = try lib_stub.yaml.arena.allocator.alloc(Tbd, inner.len);
+ var out = try lib_stub.yaml.arena.getAllocator().alloc(Tbd, inner.len);
for (inner) |doc, i| {
out[i] = .{ .v4 = doc };
}
@@ -130,7 +130,7 @@ pub const LibStub = struct {
err: {
log.debug("trying to parse as TbdV4", .{});
const inner = lib_stub.yaml.parse(TbdV4) catch break :err;
- var out = try lib_stub.yaml.arena.allocator.alloc(Tbd, 1);
+ var out = try lib_stub.yaml.arena.getAllocator().alloc(Tbd, 1);
out[0] = .{ .v4 = inner };
break :blk out;
}
@@ -148,7 +148,7 @@ pub const LibStub = struct {
err: {
log.debug("trying to parse as TbdV3", .{});
const inner = lib_stub.yaml.parse(TbdV3) catch break :err;
- var out = try lib_stub.yaml.arena.allocator.alloc(Tbd, 1);
+ var out = try lib_stub.yaml.arena.getAllocator().alloc(Tbd, 1);
out[0] = .{ .v3 = inner };
break :blk out;
}
diff --git a/src/link/tapi/yaml.zig b/src/link/tapi/yaml.zig
index 4392befb59..261caee717 100644
--- a/src/link/tapi/yaml.zig
+++ b/src/link/tapi/yaml.zig
@@ -248,15 +248,16 @@ pub const Yaml = struct {
pub fn load(allocator: Allocator, source: []const u8) !Yaml {
var arena = ArenaAllocator.init(allocator);
+ const arena_allocator = arena.getAllocator();
- var tree = Tree.init(&arena.allocator);
+ var tree = Tree.init(arena_allocator);
try tree.parse(source);
- var docs = std.ArrayList(Value).init(&arena.allocator);
+ var docs = std.ArrayList(Value).init(arena_allocator);
try docs.ensureUnusedCapacity(tree.docs.items.len);
for (tree.docs.items) |node| {
- const value = try Value.fromNode(&arena.allocator, &tree, node, null);
+ const value = try Value.fromNode(arena_allocator, &tree, node, null);
docs.appendAssumeCapacity(value);
}
@@ -299,7 +300,7 @@ pub const Yaml = struct {
.Pointer => |info| {
switch (info.size) {
.Slice => {
- var parsed = try self.arena.allocator.alloc(info.child, self.docs.items.len);
+ var parsed = try self.arena.getAllocator().alloc(info.child, self.docs.items.len);
for (self.docs.items) |doc, i| {
parsed[i] = try self.parseValue(info.child, doc);
}
@@ -361,7 +362,7 @@ pub const Yaml = struct {
inline for (struct_info.fields) |field| {
const value: ?Value = map.get(field.name) orelse blk: {
- const field_name = try mem.replaceOwned(u8, &self.arena.allocator, field.name, "_", "-");
+ const field_name = try mem.replaceOwned(u8, self.arena.getAllocator(), field.name, "_", "-");
break :blk map.get(field_name);
};
@@ -382,7 +383,7 @@ pub const Yaml = struct {
fn parsePointer(self: *Yaml, comptime T: type, value: Value) Error!T {
const ptr_info = @typeInfo(T).Pointer;
- const arena = &self.arena.allocator;
+ const arena = self.arena.getAllocator();
switch (ptr_info.size) {
.Slice => {
diff --git a/src/main.zig b/src/main.zig
index 52272db8ef..c97415ff29 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -139,7 +139,7 @@ pub fn main() anyerror!void {
const gpa = gpa: {
if (!builtin.link_libc) {
gpa_need_deinit = true;
- break :gpa &general_purpose_allocator.allocator;
+ break :gpa general_purpose_allocator.getAllocator();
}
// We would prefer to use raw libc allocator here, but cannot
// use it if it won't support the alignment we need.
@@ -153,7 +153,7 @@ pub fn main() anyerror!void {
};
var arena_instance = std.heap.ArenaAllocator.init(gpa);
defer arena_instance.deinit();
- const arena = &arena_instance.allocator;
+ const arena = arena_instance.getAllocator();
const args = try process.argsAlloc(arena);
@@ -3619,7 +3619,7 @@ pub fn cmdFmt(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
var errors = std.ArrayList(Compilation.AllErrors.Message).init(gpa);
defer errors.deinit();
- try Compilation.AllErrors.addZir(&arena_instance.allocator, &errors, &file);
+ try Compilation.AllErrors.addZir(arena_instance.getAllocator(), &errors, &file);
const ttyconf: std.debug.TTY.Config = switch (color) {
.auto => std.debug.detectTTYConfig(),
.on => .escape_codes,
@@ -3818,7 +3818,7 @@ fn fmtPathFile(
var errors = std.ArrayList(Compilation.AllErrors.Message).init(fmt.gpa);
defer errors.deinit();
- try Compilation.AllErrors.addZir(&arena_instance.allocator, &errors, &file);
+ try Compilation.AllErrors.addZir(arena_instance.getAllocator(), &errors, &file);
const ttyconf: std.debug.TTY.Config = switch (fmt.color) {
.auto => std.debug.detectTTYConfig(),
.on => .escape_codes,
diff --git a/src/mingw.zig b/src/mingw.zig
index b2628553b9..6f02ebf395 100644
--- a/src/mingw.zig
+++ b/src/mingw.zig
@@ -25,7 +25,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
}
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
switch (crt_file) {
.crt2_o => {
@@ -281,7 +281,7 @@ fn add_cc_args(
pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const def_file_path = findDef(comp, arena, lib_name) catch |err| switch (err) {
error.FileNotFound => {
diff --git a/src/musl.zig b/src/musl.zig
index b9d00c4b12..7c3957fdd7 100644
--- a/src/musl.zig
+++ b/src/musl.zig
@@ -25,7 +25,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
const gpa = comp.gpa;
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
switch (crt_file) {
.crti_o => {
diff --git a/src/print_air.zig b/src/print_air.zig
index 86fc6a6396..ce53a26aeb 100644
--- a/src/print_air.zig
+++ b/src/print_air.zig
@@ -47,7 +47,7 @@ pub fn dump(gpa: Allocator, air: Air, zir: Zir, liveness: Liveness) void {
var writer: Writer = .{
.gpa = gpa,
- .arena = &arena.allocator,
+ .arena = arena.getAllocator(),
.air = air,
.zir = zir,
.liveness = liveness,
diff --git a/src/print_zir.zig b/src/print_zir.zig
index 9532b33ccd..996898b4ac 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -19,7 +19,7 @@ pub fn renderAsTextToFile(
var writer: Writer = .{
.gpa = gpa,
- .arena = &arena.allocator,
+ .arena = arena.getAllocator(),
.file = scope_file,
.code = scope_file.zir,
.indent = 0,
@@ -74,7 +74,7 @@ pub fn renderInstructionContext(
var writer: Writer = .{
.gpa = gpa,
- .arena = &arena.allocator,
+ .arena = arena.getAllocator(),
.file = scope_file,
.code = scope_file.zir,
.indent = if (indent < 2) 2 else indent,
@@ -106,7 +106,7 @@ pub fn renderSingleInstruction(
var writer: Writer = .{
.gpa = gpa,
- .arena = &arena.allocator,
+ .arena = arena.getAllocator(),
.file = scope_file,
.code = scope_file.zir,
.indent = indent,
diff --git a/src/stage1.zig b/src/stage1.zig
index 942be66889..810dcc477b 100644
--- a/src/stage1.zig
+++ b/src/stage1.zig
@@ -38,7 +38,7 @@ pub fn main(argc: c_int, argv: [*][*:0]u8) callconv(.C) c_int {
const gpa = std.heap.c_allocator;
var arena_instance = std.heap.ArenaAllocator.init(gpa);
defer arena_instance.deinit();
- const arena = &arena_instance.allocator;
+ const arena = arena_instance.getAllocator();
const args = arena.alloc([]const u8, @intCast(usize, argc)) catch fatal("{s}", .{"OutOfMemory"});
for (args) |*arg, i| {
diff --git a/src/test.zig b/src/test.zig
index a9c1905b36..74147069e8 100644
--- a/src/test.zig
+++ b/src/test.zig
@@ -692,7 +692,7 @@ pub const TestContext = struct {
var arena_allocator = std.heap.ArenaAllocator.init(allocator);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
var tmp = std.testing.tmpDir(.{});
defer tmp.cleanup();
diff --git a/src/translate_c.zig b/src/translate_c.zig
index 109535d081..5700592550 100644
--- a/src/translate_c.zig
+++ b/src/translate_c.zig
@@ -373,13 +373,14 @@ pub fn translate(
// from this function.
var arena = std.heap.ArenaAllocator.init(gpa);
errdefer arena.deinit();
+ const arena_allocator = arena.getAllocator();
var context = Context{
.gpa = gpa,
- .arena = &arena.allocator,
+ .arena = arena_allocator,
.source_manager = ast_unit.getSourceManager(),
.alias_list = AliasList.init(gpa),
- .global_scope = try arena.allocator.create(Scope.Root),
+ .global_scope = try arena_allocator.create(Scope.Root),
.clang_context = ast_unit.getASTContext(),
.pattern_list = try PatternList.init(gpa),
};
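Note: the translate_c hunk above shows the recurring shape of this refactor. The interface is no longer a field whose address can be shared (previously &arena.allocator); it is a value built by a method call, so call sites hoist it into a local once and reuse it. A minimal sketch of the pattern, assuming the interim getAllocator naming this patch introduces:

    const std = @import("std");

    fn demo(gpa: std.mem.Allocator) !void {
        var arena = std.heap.ArenaAllocator.init(gpa);
        defer arena.deinit();

        // Build the value-type interface once, then reuse it everywhere.
        const arena_allocator = arena.getAllocator();
        const ints = try arena_allocator.alloc(u32, 4);
        defer arena_allocator.free(ints);
        const flag = try arena_allocator.create(bool);
        flag.* = true;
    }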
diff --git a/src/wasi_libc.zig b/src/wasi_libc.zig
index 18906cb6c7..b2235ad53e 100644
--- a/src/wasi_libc.zig
+++ b/src/wasi_libc.zig
@@ -67,7 +67,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
const gpa = comp.gpa;
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
switch (crt_file) {
.crt1_reactor_o => {
diff --git a/test/cli.zig b/test/cli.zig
index 3f50ebe403..20a2143f51 100644
--- a/test/cli.zig
+++ b/test/cli.zig
@@ -16,7 +16,7 @@ pub fn main() !void {
// skip my own exe name
_ = arg_it.skip();
- a = &arena.allocator;
+ a = arena.getAllocator();
const zig_exe_rel = try (arg_it.next(a) orelse {
std.debug.print("Expected first argument to be path to zig compiler\n", .{});
diff --git a/test/compare_output.zig b/test/compare_output.zig
index 68d8f2a807..46cbdd77f6 100644
--- a/test/compare_output.zig
+++ b/test/compare_output.zig
@@ -491,7 +491,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\pub fn main() !void {
\\ var allocator_buf: [10]u8 = undefined;
\\ var fixedBufferAllocator = std.mem.validationWrap(std.heap.FixedBufferAllocator.init(&allocator_buf));
- \\ const allocator = &std.heap.loggingAllocator(&fixedBufferAllocator.allocator).allocator;
+ \\ const allocator = std.heap.loggingAllocator(fixedBufferAllocator.getAllocator()).getAllocator();
\\
\\ var a = try allocator.alloc(u8, 10);
\\ a = allocator.shrink(a, 5);
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 09a165304c..3ed4743275 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -6550,9 +6550,9 @@ pub fn addCases(ctx: *TestContext) !void {
ctx.objErrStage1("method call with first arg type wrong container",
\\pub const List = struct {
\\ len: usize,
- \\ allocator: *Allocator,
+ \\ allocator: Allocator,
\\
- \\ pub fn init(allocator: *Allocator) List {
+ \\ pub fn init(allocator: Allocator) List {
\\ return List {
\\ .len = 0,
\\ .allocator = allocator,
@@ -6573,7 +6573,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ x.init();
\\}
, &[_][]const u8{
- "tmp.zig:23:5: error: expected type '*Allocator', found '*List'",
+ "tmp.zig:23:5: error: expected type 'Allocator', found '*List'",
});
ctx.objErrStage1("binary not on number literal",
@@ -7569,7 +7569,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\
\\export fn entry() void {
\\ const a = MdNode.Header {
- \\ .text = MdText.init(&std.testing.allocator),
+ \\ .text = MdText.init(std.testing.allocator),
\\ .weight = HeaderWeight.H1,
\\ };
\\ _ = a;
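Note: the updated error expectations reflect the core type change of allocgate: std.mem.Allocator is now passed and stored by value, never as *Allocator. The post-change signature style, as the test case itself sketches:

    const std = @import("std");
    const Allocator = std.mem.Allocator;

    const List = struct {
        len: usize,
        allocator: Allocator, // by value; previously `*Allocator`

        pub fn init(allocator: Allocator) List {
            return .{ .len = 0, .allocator = allocator };
        }
    };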
diff --git a/test/standalone/brace_expansion/main.zig b/test/standalone/brace_expansion/main.zig
index 35fe6e5c6a..f0dea39ccb 100644
--- a/test/standalone/brace_expansion/main.zig
+++ b/test/standalone/brace_expansion/main.zig
@@ -16,7 +16,7 @@ const Token = union(enum) {
};
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
-var global_allocator = &gpa.allocator;
+const global_allocator = gpa.getAllocator();
fn tokenize(input: []const u8) !ArrayList(Token) {
const State = enum {
diff --git a/test/standalone/cat/main.zig b/test/standalone/cat/main.zig
index 80ec97877a..a8b16a05ca 100644
--- a/test/standalone/cat/main.zig
+++ b/test/standalone/cat/main.zig
@@ -8,7 +8,7 @@ const warn = std.log.warn;
pub fn main() !void {
var arena_instance = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_instance.deinit();
- const arena = &arena_instance.allocator;
+ const arena = arena_instance.getAllocator();
const args = try process.argsAlloc(arena);
diff --git a/tools/gen_spirv_spec.zig b/tools/gen_spirv_spec.zig
index 0480866867..e4ad6927b2 100644
--- a/tools/gen_spirv_spec.zig
+++ b/tools/gen_spirv_spec.zig
@@ -4,7 +4,7 @@ const g = @import("spirv/grammar.zig");
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
- const allocator = &arena.allocator;
+ const allocator = arena.getAllocator();
const args = try std.process.argsAlloc(allocator);
if (args.len != 2) {
diff --git a/tools/gen_stubs.zig b/tools/gen_stubs.zig
index f845c58b56..a99d14752f 100644
--- a/tools/gen_stubs.zig
+++ b/tools/gen_stubs.zig
@@ -25,7 +25,7 @@ pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
- const ally = &arena.allocator;
+ const ally = arena.getAllocator();
var symbols = std.ArrayList(Symbol).init(ally);
var sections = std.ArrayList([]const u8).init(ally);
diff --git a/tools/merge_anal_dumps.zig b/tools/merge_anal_dumps.zig
index 648a76ebfb..93e0674605 100644
--- a/tools/merge_anal_dumps.zig
+++ b/tools/merge_anal_dumps.zig
@@ -9,7 +9,7 @@ pub fn main() anyerror!void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
- const allocator = &arena.allocator;
+ const allocator = arena.getAllocator();
const args = try std.process.argsAlloc(allocator);
diff --git a/tools/process_headers.zig b/tools/process_headers.zig
index fa5fdb0042..1a90f99343 100644
--- a/tools/process_headers.zig
+++ b/tools/process_headers.zig
@@ -284,7 +284,7 @@ const LibCVendor = enum {
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
- const allocator = &arena.allocator;
+ const allocator = arena.getAllocator();
const args = try std.process.argsAlloc(allocator);
var search_paths = std.ArrayList([]const u8).init(allocator);
var opt_out_dir: ?[]const u8 = null;
diff --git a/tools/update-license-headers.zig b/tools/update-license-headers.zig
index 4cc60ca4ea..83c47f1481 100644
--- a/tools/update-license-headers.zig
+++ b/tools/update-license-headers.zig
@@ -10,7 +10,7 @@ pub fn main() !void {
defer root_node.end();
var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const args = try std.process.argsAlloc(arena);
const path_to_walk = args[1];
diff --git a/tools/update_clang_options.zig b/tools/update_clang_options.zig
index c999db2899..90a96e0572 100644
--- a/tools/update_clang_options.zig
+++ b/tools/update_clang_options.zig
@@ -450,8 +450,13 @@ const cpu_targets = struct {
pub fn main() anyerror!void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
+<<<<<<< HEAD
const allocator = &arena.allocator;
+=======
+
+ const allocator = arena.getAllocator();
+>>>>>>> 11157e318 (allocgate: stage 1 and 2 building)
const args = try std.process.argsAlloc(allocator);
if (args.len <= 1) {
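Note: the <<<<<<< HEAD / >>>>>>> conflict markers above were committed as-is in this patch; the follow-up patch below removes them (see the "tools/update_clang_options.zig | 7 +----" entry in its diffstat).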
diff --git a/tools/update_cpu_features.zig b/tools/update_cpu_features.zig
index 2eccb0ee1b..70bc5a1c74 100644
--- a/tools/update_cpu_features.zig
+++ b/tools/update_cpu_features.zig
@@ -769,7 +769,7 @@ const llvm_targets = [_]LlvmTarget{
pub fn main() anyerror!void {
var arena_state = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_state.deinit();
- const arena = &arena_state.allocator;
+ const arena = arena_state.getAllocator();
const args = try std.process.argsAlloc(arena);
if (args.len <= 1) {
@@ -845,7 +845,7 @@ fn processOneTarget(job: Job) anyerror!void {
var arena_state = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_state.deinit();
- const arena = &arena_state.allocator;
+ const arena = arena_state.getAllocator();
var progress_node = job.root_progress.start(llvm_target.zig_name, 3);
progress_node.activate();
diff --git a/tools/update_glibc.zig b/tools/update_glibc.zig
index 6232a2e8f0..7cccb47e1c 100644
--- a/tools/update_glibc.zig
+++ b/tools/update_glibc.zig
@@ -133,7 +133,7 @@ const Function = struct {
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
- const allocator = &arena.allocator;
+ const allocator = arena.getAllocator();
const args = try std.process.argsAlloc(allocator);
const in_glibc_dir = args[1]; // path to the unzipped tarball of glibc, e.g. ~/downloads/glibc-2.25
const zig_src_dir = args[2]; // path to the source checkout of zig, lib dir, e.g. ~/zig-src/lib
diff --git a/tools/update_spirv_features.zig b/tools/update_spirv_features.zig
index 756d311ecc..0c6c570a31 100644
--- a/tools/update_spirv_features.zig
+++ b/tools/update_spirv_features.zig
@@ -48,7 +48,7 @@ const Version = struct {
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
- const allocator = &arena.allocator;
+ const allocator = arena.getAllocator();
const args = try std.process.argsAlloc(allocator);
--
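This completes stage 1 of allocgate: every &instance.allocator field access becomes a getAllocator() call. As a before/after sketch:

    const std = @import("std");

    fn sketch() !void {
        var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
        defer arena.deinit();

        // Before this patch: const allocator = &arena.allocator;
        // After this patch (interim name, renamed again in the next patch):
        const allocator = arena.getAllocator();
        _ = try allocator.create(i32);
    }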
From 1093b09a989edb8553e79b061bb15c5745f5d193 Mon Sep 17 00:00:00 2001
From: Lee Cannon
Date: Fri, 29 Oct 2021 02:08:41 +0100
Subject: allocgate: renamed getAllocator function to allocator
---
ci/srht/update-download-page.zig | 2 +-
doc/docgen.zig | 2 +-
doc/langref.html.in | 8 +++---
lib/std/Thread.zig | 2 +-
lib/std/array_list.zig | 6 ++--
lib/std/atomic/queue.zig | 2 +-
lib/std/atomic/stack.zig | 2 +-
lib/std/build.zig | 6 ++--
lib/std/build/OptionsStep.zig | 2 +-
lib/std/builtin.zig | 2 +-
lib/std/child_process.zig | 4 +--
lib/std/crypto/benchmark.zig | 2 +-
lib/std/debug.zig | 2 +-
lib/std/event/loop.zig | 4 +--
lib/std/fs/test.zig | 14 +++++-----
lib/std/hash/benchmark.zig | 2 +-
lib/std/heap.zig | 40 +++++++++++++--------------
lib/std/heap/arena_allocator.zig | 2 +-
lib/std/heap/general_purpose_allocator.zig | 44 +++++++++++++++---------------
lib/std/heap/log_to_writer_allocator.zig | 4 +--
lib/std/heap/logging_allocator.zig | 2 +-
lib/std/json.zig | 14 +++++-----
lib/std/json/write_stream.zig | 2 +-
lib/std/mem.zig | 4 +--
lib/std/net.zig | 4 +--
lib/std/os/test.zig | 2 +-
lib/std/process.zig | 2 +-
lib/std/special/build_runner.zig | 2 +-
lib/std/special/test_runner.zig | 2 +-
lib/std/testing.zig | 6 ++--
lib/std/testing/failing_allocator.zig | 2 +-
lib/std/zig/parser_test.zig | 8 +++---
lib/std/zig/perf_test.zig | 2 +-
lib/std/zig/string_literal.zig | 2 +-
src/AstGen.zig | 4 +--
src/Compilation.zig | 24 ++++++++--------
src/DepTokenizer.zig | 2 +-
src/Module.zig | 16 +++++------
src/Sema.zig | 34 +++++++++++------------
src/codegen/c.zig | 2 +-
src/codegen/llvm.zig | 10 +++----
src/crash_report.zig | 2 +-
src/glibc.zig | 6 ++--
src/libcxx.zig | 4 +--
src/libtsan.zig | 2 +-
src/libunwind.zig | 2 +-
src/link.zig | 2 +-
src/link/C.zig | 4 +--
src/link/Coff.zig | 2 +-
src/link/Elf.zig | 2 +-
src/link/MachO.zig | 4 +--
src/link/Plan9.zig | 2 +-
src/link/Wasm.zig | 2 +-
src/link/tapi.zig | 6 ++--
src/link/tapi/yaml.zig | 8 +++---
src/main.zig | 8 +++---
src/mingw.zig | 4 +--
src/musl.zig | 2 +-
src/print_air.zig | 2 +-
src/print_zir.zig | 6 ++--
src/stage1.zig | 2 +-
src/test.zig | 2 +-
src/translate_c.zig | 2 +-
src/wasi_libc.zig | 2 +-
test/cli.zig | 2 +-
test/compare_output.zig | 2 +-
test/standalone/brace_expansion/main.zig | 2 +-
test/standalone/cat/main.zig | 2 +-
tools/gen_spirv_spec.zig | 2 +-
tools/gen_stubs.zig | 2 +-
tools/merge_anal_dumps.zig | 2 +-
tools/process_headers.zig | 2 +-
tools/update-license-headers.zig | 2 +-
tools/update_clang_options.zig | 7 +----
tools/update_cpu_features.zig | 4 +--
tools/update_glibc.zig | 2 +-
tools/update_spirv_features.zig | 2 +-
77 files changed, 202 insertions(+), 207 deletions(-)
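With the old allocator field gone, the shorter method name is available again, so this follow-up patch renames getAllocator() to allocator() (and getThreadSafeAllocator() to threadSafeAllocator()). The resulting idiom, which is the spelling that shipped:

    const std = @import("std");

    pub fn main() !void {
        var gpa = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa.deinit();
        const allocator = gpa.allocator(); // final post-allocgate spelling

        const buf = try allocator.alloc(u8, 32);
        defer allocator.free(buf);
    }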
diff --git a/ci/srht/update-download-page.zig b/ci/srht/update-download-page.zig
index b16a8609a8..daaee18faf 100644
--- a/ci/srht/update-download-page.zig
+++ b/ci/srht/update-download-page.zig
@@ -6,7 +6,7 @@ pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
- const allocator = arena.getAllocator();
+ const allocator = arena.allocator();
const out_dir = "out";
try std.fs.cwd().makePath(out_dir);
diff --git a/doc/docgen.zig b/doc/docgen.zig
index 1f6ff74617..08502f0b79 100644
--- a/doc/docgen.zig
+++ b/doc/docgen.zig
@@ -21,7 +21,7 @@ pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
- const allocator = arena.getAllocator();
+ const allocator = arena.allocator();
var args_it = process.args();
diff --git a/doc/langref.html.in b/doc/langref.html.in
index 631c948628..b2f211468e 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -10061,7 +10061,7 @@ const expect = std.testing.expect;
test "using an allocator" {
var buffer: [100]u8 = undefined;
- const allocator = std.heap.FixedBufferAllocator.init(&buffer).getAllocator();
+ const allocator = std.heap.FixedBufferAllocator.init(&buffer).allocator();
const result = try concat(allocator, "foo", "bar");
try expect(std.mem.eql(u8, "foobar", result));
}
@@ -10114,7 +10114,7 @@ pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
- const allocator = arena.getAllocator();
+ const allocator = arena.allocator();
const ptr = try allocator.create(i32);
std.debug.print("ptr={*}\n", .{ptr});
@@ -10820,7 +10820,7 @@ const std = @import("std");
pub fn main() !void {
var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
- const gpa = general_purpose_allocator.getAllocator();
+ const gpa = general_purpose_allocator.allocator();
const args = try std.process.argsAlloc(gpa);
defer std.process.argsFree(gpa, args);
@@ -10842,7 +10842,7 @@ const PreopenList = std.fs.wasi.PreopenList;
pub fn main() !void {
var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
- const gpa = general_purpose_allocator.getAllocator();
+ const gpa = general_purpose_allocator.allocator();
var preopens = PreopenList.init(gpa);
defer preopens.deinit();
diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig
index 61f19c20d0..855c44c032 100644
--- a/lib/std/Thread.zig
+++ b/lib/std/Thread.zig
@@ -460,7 +460,7 @@ const WindowsThreadImpl = struct {
errdefer assert(windows.kernel32.HeapFree(heap_handle, 0, alloc_ptr) != 0);
const instance_bytes = @ptrCast([*]u8, alloc_ptr)[0..alloc_bytes];
- const instance = std.heap.FixedBufferAllocator.init(instance_bytes).getAllocator().create(Instance) catch unreachable;
+ const instance = std.heap.FixedBufferAllocator.init(instance_bytes).allocator().create(Instance) catch unreachable;
instance.* = .{
.fn_args = args,
.thread = .{
diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig
index d88dae95ff..fe98caf25f 100644
--- a/lib/std/array_list.zig
+++ b/lib/std/array_list.zig
@@ -1119,7 +1119,7 @@ test "std.ArrayList/ArrayListUnmanaged.insertSlice" {
test "std.ArrayList/ArrayListUnmanaged.replaceRange" {
var arena = std.heap.ArenaAllocator.init(testing.allocator);
defer arena.deinit();
- const a = arena.getAllocator();
+ const a = arena.allocator();
const init = [_]i32{ 1, 2, 3, 4, 5 };
const new = [_]i32{ 0, 0, 0 };
@@ -1263,7 +1263,7 @@ test "std.ArrayList/ArrayListUnmanaged.shrink still sets length on error.OutOfMe
// use an arena allocator to make sure realloc returns error.OutOfMemory
var arena = std.heap.ArenaAllocator.init(testing.allocator);
defer arena.deinit();
- const a = arena.getAllocator();
+ const a = arena.allocator();
{
var list = ArrayList(i32).init(a);
@@ -1361,7 +1361,7 @@ test "ArrayListAligned/ArrayListAlignedUnmanaged accepts unaligned slices" {
test "std.ArrayList(u0)" {
// An ArrayList on zero-sized types should not need to allocate
- const a = testing.FailingAllocator.init(testing.allocator, 0).getAllocator();
+ const a = testing.FailingAllocator.init(testing.allocator, 0).allocator();
var list = ArrayList(u0).init(a);
defer list.deinit();
diff --git a/lib/std/atomic/queue.zig b/lib/std/atomic/queue.zig
index 3b4a14110c..6c502ef37e 100644
--- a/lib/std/atomic/queue.zig
+++ b/lib/std/atomic/queue.zig
@@ -177,7 +177,7 @@ test "std.atomic.Queue" {
defer std.heap.page_allocator.free(plenty_of_memory);
var fixed_buffer_allocator = std.heap.FixedBufferAllocator.init(plenty_of_memory);
- var a = fixed_buffer_allocator.getThreadSafeAllocator();
+ var a = fixed_buffer_allocator.threadSafeAllocator();
var queue = Queue(i32).init();
var context = Context{
diff --git a/lib/std/atomic/stack.zig b/lib/std/atomic/stack.zig
index c1b368b571..a6396bb22b 100644
--- a/lib/std/atomic/stack.zig
+++ b/lib/std/atomic/stack.zig
@@ -89,7 +89,7 @@ test "std.atomic.stack" {
defer std.heap.page_allocator.free(plenty_of_memory);
var fixed_buffer_allocator = std.heap.FixedBufferAllocator.init(plenty_of_memory);
- var a = fixed_buffer_allocator.getThreadSafeAllocator();
+ var a = fixed_buffer_allocator.threadSafeAllocator();
var stack = Stack(i32).init();
var context = Context{
diff --git a/lib/std/build.zig b/lib/std/build.zig
index dba27f86b9..378af18b2c 100644
--- a/lib/std/build.zig
+++ b/lib/std/build.zig
@@ -1285,7 +1285,7 @@ test "builder.findProgram compiles" {
defer arena.deinit();
const builder = try Builder.create(
- arena.getAllocator(),
+ arena.allocator(),
"zig",
"zig-cache",
"zig-cache",
@@ -3207,7 +3207,7 @@ test "Builder.dupePkg()" {
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena.deinit();
var builder = try Builder.create(
- arena.getAllocator(),
+ arena.allocator(),
"test",
"test",
"test",
@@ -3252,7 +3252,7 @@ test "LibExeObjStep.addPackage" {
defer arena.deinit();
var builder = try Builder.create(
- arena.getAllocator(),
+ arena.allocator(),
"test",
"test",
"test",
diff --git a/lib/std/build/OptionsStep.zig b/lib/std/build/OptionsStep.zig
index d3ac0d4196..d106b05171 100644
--- a/lib/std/build/OptionsStep.zig
+++ b/lib/std/build/OptionsStep.zig
@@ -274,7 +274,7 @@ test "OptionsStep" {
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena.deinit();
var builder = try Builder.create(
- arena.getAllocator(),
+ arena.allocator(),
"test",
"test",
"test",
diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index e0acc237d9..9ce8c1c38e 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -75,7 +75,7 @@ pub const StackTrace = struct {
};
const tty_config = std.debug.detectTTYConfig();
try writer.writeAll("\n");
- std.debug.writeStackTrace(self, writer, arena.getAllocator(), debug_info, tty_config) catch |err| {
+ std.debug.writeStackTrace(self, writer, arena.allocator(), debug_info, tty_config) catch |err| {
try writer.print("Unable to print stack trace: {s}\n", .{@errorName(err)});
};
try writer.writeAll("\n");
diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig
index cc9f1b2801..13e14c7f34 100644
--- a/lib/std/child_process.zig
+++ b/lib/std/child_process.zig
@@ -541,7 +541,7 @@ pub const ChildProcess = struct {
var arena_allocator = std.heap.ArenaAllocator.init(self.allocator);
defer arena_allocator.deinit();
- const arena = arena_allocator.getAllocator();
+ const arena = arena_allocator.allocator();
// The POSIX standard does not allow malloc() between fork() and execve(),
// and `self.allocator` may be a libc allocator.
@@ -1149,7 +1149,7 @@ test "createNullDelimitedEnvMap" {
var arena = std.heap.ArenaAllocator.init(allocator);
defer arena.deinit();
- const environ = try createNullDelimitedEnvMap(arena.getAllocator(), &envmap);
+ const environ = try createNullDelimitedEnvMap(arena.allocator(), &envmap);
try testing.expectEqual(@as(usize, 5), environ.len);
diff --git a/lib/std/crypto/benchmark.zig b/lib/std/crypto/benchmark.zig
index 52e56ddf18..9fd3c42544 100644
--- a/lib/std/crypto/benchmark.zig
+++ b/lib/std/crypto/benchmark.zig
@@ -363,7 +363,7 @@ pub fn main() !void {
var buffer: [1024]u8 = undefined;
var fixed = std.heap.FixedBufferAllocator.init(buffer[0..]);
- const args = try std.process.argsAlloc(fixed.getAllocator());
+ const args = try std.process.argsAlloc(fixed.allocator());
var filter: ?[]u8 = "";
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index b6990d675d..64db6eeadc 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -1566,7 +1566,7 @@ fn getDebugInfoAllocator() mem.Allocator {
if (debug_info_allocator) |a| return a;
debug_info_arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
- const allocator = debug_info_arena_allocator.getAllocator();
+ const allocator = debug_info_arena_allocator.allocator();
debug_info_allocator = allocator;
return allocator;
}
diff --git a/lib/std/event/loop.zig b/lib/std/event/loop.zig
index 191ed4de94..23c89aabc5 100644
--- a/lib/std/event/loop.zig
+++ b/lib/std/event/loop.zig
@@ -173,12 +173,12 @@ pub const Loop = struct {
// We need at least one of these in case the fs thread wants to use onNextTick
const extra_thread_count = thread_count - 1;
const resume_node_count = std.math.max(extra_thread_count, 1);
- self.eventfd_resume_nodes = try self.arena.getAllocator().alloc(
+ self.eventfd_resume_nodes = try self.arena.allocator().alloc(
std.atomic.Stack(ResumeNode.EventFd).Node,
resume_node_count,
);
- self.extra_threads = try self.arena.getAllocator().alloc(Thread, extra_thread_count);
+ self.extra_threads = try self.arena.allocator().alloc(Thread, extra_thread_count);
try self.initOsData(extra_thread_count);
errdefer self.deinitOsData();
diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig
index 437ff5620d..1ab6608327 100644
--- a/lib/std/fs/test.zig
+++ b/lib/std/fs/test.zig
@@ -52,7 +52,7 @@ test "accessAbsolute" {
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
- const allocator = arena.getAllocator();
+ const allocator = arena.allocator();
const base_path = blk: {
const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
@@ -71,7 +71,7 @@ test "openDirAbsolute" {
try tmp.dir.makeDir("subdir");
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
- const allocator = arena.getAllocator();
+ const allocator = arena.allocator();
const base_path = blk: {
const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..], "subdir" });
@@ -111,7 +111,7 @@ test "readLinkAbsolute" {
// Get base abs path
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
- const allocator = arena.getAllocator();
+ const allocator = arena.allocator();
const base_path = blk: {
const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
@@ -162,7 +162,7 @@ test "Dir.Iterator" {
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
- const allocator = arena.getAllocator();
+ const allocator = arena.allocator();
var entries = std.ArrayList(Dir.Entry).init(allocator);
@@ -207,7 +207,7 @@ test "Dir.realpath smoke test" {
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
- const allocator = arena.getAllocator();
+ const allocator = arena.allocator();
const base_path = blk: {
const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..] });
@@ -482,7 +482,7 @@ test "renameAbsolute" {
// Get base abs path
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
- const allocator = arena.getAllocator();
+ const allocator = arena.allocator();
const base_path = blk: {
const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..] });
@@ -993,7 +993,7 @@ test ". and .. in absolute functions" {
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
- const allocator = arena.getAllocator();
+ const allocator = arena.allocator();
const base_path = blk: {
const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
diff --git a/lib/std/hash/benchmark.zig b/lib/std/hash/benchmark.zig
index 5b278ca0b1..f6f1da1894 100644
--- a/lib/std/hash/benchmark.zig
+++ b/lib/std/hash/benchmark.zig
@@ -165,7 +165,7 @@ pub fn main() !void {
var buffer: [1024]u8 = undefined;
var fixed = std.heap.FixedBufferAllocator.init(buffer[0..]);
- const args = try std.process.argsAlloc(fixed.getAllocator());
+ const args = try std.process.argsAlloc(fixed.allocator());
var filter: ?[]u8 = "";
var count: usize = mode(128 * MiB);
diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index 213953553f..c9a5062570 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -573,7 +573,7 @@ pub const HeapAllocator = switch (builtin.os.tag) {
};
}
- pub fn getAllocator(self: *HeapAllocator) Allocator {
+ pub fn allocator(self: *HeapAllocator) Allocator {
return Allocator.init(self, alloc, resize);
}
@@ -680,14 +680,14 @@ pub const FixedBufferAllocator = struct {
};
}
- /// *WARNING* using this at the same time as the interface returned by `getThreadSafeAllocator` is not thread safe
- pub fn getAllocator(self: *FixedBufferAllocator) Allocator {
+ /// *WARNING* using this at the same time as the interface returned by `threadSafeAllocator` is not thread safe
+ pub fn allocator(self: *FixedBufferAllocator) Allocator {
return Allocator.init(self, alloc, resize);
}
/// Provides a lock free thread safe `Allocator` interface to the underlying `FixedBufferAllocator`
/// *WARNING* using this at the same time as the interface returned by `getAllocator` is not thread safe
- pub fn getThreadSafeAllocator(self: *FixedBufferAllocator) Allocator {
+ pub fn threadSafeAllocator(self: *FixedBufferAllocator) Allocator {
return Allocator.init(self, threadSafeAlloc, Allocator.NoResize(FixedBufferAllocator).noResize);
}
@@ -775,7 +775,7 @@ pub const FixedBufferAllocator = struct {
}
};
-pub const ThreadSafeFixedBufferAllocator = @compileError("ThreadSafeFixedBufferAllocator has been replaced with `getThreadSafeAllocator` on FixedBufferAllocator");
+pub const ThreadSafeFixedBufferAllocator = @compileError("ThreadSafeFixedBufferAllocator has been replaced with `threadSafeAllocator` on FixedBufferAllocator");
pub fn stackFallback(comptime size: usize, fallback_allocator: Allocator) StackFallbackAllocator(size) {
return StackFallbackAllocator(size){
@@ -909,7 +909,7 @@ test "HeapAllocator" {
if (builtin.os.tag == .windows) {
var heap_allocator = HeapAllocator.init();
defer heap_allocator.deinit();
- const allocator = heap_allocator.getAllocator();
+ const allocator = heap_allocator.allocator();
try testAllocator(allocator);
try testAllocatorAligned(allocator);
@@ -921,7 +921,7 @@ test "HeapAllocator" {
test "ArenaAllocator" {
var arena_allocator = ArenaAllocator.init(page_allocator);
defer arena_allocator.deinit();
- const allocator = arena_allocator.getAllocator();
+ const allocator = arena_allocator.allocator();
try testAllocator(allocator);
try testAllocatorAligned(allocator);
@@ -932,7 +932,7 @@ test "ArenaAllocator" {
var test_fixed_buffer_allocator_memory: [800000 * @sizeOf(u64)]u8 = undefined;
test "FixedBufferAllocator" {
var fixed_buffer_allocator = mem.validationWrap(FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]));
- const allocator = fixed_buffer_allocator.getAllocator();
+ const allocator = fixed_buffer_allocator.allocator();
try testAllocator(allocator);
try testAllocatorAligned(allocator);
@@ -943,7 +943,7 @@ test "FixedBufferAllocator" {
test "FixedBufferAllocator.reset" {
var buf: [8]u8 align(@alignOf(u64)) = undefined;
var fba = FixedBufferAllocator.init(buf[0..]);
- const allocator = fba.getAllocator();
+ const allocator = fba.allocator();
const X = 0xeeeeeeeeeeeeeeee;
const Y = 0xffffffffffffffff;
@@ -976,7 +976,7 @@ test "FixedBufferAllocator Reuse memory on realloc" {
// check if we re-use the memory
{
var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
- const allocator = fixed_buffer_allocator.getAllocator();
+ const allocator = fixed_buffer_allocator.allocator();
var slice0 = try allocator.alloc(u8, 5);
try testing.expect(slice0.len == 5);
@@ -988,7 +988,7 @@ test "FixedBufferAllocator Reuse memory on realloc" {
// check that we don't re-use the memory if it's not the most recent block
{
var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
- const allocator = fixed_buffer_allocator.getAllocator();
+ const allocator = fixed_buffer_allocator.allocator();
var slice0 = try allocator.alloc(u8, 2);
slice0[0] = 1;
@@ -1005,16 +1005,16 @@ test "FixedBufferAllocator Reuse memory on realloc" {
test "Thread safe FixedBufferAllocator" {
var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
- try testAllocator(fixed_buffer_allocator.getThreadSafeAllocator());
- try testAllocatorAligned(fixed_buffer_allocator.getThreadSafeAllocator());
- try testAllocatorLargeAlignment(fixed_buffer_allocator.getThreadSafeAllocator());
- try testAllocatorAlignedShrink(fixed_buffer_allocator.getThreadSafeAllocator());
+ try testAllocator(fixed_buffer_allocator.threadSafeAllocator());
+ try testAllocatorAligned(fixed_buffer_allocator.threadSafeAllocator());
+ try testAllocatorLargeAlignment(fixed_buffer_allocator.threadSafeAllocator());
+ try testAllocatorAlignedShrink(fixed_buffer_allocator.threadSafeAllocator());
}
/// This one should not try alignments that exceed what C malloc can handle.
pub fn testAllocator(base_allocator: mem.Allocator) !void {
var validationAllocator = mem.validationWrap(base_allocator);
- const allocator = validationAllocator.getAllocator();
+ const allocator = validationAllocator.allocator();
var slice = try allocator.alloc(*i32, 100);
try testing.expect(slice.len == 100);
@@ -1060,7 +1060,7 @@ pub fn testAllocator(base_allocator: mem.Allocator) !void {
pub fn testAllocatorAligned(base_allocator: mem.Allocator) !void {
var validationAllocator = mem.validationWrap(base_allocator);
- const allocator = validationAllocator.getAllocator();
+ const allocator = validationAllocator.allocator();
// Test a few alignment values, smaller and bigger than the type's one
inline for ([_]u29{ 1, 2, 4, 8, 16, 32, 64 }) |alignment| {
@@ -1090,7 +1090,7 @@ pub fn testAllocatorAligned(base_allocator: mem.Allocator) !void {
pub fn testAllocatorLargeAlignment(base_allocator: mem.Allocator) !void {
var validationAllocator = mem.validationWrap(base_allocator);
- const allocator = validationAllocator.getAllocator();
+ const allocator = validationAllocator.allocator();
//Maybe a platform's page_size is actually the same as or
// very near usize?
@@ -1122,10 +1122,10 @@ pub fn testAllocatorLargeAlignment(base_allocator: mem.Allocator) !void {
pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void {
var validationAllocator = mem.validationWrap(base_allocator);
- const allocator = validationAllocator.getAllocator();
+ const allocator = validationAllocator.allocator();
var debug_buffer: [1000]u8 = undefined;
- const debug_allocator = FixedBufferAllocator.init(&debug_buffer).getAllocator();
+ const debug_allocator = FixedBufferAllocator.init(&debug_buffer).allocator();
const alloc_size = mem.page_size * 2 + 50;
var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
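The heap.zig doc comments above keep the warning that the plain and thread-safe interfaces must not be used at the same time. A sketch of picking the thread-safe view, assuming the post-rename API:

    const std = @import("std");

    test "thread-safe view of a FixedBufferAllocator" {
        var buf: [4096]u8 = undefined;
        var fba = std.heap.FixedBufferAllocator.init(&buf);

        // Use exactly one of allocator()/threadSafeAllocator();
        // mixing them concurrently is not thread safe.
        const a = fba.threadSafeAllocator();
        const bytes = try a.alloc(u8, 64);
        try std.testing.expect(bytes.len == 64);
    }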
diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig
index 65b0839945..c5d7d5ec9d 100644
--- a/lib/std/heap/arena_allocator.zig
+++ b/lib/std/heap/arena_allocator.zig
@@ -23,7 +23,7 @@ pub const ArenaAllocator = struct {
}
};
- pub fn getAllocator(self: *ArenaAllocator) Allocator {
+ pub fn allocator(self: *ArenaAllocator) Allocator {
return Allocator.init(self, alloc, resize);
}
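Every implementation in this series builds its interface the same way: Allocator.init(self, alloc, resize) packs the instance pointer together with its callbacks into the new value-type Allocator. One caveat follows from this: the returned value still references the backing instance, so that instance must outlive it and keep a stable address. A sketch:

    const std = @import("std");

    fn useArena() !void {
        var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
        defer arena.deinit();

        // The value returned by allocator() points back at `arena`;
        // keep `arena` alive and unmoved while this value is in use.
        const a = arena.allocator();
        const node = try a.create(u64);
        node.* = 42;
    }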
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index d4f1dde299..822db6fb1b 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -280,7 +280,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
};
- pub fn getAllocator(self: *Self) Allocator {
+ pub fn allocator(self: *Self) Allocator {
return Allocator.init(self, alloc, resize);
}
@@ -830,7 +830,7 @@ const test_config = Config{};
test "small allocations - free in same order" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = gpa.getAllocator();
+ const allocator = gpa.allocator();
var list = std.ArrayList(*u64).init(std.testing.allocator);
defer list.deinit();
@@ -849,7 +849,7 @@ test "small allocations - free in same order" {
test "small allocations - free in reverse order" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = gpa.getAllocator();
+ const allocator = gpa.allocator();
var list = std.ArrayList(*u64).init(std.testing.allocator);
defer list.deinit();
@@ -868,7 +868,7 @@ test "small allocations - free in reverse order" {
test "large allocations" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = gpa.getAllocator();
+ const allocator = gpa.allocator();
const ptr1 = try allocator.alloc(u64, 42768);
const ptr2 = try allocator.alloc(u64, 52768);
@@ -881,7 +881,7 @@ test "large allocations" {
test "realloc" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = gpa.getAllocator();
+ const allocator = gpa.allocator();
var slice = try allocator.alignedAlloc(u8, @alignOf(u32), 1);
defer allocator.free(slice);
@@ -903,7 +903,7 @@ test "realloc" {
test "shrink" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = gpa.getAllocator();
+ const allocator = gpa.allocator();
var slice = try allocator.alloc(u8, 20);
defer allocator.free(slice);
@@ -926,7 +926,7 @@ test "shrink" {
test "large object - grow" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = gpa.getAllocator();
+ const allocator = gpa.allocator();
var slice1 = try allocator.alloc(u8, page_size * 2 - 20);
defer allocator.free(slice1);
@@ -944,7 +944,7 @@ test "large object - grow" {
test "realloc small object to large object" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = gpa.getAllocator();
+ const allocator = gpa.allocator();
var slice = try allocator.alloc(u8, 70);
defer allocator.free(slice);
@@ -961,7 +961,7 @@ test "realloc small object to large object" {
test "shrink large object to large object" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = gpa.getAllocator();
+ const allocator = gpa.allocator();
var slice = try allocator.alloc(u8, page_size * 2 + 50);
defer allocator.free(slice);
@@ -984,10 +984,10 @@ test "shrink large object to large object" {
test "shrink large object to large object with larger alignment" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = gpa.getAllocator();
+ const allocator = gpa.allocator();
var debug_buffer: [1000]u8 = undefined;
- const debug_allocator = std.heap.FixedBufferAllocator.init(&debug_buffer).getAllocator();
+ const debug_allocator = std.heap.FixedBufferAllocator.init(&debug_buffer).allocator();
const alloc_size = page_size * 2 + 50;
var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
@@ -1019,7 +1019,7 @@ test "shrink large object to large object with larger alignment" {
test "realloc large object to small object" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = gpa.getAllocator();
+ const allocator = gpa.allocator();
var slice = try allocator.alloc(u8, page_size * 2 + 50);
defer allocator.free(slice);
@@ -1037,7 +1037,7 @@ test "overrideable mutexes" {
.mutex = std.Thread.Mutex{},
};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = gpa.getAllocator();
+ const allocator = gpa.allocator();
const ptr = try allocator.create(i32);
defer allocator.destroy(ptr);
@@ -1046,7 +1046,7 @@ test "overrideable mutexes" {
test "non-page-allocator backing allocator" {
var gpa = GeneralPurposeAllocator(.{}){ .backing_allocator = std.testing.allocator };
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = gpa.getAllocator();
+ const allocator = gpa.allocator();
const ptr = try allocator.create(i32);
defer allocator.destroy(ptr);
@@ -1055,10 +1055,10 @@ test "non-page-allocator backing allocator" {
test "realloc large object to larger alignment" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = gpa.getAllocator();
+ const allocator = gpa.allocator();
var debug_buffer: [1000]u8 = undefined;
- const debug_allocator = std.heap.FixedBufferAllocator.init(&debug_buffer).getAllocator();
+ const debug_allocator = std.heap.FixedBufferAllocator.init(&debug_buffer).allocator();
var slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50);
defer allocator.free(slice);
@@ -1094,9 +1094,9 @@ test "realloc large object to larger alignment" {
test "large object shrinks to small but allocation fails during shrink" {
var failing_allocator = std.testing.FailingAllocator.init(std.heap.page_allocator, 3);
- var gpa = GeneralPurposeAllocator(.{}){ .backing_allocator = failing_allocator.getAllocator() };
+ var gpa = GeneralPurposeAllocator(.{}){ .backing_allocator = failing_allocator.allocator() };
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = gpa.getAllocator();
+ const allocator = gpa.allocator();
var slice = try allocator.alloc(u8, page_size * 2 + 50);
defer allocator.free(slice);
@@ -1113,7 +1113,7 @@ test "large object shrinks to small but allocation fails during shrink" {
test "objects of size 1024 and 2048" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = gpa.getAllocator();
+ const allocator = gpa.allocator();
const slice = try allocator.alloc(u8, 1025);
const slice2 = try allocator.alloc(u8, 3000);
@@ -1125,7 +1125,7 @@ test "objects of size 1024 and 2048" {
test "setting a memory cap" {
var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = gpa.getAllocator();
+ const allocator = gpa.allocator();
gpa.setRequestedMemoryLimit(1010);
@@ -1154,9 +1154,9 @@ test "double frees" {
defer std.testing.expect(!backing_gpa.deinit()) catch @panic("leak");
const GPA = GeneralPurposeAllocator(.{ .safety = true, .never_unmap = true, .retain_metadata = true });
- var gpa = GPA{ .backing_allocator = backing_gpa.getAllocator() };
+ var gpa = GPA{ .backing_allocator = backing_gpa.allocator() };
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = gpa.getAllocator();
+ const allocator = gpa.allocator();
// detect a small allocation double free, even though bucket is emptied
const index: usize = 6;
diff --git a/lib/std/heap/log_to_writer_allocator.zig b/lib/std/heap/log_to_writer_allocator.zig
index 1f3146f79f..cab1724312 100644
--- a/lib/std/heap/log_to_writer_allocator.zig
+++ b/lib/std/heap/log_to_writer_allocator.zig
@@ -17,7 +17,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
};
}
- pub fn getAllocator(self: *Self) Allocator {
+ pub fn allocator(self: *Self) Allocator {
return Allocator.init(self, alloc, resize);
}
@@ -82,7 +82,7 @@ test "LogToWriterAllocator" {
var allocator_buf: [10]u8 = undefined;
var fixedBufferAllocator = std.mem.validationWrap(std.heap.FixedBufferAllocator.init(&allocator_buf));
- const allocator = logToWriterAllocator(fixedBufferAllocator.getAllocator(), fbs.writer()).getAllocator();
+ const allocator = logToWriterAllocator(fixedBufferAllocator.allocator(), fbs.writer()).allocator();
var a = try allocator.alloc(u8, 10);
a = allocator.shrink(a, 5);
diff --git a/lib/std/heap/logging_allocator.zig b/lib/std/heap/logging_allocator.zig
index 34dc554dee..da9e731fd5 100644
--- a/lib/std/heap/logging_allocator.zig
+++ b/lib/std/heap/logging_allocator.zig
@@ -32,7 +32,7 @@ pub fn ScopedLoggingAllocator(
};
}
- pub fn getAllocator(self: *Self) Allocator {
+ pub fn allocator(self: *Self) Allocator {
return Allocator.init(self, alloc, resize);
}
diff --git a/lib/std/json.zig b/lib/std/json.zig
index 978213a596..658fec6b79 100644
--- a/lib/std/json.zig
+++ b/lib/std/json.zig
@@ -2033,7 +2033,7 @@ test "parse into tagged union" {
{ // failing allocations should be bubbled up instantly without trying next member
var fail_alloc = testing.FailingAllocator.init(testing.allocator, 0);
- const options = ParseOptions{ .allocator = fail_alloc.getAllocator() };
+ const options = ParseOptions{ .allocator = fail_alloc.allocator() };
const T = union(enum) {
// both fields here match the input
string: []const u8,
@@ -2081,7 +2081,7 @@ test "parse union bubbles up AllocatorRequired" {
test "parseFree descends into tagged union" {
var fail_alloc = testing.FailingAllocator.init(testing.allocator, 1);
- const options = ParseOptions{ .allocator = fail_alloc.getAllocator() };
+ const options = ParseOptions{ .allocator = fail_alloc.allocator() };
const T = union(enum) {
int: i32,
float: f64,
@@ -2364,7 +2364,7 @@ pub const Parser = struct {
var arena = ArenaAllocator.init(p.allocator);
errdefer arena.deinit();
- const allocator = arena.getAllocator();
+ const allocator = arena.allocator();
while (try s.next()) |token| {
try p.transition(allocator, input, s.i - 1, token);
@@ -2746,13 +2746,13 @@ fn testParse(arena_allocator: std.mem.Allocator, json_str: []const u8) !Value {
test "parsing empty string gives appropriate error" {
var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena_allocator.deinit();
- try testing.expectError(error.UnexpectedEndOfJson, testParse(arena_allocator.getAllocator(), ""));
+ try testing.expectError(error.UnexpectedEndOfJson, testParse(arena_allocator.allocator(), ""));
}
test "integer after float has proper type" {
var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena_allocator.deinit();
- const json = try testParse(arena_allocator.getAllocator(),
+ const json = try testParse(arena_allocator.allocator(),
\\{
\\ "float": 3.14,
\\ "ints": [1, 2, 3]
@@ -2787,7 +2787,7 @@ test "escaped characters" {
\\}
;
- const obj = (try testParse(arena_allocator.getAllocator(), input)).Object;
+ const obj = (try testParse(arena_allocator.allocator(), input)).Object;
try testing.expectEqualSlices(u8, obj.get("backslash").?.String, "\\");
try testing.expectEqualSlices(u8, obj.get("forwardslash").?.String, "/");
@@ -2813,7 +2813,7 @@ test "string copy option" {
var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena_allocator.deinit();
- const allocator = arena_allocator.getAllocator();
+ const allocator = arena_allocator.allocator();
const tree_nocopy = try Parser.init(allocator, false).parse(input);
const obj_nocopy = tree_nocopy.root.Object;
diff --git a/lib/std/json/write_stream.zig b/lib/std/json/write_stream.zig
index 2ef6fa3a86..3393f8a6ee 100644
--- a/lib/std/json/write_stream.zig
+++ b/lib/std/json/write_stream.zig
@@ -243,7 +243,7 @@ test "json write stream" {
try w.beginObject();
try w.objectField("object");
- try w.emitJson(try getJsonObject(arena_allocator.getAllocator()));
+ try w.emitJson(try getJsonObject(arena_allocator.allocator()));
try w.objectField("string");
try w.emitString("This is a string");
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index 4b43069d1e..37b1141272 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -46,13 +46,13 @@ pub fn ValidationAllocator(comptime T: type) type {
};
}
- pub fn getAllocator(self: *Self) Allocator {
+ pub fn allocator(self: *Self) Allocator {
return Allocator.init(self, alloc, resize);
}
fn getUnderlyingAllocatorPtr(self: *Self) Allocator {
if (T == Allocator) return self.underlying_allocator;
- return self.underlying_allocator.getAllocator();
+ return self.underlying_allocator.allocator();
}
pub fn alloc(
diff --git a/lib/std/net.zig b/lib/std/net.zig
index 4f5ce84034..6199d739d7 100644
--- a/lib/std/net.zig
+++ b/lib/std/net.zig
@@ -704,7 +704,7 @@ pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) !*A
var arena = std.heap.ArenaAllocator.init(allocator);
errdefer arena.deinit();
- const result = try arena.getAllocator().create(AddressList);
+ const result = try arena.allocator().create(AddressList);
result.* = AddressList{
.arena = arena,
.addrs = undefined,
@@ -712,7 +712,7 @@ pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) !*A
};
break :blk result;
};
- const arena = result.arena.getAllocator();
+ const arena = result.arena.allocator();
errdefer result.arena.deinit();
if (builtin.target.os.tag == .windows or builtin.link_libc) {
diff --git a/lib/std/os/test.zig b/lib/std/os/test.zig
index fb5105706c..3e6603677c 100644
--- a/lib/std/os/test.zig
+++ b/lib/std/os/test.zig
@@ -58,7 +58,7 @@ test "open smoke test" {
// Get base abs path
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
- const allocator = arena.getAllocator();
+ const allocator = arena.allocator();
const base_path = blk: {
const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
diff --git a/lib/std/process.zig b/lib/std/process.zig
index 6b45a7e7aa..243978591b 100644
--- a/lib/std/process.zig
+++ b/lib/std/process.zig
@@ -854,7 +854,7 @@ pub fn execve(
var arena_allocator = std.heap.ArenaAllocator.init(allocator);
defer arena_allocator.deinit();
- const arena = arena_allocator.getAllocator();
+ const arena = arena_allocator.allocator();
const argv_buf = try arena.allocSentinel(?[*:0]u8, argv.len, null);
for (argv) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr;
diff --git a/lib/std/special/build_runner.zig b/lib/std/special/build_runner.zig
index ab844fef57..d798d2ab6f 100644
--- a/lib/std/special/build_runner.zig
+++ b/lib/std/special/build_runner.zig
@@ -16,7 +16,7 @@ pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
- const allocator = arena.getAllocator();
+ const allocator = arena.allocator();
var args = try process.argsAlloc(allocator);
defer process.argsFree(allocator, args);
diff --git a/lib/std/special/test_runner.zig b/lib/std/special/test_runner.zig
index f90e8aa58e..9a52ebdbb1 100644
--- a/lib/std/special/test_runner.zig
+++ b/lib/std/special/test_runner.zig
@@ -10,7 +10,7 @@ var args_buffer: [std.fs.MAX_PATH_BYTES + std.mem.page_size]u8 = undefined;
var args_allocator = std.heap.FixedBufferAllocator.init(&args_buffer);
fn processArgs() void {
- const args = std.process.argsAlloc(args_allocator.getAllocator()) catch {
+ const args = std.process.argsAlloc(args_allocator.allocator()) catch {
@panic("Too many bytes passed over the CLI to the test runner");
};
if (args.len != 2) {
diff --git a/lib/std/testing.zig b/lib/std/testing.zig
index b588abbd8c..e5c2afab40 100644
--- a/lib/std/testing.zig
+++ b/lib/std/testing.zig
@@ -7,11 +7,11 @@ const print = std.debug.print;
pub const FailingAllocator = @import("testing/failing_allocator.zig").FailingAllocator;
/// This should only be used in temporary test programs.
-pub const allocator = allocator_instance.getAllocator();
+pub const allocator = allocator_instance.allocator();
pub var allocator_instance = std.heap.GeneralPurposeAllocator(.{}){};
-pub const failing_allocator = failing_allocator_instance.getAllocator();
-pub var failing_allocator_instance = FailingAllocator.init(base_allocator_instance.getAllocator(), 0);
+pub const failing_allocator = failing_allocator_instance.allocator();
+pub var failing_allocator_instance = FailingAllocator.init(base_allocator_instance.allocator(), 0);
pub var base_allocator_instance = std.heap.FixedBufferAllocator.init("");
diff --git a/lib/std/testing/failing_allocator.zig b/lib/std/testing/failing_allocator.zig
index 137af925ad..15da5091fb 100644
--- a/lib/std/testing/failing_allocator.zig
+++ b/lib/std/testing/failing_allocator.zig
@@ -40,7 +40,7 @@ pub const FailingAllocator = struct {
};
}
- pub fn getAllocator(self: *FailingAllocator) mem.Allocator {
+ pub fn allocator(self: *FailingAllocator) mem.Allocator {
return mem.Allocator.init(self, alloc, resize);
}
diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig
index 0fb4357917..ef716ffb32 100644
--- a/lib/std/zig/parser_test.zig
+++ b/lib/std/zig/parser_test.zig
@@ -5351,8 +5351,8 @@ fn testTransform(source: [:0]const u8, expected_source: []const u8) !void {
const needed_alloc_count = x: {
// Try it once with unlimited memory, make sure it works
var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
- var failing_allocator = std.testing.FailingAllocator.init(fixed_allocator.getAllocator(), maxInt(usize));
- const allocator = failing_allocator.getAllocator();
+ var failing_allocator = std.testing.FailingAllocator.init(fixed_allocator.allocator(), maxInt(usize));
+ const allocator = failing_allocator.allocator();
var anything_changed: bool = undefined;
const result_source = try testParse(source, allocator, &anything_changed);
try std.testing.expectEqualStrings(expected_source, result_source);
@@ -5369,9 +5369,9 @@ fn testTransform(source: [:0]const u8, expected_source: []const u8) !void {
var fail_index: usize = 0;
while (fail_index < needed_alloc_count) : (fail_index += 1) {
var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
- var failing_allocator = std.testing.FailingAllocator.init(fixed_allocator.getAllocator(), fail_index);
+ var failing_allocator = std.testing.FailingAllocator.init(fixed_allocator.allocator(), fail_index);
var anything_changed: bool = undefined;
- if (testParse(source, failing_allocator.getAllocator(), &anything_changed)) |_| {
+ if (testParse(source, failing_allocator.allocator(), &anything_changed)) |_| {
return error.NondeterministicMemoryUsage;
} else |err| switch (err) {
error.OutOfMemory => {
diff --git a/lib/std/zig/perf_test.zig b/lib/std/zig/perf_test.zig
index d2286914b0..d3fc90eaea 100644
--- a/lib/std/zig/perf_test.zig
+++ b/lib/std/zig/perf_test.zig
@@ -33,7 +33,7 @@ pub fn main() !void {
fn testOnce() usize {
var fixed_buf_alloc = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
- var allocator = fixed_buf_alloc.getAllocator();
+ var allocator = fixed_buf_alloc.allocator();
_ = std.zig.parse(allocator, source) catch @panic("parse failure");
return fixed_buf_alloc.end_index;
}
diff --git a/lib/std/zig/string_literal.zig b/lib/std/zig/string_literal.zig
index 5e44e5f8f3..1eaab26e3a 100644
--- a/lib/std/zig/string_literal.zig
+++ b/lib/std/zig/string_literal.zig
@@ -147,7 +147,7 @@ test "parse" {
var fixed_buf_mem: [32]u8 = undefined;
var fixed_buf_alloc = std.heap.FixedBufferAllocator.init(fixed_buf_mem[0..]);
- var alloc = fixed_buf_alloc.getAllocator();
+ var alloc = fixed_buf_alloc.allocator();
try expect(eql(u8, "foo", try parseAlloc(alloc, "\"foo\"")));
try expect(eql(u8, "foo", try parseAlloc(alloc, "\"f\x6f\x6f\"")));
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 2e58c8c5d9..f51a73e12a 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -98,7 +98,7 @@ pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir {
var astgen: AstGen = .{
.gpa = gpa,
- .arena = arena.getAllocator(),
+ .arena = arena.allocator(),
.tree = &tree,
};
defer astgen.deinit(gpa);
@@ -1939,7 +1939,7 @@ fn blockExprStmts(gz: *GenZir, parent_scope: *Scope, statements: []const Ast.Nod
var block_arena = std.heap.ArenaAllocator.init(gz.astgen.gpa);
defer block_arena.deinit();
- const block_arena_allocator = block_arena.getAllocator();
+ const block_arena_allocator = block_arena.allocator();
var noreturn_src_node: Ast.Node.Index = 0;
var scope = parent_scope;
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 7c855862fd..6589c980bf 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -412,7 +412,7 @@ pub const AllErrors = struct {
errors: *std.ArrayList(Message),
module_err_msg: Module.ErrorMsg,
) !void {
- const allocator = arena.getAllocator();
+ const allocator = arena.allocator();
const notes = try allocator.alloc(Message, module_err_msg.notes.len);
for (notes) |*note, i| {
const module_note = module_err_msg.notes[i];
@@ -549,7 +549,7 @@ pub const AllErrors = struct {
msg: []const u8,
optional_children: ?AllErrors,
) !void {
- const allocator = arena.getAllocator();
+ const allocator = arena.allocator();
const duped_msg = try allocator.dupe(u8, msg);
if (optional_children) |*children| {
try errors.append(.{ .plain = .{
@@ -788,7 +788,7 @@ fn addPackageTableToCacheHash(
seen_table: *std.AutoHashMap(*Package, void),
hash_type: union(enum) { path_bytes, files: *Cache.Manifest },
) (error{OutOfMemory} || std.os.GetCwdError)!void {
- const allocator = arena.getAllocator();
+ const allocator = arena.allocator();
const packages = try allocator.alloc(Package.Table.KV, pkg_table.count());
{
@@ -852,7 +852,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
// initialization and then is freed in deinit().
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
errdefer arena_allocator.deinit();
- const arena = arena_allocator.getAllocator();
+ const arena = arena_allocator.allocator();
// We put the `Compilation` itself in the arena. Freeing the arena will free the module.
// It's initialized later after we prepare the initialization options.
@@ -1210,7 +1210,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
{
var local_arena = std.heap.ArenaAllocator.init(gpa);
defer local_arena.deinit();
- var seen_table = std.AutoHashMap(*Package, void).init(local_arena.getAllocator());
+ var seen_table = std.AutoHashMap(*Package, void).init(local_arena.allocator());
try addPackageTableToCacheHash(&hash, &local_arena, main_pkg.table, &seen_table, .path_bytes);
}
hash.add(valgrind);
@@ -2013,7 +2013,7 @@ pub fn totalErrorCount(self: *Compilation) usize {
pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
var arena = std.heap.ArenaAllocator.init(self.gpa);
errdefer arena.deinit();
- const arena_allocator = arena.getAllocator();
+ const arena_allocator = arena.allocator();
var errors = std.ArrayList(AllErrors.Message).init(self.gpa);
defer errors.deinit();
@@ -2295,7 +2295,7 @@ fn processOneJob(comp: *Compilation, job: Job, main_progress_node: *std.Progress
var tmp_arena = std.heap.ArenaAllocator.init(gpa);
defer tmp_arena.deinit();
- const sema_arena = tmp_arena.getAllocator();
+ const sema_arena = tmp_arena.allocator();
const sema_frame = tracy.namedFrame("sema");
var sema_frame_ended = false;
@@ -2390,7 +2390,7 @@ fn processOneJob(comp: *Compilation, job: Job, main_progress_node: *std.Progress
.decl = decl,
.fwd_decl = fwd_decl.toManaged(gpa),
.typedefs = c_codegen.TypedefMap.init(gpa),
- .typedefs_arena = typedefs_arena.getAllocator(),
+ .typedefs_arena = typedefs_arena.allocator(),
};
defer dg.fwd_decl.deinit();
defer dg.typedefs.deinit();
@@ -2844,7 +2844,7 @@ pub fn cImport(comp: *Compilation, c_src: []const u8) !CImportResult {
const digest = if (!actual_hit) digest: {
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = arena_allocator.getAllocator();
+ const arena = arena_allocator.allocator();
const tmp_digest = man.hash.peek();
const tmp_dir_sub_path = try std.fs.path.join(arena, &[_][]const u8{ "o", &tmp_digest });
@@ -3099,7 +3099,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = arena_allocator.getAllocator();
+ const arena = arena_allocator.allocator();
const c_source_basename = std.fs.path.basename(c_object.src.src_path);
@@ -4420,7 +4420,7 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = arena_allocator.getAllocator();
+ const arena = arena_allocator.allocator();
// Here we use the legacy stage1 C++ compiler to compile Zig code.
const mod = comp.bin_file.options.module.?;
@@ -4457,7 +4457,7 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node
_ = try man.addFile(main_zig_file, null);
{
- var seen_table = std.AutoHashMap(*Package, void).init(arena_allocator.getAllocator());
+ var seen_table = std.AutoHashMap(*Package, void).init(arena_allocator.allocator());
try addPackageTableToCacheHash(&man.hash, &arena_allocator, mod.main_pkg.table, &seen_table, .{ .files = &man });
}
man.hash.add(comp.bin_file.options.valgrind);
diff --git a/src/DepTokenizer.zig b/src/DepTokenizer.zig
index e99bfc7464..09712baa99 100644
--- a/src/DepTokenizer.zig
+++ b/src/DepTokenizer.zig
@@ -878,7 +878,7 @@ test "error prereq - continuation expecting end-of-line" {
// - tokenize input, emit textual representation, and compare to expect
fn depTokenizer(input: []const u8, expect: []const u8) !void {
var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator);
- const arena = arena_allocator.getAllocator();
+ const arena = arena_allocator.allocator();
defer arena_allocator.deinit();
var it: Tokenizer = .{ .bytes = input };
diff --git a/src/Module.zig b/src/Module.zig
index d016418d8d..0e5e3690bd 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -517,7 +517,7 @@ pub const Decl = struct {
pub fn finalizeNewArena(decl: *Decl, arena: *std.heap.ArenaAllocator) !void {
assert(decl.value_arena == null);
- const arena_state = try arena.getAllocator().create(std.heap.ArenaAllocator.State);
+ const arena_state = try arena.allocator().create(std.heap.ArenaAllocator.State);
arena_state.* = arena.state;
decl.value_arena = arena_state;
}
@@ -3159,7 +3159,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
const gpa = mod.gpa;
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer new_decl_arena.deinit();
- const new_decl_arena_allocator = new_decl_arena.getAllocator();
+ const new_decl_arena_allocator = new_decl_arena.allocator();
const struct_obj = try new_decl_arena_allocator.create(Module.Struct);
const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj);
@@ -3203,7 +3203,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
var sema_arena = std.heap.ArenaAllocator.init(gpa);
defer sema_arena.deinit();
- const sema_arena_allocator = sema_arena.getAllocator();
+ const sema_arena_allocator = sema_arena.allocator();
var sema: Sema = .{
.mod = mod,
@@ -3267,11 +3267,11 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
// We need the memory for the Type to go into the arena for the Decl
var decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer decl_arena.deinit();
- const decl_arena_allocator = decl_arena.getAllocator();
+ const decl_arena_allocator = decl_arena.allocator();
var analysis_arena = std.heap.ArenaAllocator.init(gpa);
defer analysis_arena.deinit();
- const analysis_arena_allocator = analysis_arena.getAllocator();
+ const analysis_arena_allocator = analysis_arena.allocator();
var sema: Sema = .{
.mod = mod,
@@ -4132,7 +4132,7 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn, arena: Allocator) Sem
// Use the Decl's arena for captured values.
var decl_arena = decl.value_arena.?.promote(gpa);
defer decl.value_arena.?.* = decl_arena.state;
- const decl_arena_allocator = decl_arena.getAllocator();
+ const decl_arena_allocator = decl_arena.allocator();
var sema: Sema = .{
.mod = mod,
@@ -4756,7 +4756,7 @@ pub fn populateTestFunctions(mod: *Module) !void {
// decl reference it as a slice.
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer new_decl_arena.deinit();
- const arena = new_decl_arena.getAllocator();
+ const arena = new_decl_arena.allocator();
const test_fn_vals = try arena.alloc(Value, mod.test_functions.count());
const array_decl = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{
@@ -4807,7 +4807,7 @@ pub fn populateTestFunctions(mod: *Module) !void {
{
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer new_decl_arena.deinit();
- const arena = new_decl_arena.getAllocator();
+ const arena = new_decl_arena.allocator();
// This copy accesses the old Decl Type/Value so it must be done before `clearValues`.
const new_ty = try Type.Tag.const_slice.create(arena, try tmp_test_fn_ty.copy(arena));
diff --git a/src/Sema.zig b/src/Sema.zig
index 9e0aa2f75e..7bf36d9ae0 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -418,7 +418,7 @@ pub const Block = struct {
finished: bool,
pub fn arena(wad: *WipAnonDecl) Allocator {
- return wad.new_decl_arena.getAllocator();
+ return wad.new_decl_arena.allocator();
}
pub fn deinit(wad: *WipAnonDecl) void {
@@ -1594,7 +1594,7 @@ fn zirStructDecl(
var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
errdefer new_decl_arena.deinit();
- const new_decl_arena_allocator = new_decl_arena.getAllocator();
+ const new_decl_arena_allocator = new_decl_arena.allocator();
const struct_obj = try new_decl_arena_allocator.create(Module.Struct);
const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj);
@@ -1699,7 +1699,7 @@ fn zirEnumDecl(
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer new_decl_arena.deinit();
- const new_decl_arena_allocator = new_decl_arena.getAllocator();
+ const new_decl_arena_allocator = new_decl_arena.allocator();
const enum_obj = try new_decl_arena_allocator.create(Module.EnumFull);
const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumFull);
@@ -1889,7 +1889,7 @@ fn zirUnionDecl(
var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
errdefer new_decl_arena.deinit();
- const new_decl_arena_allocator = new_decl_arena.getAllocator();
+ const new_decl_arena_allocator = new_decl_arena.allocator();
const union_obj = try new_decl_arena_allocator.create(Module.Union);
const type_tag: Type.Tag = if (small.has_tag_type or small.auto_enum_tag) .union_tagged else .@"union";
@@ -1958,7 +1958,7 @@ fn zirOpaqueDecl(
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer new_decl_arena.deinit();
- const new_decl_arena_allocator = new_decl_arena.getAllocator();
+ const new_decl_arena_allocator = new_decl_arena.allocator();
const opaque_obj = try new_decl_arena_allocator.create(Module.Opaque);
const opaque_ty_payload = try new_decl_arena_allocator.create(Type.Payload.Opaque);
@@ -2012,7 +2012,7 @@ fn zirErrorSetDecl(
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer new_decl_arena.deinit();
- const new_decl_arena_allocator = new_decl_arena.getAllocator();
+ const new_decl_arena_allocator = new_decl_arena.allocator();
const error_set = try new_decl_arena_allocator.create(Module.ErrorSet);
const error_set_ty = try Type.Tag.error_set.create(new_decl_arena_allocator, error_set);
@@ -3940,7 +3940,7 @@ fn analyzeCall(
{
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
errdefer arena_allocator.deinit();
- const arena = arena_allocator.getAllocator();
+ const arena = arena_allocator.allocator();
for (memoized_call_key.args) |*arg| {
arg.* = try arg.*.copy(arena);
@@ -4074,7 +4074,7 @@ fn analyzeCall(
var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
errdefer new_decl_arena.deinit();
- const new_decl_arena_allocator = new_decl_arena.getAllocator();
+ const new_decl_arena_allocator = new_decl_arena.allocator();
// Re-run the block that creates the function, with the comptime parameters
// pre-populated inside `inst_map`. This causes `param_comptime` and
@@ -6053,8 +6053,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
defer arena.deinit();
const target = sema.mod.getTarget();
- const min_int = try operand_ty.minInt(arena.getAllocator(), target);
- const max_int = try operand_ty.maxInt(arena.getAllocator(), target);
+ const min_int = try operand_ty.minInt(arena.allocator(), target);
+ const max_int = try operand_ty.maxInt(arena.allocator(), target);
if (try range_set.spans(min_int, max_int, operand_ty)) {
if (special_prong == .@"else") {
return sema.fail(
@@ -12801,7 +12801,7 @@ const ComptimePtrMutationKit = struct {
fn beginArena(self: *ComptimePtrMutationKit, gpa: Allocator) Allocator {
self.decl_arena = self.decl_ref_mut.decl.value_arena.?.promote(gpa);
- return self.decl_arena.getAllocator();
+ return self.decl_arena.allocator();
}
fn finishArena(self: *ComptimePtrMutationKit) void {
@@ -14293,7 +14293,7 @@ fn semaStructFields(
var decl_arena = decl.value_arena.?.promote(gpa);
defer decl.value_arena.?.* = decl_arena.state;
- const decl_arena_allocator = decl_arena.getAllocator();
+ const decl_arena_allocator = decl_arena.allocator();
var analysis_arena = std.heap.ArenaAllocator.init(gpa);
defer analysis_arena.deinit();
@@ -14301,7 +14301,7 @@ fn semaStructFields(
var sema: Sema = .{
.mod = mod,
.gpa = gpa,
- .arena = analysis_arena.getAllocator(),
+ .arena = analysis_arena.allocator(),
.perm_arena = decl_arena_allocator,
.code = zir,
.owner_decl = decl,
@@ -14461,7 +14461,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
var decl_arena = union_obj.owner_decl.value_arena.?.promote(gpa);
defer union_obj.owner_decl.value_arena.?.* = decl_arena.state;
- const decl_arena_allocator = decl_arena.getAllocator();
+ const decl_arena_allocator = decl_arena.allocator();
var analysis_arena = std.heap.ArenaAllocator.init(gpa);
defer analysis_arena.deinit();
@@ -14469,7 +14469,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
var sema: Sema = .{
.mod = mod,
.gpa = gpa,
- .arena = analysis_arena.getAllocator(),
+ .arena = analysis_arena.allocator(),
.perm_arena = decl_arena_allocator,
.code = zir,
.owner_decl = decl,
@@ -14623,7 +14623,7 @@ fn generateUnionTagTypeNumbered(
var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
errdefer new_decl_arena.deinit();
- const new_decl_arena_allocator = new_decl_arena.getAllocator();
+ const new_decl_arena_allocator = new_decl_arena.allocator();
const enum_obj = try new_decl_arena_allocator.create(Module.EnumNumbered);
const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumNumbered);
@@ -14660,7 +14660,7 @@ fn generateUnionTagTypeSimple(sema: *Sema, block: *Block, fields_len: u32) !Type
var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
errdefer new_decl_arena.deinit();
- const new_decl_arena_allocator = new_decl_arena.getAllocator();
+ const new_decl_arena_allocator = new_decl_arena.allocator();
const enum_obj = try new_decl_arena_allocator.create(Module.EnumSimple);
const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumSimple);
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 142bf1a146..f54ae7f76d 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -390,7 +390,7 @@ pub const DeclGen = struct {
// Fall back to generic implementation.
var arena = std.heap.ArenaAllocator.init(dg.module.gpa);
defer arena.deinit();
- const arena_allocator = arena.getAllocator();
+ const arena_allocator = arena.allocator();
try writer.writeAll("{");
var index: usize = 0;
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 31d3461846..4600c2e07e 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -331,7 +331,7 @@ pub const Object = struct {
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = arena_allocator.getAllocator();
+ const arena = arena_allocator.allocator();
const mod = comp.bin_file.options.module.?;
const cache_dir = mod.zig_cache_artifact_directory;
@@ -779,7 +779,7 @@ pub const DeclGen = struct {
// The Type memory is ephemeral; since we want to store a longer-lived
// reference, we need to copy it here.
- gop.key_ptr.* = try t.copy(dg.object.type_map_arena.getAllocator());
+ gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator());
const opaque_obj = t.castTag(.@"opaque").?.data;
const name = try opaque_obj.getFullyQualifiedName(gpa);
@@ -837,7 +837,7 @@ pub const DeclGen = struct {
// The Type memory is ephemeral; since we want to store a longer-lived
// reference, we need to copy it here.
- gop.key_ptr.* = try t.copy(dg.object.type_map_arena.getAllocator());
+ gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator());
const struct_obj = t.castTag(.@"struct").?.data;
@@ -871,7 +871,7 @@ pub const DeclGen = struct {
// The Type memory is ephemeral; since we want to store a longer-lived
// reference, we need to copy it here.
- gop.key_ptr.* = try t.copy(dg.object.type_map_arena.getAllocator());
+ gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator());
const union_obj = t.cast(Type.Payload.Union).?.data;
const target = dg.module.getTarget();
@@ -2485,7 +2485,7 @@ pub const FuncGen = struct {
var arena_allocator = std.heap.ArenaAllocator.init(self.gpa);
defer arena_allocator.deinit();
- const arena = arena_allocator.getAllocator();
+ const arena = arena_allocator.allocator();
const llvm_params_len = args.len;
const llvm_param_types = try arena.alloc(*const llvm.Type, llvm_params_len);
diff --git a/src/crash_report.zig b/src/crash_report.zig
index 92c37d2ac8..5d96ebc669 100644
--- a/src/crash_report.zig
+++ b/src/crash_report.zig
@@ -85,7 +85,7 @@ fn dumpStatusReport() !void {
const anal = zir_state orelse return;
// Note: We have the panic mutex here, so we can safely use the global crash heap.
var fba = std.heap.FixedBufferAllocator.init(&crash_heap);
- const allocator = fba.getAllocator();
+ const allocator = fba.allocator();
const stderr = io.getStdErr().writer();
const block: *Sema.Block = anal.block;
diff --git a/src/glibc.zig b/src/glibc.zig
index c3f2da599e..e67c3360e9 100644
--- a/src/glibc.zig
+++ b/src/glibc.zig
@@ -65,7 +65,7 @@ pub fn loadMetaData(gpa: Allocator, zig_lib_dir: std.fs.Dir) LoadMetaDataError!*
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
errdefer arena_allocator.deinit();
- const arena = arena_allocator.getAllocator();
+ const arena = arena_allocator.allocator();
var all_versions = std.ArrayListUnmanaged(std.builtin.Version){};
var all_functions = std.ArrayListUnmanaged(Fn){};
@@ -256,7 +256,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
const gpa = comp.gpa;
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
defer arena_allocator.deinit();
- const arena = arena_allocator.getAllocator();
+ const arena = arena_allocator.allocator();
switch (crt_file) {
.crti_o => {
@@ -711,7 +711,7 @@ pub fn buildSharedObjects(comp: *Compilation) !void {
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = arena_allocator.getAllocator();
+ const arena = arena_allocator.allocator();
const target = comp.getTarget();
const target_version = target.os.version_range.linux.glibc;
diff --git a/src/libcxx.zig b/src/libcxx.zig
index 908df3ca25..fe96207c48 100644
--- a/src/libcxx.zig
+++ b/src/libcxx.zig
@@ -89,7 +89,7 @@ pub fn buildLibCXX(comp: *Compilation) !void {
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = arena_allocator.getAllocator();
+ const arena = arena_allocator.allocator();
const root_name = "c++";
const output_mode = .Lib;
@@ -236,7 +236,7 @@ pub fn buildLibCXXABI(comp: *Compilation) !void {
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = arena_allocator.getAllocator();
+ const arena = arena_allocator.allocator();
const root_name = "c++abi";
const output_mode = .Lib;
diff --git a/src/libtsan.zig b/src/libtsan.zig
index 47089cc779..0f05957387 100644
--- a/src/libtsan.zig
+++ b/src/libtsan.zig
@@ -15,7 +15,7 @@ pub fn buildTsan(comp: *Compilation) !void {
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = arena_allocator.getAllocator();
+ const arena = arena_allocator.allocator();
const root_name = "tsan";
const output_mode = .Lib;
diff --git a/src/libunwind.zig b/src/libunwind.zig
index dabd8631b9..95c58936fa 100644
--- a/src/libunwind.zig
+++ b/src/libunwind.zig
@@ -17,7 +17,7 @@ pub fn buildStaticLib(comp: *Compilation) !void {
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = arena_allocator.getAllocator();
+ const arena = arena_allocator.allocator();
const root_name = "unwind";
const output_mode = .Lib;
diff --git a/src/link.zig b/src/link.zig
index 0b191ca8da..79a1c63462 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -628,7 +628,7 @@ pub const File = struct {
var arena_allocator = std.heap.ArenaAllocator.init(base.allocator);
defer arena_allocator.deinit();
- const arena = arena_allocator.getAllocator();
+ const arena = arena_allocator.allocator();
const directory = base.options.emit.?.directory; // Just an alias to make it shorter to type.
diff --git a/src/link/C.zig b/src/link/C.zig
index 6bdace3fca..6599008c73 100644
--- a/src/link/C.zig
+++ b/src/link/C.zig
@@ -128,7 +128,7 @@ pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, livenes
.decl = decl,
.fwd_decl = fwd_decl.toManaged(module.gpa),
.typedefs = typedefs.promote(module.gpa),
- .typedefs_arena = self.arena.getAllocator(),
+ .typedefs_arena = self.arena.allocator(),
},
.code = code.toManaged(module.gpa),
.indent_writer = undefined, // set later so we can get a pointer to object.code
@@ -193,7 +193,7 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
.decl = decl,
.fwd_decl = fwd_decl.toManaged(module.gpa),
.typedefs = typedefs.promote(module.gpa),
- .typedefs_arena = self.arena.getAllocator(),
+ .typedefs_arena = self.arena.allocator(),
},
.code = code.toManaged(module.gpa),
.indent_writer = undefined, // set later so we can get a pointer to object.code
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index 00bddfe578..2445b11caf 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -877,7 +877,7 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
defer arena_allocator.deinit();
- const arena = arena_allocator.getAllocator();
+ const arena = arena_allocator.allocator();
const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 200ca48898..24f8a02b95 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -1243,7 +1243,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
defer arena_allocator.deinit();
- const arena = arena_allocator.getAllocator();
+ const arena = arena_allocator.allocator();
const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 5e0e766483..db2b8ffc42 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -412,7 +412,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation) !void {
var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
defer arena_allocator.deinit();
- const arena = arena_allocator.getAllocator();
+ const arena = arena_allocator.allocator();
const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.
@@ -5379,7 +5379,7 @@ fn snapshotState(self: *MachO) !void {
var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
defer arena_allocator.deinit();
- const arena = arena_allocator.getAllocator();
+ const arena = arena_allocator.allocator();
const out_file = try emit.directory.handle.createFile("snapshots.json", .{
.truncate = self.cold_start,
diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig
index bc7e4d71a4..7dfbee2a1f 100644
--- a/src/link/Plan9.zig
+++ b/src/link/Plan9.zig
@@ -168,7 +168,7 @@ fn putFn(self: *Plan9, decl: *Module.Decl, out: FnDeclOutput) !void {
try fn_map_res.value_ptr.functions.put(gpa, decl, out);
} else {
const file = decl.getFileScope();
- const arena = self.path_arena.getAllocator();
+ const arena = self.path_arena.allocator();
// each file gets a symbol
fn_map_res.value_ptr.* = .{
.sym_index = blk: {
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index 7ffd067596..367f3376f0 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -950,7 +950,7 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation) !void {
var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
defer arena_allocator.deinit();
- const arena = arena_allocator.getAllocator();
+ const arena = arena_allocator.allocator();
const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.
diff --git a/src/link/tapi.zig b/src/link/tapi.zig
index fe5ef2af9c..7a55a5104d 100644
--- a/src/link/tapi.zig
+++ b/src/link/tapi.zig
@@ -120,7 +120,7 @@ pub const LibStub = struct {
err: {
log.debug("trying to parse as []TbdV4", .{});
const inner = lib_stub.yaml.parse([]TbdV4) catch break :err;
- var out = try lib_stub.yaml.arena.getAllocator().alloc(Tbd, inner.len);
+ var out = try lib_stub.yaml.arena.allocator().alloc(Tbd, inner.len);
for (inner) |doc, i| {
out[i] = .{ .v4 = doc };
}
@@ -130,7 +130,7 @@ pub const LibStub = struct {
err: {
log.debug("trying to parse as TbdV4", .{});
const inner = lib_stub.yaml.parse(TbdV4) catch break :err;
- var out = try lib_stub.yaml.arena.getAllocator().alloc(Tbd, 1);
+ var out = try lib_stub.yaml.arena.allocator().alloc(Tbd, 1);
out[0] = .{ .v4 = inner };
break :blk out;
}
@@ -148,7 +148,7 @@ pub const LibStub = struct {
err: {
log.debug("trying to parse as TbdV3", .{});
const inner = lib_stub.yaml.parse(TbdV3) catch break :err;
- var out = try lib_stub.yaml.arena.getAllocator().alloc(Tbd, 1);
+ var out = try lib_stub.yaml.arena.allocator().alloc(Tbd, 1);
out[0] = .{ .v3 = inner };
break :blk out;
}
diff --git a/src/link/tapi/yaml.zig b/src/link/tapi/yaml.zig
index 261caee717..7c1997604d 100644
--- a/src/link/tapi/yaml.zig
+++ b/src/link/tapi/yaml.zig
@@ -248,7 +248,7 @@ pub const Yaml = struct {
pub fn load(allocator: Allocator, source: []const u8) !Yaml {
var arena = ArenaAllocator.init(allocator);
- const arena_allocator = arena.getAllocator();
+ const arena_allocator = arena.allocator();
var tree = Tree.init(arena_allocator);
try tree.parse(source);
@@ -300,7 +300,7 @@ pub const Yaml = struct {
.Pointer => |info| {
switch (info.size) {
.Slice => {
- var parsed = try self.arena.getAllocator().alloc(info.child, self.docs.items.len);
+ var parsed = try self.arena.allocator().alloc(info.child, self.docs.items.len);
for (self.docs.items) |doc, i| {
parsed[i] = try self.parseValue(info.child, doc);
}
@@ -362,7 +362,7 @@ pub const Yaml = struct {
inline for (struct_info.fields) |field| {
const value: ?Value = map.get(field.name) orelse blk: {
- const field_name = try mem.replaceOwned(u8, self.arena.getAllocator(), field.name, "_", "-");
+ const field_name = try mem.replaceOwned(u8, self.arena.allocator(), field.name, "_", "-");
break :blk map.get(field_name);
};
@@ -383,7 +383,7 @@ pub const Yaml = struct {
fn parsePointer(self: *Yaml, comptime T: type, value: Value) Error!T {
const ptr_info = @typeInfo(T).Pointer;
- const arena = self.arena.getAllocator();
+ const arena = self.arena.allocator();
switch (ptr_info.size) {
.Slice => {
diff --git a/src/main.zig b/src/main.zig
index c97415ff29..981a76a364 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -139,7 +139,7 @@ pub fn main() anyerror!void {
const gpa = gpa: {
if (!builtin.link_libc) {
gpa_need_deinit = true;
- break :gpa general_purpose_allocator.getAllocator();
+ break :gpa general_purpose_allocator.allocator();
}
// We would prefer to use raw libc allocator here, but cannot
// use it if it won't support the alignment we need.
@@ -153,7 +153,7 @@ pub fn main() anyerror!void {
};
var arena_instance = std.heap.ArenaAllocator.init(gpa);
defer arena_instance.deinit();
- const arena = arena_instance.getAllocator();
+ const arena = arena_instance.allocator();
const args = try process.argsAlloc(arena);
@@ -3619,7 +3619,7 @@ pub fn cmdFmt(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
var errors = std.ArrayList(Compilation.AllErrors.Message).init(gpa);
defer errors.deinit();
- try Compilation.AllErrors.addZir(arena_instance.getAllocator(), &errors, &file);
+ try Compilation.AllErrors.addZir(arena_instance.allocator(), &errors, &file);
const ttyconf: std.debug.TTY.Config = switch (color) {
.auto => std.debug.detectTTYConfig(),
.on => .escape_codes,
@@ -3818,7 +3818,7 @@ fn fmtPathFile(
var errors = std.ArrayList(Compilation.AllErrors.Message).init(fmt.gpa);
defer errors.deinit();
- try Compilation.AllErrors.addZir(arena_instance.getAllocator(), &errors, &file);
+ try Compilation.AllErrors.addZir(arena_instance.allocator(), &errors, &file);
const ttyconf: std.debug.TTY.Config = switch (fmt.color) {
.auto => std.debug.detectTTYConfig(),
.on => .escape_codes,
diff --git a/src/mingw.zig b/src/mingw.zig
index 6f02ebf395..264740c333 100644
--- a/src/mingw.zig
+++ b/src/mingw.zig
@@ -25,7 +25,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
}
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = arena_allocator.getAllocator();
+ const arena = arena_allocator.allocator();
switch (crt_file) {
.crt2_o => {
@@ -281,7 +281,7 @@ fn add_cc_args(
pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = arena_allocator.getAllocator();
+ const arena = arena_allocator.allocator();
const def_file_path = findDef(comp, arena, lib_name) catch |err| switch (err) {
error.FileNotFound => {
diff --git a/src/musl.zig b/src/musl.zig
index 7c3957fdd7..cad6246c98 100644
--- a/src/musl.zig
+++ b/src/musl.zig
@@ -25,7 +25,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
const gpa = comp.gpa;
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
defer arena_allocator.deinit();
- const arena = arena_allocator.getAllocator();
+ const arena = arena_allocator.allocator();
switch (crt_file) {
.crti_o => {
diff --git a/src/print_air.zig b/src/print_air.zig
index ce53a26aeb..3e503735b9 100644
--- a/src/print_air.zig
+++ b/src/print_air.zig
@@ -47,7 +47,7 @@ pub fn dump(gpa: Allocator, air: Air, zir: Zir, liveness: Liveness) void {
var writer: Writer = .{
.gpa = gpa,
- .arena = arena.getAllocator(),
+ .arena = arena.allocator(),
.air = air,
.zir = zir,
.liveness = liveness,
diff --git a/src/print_zir.zig b/src/print_zir.zig
index 996898b4ac..401d41cd50 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -19,7 +19,7 @@ pub fn renderAsTextToFile(
var writer: Writer = .{
.gpa = gpa,
- .arena = arena.getAllocator(),
+ .arena = arena.allocator(),
.file = scope_file,
.code = scope_file.zir,
.indent = 0,
@@ -74,7 +74,7 @@ pub fn renderInstructionContext(
var writer: Writer = .{
.gpa = gpa,
- .arena = arena.getAllocator(),
+ .arena = arena.allocator(),
.file = scope_file,
.code = scope_file.zir,
.indent = if (indent < 2) 2 else indent,
@@ -106,7 +106,7 @@ pub fn renderSingleInstruction(
var writer: Writer = .{
.gpa = gpa,
- .arena = arena.getAllocator(),
+ .arena = arena.allocator(),
.file = scope_file,
.code = scope_file.zir,
.indent = indent,
diff --git a/src/stage1.zig b/src/stage1.zig
index 810dcc477b..8e6090af0b 100644
--- a/src/stage1.zig
+++ b/src/stage1.zig
@@ -38,7 +38,7 @@ pub fn main(argc: c_int, argv: [*][*:0]u8) callconv(.C) c_int {
const gpa = std.heap.c_allocator;
var arena_instance = std.heap.ArenaAllocator.init(gpa);
defer arena_instance.deinit();
- const arena = arena_instance.getAllocator();
+ const arena = arena_instance.allocator();
const args = arena.alloc([]const u8, @intCast(usize, argc)) catch fatal("{s}", .{"OutOfMemory"});
for (args) |*arg, i| {
diff --git a/src/test.zig b/src/test.zig
index 74147069e8..44faea0ed9 100644
--- a/src/test.zig
+++ b/src/test.zig
@@ -692,7 +692,7 @@ pub const TestContext = struct {
var arena_allocator = std.heap.ArenaAllocator.init(allocator);
defer arena_allocator.deinit();
- const arena = arena_allocator.getAllocator();
+ const arena = arena_allocator.allocator();
var tmp = std.testing.tmpDir(.{});
defer tmp.cleanup();
diff --git a/src/translate_c.zig b/src/translate_c.zig
index 5700592550..03bb59469a 100644
--- a/src/translate_c.zig
+++ b/src/translate_c.zig
@@ -373,7 +373,7 @@ pub fn translate(
// from this function.
var arena = std.heap.ArenaAllocator.init(gpa);
errdefer arena.deinit();
- const arena_allocator = arena.getAllocator();
+ const arena_allocator = arena.allocator();
var context = Context{
.gpa = gpa,
diff --git a/src/wasi_libc.zig b/src/wasi_libc.zig
index b2235ad53e..fb60c98e56 100644
--- a/src/wasi_libc.zig
+++ b/src/wasi_libc.zig
@@ -67,7 +67,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
const gpa = comp.gpa;
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
defer arena_allocator.deinit();
- const arena = arena_allocator.getAllocator();
+ const arena = arena_allocator.allocator();
switch (crt_file) {
.crt1_reactor_o => {
diff --git a/test/cli.zig b/test/cli.zig
index 20a2143f51..d4afe417ce 100644
--- a/test/cli.zig
+++ b/test/cli.zig
@@ -16,7 +16,7 @@ pub fn main() !void {
// skip my own exe name
_ = arg_it.skip();
- a = arena.getAllocator();
+ a = arena.allocator();
const zig_exe_rel = try (arg_it.next(a) orelse {
std.debug.print("Expected first argument to be path to zig compiler\n", .{});
diff --git a/test/compare_output.zig b/test/compare_output.zig
index 46cbdd77f6..8a0bfc1ac7 100644
--- a/test/compare_output.zig
+++ b/test/compare_output.zig
@@ -491,7 +491,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\pub fn main() !void {
\\ var allocator_buf: [10]u8 = undefined;
\\ var fixedBufferAllocator = std.mem.validationWrap(std.heap.FixedBufferAllocator.init(&allocator_buf));
- \\ const allocator = std.heap.loggingAllocator(fixedBufferAllocator.getAllocator()).getAllocator();
+ \\ const allocator = std.heap.loggingAllocator(fixedBufferAllocator.allocator()).allocator();
\\
\\ var a = try allocator.alloc(u8, 10);
\\ a = allocator.shrink(a, 5);
diff --git a/test/standalone/brace_expansion/main.zig b/test/standalone/brace_expansion/main.zig
index f0dea39ccb..6bc5501853 100644
--- a/test/standalone/brace_expansion/main.zig
+++ b/test/standalone/brace_expansion/main.zig
@@ -16,7 +16,7 @@ const Token = union(enum) {
};
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
-const global_allocator = gpa.getAllocator();
+const global_allocator = gpa.allocator();
fn tokenize(input: []const u8) !ArrayList(Token) {
const State = enum {
diff --git a/test/standalone/cat/main.zig b/test/standalone/cat/main.zig
index a8b16a05ca..740e73a33e 100644
--- a/test/standalone/cat/main.zig
+++ b/test/standalone/cat/main.zig
@@ -8,7 +8,7 @@ const warn = std.log.warn;
pub fn main() !void {
var arena_instance = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_instance.deinit();
- const arena = arena_instance.getAllocator();
+ const arena = arena_instance.allocator();
const args = try process.argsAlloc(arena);
diff --git a/tools/gen_spirv_spec.zig b/tools/gen_spirv_spec.zig
index e4ad6927b2..465404e5d1 100644
--- a/tools/gen_spirv_spec.zig
+++ b/tools/gen_spirv_spec.zig
@@ -4,7 +4,7 @@ const g = @import("spirv/grammar.zig");
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
- const allocator = arena.getAllocator();
+ const allocator = arena.allocator();
const args = try std.process.argsAlloc(allocator);
if (args.len != 2) {
diff --git a/tools/gen_stubs.zig b/tools/gen_stubs.zig
index a99d14752f..56ea7715a7 100644
--- a/tools/gen_stubs.zig
+++ b/tools/gen_stubs.zig
@@ -25,7 +25,7 @@ pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
- const ally = arena.getAllocator();
+ const ally = arena.allocator();
var symbols = std.ArrayList(Symbol).init(ally);
var sections = std.ArrayList([]const u8).init(ally);
diff --git a/tools/merge_anal_dumps.zig b/tools/merge_anal_dumps.zig
index 93e0674605..7c77e76a02 100644
--- a/tools/merge_anal_dumps.zig
+++ b/tools/merge_anal_dumps.zig
@@ -9,7 +9,7 @@ pub fn main() anyerror!void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
- const allocator = arena.getAllocator();
+ const allocator = arena.allocator();
const args = try std.process.argsAlloc(allocator);
diff --git a/tools/process_headers.zig b/tools/process_headers.zig
index 1a90f99343..fea50b30df 100644
--- a/tools/process_headers.zig
+++ b/tools/process_headers.zig
@@ -284,7 +284,7 @@ const LibCVendor = enum {
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
- const allocator = arena.getAllocator();
+ const allocator = arena.allocator();
const args = try std.process.argsAlloc(allocator);
var search_paths = std.ArrayList([]const u8).init(allocator);
var opt_out_dir: ?[]const u8 = null;
diff --git a/tools/update-license-headers.zig b/tools/update-license-headers.zig
index 83c47f1481..4e415784f8 100644
--- a/tools/update-license-headers.zig
+++ b/tools/update-license-headers.zig
@@ -10,7 +10,7 @@ pub fn main() !void {
defer root_node.end();
var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
- const arena = arena_allocator.getAllocator();
+ const arena = arena_allocator.allocator();
const args = try std.process.argsAlloc(arena);
const path_to_walk = args[1];
diff --git a/tools/update_clang_options.zig b/tools/update_clang_options.zig
index 90a96e0572..7360f96560 100644
--- a/tools/update_clang_options.zig
+++ b/tools/update_clang_options.zig
@@ -450,13 +450,8 @@ const cpu_targets = struct {
pub fn main() anyerror!void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
-<<<<<<< HEAD
- const allocator = &arena.allocator;
-=======
-
- const allocator = arena.getAllocator();
->>>>>>> 11157e318 (allocgate: stage 1 and 2 building)
+ const allocator = arena.allocator();
const args = try std.process.argsAlloc(allocator);
if (args.len <= 1) {
diff --git a/tools/update_cpu_features.zig b/tools/update_cpu_features.zig
index 70bc5a1c74..73c05d8cf1 100644
--- a/tools/update_cpu_features.zig
+++ b/tools/update_cpu_features.zig
@@ -769,7 +769,7 @@ const llvm_targets = [_]LlvmTarget{
pub fn main() anyerror!void {
var arena_state = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_state.deinit();
- const arena = arena_state.getAllocator();
+ const arena = arena_state.allocator();
const args = try std.process.argsAlloc(arena);
if (args.len <= 1) {
@@ -845,7 +845,7 @@ fn processOneTarget(job: Job) anyerror!void {
var arena_state = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_state.deinit();
- const arena = arena_state.getAllocator();
+ const arena = arena_state.allocator();
var progress_node = job.root_progress.start(llvm_target.zig_name, 3);
progress_node.activate();
diff --git a/tools/update_glibc.zig b/tools/update_glibc.zig
index 7cccb47e1c..e450f8c6d4 100644
--- a/tools/update_glibc.zig
+++ b/tools/update_glibc.zig
@@ -133,7 +133,7 @@ const Function = struct {
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
- const allocator = arena.getAllocator();
+ const allocator = arena.allocator();
const args = try std.process.argsAlloc(allocator);
const in_glibc_dir = args[1]; // path to the unzipped tarball of glibc, e.g. ~/downloads/glibc-2.25
const zig_src_dir = args[2]; // path to the source checkout of zig, lib dir, e.g. ~/zig-src/lib
diff --git a/tools/update_spirv_features.zig b/tools/update_spirv_features.zig
index 0c6c570a31..8972ab641c 100644
--- a/tools/update_spirv_features.zig
+++ b/tools/update_spirv_features.zig
@@ -48,7 +48,7 @@ const Version = struct {
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
- const allocator = arena.getAllocator();
+ const allocator = arena.allocator();
const args = try std.process.argsAlloc(allocator);
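With this commit applied, the whole tree converges on one calling convention: an allocator implementation no longer exposes a public {#syntax#}allocator{#endsyntax#} field (or a {#syntax#}getAllocator{#endsyntax#} getter); instead, callers obtain the {#syntax#}std.mem.Allocator{#endsyntax#} interface by calling a method named {#syntax#}allocator(){#endsyntax#}. A minimal sketch of the resulting usage pattern, assuming a post-allocgate standard library:

const std = @import("std");

pub fn main() !void {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();

    // The interface is obtained by value from the implementation struct;
    // it carries a type-erased pointer to `arena` together with a vtable,
    // so no pointer into the struct's fields is ever taken.
    const allocator = arena.allocator();

    const buf = try allocator.alloc(u8, 64);
    defer allocator.free(buf);
}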
From 23866b1f81010277b204d6f3f5db23d020a76400 Mon Sep 17 00:00:00 2001
From: Lee Cannon
Date: Wed, 3 Nov 2021 12:49:31 +0000
Subject: allocgate: update code to use new interface
---
lib/std/heap/general_purpose_allocator.zig | 2 +-
src/tracy.zig | 12 ++++++------
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index fa2536cfaa..be7651980c 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -1192,7 +1192,7 @@ test "bug 9995 fix, large allocs count requested size not backing size" {
// with AtLeast, buffer likely to be larger than requested, especially when shrinking
var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
const allocator = gpa.allocator();
-
+
var buf = try allocator.allocAdvanced(u8, 1, page_size + 1, .at_least);
try std.testing.expect(gpa.total_requested_bytes == page_size + 1);
buf = try allocator.reallocAtLeast(buf, 1);
diff --git a/src/tracy.zig b/src/tracy.zig
index 8abd78110f..83e31e5764 100644
--- a/src/tracy.zig
+++ b/src/tracy.zig
@@ -109,11 +109,14 @@ pub fn tracyAllocator(allocator: std.mem.Allocator) TracyAllocator(null) {
pub fn TracyAllocator(comptime name: ?[:0]const u8) type {
return struct {
- allocator: std.mem.Allocator,
parent_allocator: std.mem.Allocator,
const Self = @This();
+ pub fn allocator(self: *Self) std.mem.Allocator {
+ return std.mem.Allocator.init(self, allocFn, resizeFn);
+ }
+
pub fn init(allocator: std.mem.Allocator) Self {
return .{
.parent_allocator = allocator,
@@ -124,8 +127,7 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type {
};
}
- fn allocFn(allocator: std.mem.Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) std.mem.Allocator.Error![]u8 {
- const self = @fieldParentPtr(Self, "allocator", allocator);
+ fn allocFn(self: *Self, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) std.mem.Allocator.Error![]u8 {
const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align, ret_addr);
if (result) |data| {
if (data.len != 0) {
@@ -141,9 +143,7 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type {
return result;
}
- fn resizeFn(allocator: std.mem.Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) std.mem.Allocator.Error!usize {
- const self = @fieldParentPtr(Self, "allocator", allocator);
-
+ fn resizeFn(self: *Self, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) std.mem.Allocator.Error!usize {
if (self.parent_allocator.resizeFn(self.parent_allocator, buf, buf_align, new_len, len_align, ret_addr)) |resized_len| {
// this condition is to handle free being called on an empty slice that was never even allocated
// example case: `std.process.getSelfExeSharedLibPaths` can return `&[_][:0]u8{}`
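The {#syntax#}TracyAllocator{#endsyntax#} diff above shows the implementation side of the new interface: the embedded {#syntax#}allocator{#endsyntax#} field is deleted, the callbacks take a typed {#syntax#}*Self{#endsyntax#} instead of recovering it with {#syntax#}@fieldParentPtr{#endsyntax#}, and {#syntax#}std.mem.Allocator.init{#endsyntax#} wires the pieces together. A rough sketch of the same pattern with a hypothetical bump allocator ({#syntax#}Bump{#endsyntax#}, {#syntax#}allocFn{#endsyntax#}, and {#syntax#}resizeFn{#endsyntax#} are illustrative names, not standard library code); note that at this stage of the series {#syntax#}init{#endsyntax#} still takes only two callbacks, so a free arrives as a resize to length zero:

const std = @import("std");

const Bump = struct {
    buffer: []u8,
    end_index: usize = 0,

    pub fn allocator(self: *Bump) std.mem.Allocator {
        // `init` type-erases `self` and builds a vtable from the typed callbacks.
        return std.mem.Allocator.init(self, allocFn, resizeFn);
    }

    fn allocFn(self: *Bump, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) std.mem.Allocator.Error![]u8 {
        _ = len_align;
        _ = ret_addr;
        // Bump the end index forward to the next aligned address.
        const base = @ptrToInt(self.buffer.ptr);
        const start = std.mem.alignForward(base + self.end_index, ptr_align) - base;
        if (start + len > self.buffer.len) return error.OutOfMemory;
        self.end_index = start + len;
        return self.buffer[start..self.end_index];
    }

    fn resizeFn(self: *Bump, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) std.mem.Allocator.Error!usize {
        _ = buf_align;
        _ = len_align;
        _ = ret_addr;
        if (new_len <= buf.len) {
            // Shrinking always succeeds; reclaim the space only if this was
            // the most recent allocation (a free shows up as new_len == 0).
            if (buf.ptr + buf.len == self.buffer.ptr + self.end_index)
                self.end_index -= buf.len - new_len;
            return new_len;
        }
        return error.OutOfMemory;
    }
};

The very next commit removes exactly that overloading: {#syntax#}free{#endsyntax#} becomes its own vtable entry, and {#syntax#}resize{#endsyntax#} no longer has to special-case {#syntax#}new_len == 0{#endsyntax#}.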
From f68cda738ad0d3e9bc0f328befad301d9e23756e Mon Sep 17 00:00:00 2001
From: Lee Cannon
Date: Sat, 6 Nov 2021 00:54:35 +0000
Subject: allocgate: split free out from resize
---
lib/std/heap.zig | 141 +++++++++++++----
lib/std/heap/arena_allocator.zig | 16 +-
lib/std/heap/general_purpose_allocator.zig | 245 ++++++++++++++++++++---------
lib/std/heap/log_to_writer_allocator.zig | 20 ++-
lib/std/heap/logging_allocator.zig | 20 ++-
lib/std/mem.zig | 20 ++-
lib/std/mem/Allocator.zig | 114 +++++++++++---
lib/std/testing/failing_allocator.zig | 22 ++-
src/tracy.zig | 20 ++-
9 files changed, 458 insertions(+), 160 deletions(-)
diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index e005101c6b..2d3a96676d 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -132,10 +132,6 @@ const CAllocator = struct {
) Allocator.Error!usize {
_ = buf_align;
_ = return_address;
- if (new_len == 0) {
- alignedFree(buf.ptr);
- return 0;
- }
if (new_len <= buf.len) {
return mem.alignAllocLen(buf.len, new_len, len_align);
}
@@ -147,6 +143,17 @@ const CAllocator = struct {
}
return error.OutOfMemory;
}
+
+ fn free(
+ _: *c_void,
+ buf: []u8,
+ buf_align: u29,
+ return_address: usize,
+ ) void {
+ _ = buf_align;
+ _ = return_address;
+ alignedFree(buf.ptr);
+ }
};
/// Supports the full Allocator interface, including alignment, and exploiting
@@ -159,6 +166,7 @@ pub const c_allocator = Allocator{
const c_allocator_vtable = Allocator.VTable{
.alloc = CAllocator.alloc,
.resize = CAllocator.resize,
+ .free = CAllocator.free,
};
/// Asserts allocations are within `@alignOf(std.c.max_align_t)` and directly calls
@@ -173,6 +181,7 @@ pub const raw_c_allocator = Allocator{
const raw_c_allocator_vtable = Allocator.VTable{
.alloc = rawCAlloc,
.resize = rawCResize,
+ .free = rawCFree,
};
fn rawCAlloc(
@@ -199,16 +208,23 @@ fn rawCResize(
) Allocator.Error!usize {
_ = old_align;
_ = ret_addr;
- if (new_len == 0) {
- c.free(buf.ptr);
- return 0;
- }
if (new_len <= buf.len) {
return mem.alignAllocLen(buf.len, new_len, len_align);
}
return error.OutOfMemory;
}
+fn rawCFree(
+ _: *c_void,
+ buf: []u8,
+ old_align: u29,
+ ret_addr: usize,
+) void {
+ _ = old_align;
+ _ = ret_addr;
+ c.free(buf.ptr);
+}
+
/// This allocator makes a syscall directly for every allocation and free.
/// Thread-safe and lock-free.
pub const page_allocator = if (builtin.target.isWasm())
@@ -238,6 +254,7 @@ const PageAllocator = struct {
const vtable = Allocator.VTable{
.alloc = alloc,
.resize = resize,
+ .free = free,
};
fn alloc(_: *c_void, n: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
@@ -351,16 +368,6 @@ const PageAllocator = struct {
if (builtin.os.tag == .windows) {
const w = os.windows;
- if (new_size == 0) {
- // From the docs:
- // "If the dwFreeType parameter is MEM_RELEASE, this parameter
- // must be 0 (zero). The function frees the entire region that
- // is reserved in the initial allocation call to VirtualAlloc."
- // So we can only use MEM_RELEASE when actually releasing the
- // whole allocation.
- w.VirtualFree(buf_unaligned.ptr, 0, w.MEM_RELEASE);
- return 0;
- }
if (new_size <= buf_unaligned.len) {
const base_addr = @ptrToInt(buf_unaligned.ptr);
const old_addr_end = base_addr + buf_unaligned.len;
@@ -391,8 +398,6 @@ const PageAllocator = struct {
const ptr = @alignCast(mem.page_size, buf_unaligned.ptr + new_size_aligned);
// TODO: if the next_mmap_addr_hint is within the unmapped range, update it
os.munmap(ptr[0 .. buf_aligned_len - new_size_aligned]);
- if (new_size_aligned == 0)
- return 0;
return alignPageAllocLen(new_size_aligned, new_size, len_align);
}
@@ -400,6 +405,19 @@ const PageAllocator = struct {
// TODO: if the next_mmap_addr_hint is within the remapped range, update it
return error.OutOfMemory;
}
+
+ fn free(_: *c_void, buf_unaligned: []u8, buf_align: u29, return_address: usize) void {
+ _ = buf_align;
+ _ = return_address;
+
+ if (builtin.os.tag == .windows) {
+ os.windows.VirtualFree(buf_unaligned.ptr, 0, os.windows.MEM_RELEASE);
+ } else {
+ const buf_aligned_len = mem.alignForward(buf_unaligned.len, mem.page_size);
+ const ptr = @alignCast(mem.page_size, buf_unaligned.ptr);
+ os.munmap(ptr[0..buf_aligned_len]);
+ }
+ }
};
const WasmPageAllocator = struct {
@@ -412,6 +430,7 @@ const WasmPageAllocator = struct {
const vtable = Allocator.VTable{
.alloc = alloc,
.resize = resize,
+ .free = free,
};
const PageStatus = enum(u1) {
@@ -571,7 +590,21 @@ const WasmPageAllocator = struct {
const base = nPages(@ptrToInt(buf.ptr));
freePages(base + new_n, base + current_n);
}
- return if (new_len == 0) 0 else alignPageAllocLen(new_n * mem.page_size, new_len, len_align);
+ return alignPageAllocLen(new_n * mem.page_size, new_len, len_align);
+ }
+
+ fn free(
+ _: *c_void,
+ buf: []u8,
+ buf_align: u29,
+ return_address: usize,
+ ) void {
+ _ = buf_align;
+ _ = return_address;
+ const aligned_len = mem.alignForward(buf.len, mem.page_size);
+ const current_n = nPages(aligned_len);
+ const base = nPages(@ptrToInt(buf.ptr));
+ freePages(base, base + current_n);
}
};
@@ -588,7 +621,7 @@ pub const HeapAllocator = switch (builtin.os.tag) {
}
pub fn allocator(self: *HeapAllocator) Allocator {
- return Allocator.init(self, alloc, resize);
+ return Allocator.init(self, alloc, resize, free);
}
pub fn deinit(self: *HeapAllocator) void {
@@ -644,10 +677,6 @@ pub const HeapAllocator = switch (builtin.os.tag) {
) error{OutOfMemory}!usize {
_ = buf_align;
_ = return_address;
- if (new_size == 0) {
- os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*c_void, getRecordPtr(buf).*));
- return 0;
- }
const root_addr = getRecordPtr(buf).*;
const align_offset = @ptrToInt(buf.ptr) - root_addr;
@@ -669,6 +698,17 @@ pub const HeapAllocator = switch (builtin.os.tag) {
getRecordPtr(buf.ptr[0..return_len]).* = root_addr;
return return_len;
}
+
+ fn free(
+ self: *HeapAllocator,
+ buf: []u8,
+ buf_align: u29,
+ return_address: usize,
+ ) void {
+ _ = buf_align;
+ _ = return_address;
+ os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*c_void, getRecordPtr(buf).*));
+ }
},
else => @compileError("Unsupported OS"),
};
@@ -696,13 +736,18 @@ pub const FixedBufferAllocator = struct {
/// *WARNING* using this at the same time as the interface returned by `threadSafeAllocator` is not thread safe
pub fn allocator(self: *FixedBufferAllocator) Allocator {
- return Allocator.init(self, alloc, resize);
+ return Allocator.init(self, alloc, resize, free);
}
/// Provides a lock free thread safe `Allocator` interface to the underlying `FixedBufferAllocator`
/// *WARNING* using this at the same time as the interface returned by `getAllocator` is not thread safe
pub fn threadSafeAllocator(self: *FixedBufferAllocator) Allocator {
- return Allocator.init(self, threadSafeAlloc, Allocator.NoResize(FixedBufferAllocator).noResize);
+ return Allocator.init(
+ self,
+ threadSafeAlloc,
+ Allocator.NoResize(FixedBufferAllocator).noResize,
+ Allocator.NoOpFree(FixedBufferAllocator).noOpFree,
+ );
}
pub fn ownsPtr(self: *FixedBufferAllocator, ptr: [*]u8) bool {
@@ -715,7 +760,7 @@ pub const FixedBufferAllocator = struct {
/// NOTE: this will not work in all cases, if the last allocation had an adjusted_index
/// then we won't be able to determine what the last allocation was. This is because
- /// the alignForward operation done in alloc is not reverisible.
+ /// the alignForward operation done in alloc is not reversible.
pub fn isLastAllocation(self: *FixedBufferAllocator, buf: []u8) bool {
return buf.ptr + buf.len == self.buffer.ptr + self.end_index;
}
@@ -751,13 +796,13 @@ pub const FixedBufferAllocator = struct {
if (!self.isLastAllocation(buf)) {
if (new_size > buf.len)
return error.OutOfMemory;
- return if (new_size == 0) 0 else mem.alignAllocLen(buf.len, new_size, len_align);
+ return mem.alignAllocLen(buf.len, new_size, len_align);
}
if (new_size <= buf.len) {
const sub = buf.len - new_size;
self.end_index -= sub;
- return if (new_size == 0) 0 else mem.alignAllocLen(buf.len - sub, new_size, len_align);
+ return mem.alignAllocLen(buf.len - sub, new_size, len_align);
}
const add = new_size - buf.len;
@@ -768,6 +813,21 @@ pub const FixedBufferAllocator = struct {
return new_size;
}
+ fn free(
+ self: *FixedBufferAllocator,
+ buf: []u8,
+ buf_align: u29,
+ return_address: usize,
+ ) void {
+ _ = buf_align;
+ _ = return_address;
+ assert(self.ownsSlice(buf)); // sanity check
+
+ if (self.isLastAllocation(buf)) {
+ self.end_index -= buf.len;
+ }
+ }
+
fn threadSafeAlloc(self: *FixedBufferAllocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
_ = len_align;
_ = ra;
@@ -810,7 +870,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
/// WARNING: This functions both fetches a `std.mem.Allocator` interface to this allocator *and* resets the internal buffer allocator
pub fn get(self: *Self) Allocator {
self.fixed_buffer_allocator = FixedBufferAllocator.init(self.buffer[0..]);
- return Allocator.init(self, alloc, resize);
+ return Allocator.init(self, alloc, resize, free);
}
fn alloc(
@@ -821,7 +881,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
return_address: usize,
) error{OutOfMemory}![]u8 {
return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, ptr_align, len_align, return_address) catch
- return self.fallback_allocator.vtable.alloc(self.fallback_allocator.ptr, len, ptr_align, len_align, return_address);
+ return self.fallback_allocator.rawAlloc(len, ptr_align, len_align, return_address);
}
fn resize(
@@ -835,7 +895,20 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, buf_align, new_len, len_align, return_address);
} else {
- return self.fallback_allocator.vtable.resize(self.fallback_allocator.ptr, buf, buf_align, new_len, len_align, return_address);
+ return self.fallback_allocator.rawResize(buf, buf_align, new_len, len_align, return_address);
+ }
+ }
+
+ fn free(
+ self: *Self,
+ buf: []u8,
+ buf_align: u29,
+ return_address: usize,
+ ) void {
+ if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
+ return FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, buf_align, return_address);
+ } else {
+ return self.fallback_allocator.rawFree(buf, buf_align, return_address);
}
}
};
diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig
index 35e3f0ada2..c5a8d6bc7e 100644
--- a/lib/std/heap/arena_allocator.zig
+++ b/lib/std/heap/arena_allocator.zig
@@ -24,7 +24,7 @@ pub const ArenaAllocator = struct {
};
pub fn allocator(self: *ArenaAllocator) Allocator {
- return Allocator.init(self, alloc, resize);
+ return Allocator.init(self, alloc, resize, free);
}
const BufNode = std.SinglyLinkedList([]u8).Node;
@@ -47,7 +47,7 @@ pub const ArenaAllocator = struct {
const actual_min_size = minimum_size + (@sizeOf(BufNode) + 16);
const big_enough_len = prev_len + actual_min_size;
const len = big_enough_len + big_enough_len / 2;
- const buf = try self.child_allocator.vtable.alloc(self.child_allocator.ptr, len, @alignOf(BufNode), 1, @returnAddress());
+ const buf = try self.child_allocator.rawAlloc(len, @alignOf(BufNode), 1, @returnAddress());
const buf_node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), buf.ptr));
buf_node.* = BufNode{
.data = buf,
@@ -111,4 +111,16 @@ pub const ArenaAllocator = struct {
return error.OutOfMemory;
}
}
+
+ fn free(self: *ArenaAllocator, buf: []u8, buf_align: u29, ret_addr: usize) void {
+ _ = buf_align;
+ _ = ret_addr;
+
+ const cur_node = self.state.buffer_list.first orelse return;
+ const cur_buf = cur_node.data[@sizeOf(BufNode)..];
+
+ if (@ptrToInt(cur_buf.ptr) + self.state.end_index == @ptrToInt(buf.ptr) + buf.len) {
+ self.state.end_index -= buf.len;
+ }
+ }
};
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index be7651980c..5687b1efb2 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -281,7 +281,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
};
pub fn allocator(self: *Self) Allocator {
- return Allocator.init(self, alloc, resize);
+ return Allocator.init(self, alloc, resize, free);
}
fn bucketStackTrace(
@@ -388,7 +388,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
var it = self.large_allocations.iterator();
while (it.next()) |large| {
if (large.value_ptr.freed) {
- _ = self.backing_allocator.vtable.resize(self.backing_allocator.ptr, large.value_ptr.bytes, large.value_ptr.ptr_align, 0, 0, @returnAddress()) catch unreachable;
+ self.backing_allocator.rawFree(large.value_ptr.bytes, large.value_ptr.ptr_align, @returnAddress());
}
}
}
@@ -529,9 +529,6 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
if (config.retain_metadata and entry.value_ptr.freed) {
if (config.safety) {
reportDoubleFree(ret_addr, entry.value_ptr.getStackTrace(.alloc), entry.value_ptr.getStackTrace(.free));
- // Recoverable if this is a free.
- if (new_size == 0)
- return @as(usize, 0);
@panic("Unrecoverable double free");
} else {
unreachable;
@@ -555,7 +552,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
// Do memory limit accounting with requested sizes rather than what backing_allocator returns
// because if we want to return error.OutOfMemory, we have to leave allocation untouched, and
- // that is impossible to guarantee after calling backing_allocator.vtable.resize.
+ // that is impossible to guarantee after calling backing_allocator.rawResize.
const prev_req_bytes = self.total_requested_bytes;
if (config.enable_memory_limit) {
const new_req_bytes = prev_req_bytes + new_size - entry.value_ptr.requested_size;
@@ -568,29 +565,12 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
self.total_requested_bytes = prev_req_bytes;
};
- const result_len = if (config.never_unmap and new_size == 0)
- 0
- else
- try self.backing_allocator.vtable.resize(self.backing_allocator.ptr, old_mem, old_align, new_size, len_align, ret_addr);
+ const result_len = try self.backing_allocator.rawResize(old_mem, old_align, new_size, len_align, ret_addr);
if (config.enable_memory_limit) {
entry.value_ptr.requested_size = new_size;
}
- if (result_len == 0) {
- if (config.verbose_log) {
- log.info("large free {d} bytes at {*}", .{ old_mem.len, old_mem.ptr });
- }
-
- if (!config.retain_metadata) {
- assert(self.large_allocations.remove(@ptrToInt(old_mem.ptr)));
- } else {
- entry.value_ptr.freed = true;
- entry.value_ptr.captureStackTrace(ret_addr, .free);
- }
- return 0;
- }
-
if (config.verbose_log) {
log.info("large resize {d} bytes at {*} to {d}", .{
old_mem.len, old_mem.ptr, new_size,
@@ -601,6 +581,64 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
return result_len;
}
+ /// This function assumes the object is in the large object storage regardless
+ /// of the parameters.
+ fn freeLarge(
+ self: *Self,
+ old_mem: []u8,
+ old_align: u29,
+ ret_addr: usize,
+ ) void {
+ _ = old_align;
+
+ const entry = self.large_allocations.getEntry(@ptrToInt(old_mem.ptr)) orelse {
+ if (config.safety) {
+ @panic("Invalid free");
+ } else {
+ unreachable;
+ }
+ };
+
+ if (config.retain_metadata and entry.value_ptr.freed) {
+ if (config.safety) {
+ reportDoubleFree(ret_addr, entry.value_ptr.getStackTrace(.alloc), entry.value_ptr.getStackTrace(.free));
+ return;
+ } else {
+ unreachable;
+ }
+ }
+
+ if (config.safety and old_mem.len != entry.value_ptr.bytes.len) {
+ var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
+ var free_stack_trace = StackTrace{
+ .instruction_addresses = &addresses,
+ .index = 0,
+ };
+ std.debug.captureStackTrace(ret_addr, &free_stack_trace);
+ log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {s} Free: {s}", .{
+ entry.value_ptr.bytes.len,
+ old_mem.len,
+ entry.value_ptr.getStackTrace(.alloc),
+ free_stack_trace,
+ });
+ }
+
+ if (config.enable_memory_limit) {
+ self.total_requested_bytes -= entry.value_ptr.requested_size;
+ }
+
+ if (config.verbose_log) {
+ log.info("large free {d} bytes at {*}", .{ old_mem.len, old_mem.ptr });
+ }
+
+ if (!config.retain_metadata) {
+ assert(self.large_allocations.remove(@ptrToInt(old_mem.ptr)));
+ } else {
+ entry.value_ptr.freed = true;
+ entry.value_ptr.captureStackTrace(ret_addr, .free);
+ }
+ }
+
pub fn setRequestedMemoryLimit(self: *Self, limit: usize) void {
self.requested_memory_limit = limit;
}
@@ -656,9 +694,6 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
if (!is_used) {
if (config.safety) {
reportDoubleFree(ret_addr, bucketStackTrace(bucket, size_class, slot_index, .alloc), bucketStackTrace(bucket, size_class, slot_index, .free));
- // Recoverable if this is a free.
- if (new_size == 0)
- return @as(usize, 0);
@panic("Unrecoverable double free");
} else {
unreachable;
@@ -678,52 +713,6 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
self.total_requested_bytes = prev_req_bytes;
};
- if (new_size == 0) {
- // Capture stack trace to be the "first free", in case a double free happens.
- bucket.captureStackTrace(ret_addr, size_class, slot_index, .free);
-
- used_byte.* &= ~(@as(u8, 1) << used_bit_index);
- bucket.used_count -= 1;
- if (bucket.used_count == 0) {
- if (bucket.next == bucket) {
- // it's the only bucket and therefore the current one
- self.buckets[bucket_index] = null;
- } else {
- bucket.next.prev = bucket.prev;
- bucket.prev.next = bucket.next;
- self.buckets[bucket_index] = bucket.prev;
- }
- if (!config.never_unmap) {
- self.backing_allocator.free(bucket.page[0..page_size]);
- }
- if (!config.retain_metadata) {
- self.freeBucket(bucket, size_class);
- } else {
- // move alloc_cursor to end so we can tell size_class later
- const slot_count = @divExact(page_size, size_class);
- bucket.alloc_cursor = @truncate(SlotIndex, slot_count);
- if (self.empty_buckets) |prev_bucket| {
- // empty_buckets is ordered newest to oldest through prev so that if
- // config.never_unmap is false and backing_allocator reuses freed memory
- // then searchBuckets will always return the newer, relevant bucket
- bucket.prev = prev_bucket;
- bucket.next = prev_bucket.next;
- prev_bucket.next = bucket;
- bucket.next.prev = bucket;
- } else {
- bucket.prev = bucket;
- bucket.next = bucket;
- }
- self.empty_buckets = bucket;
- }
- } else {
- @memset(old_mem.ptr, undefined, old_mem.len);
- }
- if (config.verbose_log) {
- log.info("small free {d} bytes at {*}", .{ old_mem.len, old_mem.ptr });
- }
- return @as(usize, 0);
- }
const new_aligned_size = math.max(new_size, old_align);
const new_size_class = math.ceilPowerOfTwoAssert(usize, new_aligned_size);
if (new_size_class <= size_class) {
@@ -740,6 +729,114 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
return error.OutOfMemory;
}
+ fn free(
+ self: *Self,
+ old_mem: []u8,
+ old_align: u29,
+ ret_addr: usize,
+ ) void {
+ const held = self.mutex.acquire();
+ defer held.release();
+
+ assert(old_mem.len != 0);
+
+ const aligned_size = math.max(old_mem.len, old_align);
+ if (aligned_size > largest_bucket_object_size) {
+ self.freeLarge(old_mem, old_align, ret_addr);
+ return;
+ }
+ const size_class_hint = math.ceilPowerOfTwoAssert(usize, aligned_size);
+
+ var bucket_index = math.log2(size_class_hint);
+ var size_class: usize = size_class_hint;
+ const bucket = while (bucket_index < small_bucket_count) : (bucket_index += 1) {
+ if (searchBucket(self.buckets[bucket_index], @ptrToInt(old_mem.ptr))) |bucket| {
+ // move bucket to head of list to optimize search for nearby allocations
+ self.buckets[bucket_index] = bucket;
+ break bucket;
+ }
+ size_class *= 2;
+ } else blk: {
+ if (config.retain_metadata) {
+ if (!self.large_allocations.contains(@ptrToInt(old_mem.ptr))) {
+ // object not in active buckets or a large allocation, so search empty buckets
+ if (searchBucket(self.empty_buckets, @ptrToInt(old_mem.ptr))) |bucket| {
+ // bucket is empty so is_used below will always be false and we exit there
+ break :blk bucket;
+ } else {
+ @panic("Invalid free");
+ }
+ }
+ }
+ self.freeLarge(old_mem, old_align, ret_addr);
+ return;
+ };
+ const byte_offset = @ptrToInt(old_mem.ptr) - @ptrToInt(bucket.page);
+ const slot_index = @intCast(SlotIndex, byte_offset / size_class);
+ const used_byte_index = slot_index / 8;
+ const used_bit_index = @intCast(u3, slot_index % 8);
+ const used_byte = bucket.usedBits(used_byte_index);
+ const is_used = @truncate(u1, used_byte.* >> used_bit_index) != 0;
+ if (!is_used) {
+ if (config.safety) {
+ reportDoubleFree(ret_addr, bucketStackTrace(bucket, size_class, slot_index, .alloc), bucketStackTrace(bucket, size_class, slot_index, .free));
+ // Recoverable since this is a free.
+ return;
+ } else {
+ unreachable;
+ }
+ }
+
+ // Definitely an in-use small alloc now.
+ if (config.enable_memory_limit) {
+ self.total_requested_bytes -= old_mem.len;
+ }
+
+ // Capture stack trace to be the "first free", in case a double free happens.
+ bucket.captureStackTrace(ret_addr, size_class, slot_index, .free);
+
+ used_byte.* &= ~(@as(u8, 1) << used_bit_index);
+ bucket.used_count -= 1;
+ if (bucket.used_count == 0) {
+ if (bucket.next == bucket) {
+ // it's the only bucket and therefore the current one
+ self.buckets[bucket_index] = null;
+ } else {
+ bucket.next.prev = bucket.prev;
+ bucket.prev.next = bucket.next;
+ self.buckets[bucket_index] = bucket.prev;
+ }
+ if (!config.never_unmap) {
+ self.backing_allocator.free(bucket.page[0..page_size]);
+ }
+ if (!config.retain_metadata) {
+ self.freeBucket(bucket, size_class);
+ } else {
+ // move alloc_cursor to end so we can tell size_class later
+ const slot_count = @divExact(page_size, size_class);
+ bucket.alloc_cursor = @truncate(SlotIndex, slot_count);
+ if (self.empty_buckets) |prev_bucket| {
+ // empty_buckets is ordered newest to oldest through prev so that if
+ // config.never_unmap is false and backing_allocator reuses freed memory
+ // then searchBuckets will always return the newer, relevant bucket
+ bucket.prev = prev_bucket;
+ bucket.next = prev_bucket.next;
+ prev_bucket.next = bucket;
+ bucket.next.prev = bucket;
+ } else {
+ bucket.prev = bucket;
+ bucket.next = bucket;
+ }
+ self.empty_buckets = bucket;
+ }
+ } else {
+ @memset(old_mem.ptr, undefined, old_mem.len);
+ }
+ if (config.verbose_log) {
+ log.info("small free {d} bytes at {*}", .{ old_mem.len, old_mem.ptr });
+ }
+ }
+
// Returns true if an allocation of `size` bytes is within the specified
// limits if enable_memory_limit is true
fn isAllocationAllowed(self: *Self, size: usize) bool {
@@ -764,7 +861,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
const new_aligned_size = math.max(len, ptr_align);
if (new_aligned_size > largest_bucket_object_size) {
try self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1);
- const slice = try self.backing_allocator.vtable.alloc(self.backing_allocator.ptr, len, ptr_align, len_align, ret_addr);
+ const slice = try self.backing_allocator.rawAlloc(len, ptr_align, len_align, ret_addr);
const gop = self.large_allocations.getOrPutAssumeCapacity(@ptrToInt(slice.ptr));
if (config.retain_metadata and !config.never_unmap) {
diff --git a/lib/std/heap/log_to_writer_allocator.zig b/lib/std/heap/log_to_writer_allocator.zig
index 5019a015bc..fa8c19e0a0 100644
--- a/lib/std/heap/log_to_writer_allocator.zig
+++ b/lib/std/heap/log_to_writer_allocator.zig
@@ -18,7 +18,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
}
pub fn allocator(self: *Self) Allocator {
- return Allocator.init(self, alloc, resize);
+ return Allocator.init(self, alloc, resize, free);
}
fn alloc(
@@ -29,7 +29,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
ra: usize,
) error{OutOfMemory}![]u8 {
self.writer.print("alloc : {}", .{len}) catch {};
- const result = self.parent_allocator.vtable.alloc(self.parent_allocator.ptr, len, ptr_align, len_align, ra);
+ const result = self.parent_allocator.rawAlloc(len, ptr_align, len_align, ra);
if (result) |_| {
self.writer.print(" success!\n", .{}) catch {};
} else |_| {
@@ -46,14 +46,12 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
len_align: u29,
ra: usize,
) error{OutOfMemory}!usize {
- if (new_len == 0) {
- self.writer.print("free : {}\n", .{buf.len}) catch {};
- } else if (new_len <= buf.len) {
+ if (new_len <= buf.len) {
self.writer.print("shrink: {} to {}\n", .{ buf.len, new_len }) catch {};
} else {
self.writer.print("expand: {} to {}", .{ buf.len, new_len }) catch {};
}
- if (self.parent_allocator.vtable.resize(self.parent_allocator.ptr, buf, buf_align, new_len, len_align, ra)) |resized_len| {
+ if (self.parent_allocator.rawResize(buf, buf_align, new_len, len_align, ra)) |resized_len| {
if (new_len > buf.len) {
self.writer.print(" success!\n", .{}) catch {};
}
@@ -64,6 +62,16 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
return e;
}
}
+
+ fn free(
+ self: *Self,
+ buf: []u8,
+ buf_align: u29,
+ ra: usize,
+ ) void {
+ self.writer.print("free : {}\n", .{buf.len}) catch {};
+ self.parent_allocator.rawFree(buf, buf_align, ra);
+ }
};
}
diff --git a/lib/std/heap/logging_allocator.zig b/lib/std/heap/logging_allocator.zig
index 21c3546b11..b631cd0b1b 100644
--- a/lib/std/heap/logging_allocator.zig
+++ b/lib/std/heap/logging_allocator.zig
@@ -33,7 +33,7 @@ pub fn ScopedLoggingAllocator(
}
pub fn allocator(self: *Self) Allocator {
- return Allocator.init(self, alloc, resize);
+ return Allocator.init(self, alloc, resize, free);
}
// This function is required as the `std.log.log` function is not public
@@ -53,7 +53,7 @@ pub fn ScopedLoggingAllocator(
len_align: u29,
ra: usize,
) error{OutOfMemory}![]u8 {
- const result = self.parent_allocator.vtable.alloc(self.parent_allocator.ptr, len, ptr_align, len_align, ra);
+ const result = self.parent_allocator.rawAlloc(len, ptr_align, len_align, ra);
if (result) |_| {
logHelper(
success_log_level,
@@ -78,10 +78,8 @@ pub fn ScopedLoggingAllocator(
len_align: u29,
ra: usize,
) error{OutOfMemory}!usize {
- if (self.parent_allocator.vtable.resize(self.parent_allocator.ptr, buf, buf_align, new_len, len_align, ra)) |resized_len| {
- if (new_len == 0) {
- logHelper(success_log_level, "free - success - len: {}", .{buf.len});
- } else if (new_len <= buf.len) {
+ if (self.parent_allocator.rawResize(buf, buf_align, new_len, len_align, ra)) |resized_len| {
+ if (new_len <= buf.len) {
logHelper(
success_log_level,
"shrink - success - {} to {}, len_align: {}, buf_align: {}",
@@ -106,6 +104,16 @@ pub fn ScopedLoggingAllocator(
return err;
}
}
+
+ fn free(
+ self: *Self,
+ buf: []u8,
+ buf_align: u29,
+ ra: usize,
+ ) void {
+ self.parent_allocator.rawFree(buf, buf_align, ra);
+ logHelper(success_log_level, "free - len: {}", .{buf.len});
+ }
};
}
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index 0594873514..9c7ce3867b 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -47,7 +47,7 @@ pub fn ValidationAllocator(comptime T: type) type {
}
pub fn allocator(self: *Self) Allocator {
- return Allocator.init(self, alloc, resize);
+ return Allocator.init(self, alloc, resize, free);
}
fn getUnderlyingAllocatorPtr(self: *Self) Allocator {
@@ -70,7 +70,7 @@ pub fn ValidationAllocator(comptime T: type) type {
}
const underlying = self.getUnderlyingAllocatorPtr();
- const result = try underlying.vtable.alloc(underlying.ptr, n, ptr_align, len_align, ret_addr);
+ const result = try underlying.rawAlloc(n, ptr_align, len_align, ret_addr);
assert(mem.isAligned(@ptrToInt(result.ptr), ptr_align));
if (len_align == 0) {
assert(result.len == n);
@@ -95,7 +95,7 @@ pub fn ValidationAllocator(comptime T: type) type {
assert(new_len >= len_align);
}
const underlying = self.getUnderlyingAllocatorPtr();
- const result = try underlying.vtable.resize(underlying.ptr, buf, buf_align, new_len, len_align, ret_addr);
+ const result = try underlying.rawResize(buf, buf_align, new_len, len_align, ret_addr);
if (len_align == 0) {
assert(result == new_len);
} else {
@@ -104,6 +104,19 @@ pub fn ValidationAllocator(comptime T: type) type {
}
return result;
}
+
+ pub fn free(
+ self: *Self,
+ buf: []u8,
+ buf_align: u29,
+ ret_addr: usize,
+ ) void {
+ _ = self;
+ _ = buf_align;
+ _ = ret_addr;
+ assert(buf.len > 0);
+ }
+
pub usingnamespace if (T == Allocator or !@hasDecl(T, "reset")) struct {} else struct {
pub fn reset(self: *Self) void {
self.underlying_allocator.reset();
@@ -139,6 +152,7 @@ const fail_allocator = Allocator{
const failAllocator_vtable = Allocator.VTable{
.alloc = failAllocatorAlloc,
.resize = Allocator.NoResize(c_void).noResize,
+ .free = Allocator.NoOpFree(c_void).noOpFree,
};
fn failAllocatorAlloc(_: *c_void, n: usize, alignment: u29, len_align: u29, ra: usize) Allocator.Error![]u8 {
diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig
index df3974f795..6edad7e05b 100644
--- a/lib/std/mem/Allocator.zig
+++ b/lib/std/mem/Allocator.zig
@@ -5,6 +5,7 @@ const assert = std.debug.assert;
const math = std.math;
const mem = std.mem;
const Allocator = @This();
+const builtin = @import("builtin");
pub const Error = error{OutOfMemory};
@@ -28,9 +29,6 @@ pub const VTable = struct {
/// length returned by `alloc` or `resize`. `buf_align` must equal the same value
/// that was passed as the `ptr_align` parameter to the original `alloc` call.
///
- /// Passing a `new_len` of 0 frees and invalidates the buffer such that it can no
- /// longer be passed to `resize`.
- ///
/// error.OutOfMemory can only be returned if `new_len` is greater than `buf.len`.
/// If `buf` cannot be expanded to accommodate `new_len`, then the allocation MUST be
/// unmodified and error.OutOfMemory MUST be returned.
@@ -40,36 +38,54 @@ pub const VTable = struct {
/// provide a way to modify the alignment of a pointer. Rather it provides an API for
/// accepting more bytes of memory from the allocator than requested.
///
- /// `new_len` must be greater than or equal to `len_align` and must be aligned by `len_align`.
+ /// `new_len` must be greater than zero, greater than or equal to `len_align`, and aligned by `len_align`.
///
/// `ret_addr` is optionally provided as the first return address of the allocation call stack.
/// If the value is `0` it means no return address has been provided.
resize: fn (ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize,
+
+ /// Free and invalidate a buffer. `buf.len` must equal the most recent length returned by `alloc` or `resize`.
+ /// `buf_align` must equal the same value that was passed as the `ptr_align` parameter to the original `alloc` call.
+ ///
+ /// `ret_addr` is optionally provided as the first return address of the allocation call stack.
+ /// If the value is `0` it means no return address has been provided.
+ free: fn (ptr: *c_void, buf: []u8, buf_align: u29, ret_addr: usize) void,
};
pub fn init(
pointer: anytype,
comptime allocFn: fn (ptr: @TypeOf(pointer), len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8,
comptime resizeFn: fn (ptr: @TypeOf(pointer), buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize,
+ comptime freeFn: fn (ptr: @TypeOf(pointer), buf: []u8, buf_align: u29, ret_addr: usize) void,
) Allocator {
const Ptr = @TypeOf(pointer);
- assert(@typeInfo(Ptr) == .Pointer); // Must be a pointer
- assert(@typeInfo(Ptr).Pointer.size == .One); // Must be a single-item pointer
+ const ptr_info = @typeInfo(Ptr);
+
+ assert(ptr_info == .Pointer); // Must be a pointer
+ assert(ptr_info.Pointer.size == .One); // Must be a single-item pointer
+
+ const alignment = ptr_info.Pointer.alignment;
+
const gen = struct {
fn alloc(ptr: *c_void, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 {
- const alignment = @typeInfo(Ptr).Pointer.alignment;
const self = @ptrCast(Ptr, @alignCast(alignment, ptr));
- return allocFn(self, len, ptr_align, len_align, ret_addr);
+ return @call(.{ .modifier = .always_inline }, allocFn, .{ self, len, ptr_align, len_align, ret_addr });
}
fn resize(ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize {
- const alignment = @typeInfo(Ptr).Pointer.alignment;
+ assert(new_len != 0);
+ const self = @ptrCast(Ptr, @alignCast(alignment, ptr));
+ return @call(.{ .modifier = .always_inline }, resizeFn, .{ self, buf, buf_align, new_len, len_align, ret_addr });
+ }
+ fn free(ptr: *c_void, buf: []u8, buf_align: u29, ret_addr: usize) void {
const self = @ptrCast(Ptr, @alignCast(alignment, ptr));
- return resizeFn(self, buf, buf_align, new_len, len_align, ret_addr);
+ @call(.{ .modifier = .always_inline }, freeFn, .{ self, buf, buf_align, ret_addr });
}
};
+
const vtable = VTable{
.alloc = gen.alloc,
.resize = gen.resize,
+ .free = gen.free,
};
return .{
@@ -100,6 +116,56 @@ pub fn NoResize(comptime AllocatorType: type) type {
};
}
+/// Set freeFn to `NoOpFree(AllocatorType).noOpFree` if free is a no-op.
+pub fn NoOpFree(comptime AllocatorType: type) type {
+ return struct {
+ pub fn noOpFree(
+ self: *AllocatorType,
+ buf: []u8,
+ buf_align: u29,
+ ret_addr: usize,
+ ) void {
+ _ = self;
+ _ = buf;
+ _ = buf_align;
+ _ = ret_addr;
+ }
+ };
+}
+
+/// Set freeFn to `PanicFree(AllocatorType).noOpFree` if free is not a supported operation.
+pub fn PanicFree(comptime AllocatorType: type) type {
+ return struct {
+ pub fn noOpFree(
+ self: *AllocatorType,
+ buf: []u8,
+ buf_align: u29,
+ ret_addr: usize,
+ ) void {
+ _ = self;
+ _ = buf;
+ _ = buf_align;
+ _ = ret_addr;
+ @panic("free is not a supported operation for the allocator: " ++ @typeName(AllocatorType));
+ }
+ };
+}
+
+/// This function is not intended to be called except from within the implementation of an Allocator
+pub inline fn rawAlloc(self: Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 {
+ return self.vtable.alloc(self.ptr, len, ptr_align, len_align, ret_addr);
+}
+
+/// This function is not intended to be called except from within the implementation of an Allocator
+pub inline fn rawResize(self: Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize {
+ return self.vtable.resize(self.ptr, buf, buf_align, new_len, len_align, ret_addr);
+}
+
+/// This function is not intended to be called except from within the implementation of an Allocator
+pub inline fn rawFree(self: Allocator, buf: []u8, buf_align: u29, ret_addr: usize) void {
+ return self.vtable.free(self.ptr, buf, buf_align, ret_addr);
+}
+
/// Realloc is used to modify the size or alignment of an existing allocation,
/// as well as to provide the allocator with an opportunity to move an allocation
/// to a better location.
@@ -133,8 +199,7 @@ fn reallocBytes(
/// Guaranteed to be >= 1.
/// Guaranteed to be a power of 2.
old_alignment: u29,
- /// If `new_byte_count` is 0 then this is a free and it is guaranteed that
- /// `old_mem.len != 0`.
+ /// `new_byte_count` must be greater than zero
new_byte_count: usize,
/// Guaranteed to be >= 1.
/// Guaranteed to be a power of 2.
@@ -147,18 +212,20 @@ fn reallocBytes(
return_address: usize,
) Error![]u8 {
if (old_mem.len == 0) {
- const new_mem = try self.vtable.alloc(self.ptr, new_byte_count, new_alignment, len_align, return_address);
+ const new_mem = try self.rawAlloc(new_byte_count, new_alignment, len_align, return_address);
// TODO: https://github.com/ziglang/zig/issues/4298
@memset(new_mem.ptr, undefined, new_byte_count);
return new_mem;
}
+ assert(new_byte_count > 0); // `new_byte_count` must be greater than zero; this is a resize, not a free
+
if (mem.isAligned(@ptrToInt(old_mem.ptr), new_alignment)) {
if (new_byte_count <= old_mem.len) {
const shrunk_len = self.shrinkBytes(old_mem, old_alignment, new_byte_count, len_align, return_address);
return old_mem.ptr[0..shrunk_len];
}
- if (self.vtable.resize(self.ptr, old_mem, old_alignment, new_byte_count, len_align, return_address)) |resized_len| {
+ if (self.rawResize(old_mem, old_alignment, new_byte_count, len_align, return_address)) |resized_len| {
assert(resized_len >= new_byte_count);
// TODO: https://github.com/ziglang/zig/issues/4298
@memset(old_mem.ptr + new_byte_count, undefined, resized_len - new_byte_count);
@@ -184,11 +251,11 @@ fn moveBytes(
) Error![]u8 {
assert(old_mem.len > 0);
assert(new_len > 0);
- const new_mem = try self.vtable.alloc(self.ptr, new_len, new_alignment, len_align, return_address);
+ const new_mem = try self.rawAlloc(new_len, new_alignment, len_align, return_address);
@memcpy(new_mem.ptr, old_mem.ptr, math.min(new_len, old_mem.len));
// TODO https://github.com/ziglang/zig/issues/4298
@memset(old_mem.ptr, undefined, old_mem.len);
- _ = self.shrinkBytes(old_mem, old_align, 0, 0, return_address);
+ self.rawFree(old_mem, old_align, return_address);
return new_mem;
}
@@ -207,7 +274,7 @@ pub fn destroy(self: Allocator, ptr: anytype) void {
const T = info.child;
if (@sizeOf(T) == 0) return;
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
- _ = self.shrinkBytes(non_const_ptr[0..@sizeOf(T)], info.alignment, 0, 0, @returnAddress());
+ self.rawFree(non_const_ptr[0..@sizeOf(T)], info.alignment, @returnAddress());
}
/// Allocates an array of `n` items of type `T` and sets all the
@@ -326,7 +393,7 @@ pub fn allocAdvancedWithRetAddr(
.exact => 0,
.at_least => size_of_T,
};
- const byte_slice = try self.vtable.alloc(self.ptr, byte_count, a, len_align, return_address);
+ const byte_slice = try self.rawAlloc(byte_count, a, len_align, return_address);
switch (exact) {
.exact => assert(byte_slice.len == byte_count),
.at_least => assert(byte_slice.len >= byte_count),
@@ -351,7 +418,7 @@ pub fn resize(self: Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(old
}
const old_byte_slice = mem.sliceAsBytes(old_mem);
const new_byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
- const rc = try self.vtable.resize(self.ptr, old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress());
+ const rc = try self.rawResize(old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress());
assert(rc == new_byte_count);
const new_byte_slice = old_byte_slice.ptr[0..new_byte_count];
return mem.bytesAsSlice(T, new_byte_slice);
@@ -465,6 +532,11 @@ pub fn alignedShrinkWithRetAddr(
if (new_n == old_mem.len)
return old_mem;
+ if (new_n == 0) {
+ self.free(old_mem);
+ return @as([*]align(new_alignment) T, undefined)[0..0];
+ }
+
assert(new_n < old_mem.len);
assert(new_alignment <= Slice.alignment);
@@ -489,7 +561,7 @@ pub fn free(self: Allocator, memory: anytype) void {
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr));
// TODO: https://github.com/ziglang/zig/issues/4298
@memset(non_const_ptr, undefined, bytes_len);
- _ = self.shrinkBytes(non_const_ptr[0..bytes_len], Slice.alignment, 0, 0, @returnAddress());
+ self.rawFree(non_const_ptr[0..bytes_len], Slice.alignment, @returnAddress());
}
/// Copies `m` to newly allocated memory. Caller owns the memory.
@@ -520,5 +592,5 @@ pub fn shrinkBytes(
return_address: usize,
) usize {
assert(new_len <= buf.len);
- return self.vtable.resize(self.ptr, buf, buf_align, new_len, len_align, return_address) catch unreachable;
+ return self.rawResize(buf, buf_align, new_len, len_align, return_address) catch unreachable;
}
diff --git a/lib/std/testing/failing_allocator.zig b/lib/std/testing/failing_allocator.zig
index c26163a6f3..5e1084037e 100644
--- a/lib/std/testing/failing_allocator.zig
+++ b/lib/std/testing/failing_allocator.zig
@@ -41,7 +41,7 @@ pub const FailingAllocator = struct {
}
pub fn allocator(self: *FailingAllocator) mem.Allocator {
- return mem.Allocator.init(self, alloc, resize);
+ return mem.Allocator.init(self, alloc, resize, free);
}
fn alloc(
@@ -54,7 +54,7 @@ pub const FailingAllocator = struct {
if (self.index == self.fail_index) {
return error.OutOfMemory;
}
- const result = try self.internal_allocator.vtable.alloc(self.internal_allocator.ptr, len, ptr_align, len_align, return_address);
+ const result = try self.internal_allocator.rawAlloc(len, ptr_align, len_align, return_address);
self.allocated_bytes += result.len;
self.allocations += 1;
self.index += 1;
@@ -69,18 +69,26 @@ pub const FailingAllocator = struct {
len_align: u29,
ra: usize,
) error{OutOfMemory}!usize {
- const r = self.internal_allocator.vtable.resize(self.internal_allocator.ptr, old_mem, old_align, new_len, len_align, ra) catch |e| {
+ const r = self.internal_allocator.rawResize(old_mem, old_align, new_len, len_align, ra) catch |e| {
std.debug.assert(new_len > old_mem.len);
return e;
};
- if (new_len == 0) {
- self.deallocations += 1;
- self.freed_bytes += old_mem.len;
- } else if (r < old_mem.len) {
+ if (r < old_mem.len) {
self.freed_bytes += old_mem.len - r;
} else {
self.allocated_bytes += r - old_mem.len;
}
return r;
}
+
+ fn free(
+ self: *FailingAllocator,
+ old_mem: []u8,
+ old_align: u29,
+ ra: usize,
+ ) void {
+ self.internal_allocator.rawFree(old_mem, old_align, ra);
+ self.deallocations += 1;
+ self.freed_bytes += old_mem.len;
+ }
};
diff --git a/src/tracy.zig b/src/tracy.zig
index 83e31e5764..064374030f 100644
--- a/src/tracy.zig
+++ b/src/tracy.zig
@@ -155,13 +155,10 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type {
}
}
- if (resized_len != 0) {
- // this was a shrink or a resize
- if (name) |n| {
- allocNamed(buf.ptr, resized_len, n);
- } else {
- alloc(buf.ptr, resized_len);
- }
+ if (name) |n| {
+ allocNamed(buf.ptr, resized_len, n);
+ } else {
+ alloc(buf.ptr, resized_len);
}
return resized_len;
@@ -172,6 +169,15 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type {
return err;
}
}
+
+ fn freeFn(self: *Self, buf: []u8, buf_align: u29, ret_addr: usize) void {
+ self.parent_allocator.rawFree(buf, buf_align, ret_addr);
+ if (name) |n| {
+ freeNamed(buf.ptr, n);
+ } else {
+ free(buf.ptr);
+ }
+ }
};
}
--
cgit v1.2.3
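
Taken together, the hunks above split deallocation out of resize into a dedicated `free` entry in the vtable and route every virtual call through the new `rawAlloc`/`rawResize`/`rawFree` helpers. To make the shape of the new interface concrete, here is a minimal sketch of an allocator written against this three-function vtable. The `BumpAllocator` name and its fixed-buffer logic are illustrative only, not part of the patch; the signatures follow the API as of this commit, where resize still returns `Error!usize` (the next commit changes that to an optional).

const std = @import("std");
const Allocator = std.mem.Allocator;

const BumpAllocator = struct {
    buffer: []u8,
    end_index: usize = 0,

    pub fn allocator(self: *BumpAllocator) Allocator {
        return Allocator.init(self, alloc, resize, free);
    }

    fn alloc(self: *BumpAllocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Allocator.Error![]u8 {
        _ = len_align;
        _ = ret_addr;
        const base = @ptrToInt(self.buffer.ptr);
        const start = std.mem.alignForward(base + self.end_index, ptr_align) - base;
        if (start + len > self.buffer.len) return error.OutOfMemory;
        self.end_index = start + len;
        // Returning exactly `len` bytes satisfies the len_align == 0 contract.
        return self.buffer[start..self.end_index];
    }

    fn resize(self: *BumpAllocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Allocator.Error!usize {
        _ = self;
        _ = buf_align;
        _ = len_align;
        _ = ret_addr;
        // Shrinking in place always succeeds; this sketch never grows in place,
        // and per the vtable contract it may only fail when new_len > buf.len.
        if (new_len <= buf.len) return new_len;
        return error.OutOfMemory;
    }

    fn free(self: *BumpAllocator, buf: []u8, buf_align: u29, ret_addr: usize) void {
        // A bump allocator cannot reclaim individual allocations, so free is a
        // no-op; Allocator.NoOpFree(BumpAllocator).noOpFree would serve equally.
        _ = self;
        _ = buf;
        _ = buf_align;
        _ = ret_addr;
    }
};

Usage mirrors what the patch does for GeneralPurposeAllocator: construct the state (`var bump = BumpAllocator{ .buffer = &some_buffer };`) and hand out the interface with `bump.allocator()`.
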
From 066eaa5e9cbfde172449f6d95bb884c7d86ac10c Mon Sep 17 00:00:00 2001
From: Lee Cannon
Date: Sun, 7 Nov 2021 01:40:06 +0000
Subject: allocgate: change resize to return optional instead of error
---
lib/std/build/OptionsStep.zig | 2 +-
lib/std/heap.zig | 34 ++++---
lib/std/heap/arena_allocator.zig | 19 ++--
lib/std/heap/general_purpose_allocator.zig | 20 ++--
lib/std/heap/log_to_writer_allocator.zig | 13 +--
lib/std/heap/logging_allocator.zig | 18 ++--
lib/std/mem.zig | 8 +-
lib/std/mem/Allocator.zig | 148 ++++++++---------------------
lib/std/testing/failing_allocator.zig | 7 +-
src/link/MachO.zig | 2 +-
src/link/tapi.zig | 2 +-
src/main.zig | 2 +-
src/tracy.zig | 25 ++---
test/compare_output.zig | 6 +-
tools/update-linux-headers.zig | 2 +-
15 files changed, 112 insertions(+), 196 deletions(-)
diff --git a/lib/std/build/OptionsStep.zig b/lib/std/build/OptionsStep.zig
index d106b05171..eae5983845 100644
--- a/lib/std/build/OptionsStep.zig
+++ b/lib/std/build/OptionsStep.zig
@@ -350,5 +350,5 @@ test "OptionsStep" {
\\
, options.contents.items);
- _ = try std.zig.parse(&arena.allocator, try options.contents.toOwnedSliceSentinel(0));
+ _ = try std.zig.parse(arena.allocator(), try options.contents.toOwnedSliceSentinel(0));
}
diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index 2d3a96676d..4ea0ff718f 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -129,7 +129,7 @@ const CAllocator = struct {
new_len: usize,
len_align: u29,
return_address: usize,
- ) Allocator.Error!usize {
+ ) ?usize {
_ = buf_align;
_ = return_address;
if (new_len <= buf.len) {
@@ -141,7 +141,7 @@ const CAllocator = struct {
return mem.alignAllocLen(full_len, new_len, len_align);
}
}
- return error.OutOfMemory;
+ return null;
}
fn free(
@@ -205,13 +205,13 @@ fn rawCResize(
new_len: usize,
len_align: u29,
ret_addr: usize,
-) Allocator.Error!usize {
+) ?usize {
_ = old_align;
_ = ret_addr;
if (new_len <= buf.len) {
return mem.alignAllocLen(buf.len, new_len, len_align);
}
- return error.OutOfMemory;
+ return null;
}
fn rawCFree(
@@ -361,7 +361,7 @@ const PageAllocator = struct {
new_size: usize,
len_align: u29,
return_address: usize,
- ) Allocator.Error!usize {
+ ) ?usize {
_ = buf_align;
_ = return_address;
const new_size_aligned = mem.alignForward(new_size, mem.page_size);
@@ -387,7 +387,7 @@ const PageAllocator = struct {
if (new_size_aligned <= old_size_aligned) {
return alignPageAllocLen(new_size_aligned, new_size, len_align);
}
- return error.OutOfMemory;
+ return null;
}
const buf_aligned_len = mem.alignForward(buf_unaligned.len, mem.page_size);
@@ -403,7 +403,7 @@ const PageAllocator = struct {
// TODO: call mremap
// TODO: if the next_mmap_addr_hint is within the remapped range, update it
- return error.OutOfMemory;
+ return null;
}
fn free(_: *c_void, buf_unaligned: []u8, buf_align: u29, return_address: usize) void {
@@ -579,11 +579,11 @@ const WasmPageAllocator = struct {
new_len: usize,
len_align: u29,
return_address: usize,
- ) error{OutOfMemory}!usize {
+ ) ?usize {
_ = buf_align;
_ = return_address;
const aligned_len = mem.alignForward(buf.len, mem.page_size);
- if (new_len > aligned_len) return error.OutOfMemory;
+ if (new_len > aligned_len) return null;
const current_n = nPages(aligned_len);
const new_n = nPages(new_len);
if (new_n != current_n) {
@@ -674,7 +674,7 @@ pub const HeapAllocator = switch (builtin.os.tag) {
new_size: usize,
len_align: u29,
return_address: usize,
- ) error{OutOfMemory}!usize {
+ ) ?usize {
_ = buf_align;
_ = return_address;
@@ -686,7 +686,7 @@ pub const HeapAllocator = switch (builtin.os.tag) {
os.windows.HEAP_REALLOC_IN_PLACE_ONLY,
@intToPtr(*c_void, root_addr),
amt,
- ) orelse return error.OutOfMemory;
+ ) orelse return null;
assert(new_ptr == @intToPtr(*c_void, root_addr));
const return_len = init: {
if (len_align == 0) break :init new_size;
@@ -788,14 +788,13 @@ pub const FixedBufferAllocator = struct {
new_size: usize,
len_align: u29,
return_address: usize,
- ) Allocator.Error!usize {
+ ) ?usize {
_ = buf_align;
_ = return_address;
assert(self.ownsSlice(buf)); // sanity check
if (!self.isLastAllocation(buf)) {
- if (new_size > buf.len)
- return error.OutOfMemory;
+ if (new_size > buf.len) return null;
return mem.alignAllocLen(buf.len, new_size, len_align);
}
@@ -806,9 +805,8 @@ pub const FixedBufferAllocator = struct {
}
const add = new_size - buf.len;
- if (add + self.end_index > self.buffer.len) {
- return error.OutOfMemory;
- }
+ if (add + self.end_index > self.buffer.len) return null;
+
self.end_index += add;
return new_size;
}
@@ -891,7 +889,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
new_len: usize,
len_align: u29,
return_address: usize,
- ) error{OutOfMemory}!usize {
+ ) ?usize {
if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, buf_align, new_len, len_align, return_address);
} else {
diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig
index c5a8d6bc7e..4bc5d58c1a 100644
--- a/lib/std/heap/arena_allocator.zig
+++ b/lib/std/heap/arena_allocator.zig
@@ -78,26 +78,23 @@ pub const ArenaAllocator = struct {
const bigger_buf_size = @sizeOf(BufNode) + new_end_index;
// Try to grow the buffer in-place
- cur_node.data = self.child_allocator.resize(cur_node.data, bigger_buf_size) catch |err| switch (err) {
- error.OutOfMemory => {
- // Allocate a new node if that's not possible
- cur_node = try self.createNode(cur_buf.len, n + ptr_align);
- continue;
- },
+ cur_node.data = self.child_allocator.resize(cur_node.data, bigger_buf_size) orelse {
+ // Allocate a new node if that's not possible
+ cur_node = try self.createNode(cur_buf.len, n + ptr_align);
+ continue;
};
}
}
- fn resize(self: *ArenaAllocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Allocator.Error!usize {
+ fn resize(self: *ArenaAllocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize {
_ = buf_align;
_ = len_align;
_ = ret_addr;
- const cur_node = self.state.buffer_list.first orelse return error.OutOfMemory;
+ const cur_node = self.state.buffer_list.first orelse return null;
const cur_buf = cur_node.data[@sizeOf(BufNode)..];
if (@ptrToInt(cur_buf.ptr) + self.state.end_index != @ptrToInt(buf.ptr) + buf.len) {
- if (new_len > buf.len)
- return error.OutOfMemory;
+ if (new_len > buf.len) return null;
return new_len;
}
@@ -108,7 +105,7 @@ pub const ArenaAllocator = struct {
self.state.end_index += new_len - buf.len;
return new_len;
} else {
- return error.OutOfMemory;
+ return null;
}
}
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index 5687b1efb2..8160bc2a66 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -517,7 +517,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
new_size: usize,
len_align: u29,
ret_addr: usize,
- ) Error!usize {
+ ) ?usize {
const entry = self.large_allocations.getEntry(@ptrToInt(old_mem.ptr)) orelse {
if (config.safety) {
@panic("Invalid free");
@@ -557,7 +557,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
if (config.enable_memory_limit) {
const new_req_bytes = prev_req_bytes + new_size - entry.value_ptr.requested_size;
if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) {
- return error.OutOfMemory;
+ return null;
}
self.total_requested_bytes = new_req_bytes;
}
@@ -565,7 +565,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
self.total_requested_bytes = prev_req_bytes;
};
- const result_len = try self.backing_allocator.rawResize(old_mem, old_align, new_size, len_align, ret_addr);
+ const result_len = self.backing_allocator.rawResize(old_mem, old_align, new_size, len_align, ret_addr) orelse return null;
if (config.enable_memory_limit) {
entry.value_ptr.requested_size = new_size;
@@ -650,7 +650,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
new_size: usize,
len_align: u29,
ret_addr: usize,
- ) Error!usize {
+ ) ?usize {
self.mutex.lock();
defer self.mutex.unlock();
@@ -705,7 +705,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
if (config.enable_memory_limit) {
const new_req_bytes = prev_req_bytes + new_size - old_mem.len;
if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) {
- return error.OutOfMemory;
+ return null;
}
self.total_requested_bytes = new_req_bytes;
}
@@ -726,7 +726,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
return new_size;
}
- return error.OutOfMemory;
+ return null;
}
fn free(
@@ -735,8 +735,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
old_align: u29,
ret_addr: usize,
) void {
- const held = self.mutex.acquire();
- defer held.release();
+ self.mutex.lock();
+ defer self.mutex.unlock();
assert(old_mem.len != 0);
@@ -850,7 +850,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
return true;
}
- fn alloc(self: Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 {
+ fn alloc(self: *Self, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 {
self.mutex.lock();
defer self.mutex.unlock();
@@ -1065,7 +1065,7 @@ test "shrink large object to large object" {
slice[0] = 0x12;
slice[60] = 0x34;
- slice = try allocator.resize(slice, page_size * 2 + 1);
+ slice = allocator.resize(slice, page_size * 2 + 1) orelse return;
try std.testing.expect(slice[0] == 0x12);
try std.testing.expect(slice[60] == 0x34);
diff --git a/lib/std/heap/log_to_writer_allocator.zig b/lib/std/heap/log_to_writer_allocator.zig
index fa8c19e0a0..c63c1a826f 100644
--- a/lib/std/heap/log_to_writer_allocator.zig
+++ b/lib/std/heap/log_to_writer_allocator.zig
@@ -45,22 +45,23 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
new_len: usize,
len_align: u29,
ra: usize,
- ) error{OutOfMemory}!usize {
+ ) ?usize {
if (new_len <= buf.len) {
self.writer.print("shrink: {} to {}\n", .{ buf.len, new_len }) catch {};
} else {
self.writer.print("expand: {} to {}", .{ buf.len, new_len }) catch {};
}
+
if (self.parent_allocator.rawResize(buf, buf_align, new_len, len_align, ra)) |resized_len| {
if (new_len > buf.len) {
self.writer.print(" success!\n", .{}) catch {};
}
return resized_len;
- } else |e| {
- std.debug.assert(new_len > buf.len);
- self.writer.print(" failure!\n", .{}) catch {};
- return e;
}
+
+ std.debug.assert(new_len > buf.len);
+ self.writer.print(" failure!\n", .{}) catch {};
+ return null;
}
fn free(
@@ -95,7 +96,7 @@ test "LogToWriterAllocator" {
var a = try allocator.alloc(u8, 10);
a = allocator.shrink(a, 5);
try std.testing.expect(a.len == 5);
- try std.testing.expectError(error.OutOfMemory, allocator.resize(a, 20));
+ try std.testing.expect(allocator.resize(a, 20) == null);
allocator.free(a);
try std.testing.expectEqualSlices(u8,
diff --git a/lib/std/heap/logging_allocator.zig b/lib/std/heap/logging_allocator.zig
index b631cd0b1b..0bd0755cfc 100644
--- a/lib/std/heap/logging_allocator.zig
+++ b/lib/std/heap/logging_allocator.zig
@@ -77,7 +77,7 @@ pub fn ScopedLoggingAllocator(
new_len: usize,
len_align: u29,
ra: usize,
- ) error{OutOfMemory}!usize {
+ ) ?usize {
if (self.parent_allocator.rawResize(buf, buf_align, new_len, len_align, ra)) |resized_len| {
if (new_len <= buf.len) {
logHelper(
@@ -94,15 +94,15 @@ pub fn ScopedLoggingAllocator(
}
return resized_len;
- } else |err| {
- std.debug.assert(new_len > buf.len);
- logHelper(
- failure_log_level,
- "expand - failure: {s} - {} to {}, len_align: {}, buf_align: {}",
- .{ @errorName(err), buf.len, new_len, len_align, buf_align },
- );
- return err;
}
+
+ std.debug.assert(new_len > buf.len);
+ logHelper(
+ failure_log_level,
+ "expand - failure - {} to {}, len_align: {}, buf_align: {}",
+ .{ buf.len, new_len, len_align, buf_align },
+ );
+ return null;
}
fn free(
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index 9c7ce3867b..c310835b61 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -88,14 +88,14 @@ pub fn ValidationAllocator(comptime T: type) type {
new_len: usize,
len_align: u29,
ret_addr: usize,
- ) Allocator.Error!usize {
+ ) ?usize {
assert(buf.len > 0);
if (len_align != 0) {
assert(mem.isAlignedAnyAlign(new_len, len_align));
assert(new_len >= len_align);
}
const underlying = self.getUnderlyingAllocatorPtr();
- const result = try underlying.rawResize(buf, buf_align, new_len, len_align, ret_addr);
+ const result = underlying.rawResize(buf, buf_align, new_len, len_align, ret_addr) orelse return null;
if (len_align == 0) {
assert(result == new_len);
} else {
@@ -188,7 +188,7 @@ test "Allocator.resize" {
defer testing.allocator.free(values);
for (values) |*v, i| v.* = @intCast(T, i);
- values = try testing.allocator.resize(values, values.len + 10);
+ values = testing.allocator.resize(values, values.len + 10) orelse return error.OutOfMemory;
try testing.expect(values.len == 110);
}
@@ -203,7 +203,7 @@ test "Allocator.resize" {
defer testing.allocator.free(values);
for (values) |*v, i| v.* = @intToFloat(T, i);
- values = try testing.allocator.resize(values, values.len + 10);
+ values = testing.allocator.resize(values, values.len + 10) orelse return error.OutOfMemory;
try testing.expect(values.len == 110);
}
}
diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig
index 6edad7e05b..29fbf7c2c1 100644
--- a/lib/std/mem/Allocator.zig
+++ b/lib/std/mem/Allocator.zig
@@ -29,9 +29,9 @@ pub const VTable = struct {
/// length returned by `alloc` or `resize`. `buf_align` must equal the same value
/// that was passed as the `ptr_align` parameter to the original `alloc` call.
///
- /// error.OutOfMemory can only be returned if `new_len` is greater than `buf.len`.
+ /// `null` can only be returned if `new_len` is greater than `buf.len`.
/// If `buf` cannot be expanded to accommodate `new_len`, then the allocation MUST be
- /// unmodified and error.OutOfMemory MUST be returned.
+ /// unmodified and `null` MUST be returned.
///
/// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes,
/// otherwise, the length must be aligned to `len_align`. Note that `len_align` does *not*
@@ -42,7 +42,7 @@ pub const VTable = struct {
///
/// `ret_addr` is optionally provided as the first return address of the allocation call stack.
/// If the value is `0` it means no return address has been provided.
- resize: fn (ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize,
+ resize: fn (ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize,
/// Free and invalidate a buffer. `buf.len` must equal the most recent length returned by `alloc` or `resize`.
/// `buf_align` must equal the same value that was passed as the `ptr_align` parameter to the original `alloc` call.
@@ -55,7 +55,7 @@ pub const VTable = struct {
pub fn init(
pointer: anytype,
comptime allocFn: fn (ptr: @TypeOf(pointer), len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8,
- comptime resizeFn: fn (ptr: @TypeOf(pointer), buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize,
+ comptime resizeFn: fn (ptr: @TypeOf(pointer), buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize,
comptime freeFn: fn (ptr: @TypeOf(pointer), buf: []u8, buf_align: u29, ret_addr: usize) void,
) Allocator {
const Ptr = @TypeOf(pointer);
@@ -71,7 +71,7 @@ pub fn init(
const self = @ptrCast(Ptr, @alignCast(alignment, ptr));
return @call(.{ .modifier = .always_inline }, allocFn, .{ self, len, ptr_align, len_align, ret_addr });
}
- fn resize(ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize {
+ fn resize(ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize {
assert(new_len != 0);
const self = @ptrCast(Ptr, @alignCast(alignment, ptr));
return @call(.{ .modifier = .always_inline }, resizeFn, .{ self, buf, buf_align, new_len, len_align, ret_addr });
@@ -104,14 +104,12 @@ pub fn NoResize(comptime AllocatorType: type) type {
new_len: usize,
len_align: u29,
ret_addr: usize,
- ) Error!usize {
+ ) ?usize {
_ = self;
_ = buf_align;
_ = len_align;
_ = ret_addr;
- if (new_len > buf.len)
- return error.OutOfMemory;
- return new_len;
+ return if (new_len > buf.len) null else new_len;
}
};
}
@@ -157,7 +155,7 @@ pub inline fn rawAlloc(self: Allocator, len: usize, ptr_align: u29, len_align: u
}
/// This function is not intended to be called except from within the implementation of an Allocator
-pub inline fn rawResize(self: Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize {
+pub inline fn rawResize(self: Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize {
return self.vtable.resize(self.ptr, buf, buf_align, new_len, len_align, ret_addr);
}
@@ -166,99 +164,6 @@ pub inline fn rawFree(self: Allocator, buf: []u8, buf_align: u29, ret_addr: usiz
return self.vtable.free(self.ptr, buf, buf_align, ret_addr);
}
-/// Realloc is used to modify the size or alignment of an existing allocation,
-/// as well as to provide the allocator with an opportunity to move an allocation
-/// to a better location.
-/// When the size/alignment is greater than the previous allocation, this function
-/// returns `error.OutOfMemory` when the requested new allocation could not be granted.
-/// When the size/alignment is less than or equal to the previous allocation,
-/// this function returns `error.OutOfMemory` when the allocator decides the client
-/// would be better off keeping the extra alignment/size. Clients will call
-/// `vtable.resize` when they require the allocator to track a new alignment/size,
-/// and so this function should only return success when the allocator considers
-/// the reallocation desirable from the allocator's perspective.
-/// As an example, `std.ArrayList` tracks a "capacity", and therefore can handle
-/// reallocation failure, even when `new_n` <= `old_mem.len`. A `FixedBufferAllocator`
-/// would always return `error.OutOfMemory` for `reallocFn` when the size/alignment
-/// is less than or equal to the old allocation, because it cannot reclaim the memory,
-/// and thus the `std.ArrayList` would be better off retaining its capacity.
-/// When `reallocFn` returns,
-/// `return_value[0..min(old_mem.len, new_byte_count)]` must be the same
-/// as `old_mem` was when `reallocFn` is called. The bytes of
-/// `return_value[old_mem.len..]` have undefined values.
-/// The returned slice must have its pointer aligned at least to `new_alignment` bytes.
-fn reallocBytes(
- self: Allocator,
- /// Guaranteed to be the same as what was returned from most recent call to
- /// `vtable.alloc` or `vtable.resize`.
- /// If `old_mem.len == 0` then this is a new allocation and `new_byte_count`
- /// is guaranteed to be >= 1.
- old_mem: []u8,
- /// If `old_mem.len == 0` then this is `undefined`, otherwise:
- /// Guaranteed to be the same as what was passed to `allocFn`.
- /// Guaranteed to be >= 1.
- /// Guaranteed to be a power of 2.
- old_alignment: u29,
- /// `new_byte_count` must be greater than zero
- new_byte_count: usize,
- /// Guaranteed to be >= 1.
- /// Guaranteed to be a power of 2.
- /// Returned slice's pointer must have this alignment.
- new_alignment: u29,
- /// 0 indicates the length of the slice returned MUST match `new_byte_count` exactly
- /// non-zero means the length of the returned slice must be aligned by `len_align`
- /// `new_len` must be aligned by `len_align`
- len_align: u29,
- return_address: usize,
-) Error![]u8 {
- if (old_mem.len == 0) {
- const new_mem = try self.rawAlloc(new_byte_count, new_alignment, len_align, return_address);
- // TODO: https://github.com/ziglang/zig/issues/4298
- @memset(new_mem.ptr, undefined, new_byte_count);
- return new_mem;
- }
-
- assert(new_byte_count > 0); // `new_byte_count` must be greater than zero; this is a resize, not a free
-
- if (mem.isAligned(@ptrToInt(old_mem.ptr), new_alignment)) {
- if (new_byte_count <= old_mem.len) {
- const shrunk_len = self.shrinkBytes(old_mem, old_alignment, new_byte_count, len_align, return_address);
- return old_mem.ptr[0..shrunk_len];
- }
- if (self.rawResize(old_mem, old_alignment, new_byte_count, len_align, return_address)) |resized_len| {
- assert(resized_len >= new_byte_count);
- // TODO: https://github.com/ziglang/zig/issues/4298
- @memset(old_mem.ptr + new_byte_count, undefined, resized_len - new_byte_count);
- return old_mem.ptr[0..resized_len];
- } else |_| {}
- }
- if (new_byte_count <= old_mem.len and new_alignment <= old_alignment) {
- return error.OutOfMemory;
- }
- return self.moveBytes(old_mem, old_alignment, new_byte_count, new_alignment, len_align, return_address);
-}
-
-/// Move the given memory to a new location in the given allocator to accommodate a new
-/// size and alignment.
-fn moveBytes(
- self: Allocator,
- old_mem: []u8,
- old_align: u29,
- new_len: usize,
- new_alignment: u29,
- len_align: u29,
- return_address: usize,
-) Error![]u8 {
- assert(old_mem.len > 0);
- assert(new_len > 0);
- const new_mem = try self.rawAlloc(new_len, new_alignment, len_align, return_address);
- @memcpy(new_mem.ptr, old_mem.ptr, math.min(new_len, old_mem.len));
- // TODO https://github.com/ziglang/zig/issues/4298
- @memset(old_mem.ptr, undefined, old_mem.len);
- self.rawFree(old_mem, old_align, return_address);
- return new_mem;
-}
-
/// Returns a pointer to undefined memory.
/// Call `destroy` with the result to free the memory.
pub fn create(self: Allocator, comptime T: type) Error!*T {
@@ -409,7 +314,7 @@ pub fn allocAdvancedWithRetAddr(
}
/// Increases or decreases the size of an allocation. It is guaranteed to not move the pointer.
-pub fn resize(self: Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(old_mem) {
+pub fn resize(self: Allocator, old_mem: anytype, new_n: usize) ?@TypeOf(old_mem) {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
const T = Slice.child;
if (new_n == 0) {
@@ -417,8 +322,8 @@ pub fn resize(self: Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(old
return &[0]T{};
}
const old_byte_slice = mem.sliceAsBytes(old_mem);
- const new_byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
- const rc = try self.rawResize(old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress());
+ const new_byte_count = math.mul(usize, @sizeOf(T), new_n) catch return null;
+ const rc = self.rawResize(old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress()) orelse return null;
assert(rc == new_byte_count);
const new_byte_slice = old_byte_slice.ptr[0..new_byte_count];
return mem.bytesAsSlice(T, new_byte_slice);
@@ -488,8 +393,31 @@ pub fn reallocAdvancedWithRetAddr(
.exact => 0,
.at_least => @sizeOf(T),
};
- const new_byte_slice = try self.reallocBytes(old_byte_slice, Slice.alignment, byte_count, new_alignment, len_align, return_address);
- return mem.bytesAsSlice(T, @alignCast(new_alignment, new_byte_slice));
+
+ if (mem.isAligned(@ptrToInt(old_byte_slice.ptr), new_alignment)) {
+ if (byte_count <= old_byte_slice.len) {
+ const shrunk_len = self.shrinkBytes(old_byte_slice, Slice.alignment, byte_count, len_align, return_address);
+ return mem.bytesAsSlice(T, @alignCast(new_alignment, old_byte_slice.ptr[0..shrunk_len]));
+ }
+
+ if (self.rawResize(old_byte_slice, Slice.alignment, byte_count, len_align, return_address)) |resized_len| {
+ // TODO: https://github.com/ziglang/zig/issues/4298
+ @memset(old_byte_slice.ptr + byte_count, undefined, resized_len - byte_count);
+ return mem.bytesAsSlice(T, @alignCast(new_alignment, old_byte_slice.ptr[0..resized_len]));
+ }
+ }
+
+ if (byte_count <= old_byte_slice.len and new_alignment <= Slice.alignment) {
+ return error.OutOfMemory;
+ }
+
+ const new_mem = try self.rawAlloc(byte_count, new_alignment, len_align, return_address);
+ @memcpy(new_mem.ptr, old_byte_slice.ptr, math.min(byte_count, old_byte_slice.len));
+ // TODO https://github.com/ziglang/zig/issues/4298
+ @memset(old_byte_slice.ptr, undefined, old_byte_slice.len);
+ self.rawFree(old_byte_slice, Slice.alignment, return_address);
+
+ return mem.bytesAsSlice(T, @alignCast(new_alignment, new_mem));
}
/// Prefer calling realloc to shrink if you can tolerate failure, such as
@@ -580,7 +508,7 @@ pub fn dupeZ(allocator: Allocator, comptime T: type, m: []const T) ![:0]T {
}
/// Call `vtable.resize`, but caller guarantees that `new_len` <= `buf.len` meaning
-/// error.OutOfMemory should be impossible.
+/// that a `null` return value should be impossible.
/// This function allows a runtime `buf_align` value. Callers should generally prefer
/// to call `shrink` directly.
pub fn shrinkBytes(
@@ -592,5 +520,5 @@ pub fn shrinkBytes(
return_address: usize,
) usize {
assert(new_len <= buf.len);
- return self.rawResize(buf, buf_align, new_len, len_align, return_address) catch unreachable;
+ return self.rawResize(buf, buf_align, new_len, len_align, return_address) orelse unreachable;
}
diff --git a/lib/std/testing/failing_allocator.zig b/lib/std/testing/failing_allocator.zig
index 5e1084037e..677ca6f51b 100644
--- a/lib/std/testing/failing_allocator.zig
+++ b/lib/std/testing/failing_allocator.zig
@@ -68,11 +68,8 @@ pub const FailingAllocator = struct {
new_len: usize,
len_align: u29,
ra: usize,
- ) error{OutOfMemory}!usize {
- const r = self.internal_allocator.rawResize(old_mem, old_align, new_len, len_align, ra) catch |e| {
- std.debug.assert(new_len > old_mem.len);
- return e;
- };
+ ) ?usize {
+ const r = self.internal_allocator.rawResize(old_mem, old_align, new_len, len_align, ra) orelse return null;
if (r < old_mem.len) {
self.freed_bytes += old_mem.len - r;
} else {
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index db2b8ffc42..fc592ab5e8 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -1288,7 +1288,7 @@ fn parseDependentLibs(self: *MachO, syslibroot: ?[]const u8, dependent_libs: any
// TODO this should not be performed if the user specifies `-flat_namespace` flag.
// See ld64 manpages.
var arena_alloc = std.heap.ArenaAllocator.init(self.base.allocator);
- const arena = &arena_alloc.allocator;
+ const arena = arena_alloc.allocator();
defer arena_alloc.deinit();
while (dependent_libs.readItem()) |*id| {
diff --git a/src/link/tapi.zig b/src/link/tapi.zig
index 7a55a5104d..e31ca92ed9 100644
--- a/src/link/tapi.zig
+++ b/src/link/tapi.zig
@@ -138,7 +138,7 @@ pub const LibStub = struct {
err: {
log.debug("trying to parse as []TbdV3", .{});
const inner = lib_stub.yaml.parse([]TbdV3) catch break :err;
- var out = try lib_stub.yaml.arena.allocator.alloc(Tbd, inner.len);
+ var out = try lib_stub.yaml.arena.allocator().alloc(Tbd, inner.len);
for (inner) |doc, i| {
out[i] = .{ .v3 = doc };
}
diff --git a/src/main.zig b/src/main.zig
index 981a76a364..ad86858240 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -159,7 +159,7 @@ pub fn main() anyerror!void {
if (tracy.enable_allocation) {
var gpa_tracy = tracy.tracyAllocator(gpa);
- return mainArgs(&gpa_tracy.allocator, arena, args);
+ return mainArgs(gpa_tracy.allocator(), arena, args);
}
return mainArgs(gpa, arena, args);
diff --git a/src/tracy.zig b/src/tracy.zig
index 064374030f..9a5bcc749b 100644
--- a/src/tracy.zig
+++ b/src/tracy.zig
@@ -113,20 +113,16 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type {
const Self = @This();
- pub fn allocator(self: *Self) std.mem.Allocator {
- return std.mem.Allocator.init(self, allocFn, resizeFn);
- }
-
- pub fn init(allocator: std.mem.Allocator) Self {
+ pub fn init(parent_allocator: std.mem.Allocator) Self {
return .{
- .parent_allocator = allocator,
- .allocator = .{
- .allocFn = allocFn,
- .resizeFn = resizeFn,
- },
+ .parent_allocator = parent_allocator,
};
}
+ pub fn allocator(self: *Self) std.mem.Allocator {
+ return std.mem.Allocator.init(self, allocFn, resizeFn, freeFn);
+ }
+
fn allocFn(self: *Self, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) std.mem.Allocator.Error![]u8 {
const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align, ret_addr);
if (result) |data| {
@@ -162,12 +158,11 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type {
}
return resized_len;
- } else |err| {
- // this is not really an error condition, during normal operation the compiler hits this case thousands of times
- // due to this emitting messages for it is both slow and causes clutter
- // messageColor("allocation resize failed", 0xFF0000);
- return err;
}
+
+ // this is not really an error condition: during normal operation the compiler
+ // hits this case thousands of times, so logging it would be slow and would clutter the output
+ return null;
}
fn freeFn(self: *Self, buf: []u8, buf_align: u29, ret_addr: usize) void {
diff --git a/test/compare_output.zig b/test/compare_output.zig
index 8a0bfc1ac7..c8b157c335 100644
--- a/test/compare_output.zig
+++ b/test/compare_output.zig
@@ -496,7 +496,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\ var a = try allocator.alloc(u8, 10);
\\ a = allocator.shrink(a, 5);
\\ try std.testing.expect(a.len == 5);
- \\ try std.testing.expectError(error.OutOfMemory, allocator.resize(a, 20));
+ \\ try std.testing.expect(allocator.resize(a, 20) == null);
\\ allocator.free(a);
\\}
\\
@@ -514,8 +514,8 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
,
\\debug: alloc - success - len: 10, ptr_align: 1, len_align: 0
\\debug: shrink - success - 10 to 5, len_align: 0, buf_align: 1
- \\error: expand - failure: OutOfMemory - 5 to 20, len_align: 0, buf_align: 1
- \\debug: free - success - len: 5
+ \\error: expand - failure - 5 to 20, len_align: 0, buf_align: 1
+ \\debug: free - len: 5
\\
);
}
diff --git a/tools/update-linux-headers.zig b/tools/update-linux-headers.zig
index e7693687c4..0a3fb85f71 100644
--- a/tools/update-linux-headers.zig
+++ b/tools/update-linux-headers.zig
@@ -131,7 +131,7 @@ const PathTable = std.StringHashMap(*TargetToHash);
pub fn main() !void {
var arena_state = std.heap.ArenaAllocator.init(std.heap.page_allocator);
- const arena = &arena_state.allocator;
+ const arena = arena_state.allocator();
const args = try std.process.argsAlloc(arena);
var search_paths = std.ArrayList([]const u8).init(arena);
var opt_out_dir: ?[]const u8 = null;
--
cgit v1.2.3
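
With this second commit applied, resize reports failure by returning `null` instead of `error.OutOfMemory`, so implementations return `?usize` and callers replace `try`/`catch` with `orelse`. A short caller-side sketch under the API as of this commit; the test body is illustrative, not taken from the patch, and assumes FixedBufferAllocator exposes the `allocator()` accessor introduced earlier in the series.

const std = @import("std");

test "resize returns an optional after allocgate" {
    var buf: [256]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&buf);
    const allocator = fba.allocator();

    var slice = try allocator.alloc(u8, 16);
    // Growing the most recent allocation succeeds in place...
    slice = allocator.resize(slice, 32) orelse return error.OutOfMemory;
    try std.testing.expect(slice.len == 32);
    // ...while a request the fixed buffer cannot satisfy now yields null.
    try std.testing.expect(allocator.resize(slice, 1024) == null);
}
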