author    Andrew Kelley <andrew@ziglang.org>  2019-03-11 13:34:51 -0400
committer Andrew Kelley <andrew@ziglang.org>  2019-03-11 13:34:51 -0400
commit    d633dcd07a053d76942217ca845ff2735a0ce6a2 (patch)
tree      0e7580ec1e9a9c88da1f6fe94ca5113684f1aa02 /std
parent    5362ca204ff68716727915d98bd25d9d7b2c9f80 (diff)
remove the valgrind integration with std.mem.Allocator
See #1837
Diffstat (limited to 'std')
 std/heap.zig                               | 21 +++++++--------------
 std/mem.zig                                | 15 +--------------
 std/valgrind/index.zig => std/valgrind.zig |  0
 3 files changed, 7 insertions(+), 29 deletions(-)
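
With the automatic reporting gone, code that wants valgrind to track a Zig
allocation can issue the client requests itself. A minimal sketch, assuming
only the std.valgrind calls visible in the removed lines below
(allocAnnotated and freeAnnotated are hypothetical helper names):

    const std = @import("std");

    // Allocate and report the block to valgrind by hand (0-byte redzone,
    // contents not zeroed), mirroring what Allocator.alloc used to do.
    fn allocAnnotated(allocator: *std.mem.Allocator, n: usize) ![]u8 {
        const slice = try allocator.alloc(u8, n);
        _ = std.valgrind.mallocLikeBlock(slice, 0, false);
        return slice;
    }

    // Free and tell valgrind the block is gone, mirroring Allocator.free.
    fn freeAnnotated(allocator: *std.mem.Allocator, slice: []u8) void {
        allocator.free(slice);
        _ = std.valgrind.freeLikeBlock(slice.ptr, 0);
    }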
diff --git a/std/heap.zig b/std/heap.zig
index 369fa77fdb..0ea16c4e78 100644
--- a/std/heap.zig
+++ b/std/heap.zig
@@ -240,9 +240,8 @@ pub const ArenaAllocator = struct {
         while (true) {
             const cur_buf = cur_node.data[@sizeOf(BufNode)..];
             const addr = @ptrToInt(cur_buf.ptr) + self.end_index;
-            const rem = @rem(addr, alignment);
-            const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
-            const adjusted_index = self.end_index + march_forward_bytes;
+            const adjusted_addr = mem.alignForward(addr, alignment);
+            const adjusted_index = self.end_index + (adjusted_addr - addr);
             const new_end_index = adjusted_index + n;
             if (new_end_index > cur_buf.len) {
                 cur_node = try self.createNode(cur_buf.len, n + alignment);
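
mem.alignForward(addr, alignment) rounds addr up to the next multiple of
alignment, which is exactly what the three deleted lines computed by hand.
A quick illustration of the identity (a sketch, not part of this commit):

    const std = @import("std");
    const assert = std.debug.assert;

    test "alignForward replaces the @rem-based adjustment" {
        // Already-aligned addresses are returned unchanged.
        assert(std.mem.alignForward(16, 8) == 16);
        // Otherwise the address is rounded up to the next multiple.
        assert(std.mem.alignForward(17, 8) == 24);
        // (adjusted_addr - addr) is the padding the old code called
        // march_forward_bytes.
        assert(std.mem.alignForward(13, 4) - 13 == 3);
    }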
@@ -273,10 +272,6 @@ pub const FixedBufferAllocator = struct {
     buffer: []u8,
 
     pub fn init(buffer: []u8) FixedBufferAllocator {
-        // This loop gets optimized out in ReleaseFast mode
-        for (buffer) |*byte| {
-            byte.* = undefined;
-        }
         return FixedBufferAllocator{
             .allocator = Allocator{
                 .allocFn = alloc,
@@ -291,9 +286,8 @@ pub const FixedBufferAllocator = struct {
     fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
         const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
         const addr = @ptrToInt(self.buffer.ptr) + self.end_index;
-        const rem = @rem(addr, alignment);
-        const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
-        const adjusted_index = self.end_index + march_forward_bytes;
+        const adjusted_addr = mem.alignForward(addr, alignment);
+        const adjusted_index = self.end_index + (adjusted_addr - addr);
         const new_end_index = adjusted_index + n;
         if (new_end_index > self.buffer.len) {
             return error.OutOfMemory;
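
For context, FixedBufferAllocator bump-allocates out of a caller-supplied
buffer and returns error.OutOfMemory when it runs past the end. A usage
sketch against the 2019-era API shown in this file:

    const std = @import("std");

    test "FixedBufferAllocator allocates from a fixed buffer" {
        var buf: [1024]u8 = undefined;
        var fba = std.heap.FixedBufferAllocator.init(buf[0..]);
        const a = &fba.allocator;

        const slice = try a.alloc(u8, 100);
        std.debug.assert(slice.len == 100);
        a.free(slice);
    }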
@@ -330,7 +324,7 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
     if (builtin.single_threaded) {
         break :blk FixedBufferAllocator;
     } else {
-        /// lock free
+        // lock free
         break :blk struct {
             allocator: Allocator,
             end_index: usize,
@@ -353,9 +347,8 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
                 var end_index = @atomicLoad(usize, &self.end_index, builtin.AtomicOrder.SeqCst);
                 while (true) {
                     const addr = @ptrToInt(self.buffer.ptr) + end_index;
-                    const rem = @rem(addr, alignment);
-                    const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
-                    const adjusted_index = end_index + march_forward_bytes;
+                    const adjusted_addr = mem.alignForward(addr, alignment);
+                    const adjusted_index = end_index + (adjusted_addr - addr);
                     const new_end_index = adjusted_index + n;
                     if (new_end_index > self.buffer.len) {
                         return error.OutOfMemory;
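
The loop above is the reserve step of a lock-free bump allocator: load
end_index, compute the new end, then publish it with a compare-and-swap,
retrying if another thread advanced the index first. A schematic of that
pattern (illustration only, assuming the 2019-era @cmpxchgWeak builtin;
reserve is a hypothetical name, not the function in this file):

    const builtin = @import("builtin");

    // Atomically reserve n bytes below limit; returns the old end index.
    fn reserve(end_index_ptr: *usize, limit: usize, n: usize) !usize {
        var end_index = @atomicLoad(usize, end_index_ptr, builtin.AtomicOrder.SeqCst);
        while (true) {
            const new_end_index = end_index + n;
            if (new_end_index > limit) return error.OutOfMemory;
            // null means the swap succeeded; otherwise retry with the
            // value another thread published.
            end_index = @cmpxchgWeak(
                usize,
                end_index_ptr,
                end_index,
                new_end_index,
                builtin.AtomicOrder.SeqCst,
                builtin.AtomicOrder.SeqCst,
            ) orelse return end_index;
        }
    }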
diff --git a/std/mem.zig b/std/mem.zig
index cd54358e43..28f25dcbcd 100644
--- a/std/mem.zig
+++ b/std/mem.zig
@@ -49,7 +49,6 @@ pub const Allocator = struct {
     pub fn destroy(self: *Allocator, ptr: var) void {
         const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
         self.freeFn(self, non_const_ptr[0..@sizeOf(@typeOf(ptr).Child)]);
-        _ = std.valgrind.freeLikeBlock(non_const_ptr, 0);
     }
 
     pub fn alloc(self: *Allocator, comptime T: type, n: usize) ![]T {
@@ -63,7 +62,6 @@ pub const Allocator = struct {
         const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
         const byte_slice = try self.allocFn(self, byte_count, alignment);
         assert(byte_slice.len == byte_count);
-        _ = std.valgrind.mallocLikeBlock(byte_slice, 0, false);
         // This loop gets optimized out in ReleaseFast mode
         for (byte_slice) |*byte| {
             byte.* = undefined;
@@ -88,12 +86,6 @@ pub const Allocator = struct {
         const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
         const byte_slice = try self.reallocFn(self, old_byte_slice, byte_count, alignment);
         assert(byte_slice.len == byte_count);
-        if (byte_slice.ptr == old_byte_slice.ptr) {
-            _ = std.valgrind.resizeInPlaceBlock(old_byte_slice, byte_count, 0);
-        } else {
-            _ = std.valgrind.freeLikeBlock(old_byte_slice.ptr, 0);
-            _ = std.valgrind.mallocLikeBlock(byte_slice, 0, false);
-        }
         if (n > old_mem.len) {
             // This loop gets optimized out in ReleaseFast mode
             for (byte_slice[old_byte_slice.len..]) |*byte| {
@@ -125,12 +117,6 @@ pub const Allocator = struct {
         const old_byte_slice = @sliceToBytes(old_mem);
         const byte_slice = self.reallocFn(self, old_byte_slice, byte_count, alignment) catch unreachable;
         assert(byte_slice.len == byte_count);
-        if (byte_slice.ptr == old_byte_slice.ptr) {
-            _ = std.valgrind.resizeInPlaceBlock(old_byte_slice, byte_count, 0);
-        } else {
-            _ = std.valgrind.freeLikeBlock(old_byte_slice.ptr, 0);
-            _ = std.valgrind.mallocLikeBlock(byte_slice, 0, false);
-        }
         return @bytesToSlice(T, @alignCast(alignment, byte_slice));
     }
 
@@ -139,7 +125,6 @@ pub const Allocator = struct {
         if (bytes.len == 0) return;
         const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr));
         self.freeFn(self, non_const_ptr[0..bytes.len]);
-        _ = std.valgrind.freeLikeBlock(non_const_ptr, 0);
     }
 };
diff --git a/std/valgrind/index.zig b/std/valgrind.zig
index 1c0d084a4d..1c0d084a4d 100644
--- a/std/valgrind/index.zig
+++ b/std/valgrind.zig
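
The last entry is a pure rename: with the allocator integration gone,
std/valgrind no longer needs to be a directory, so valgrind/index.zig becomes
valgrind.zig. The import path is unchanged; a sketch (runningOnValgrind is
assumed to be part of this module, it does not appear in the diff):

    const std = @import("std");

    test "std.valgrind still resolves after the rename" {
        // Client requests degrade to no-ops outside valgrind, so probing
        // is always safe.
        _ = std.valgrind.runningOnValgrind();
    }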