aboutsummaryrefslogtreecommitdiff
path: root/lib/std/array_list.zig
diff options
context:
space:
mode:
authorAndrew Kelley <andrew@ziglang.org>2023-04-29 00:19:55 -0700
committerGitHub <noreply@github.com>2023-04-29 00:19:55 -0700
commitd65b42e07caa00dfe2f2fbf221c593ce57882784 (patch)
tree7926cbea1499e0affe930bf6d7455dc24adf014e /lib/std/array_list.zig
parentfd6200eda6d4fe19c34a59430a88a9ce38d6d7a4 (diff)
parentfa200ca0cad2705bad40eb723dedf4e3bf11f2ff (diff)
downloadzig-d65b42e07caa00dfe2f2fbf221c593ce57882784.tar.gz
zig-d65b42e07caa00dfe2f2fbf221c593ce57882784.zip
Merge pull request #15481 from ziglang/use-mem-intrinsics
actually use the new memory intrinsics
Diffstat (limited to 'lib/std/array_list.zig')
-rw-r--r--lib/std/array_list.zig48
1 file changed, 28 insertions(+), 20 deletions(-)
diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig
index 1791482bc4..cbca601b82 100644
--- a/lib/std/array_list.zig
+++ b/lib/std/array_list.zig
@@ -120,7 +120,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
}
const new_memory = try allocator.alignedAlloc(T, alignment, self.items.len);
- mem.copy(T, new_memory, self.items);
+ @memcpy(new_memory, self.items);
@memset(self.items, undefined);
self.clearAndFree();
return new_memory;
@@ -170,7 +170,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
self.items.len += items.len;
mem.copyBackwards(T, self.items[i + items.len .. self.items.len], self.items[i .. self.items.len - items.len]);
- mem.copy(T, self.items[i .. i + items.len], items);
+ @memcpy(self.items[i..][0..items.len], items);
}
/// Replace range of elements `list[start..start+len]` with `new_items`.
@@ -182,15 +182,15 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
const range = self.items[start..after_range];
if (range.len == new_items.len)
- mem.copy(T, range, new_items)
+ @memcpy(range[0..new_items.len], new_items)
else if (range.len < new_items.len) {
const first = new_items[0..range.len];
const rest = new_items[range.len..];
- mem.copy(T, range, first);
+ @memcpy(range[0..first.len], first);
try self.insertSlice(after_range, rest);
} else {
- mem.copy(T, range, new_items);
+ @memcpy(range[0..new_items.len], new_items);
const after_subrange = start + new_items.len;
for (self.items[after_range..], 0..) |item, i| {
@@ -260,7 +260,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
const new_len = old_len + items.len;
assert(new_len <= self.capacity);
self.items.len = new_len;
- mem.copy(T, self.items[old_len..], items);
+ @memcpy(self.items[old_len..][0..items.len], items);
}
/// Append an unaligned slice of items to the list. Allocates more
@@ -306,18 +306,22 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
/// Append a value to the list `n` times.
/// Allocates more memory as necessary.
/// Invalidates pointers if additional memory is needed.
- pub fn appendNTimes(self: *Self, value: T, n: usize) Allocator.Error!void {
+ /// The function is inline so that a comptime-known `value` parameter will
+ /// have a more optimal memset codegen in case it has a repeated byte pattern.
+ pub inline fn appendNTimes(self: *Self, value: T, n: usize) Allocator.Error!void {
const old_len = self.items.len;
try self.resize(self.items.len + n);
- mem.set(T, self.items[old_len..self.items.len], value);
+ @memset(self.items[old_len..self.items.len], value);
}
/// Append a value to the list `n` times.
/// Asserts the capacity is enough. **Does not** invalidate pointers.
- pub fn appendNTimesAssumeCapacity(self: *Self, value: T, n: usize) void {
+ /// The function is inline so that a comptime-known `value` parameter will
+ /// have a more optimal memset codegen in case it has a repeated byte pattern.
+ pub inline fn appendNTimesAssumeCapacity(self: *Self, value: T, n: usize) void {
const new_len = self.items.len + n;
assert(new_len <= self.capacity);
- mem.set(T, self.items.ptr[self.items.len..new_len], value);
+ @memset(self.items.ptr[self.items.len..new_len], value);
self.items.len = new_len;
}
@@ -397,7 +401,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
self.capacity = new_capacity;
} else {
const new_memory = try self.allocator.alignedAlloc(T, alignment, new_capacity);
- mem.copy(T, new_memory, self.items);
+ @memcpy(new_memory[0..self.items.len], self.items);
self.allocator.free(old_memory);
self.items.ptr = new_memory.ptr;
self.capacity = new_memory.len;
@@ -596,7 +600,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
}
const new_memory = try allocator.alignedAlloc(T, alignment, self.items.len);
- mem.copy(T, new_memory, self.items);
+ @memcpy(new_memory, self.items);
@memset(self.items, undefined);
self.clearAndFree(allocator);
return new_memory;
@@ -647,7 +651,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
self.items.len += items.len;
mem.copyBackwards(T, self.items[i + items.len .. self.items.len], self.items[i .. self.items.len - items.len]);
- mem.copy(T, self.items[i .. i + items.len], items);
+ @memcpy(self.items[i..][0..items.len], items);
}
/// Replace range of elements `list[start..start+len]` with `new_items`
@@ -716,7 +720,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
const new_len = old_len + items.len;
assert(new_len <= self.capacity);
self.items.len = new_len;
- mem.copy(T, self.items[old_len..], items);
+ @memcpy(self.items[old_len..][0..items.len], items);
}
/// Append the slice of items to the list. Allocates more
@@ -766,19 +770,23 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Append a value to the list `n` times.
/// Allocates more memory as necessary.
/// Invalidates pointers if additional memory is needed.
- pub fn appendNTimes(self: *Self, allocator: Allocator, value: T, n: usize) Allocator.Error!void {
+ /// The function is inline so that a comptime-known `value` parameter will
+ /// have a more optimal memset codegen in case it has a repeated byte pattern.
+ pub inline fn appendNTimes(self: *Self, allocator: Allocator, value: T, n: usize) Allocator.Error!void {
const old_len = self.items.len;
try self.resize(allocator, self.items.len + n);
- mem.set(T, self.items[old_len..self.items.len], value);
+ @memset(self.items[old_len..self.items.len], value);
}
/// Append a value to the list `n` times.
/// **Does not** invalidate pointers.
/// Asserts the capacity is enough.
- pub fn appendNTimesAssumeCapacity(self: *Self, value: T, n: usize) void {
+ /// The function is inline so that a comptime-known `value` parameter will
+ /// have a more optimal memset codegen in case it has a repeated byte pattern.
+ pub inline fn appendNTimesAssumeCapacity(self: *Self, value: T, n: usize) void {
const new_len = self.items.len + n;
assert(new_len <= self.capacity);
- mem.set(T, self.items.ptr[self.items.len..new_len], value);
+ @memset(self.items.ptr[self.items.len..new_len], value);
self.items.len = new_len;
}
@@ -815,7 +823,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
},
};
- mem.copy(T, new_memory, self.items[0..new_len]);
+ @memcpy(new_memory, self.items[0..new_len]);
allocator.free(old_memory);
self.items = new_memory;
self.capacity = new_memory.len;
@@ -877,7 +885,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
self.capacity = new_capacity;
} else {
const new_memory = try allocator.alignedAlloc(T, alignment, new_capacity);
- mem.copy(T, new_memory, self.items);
+ @memcpy(new_memory[0..self.items.len], self.items);
allocator.free(old_memory);
self.items.ptr = new_memory.ptr;
self.capacity = new_memory.len;