author     Jacob Young <jacobly0@users.noreply.github.com>   2024-07-16 14:49:49 -0400
committer  GitHub <noreply@github.com>                       2024-07-16 14:49:49 -0400
commit     88bb0fd288acb6a20abed57cdf459cc4fc788b89 (patch)
tree       4a488eb0c9ef76f63d13d12de1649ddab9b8caac /lib/std
parent     d8f81372f148ad2ee5aab12cc7ea55562764b3e7 (diff)
parent     00fdbf05f39931d1f6c5808e8da4afca85357214 (diff)
Merge pull request #20632 from jacobly0/codegen-thread
InternPool: enable separate codegen/linking thread
Diffstat (limited to 'lib/std')
-rw-r--r--  lib/std/Progress.zig |  8
-rw-r--r--  lib/std/mem.zig      | 11
2 files changed, 7 insertions, 12 deletions
diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig
index 190f667897..be4d6d5b3a 100644
--- a/lib/std/Progress.zig
+++ b/lib/std/Progress.zig
@@ -669,14 +669,8 @@ fn appendTreeSymbol(symbol: TreeSymbol, buf: []u8, start_i: usize) usize {
fn clearWrittenWithEscapeCodes() anyerror!void {
if (!global_progress.need_clear) return;
- var i: usize = 0;
- const buf = global_progress.draw_buffer;
-
- buf[i..][0..clear.len].* = clear.*;
- i += clear.len;
-
global_progress.need_clear = false;
- try write(buf[0..i]);
+ try write(clear);
}
/// U+25BA or ►
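
The removed staging in clearWrittenWithEscapeCodes was redundant on this path: the only bytes ever copied into the draw buffer were the clear escape sequence itself, so handing `clear` straight to write() emits identical output. A minimal sketch of that equivalence (the escape string and buffer below are stand-ins, not the actual constants in Progress.zig):

const std = @import("std");

test "staging only the clear sequence equals writing it directly" {
    const clear = "\x1b[J"; // stand-in escape sequence, not the real `clear` constant
    var draw_buffer: [64]u8 = undefined;
    var i: usize = 0;
    // The removed code: copy the sequence into the draw buffer, then write the prefix.
    draw_buffer[i..][0..clear.len].* = clear.*;
    i += clear.len;
    // The staged prefix is byte-for-byte the clear sequence, so
    // write(draw_buffer[0..i]) and write(clear) send the same bytes.
    try std.testing.expectEqualSlices(u8, clear, draw_buffer[0..i]);
}
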
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index 0e16c0f068..a31f7dfa52 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -1050,15 +1050,16 @@ pub fn indexOfSentinel(comptime T: type, comptime sentinel: T, p: [*:sentinel]co
// as we don't read into a new page. This should be the case for most architectures
// which use paged memory, however should be confirmed before adding a new arch below.
.aarch64, .x86, .x86_64 => if (std.simd.suggestVectorLength(T)) |block_len| {
+ const block_size = @sizeOf(T) * block_len;
const Block = @Vector(block_len, T);
const mask: Block = @splat(sentinel);
- comptime std.debug.assert(std.mem.page_size % @sizeOf(Block) == 0);
+ comptime std.debug.assert(std.mem.page_size % block_size == 0);
// First block may be unaligned
const start_addr = @intFromPtr(&p[i]);
const offset_in_page = start_addr & (std.mem.page_size - 1);
- if (offset_in_page <= std.mem.page_size - @sizeOf(Block)) {
+ if (offset_in_page <= std.mem.page_size - block_size) {
// Will not read past the end of a page, full block.
const block: Block = p[i..][0..block_len].*;
const matches = block == mask;
@@ -1066,19 +1067,19 @@ pub fn indexOfSentinel(comptime T: type, comptime sentinel: T, p: [*:sentinel]co
return i + std.simd.firstTrue(matches).?;
}
- i += (std.mem.alignForward(usize, start_addr, @alignOf(Block)) - start_addr) / @sizeOf(T);
+ i += @divExact(std.mem.alignForward(usize, start_addr, block_size) - start_addr, @sizeOf(T));
} else {
// Would read over a page boundary. Per-byte at a time until aligned or found.
// 0.39% chance this branch is taken for 4K pages at 16b block length.
//
// An alternate strategy is to read a full block (the last in the page) and
// mask the entries before the pointer.
- while ((@intFromPtr(&p[i]) & (@alignOf(Block) - 1)) != 0) : (i += 1) {
+ while ((@intFromPtr(&p[i]) & (block_size - 1)) != 0) : (i += 1) {
if (p[i] == sentinel) return i;
}
}
- std.debug.assert(std.mem.isAligned(@intFromPtr(&p[i]), @alignOf(Block)));
+ std.debug.assert(std.mem.isAligned(@intFromPtr(&p[i]), block_size));
while (true) {
const block: *const Block = @ptrCast(@alignCast(p[i..][0..block_len]));
const matches = block.* == mask;
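
A worked sketch of the arithmetic behind the change (illustrative only: 4 KiB pages and the start address are assumed for the example, not taken from the patch). block_size is the exact byte footprint of the block_len elements each vector load touches, which need not equal @alignOf(Block), so the page-boundary check and the align-forward step now both use block_size. Because page_size is a multiple of block_size, an address rounded up to a multiple of block_size always leaves at least block_size bytes before the page end, and the number of bytes skipped divides evenly by @sizeOf(T), which the switch to @divExact makes explicit:

const std = @import("std");

test "aligning to block_size keeps each vector read inside one page" {
    const page_size: usize = 4096; // assumed 4 KiB pages for the example

    inline for (.{ u8, u16, u32 }) |T| {
        if (std.simd.suggestVectorLength(T)) |block_len| {
            const block_size = @sizeOf(T) * block_len;

            // Precondition asserted by the patch: a page holds a whole number of blocks.
            try std.testing.expect(page_size % block_size == 0);

            // An element-aligned but block-unaligned start address (made up for the example).
            const start_addr: usize = 0x1004;
            const aligned = std.mem.alignForward(usize, start_addr, block_size);

            // Once rounded up to a multiple of block_size, a block_size-byte read
            // can never straddle a page boundary.
            const offset_in_page = aligned & (page_size - 1);
            try std.testing.expect(offset_in_page <= page_size - block_size);

            // The bytes skipped are a whole number of elements, so the element
            // advance divides exactly.
            const elements_skipped = @divExact(aligned - start_addr, @sizeOf(T));
            try std.testing.expect(elements_skipped * @sizeOf(T) == aligned - start_addr);
        }
    }
}
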