From 3db3cf77904e664d589287602c14168a7a63f125 Mon Sep 17 00:00:00 2001 From: Ali Chraghi Date: Tue, 23 May 2023 15:33:12 +0330 Subject: std.sort: add pdqsort and heapsort --- lib/std/sort/block.zig | 1066 ++++++++++++++++++++++++++++++++++++++++++++++++ lib/std/sort/pdq.zig | 331 +++++++++++++++ 2 files changed, 1397 insertions(+) create mode 100644 lib/std/sort/block.zig create mode 100644 lib/std/sort/pdq.zig (limited to 'lib/std/sort') diff --git a/lib/std/sort/block.zig b/lib/std/sort/block.zig new file mode 100644 index 0000000000..6c1be9c6c2 --- /dev/null +++ b/lib/std/sort/block.zig @@ -0,0 +1,1066 @@ +const std = @import("../std.zig"); +const sort = std.sort; +const math = std.math; +const mem = std.mem; + +const Range = struct { + start: usize, + end: usize, + + fn init(start: usize, end: usize) Range { + return Range{ + .start = start, + .end = end, + }; + } + + fn length(self: Range) usize { + return self.end - self.start; + } +}; + +const Iterator = struct { + size: usize, + power_of_two: usize, + numerator: usize, + decimal: usize, + denominator: usize, + decimal_step: usize, + numerator_step: usize, + + fn init(size2: usize, min_level: usize) Iterator { + const power_of_two = math.floorPowerOfTwo(usize, size2); + const denominator = power_of_two / min_level; + return Iterator{ + .numerator = 0, + .decimal = 0, + .size = size2, + .power_of_two = power_of_two, + .denominator = denominator, + .decimal_step = size2 / denominator, + .numerator_step = size2 % denominator, + }; + } + + fn begin(self: *Iterator) void { + self.numerator = 0; + self.decimal = 0; + } + + fn nextRange(self: *Iterator) Range { + const start = self.decimal; + + self.decimal += self.decimal_step; + self.numerator += self.numerator_step; + if (self.numerator >= self.denominator) { + self.numerator -= self.denominator; + self.decimal += 1; + } + + return Range{ + .start = start, + .end = self.decimal, + }; + } + + fn finished(self: *Iterator) bool { + return self.decimal >= self.size; + } + + fn nextLevel(self: *Iterator) bool { + self.decimal_step += self.decimal_step; + self.numerator_step += self.numerator_step; + if (self.numerator_step >= self.denominator) { + self.numerator_step -= self.denominator; + self.decimal_step += 1; + } + + return (self.decimal_step < self.size); + } + + fn length(self: *Iterator) usize { + return self.decimal_step; + } +}; + +const Pull = struct { + from: usize, + to: usize, + count: usize, + range: Range, +}; + +/// Stable in-place sort. O(n) best case, O(n*log(n)) worst case and average case. +/// O(1) memory (no allocator required). +/// Sorts in ascending order with respect to the given `lessThan` function. 
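+///
+/// A minimal usage sketch (assuming the `asc` comparator helper that lives
+/// alongside this function in `std.sort`):
+///
+///     var data = [_]i32{ 3, 1, 2 };
+///     block(i32, &data, {}, comptime std.sort.asc(i32));
+///     // data is now { 1, 2, 3 }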
+///
+/// NOTE: the algorithm only works when the comparison is less-than or greater-than
+/// (See https://github.com/ziglang/zig/issues/8289)
+pub fn block(
+    comptime T: type,
+    items: []T,
+    context: anytype,
+    comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
+) void {
+
+    // Implementation ported from https://github.com/BonzaiThePenguin/WikiSort/blob/master/WikiSort.c
+    var cache: [512]T = undefined;
+
+    if (items.len < 4) {
+        if (items.len == 3) {
+            // hard-coded insertion sort
+            if (lessThan(context, items[1], items[0])) mem.swap(T, &items[0], &items[1]);
+            if (lessThan(context, items[2], items[1])) {
+                mem.swap(T, &items[1], &items[2]);
+                if (lessThan(context, items[1], items[0])) mem.swap(T, &items[0], &items[1]);
+            }
+        } else if (items.len == 2) {
+            if (lessThan(context, items[1], items[0])) mem.swap(T, &items[0], &items[1]);
+        }
+        return;
+    }
+
+    // sort groups of 4-8 items at a time using an unstable sorting network,
+    // but keep track of the original item orders to force it to be stable
+    // http://pages.ripco.net/~jgamble/nw.html
+    var iterator = Iterator.init(items.len, 4);
+    while (!iterator.finished()) {
+        var order = [_]u8{ 0, 1, 2, 3, 4, 5, 6, 7 };
+        const range = iterator.nextRange();
+
+        const sliced_items = items[range.start..];
+        switch (range.length()) {
+            8 => {
+                swap(T, sliced_items, &order, 0, 1, context, lessThan);
+                swap(T, sliced_items, &order, 2, 3, context, lessThan);
+                swap(T, sliced_items, &order, 4, 5, context, lessThan);
+                swap(T, sliced_items, &order, 6, 7, context, lessThan);
+                swap(T, sliced_items, &order, 0, 2, context, lessThan);
+                swap(T, sliced_items, &order, 1, 3, context, lessThan);
+                swap(T, sliced_items, &order, 4, 6, context, lessThan);
+                swap(T, sliced_items, &order, 5, 7, context, lessThan);
+                swap(T, sliced_items, &order, 1, 2, context, lessThan);
+                swap(T, sliced_items, &order, 5, 6, context, lessThan);
+                swap(T, sliced_items, &order, 0, 4, context, lessThan);
+                swap(T, sliced_items, &order, 3, 7, context, lessThan);
+                swap(T, sliced_items, &order, 1, 5, context, lessThan);
+                swap(T, sliced_items, &order, 2, 6, context, lessThan);
+                swap(T, sliced_items, &order, 1, 4, context, lessThan);
+                swap(T, sliced_items, &order, 3, 6, context, lessThan);
+                swap(T, sliced_items, &order, 2, 4, context, lessThan);
+                swap(T, sliced_items, &order, 3, 5, context, lessThan);
+                swap(T, sliced_items, &order, 3, 4, context, lessThan);
+            },
+            7 => {
+                swap(T, sliced_items, &order, 1, 2, context, lessThan);
+                swap(T, sliced_items, &order, 3, 4, context, lessThan);
+                swap(T, sliced_items, &order, 5, 6, context, lessThan);
+                swap(T, sliced_items, &order, 0, 2, context, lessThan);
+                swap(T, sliced_items, &order, 3, 5, context, lessThan);
+                swap(T, sliced_items, &order, 4, 6, context, lessThan);
+                swap(T, sliced_items, &order, 0, 1, context, lessThan);
+                swap(T, sliced_items, &order, 4, 5, context, lessThan);
+                swap(T, sliced_items, &order, 2, 6, context, lessThan);
+                swap(T, sliced_items, &order, 0, 4, context, lessThan);
+                swap(T, sliced_items, &order, 1, 5, context, lessThan);
+                swap(T, sliced_items, &order, 0, 3, context, lessThan);
+                swap(T, sliced_items, &order, 2, 5, context, lessThan);
+                swap(T, sliced_items, &order, 1, 3, context, lessThan);
+                swap(T, sliced_items, &order, 2, 4, context, lessThan);
+                swap(T, sliced_items, &order, 2, 3, context, lessThan);
+            },
+            6 => {
+                swap(T, sliced_items, &order, 1, 2, context, lessThan);
+                swap(T, sliced_items, &order, 4, 5, context, lessThan);
+                swap(T, sliced_items, &order, 0, 2, context, lessThan);
+ swap(T, sliced_items, &order, 3, 5, context, lessThan); + swap(T, sliced_items, &order, 0, 1, context, lessThan); + swap(T, sliced_items, &order, 3, 4, context, lessThan); + swap(T, sliced_items, &order, 2, 5, context, lessThan); + swap(T, sliced_items, &order, 0, 3, context, lessThan); + swap(T, sliced_items, &order, 1, 4, context, lessThan); + swap(T, sliced_items, &order, 2, 4, context, lessThan); + swap(T, sliced_items, &order, 1, 3, context, lessThan); + swap(T, sliced_items, &order, 2, 3, context, lessThan); + }, + 5 => { + swap(T, sliced_items, &order, 0, 1, context, lessThan); + swap(T, sliced_items, &order, 3, 4, context, lessThan); + swap(T, sliced_items, &order, 2, 4, context, lessThan); + swap(T, sliced_items, &order, 2, 3, context, lessThan); + swap(T, sliced_items, &order, 1, 4, context, lessThan); + swap(T, sliced_items, &order, 0, 3, context, lessThan); + swap(T, sliced_items, &order, 0, 2, context, lessThan); + swap(T, sliced_items, &order, 1, 3, context, lessThan); + swap(T, sliced_items, &order, 1, 2, context, lessThan); + }, + 4 => { + swap(T, sliced_items, &order, 0, 1, context, lessThan); + swap(T, sliced_items, &order, 2, 3, context, lessThan); + swap(T, sliced_items, &order, 0, 2, context, lessThan); + swap(T, sliced_items, &order, 1, 3, context, lessThan); + swap(T, sliced_items, &order, 1, 2, context, lessThan); + }, + else => {}, + } + } + if (items.len < 8) return; + + // then merge sort the higher levels, which can be 8-15, 16-31, 32-63, 64-127, etc. + while (true) { + // if every A and B block will fit into the cache, use a special branch + // specifically for merging with the cache + // (we use < rather than <= since the block size might be one more than + // iterator.length()) + if (iterator.length() < cache.len) { + // if four subarrays fit into the cache, it's faster to merge both + // pairs of subarrays into the cache, + // then merge the two merged subarrays from the cache back into the original array + if ((iterator.length() + 1) * 4 <= cache.len and iterator.length() * 4 <= items.len) { + iterator.begin(); + while (!iterator.finished()) { + // merge A1 and B1 into the cache + var A1 = iterator.nextRange(); + var B1 = iterator.nextRange(); + var A2 = iterator.nextRange(); + var B2 = iterator.nextRange(); + + if (lessThan(context, items[B1.end - 1], items[A1.start])) { + // the two ranges are in reverse order, so copy them in reverse order into the cache + const a1_items = items[A1.start..A1.end]; + @memcpy(cache[B1.length()..][0..a1_items.len], a1_items); + const b1_items = items[B1.start..B1.end]; + @memcpy(cache[0..b1_items.len], b1_items); + } else if (lessThan(context, items[B1.start], items[A1.end - 1])) { + // these two ranges weren't already in order, so merge them into the cache + mergeInto(T, items, A1, B1, cache[0..], context, lessThan); + } else { + // if A1, B1, A2, and B2 are all in order, skip doing anything else + if (!lessThan(context, items[B2.start], items[A2.end - 1]) and !lessThan(context, items[A2.start], items[B1.end - 1])) continue; + + // copy A1 and B1 into the cache in the same order + const a1_items = items[A1.start..A1.end]; + @memcpy(cache[0..a1_items.len], a1_items); + const b1_items = items[B1.start..B1.end]; + @memcpy(cache[A1.length()..][0..b1_items.len], b1_items); + } + A1 = Range.init(A1.start, B1.end); + + // merge A2 and B2 into the cache + if (lessThan(context, items[B2.end - 1], items[A2.start])) { + // the two ranges are in reverse order, so copy them in reverse order into the cache + const a2_items = 
items[A2.start..A2.end]; + @memcpy(cache[A1.length() + B2.length() ..][0..a2_items.len], a2_items); + const b2_items = items[B2.start..B2.end]; + @memcpy(cache[A1.length()..][0..b2_items.len], b2_items); + } else if (lessThan(context, items[B2.start], items[A2.end - 1])) { + // these two ranges weren't already in order, so merge them into the cache + mergeInto(T, items, A2, B2, cache[A1.length()..], context, lessThan); + } else { + // copy A2 and B2 into the cache in the same order + const a2_items = items[A2.start..A2.end]; + @memcpy(cache[A1.length()..][0..a2_items.len], a2_items); + const b2_items = items[B2.start..B2.end]; + @memcpy(cache[A1.length() + A2.length() ..][0..b2_items.len], b2_items); + } + A2 = Range.init(A2.start, B2.end); + + // merge A1 and A2 from the cache into the items + const A3 = Range.init(0, A1.length()); + const B3 = Range.init(A1.length(), A1.length() + A2.length()); + + if (lessThan(context, cache[B3.end - 1], cache[A3.start])) { + // the two ranges are in reverse order, so copy them in reverse order into the items + const a3_items = cache[A3.start..A3.end]; + @memcpy(items[A1.start + A2.length() ..][0..a3_items.len], a3_items); + const b3_items = cache[B3.start..B3.end]; + @memcpy(items[A1.start..][0..b3_items.len], b3_items); + } else if (lessThan(context, cache[B3.start], cache[A3.end - 1])) { + // these two ranges weren't already in order, so merge them back into the items + mergeInto(T, cache[0..], A3, B3, items[A1.start..], context, lessThan); + } else { + // copy A3 and B3 into the items in the same order + const a3_items = cache[A3.start..A3.end]; + @memcpy(items[A1.start..][0..a3_items.len], a3_items); + const b3_items = cache[B3.start..B3.end]; + @memcpy(items[A1.start + A1.length() ..][0..b3_items.len], b3_items); + } + } + + // we merged two levels at the same time, so we're done with this level already + // (iterator.nextLevel() is called again at the bottom of this outer merge loop) + _ = iterator.nextLevel(); + } else { + iterator.begin(); + while (!iterator.finished()) { + var A = iterator.nextRange(); + var B = iterator.nextRange(); + + if (lessThan(context, items[B.end - 1], items[A.start])) { + // the two ranges are in reverse order, so a simple rotation should fix it + mem.rotate(T, items[A.start..B.end], A.length()); + } else if (lessThan(context, items[B.start], items[A.end - 1])) { + // these two ranges weren't already in order, so we'll need to merge them! + const a_items = items[A.start..A.end]; + @memcpy(cache[0..a_items.len], a_items); + mergeExternal(T, items, A, B, cache[0..], context, lessThan); + } + } + } + } else { + // this is where the in-place merge logic starts! + // 1. pull out two internal buffers each containing √A unique values + // 1a. adjust block_size and buffer_size if we couldn't find enough unique values + // 2. loop over the A and B subarrays within this level of the merge sort + // 3. break A and B into blocks of size 'block_size' + // 4. "tag" each of the A blocks with values from the first internal buffer + // 5. roll the A blocks through the B blocks and drop/rotate them where they belong + // 6. merge each A block with any B values that follow, using the cache or the second internal buffer + // 7. sort the second internal buffer if it exists + // 8. 
redistribute the two internal buffers back into the items + var block_size: usize = math.sqrt(iterator.length()); + var buffer_size = iterator.length() / block_size + 1; + + // as an optimization, we really only need to pull out the internal buffers once for each level of merges + // after that we can reuse the same buffers over and over, then redistribute it when we're finished with this level + var A: Range = undefined; + var B: Range = undefined; + var index: usize = 0; + var last: usize = 0; + var count: usize = 0; + var find: usize = 0; + var start: usize = 0; + var pull_index: usize = 0; + var pull = [_]Pull{ + Pull{ + .from = 0, + .to = 0, + .count = 0, + .range = Range.init(0, 0), + }, + Pull{ + .from = 0, + .to = 0, + .count = 0, + .range = Range.init(0, 0), + }, + }; + + var buffer1 = Range.init(0, 0); + var buffer2 = Range.init(0, 0); + + // find two internal buffers of size 'buffer_size' each + find = buffer_size + buffer_size; + var find_separately = false; + + if (block_size <= cache.len) { + // if every A block fits into the cache then we won't need the second internal buffer, + // so we really only need to find 'buffer_size' unique values + find = buffer_size; + } else if (find > iterator.length()) { + // we can't fit both buffers into the same A or B subarray, so find two buffers separately + find = buffer_size; + find_separately = true; + } + + // we need to find either a single contiguous space containing 2√A unique values (which will be split up into two buffers of size √A each), + // or we need to find one buffer of < 2√A unique values, and a second buffer of √A unique values, + // OR if we couldn't find that many unique values, we need the largest possible buffer we can get + + // in the case where it couldn't find a single buffer of at least √A unique values, + // all of the Merge steps must be replaced by a different merge algorithm (MergeInPlace) + iterator.begin(); + while (!iterator.finished()) { + A = iterator.nextRange(); + B = iterator.nextRange(); + + // just store information about where the values will be pulled from and to, + // as well as how many values there are, to create the two internal buffers + + // check A for the number of unique values we need to fill an internal buffer + // these values will be pulled out to the start of A + last = A.start; + count = 1; + while (count < find) : ({ + last = index; + count += 1; + }) { + index = findLastForward(T, items, items[last], Range.init(last + 1, A.end), find - count, context, lessThan); + if (index == A.end) break; + } + index = last; + + if (count >= buffer_size) { + // keep track of the range within the items where we'll need to "pull out" these values to create the internal buffer + pull[pull_index] = Pull{ + .range = Range.init(A.start, B.end), + .count = count, + .from = index, + .to = A.start, + }; + pull_index = 1; + + if (count == buffer_size + buffer_size) { + // we were able to find a single contiguous section containing 2√A unique values, + // so this section can be used to contain both of the internal buffers we'll need + buffer1 = Range.init(A.start, A.start + buffer_size); + buffer2 = Range.init(A.start + buffer_size, A.start + count); + break; + } else if (find == buffer_size + buffer_size) { + // we found a buffer that contains at least √A unique values, but did not contain the full 2√A unique values, + // so we still need to find a second separate buffer of at least √A unique values + buffer1 = Range.init(A.start, A.start + count); + find = buffer_size; + } else if (block_size <= 
cache.len) {
+                        // we found the first and only internal buffer that we need, so we're done!
+                        buffer1 = Range.init(A.start, A.start + count);
+                        break;
+                    } else if (find_separately) {
+                        // found one buffer, but now find the other one
+                        buffer1 = Range.init(A.start, A.start + count);
+                        find_separately = false;
+                    } else {
+                        // we found a second buffer in an 'A' subarray containing √A unique values, so we're done!
+                        buffer2 = Range.init(A.start, A.start + count);
+                        break;
+                    }
+                } else if (pull_index == 0 and count > buffer1.length()) {
+                    // keep track of the largest buffer we were able to find
+                    buffer1 = Range.init(A.start, A.start + count);
+                    pull[pull_index] = Pull{
+                        .range = Range.init(A.start, B.end),
+                        .count = count,
+                        .from = index,
+                        .to = A.start,
+                    };
+                }
+
+                // check B for the number of unique values we need to fill an internal buffer
+                // these values will be pulled out to the end of B
+                last = B.end - 1;
+                count = 1;
+                while (count < find) : ({
+                    last = index - 1;
+                    count += 1;
+                }) {
+                    index = findFirstBackward(T, items, items[last], Range.init(B.start, last), find - count, context, lessThan);
+                    if (index == B.start) break;
+                }
+                index = last;
+
+                if (count >= buffer_size) {
+                    // keep track of the range within the items where we'll need to "pull out" these values to create the internal buffer
+                    pull[pull_index] = Pull{
+                        .range = Range.init(A.start, B.end),
+                        .count = count,
+                        .from = index,
+                        .to = B.end,
+                    };
+                    pull_index = 1;
+
+                    if (count == buffer_size + buffer_size) {
+                        // we were able to find a single contiguous section containing 2√A unique values,
+                        // so this section can be used to contain both of the internal buffers we'll need
+                        buffer1 = Range.init(B.end - count, B.end - buffer_size);
+                        buffer2 = Range.init(B.end - buffer_size, B.end);
+                        break;
+                    } else if (find == buffer_size + buffer_size) {
+                        // we found a buffer that contains at least √A unique values, but did not contain the full 2√A unique values,
+                        // so we still need to find a second separate buffer of at least √A unique values
+                        buffer1 = Range.init(B.end - count, B.end);
+                        find = buffer_size;
+                    } else if (block_size <= cache.len) {
+                        // we found the first and only internal buffer that we need, so we're done!
+                        buffer1 = Range.init(B.end - count, B.end);
+                        break;
+                    } else if (find_separately) {
+                        // found one buffer, but now find the other one
+                        buffer1 = Range.init(B.end - count, B.end);
+                        find_separately = false;
+                    } else {
+                        // buffer2 will be pulled out from a 'B' subarray, so if the first buffer was pulled out from the corresponding 'A' subarray,
+                        // we need to adjust the end point for that A subarray so it knows to stop redistributing its values before reaching buffer2
+                        if (pull[0].range.start == A.start) pull[0].range.end -= pull[1].count;
+
+                        // we found a second buffer in a 'B' subarray containing √A unique values, so we're done!
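+                        // (with both internal buffers available, none of the merges at this
+                        // level have to fall back to the slow MergeInPlace path)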
+ buffer2 = Range.init(B.end - count, B.end); + break; + } + } else if (pull_index == 0 and count > buffer1.length()) { + // keep track of the largest buffer we were able to find + buffer1 = Range.init(B.end - count, B.end); + pull[pull_index] = Pull{ + .range = Range.init(A.start, B.end), + .count = count, + .from = index, + .to = B.end, + }; + } + } + + // pull out the two ranges so we can use them as internal buffers + pull_index = 0; + while (pull_index < 2) : (pull_index += 1) { + const length = pull[pull_index].count; + + if (pull[pull_index].to < pull[pull_index].from) { + // we're pulling the values out to the left, which means the start of an A subarray + index = pull[pull_index].from; + count = 1; + while (count < length) : (count += 1) { + index = findFirstBackward(T, items, items[index - 1], Range.init(pull[pull_index].to, pull[pull_index].from - (count - 1)), length - count, context, lessThan); + const range = Range.init(index + 1, pull[pull_index].from + 1); + mem.rotate(T, items[range.start..range.end], range.length() - count); + pull[pull_index].from = index + count; + } + } else if (pull[pull_index].to > pull[pull_index].from) { + // we're pulling values out to the right, which means the end of a B subarray + index = pull[pull_index].from + 1; + count = 1; + while (count < length) : (count += 1) { + index = findLastForward(T, items, items[index], Range.init(index, pull[pull_index].to), length - count, context, lessThan); + const range = Range.init(pull[pull_index].from, index - 1); + mem.rotate(T, items[range.start..range.end], count); + pull[pull_index].from = index - 1 - count; + } + } + } + + // adjust block_size and buffer_size based on the values we were able to pull out + buffer_size = buffer1.length(); + block_size = iterator.length() / buffer_size + 1; + + // the first buffer NEEDS to be large enough to tag each of the evenly sized A blocks, + // so this was originally here to test the math for adjusting block_size above + // assert((iterator.length() + 1)/block_size <= buffer_size); + + // now that the two internal buffers have been created, it's time to merge each A+B combination at this level of the merge sort! + iterator.begin(); + while (!iterator.finished()) { + A = iterator.nextRange(); + B = iterator.nextRange(); + + // remove any parts of A or B that are being used by the internal buffers + start = A.start; + if (start == pull[0].range.start) { + if (pull[0].from > pull[0].to) { + A.start += pull[0].count; + + // if the internal buffer takes up the entire A or B subarray, then there's nothing to merge + // this only happens for very small subarrays, like √4 = 2, 2 * (2 internal buffers) = 4, + // which also only happens when cache.len is small or 0 since it'd otherwise use MergeExternal + if (A.length() == 0) continue; + } else if (pull[0].from < pull[0].to) { + B.end -= pull[0].count; + if (B.length() == 0) continue; + } + } + if (start == pull[1].range.start) { + if (pull[1].from > pull[1].to) { + A.start += pull[1].count; + if (A.length() == 0) continue; + } else if (pull[1].from < pull[1].to) { + B.end -= pull[1].count; + if (B.length() == 0) continue; + } + } + + if (lessThan(context, items[B.end - 1], items[A.start])) { + // the two ranges are in reverse order, so a simple rotation should fix it + mem.rotate(T, items[A.start..B.end], A.length()); + } else if (lessThan(context, items[A.end], items[A.end - 1])) { + // these two ranges weren't already in order, so we'll need to merge them! 
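+                    // this branch implements steps 3-6 of the outline above: break A into
+                    // √A-sized blocks, roll them through B, and merge each dropped A block
+                    // with whatever B values follow it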
+ var findA: usize = undefined; + + // break the remainder of A into blocks. firstA is the uneven-sized first A block + var blockA = Range.init(A.start, A.end); + var firstA = Range.init(A.start, A.start + blockA.length() % block_size); + + // swap the first value of each A block with the value in buffer1 + var indexA = buffer1.start; + index = firstA.end; + while (index < blockA.end) : ({ + indexA += 1; + index += block_size; + }) { + mem.swap(T, &items[indexA], &items[index]); + } + + // start rolling the A blocks through the B blocks! + // whenever we leave an A block behind, we'll need to merge the previous A block with any B blocks that follow it, so track that information as well + var lastA = firstA; + var lastB = Range.init(0, 0); + var blockB = Range.init(B.start, B.start + math.min(block_size, B.length())); + blockA.start += firstA.length(); + indexA = buffer1.start; + + // if the first unevenly sized A block fits into the cache, copy it there for when we go to Merge it + // otherwise, if the second buffer is available, block swap the contents into that + if (lastA.length() <= cache.len) { + const last_a_items = items[lastA.start..lastA.end]; + @memcpy(cache[0..last_a_items.len], last_a_items); + } else if (buffer2.length() > 0) { + blockSwap(T, items, lastA.start, buffer2.start, lastA.length()); + } + + if (blockA.length() > 0) { + while (true) { + // if there's a previous B block and the first value of the minimum A block is <= the last value of the previous B block, + // then drop that minimum A block behind. or if there are no B blocks left then keep dropping the remaining A blocks. + if ((lastB.length() > 0 and !lessThan(context, items[lastB.end - 1], items[indexA])) or blockB.length() == 0) { + // figure out where to split the previous B block, and rotate it at the split + const B_split = binaryFirst(T, items, items[indexA], lastB, context, lessThan); + const B_remaining = lastB.end - B_split; + + // swap the minimum A block to the beginning of the rolling A blocks + var minA = blockA.start; + findA = minA + block_size; + while (findA < blockA.end) : (findA += block_size) { + if (lessThan(context, items[findA], items[minA])) { + minA = findA; + } + } + blockSwap(T, items, blockA.start, minA, block_size); + + // swap the first item of the previous A block back with its original value, which is stored in buffer1 + mem.swap(T, &items[blockA.start], &items[indexA]); + indexA += 1; + + // locally merge the previous A block with the B values that follow it + // if lastA fits into the external cache we'll use that (with MergeExternal), + // or if the second internal buffer exists we'll use that (with MergeInternal), + // or failing that we'll use a strictly in-place merge algorithm (MergeInPlace) + + if (lastA.length() <= cache.len) { + mergeExternal(T, items, lastA, Range.init(lastA.end, B_split), cache[0..], context, lessThan); + } else if (buffer2.length() > 0) { + mergeInternal(T, items, lastA, Range.init(lastA.end, B_split), buffer2, context, lessThan); + } else { + mergeInPlace(T, items, lastA, Range.init(lastA.end, B_split), context, lessThan); + } + + if (buffer2.length() > 0 or block_size <= cache.len) { + // copy the previous A block into the cache or buffer2, since that's where we need it to be when we go to merge it anyway + if (block_size <= cache.len) { + @memcpy(cache[0..block_size], items[blockA.start..][0..block_size]); + } else { + blockSwap(T, items, blockA.start, buffer2.start, block_size); + } + + // this is equivalent to rotating, but faster + // the area 
normally taken up by the A block is either the contents of buffer2, or data we don't need anymore since we memcopied it + // either way, we don't need to retain the order of those items, so instead of rotating we can just block swap B to where it belongs + blockSwap(T, items, B_split, blockA.start + block_size - B_remaining, B_remaining); + } else { + // we are unable to use the 'buffer2' trick to speed up the rotation operation since buffer2 doesn't exist, so perform a normal rotation + mem.rotate(T, items[B_split .. blockA.start + block_size], blockA.start - B_split); + } + + // update the range for the remaining A blocks, and the range remaining from the B block after it was split + lastA = Range.init(blockA.start - B_remaining, blockA.start - B_remaining + block_size); + lastB = Range.init(lastA.end, lastA.end + B_remaining); + + // if there are no more A blocks remaining, this step is finished! + blockA.start += block_size; + if (blockA.length() == 0) break; + } else if (blockB.length() < block_size) { + // move the last B block, which is unevenly sized, to before the remaining A blocks, by using a rotation + // the cache is disabled here since it might contain the contents of the previous A block + mem.rotate(T, items[blockA.start..blockB.end], blockB.start - blockA.start); + + lastB = Range.init(blockA.start, blockA.start + blockB.length()); + blockA.start += blockB.length(); + blockA.end += blockB.length(); + blockB.end = blockB.start; + } else { + // roll the leftmost A block to the end by swapping it with the next B block + blockSwap(T, items, blockA.start, blockB.start, block_size); + lastB = Range.init(blockA.start, blockA.start + block_size); + + blockA.start += block_size; + blockA.end += block_size; + blockB.start += block_size; + + if (blockB.end > B.end - block_size) { + blockB.end = B.end; + } else { + blockB.end += block_size; + } + } + } + } + + // merge the last A block with the remaining B values + if (lastA.length() <= cache.len) { + mergeExternal(T, items, lastA, Range.init(lastA.end, B.end), cache[0..], context, lessThan); + } else if (buffer2.length() > 0) { + mergeInternal(T, items, lastA, Range.init(lastA.end, B.end), buffer2, context, lessThan); + } else { + mergeInPlace(T, items, lastA, Range.init(lastA.end, B.end), context, lessThan); + } + } + } + + // when we're finished with this merge step we should have the one + // or two internal buffers left over, where the second buffer is all jumbled up + // insertion sort the second buffer, then redistribute the buffers + // back into the items using the opposite process used for creating the buffer + + // while an unstable sort like quicksort could be applied here, in benchmarks + // it was consistently slightly slower than a simple insertion sort, + // even for tens of millions of items. 
this may be because insertion + // sort is quite fast when the data is already somewhat sorted, like it is here + sort.insertion(T, items[buffer2.start..buffer2.end], context, lessThan); + + pull_index = 0; + while (pull_index < 2) : (pull_index += 1) { + var unique = pull[pull_index].count * 2; + if (pull[pull_index].from > pull[pull_index].to) { + // the values were pulled out to the left, so redistribute them back to the right + var buffer = Range.init(pull[pull_index].range.start, pull[pull_index].range.start + pull[pull_index].count); + while (buffer.length() > 0) { + index = findFirstForward(T, items, items[buffer.start], Range.init(buffer.end, pull[pull_index].range.end), unique, context, lessThan); + const amount = index - buffer.end; + mem.rotate(T, items[buffer.start..index], buffer.length()); + buffer.start += (amount + 1); + buffer.end += amount; + unique -= 2; + } + } else if (pull[pull_index].from < pull[pull_index].to) { + // the values were pulled out to the right, so redistribute them back to the left + var buffer = Range.init(pull[pull_index].range.end - pull[pull_index].count, pull[pull_index].range.end); + while (buffer.length() > 0) { + index = findLastBackward(T, items, items[buffer.end - 1], Range.init(pull[pull_index].range.start, buffer.start), unique, context, lessThan); + const amount = buffer.start - index; + mem.rotate(T, items[index..buffer.end], amount); + buffer.start -= amount; + buffer.end -= (amount + 1); + unique -= 2; + } + } + } + } + + // double the size of each A and B subarray that will be merged in the next level + if (!iterator.nextLevel()) break; + } +} +// merge operation without a buffer +fn mergeInPlace( + comptime T: type, + items: []T, + A_arg: Range, + B_arg: Range, + context: anytype, + comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, +) void { + if (A_arg.length() == 0 or B_arg.length() == 0) return; + + // this just repeatedly binary searches into B and rotates A into position. + // the paper suggests using the 'rotation-based Hwang and Lin algorithm' here, + // but I decided to stick with this because it had better situational performance + // + // (Hwang and Lin is designed for merging subarrays of very different sizes, + // but WikiSort almost always uses subarrays that are roughly the same size) + // + // normally this is incredibly suboptimal, but this function is only called + // when none of the A or B blocks in any subarray contained 2√A unique values, + // which places a hard limit on the number of times this will ACTUALLY need + // to binary search and rotate. + // + // according to my analysis the worst case is √A rotations performed on √A items + // once the constant factors are removed, which ends up being O(n) + // + // again, this is NOT a general-purpose solution – it only works well in this case! 
+ // kind of like how the O(n^2) insertion sort is used in some places + + var A = A_arg; + var B = B_arg; + + while (true) { + // find the first place in B where the first item in A needs to be inserted + const mid = binaryFirst(T, items, items[A.start], B, context, lessThan); + + // rotate A into place + const amount = mid - A.end; + mem.rotate(T, items[A.start..mid], A.length()); + if (B.end == mid) break; + + // calculate the new A and B ranges + B.start = mid; + A = Range.init(A.start + amount, B.start); + A.start = binaryLast(T, items, items[A.start], A, context, lessThan); + if (A.length() == 0) break; + } +} + +// merge operation using an internal buffer +fn mergeInternal( + comptime T: type, + items: []T, + A: Range, + B: Range, + buffer: Range, + context: anytype, + comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, +) void { + // whenever we find a value to add to the final array, swap it with the value that's already in that spot + // when this algorithm is finished, 'buffer' will contain its original contents, but in a different order + var A_count: usize = 0; + var B_count: usize = 0; + var insert: usize = 0; + + if (B.length() > 0 and A.length() > 0) { + while (true) { + if (!lessThan(context, items[B.start + B_count], items[buffer.start + A_count])) { + mem.swap(T, &items[A.start + insert], &items[buffer.start + A_count]); + A_count += 1; + insert += 1; + if (A_count >= A.length()) break; + } else { + mem.swap(T, &items[A.start + insert], &items[B.start + B_count]); + B_count += 1; + insert += 1; + if (B_count >= B.length()) break; + } + } + } + + // swap the remainder of A into the final array + blockSwap(T, items, buffer.start + A_count, A.start + insert, A.length() - A_count); +} + +fn blockSwap(comptime T: type, items: []T, start1: usize, start2: usize, block_size: usize) void { + var index: usize = 0; + while (index < block_size) : (index += 1) { + mem.swap(T, &items[start1 + index], &items[start2 + index]); + } +} + +// combine a linear search with a binary search to reduce the number of comparisons in situations +// where have some idea as to how many unique values there are and where the next value might be +fn findFirstForward( + comptime T: type, + items: []T, + value: T, + range: Range, + unique: usize, + context: anytype, + comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, +) usize { + if (range.length() == 0) return range.start; + const skip = math.max(range.length() / unique, @as(usize, 1)); + + var index = range.start + skip; + while (lessThan(context, items[index - 1], value)) : (index += skip) { + if (index >= range.end - skip) { + return binaryFirst(T, items, value, Range.init(index, range.end), context, lessThan); + } + } + + return binaryFirst(T, items, value, Range.init(index - skip, index), context, lessThan); +} + +fn findFirstBackward( + comptime T: type, + items: []T, + value: T, + range: Range, + unique: usize, + context: anytype, + comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, +) usize { + if (range.length() == 0) return range.start; + const skip = math.max(range.length() / unique, @as(usize, 1)); + + var index = range.end - skip; + while (index > range.start and !lessThan(context, items[index - 1], value)) : (index -= skip) { + if (index < range.start + skip) { + return binaryFirst(T, items, value, Range.init(range.start, index), context, lessThan); + } + } + + return binaryFirst(T, items, value, Range.init(index, index + skip), context, lessThan); +} + +fn findLastForward( + comptime T: type, + items: 
[]T, + value: T, + range: Range, + unique: usize, + context: anytype, + comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, +) usize { + if (range.length() == 0) return range.start; + const skip = math.max(range.length() / unique, @as(usize, 1)); + + var index = range.start + skip; + while (!lessThan(context, value, items[index - 1])) : (index += skip) { + if (index >= range.end - skip) { + return binaryLast(T, items, value, Range.init(index, range.end), context, lessThan); + } + } + + return binaryLast(T, items, value, Range.init(index - skip, index), context, lessThan); +} + +fn findLastBackward( + comptime T: type, + items: []T, + value: T, + range: Range, + unique: usize, + context: anytype, + comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, +) usize { + if (range.length() == 0) return range.start; + const skip = math.max(range.length() / unique, @as(usize, 1)); + + var index = range.end - skip; + while (index > range.start and lessThan(context, value, items[index - 1])) : (index -= skip) { + if (index < range.start + skip) { + return binaryLast(T, items, value, Range.init(range.start, index), context, lessThan); + } + } + + return binaryLast(T, items, value, Range.init(index, index + skip), context, lessThan); +} + +fn binaryFirst( + comptime T: type, + items: []T, + value: T, + range: Range, + context: anytype, + comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, +) usize { + var curr = range.start; + var size = range.length(); + if (range.start >= range.end) return range.end; + while (size > 0) { + const offset = size % 2; + + size /= 2; + const mid_item = items[curr + size]; + if (lessThan(context, mid_item, value)) { + curr += size + offset; + } + } + return curr; +} + +fn binaryLast( + comptime T: type, + items: []T, + value: T, + range: Range, + context: anytype, + comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, +) usize { + var curr = range.start; + var size = range.length(); + if (range.start >= range.end) return range.end; + while (size > 0) { + const offset = size % 2; + + size /= 2; + const mid_item = items[curr + size]; + if (!lessThan(context, value, mid_item)) { + curr += size + offset; + } + } + return curr; +} + +fn mergeInto( + comptime T: type, + from: []T, + A: Range, + B: Range, + into: []T, + context: anytype, + comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, +) void { + var A_index: usize = A.start; + var B_index: usize = B.start; + const A_last = A.end; + const B_last = B.end; + var insert_index: usize = 0; + + while (true) { + if (!lessThan(context, from[B_index], from[A_index])) { + into[insert_index] = from[A_index]; + A_index += 1; + insert_index += 1; + if (A_index == A_last) { + // copy the remainder of B into the final array + const from_b = from[B_index..B_last]; + @memcpy(into[insert_index..][0..from_b.len], from_b); + break; + } + } else { + into[insert_index] = from[B_index]; + B_index += 1; + insert_index += 1; + if (B_index == B_last) { + // copy the remainder of A into the final array + const from_a = from[A_index..A_last]; + @memcpy(into[insert_index..][0..from_a.len], from_a); + break; + } + } + } +} + +fn mergeExternal( + comptime T: type, + items: []T, + A: Range, + B: Range, + cache: []T, + context: anytype, + comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, +) void { + // A fits into the cache, so use that instead of the internal buffer + var A_index: usize = 0; + var B_index: usize = B.start; + var insert_index: usize = A.start; + const A_last = A.length(); + 
const B_last = B.end; + + if (B.length() > 0 and A.length() > 0) { + while (true) { + if (!lessThan(context, items[B_index], cache[A_index])) { + items[insert_index] = cache[A_index]; + A_index += 1; + insert_index += 1; + if (A_index == A_last) break; + } else { + items[insert_index] = items[B_index]; + B_index += 1; + insert_index += 1; + if (B_index == B_last) break; + } + } + } + + // copy the remainder of A into the final array + const cache_a = cache[A_index..A_last]; + @memcpy(items[insert_index..][0..cache_a.len], cache_a); +} + +fn swap( + comptime T: type, + items: []T, + order: *[8]u8, + x: usize, + y: usize, + context: anytype, + comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, +) void { + if (lessThan(context, items[y], items[x]) or ((order.*)[x] > (order.*)[y] and !lessThan(context, items[x], items[y]))) { + mem.swap(T, &items[x], &items[y]); + mem.swap(u8, &(order.*)[x], &(order.*)[y]); + } +} diff --git a/lib/std/sort/pdq.zig b/lib/std/sort/pdq.zig new file mode 100644 index 0000000000..e7042b0c76 --- /dev/null +++ b/lib/std/sort/pdq.zig @@ -0,0 +1,331 @@ +const std = @import("../std.zig"); +const sort = std.sort; +const mem = std.mem; +const math = std.math; +const testing = std.testing; + +/// Unstable in-place sort. n best case, n*log(n) worst case and average case. +/// log(n) memory (no allocator required). +/// +/// Sorts in ascending order with respect to the given `lessThan` function. +pub fn pdq( + comptime T: type, + items: []T, + context: anytype, + comptime lessThanFn: fn (context: @TypeOf(context), lhs: T, rhs: T) bool, +) void { + const Context = struct { + items: []T, + sub_ctx: @TypeOf(context), + + pub fn lessThan(ctx: @This(), a: usize, b: usize) bool { + return lessThanFn(ctx.sub_ctx, ctx.items[a], ctx.items[b]); + } + + pub fn swap(ctx: @This(), a: usize, b: usize) void { + return mem.swap(T, &ctx.items[a], &ctx.items[b]); + } + }; + pdqContext(0, items.len, Context{ .items = items, .sub_ctx = context }); +} + +const Hint = enum { + increasing, + decreasing, + unknown, +}; + +/// Unstable in-place sort. O(n) best case, O(n*log(n)) worst case and average case. +/// O(log(n)) memory (no allocator required). +/// +/// Sorts in ascending order with respect to the given `lessThan` function. +pub fn pdqContext(a: usize, b: usize, context: anytype) void { + // slices of up to this length get sorted using insertion sort. + const max_insertion = 24; + // number of allowed imbalanced partitions before switching to heap sort. + const max_limit = std.math.floorPowerOfTwo(usize, b) + 1; + + // set upper bound on stack memory usage. + const Range = struct { a: usize, b: usize, limit: usize }; + const stack_size = math.log2(math.maxInt(usize) + 1); + var stack: [stack_size]Range = undefined; + var range = Range{ .a = a, .b = b, .limit = max_limit }; + var top: usize = 0; + + while (true) { + var was_balanced = true; + var was_partitioned = true; + + while (true) { + const len = range.b - range.a; + + // very short slices get sorted using insertion sort. + if (len <= max_insertion) { + break sort.insertionContext(range.a, range.b, context); + } + + // if too many bad pivot choices were made, simply fall back to heapsort in order to + // guarantee O(n*log(n)) worst-case. + if (range.limit == 0) { + break sort.heapContext(range.a, range.b, context); + } + + // if the last partitioning was imbalanced, try breaking patterns in the slice by shuffling + // some elements around. Hopefully we'll choose a better pivot this time. 
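+            // (this is the "pattern-defeating" part of pdqsort: a few pseudo-random
+            // swaps are enough to disturb inputs that keep producing bad pivots)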
+            if (!was_balanced) {
+                breakPatterns(range.a, range.b, context);
+                range.limit -= 1;
+            }
+
+            // choose a pivot and try guessing whether the slice is already sorted.
+            var pivot: usize = 0;
+            var hint = chosePivot(range.a, range.b, &pivot, context);
+
+            if (hint == .decreasing) {
+                // The maximum number of swaps was performed, so items are likely
+                // in reverse order. Reverse it to make sorting faster.
+                reverseRange(range.a, range.b, context);
+                pivot = (range.b - 1) - (pivot - range.a);
+                hint = .increasing;
+            }
+
+            // if the last partitioning was decently balanced and didn't shuffle elements, and if pivot
+            // selection predicts the slice is likely already sorted...
+            if (was_balanced and was_partitioned and hint == .increasing) {
+                // try identifying several out-of-order elements and shifting them to correct
+                // positions. If the slice ends up being completely sorted, we're done.
+                if (partialInsertionSort(range.a, range.b, context)) break;
+            }
+
+            // if the chosen pivot is equal to the predecessor, then it's the smallest element in the
+            // slice. Partition the slice into elements equal to and elements greater than the pivot.
+            // This case is usually hit when the slice contains many duplicate elements.
+            if (range.a > 0 and !context.lessThan(range.a - 1, pivot)) {
+                range.a = partitionEqual(range.a, range.b, pivot, context);
+                continue;
+            }
+
+            // partition the slice.
+            var mid = pivot;
+            was_partitioned = partition(range.a, range.b, &mid, context);
+
+            const left_len = mid - range.a;
+            const right_len = range.b - mid;
+            const balanced_threshold = len / 8;
+            if (left_len < right_len) {
+                was_balanced = left_len >= balanced_threshold;
+                stack[top] = .{ .a = range.a, .b = mid, .limit = range.limit };
+                top += 1;
+                range.a = mid + 1;
+            } else {
+                was_balanced = right_len >= balanced_threshold;
+                stack[top] = .{ .a = mid + 1, .b = range.b, .limit = range.limit };
+                top += 1;
+                range.b = mid;
+            }
+        }
+
+        top = math.sub(usize, top, 1) catch break;
+        range = stack[top];
+    }
+}
+
+/// partitions `items[a..b]` into elements smaller than `items[pivot]`,
+/// followed by elements greater than or equal to `items[pivot]`.
+///
+/// sets the new pivot.
+/// returns `true` if already partitioned.
+fn partition(a: usize, b: usize, pivot: *usize, context: anytype) bool {
+    // move pivot to the first place
+    context.swap(a, pivot.*);
+
+    var i = a + 1;
+    var j = b - 1;
+
+    while (i <= j and context.lessThan(i, a)) i += 1;
+    while (i <= j and !context.lessThan(j, a)) j -= 1;
+
+    // check if items are already partitioned (no item to swap)
+    if (i > j) {
+        // put pivot back to the middle
+        context.swap(j, a);
+        pivot.* = j;
+        return true;
+    }
+
+    context.swap(i, j);
+    i += 1;
+    j -= 1;
+
+    while (true) {
+        while (i <= j and context.lessThan(i, a)) i += 1;
+        while (i <= j and !context.lessThan(j, a)) j -= 1;
+        if (i > j) break;
+
+        context.swap(i, j);
+        i += 1;
+        j -= 1;
+    }
+
+    // TODO: Enable the BlockQuicksort optimization
+
+    context.swap(j, a);
+    pivot.* = j;
+    return false;
+}
+
+/// partitions items into elements equal to `items[pivot]`
+/// followed by elements greater than `items[pivot]`.
+///
+/// it is assumed that `items[a..b]` does not contain elements smaller than `items[pivot]`.
+fn partitionEqual(a: usize, b: usize, pivot: usize, context: anytype) usize {
+    // move pivot to the first place
+    context.swap(a, pivot);
+
+    var i = a + 1;
+    var j = b - 1;
+
+    while (true) {
+        while (i <= j and !context.lessThan(a, i)) i += 1;
+        while (i <= j and context.lessThan(a, j)) j -= 1;
+        if (i > j) break;
+
+        context.swap(i, j);
+        i += 1;
+        j -= 1;
+    }
+
+    return i;
+}
+
+/// partially sorts a slice by shifting several out-of-order elements around.
+///
+/// returns `true` if the slice is sorted at the end. This function is `O(n)` worst-case.
+fn partialInsertionSort(a: usize, b: usize, context: anytype) bool {
+    @setCold(true);
+
+    // maximum number of adjacent out-of-order pairs that will get shifted
+    const max_steps = 5;
+    // if the slice is shorter than this, don't shift any elements
+    const shortest_shifting = 50;
+
+    var i = a + 1;
+    for (0..max_steps) |_| {
+        // find the next pair of adjacent out-of-order elements.
+        while (i < b and !context.lessThan(i, i - 1)) i += 1;
+
+        // are we done?
+        if (i == b) return true;
+
+        // don't shift elements on short arrays; that has a performance cost.
+        if (b - a < shortest_shifting) return false;
+
+        // swap the found pair of elements. This puts them in correct order.
+        context.swap(i, i - 1);
+
+        // shift the smaller element to the left.
+        if (i - a >= 2) {
+            var j = i - 1;
+            while (j >= 1) : (j -= 1) {
+                if (!context.lessThan(j, j - 1)) break;
+                context.swap(j, j - 1);
+            }
+        }
+
+        // shift the greater element to the right.
+        if (b - i >= 2) {
+            var j = i + 1;
+            while (j < b) : (j += 1) {
+                if (!context.lessThan(j, j - 1)) break;
+                context.swap(j, j - 1);
+            }
+        }
+    }
+
+    return false;
+}
+
+fn breakPatterns(a: usize, b: usize, context: anytype) void {
+    @setCold(true);
+
+    const len = b - a;
+    if (len < 8) return;
+
+    var rand = @intCast(u64, len);
+    const modulus = math.ceilPowerOfTwoAssert(u64, len);
+
+    var i = a + (len / 4) * 2 - 1;
+    while (i <= a + (len / 4) * 2 + 1) : (i += 1) {
+        // xorshift64
+        rand ^= rand << 13;
+        rand ^= rand >> 7;
+        rand ^= rand << 17;
+
+        var other = @intCast(usize, rand & (modulus - 1));
+        if (other >= len) other -= len;
+        context.swap(i, a + other);
+    }
+}
+
+/// chooses a pivot in `items[a..b]` and returns a hint about whether
+/// `items[a..b]` is likely already sorted in increasing or decreasing order.
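+///
+/// for slices of at least 50 elements this uses Tukey's "ninther": the median
+/// of three medians-of-three. the number of swaps needed while finding the
+/// median doubles as the hint: zero swaps suggests increasing order, and the
+/// maximum (12) suggests decreasing order.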
+fn chosePivot(a: usize, b: usize, pivot: *usize, context: anytype) Hint { + // minimum length for using the Tukey's ninther method + const shortest_ninther = 50; + // max_swaps is the maximum number of swaps allowed in this function + const max_swaps = 4 * 3; + + var len = b - a; + var i = a + len / 4 * 1; + var j = a + len / 4 * 2; + var k = a + len / 4 * 3; + var swaps: usize = 0; + + if (len >= 8) { + if (len >= shortest_ninther) { + // find medians in the neighborhoods of `i`, `j` and `k` + i = sort3(i - 1, i, i + 1, &swaps, context); + j = sort3(j - 1, j, j + 1, &swaps, context); + k = sort3(k - 1, k, k + 1, &swaps, context); + } + + // find the median among `i`, `j` and `k` + j = sort3(i, j, k, &swaps, context); + } + + pivot.* = j; + return switch (swaps) { + 0 => .increasing, + max_swaps => .decreasing, + else => .unknown, + }; +} + +fn sort3(a: usize, b: usize, c: usize, swaps: *usize, context: anytype) usize { + if (context.lessThan(b, a)) { + swaps.* += 1; + context.swap(b, a); + } + + if (context.lessThan(c, b)) { + swaps.* += 1; + context.swap(c, b); + } + + if (context.lessThan(b, a)) { + swaps.* += 1; + context.swap(b, a); + } + + return b; +} + +fn reverseRange(a: usize, b: usize, context: anytype) void { + var i = a; + var j = b - 1; + while (i < j) { + context.swap(i, j); + i += 1; + j -= 1; + } +} -- cgit v1.2.3 From 700ea694b293565ececb7571e4d9613d2c143ca6 Mon Sep 17 00:00:00 2001 From: Niles Salter Date: Tue, 13 Jun 2023 14:55:58 -0600 Subject: Fix pdqSort+heapSort for ranges besides 0..len (#15982) --- lib/std/sort.zig | 76 +++++++++++++++++++++++++++++++++++++++++++--------- lib/std/sort/pdq.zig | 18 ++++++------- 2 files changed, 71 insertions(+), 23 deletions(-) (limited to 'lib/std/sort') diff --git a/lib/std/sort.zig b/lib/std/sort.zig index bf2bf40f89..813bad0741 100644 --- a/lib/std/sort.zig +++ b/lib/std/sort.zig @@ -74,29 +74,29 @@ pub fn heap( /// Sorts in ascending order with respect to the given `lessThan` function. pub fn heapContext(a: usize, b: usize, context: anytype) void { // build the heap in linear time. - var i = b / 2; - while (i > a) : (i -= 1) { - siftDown(i - 1, b, context); + var i = a + (b - a) / 2; + while (i > a) { + i -= 1; + siftDown(a, i, b, context); } // pop maximal elements from the heap. i = b; - while (i > a) : (i -= 1) { - context.swap(a, i - 1); - siftDown(a, i - 1, context); + while (i > a) { + i -= 1; + context.swap(a, i); + siftDown(a, a, i, context); } } -fn siftDown(root: usize, n: usize, context: anytype) void { +fn siftDown(a: usize, root: usize, n: usize, context: anytype) void { var node = root; while (true) { - var child = 2 * node + 1; + var child = a + 2 * (node - a) + 1; if (child >= n) break; // choose the greater child. - if (child + 1 < n and context.lessThan(child, child + 1)) { - child += 1; - } + child += @boolToInt(child + 1 < n and context.lessThan(child, child + 1)); // stop if the invariant holds at `node`. 
if (!context.lessThan(node, child)) break; @@ -138,6 +138,13 @@ const sort_funcs = &[_]fn (comptime type, anytype, anytype, comptime anytype) vo heap, }; +const context_sort_funcs = &[_]fn (usize, usize, anytype) void{ + // blockContext, + pdqContext, + insertionContext, + heapContext, +}; + const IdAndValue = struct { id: usize, value: i32, @@ -248,11 +255,15 @@ test "sort" { &[_]i32{ 2, 1, 3 }, &[_]i32{ 1, 2, 3 }, }, + &[_][]const i32{ + &[_]i32{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 55, 32, 39, 58, 21, 88, 43, 22, 59 }, + &[_]i32{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 21, 22, 32, 39, 43, 55, 58, 59, 88 }, + }, }; inline for (sort_funcs) |sortFn| { for (u8cases) |case| { - var buf: [8]u8 = undefined; + var buf: [20]u8 = undefined; const slice = buf[0..case[0].len]; @memcpy(slice, case[0]); sortFn(u8, slice, {}, asc_u8); @@ -260,7 +271,7 @@ test "sort" { } for (i32cases) |case| { - var buf: [8]i32 = undefined; + var buf: [20]i32 = undefined; const slice = buf[0..case[0].len]; @memcpy(slice, case[0]); sortFn(i32, slice, {}, asc_i32); @@ -308,6 +319,45 @@ test "sort descending" { } } +test "sort with context in the middle of a slice" { + const Context = struct { + items: []i32, + + pub fn lessThan(ctx: @This(), a: usize, b: usize) bool { + return ctx.items[a] < ctx.items[b]; + } + + pub fn swap(ctx: @This(), a: usize, b: usize) void { + return mem.swap(i32, &ctx.items[a], &ctx.items[b]); + } + }; + + const i32cases = [_][]const []const i32{ + &[_][]const i32{ + &[_]i32{ 0, 1, 8, 3, 6, 5, 4, 2, 9, 7, 10, 55, 32, 39, 58, 21, 88, 43, 22, 59 }, + &[_]i32{ 50, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 21, 22, 32, 39, 43, 55, 58, 59, 88 }, + }, + }; + + const ranges = [_]struct { start: usize, end: usize }{ + .{ .start = 10, .end = 20 }, + .{ .start = 1, .end = 11 }, + .{ .start = 3, .end = 7 }, + }; + + inline for (context_sort_funcs) |sortFn| { + for (i32cases) |case| { + for (ranges) |range| { + var buf: [20]i32 = undefined; + const slice = buf[0..case[0].len]; + @memcpy(slice, case[0]); + sortFn(range.start, range.end, Context{ .items = slice }); + try testing.expectEqualSlices(i32, slice[range.start..range.end], case[1][range.start..range.end]); + } + } + } +} + test "sort fuzz testing" { var prng = std.rand.DefaultPrng.init(0x12345678); const random = prng.random(); diff --git a/lib/std/sort/pdq.zig b/lib/std/sort/pdq.zig index e7042b0c76..23678a79c6 100644 --- a/lib/std/sort/pdq.zig +++ b/lib/std/sort/pdq.zig @@ -43,7 +43,7 @@ pub fn pdqContext(a: usize, b: usize, context: anytype) void { // slices of up to this length get sorted using insertion sort. const max_insertion = 24; // number of allowed imbalanced partitions before switching to heap sort. - const max_limit = std.math.floorPowerOfTwo(usize, b) + 1; + const max_limit = std.math.floorPowerOfTwo(usize, b - a) + 1; // set upper bound on stack memory usage. const Range = struct { a: usize, b: usize, limit: usize }; @@ -100,7 +100,7 @@ pub fn pdqContext(a: usize, b: usize, context: anytype) void { // if the chosen pivot is equal to the predecessor, then it's the smallest element in the // slice. Partition the slice into elements equal to and elements greater than the pivot. // This case is usually hit when the slice contains many duplicate elements. 
- if (range.a > 0 and !context.lessThan(range.a - 1, pivot)) { + if (range.a > a and !context.lessThan(range.a - 1, pivot)) { range.a = partitionEqual(range.a, range.b, pivot, context); continue; } @@ -284,13 +284,13 @@ fn chosePivot(a: usize, b: usize, pivot: *usize, context: anytype) Hint { if (len >= 8) { if (len >= shortest_ninther) { // find medians in the neighborhoods of `i`, `j` and `k` - i = sort3(i - 1, i, i + 1, &swaps, context); - j = sort3(j - 1, j, j + 1, &swaps, context); - k = sort3(k - 1, k, k + 1, &swaps, context); + sort3(i - 1, i, i + 1, &swaps, context); + sort3(j - 1, j, j + 1, &swaps, context); + sort3(k - 1, k, k + 1, &swaps, context); } - // find the median among `i`, `j` and `k` - j = sort3(i, j, k, &swaps, context); + // find the median among `i`, `j` and `k` and stores it in `j` + sort3(i, j, k, &swaps, context); } pivot.* = j; @@ -301,7 +301,7 @@ fn chosePivot(a: usize, b: usize, pivot: *usize, context: anytype) Hint { }; } -fn sort3(a: usize, b: usize, c: usize, swaps: *usize, context: anytype) usize { +fn sort3(a: usize, b: usize, c: usize, swaps: *usize, context: anytype) void { if (context.lessThan(b, a)) { swaps.* += 1; context.swap(b, a); @@ -316,8 +316,6 @@ fn sort3(a: usize, b: usize, c: usize, swaps: *usize, context: anytype) usize { swaps.* += 1; context.swap(b, a); } - - return b; } fn reverseRange(a: usize, b: usize, context: anytype) void { -- cgit v1.2.3 From 259315606827620daaabf82b479e59ee710097cd Mon Sep 17 00:00:00 2001 From: r00ster91 Date: Fri, 2 Jun 2023 22:02:45 -0400 Subject: migration: std.math.{min, min3, max, max3} -> `@min` & `@max` --- doc/docgen.zig | 2 +- lib/compiler_rt/divc3.zig | 3 +- lib/compiler_rt/emutls.zig | 4 +- lib/std/Build/Cache/DepTokenizer.zig | 2 +- lib/std/Thread.zig | 6 +- lib/std/Uri.zig | 4 +- lib/std/array_hash_map.zig | 6 +- lib/std/ascii.zig | 2 +- lib/std/compress/lzma/decode.zig | 2 +- lib/std/crypto/blake3.zig | 8 +-- lib/std/crypto/ff.zig | 2 +- lib/std/crypto/ghash_polyval.zig | 2 +- lib/std/crypto/keccak_p.zig | 4 +- lib/std/crypto/poly1305.zig | 2 +- lib/std/crypto/salsa20.zig | 2 +- lib/std/crypto/scrypt.zig | 4 +- lib/std/crypto/sha3.zig | 2 +- lib/std/crypto/siphash.zig | 2 +- lib/std/debug.zig | 4 +- lib/std/dynamic_library.zig | 3 +- lib/std/event/loop.zig | 2 +- lib/std/fifo.zig | 2 +- lib/std/fmt.zig | 18 +++--- lib/std/hash/wyhash.zig | 2 +- lib/std/hash_map.zig | 6 +- lib/std/heap/arena_allocator.zig | 2 +- lib/std/heap/memory_pool.zig | 4 +- lib/std/http/protocol.zig | 2 +- lib/std/io/fixed_buffer_stream.zig | 4 +- lib/std/io/limited_reader.zig | 2 +- lib/std/io/reader.zig | 2 +- lib/std/io/writer.zig | 2 +- lib/std/math.zig | 103 +++----------------------------- lib/std/math/big/int.zig | 96 ++++++++++++++--------------- lib/std/math/ldexp.zig | 2 +- lib/std/mem.zig | 12 ++-- lib/std/net.zig | 8 +-- lib/std/os/linux.zig | 4 +- lib/std/os/linux/io_uring.zig | 4 +- lib/std/os/windows.zig | 4 +- lib/std/pdb.zig | 2 +- lib/std/rand.zig | 2 +- lib/std/sort/block.zig | 10 ++-- lib/std/zig/render.zig | 4 +- lib/std/zig/system/NativeTargetInfo.zig | 6 +- src/Sema.zig | 10 ++-- src/TypedValue.zig | 10 ++-- src/arch/x86_64/CodeGen.zig | 4 +- src/link/Elf.zig | 2 +- src/link/MachO/CodeSignature.zig | 6 +- src/link/MachO/Object.zig | 2 +- src/link/Wasm.zig | 2 +- src/link/Wasm/Object.zig | 2 +- src/main.zig | 2 +- src/translate_c.zig | 2 +- src/translate_c/ast.zig | 14 ++--- src/type.zig | 2 +- src/value.zig | 8 +-- 58 files changed, 173 insertions(+), 264 deletions(-) (limited to 'lib/std/sort') diff 
--git a/doc/docgen.zig b/doc/docgen.zig index bdbde6f5d2..4a9e33fbdd 100644 --- a/doc/docgen.zig +++ b/doc/docgen.zig @@ -276,7 +276,7 @@ fn parseError(tokenizer: *Tokenizer, token: Token, comptime fmt: []const u8, arg } } { - const caret_count = std.math.min(token.end, loc.line_end) - token.start; + const caret_count = @min(token.end, loc.line_end) - token.start; var i: usize = 0; while (i < caret_count) : (i += 1) { print("~", .{}); diff --git a/lib/compiler_rt/divc3.zig b/lib/compiler_rt/divc3.zig index 4e4dba2856..c4241c1483 100644 --- a/lib/compiler_rt/divc3.zig +++ b/lib/compiler_rt/divc3.zig @@ -3,7 +3,6 @@ const isNan = std.math.isNan; const isInf = std.math.isInf; const scalbn = std.math.scalbn; const ilogb = std.math.ilogb; -const max = std.math.max; const fabs = std.math.fabs; const maxInt = std.math.maxInt; const minInt = std.math.minInt; @@ -17,7 +16,7 @@ pub inline fn divc3(comptime T: type, a: T, b: T, c_in: T, d_in: T) Complex(T) { var d = d_in; // logbw used to prevent under/over-flow - const logbw = ilogb(max(fabs(c), fabs(d))); + const logbw = ilogb(@max(fabs(c), fabs(d))); const logbw_finite = logbw != maxInt(i32) and logbw != minInt(i32); const ilogbw = if (logbw_finite) b: { c = scalbn(c, -logbw); diff --git a/lib/compiler_rt/emutls.zig b/lib/compiler_rt/emutls.zig index 05a2de97a8..47c71efadd 100644 --- a/lib/compiler_rt/emutls.zig +++ b/lib/compiler_rt/emutls.zig @@ -49,7 +49,7 @@ const simple_allocator = struct { /// Allocate a memory chunk. pub fn advancedAlloc(alignment: u29, size: usize) [*]u8 { - const minimal_alignment = std.math.max(@alignOf(usize), alignment); + const minimal_alignment = @max(@alignOf(usize), alignment); var aligned_ptr: ?*anyopaque = undefined; if (std.c.posix_memalign(&aligned_ptr, minimal_alignment, size) != 0) { @@ -170,7 +170,7 @@ const current_thread_storage = struct { // make it to contains at least 16 objects (to avoid too much // reallocation at startup). - const size = std.math.max(16, index); + const size = @max(16, index); // create a new array and store it. var array: *ObjectArray = ObjectArray.init(size); diff --git a/lib/std/Build/Cache/DepTokenizer.zig b/lib/std/Build/Cache/DepTokenizer.zig index 1a4e2ddb74..0e5224edc0 100644 --- a/lib/std/Build/Cache/DepTokenizer.zig +++ b/lib/std/Build/Cache/DepTokenizer.zig @@ -983,7 +983,7 @@ fn hexDump(out: anytype, bytes: []const u8) !void { try printDecValue(out, offset, 8); try out.writeAll(":"); try out.writeAll(" "); - var end1 = std.math.min(offset + n, offset + 8); + var end1 = @min(offset + n, offset + 8); for (bytes[offset..end1]) |b| { try out.writeAll(" "); try printHexValue(out, b, 2); diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig index ed6a9383e3..76650a9072 100644 --- a/lib/std/Thread.zig +++ b/lib/std/Thread.zig @@ -541,7 +541,7 @@ const WindowsThreadImpl = struct { // Going lower makes it default to that specified in the executable (~1mb). // Its also fine if the limit here is incorrect as stack size is only a hint. var stack_size = std.math.cast(u32, config.stack_size) orelse std.math.maxInt(u32); - stack_size = std.math.max(64 * 1024, stack_size); + stack_size = @max(64 * 1024, stack_size); instance.thread.thread_handle = windows.kernel32.CreateThread( null, @@ -690,7 +690,7 @@ const PosixThreadImpl = struct { defer assert(c.pthread_attr_destroy(&attr) == .SUCCESS); // Use the same set of parameters used by the libc-less impl. 
- const stack_size = std.math.max(config.stack_size, c.PTHREAD_STACK_MIN); + const stack_size = @max(config.stack_size, c.PTHREAD_STACK_MIN); assert(c.pthread_attr_setstacksize(&attr, stack_size) == .SUCCESS); assert(c.pthread_attr_setguardsize(&attr, std.mem.page_size) == .SUCCESS); @@ -930,7 +930,7 @@ const LinuxThreadImpl = struct { var bytes: usize = page_size; guard_offset = bytes; - bytes += std.math.max(page_size, config.stack_size); + bytes += @max(page_size, config.stack_size); bytes = std.mem.alignForward(bytes, page_size); stack_offset = bytes; diff --git a/lib/std/Uri.zig b/lib/std/Uri.zig index 7a9755bd28..198ab461ae 100644 --- a/lib/std/Uri.zig +++ b/lib/std/Uri.zig @@ -177,13 +177,13 @@ pub fn parseWithoutScheme(text: []const u8) ParseError!Uri { if (std.mem.lastIndexOf(u8, authority, ":")) |index| { if (index >= end_of_host) { // if not part of the V6 address field - end_of_host = std.math.min(end_of_host, index); + end_of_host = @min(end_of_host, index); uri.port = std.fmt.parseInt(u16, authority[index + 1 ..], 10) catch return error.InvalidPort; } } } else if (std.mem.lastIndexOf(u8, authority, ":")) |index| { if (index >= start_of_host) { // if not part of the userinfo field - end_of_host = std.math.min(end_of_host, index); + end_of_host = @min(end_of_host, index); uri.port = std.fmt.parseInt(u16, authority[index + 1 ..], 10) catch return error.InvalidPort; } } diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig index 55b9aac6e4..b46b5c12f0 100644 --- a/lib/std/array_hash_map.zig +++ b/lib/std/array_hash_map.zig @@ -815,9 +815,9 @@ pub fn ArrayHashMapUnmanaged( /// no longer guaranteed that no allocations will be performed. pub fn capacity(self: Self) usize { const entry_cap = self.entries.capacity; - const header = self.index_header orelse return math.min(linear_scan_max, entry_cap); + const header = self.index_header orelse return @min(linear_scan_max, entry_cap); const indexes_cap = header.capacity(); - return math.min(entry_cap, indexes_cap); + return @min(entry_cap, indexes_cap); } /// Clobbers any existing data. To detect if a put would clobber @@ -1821,7 +1821,7 @@ fn Index(comptime I: type) type { /// length * the size of an Index(u32). The index is 8 bytes (3 bits repr) /// and max_usize + 1 is not representable, so we need to subtract out 4 bits. const max_representable_index_len = @bitSizeOf(usize) - 4; -const max_bit_index = math.min(32, max_representable_index_len); +const max_bit_index = @min(32, max_representable_index_len); const min_bit_index = 5; const max_capacity = (1 << max_bit_index) - 1; const index_capacities = blk: { diff --git a/lib/std/ascii.zig b/lib/std/ascii.zig index 941f398f20..e47ef4db65 100644 --- a/lib/std/ascii.zig +++ b/lib/std/ascii.zig @@ -422,7 +422,7 @@ test "indexOfIgnoreCase" { /// Returns the lexicographical order of two slices. O(n). 
pub fn orderIgnoreCase(lhs: []const u8, rhs: []const u8) std.math.Order { - const n = std.math.min(lhs.len, rhs.len); + const n = @min(lhs.len, rhs.len); var i: usize = 0; while (i < n) : (i += 1) { switch (std.math.order(toLower(lhs[i]), toLower(rhs[i]))) { diff --git a/lib/std/compress/lzma/decode.zig b/lib/std/compress/lzma/decode.zig index dc220d8e87..f539abf8b1 100644 --- a/lib/std/compress/lzma/decode.zig +++ b/lib/std/compress/lzma/decode.zig @@ -59,7 +59,7 @@ pub const Params = struct { const pb = @intCast(u3, props); const dict_size_provided = try reader.readIntLittle(u32); - const dict_size = math.max(0x1000, dict_size_provided); + const dict_size = @max(0x1000, dict_size_provided); const unpacked_size = switch (options.unpacked_size) { .read_from_header => blk: { diff --git a/lib/std/crypto/blake3.zig b/lib/std/crypto/blake3.zig index fb580fda13..7ad1511e79 100644 --- a/lib/std/crypto/blake3.zig +++ b/lib/std/crypto/blake3.zig @@ -20,7 +20,7 @@ const ChunkIterator = struct { } fn next(self: *ChunkIterator) ?[]u8 { - const next_chunk = self.slice[0..math.min(self.chunk_len, self.slice.len)]; + const next_chunk = self.slice[0..@min(self.chunk_len, self.slice.len)]; self.slice = self.slice[next_chunk.len..]; return if (next_chunk.len > 0) next_chunk else null; } @@ -283,7 +283,7 @@ const ChunkState = struct { fn fillBlockBuf(self: *ChunkState, input: []const u8) []const u8 { const want = BLOCK_LEN - self.block_len; - const take = math.min(want, input.len); + const take = @min(want, input.len); @memcpy(self.block[self.block_len..][0..take], input[0..take]); self.block_len += @truncate(u8, take); return input[take..]; @@ -450,7 +450,7 @@ pub const Blake3 = struct { // Compress input bytes into the current chunk state. const want = CHUNK_LEN - self.chunk_state.len(); - const take = math.min(want, input.len); + const take = @min(want, input.len); self.chunk_state.update(input[0..take]); input = input[take..]; } @@ -663,7 +663,7 @@ fn testBlake3(hasher: *Blake3, input_len: usize, expected_hex: [262]u8) !void { // Write repeating input pattern to hasher var input_counter = input_len; while (input_counter > 0) { - const update_len = math.min(input_counter, input_pattern.len); + const update_len = @min(input_counter, input_pattern.len); hasher.update(input_pattern[0..update_len]); input_counter -= update_len; } diff --git a/lib/std/crypto/ff.zig b/lib/std/crypto/ff.zig index 84753ddefb..37e3d1c1b3 100644 --- a/lib/std/crypto/ff.zig +++ b/lib/std/crypto/ff.zig @@ -570,7 +570,7 @@ pub fn Modulus(comptime max_bits: comptime_int) type { var out = self.zero; var i = x.limbs_count() - 1; if (self.limbs_count() >= 2) { - const start = math.min(i, self.limbs_count() - 2); + const start = @min(i, self.limbs_count() - 2); var j = start; while (true) : (j -= 1) { out.v.limbs.set(j, x.limbs.get(i)); diff --git a/lib/std/crypto/ghash_polyval.zig b/lib/std/crypto/ghash_polyval.zig index 46645d710f..2fbff25f72 100644 --- a/lib/std/crypto/ghash_polyval.zig +++ b/lib/std/crypto/ghash_polyval.zig @@ -363,7 +363,7 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type { var mb = m; if (st.leftover > 0) { - const want = math.min(block_length - st.leftover, mb.len); + const want = @min(block_length - st.leftover, mb.len); const mc = mb[0..want]; for (mc, 0..) 
|x, i| { st.buf[st.leftover + i] = x; diff --git a/lib/std/crypto/keccak_p.zig b/lib/std/crypto/keccak_p.zig index 9226f2f6d4..ddc9b1b847 100644 --- a/lib/std/crypto/keccak_p.zig +++ b/lib/std/crypto/keccak_p.zig @@ -214,7 +214,7 @@ pub fn State(comptime f: u11, comptime capacity: u11, comptime delim: u8, compti pub fn absorb(self: *Self, bytes_: []const u8) void { var bytes = bytes_; if (self.offset > 0) { - const left = math.min(rate - self.offset, bytes.len); + const left = @min(rate - self.offset, bytes.len); @memcpy(self.buf[self.offset..][0..left], bytes[0..left]); self.offset += left; if (self.offset == rate) { @@ -249,7 +249,7 @@ pub fn State(comptime f: u11, comptime capacity: u11, comptime delim: u8, compti pub fn squeeze(self: *Self, out: []u8) void { var i: usize = 0; while (i < out.len) : (i += rate) { - const left = math.min(rate, out.len - i); + const left = @min(rate, out.len - i); self.st.extractBytes(out[i..][0..left]); self.st.permuteR(rounds); } diff --git a/lib/std/crypto/poly1305.zig b/lib/std/crypto/poly1305.zig index a2873f1145..51e1c2ab24 100644 --- a/lib/std/crypto/poly1305.zig +++ b/lib/std/crypto/poly1305.zig @@ -112,7 +112,7 @@ pub const Poly1305 = struct { // handle leftover if (st.leftover > 0) { - const want = std.math.min(block_length - st.leftover, mb.len); + const want = @min(block_length - st.leftover, mb.len); const mc = mb[0..want]; for (mc, 0..) |x, i| { st.buf[st.leftover + i] = x; diff --git a/lib/std/crypto/salsa20.zig b/lib/std/crypto/salsa20.zig index 7f57e6cecb..c8a639ad0b 100644 --- a/lib/std/crypto/salsa20.zig +++ b/lib/std/crypto/salsa20.zig @@ -404,7 +404,7 @@ pub const XSalsa20Poly1305 = struct { debug.assert(c.len == m.len); const extended = extend(rounds, k, npub); var block0 = [_]u8{0} ** 64; - const mlen0 = math.min(32, c.len); + const mlen0 = @min(32, c.len); @memcpy(block0[32..][0..mlen0], c[0..mlen0]); Salsa20.xor(block0[0..], block0[0..], 0, extended.key, extended.nonce); var mac = Poly1305.init(block0[0..32]); diff --git a/lib/std/crypto/scrypt.zig b/lib/std/crypto/scrypt.zig index b8e8ef55e2..97dd9b95d0 100644 --- a/lib/std/crypto/scrypt.zig +++ b/lib/std/crypto/scrypt.zig @@ -143,7 +143,7 @@ pub const Params = struct { /// Create parameters from ops and mem limits, where mem_limit given in bytes pub fn fromLimits(ops_limit: u64, mem_limit: usize) Self { - const ops = math.max(32768, ops_limit); + const ops = @max(32768, ops_limit); const r: u30 = 8; if (ops < mem_limit / 32) { const max_n = ops / (r * 4); @@ -151,7 +151,7 @@ pub const Params = struct { } else { const max_n = mem_limit / (@intCast(usize, r) * 128); const ln = @intCast(u6, math.log2(max_n)); - const max_rp = math.min(0x3fffffff, (ops / 4) / (@as(u64, 1) << ln)); + const max_rp = @min(0x3fffffff, (ops / 4) / (@as(u64, 1) << ln)); return Self{ .r = r, .p = @intCast(u30, max_rp / @as(u64, r)), .ln = ln }; } } diff --git a/lib/std/crypto/sha3.zig b/lib/std/crypto/sha3.zig index 23f9e65534..0226490881 100644 --- a/lib/std/crypto/sha3.zig +++ b/lib/std/crypto/sha3.zig @@ -148,7 +148,7 @@ fn ShakeLike(comptime security_level: u11, comptime delim: u8, comptime rounds: if (self.offset > 0) { const left = self.buf.len - self.offset; if (left > 0) { - const n = math.min(left, out.len); + const n = @min(left, out.len); @memcpy(out[0..n], self.buf[self.offset..][0..n]); out = out[n..]; self.offset += n; diff --git a/lib/std/crypto/siphash.zig b/lib/std/crypto/siphash.zig index 37d219f868..70f4f2fd53 100644 --- a/lib/std/crypto/siphash.zig +++ b/lib/std/crypto/siphash.zig @@ 
-433,7 +433,7 @@ test "iterative non-divisible update" { var siphash = Siphash.init(key); var i: usize = 0; while (i < end) : (i += 7) { - siphash.update(buf[i..std.math.min(i + 7, end)]); + siphash.update(buf[i..@min(i + 7, end)]); } const iterative_hash = siphash.finalInt(); diff --git a/lib/std/debug.zig b/lib/std/debug.zig index ea0d467085..3015c30bfb 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -198,7 +198,7 @@ pub fn captureStackTrace(first_address: ?usize, stack_trace: *std.builtin.StackT stack_trace.index = 0; return; }; - const end_index = math.min(first_index + addrs.len, n); + const end_index = @min(first_index + addrs.len, n); const slice = addr_buf[first_index..end_index]; // We use a for loop here because slice and addrs may alias. for (slice, 0..) |addr, i| { @@ -380,7 +380,7 @@ pub fn writeStackTrace( _ = allocator; if (builtin.strip_debug_info) return error.MissingDebugInfo; var frame_index: usize = 0; - var frames_left: usize = std.math.min(stack_trace.index, stack_trace.instruction_addresses.len); + var frames_left: usize = @min(stack_trace.index, stack_trace.instruction_addresses.len); while (frames_left != 0) : ({ frames_left -= 1; diff --git a/lib/std/dynamic_library.zig b/lib/std/dynamic_library.zig index 59ad7429cf..94da2f4d6d 100644 --- a/lib/std/dynamic_library.zig +++ b/lib/std/dynamic_library.zig @@ -8,7 +8,6 @@ const elf = std.elf; const windows = std.os.windows; const system = std.os.system; const maxInt = std.math.maxInt; -const max = std.math.max; pub const DynLib = switch (builtin.os.tag) { .linux => if (builtin.link_libc) DlDynlib else ElfDynLib, @@ -152,7 +151,7 @@ pub const ElfDynLib = struct { }) { const ph = @intToPtr(*elf.Phdr, ph_addr); switch (ph.p_type) { - elf.PT_LOAD => virt_addr_end = max(virt_addr_end, ph.p_vaddr + ph.p_memsz), + elf.PT_LOAD => virt_addr_end = @max(virt_addr_end, ph.p_vaddr + ph.p_memsz), elf.PT_DYNAMIC => maybe_dynv = @intToPtr([*]usize, elf_addr + ph.p_offset), else => {}, } diff --git a/lib/std/event/loop.zig b/lib/std/event/loop.zig index c8d41d3eb0..bc0162423b 100644 --- a/lib/std/event/loop.zig +++ b/lib/std/event/loop.zig @@ -179,7 +179,7 @@ pub const Loop = struct { // We need at least one of these in case the fs thread wants to use onNextTick const extra_thread_count = thread_count - 1; - const resume_node_count = std.math.max(extra_thread_count, 1); + const resume_node_count = @max(extra_thread_count, 1); self.eventfd_resume_nodes = try self.arena.allocator().alloc( std.atomic.Stack(ResumeNode.EventFd).Node, resume_node_count, diff --git a/lib/std/fifo.zig b/lib/std/fifo.zig index bc88e61d76..535376d38f 100644 --- a/lib/std/fifo.zig +++ b/lib/std/fifo.zig @@ -150,7 +150,7 @@ pub fn LinearFifo( start -= self.buf.len; return self.buf[start .. 
start + (self.count - offset)]; } else { - const end = math.min(self.head + self.count, self.buf.len); + const end = @min(self.head + self.count, self.buf.len); return self.buf[start..end]; } } diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig index 6896d0a7a0..c9d8e611ca 100644 --- a/lib/std/fmt.zig +++ b/lib/std/fmt.zig @@ -921,8 +921,8 @@ fn formatSizeImpl(comptime base: comptime_int) type { const log2 = math.log2(value); const magnitude = switch (base) { - 1000 => math.min(log2 / comptime math.log2(1000), mags_si.len - 1), - 1024 => math.min(log2 / 10, mags_iec.len - 1), + 1000 => @min(log2 / comptime math.log2(1000), mags_si.len - 1), + 1024 => @min(log2 / 10, mags_iec.len - 1), else => unreachable, }; const new_value = lossyCast(f64, value) / math.pow(f64, lossyCast(f64, base), lossyCast(f64, magnitude)); @@ -1103,7 +1103,7 @@ pub fn formatFloatScientific( var printed: usize = 0; if (float_decimal.digits.len > 1) { - const num_digits = math.min(float_decimal.digits.len, precision + 1); + const num_digits = @min(float_decimal.digits.len, precision + 1); try writer.writeAll(float_decimal.digits[1..num_digits]); printed += num_digits - 1; } @@ -1116,7 +1116,7 @@ pub fn formatFloatScientific( try writer.writeAll(float_decimal.digits[0..1]); try writer.writeAll("."); if (float_decimal.digits.len > 1) { - const num_digits = if (@TypeOf(value) == f32) math.min(@as(usize, 9), float_decimal.digits.len) else float_decimal.digits.len; + const num_digits = if (@TypeOf(value) == f32) @min(@as(usize, 9), float_decimal.digits.len) else float_decimal.digits.len; try writer.writeAll(float_decimal.digits[1..num_digits]); } else { @@ -1299,7 +1299,7 @@ pub fn formatFloatDecimal( var num_digits_whole = if (float_decimal.exp > 0) @intCast(usize, float_decimal.exp) else 0; // the actual slice into the buffer, we may need to zero-pad between num_digits_whole and this. - var num_digits_whole_no_pad = math.min(num_digits_whole, float_decimal.digits.len); + var num_digits_whole_no_pad = @min(num_digits_whole, float_decimal.digits.len); if (num_digits_whole > 0) { // We may have to zero pad, for instance 1e4 requires zero padding. @@ -1326,7 +1326,7 @@ pub fn formatFloatDecimal( // Zero-fill until we reach significant digits or run out of precision. if (float_decimal.exp <= 0) { const zero_digit_count = @intCast(usize, -float_decimal.exp); - const zeros_to_print = math.min(zero_digit_count, precision); + const zeros_to_print = @min(zero_digit_count, precision); var i: usize = 0; while (i < zeros_to_print) : (i += 1) { @@ -1357,7 +1357,7 @@ pub fn formatFloatDecimal( var num_digits_whole = if (float_decimal.exp > 0) @intCast(usize, float_decimal.exp) else 0; // the actual slice into the buffer, we may need to zero-pad between num_digits_whole and this. - var num_digits_whole_no_pad = math.min(num_digits_whole, float_decimal.digits.len); + var num_digits_whole_no_pad = @min(num_digits_whole, float_decimal.digits.len); if (num_digits_whole > 0) { // We may have to zero pad, for instance 1e4 requires zero padding. 
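The formatInt hunk just below is one place in this migration where a plain swap was not enough: the buffer length also gains an `@as(comptime_int, ...)` around `value_info.bits`. `@max` takes its result type from its operands, so `1 + @max(value_info.bits, 1)` would do the addition in `u16` (the type of `value_info.bits`) and, for the widest integer types Zig allows, overflow at comptime; hoisting the operand to `comptime_int` is presumably why the cast was added. A standalone sketch of the difference (illustrative, not part of the patch):

    const std = @import("std");

    comptime {
        const bits: u16 = std.math.maxInt(u16); // widest integer bit count Zig allows
        // comptime_int arithmetic is unbounded, so the `1 +` cannot overflow:
        const len = 1 + @max(@as(comptime_int, bits), 1);
        std.debug.assert(len == 65536);
        // By contrast, `1 + @max(bits, 1)` would be evaluated as u16 and
        // fail to compile with an overflow error when bits == 65535.
    }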
@@ -1410,12 +1410,12 @@ pub fn formatInt( // The type must have the same size as `base` or be wider in order for the // division to work - const min_int_bits = comptime math.max(value_info.bits, 8); + const min_int_bits = comptime @max(value_info.bits, 8); const MinInt = std.meta.Int(.unsigned, min_int_bits); const abs_value = math.absCast(int_value); // The worst case in terms of space needed is base 2, plus 1 for the sign - var buf: [1 + math.max(value_info.bits, 1)]u8 = undefined; + var buf: [1 + @max(@as(comptime_int, value_info.bits), 1)]u8 = undefined; var a: MinInt = abs_value; var index: usize = buf.len; diff --git a/lib/std/hash/wyhash.zig b/lib/std/hash/wyhash.zig index 3426bca9f4..c36c3fe87c 100644 --- a/lib/std/hash/wyhash.zig +++ b/lib/std/hash/wyhash.zig @@ -252,7 +252,7 @@ test "iterative non-divisible update" { var wy = Wyhash.init(seed); var i: usize = 0; while (i < end) : (i += 33) { - wy.update(buf[i..std.math.min(i + 33, end)]); + wy.update(buf[i..@min(i + 33, end)]); } const iterative_hash = wy.final(); diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig index 041d99606e..5b539ddaad 100644 --- a/lib/std/hash_map.zig +++ b/lib/std/hash_map.zig @@ -1507,7 +1507,7 @@ pub fn HashMapUnmanaged( fn grow(self: *Self, allocator: Allocator, new_capacity: Size, ctx: Context) Allocator.Error!void { @setCold(true); - const new_cap = std.math.max(new_capacity, minimal_capacity); + const new_cap = @max(new_capacity, minimal_capacity); assert(new_cap > self.capacity()); assert(std.math.isPowerOfTwo(new_cap)); @@ -1540,7 +1540,7 @@ pub fn HashMapUnmanaged( const header_align = @alignOf(Header); const key_align = if (@sizeOf(K) == 0) 1 else @alignOf(K); const val_align = if (@sizeOf(V) == 0) 1 else @alignOf(V); - const max_align = comptime math.max3(header_align, key_align, val_align); + const max_align = comptime @max(header_align, key_align, val_align); const meta_size = @sizeOf(Header) + new_capacity * @sizeOf(Metadata); comptime assert(@alignOf(Metadata) == 1); @@ -1575,7 +1575,7 @@ pub fn HashMapUnmanaged( const header_align = @alignOf(Header); const key_align = if (@sizeOf(K) == 0) 1 else @alignOf(K); const val_align = if (@sizeOf(V) == 0) 1 else @alignOf(V); - const max_align = comptime math.max3(header_align, key_align, val_align); + const max_align = comptime @max(header_align, key_align, val_align); const cap = self.capacity(); const meta_size = @sizeOf(Header) + cap * @sizeOf(Metadata); diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig index c0eeae6e61..c7e0569067 100644 --- a/lib/std/heap/arena_allocator.zig +++ b/lib/std/heap/arena_allocator.zig @@ -110,7 +110,7 @@ pub const ArenaAllocator = struct { // value. const requested_capacity = switch (mode) { .retain_capacity => self.queryCapacity(), - .retain_with_limit => |limit| std.math.min(limit, self.queryCapacity()), + .retain_with_limit => |limit| @min(limit, self.queryCapacity()), .free_all => 0, }; if (requested_capacity == 0) { diff --git a/lib/std/heap/memory_pool.zig b/lib/std/heap/memory_pool.zig index ca6eb7f518..3fc7dfbfca 100644 --- a/lib/std/heap/memory_pool.zig +++ b/lib/std/heap/memory_pool.zig @@ -40,11 +40,11 @@ pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type /// Size of the memory pool items. This is not necessarily the same /// as `@sizeOf(Item)` as the pool also uses the items for internal means. 
- pub const item_size = std.math.max(@sizeOf(Node), @sizeOf(Item)); + pub const item_size = @max(@sizeOf(Node), @sizeOf(Item)); /// Alignment of the memory pool items. This is not necessarily the same /// as `@alignOf(Item)` as the pool also uses the items for internal means. - pub const item_alignment = std.math.max(@alignOf(Node), pool_options.alignment orelse 0); + pub const item_alignment = @max(@alignOf(Node), pool_options.alignment orelse 0); const Node = struct { next: ?*@This(), diff --git a/lib/std/http/protocol.zig b/lib/std/http/protocol.zig index b001b3cddf..b5c2cdfa0c 100644 --- a/lib/std/http/protocol.zig +++ b/lib/std/http/protocol.zig @@ -82,7 +82,7 @@ pub const HeadersParser = struct { /// If the amount returned is less than `bytes.len`, you may assume that the parser is in a content state and the /// first byte of content is located at `bytes[result]`. pub fn findHeadersEnd(r: *HeadersParser, bytes: []const u8) u32 { - const vector_len: comptime_int = comptime std.math.max(std.simd.suggestVectorSize(u8) orelse 1, 8); + const vector_len: comptime_int = comptime @max(std.simd.suggestVectorSize(u8) orelse 1, 8); const len = @intCast(u32, bytes.len); var index: u32 = 0; diff --git a/lib/std/io/fixed_buffer_stream.zig b/lib/std/io/fixed_buffer_stream.zig index c170dd1f74..27b978744c 100644 --- a/lib/std/io/fixed_buffer_stream.zig +++ b/lib/std/io/fixed_buffer_stream.zig @@ -76,7 +76,7 @@ pub fn FixedBufferStream(comptime Buffer: type) type { } pub fn seekTo(self: *Self, pos: u64) SeekError!void { - self.pos = if (std.math.cast(usize, pos)) |x| std.math.min(self.buffer.len, x) else self.buffer.len; + self.pos = if (std.math.cast(usize, pos)) |x| @min(self.buffer.len, x) else self.buffer.len; } pub fn seekBy(self: *Self, amt: i64) SeekError!void { @@ -91,7 +91,7 @@ pub fn FixedBufferStream(comptime Buffer: type) type { } else { const amt_usize = std.math.cast(usize, amt) orelse std.math.maxInt(usize); const new_pos = std.math.add(usize, self.pos, amt_usize) catch std.math.maxInt(usize); - self.pos = std.math.min(self.buffer.len, new_pos); + self.pos = @min(self.buffer.len, new_pos); } } diff --git a/lib/std/io/limited_reader.zig b/lib/std/io/limited_reader.zig index aa00af0d09..09d76007da 100644 --- a/lib/std/io/limited_reader.zig +++ b/lib/std/io/limited_reader.zig @@ -14,7 +14,7 @@ pub fn LimitedReader(comptime ReaderType: type) type { const Self = @This(); pub fn read(self: *Self, dest: []u8) Error!usize { - const max_read = std.math.min(self.bytes_left, dest.len); + const max_read = @min(self.bytes_left, dest.len); const n = try self.inner_reader.read(dest[0..max_read]); self.bytes_left -= n; return n; diff --git a/lib/std/io/reader.zig b/lib/std/io/reader.zig index 344515d07b..abdca56d3c 100644 --- a/lib/std/io/reader.zig +++ b/lib/std/io/reader.zig @@ -325,7 +325,7 @@ pub fn Reader( var remaining = num_bytes; while (remaining > 0) { - const amt = std.math.min(remaining, options.buf_size); + const amt = @min(remaining, options.buf_size); try self.readNoEof(buf[0..amt]); remaining -= amt; } diff --git a/lib/std/io/writer.zig b/lib/std/io/writer.zig index cfc76de452..d0b7fa11ee 100644 --- a/lib/std/io/writer.zig +++ b/lib/std/io/writer.zig @@ -39,7 +39,7 @@ pub fn Writer( var remaining: usize = n; while (remaining > 0) { - const to_write = std.math.min(remaining, bytes.len); + const to_write = @min(remaining, bytes.len); try self.writeAll(bytes[0..to_write]); remaining -= to_write; } diff --git a/lib/std/math.zig b/lib/std/math.zig index 46a7e40a37..e60e964747 100644 --- 
a/lib/std/math.zig +++ b/lib/std/math.zig @@ -165,7 +165,7 @@ pub fn approxEqRel(comptime T: type, x: T, y: T, tolerance: T) bool { if (isNan(x) or isNan(y)) return false; - return @fabs(x - y) <= max(@fabs(x), @fabs(y)) * tolerance; + return @fabs(x - y) <= @max(@fabs(x), @fabs(y)) * tolerance; } test "approxEqAbs and approxEqRel" { @@ -434,104 +434,15 @@ pub fn Min(comptime A: type, comptime B: type) type { return @TypeOf(@as(A, 0) + @as(B, 0)); } -/// Returns the smaller number. When one parameter's type's full range -/// fits in the other, the return type is the smaller type. -pub fn min(x: anytype, y: anytype) Min(@TypeOf(x), @TypeOf(y)) { - const Result = Min(@TypeOf(x), @TypeOf(y)); - if (x < y) { - // TODO Zig should allow this as an implicit cast because x is - // immutable and in this scope it is known to fit in the - // return type. - switch (@typeInfo(Result)) { - .Int => return @intCast(Result, x), - else => return x, - } - } else { - // TODO Zig should allow this as an implicit cast because y is - // immutable and in this scope it is known to fit in the - // return type. - switch (@typeInfo(Result)) { - .Int => return @intCast(Result, y), - else => return y, - } - } -} - -test "min" { - try testing.expect(min(@as(i32, -1), @as(i32, 2)) == -1); - { - var a: u16 = 999; - var b: u32 = 10; - var result = min(a, b); - try testing.expect(@TypeOf(result) == u16); - try testing.expect(result == 10); - } - { - var a: f64 = 10.34; - var b: f32 = 999.12; - var result = min(a, b); - try testing.expect(@TypeOf(result) == f64); - try testing.expect(result == 10.34); - } - { - var a: i8 = -127; - var b: i16 = -200; - var result = min(a, b); - try testing.expect(@TypeOf(result) == i16); - try testing.expect(result == -200); - } - { - const a = 10.34; - var b: f32 = 999.12; - var result = min(a, b); - try testing.expect(@TypeOf(result) == f32); - try testing.expect(result == 10.34); - } -} - -/// Finds the minimum of three numbers. -pub fn min3(x: anytype, y: anytype, z: anytype) @TypeOf(x, y, z) { - return min(x, min(y, z)); -} - -test "min3" { - try testing.expect(min3(@as(i32, 0), @as(i32, 1), @as(i32, 2)) == 0); - try testing.expect(min3(@as(i32, 0), @as(i32, 2), @as(i32, 1)) == 0); - try testing.expect(min3(@as(i32, 1), @as(i32, 0), @as(i32, 2)) == 0); - try testing.expect(min3(@as(i32, 1), @as(i32, 2), @as(i32, 0)) == 0); - try testing.expect(min3(@as(i32, 2), @as(i32, 0), @as(i32, 1)) == 0); - try testing.expect(min3(@as(i32, 2), @as(i32, 1), @as(i32, 0)) == 0); -} - -/// Returns the maximum of two numbers. Return type is the one with the -/// larger range. -pub fn max(x: anytype, y: anytype) @TypeOf(x, y) { - return if (x > y) x else y; -} - -test "max" { - try testing.expect(max(@as(i32, -1), @as(i32, 2)) == 2); - try testing.expect(max(@as(i32, 2), @as(i32, -1)) == 2); -} - -/// Finds the maximum of three numbers. 
-pub fn max3(x: anytype, y: anytype, z: anytype) @TypeOf(x, y, z) { - return max(x, max(y, z)); -} - -test "max3" { - try testing.expect(max3(@as(i32, 0), @as(i32, 1), @as(i32, 2)) == 2); - try testing.expect(max3(@as(i32, 0), @as(i32, 2), @as(i32, 1)) == 2); - try testing.expect(max3(@as(i32, 1), @as(i32, 0), @as(i32, 2)) == 2); - try testing.expect(max3(@as(i32, 1), @as(i32, 2), @as(i32, 0)) == 2); - try testing.expect(max3(@as(i32, 2), @as(i32, 0), @as(i32, 1)) == 2); - try testing.expect(max3(@as(i32, 2), @as(i32, 1), @as(i32, 0)) == 2); -} +pub const min = @compileError("deprecated; use @min instead"); +pub const max = @compileError("deprecated; use @max instead"); +pub const min3 = @compileError("deprecated; use @min instead"); +pub const max3 = @compileError("deprecated; use @max instead"); /// Limit val to the inclusive range [lower, upper]. pub fn clamp(val: anytype, lower: anytype, upper: anytype) @TypeOf(val, lower, upper) { assert(lower <= upper); - return max(lower, min(val, upper)); + return @max(lower, @min(val, upper)); } test "clamp" { // Within range @@ -795,7 +706,7 @@ pub fn IntFittingRange(comptime from: comptime_int, comptime to: comptime_int) t return u0; } const signedness: std.builtin.Signedness = if (from < 0) .signed else .unsigned; - const largest_positive_integer = max(if (from < 0) (-from) - 1 else from, to); // two's complement + const largest_positive_integer = @max(if (from < 0) (-from) - 1 else from, to); // two's complement const base = log2(largest_positive_integer); const upper = (1 << base) - 1; var magnitude_bits = if (upper >= largest_positive_integer) base else base + 1; diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig index ec79d843da..487812e1de 100644 --- a/lib/std/math/big/int.zig +++ b/lib/std/math/big/int.zig @@ -44,12 +44,12 @@ pub fn calcDivLimbsBufferLen(a_len: usize, b_len: usize) usize { } pub fn calcMulLimbsBufferLen(a_len: usize, b_len: usize, aliases: usize) usize { - return aliases * math.max(a_len, b_len); + return aliases * @max(a_len, b_len); } pub fn calcMulWrapLimbsBufferLen(bit_count: usize, a_len: usize, b_len: usize, aliases: usize) usize { const req_limbs = calcTwosCompLimbCount(bit_count); - return aliases * math.min(req_limbs, math.max(a_len, b_len)); + return aliases * @min(req_limbs, @max(a_len, b_len)); } pub fn calcSetStringLimbsBufferLen(base: u8, string_len: usize) usize { @@ -396,7 +396,7 @@ pub const Mutable = struct { /// scalar is a primitive integer type. /// /// Asserts the result fits in `r`. An upper bound on the number of limbs needed by - /// r is `math.max(a.limbs.len, calcLimbLen(scalar)) + 1`. + /// r is `@max(a.limbs.len, calcLimbLen(scalar)) + 1`. pub fn addScalar(r: *Mutable, a: Const, scalar: anytype) void { // Normally we could just determine the number of limbs needed with calcLimbLen, // but that is not comptime-known when scalar is not a comptime_int. Instead, we @@ -414,11 +414,11 @@ pub const Mutable = struct { return add(r, a, operand); } - /// Base implementation for addition. Adds `max(a.limbs.len, b.limbs.len)` elements from a and b, + /// Base implementation for addition. Adds `@max(a.limbs.len, b.limbs.len)` elements from a and b, /// and returns whether any overflow occurred. /// r, a and b may be aliases. /// - /// Asserts r has enough elements to hold the result. The upper bound is `max(a.limbs.len, b.limbs.len)`. + /// Asserts r has enough elements to hold the result. The upper bound is `@max(a.limbs.len, b.limbs.len)`. 
fn addCarry(r: *Mutable, a: Const, b: Const) bool { if (a.eqZero()) { r.copy(b); @@ -452,12 +452,12 @@ pub const Mutable = struct { /// r, a and b may be aliases. /// /// Asserts the result fits in `r`. An upper bound on the number of limbs needed by - /// r is `math.max(a.limbs.len, b.limbs.len) + 1`. + /// r is `@max(a.limbs.len, b.limbs.len) + 1`. pub fn add(r: *Mutable, a: Const, b: Const) void { if (r.addCarry(a, b)) { // Fix up the result. Note that addCarry normalizes by a.limbs.len or b.limbs.len, // so we need to set the length here. - const msl = math.max(a.limbs.len, b.limbs.len); + const msl = @max(a.limbs.len, b.limbs.len); // `[add|sub]Carry` normalizes by `msl`, so we need to fix up the result manually here. // Note, the fact that it normalized means that the intermediary limbs are zero here. r.len = msl + 1; @@ -477,12 +477,12 @@ pub const Mutable = struct { // if an overflow occurred. const x = Const{ .positive = a.positive, - .limbs = a.limbs[0..math.min(req_limbs, a.limbs.len)], + .limbs = a.limbs[0..@min(req_limbs, a.limbs.len)], }; const y = Const{ .positive = b.positive, - .limbs = b.limbs[0..math.min(req_limbs, b.limbs.len)], + .limbs = b.limbs[0..@min(req_limbs, b.limbs.len)], }; var carry_truncated = false; @@ -492,7 +492,7 @@ pub const Mutable = struct { // truncate anyway. // - a and b had less elements than req_limbs, and those were overflowed. This case needs to be handled. // Note: after this we still might need to wrap. - const msl = math.max(a.limbs.len, b.limbs.len); + const msl = @max(a.limbs.len, b.limbs.len); if (msl < req_limbs) { r.limbs[msl] = 1; r.len = req_limbs; @@ -522,12 +522,12 @@ pub const Mutable = struct { // if an overflow occurred. const x = Const{ .positive = a.positive, - .limbs = a.limbs[0..math.min(req_limbs, a.limbs.len)], + .limbs = a.limbs[0..@min(req_limbs, a.limbs.len)], }; const y = Const{ .positive = b.positive, - .limbs = b.limbs[0..math.min(req_limbs, b.limbs.len)], + .limbs = b.limbs[0..@min(req_limbs, b.limbs.len)], }; if (r.addCarry(x, y)) { @@ -535,7 +535,7 @@ pub const Mutable = struct { // - We overflowed req_limbs, in which case we need to saturate. // - a and b had less elements than req_limbs, and those were overflowed. // Note: In this case, might _also_ need to saturate. - const msl = math.max(a.limbs.len, b.limbs.len); + const msl = @max(a.limbs.len, b.limbs.len); if (msl < req_limbs) { r.limbs[msl] = 1; r.len = req_limbs; @@ -550,11 +550,11 @@ pub const Mutable = struct { r.saturate(r.toConst(), signedness, bit_count); } - /// Base implementation for subtraction. Subtracts `max(a.limbs.len, b.limbs.len)` elements from a and b, + /// Base implementation for subtraction. Subtracts `@max(a.limbs.len, b.limbs.len)` elements from a and b, /// and returns whether any overflow occurred. /// r, a and b may be aliases. /// - /// Asserts r has enough elements to hold the result. The upper bound is `max(a.limbs.len, b.limbs.len)`. + /// Asserts r has enough elements to hold the result. The upper bound is `@max(a.limbs.len, b.limbs.len)`. fn subCarry(r: *Mutable, a: Const, b: Const) bool { if (a.eqZero()) { r.copy(b); @@ -607,7 +607,7 @@ pub const Mutable = struct { /// r, a and b may be aliases. /// /// Asserts the result fits in `r`. An upper bound on the number of limbs needed by - /// r is `math.max(a.limbs.len, b.limbs.len) + 1`. The +1 is not needed if both operands are positive. + /// r is `@max(a.limbs.len, b.limbs.len) + 1`. The +1 is not needed if both operands are positive. 
pub fn sub(r: *Mutable, a: Const, b: Const) void { r.add(a, b.negate()); } @@ -714,7 +714,7 @@ const a_copy = if (rma.limbs.ptr == a.limbs.ptr) blk: { const start = buf_index; - const a_len = math.min(req_limbs, a.limbs.len); + const a_len = @min(req_limbs, a.limbs.len); @memcpy(limbs_buffer[buf_index..][0..a_len], a.limbs[0..a_len]); buf_index += a_len; break :blk a.toMutable(limbs_buffer[start..buf_index]).toConst(); @@ -722,7 +722,7 @@ const b_copy = if (rma.limbs.ptr == b.limbs.ptr) blk: { const start = buf_index; - const b_len = math.min(req_limbs, b.limbs.len); + const b_len = @min(req_limbs, b.limbs.len); @memcpy(limbs_buffer[buf_index..][0..b_len], b.limbs[0..b_len]); buf_index += b_len; break :blk b.toMutable(limbs_buffer[start..buf_index]).toConst(); @@ -755,13 +755,13 @@ const req_limbs = calcTwosCompLimbCount(bit_count); // We can ignore the upper bits here, those results will be discarded anyway. - const a_limbs = a.limbs[0..math.min(req_limbs, a.limbs.len)]; - const b_limbs = b.limbs[0..math.min(req_limbs, b.limbs.len)]; + const a_limbs = a.limbs[0..@min(req_limbs, a.limbs.len)]; + const b_limbs = b.limbs[0..@min(req_limbs, b.limbs.len)]; @memset(rma.limbs[0..req_limbs], 0); llmulacc(.add, allocator, rma.limbs, a_limbs, b_limbs); - rma.normalize(math.min(req_limbs, a.limbs.len + b.limbs.len)); + rma.normalize(@min(req_limbs, a.limbs.len + b.limbs.len)); rma.positive = (a.positive == b.positive); rma.truncate(rma.toConst(), signedness, bit_count); } @@ -1211,7 +1211,7 @@ pub const Mutable = struct { /// /// a and b are zero-extended to the longer of a or b. /// - /// Asserts that r has enough limbs to store the result. Upper bound is `math.max(a.limbs.len, b.limbs.len)`. + /// Asserts that r has enough limbs to store the result. Upper bound is `@max(a.limbs.len, b.limbs.len)`. pub fn bitOr(r: *Mutable, a: Const, b: Const) void { // Trivial cases, llsignedor does not support zero. if (a.eqZero()) { @@ -1235,8 +1235,8 @@ /// r may alias with a or b. /// /// Asserts that r has enough limbs to store the result. - /// If a or b is positive, the upper bound is `math.min(a.limbs.len, b.limbs.len)`. - /// If a and b are negative, the upper bound is `math.max(a.limbs.len, b.limbs.len) + 1`. + /// If a or b is positive, the upper bound is `@min(a.limbs.len, b.limbs.len)`. + /// If a and b are negative, the upper bound is `@max(a.limbs.len, b.limbs.len) + 1`. pub fn bitAnd(r: *Mutable, a: Const, b: Const) void { // Trivial cases, llsignedand does not support zero. if (a.eqZero()) { @@ -1260,8 +1260,8 @@ /// r may alias with a or b. /// /// Asserts that r has enough limbs to store the result. If a and b share the same signedness, the - /// upper bound is `math.max(a.limbs.len, b.limbs.len)`. Otherwise, if either a or b is negative - /// but not both, the upper bound is `math.max(a.limbs.len, b.limbs.len) + 1`. + /// upper bound is `@max(a.limbs.len, b.limbs.len)`. Otherwise, if either a or b is negative + /// but not both, the upper bound is `@max(a.limbs.len, b.limbs.len) + 1`. pub fn bitXor(r: *Mutable, a: Const, b: Const) void { // Trivial cases, because llsignedxor does not support negative zero. if (a.eqZero()) { @@ -1284,7 +1284,7 @@ /// rma may alias x or y. /// x and y may alias each other. /// Asserts that `rma` has enough limbs to store the result. Upper bound is - /// `math.min(x.limbs.len, y.limbs.len)`.
+ /// `@min(x.limbs.len, y.limbs.len)`. /// /// `limbs_buffer` is used for temporary storage during the operation. When this function returns, /// it will have the same length as it had when the function was called. @@ -1546,7 +1546,7 @@ pub const Mutable = struct { if (yi != 0) break i; } else unreachable; - const xy_trailing = math.min(x_trailing, y_trailing); + const xy_trailing = @min(x_trailing, y_trailing); if (y.len - xy_trailing == 1) { const divisor = y.limbs[y.len - 1]; @@ -2589,7 +2589,7 @@ pub const Managed = struct { .allocator = allocator, .metadata = 1, .limbs = block: { - const limbs = try allocator.alloc(Limb, math.max(default_capacity, capacity)); + const limbs = try allocator.alloc(Limb, @max(default_capacity, capacity)); limbs[0] = 0; break :block limbs; }, @@ -2918,7 +2918,7 @@ pub const Managed = struct { /// /// Returns an error if memory could not be allocated. pub fn sub(r: *Managed, a: *const Managed, b: *const Managed) !void { - try r.ensureCapacity(math.max(a.len(), b.len()) + 1); + try r.ensureCapacity(@max(a.len(), b.len()) + 1); var m = r.toMutable(); m.sub(a.toConst(), b.toConst()); r.setMetadata(m.positive, m.len); @@ -3025,11 +3025,11 @@ pub const Managed = struct { } pub fn ensureAddScalarCapacity(r: *Managed, a: Const, scalar: anytype) !void { - try r.ensureCapacity(math.max(a.limbs.len, calcLimbLen(scalar)) + 1); + try r.ensureCapacity(@max(a.limbs.len, calcLimbLen(scalar)) + 1); } pub fn ensureAddCapacity(r: *Managed, a: Const, b: Const) !void { - try r.ensureCapacity(math.max(a.limbs.len, b.limbs.len) + 1); + try r.ensureCapacity(@max(a.limbs.len, b.limbs.len) + 1); } pub fn ensureMulCapacity(rma: *Managed, a: Const, b: Const) !void { @@ -3123,7 +3123,7 @@ pub const Managed = struct { /// /// a and b are zero-extended to the longer of a or b. pub fn bitOr(r: *Managed, a: *const Managed, b: *const Managed) !void { - try r.ensureCapacity(math.max(a.len(), b.len())); + try r.ensureCapacity(@max(a.len(), b.len())); var m = r.toMutable(); m.bitOr(a.toConst(), b.toConst()); r.setMetadata(m.positive, m.len); @@ -3132,9 +3132,9 @@ pub const Managed = struct { /// r = a & b pub fn bitAnd(r: *Managed, a: *const Managed, b: *const Managed) !void { const cap = if (a.isPositive() or b.isPositive()) - math.min(a.len(), b.len()) + @min(a.len(), b.len()) else - math.max(a.len(), b.len()) + 1; + @max(a.len(), b.len()) + 1; try r.ensureCapacity(cap); var m = r.toMutable(); m.bitAnd(a.toConst(), b.toConst()); @@ -3143,7 +3143,7 @@ pub const Managed = struct { /// r = a ^ b pub fn bitXor(r: *Managed, a: *const Managed, b: *const Managed) !void { - var cap = math.max(a.len(), b.len()) + @boolToInt(a.isPositive() != b.isPositive()); + var cap = @max(a.len(), b.len()) + @boolToInt(a.isPositive() != b.isPositive()); try r.ensureCapacity(cap); var m = r.toMutable(); @@ -3156,7 +3156,7 @@ pub const Managed = struct { /// /// rma's allocator is used for temporary storage to boost multiplication performance. pub fn gcd(rma: *Managed, x: *const Managed, y: *const Managed) !void { - try rma.ensureCapacity(math.min(x.len(), y.len())); + try rma.ensureCapacity(@min(x.len(), y.len())); var m = rma.toMutable(); var limbs_buffer = std.ArrayList(Limb).init(rma.allocator); defer limbs_buffer.deinit(); @@ -3356,13 +3356,13 @@ fn llmulaccKaratsuba( // For a1 and b1 we only need `limbs_after_split` limbs. 
const a1 = blk: { var a1 = a[split..]; - a1.len = math.min(llnormalize(a1), limbs_after_split); + a1.len = @min(llnormalize(a1), limbs_after_split); break :blk a1; }; const b1 = blk: { var b1 = b[split..]; - b1.len = math.min(llnormalize(b1), limbs_after_split); + b1.len = @min(llnormalize(b1), limbs_after_split); break :blk b1; }; @@ -3381,10 +3381,10 @@ fn llmulaccKaratsuba( // Compute p2. // Note, we don't need to compute all of p2, just enough limbs to satisfy r. - const p2_limbs = math.min(limbs_after_split, a1.len + b1.len); + const p2_limbs = @min(limbs_after_split, a1.len + b1.len); @memset(tmp[0..p2_limbs], 0); - llmulacc(.add, allocator, tmp[0..p2_limbs], a1[0..math.min(a1.len, p2_limbs)], b1[0..math.min(b1.len, p2_limbs)]); + llmulacc(.add, allocator, tmp[0..p2_limbs], a1[0..@min(a1.len, p2_limbs)], b1[0..@min(b1.len, p2_limbs)]); const p2 = tmp[0..llnormalize(tmp[0..p2_limbs])]; // Add p2 * B to the result. @@ -3392,7 +3392,7 @@ fn llmulaccKaratsuba( // Add p2 * B^2 to the result if required. if (limbs_after_split2 > 0) { - llaccum(op, r[split * 2 ..], p2[0..math.min(p2.len, limbs_after_split2)]); + llaccum(op, r[split * 2 ..], p2[0..@min(p2.len, limbs_after_split2)]); } // Compute p0. @@ -3406,13 +3406,13 @@ fn llmulaccKaratsuba( llaccum(op, r, p0); // Add p0 * B to the result. In this case, we may not need all of it. - llaccum(op, r[split..], p0[0..math.min(limbs_after_split, p0.len)]); + llaccum(op, r[split..], p0[0..@min(limbs_after_split, p0.len)]); // Finally, compute and add p1. // From now on we only need `limbs_after_split` limbs for a0 and b0, since the result of the // following computation will be added * B. - const a0x = a0[0..std.math.min(a0.len, limbs_after_split)]; - const b0x = b0[0..std.math.min(b0.len, limbs_after_split)]; + const a0x = a0[0..@min(a0.len, limbs_after_split)]; + const b0x = b0[0..@min(b0.len, limbs_after_split)]; const j0_sign = llcmp(a0x, a1); const j1_sign = llcmp(b1, b0x); @@ -3544,7 +3544,7 @@ fn llmulLimb(comptime op: AccOp, acc: []Limb, y: []const Limb, xi: Limb) bool { return false; } - const split = std.math.min(y.len, acc.len); + const split = @min(y.len, acc.len); var a_lo = acc[0..split]; var a_hi = acc[split..]; @@ -4023,8 +4023,8 @@ fn llsignedand(r: []Limb, a: []const Limb, a_positive: bool, b: []const Limb, b_ // r may alias. // a and b must not be -0. // Returns `true` when the result is positive. -// If the sign of a and b is equal, then r requires at least `max(a.len, b.len)` limbs are required. -// Otherwise, r requires at least `max(a.len, b.len) + 1` limbs. +// If the sign of a and b is equal, then r requires at least `@max(a.len, b.len)` limbs. +// Otherwise, r requires at least `@max(a.len, b.len) + 1` limbs. fn llsignedxor(r: []Limb, a: []const Limb, a_positive: bool, b: []const Limb, b_positive: bool) bool { @setRuntimeSafety(debug_safety); assert(a.len != 0 and b.len != 0); diff --git a/lib/std/math/ldexp.zig b/lib/std/math/ldexp.zig index d2fd8db9b7..8947475159 100644 --- a/lib/std/math/ldexp.zig +++ b/lib/std/math/ldexp.zig @@ -48,7 +48,7 @@ pub fn ldexp(x: anytype, n: i32) @TypeOf(x) { return @bitCast(T, sign_bit); // Severe underflow.
Return +/- 0 // Result underflowed, we need to shift and round - const shift = @intCast(Log2Int(TBits), math.min(-n, -(exponent + n) + 1)); + const shift = @intCast(Log2Int(TBits), @min(-n, -(exponent + n) + 1)); const exact_tie: bool = @ctz(repr) == shift - 1; var result = repr & mantissa_mask; diff --git a/lib/std/mem.zig b/lib/std/mem.zig index c4ad708887..2f34745a64 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -596,7 +596,7 @@ pub fn sortUnstableContext(a: usize, b: usize, context: anytype) void { /// Compares two slices of numbers lexicographically. O(n). pub fn order(comptime T: type, lhs: []const T, rhs: []const T) math.Order { - const n = math.min(lhs.len, rhs.len); + const n = @min(lhs.len, rhs.len); var i: usize = 0; while (i < n) : (i += 1) { switch (math.order(lhs[i], rhs[i])) { @@ -642,7 +642,7 @@ pub fn eql(comptime T: type, a: []const T, b: []const T) bool { /// Compares two slices and returns the index of the first inequality. /// Returns null if the slices are equal. pub fn indexOfDiff(comptime T: type, a: []const T, b: []const T) ?usize { - const shortest = math.min(a.len, b.len); + const shortest = @min(a.len, b.len); if (a.ptr == b.ptr) return if (a.len == b.len) null else shortest; var index: usize = 0; @@ -3296,7 +3296,7 @@ pub fn min(comptime T: type, slice: []const T) T { assert(slice.len > 0); var best = slice[0]; for (slice[1..]) |item| { - best = math.min(best, item); + best = @min(best, item); } return best; } @@ -3313,7 +3313,7 @@ pub fn max(comptime T: type, slice: []const T) T { assert(slice.len > 0); var best = slice[0]; for (slice[1..]) |item| { - best = math.max(best, item); + best = @max(best, item); } return best; } @@ -3332,8 +3332,8 @@ pub fn minMax(comptime T: type, slice: []const T) struct { min: T, max: T } { var minVal = slice[0]; var maxVal = slice[0]; for (slice[1..]) |item| { - minVal = math.min(minVal, item); - maxVal = math.max(maxVal, item); + minVal = @min(minVal, item); + maxVal = @max(maxVal, item); } return .{ .min = minVal, .max = maxVal }; } diff --git a/lib/std/net.zig b/lib/std/net.zig index 64b13ec544..dfd6fe4a9e 100644 --- a/lib/std/net.zig +++ b/lib/std/net.zig @@ -1482,11 +1482,11 @@ fn getResolvConf(allocator: mem.Allocator, rc: *ResolvConf) !void { error.InvalidCharacter => continue, }; if (mem.eql(u8, name, "ndots")) { - rc.ndots = std.math.min(value, 15); + rc.ndots = @min(value, 15); } else if (mem.eql(u8, name, "attempts")) { - rc.attempts = std.math.min(value, 10); + rc.attempts = @min(value, 10); } else if (mem.eql(u8, name, "timeout")) { - rc.timeout = std.math.min(value, 60); + rc.timeout = @min(value, 60); } } } else if (mem.eql(u8, token, "nameserver")) { @@ -1615,7 +1615,7 @@ fn resMSendRc( } // Wait for a response, or until time to retry - const clamped_timeout = std.math.min(@as(u31, std.math.maxInt(u31)), t1 + retry_interval - t2); + const clamped_timeout = @min(@as(u31, std.math.maxInt(u31)), t1 + retry_interval - t2); const nevents = os.poll(&pfd, clamped_timeout) catch 0; if (nevents == 0) continue; diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig index ef0ec94d3b..e4d6790505 100644 --- a/lib/std/os/linux.zig +++ b/lib/std/os/linux.zig @@ -317,7 +317,7 @@ pub fn getdents(fd: i32, dirp: [*]u8, len: usize) usize { .getdents, @bitCast(usize, @as(isize, fd)), @ptrToInt(dirp), - std.math.min(len, maxInt(c_int)), + @min(len, maxInt(c_int)), ); } @@ -326,7 +326,7 @@ pub fn getdents64(fd: i32, dirp: [*]u8, len: usize) usize { .getdents64, @bitCast(usize, @as(isize, fd)), @ptrToInt(dirp), - 
std.math.min(len, maxInt(c_int)), + @min(len, maxInt(c_int)), ); } diff --git a/lib/std/os/linux/io_uring.zig b/lib/std/os/linux/io_uring.zig index b7467d765f..0610b214d5 100644 --- a/lib/std/os/linux/io_uring.zig +++ b/lib/std/os/linux/io_uring.zig @@ -277,7 +277,7 @@ pub const IO_Uring = struct { fn copy_cqes_ready(self: *IO_Uring, cqes: []linux.io_uring_cqe, wait_nr: u32) u32 { _ = wait_nr; const ready = self.cq_ready(); - const count = std.math.min(cqes.len, ready); + const count = @min(cqes.len, ready); var head = self.cq.head.*; var tail = head +% count; // TODO Optimize this by using 1 or 2 memcpy's (if the tail wraps) rather than a loop. @@ -1093,7 +1093,7 @@ pub const SubmissionQueue = struct { pub fn init(fd: os.fd_t, p: linux.io_uring_params) !SubmissionQueue { assert(fd >= 0); assert((p.features & linux.IORING_FEAT_SINGLE_MMAP) != 0); - const size = std.math.max( + const size = @max( p.sq_off.array + p.sq_entries * @sizeOf(u32), p.cq_off.cqes + p.cq_entries * @sizeOf(linux.io_uring_cqe), ); diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig index e559e48915..389c4bea12 100644 --- a/lib/std/os/windows.zig +++ b/lib/std/os/windows.zig @@ -272,7 +272,7 @@ pub fn RtlGenRandom(output: []u8) RtlGenRandomError!void { const max_read_size: ULONG = maxInt(ULONG); while (total_read < output.len) { - const to_read: ULONG = math.min(buff.len, max_read_size); + const to_read: ULONG = @min(buff.len, max_read_size); if (advapi32.RtlGenRandom(buff.ptr, to_read) == 0) { return unexpectedError(kernel32.GetLastError()); @@ -501,7 +501,7 @@ pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64, io_mode: std.io.Mo return @as(usize, bytes_transferred); } else { while (true) { - const want_read_count = @intCast(DWORD, math.min(@as(DWORD, maxInt(DWORD)), buffer.len)); + const want_read_count: DWORD = @min(@as(DWORD, maxInt(DWORD)), buffer.len); var amt_read: DWORD = undefined; var overlapped_data: OVERLAPPED = undefined; const overlapped: ?*OVERLAPPED = if (offset) |off| blk: { diff --git a/lib/std/pdb.zig b/lib/std/pdb.zig index 5bc836b08e..180507ba71 100644 --- a/lib/std/pdb.zig +++ b/lib/std/pdb.zig @@ -1049,7 +1049,7 @@ const MsfStream = struct { var size: usize = 0; var rem_buffer = buffer; while (size < buffer.len) { - const size_to_read = math.min(self.block_size - offset, rem_buffer.len); + const size_to_read = @min(self.block_size - offset, rem_buffer.len); size += try in.read(rem_buffer[0..size_to_read]); rem_buffer = buffer[size..]; offset += size_to_read; diff --git a/lib/std/rand.zig b/lib/std/rand.zig index 1e9f4051e9..f07562c911 100644 --- a/lib/std/rand.zig +++ b/lib/std/rand.zig @@ -410,7 +410,7 @@ pub const Random = struct { r.uintLessThan(T, sum) else if (comptime std.meta.trait.isFloat(T)) // take care that imprecision doesn't lead to a value slightly greater than sum - std.math.min(r.float(T) * sum, sum - std.math.floatEps(T)) + @min(r.float(T) * sum, sum - std.math.floatEps(T)) else @compileError("weightedIndex does not support proportions of type " ++ @typeName(T)); diff --git a/lib/std/sort/block.zig b/lib/std/sort/block.zig index 6c1be9c6c2..518d148a73 100644 --- a/lib/std/sort/block.zig +++ b/lib/std/sort/block.zig @@ -590,7 +590,7 @@ pub fn block( // whenever we leave an A block behind, we'll need to merge the previous A block with any B blocks that follow it, so track that information as well var lastA = firstA; var lastB = Range.init(0, 0); - var blockB = Range.init(B.start, B.start + math.min(block_size, B.length())); + var blockB = Range.init(B.start, 
B.start + @min(block_size, B.length())); blockA.start += firstA.length(); indexA = buffer1.start; @@ -849,7 +849,7 @@ fn findFirstForward( comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, ) usize { if (range.length() == 0) return range.start; - const skip = math.max(range.length() / unique, @as(usize, 1)); + const skip = @max(range.length() / unique, @as(usize, 1)); var index = range.start + skip; while (lessThan(context, items[index - 1], value)) : (index += skip) { @@ -871,7 +871,7 @@ fn findFirstBackward( comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, ) usize { if (range.length() == 0) return range.start; - const skip = math.max(range.length() / unique, @as(usize, 1)); + const skip = @max(range.length() / unique, @as(usize, 1)); var index = range.end - skip; while (index > range.start and !lessThan(context, items[index - 1], value)) : (index -= skip) { @@ -893,7 +893,7 @@ fn findLastForward( comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, ) usize { if (range.length() == 0) return range.start; - const skip = math.max(range.length() / unique, @as(usize, 1)); + const skip = @max(range.length() / unique, @as(usize, 1)); var index = range.start + skip; while (!lessThan(context, value, items[index - 1])) : (index += skip) { @@ -915,7 +915,7 @@ fn findLastBackward( comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, ) usize { if (range.length() == 0) return range.start; - const skip = math.max(range.length() / unique, @as(usize, 1)); + const skip = @max(range.length() / unique, @as(usize, 1)); var index = range.end - skip; while (index > range.start and lessThan(context, value, items[index - 1])) : (index -= skip) { diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig index 83fa68567f..3930c9714a 100644 --- a/lib/std/zig/render.zig +++ b/lib/std/zig/render.zig @@ -1960,7 +1960,7 @@ fn renderArrayInit( if (!this_contains_newline) { const column = column_counter % row_size; - column_widths[column] = std.math.max(column_widths[column], width); + column_widths[column] = @max(column_widths[column], width); const expr_last_token = tree.lastToken(expr) + 1; const next_expr = section_exprs[i + 1]; @@ -1980,7 +1980,7 @@ fn renderArrayInit( if (!contains_newline) { const column = column_counter % row_size; - column_widths[column] = std.math.max(column_widths[column], width); + column_widths[column] = @max(column_widths[column], width); } } } diff --git a/lib/std/zig/system/NativeTargetInfo.zig b/lib/std/zig/system/NativeTargetInfo.zig index f17356fdcd..cddaea2295 100644 --- a/lib/std/zig/system/NativeTargetInfo.zig +++ b/lib/std/zig/system/NativeTargetInfo.zig @@ -503,7 +503,7 @@ fn glibcVerFromSoFile(file: fs.File) !std.builtin.Version { const shstrtab_off = elfInt(is_64, need_bswap, shstr32.sh_offset, shstr64.sh_offset); const shstrtab_size = elfInt(is_64, need_bswap, shstr32.sh_size, shstr64.sh_size); var strtab_buf: [4096:0]u8 = undefined; - const shstrtab_len = std.math.min(shstrtab_size, strtab_buf.len); + const shstrtab_len = @min(shstrtab_size, strtab_buf.len); const shstrtab_read_len = try preadMin(file, &strtab_buf, shstrtab_off, shstrtab_len); const shstrtab = strtab_buf[0..shstrtab_read_len]; const shnum = elfInt(is_64, need_bswap, hdr32.e_shnum, hdr64.e_shnum); @@ -757,7 +757,7 @@ pub fn abiAndDynamicLinkerFromFile( const shstrtab_off = elfInt(is_64, need_bswap, shstr32.sh_offset, shstr64.sh_offset); const shstrtab_size = elfInt(is_64, need_bswap, shstr32.sh_size, shstr64.sh_size); var strtab_buf: [4096:0]u8 = undefined; - 
const shstrtab_len = std.math.min(shstrtab_size, strtab_buf.len); + const shstrtab_len = @min(shstrtab_size, strtab_buf.len); const shstrtab_read_len = try preadMin(file, &strtab_buf, shstrtab_off, shstrtab_len); const shstrtab = strtab_buf[0..shstrtab_read_len]; @@ -806,7 +806,7 @@ pub fn abiAndDynamicLinkerFromFile( const rpoff_file = ds.offset + rpoff_usize; const rp_max_size = ds.size - rpoff_usize; - const strtab_len = std.math.min(rp_max_size, strtab_buf.len); + const strtab_len = @min(rp_max_size, strtab_buf.len); const strtab_read_len = try preadMin(file, &strtab_buf, rpoff_file, strtab_len); const strtab = strtab_buf[0..strtab_read_len]; diff --git a/src/Sema.zig b/src/Sema.zig index 99ebd044f9..36fe5a6ee8 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -22367,9 +22367,9 @@ fn analyzeShuffle( // to it up to the length of the longer vector. This recursion terminates // in 1 call because these calls to analyzeShuffle guarantee a_len == b_len. if (a_len != b_len) { - const min_len = std.math.min(a_len, b_len); + const min_len = @min(a_len, b_len); const max_src = if (a_len > b_len) a_src else b_src; - const max_len = try sema.usizeCast(block, max_src, std.math.max(a_len, b_len)); + const max_len = try sema.usizeCast(block, max_src, @max(a_len, b_len)); const expand_mask_values = try sema.arena.alloc(InternPool.Index, max_len); for (@intCast(usize, 0)..@intCast(usize, min_len)) |i| { @@ -31301,7 +31301,7 @@ fn cmpNumeric( } const dest_ty = if (dest_float_type) |ft| ft else blk: { - const max_bits = std.math.max(lhs_bits, rhs_bits); + const max_bits = @max(lhs_bits, rhs_bits); const casted_bits = std.math.cast(u16, max_bits) orelse return sema.fail(block, src, "{d} exceeds maximum integer bit count", .{max_bits}); const signedness: std.builtin.Signedness = if (dest_int_is_signed) .signed else .unsigned; break :blk try mod.intType(signedness, casted_bits); @@ -35828,7 +35828,7 @@ fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value { const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); const limbs = try sema.arena.alloc( std.math.big.Limb, - std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, + @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, ); var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.add(lhs_bigint, rhs_bigint); @@ -35918,7 +35918,7 @@ fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value { const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); const limbs = try sema.arena.alloc( std.math.big.Limb, - std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, + @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, ); var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.sub(lhs_bigint, rhs_bigint); diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 9d3fb67d1f..93454710dc 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -111,7 +111,7 @@ pub fn print( .val = val.castTag(.repeated).?.data, }; const len = ty.arrayLen(mod); - const max_len = std.math.min(len, max_aggregate_items); + const max_len = @min(len, max_aggregate_items); while (i < max_len) : (i += 1) { if (i != 0) try writer.writeAll(", "); try print(elem_tv, writer, level - 1, mod); @@ -130,7 +130,7 @@ pub fn print( const len = payload.len.toUnsignedInt(mod); if (elem_ty.eql(Type.u8, mod)) str: { - const max_len = @intCast(usize, std.math.min(len, max_string_len)); + const 
diff --git a/src/Sema.zig b/src/Sema.zig
index 99ebd044f9..36fe5a6ee8 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -22367,9 +22367,9 @@ fn analyzeShuffle(
     // to it up to the length of the longer vector. This recursion terminates
     // in 1 call because these calls to analyzeShuffle guarantee a_len == b_len.
     if (a_len != b_len) {
-        const min_len = std.math.min(a_len, b_len);
+        const min_len = @min(a_len, b_len);
         const max_src = if (a_len > b_len) a_src else b_src;
-        const max_len = try sema.usizeCast(block, max_src, std.math.max(a_len, b_len));
+        const max_len = try sema.usizeCast(block, max_src, @max(a_len, b_len));

         const expand_mask_values = try sema.arena.alloc(InternPool.Index, max_len);
         for (@intCast(usize, 0)..@intCast(usize, min_len)) |i| {
@@ -31301,7 +31301,7 @@ fn cmpNumeric(
     }

     const dest_ty = if (dest_float_type) |ft| ft else blk: {
-        const max_bits = std.math.max(lhs_bits, rhs_bits);
+        const max_bits = @max(lhs_bits, rhs_bits);
         const casted_bits = std.math.cast(u16, max_bits) orelse return sema.fail(block, src, "{d} exceeds maximum integer bit count", .{max_bits});
         const signedness: std.builtin.Signedness = if (dest_int_is_signed) .signed else .unsigned;
         break :blk try mod.intType(signedness, casted_bits);
@@ -35828,7 +35828,7 @@ fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value {
     const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
     const limbs = try sema.arena.alloc(
         std.math.big.Limb,
-        std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
+        @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
     );
     var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
     result_bigint.add(lhs_bigint, rhs_bigint);
@@ -35918,7 +35918,7 @@ fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value {
     const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
     const limbs = try sema.arena.alloc(
         std.math.big.Limb,
-        std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
+        @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
     );
     var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
     result_bigint.sub(lhs_bigint, rhs_bigint);
diff --git a/src/TypedValue.zig b/src/TypedValue.zig
index 9d3fb67d1f..93454710dc 100644
--- a/src/TypedValue.zig
+++ b/src/TypedValue.zig
@@ -111,7 +111,7 @@ pub fn print(
                 .val = val.castTag(.repeated).?.data,
             };
             const len = ty.arrayLen(mod);
-            const max_len = std.math.min(len, max_aggregate_items);
+            const max_len = @min(len, max_aggregate_items);
             while (i < max_len) : (i += 1) {
                 if (i != 0) try writer.writeAll(", ");
                 try print(elem_tv, writer, level - 1, mod);
@@ -130,7 +130,7 @@ pub fn print(
             const len = payload.len.toUnsignedInt(mod);

             if (elem_ty.eql(Type.u8, mod)) str: {
-                const max_len = @intCast(usize, std.math.min(len, max_string_len));
+                const max_len: usize = @min(len, max_string_len);
                 var buf: [max_string_len]u8 = undefined;

                 var i: u32 = 0;
@@ -149,7 +149,7 @@ pub fn print(

             try writer.writeAll(".{ ");

-            const max_len = std.math.min(len, max_aggregate_items);
+            const max_len = @min(len, max_aggregate_items);
             var i: u32 = 0;
             while (i < max_len) : (i += 1) {
                 if (i != 0) try writer.writeAll(", ");
@@ -455,7 +455,7 @@ fn printAggregate(
     const len = ty.arrayLen(mod);

     if (elem_ty.eql(Type.u8, mod)) str: {
-        const max_len = @intCast(usize, std.math.min(len, max_string_len));
+        const max_len: usize = @min(len, max_string_len);
         var buf: [max_string_len]u8 = undefined;

         var i: u32 = 0;
@@ -471,7 +471,7 @@ fn printAggregate(

     try writer.writeAll(".{ ");

-    const max_len = std.math.min(len, max_aggregate_items);
+    const max_len = @min(len, max_aggregate_items);
     var i: u32 = 0;
     while (i < max_len) : (i += 1) {
         if (i != 0) try writer.writeAll(", ");
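The string-printing hunks in TypedValue.zig also drop an @intCast: when one operand of @min is comptime-known, the result type is narrowed to fit that bound, so the result coerces directly to usize. A sketch of that property (names and values invented for illustration):

    const std = @import("std");

    const max_string_len = 256;

    test "@min with a comptime bound coerces without a cast" {
        const len: u64 = 100_000;
        // The result is at most max_string_len, so no explicit cast
        // from u64 is needed; std.math.min required @intCast here.
        const max_len: usize = @min(len, max_string_len);
        try std.testing.expectEqual(@as(usize, 256), max_len);
    }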
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index a1b57516ee..6d98ecce4f 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -2907,7 +2907,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
     const dst_info = dst_ty.intInfo(mod);
     const src_ty = try mod.intType(dst_info.signedness, switch (tag) {
         else => unreachable,
-        .mul, .mulwrap => math.max3(
+        .mul, .mulwrap => @max(
             self.activeIntBits(bin_op.lhs),
             self.activeIntBits(bin_op.rhs),
             dst_info.bits / 2,
@@ -3349,7 +3349,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {

         const lhs_active_bits = self.activeIntBits(bin_op.lhs);
         const rhs_active_bits = self.activeIntBits(bin_op.rhs);
-        const src_bits = math.max3(lhs_active_bits, rhs_active_bits, dst_info.bits / 2);
+        const src_bits = @max(lhs_active_bits, rhs_active_bits, dst_info.bits / 2);
         const src_ty = try mod.intType(dst_info.signedness, src_bits);

         const lhs = try self.resolveInst(bin_op.lhs);
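CodeGen.zig is the only caller that needed math.max3; unlike std.math.max, the @max builtin accepts more than two arguments, so the three-way maximum becomes a single call. For instance (bit counts invented for illustration):

    const std = @import("std");

    test "@max folds a three-way maximum into one call" {
        const lhs_active_bits: u16 = 24;
        const rhs_active_bits: u16 = 17;
        const dst_bits: u16 = 32;
        const src_bits = @max(lhs_active_bits, rhs_active_bits, dst_bits / 2);
        try std.testing.expectEqual(@as(u16, 24), src_bits);
    }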
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 409eca6e7a..0863a22fac 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -2326,7 +2326,7 @@ fn allocateAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignme
             self.debug_aranges_section_dirty = true;
         }
     }
-    shdr.sh_addralign = math.max(shdr.sh_addralign, alignment);
+    shdr.sh_addralign = @max(shdr.sh_addralign, alignment);

     // This function can also reallocate an atom.
     // In this case we need to "unplug" it from its previous location before
diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig
index 59b3e50b07..4709560ba7 100644
--- a/src/link/MachO/CodeSignature.zig
+++ b/src/link/MachO/CodeSignature.zig
@@ -99,7 +99,7 @@ const CodeDirectory = struct {

     fn addSpecialHash(self: *CodeDirectory, index: u32, hash: [hash_size]u8) void {
         assert(index > 0);
-        self.inner.nSpecialSlots = std.math.max(self.inner.nSpecialSlots, index);
+        self.inner.nSpecialSlots = @max(self.inner.nSpecialSlots, index);
         self.special_slots[index - 1] = hash;
     }

@@ -426,11 +426,11 @@ pub fn estimateSize(self: CodeSignature, file_size: u64) u32 {
     var n_special_slots: u32 = 0;
     if (self.requirements) |req| {
         ssize += @sizeOf(macho.BlobIndex) + req.size();
-        n_special_slots = std.math.max(n_special_slots, req.slotType());
+        n_special_slots = @max(n_special_slots, req.slotType());
     }
     if (self.entitlements) |ent| {
         ssize += @sizeOf(macho.BlobIndex) + ent.size() + hash_size;
-        n_special_slots = std.math.max(n_special_slots, ent.slotType());
+        n_special_slots = @max(n_special_slots, ent.slotType());
     }
     if (self.signature) |sig| {
         ssize += @sizeOf(macho.BlobIndex) + sig.size();
diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig
index b218fdbd2d..105a806075 100644
--- a/src/link/MachO/Object.zig
+++ b/src/link/MachO/Object.zig
@@ -530,7 +530,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void {
                 sect.addr + sect.size - addr;

             const atom_align = if (addr > 0)
-                math.min(@ctz(addr), sect.@"align")
+                @min(@ctz(addr), sect.@"align")
             else
                 sect.@"align";
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index fdac7dfa63..5126033995 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -2027,7 +2027,7 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void {
     };

     const segment: *Segment = &wasm.segments.items[final_index];
-    segment.alignment = std.math.max(segment.alignment, atom.alignment);
+    segment.alignment = @max(segment.alignment, atom.alignment);

     try wasm.appendAtomAtIndex(final_index, atom_index);
 }
diff --git a/src/link/Wasm/Object.zig b/src/link/Wasm/Object.zig
index 363648971a..33f54dece5 100644
--- a/src/link/Wasm/Object.zig
+++ b/src/link/Wasm/Object.zig
@@ -979,7 +979,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b

             const segment: *Wasm.Segment = &wasm_bin.segments.items[final_index];
             if (relocatable_data.type == .data) { //code section and debug sections are 1-byte aligned
-                segment.alignment = std.math.max(segment.alignment, atom.alignment);
+                segment.alignment = @max(segment.alignment, atom.alignment);
             }

             try wasm_bin.appendAtomAtIndex(final_index, atom_index);
diff --git a/src/main.zig b/src/main.zig
index 5d666840c0..aedca80d26 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -5391,7 +5391,7 @@ fn gimmeMoreOfThoseSweetSweetFileDescriptors() void {
         // setrlimit() now returns with errno set to EINVAL in places that historically succeeded.
         // It no longer accepts "rlim_cur = RLIM.INFINITY" for RLIM.NOFILE.
         // Use "rlim_cur = min(OPEN_MAX, rlim_max)".
-        lim.max = std.math.min(std.os.darwin.OPEN_MAX, lim.max);
+        lim.max = @min(std.os.darwin.OPEN_MAX, lim.max);
     }
     if (lim.cur == lim.max) return;
diff --git a/src/translate_c.zig b/src/translate_c.zig
index 8cc2d1856c..67176ff74b 100644
--- a/src/translate_c.zig
+++ b/src/translate_c.zig
@@ -2400,7 +2400,7 @@ fn transStringLiteralInitializer(

     if (array_size == 0) return Tag.empty_array.create(c.arena, elem_type);

-    const num_inits = math.min(str_length, array_size);
+    const num_inits = @min(str_length, array_size);
     const init_node = if (num_inits > 0) blk: {
         if (is_narrow) {
             // "string literal".* or string literal"[0..num_inits].*
diff --git a/src/translate_c/ast.zig b/src/translate_c/ast.zig
index 6c6bbf28bd..443c56a84a 100644
--- a/src/translate_c/ast.zig
+++ b/src/translate_c/ast.zig
@@ -1824,7 +1824,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
         },
         .switch_prong => {
             const payload = node.castTag(.switch_prong).?.data;
-            var items = try c.gpa.alloc(NodeIndex, std.math.max(payload.cases.len, 1));
+            var items = try c.gpa.alloc(NodeIndex, @max(payload.cases.len, 1));
             defer c.gpa.free(items);
             items[0] = 0;
             for (payload.cases, 0..) |item, i| {
@@ -1973,7 +1973,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
             const payload = node.castTag(.tuple).?.data;
             _ = try c.addToken(.period, ".");
             const l_brace = try c.addToken(.l_brace, "{");
-            var inits = try c.gpa.alloc(NodeIndex, std.math.max(payload.len, 2));
+            var inits = try c.gpa.alloc(NodeIndex, @max(payload.len, 2));
             defer c.gpa.free(inits);
             inits[0] = 0;
             inits[1] = 0;
@@ -2007,7 +2007,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
             const payload = node.castTag(.container_init_dot).?.data;
             _ = try c.addToken(.period, ".");
             const l_brace = try c.addToken(.l_brace, "{");
-            var inits = try c.gpa.alloc(NodeIndex, std.math.max(payload.len, 2));
+            var inits = try c.gpa.alloc(NodeIndex, @max(payload.len, 2));
             defer c.gpa.free(inits);
             inits[0] = 0;
             inits[1] = 0;
@@ -2046,7 +2046,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
             const lhs = try renderNode(c, payload.lhs);
             const l_brace = try c.addToken(.l_brace, "{");

-            var inits = try c.gpa.alloc(NodeIndex, std.math.max(payload.inits.len, 1));
+            var inits = try c.gpa.alloc(NodeIndex, @max(payload.inits.len, 1));
             defer c.gpa.free(inits);
             inits[0] = 0;
             for (payload.inits, 0..) |init, i| {
@@ -2102,7 +2102,7 @@ fn renderRecord(c: *Context, node: Node) !NodeIndex {
     const num_vars = payload.variables.len;
     const num_funcs = payload.functions.len;
     const total_members = payload.fields.len + num_vars + num_funcs;
-    const members = try c.gpa.alloc(NodeIndex, std.math.max(total_members, 2));
+    const members = try c.gpa.alloc(NodeIndex, @max(total_members, 2));
     defer c.gpa.free(members);
     members[0] = 0;
     members[1] = 0;
@@ -2195,7 +2195,7 @@ fn renderFieldAccess(c: *Context, lhs: NodeIndex, field_name: []const u8) !NodeI

 fn renderArrayInit(c: *Context, lhs: NodeIndex, inits: []const Node) !NodeIndex {
     const l_brace = try c.addToken(.l_brace, "{");
-    var rendered = try c.gpa.alloc(NodeIndex, std.math.max(inits.len, 1));
+    var rendered = try c.gpa.alloc(NodeIndex, @max(inits.len, 1));
     defer c.gpa.free(rendered);
     rendered[0] = 0;
     for (inits, 0..) |init, i| {
@@ -2904,7 +2904,7 @@ fn renderMacroFunc(c: *Context, node: Node) !NodeIndex {

 fn renderParams(c: *Context, params: []Payload.Param, is_var_args: bool) !std.ArrayList(NodeIndex) {
     _ = try c.addToken(.l_paren, "(");
-    var rendered = try std.ArrayList(NodeIndex).initCapacity(c.gpa, std.math.max(params.len, 1));
+    var rendered = try std.ArrayList(NodeIndex).initCapacity(c.gpa, @max(params.len, 1));
     errdefer rendered.deinit();

     for (params, 0..) |param, i| {
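Several of the translate_c/ast.zig hunks above use @max as an allocation floor: the node buffers always reserve at least one or two slots so the unconditional writes to the first elements stay in bounds even for an empty payload. A sketch of the pattern (lengths invented for illustration):

    const std = @import("std");

    test "@max enforces a minimum allocation size" {
        const gpa = std.testing.allocator;
        const cases_len: usize = 0;
        // Reserve at least one slot so the write below is always valid.
        const items = try gpa.alloc(u32, @max(cases_len, 1));
        defer gpa.free(items);
        items[0] = 0;
        try std.testing.expectEqual(@as(usize, 1), items.len);
    }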
diff --git a/src/type.zig b/src/type.zig
index 22523a7141..bb82a50682 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -1633,7 +1633,7 @@ pub const Type = struct {
                 const len = array_type.len + @boolToInt(array_type.sentinel != .none);
                 if (len == 0) return 0;
                 const elem_ty = array_type.child.toType();
-                const elem_size = std.math.max(elem_ty.abiAlignment(mod), elem_ty.abiSize(mod));
+                const elem_size = @max(elem_ty.abiAlignment(mod), elem_ty.abiSize(mod));
                 if (elem_size == 0) return 0;
                 const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, opt_sema);
                 return (len - 1) * 8 * elem_size + elem_bit_size;
diff --git a/src/value.zig b/src/value.zig
index 85204e2b10..8590aa8872 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -2458,7 +2458,7 @@ pub const Value = struct {
         const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
         const limbs = try arena.alloc(
             std.math.big.Limb,
-            std.math.max(
+            @max(
                 // For the saturate
                 std.math.big.int.calcTwosCompLimbCount(info.bits),
                 lhs_bigint.limbs.len + rhs_bigint.limbs.len,
@@ -2572,7 +2572,7 @@ pub const Value = struct {
         const limbs = try arena.alloc(
             std.math.big.Limb,
             // + 1 for negatives
-            std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
+            @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
         );
         var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
         result_bigint.bitAnd(lhs_bigint, rhs_bigint);
@@ -2638,7 +2638,7 @@ pub const Value = struct {
         const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
         const limbs = try arena.alloc(
             std.math.big.Limb,
-            std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
+            @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
         );
         var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
         result_bigint.bitOr(lhs_bigint, rhs_bigint);
@@ -2677,7 +2677,7 @@ pub const Value = struct {
         const limbs = try arena.alloc(
             std.math.big.Limb,
             // + 1 for negatives
-            std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
+            @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
         );
         var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
         result_bigint.bitXor(lhs_bigint, rhs_bigint);
--
cgit v1.2.3