Diffstat (limited to 'lib/std')
42 files changed, 136 insertions, 226 deletions
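Note (not part of the patch): the diff below is a mechanical migration from the deprecated `std.math.min`/`std.math.max` (and `min3`/`max3`) helpers to the `@min`/`@max` builtins. A minimal sketch of what a typical call site looks like after the change, assuming a compiler recent enough to provide the multi-operand builtins:

    const std = @import("std");

    test "@min/@max replace std.math.min/max" {
        // Two-operand form, as used at most call sites in this diff.
        try std.testing.expect(@min(@as(u32, 7), @as(u32, 3)) == 3);
        try std.testing.expect(@max(@as(u32, 7), @as(u32, 3)) == 7);

        // The builtins also accept more than two operands, which is what lets
        // math.max3(header_align, key_align, val_align) in hash_map.zig become
        // a plain three-operand @max below.
        try std.testing.expect(@max(@as(i32, 0), @as(i32, 2), @as(i32, 1)) == 2);

        // std.math.clamp stays, now implemented on top of the builtins.
        try std.testing.expect(std.math.clamp(@as(i32, 10), @as(i32, 0), @as(i32, 5)) == 5);
    }

Unlike the removed helpers, the builtins need no `std.math` import and can be used in any expression context.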
diff --git a/lib/std/Build/Cache/DepTokenizer.zig b/lib/std/Build/Cache/DepTokenizer.zig index 1a4e2ddb74..0e5224edc0 100644 --- a/lib/std/Build/Cache/DepTokenizer.zig +++ b/lib/std/Build/Cache/DepTokenizer.zig @@ -983,7 +983,7 @@ fn hexDump(out: anytype, bytes: []const u8) !void { try printDecValue(out, offset, 8); try out.writeAll(":"); try out.writeAll(" "); - var end1 = std.math.min(offset + n, offset + 8); + var end1 = @min(offset + n, offset + 8); for (bytes[offset..end1]) |b| { try out.writeAll(" "); try printHexValue(out, b, 2); diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig index ed6a9383e3..76650a9072 100644 --- a/lib/std/Thread.zig +++ b/lib/std/Thread.zig @@ -541,7 +541,7 @@ const WindowsThreadImpl = struct { // Going lower makes it default to that specified in the executable (~1mb). // Its also fine if the limit here is incorrect as stack size is only a hint. var stack_size = std.math.cast(u32, config.stack_size) orelse std.math.maxInt(u32); - stack_size = std.math.max(64 * 1024, stack_size); + stack_size = @max(64 * 1024, stack_size); instance.thread.thread_handle = windows.kernel32.CreateThread( null, @@ -690,7 +690,7 @@ const PosixThreadImpl = struct { defer assert(c.pthread_attr_destroy(&attr) == .SUCCESS); // Use the same set of parameters used by the libc-less impl. - const stack_size = std.math.max(config.stack_size, c.PTHREAD_STACK_MIN); + const stack_size = @max(config.stack_size, c.PTHREAD_STACK_MIN); assert(c.pthread_attr_setstacksize(&attr, stack_size) == .SUCCESS); assert(c.pthread_attr_setguardsize(&attr, std.mem.page_size) == .SUCCESS); @@ -930,7 +930,7 @@ const LinuxThreadImpl = struct { var bytes: usize = page_size; guard_offset = bytes; - bytes += std.math.max(page_size, config.stack_size); + bytes += @max(page_size, config.stack_size); bytes = std.mem.alignForward(bytes, page_size); stack_offset = bytes; diff --git a/lib/std/Uri.zig b/lib/std/Uri.zig index 7a9755bd28..198ab461ae 100644 --- a/lib/std/Uri.zig +++ b/lib/std/Uri.zig @@ -177,13 +177,13 @@ pub fn parseWithoutScheme(text: []const u8) ParseError!Uri { if (std.mem.lastIndexOf(u8, authority, ":")) |index| { if (index >= end_of_host) { // if not part of the V6 address field - end_of_host = std.math.min(end_of_host, index); + end_of_host = @min(end_of_host, index); uri.port = std.fmt.parseInt(u16, authority[index + 1 ..], 10) catch return error.InvalidPort; } } } else if (std.mem.lastIndexOf(u8, authority, ":")) |index| { if (index >= start_of_host) { // if not part of the userinfo field - end_of_host = std.math.min(end_of_host, index); + end_of_host = @min(end_of_host, index); uri.port = std.fmt.parseInt(u16, authority[index + 1 ..], 10) catch return error.InvalidPort; } } diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig index 55b9aac6e4..b46b5c12f0 100644 --- a/lib/std/array_hash_map.zig +++ b/lib/std/array_hash_map.zig @@ -815,9 +815,9 @@ pub fn ArrayHashMapUnmanaged( /// no longer guaranteed that no allocations will be performed. pub fn capacity(self: Self) usize { const entry_cap = self.entries.capacity; - const header = self.index_header orelse return math.min(linear_scan_max, entry_cap); + const header = self.index_header orelse return @min(linear_scan_max, entry_cap); const indexes_cap = header.capacity(); - return math.min(entry_cap, indexes_cap); + return @min(entry_cap, indexes_cap); } /// Clobbers any existing data. To detect if a put would clobber @@ -1821,7 +1821,7 @@ fn Index(comptime I: type) type { /// length * the size of an Index(u32). 
The index is 8 bytes (3 bits repr) /// and max_usize + 1 is not representable, so we need to subtract out 4 bits. const max_representable_index_len = @bitSizeOf(usize) - 4; -const max_bit_index = math.min(32, max_representable_index_len); +const max_bit_index = @min(32, max_representable_index_len); const min_bit_index = 5; const max_capacity = (1 << max_bit_index) - 1; const index_capacities = blk: { diff --git a/lib/std/ascii.zig b/lib/std/ascii.zig index 941f398f20..e47ef4db65 100644 --- a/lib/std/ascii.zig +++ b/lib/std/ascii.zig @@ -422,7 +422,7 @@ test "indexOfIgnoreCase" { /// Returns the lexicographical order of two slices. O(n). pub fn orderIgnoreCase(lhs: []const u8, rhs: []const u8) std.math.Order { - const n = std.math.min(lhs.len, rhs.len); + const n = @min(lhs.len, rhs.len); var i: usize = 0; while (i < n) : (i += 1) { switch (std.math.order(toLower(lhs[i]), toLower(rhs[i]))) { diff --git a/lib/std/compress/lzma/decode.zig b/lib/std/compress/lzma/decode.zig index dc220d8e87..f539abf8b1 100644 --- a/lib/std/compress/lzma/decode.zig +++ b/lib/std/compress/lzma/decode.zig @@ -59,7 +59,7 @@ pub const Params = struct { const pb = @intCast(u3, props); const dict_size_provided = try reader.readIntLittle(u32); - const dict_size = math.max(0x1000, dict_size_provided); + const dict_size = @max(0x1000, dict_size_provided); const unpacked_size = switch (options.unpacked_size) { .read_from_header => blk: { diff --git a/lib/std/crypto/blake3.zig b/lib/std/crypto/blake3.zig index fb580fda13..7ad1511e79 100644 --- a/lib/std/crypto/blake3.zig +++ b/lib/std/crypto/blake3.zig @@ -20,7 +20,7 @@ const ChunkIterator = struct { } fn next(self: *ChunkIterator) ?[]u8 { - const next_chunk = self.slice[0..math.min(self.chunk_len, self.slice.len)]; + const next_chunk = self.slice[0..@min(self.chunk_len, self.slice.len)]; self.slice = self.slice[next_chunk.len..]; return if (next_chunk.len > 0) next_chunk else null; } @@ -283,7 +283,7 @@ const ChunkState = struct { fn fillBlockBuf(self: *ChunkState, input: []const u8) []const u8 { const want = BLOCK_LEN - self.block_len; - const take = math.min(want, input.len); + const take = @min(want, input.len); @memcpy(self.block[self.block_len..][0..take], input[0..take]); self.block_len += @truncate(u8, take); return input[take..]; @@ -450,7 +450,7 @@ pub const Blake3 = struct { // Compress input bytes into the current chunk state. 
const want = CHUNK_LEN - self.chunk_state.len(); - const take = math.min(want, input.len); + const take = @min(want, input.len); self.chunk_state.update(input[0..take]); input = input[take..]; } @@ -663,7 +663,7 @@ fn testBlake3(hasher: *Blake3, input_len: usize, expected_hex: [262]u8) !void { // Write repeating input pattern to hasher var input_counter = input_len; while (input_counter > 0) { - const update_len = math.min(input_counter, input_pattern.len); + const update_len = @min(input_counter, input_pattern.len); hasher.update(input_pattern[0..update_len]); input_counter -= update_len; } diff --git a/lib/std/crypto/ff.zig b/lib/std/crypto/ff.zig index 84753ddefb..37e3d1c1b3 100644 --- a/lib/std/crypto/ff.zig +++ b/lib/std/crypto/ff.zig @@ -570,7 +570,7 @@ pub fn Modulus(comptime max_bits: comptime_int) type { var out = self.zero; var i = x.limbs_count() - 1; if (self.limbs_count() >= 2) { - const start = math.min(i, self.limbs_count() - 2); + const start = @min(i, self.limbs_count() - 2); var j = start; while (true) : (j -= 1) { out.v.limbs.set(j, x.limbs.get(i)); diff --git a/lib/std/crypto/ghash_polyval.zig b/lib/std/crypto/ghash_polyval.zig index 46645d710f..2fbff25f72 100644 --- a/lib/std/crypto/ghash_polyval.zig +++ b/lib/std/crypto/ghash_polyval.zig @@ -363,7 +363,7 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type { var mb = m; if (st.leftover > 0) { - const want = math.min(block_length - st.leftover, mb.len); + const want = @min(block_length - st.leftover, mb.len); const mc = mb[0..want]; for (mc, 0..) |x, i| { st.buf[st.leftover + i] = x; diff --git a/lib/std/crypto/keccak_p.zig b/lib/std/crypto/keccak_p.zig index 9226f2f6d4..ddc9b1b847 100644 --- a/lib/std/crypto/keccak_p.zig +++ b/lib/std/crypto/keccak_p.zig @@ -214,7 +214,7 @@ pub fn State(comptime f: u11, comptime capacity: u11, comptime delim: u8, compti pub fn absorb(self: *Self, bytes_: []const u8) void { var bytes = bytes_; if (self.offset > 0) { - const left = math.min(rate - self.offset, bytes.len); + const left = @min(rate - self.offset, bytes.len); @memcpy(self.buf[self.offset..][0..left], bytes[0..left]); self.offset += left; if (self.offset == rate) { @@ -249,7 +249,7 @@ pub fn State(comptime f: u11, comptime capacity: u11, comptime delim: u8, compti pub fn squeeze(self: *Self, out: []u8) void { var i: usize = 0; while (i < out.len) : (i += rate) { - const left = math.min(rate, out.len - i); + const left = @min(rate, out.len - i); self.st.extractBytes(out[i..][0..left]); self.st.permuteR(rounds); } diff --git a/lib/std/crypto/poly1305.zig b/lib/std/crypto/poly1305.zig index a2873f1145..51e1c2ab24 100644 --- a/lib/std/crypto/poly1305.zig +++ b/lib/std/crypto/poly1305.zig @@ -112,7 +112,7 @@ pub const Poly1305 = struct { // handle leftover if (st.leftover > 0) { - const want = std.math.min(block_length - st.leftover, mb.len); + const want = @min(block_length - st.leftover, mb.len); const mc = mb[0..want]; for (mc, 0..) 
|x, i| { st.buf[st.leftover + i] = x; diff --git a/lib/std/crypto/salsa20.zig b/lib/std/crypto/salsa20.zig index 7f57e6cecb..c8a639ad0b 100644 --- a/lib/std/crypto/salsa20.zig +++ b/lib/std/crypto/salsa20.zig @@ -404,7 +404,7 @@ pub const XSalsa20Poly1305 = struct { debug.assert(c.len == m.len); const extended = extend(rounds, k, npub); var block0 = [_]u8{0} ** 64; - const mlen0 = math.min(32, c.len); + const mlen0 = @min(32, c.len); @memcpy(block0[32..][0..mlen0], c[0..mlen0]); Salsa20.xor(block0[0..], block0[0..], 0, extended.key, extended.nonce); var mac = Poly1305.init(block0[0..32]); diff --git a/lib/std/crypto/scrypt.zig b/lib/std/crypto/scrypt.zig index b8e8ef55e2..97dd9b95d0 100644 --- a/lib/std/crypto/scrypt.zig +++ b/lib/std/crypto/scrypt.zig @@ -143,7 +143,7 @@ pub const Params = struct { /// Create parameters from ops and mem limits, where mem_limit given in bytes pub fn fromLimits(ops_limit: u64, mem_limit: usize) Self { - const ops = math.max(32768, ops_limit); + const ops = @max(32768, ops_limit); const r: u30 = 8; if (ops < mem_limit / 32) { const max_n = ops / (r * 4); @@ -151,7 +151,7 @@ pub const Params = struct { } else { const max_n = mem_limit / (@intCast(usize, r) * 128); const ln = @intCast(u6, math.log2(max_n)); - const max_rp = math.min(0x3fffffff, (ops / 4) / (@as(u64, 1) << ln)); + const max_rp = @min(0x3fffffff, (ops / 4) / (@as(u64, 1) << ln)); return Self{ .r = r, .p = @intCast(u30, max_rp / @as(u64, r)), .ln = ln }; } } diff --git a/lib/std/crypto/sha3.zig b/lib/std/crypto/sha3.zig index 23f9e65534..0226490881 100644 --- a/lib/std/crypto/sha3.zig +++ b/lib/std/crypto/sha3.zig @@ -148,7 +148,7 @@ fn ShakeLike(comptime security_level: u11, comptime delim: u8, comptime rounds: if (self.offset > 0) { const left = self.buf.len - self.offset; if (left > 0) { - const n = math.min(left, out.len); + const n = @min(left, out.len); @memcpy(out[0..n], self.buf[self.offset..][0..n]); out = out[n..]; self.offset += n; diff --git a/lib/std/crypto/siphash.zig b/lib/std/crypto/siphash.zig index 37d219f868..70f4f2fd53 100644 --- a/lib/std/crypto/siphash.zig +++ b/lib/std/crypto/siphash.zig @@ -433,7 +433,7 @@ test "iterative non-divisible update" { var siphash = Siphash.init(key); var i: usize = 0; while (i < end) : (i += 7) { - siphash.update(buf[i..std.math.min(i + 7, end)]); + siphash.update(buf[i..@min(i + 7, end)]); } const iterative_hash = siphash.finalInt(); diff --git a/lib/std/debug.zig b/lib/std/debug.zig index ea0d467085..3015c30bfb 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -198,7 +198,7 @@ pub fn captureStackTrace(first_address: ?usize, stack_trace: *std.builtin.StackT stack_trace.index = 0; return; }; - const end_index = math.min(first_index + addrs.len, n); + const end_index = @min(first_index + addrs.len, n); const slice = addr_buf[first_index..end_index]; // We use a for loop here because slice and addrs may alias. for (slice, 0..) 
|addr, i| { @@ -380,7 +380,7 @@ pub fn writeStackTrace( _ = allocator; if (builtin.strip_debug_info) return error.MissingDebugInfo; var frame_index: usize = 0; - var frames_left: usize = std.math.min(stack_trace.index, stack_trace.instruction_addresses.len); + var frames_left: usize = @min(stack_trace.index, stack_trace.instruction_addresses.len); while (frames_left != 0) : ({ frames_left -= 1; diff --git a/lib/std/dynamic_library.zig b/lib/std/dynamic_library.zig index 59ad7429cf..94da2f4d6d 100644 --- a/lib/std/dynamic_library.zig +++ b/lib/std/dynamic_library.zig @@ -8,7 +8,6 @@ const elf = std.elf; const windows = std.os.windows; const system = std.os.system; const maxInt = std.math.maxInt; -const max = std.math.max; pub const DynLib = switch (builtin.os.tag) { .linux => if (builtin.link_libc) DlDynlib else ElfDynLib, @@ -152,7 +151,7 @@ pub const ElfDynLib = struct { }) { const ph = @intToPtr(*elf.Phdr, ph_addr); switch (ph.p_type) { - elf.PT_LOAD => virt_addr_end = max(virt_addr_end, ph.p_vaddr + ph.p_memsz), + elf.PT_LOAD => virt_addr_end = @max(virt_addr_end, ph.p_vaddr + ph.p_memsz), elf.PT_DYNAMIC => maybe_dynv = @intToPtr([*]usize, elf_addr + ph.p_offset), else => {}, } diff --git a/lib/std/event/loop.zig b/lib/std/event/loop.zig index c8d41d3eb0..bc0162423b 100644 --- a/lib/std/event/loop.zig +++ b/lib/std/event/loop.zig @@ -179,7 +179,7 @@ pub const Loop = struct { // We need at least one of these in case the fs thread wants to use onNextTick const extra_thread_count = thread_count - 1; - const resume_node_count = std.math.max(extra_thread_count, 1); + const resume_node_count = @max(extra_thread_count, 1); self.eventfd_resume_nodes = try self.arena.allocator().alloc( std.atomic.Stack(ResumeNode.EventFd).Node, resume_node_count, diff --git a/lib/std/fifo.zig b/lib/std/fifo.zig index bc88e61d76..535376d38f 100644 --- a/lib/std/fifo.zig +++ b/lib/std/fifo.zig @@ -150,7 +150,7 @@ pub fn LinearFifo( start -= self.buf.len; return self.buf[start .. 
start + (self.count - offset)]; } else { - const end = math.min(self.head + self.count, self.buf.len); + const end = @min(self.head + self.count, self.buf.len); return self.buf[start..end]; } } diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig index 6896d0a7a0..c9d8e611ca 100644 --- a/lib/std/fmt.zig +++ b/lib/std/fmt.zig @@ -921,8 +921,8 @@ fn formatSizeImpl(comptime base: comptime_int) type { const log2 = math.log2(value); const magnitude = switch (base) { - 1000 => math.min(log2 / comptime math.log2(1000), mags_si.len - 1), - 1024 => math.min(log2 / 10, mags_iec.len - 1), + 1000 => @min(log2 / comptime math.log2(1000), mags_si.len - 1), + 1024 => @min(log2 / 10, mags_iec.len - 1), else => unreachable, }; const new_value = lossyCast(f64, value) / math.pow(f64, lossyCast(f64, base), lossyCast(f64, magnitude)); @@ -1103,7 +1103,7 @@ pub fn formatFloatScientific( var printed: usize = 0; if (float_decimal.digits.len > 1) { - const num_digits = math.min(float_decimal.digits.len, precision + 1); + const num_digits = @min(float_decimal.digits.len, precision + 1); try writer.writeAll(float_decimal.digits[1..num_digits]); printed += num_digits - 1; } @@ -1116,7 +1116,7 @@ pub fn formatFloatScientific( try writer.writeAll(float_decimal.digits[0..1]); try writer.writeAll("."); if (float_decimal.digits.len > 1) { - const num_digits = if (@TypeOf(value) == f32) math.min(@as(usize, 9), float_decimal.digits.len) else float_decimal.digits.len; + const num_digits = if (@TypeOf(value) == f32) @min(@as(usize, 9), float_decimal.digits.len) else float_decimal.digits.len; try writer.writeAll(float_decimal.digits[1..num_digits]); } else { @@ -1299,7 +1299,7 @@ pub fn formatFloatDecimal( var num_digits_whole = if (float_decimal.exp > 0) @intCast(usize, float_decimal.exp) else 0; // the actual slice into the buffer, we may need to zero-pad between num_digits_whole and this. - var num_digits_whole_no_pad = math.min(num_digits_whole, float_decimal.digits.len); + var num_digits_whole_no_pad = @min(num_digits_whole, float_decimal.digits.len); if (num_digits_whole > 0) { // We may have to zero pad, for instance 1e4 requires zero padding. @@ -1326,7 +1326,7 @@ pub fn formatFloatDecimal( // Zero-fill until we reach significant digits or run out of precision. if (float_decimal.exp <= 0) { const zero_digit_count = @intCast(usize, -float_decimal.exp); - const zeros_to_print = math.min(zero_digit_count, precision); + const zeros_to_print = @min(zero_digit_count, precision); var i: usize = 0; while (i < zeros_to_print) : (i += 1) { @@ -1357,7 +1357,7 @@ pub fn formatFloatDecimal( var num_digits_whole = if (float_decimal.exp > 0) @intCast(usize, float_decimal.exp) else 0; // the actual slice into the buffer, we may need to zero-pad between num_digits_whole and this. - var num_digits_whole_no_pad = math.min(num_digits_whole, float_decimal.digits.len); + var num_digits_whole_no_pad = @min(num_digits_whole, float_decimal.digits.len); if (num_digits_whole > 0) { // We may have to zero pad, for instance 1e4 requires zero padding. 
@@ -1410,12 +1410,12 @@ pub fn formatInt( // The type must have the same size as `base` or be wider in order for the // division to work - const min_int_bits = comptime math.max(value_info.bits, 8); + const min_int_bits = comptime @max(value_info.bits, 8); const MinInt = std.meta.Int(.unsigned, min_int_bits); const abs_value = math.absCast(int_value); // The worst case in terms of space needed is base 2, plus 1 for the sign - var buf: [1 + math.max(value_info.bits, 1)]u8 = undefined; + var buf: [1 + @max(@as(comptime_int, value_info.bits), 1)]u8 = undefined; var a: MinInt = abs_value; var index: usize = buf.len; diff --git a/lib/std/hash/wyhash.zig b/lib/std/hash/wyhash.zig index 3426bca9f4..c36c3fe87c 100644 --- a/lib/std/hash/wyhash.zig +++ b/lib/std/hash/wyhash.zig @@ -252,7 +252,7 @@ test "iterative non-divisible update" { var wy = Wyhash.init(seed); var i: usize = 0; while (i < end) : (i += 33) { - wy.update(buf[i..std.math.min(i + 33, end)]); + wy.update(buf[i..@min(i + 33, end)]); } const iterative_hash = wy.final(); diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig index 041d99606e..5b539ddaad 100644 --- a/lib/std/hash_map.zig +++ b/lib/std/hash_map.zig @@ -1507,7 +1507,7 @@ pub fn HashMapUnmanaged( fn grow(self: *Self, allocator: Allocator, new_capacity: Size, ctx: Context) Allocator.Error!void { @setCold(true); - const new_cap = std.math.max(new_capacity, minimal_capacity); + const new_cap = @max(new_capacity, minimal_capacity); assert(new_cap > self.capacity()); assert(std.math.isPowerOfTwo(new_cap)); @@ -1540,7 +1540,7 @@ pub fn HashMapUnmanaged( const header_align = @alignOf(Header); const key_align = if (@sizeOf(K) == 0) 1 else @alignOf(K); const val_align = if (@sizeOf(V) == 0) 1 else @alignOf(V); - const max_align = comptime math.max3(header_align, key_align, val_align); + const max_align = comptime @max(header_align, key_align, val_align); const meta_size = @sizeOf(Header) + new_capacity * @sizeOf(Metadata); comptime assert(@alignOf(Metadata) == 1); @@ -1575,7 +1575,7 @@ pub fn HashMapUnmanaged( const header_align = @alignOf(Header); const key_align = if (@sizeOf(K) == 0) 1 else @alignOf(K); const val_align = if (@sizeOf(V) == 0) 1 else @alignOf(V); - const max_align = comptime math.max3(header_align, key_align, val_align); + const max_align = comptime @max(header_align, key_align, val_align); const cap = self.capacity(); const meta_size = @sizeOf(Header) + cap * @sizeOf(Metadata); diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig index c0eeae6e61..c7e0569067 100644 --- a/lib/std/heap/arena_allocator.zig +++ b/lib/std/heap/arena_allocator.zig @@ -110,7 +110,7 @@ pub const ArenaAllocator = struct { // value. const requested_capacity = switch (mode) { .retain_capacity => self.queryCapacity(), - .retain_with_limit => |limit| std.math.min(limit, self.queryCapacity()), + .retain_with_limit => |limit| @min(limit, self.queryCapacity()), .free_all => 0, }; if (requested_capacity == 0) { diff --git a/lib/std/heap/memory_pool.zig b/lib/std/heap/memory_pool.zig index ca6eb7f518..3fc7dfbfca 100644 --- a/lib/std/heap/memory_pool.zig +++ b/lib/std/heap/memory_pool.zig @@ -40,11 +40,11 @@ pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type /// Size of the memory pool items. This is not necessarily the same /// as `@sizeOf(Item)` as the pool also uses the items for internal means. 
- pub const item_size = std.math.max(@sizeOf(Node), @sizeOf(Item)); + pub const item_size = @max(@sizeOf(Node), @sizeOf(Item)); /// Alignment of the memory pool items. This is not necessarily the same /// as `@alignOf(Item)` as the pool also uses the items for internal means. - pub const item_alignment = std.math.max(@alignOf(Node), pool_options.alignment orelse 0); + pub const item_alignment = @max(@alignOf(Node), pool_options.alignment orelse 0); const Node = struct { next: ?*@This(), diff --git a/lib/std/http/protocol.zig b/lib/std/http/protocol.zig index b001b3cddf..b5c2cdfa0c 100644 --- a/lib/std/http/protocol.zig +++ b/lib/std/http/protocol.zig @@ -82,7 +82,7 @@ pub const HeadersParser = struct { /// If the amount returned is less than `bytes.len`, you may assume that the parser is in a content state and the /// first byte of content is located at `bytes[result]`. pub fn findHeadersEnd(r: *HeadersParser, bytes: []const u8) u32 { - const vector_len: comptime_int = comptime std.math.max(std.simd.suggestVectorSize(u8) orelse 1, 8); + const vector_len: comptime_int = comptime @max(std.simd.suggestVectorSize(u8) orelse 1, 8); const len = @intCast(u32, bytes.len); var index: u32 = 0; diff --git a/lib/std/io/fixed_buffer_stream.zig b/lib/std/io/fixed_buffer_stream.zig index c170dd1f74..27b978744c 100644 --- a/lib/std/io/fixed_buffer_stream.zig +++ b/lib/std/io/fixed_buffer_stream.zig @@ -76,7 +76,7 @@ pub fn FixedBufferStream(comptime Buffer: type) type { } pub fn seekTo(self: *Self, pos: u64) SeekError!void { - self.pos = if (std.math.cast(usize, pos)) |x| std.math.min(self.buffer.len, x) else self.buffer.len; + self.pos = if (std.math.cast(usize, pos)) |x| @min(self.buffer.len, x) else self.buffer.len; } pub fn seekBy(self: *Self, amt: i64) SeekError!void { @@ -91,7 +91,7 @@ pub fn FixedBufferStream(comptime Buffer: type) type { } else { const amt_usize = std.math.cast(usize, amt) orelse std.math.maxInt(usize); const new_pos = std.math.add(usize, self.pos, amt_usize) catch std.math.maxInt(usize); - self.pos = std.math.min(self.buffer.len, new_pos); + self.pos = @min(self.buffer.len, new_pos); } } diff --git a/lib/std/io/limited_reader.zig b/lib/std/io/limited_reader.zig index aa00af0d09..09d76007da 100644 --- a/lib/std/io/limited_reader.zig +++ b/lib/std/io/limited_reader.zig @@ -14,7 +14,7 @@ pub fn LimitedReader(comptime ReaderType: type) type { const Self = @This(); pub fn read(self: *Self, dest: []u8) Error!usize { - const max_read = std.math.min(self.bytes_left, dest.len); + const max_read = @min(self.bytes_left, dest.len); const n = try self.inner_reader.read(dest[0..max_read]); self.bytes_left -= n; return n; diff --git a/lib/std/io/reader.zig b/lib/std/io/reader.zig index 344515d07b..abdca56d3c 100644 --- a/lib/std/io/reader.zig +++ b/lib/std/io/reader.zig @@ -325,7 +325,7 @@ pub fn Reader( var remaining = num_bytes; while (remaining > 0) { - const amt = std.math.min(remaining, options.buf_size); + const amt = @min(remaining, options.buf_size); try self.readNoEof(buf[0..amt]); remaining -= amt; } diff --git a/lib/std/io/writer.zig b/lib/std/io/writer.zig index cfc76de452..d0b7fa11ee 100644 --- a/lib/std/io/writer.zig +++ b/lib/std/io/writer.zig @@ -39,7 +39,7 @@ pub fn Writer( var remaining: usize = n; while (remaining > 0) { - const to_write = std.math.min(remaining, bytes.len); + const to_write = @min(remaining, bytes.len); try self.writeAll(bytes[0..to_write]); remaining -= to_write; } diff --git a/lib/std/math.zig b/lib/std/math.zig index 46a7e40a37..e60e964747 100644 --- 
a/lib/std/math.zig +++ b/lib/std/math.zig @@ -165,7 +165,7 @@ pub fn approxEqRel(comptime T: type, x: T, y: T, tolerance: T) bool { if (isNan(x) or isNan(y)) return false; - return @fabs(x - y) <= max(@fabs(x), @fabs(y)) * tolerance; + return @fabs(x - y) <= @max(@fabs(x), @fabs(y)) * tolerance; } test "approxEqAbs and approxEqRel" { @@ -434,104 +434,15 @@ pub fn Min(comptime A: type, comptime B: type) type { return @TypeOf(@as(A, 0) + @as(B, 0)); } -/// Returns the smaller number. When one parameter's type's full range -/// fits in the other, the return type is the smaller type. -pub fn min(x: anytype, y: anytype) Min(@TypeOf(x), @TypeOf(y)) { - const Result = Min(@TypeOf(x), @TypeOf(y)); - if (x < y) { - // TODO Zig should allow this as an implicit cast because x is - // immutable and in this scope it is known to fit in the - // return type. - switch (@typeInfo(Result)) { - .Int => return @intCast(Result, x), - else => return x, - } - } else { - // TODO Zig should allow this as an implicit cast because y is - // immutable and in this scope it is known to fit in the - // return type. - switch (@typeInfo(Result)) { - .Int => return @intCast(Result, y), - else => return y, - } - } -} - -test "min" { - try testing.expect(min(@as(i32, -1), @as(i32, 2)) == -1); - { - var a: u16 = 999; - var b: u32 = 10; - var result = min(a, b); - try testing.expect(@TypeOf(result) == u16); - try testing.expect(result == 10); - } - { - var a: f64 = 10.34; - var b: f32 = 999.12; - var result = min(a, b); - try testing.expect(@TypeOf(result) == f64); - try testing.expect(result == 10.34); - } - { - var a: i8 = -127; - var b: i16 = -200; - var result = min(a, b); - try testing.expect(@TypeOf(result) == i16); - try testing.expect(result == -200); - } - { - const a = 10.34; - var b: f32 = 999.12; - var result = min(a, b); - try testing.expect(@TypeOf(result) == f32); - try testing.expect(result == 10.34); - } -} - -/// Finds the minimum of three numbers. -pub fn min3(x: anytype, y: anytype, z: anytype) @TypeOf(x, y, z) { - return min(x, min(y, z)); -} - -test "min3" { - try testing.expect(min3(@as(i32, 0), @as(i32, 1), @as(i32, 2)) == 0); - try testing.expect(min3(@as(i32, 0), @as(i32, 2), @as(i32, 1)) == 0); - try testing.expect(min3(@as(i32, 1), @as(i32, 0), @as(i32, 2)) == 0); - try testing.expect(min3(@as(i32, 1), @as(i32, 2), @as(i32, 0)) == 0); - try testing.expect(min3(@as(i32, 2), @as(i32, 0), @as(i32, 1)) == 0); - try testing.expect(min3(@as(i32, 2), @as(i32, 1), @as(i32, 0)) == 0); -} - -/// Returns the maximum of two numbers. Return type is the one with the -/// larger range. -pub fn max(x: anytype, y: anytype) @TypeOf(x, y) { - return if (x > y) x else y; -} - -test "max" { - try testing.expect(max(@as(i32, -1), @as(i32, 2)) == 2); - try testing.expect(max(@as(i32, 2), @as(i32, -1)) == 2); -} - -/// Finds the maximum of three numbers. 
-pub fn max3(x: anytype, y: anytype, z: anytype) @TypeOf(x, y, z) { - return max(x, max(y, z)); -} - -test "max3" { - try testing.expect(max3(@as(i32, 0), @as(i32, 1), @as(i32, 2)) == 2); - try testing.expect(max3(@as(i32, 0), @as(i32, 2), @as(i32, 1)) == 2); - try testing.expect(max3(@as(i32, 1), @as(i32, 0), @as(i32, 2)) == 2); - try testing.expect(max3(@as(i32, 1), @as(i32, 2), @as(i32, 0)) == 2); - try testing.expect(max3(@as(i32, 2), @as(i32, 0), @as(i32, 1)) == 2); - try testing.expect(max3(@as(i32, 2), @as(i32, 1), @as(i32, 0)) == 2); -} +pub const min = @compileError("deprecated; use @min instead"); +pub const max = @compileError("deprecated; use @max instead"); +pub const min3 = @compileError("deprecated; use @min instead"); +pub const max3 = @compileError("deprecated; use @max instead"); /// Limit val to the inclusive range [lower, upper]. pub fn clamp(val: anytype, lower: anytype, upper: anytype) @TypeOf(val, lower, upper) { assert(lower <= upper); - return max(lower, min(val, upper)); + return @max(lower, @min(val, upper)); } test "clamp" { // Within range @@ -795,7 +706,7 @@ pub fn IntFittingRange(comptime from: comptime_int, comptime to: comptime_int) t return u0; } const signedness: std.builtin.Signedness = if (from < 0) .signed else .unsigned; - const largest_positive_integer = max(if (from < 0) (-from) - 1 else from, to); // two's complement + const largest_positive_integer = @max(if (from < 0) (-from) - 1 else from, to); // two's complement const base = log2(largest_positive_integer); const upper = (1 << base) - 1; var magnitude_bits = if (upper >= largest_positive_integer) base else base + 1; diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig index ec79d843da..487812e1de 100644 --- a/lib/std/math/big/int.zig +++ b/lib/std/math/big/int.zig @@ -44,12 +44,12 @@ pub fn calcDivLimbsBufferLen(a_len: usize, b_len: usize) usize { } pub fn calcMulLimbsBufferLen(a_len: usize, b_len: usize, aliases: usize) usize { - return aliases * math.max(a_len, b_len); + return aliases * @max(a_len, b_len); } pub fn calcMulWrapLimbsBufferLen(bit_count: usize, a_len: usize, b_len: usize, aliases: usize) usize { const req_limbs = calcTwosCompLimbCount(bit_count); - return aliases * math.min(req_limbs, math.max(a_len, b_len)); + return aliases * @min(req_limbs, @max(a_len, b_len)); } pub fn calcSetStringLimbsBufferLen(base: u8, string_len: usize) usize { @@ -396,7 +396,7 @@ pub const Mutable = struct { /// scalar is a primitive integer type. /// /// Asserts the result fits in `r`. An upper bound on the number of limbs needed by - /// r is `math.max(a.limbs.len, calcLimbLen(scalar)) + 1`. + /// r is `@max(a.limbs.len, calcLimbLen(scalar)) + 1`. pub fn addScalar(r: *Mutable, a: Const, scalar: anytype) void { // Normally we could just determine the number of limbs needed with calcLimbLen, // but that is not comptime-known when scalar is not a comptime_int. Instead, we @@ -414,11 +414,11 @@ pub const Mutable = struct { return add(r, a, operand); } - /// Base implementation for addition. Adds `max(a.limbs.len, b.limbs.len)` elements from a and b, + /// Base implementation for addition. Adds `@max(a.limbs.len, b.limbs.len)` elements from a and b, /// and returns whether any overflow occurred. /// r, a and b may be aliases. /// - /// Asserts r has enough elements to hold the result. The upper bound is `max(a.limbs.len, b.limbs.len)`. + /// Asserts r has enough elements to hold the result. The upper bound is `@max(a.limbs.len, b.limbs.len)`. 
fn addCarry(r: *Mutable, a: Const, b: Const) bool { if (a.eqZero()) { r.copy(b); @@ -452,12 +452,12 @@ pub const Mutable = struct { /// r, a and b may be aliases. /// /// Asserts the result fits in `r`. An upper bound on the number of limbs needed by - /// r is `math.max(a.limbs.len, b.limbs.len) + 1`. + /// r is `@max(a.limbs.len, b.limbs.len) + 1`. pub fn add(r: *Mutable, a: Const, b: Const) void { if (r.addCarry(a, b)) { // Fix up the result. Note that addCarry normalizes by a.limbs.len or b.limbs.len, // so we need to set the length here. - const msl = math.max(a.limbs.len, b.limbs.len); + const msl = @max(a.limbs.len, b.limbs.len); // `[add|sub]Carry` normalizes by `msl`, so we need to fix up the result manually here. // Note, the fact that it normalized means that the intermediary limbs are zero here. r.len = msl + 1; @@ -477,12 +477,12 @@ pub const Mutable = struct { // if an overflow occurred. const x = Const{ .positive = a.positive, - .limbs = a.limbs[0..math.min(req_limbs, a.limbs.len)], + .limbs = a.limbs[0..@min(req_limbs, a.limbs.len)], }; const y = Const{ .positive = b.positive, - .limbs = b.limbs[0..math.min(req_limbs, b.limbs.len)], + .limbs = b.limbs[0..@min(req_limbs, b.limbs.len)], }; var carry_truncated = false; @@ -492,7 +492,7 @@ pub const Mutable = struct { // truncate anyway. // - a and b had less elements than req_limbs, and those were overflowed. This case needs to be handled. // Note: after this we still might need to wrap. - const msl = math.max(a.limbs.len, b.limbs.len); + const msl = @max(a.limbs.len, b.limbs.len); if (msl < req_limbs) { r.limbs[msl] = 1; r.len = req_limbs; @@ -522,12 +522,12 @@ pub const Mutable = struct { // if an overflow occurred. const x = Const{ .positive = a.positive, - .limbs = a.limbs[0..math.min(req_limbs, a.limbs.len)], + .limbs = a.limbs[0..@min(req_limbs, a.limbs.len)], }; const y = Const{ .positive = b.positive, - .limbs = b.limbs[0..math.min(req_limbs, b.limbs.len)], + .limbs = b.limbs[0..@min(req_limbs, b.limbs.len)], }; if (r.addCarry(x, y)) { @@ -535,7 +535,7 @@ pub const Mutable = struct { // - We overflowed req_limbs, in which case we need to saturate. // - a and b had less elements than req_limbs, and those were overflowed. // Note: In this case, might _also_ need to saturate. - const msl = math.max(a.limbs.len, b.limbs.len); + const msl = @max(a.limbs.len, b.limbs.len); if (msl < req_limbs) { r.limbs[msl] = 1; r.len = req_limbs; @@ -550,11 +550,11 @@ pub const Mutable = struct { r.saturate(r.toConst(), signedness, bit_count); } - /// Base implementation for subtraction. Subtracts `max(a.limbs.len, b.limbs.len)` elements from a and b, + /// Base implementation for subtraction. Subtracts `@max(a.limbs.len, b.limbs.len)` elements from a and b, /// and returns whether any overflow occurred. /// r, a and b may be aliases. /// - /// Asserts r has enough elements to hold the result. The upper bound is `max(a.limbs.len, b.limbs.len)`. + /// Asserts r has enough elements to hold the result. The upper bound is `@max(a.limbs.len, b.limbs.len)`. fn subCarry(r: *Mutable, a: Const, b: Const) bool { if (a.eqZero()) { r.copy(b); @@ -607,7 +607,7 @@ pub const Mutable = struct { /// r, a and b may be aliases. /// /// Asserts the result fits in `r`. An upper bound on the number of limbs needed by - /// r is `math.max(a.limbs.len, b.limbs.len) + 1`. The +1 is not needed if both operands are positive. + /// r is `@max(a.limbs.len, b.limbs.len) + 1`. The +1 is not needed if both operands are positive. 
pub fn sub(r: *Mutable, a: Const, b: Const) void { r.add(a, b.negate()); } @@ -714,7 +714,7 @@ pub const Mutable = struct { const a_copy = if (rma.limbs.ptr == a.limbs.ptr) blk: { const start = buf_index; - const a_len = math.min(req_limbs, a.limbs.len); + const a_len = @min(req_limbs, a.limbs.len); @memcpy(limbs_buffer[buf_index..][0..a_len], a.limbs[0..a_len]); buf_index += a_len; break :blk a.toMutable(limbs_buffer[start..buf_index]).toConst(); @@ -722,7 +722,7 @@ pub const Mutable = struct { const b_copy = if (rma.limbs.ptr == b.limbs.ptr) blk: { const start = buf_index; - const b_len = math.min(req_limbs, b.limbs.len); + const b_len = @min(req_limbs, b.limbs.len); @memcpy(limbs_buffer[buf_index..][0..b_len], b.limbs[0..b_len]); buf_index += b_len; break :blk a.toMutable(limbs_buffer[start..buf_index]).toConst(); @@ -755,13 +755,13 @@ pub const Mutable = struct { const req_limbs = calcTwosCompLimbCount(bit_count); // We can ignore the upper bits here, those results will be discarded anyway. - const a_limbs = a.limbs[0..math.min(req_limbs, a.limbs.len)]; - const b_limbs = b.limbs[0..math.min(req_limbs, b.limbs.len)]; + const a_limbs = a.limbs[0..@min(req_limbs, a.limbs.len)]; + const b_limbs = b.limbs[0..@min(req_limbs, b.limbs.len)]; @memset(rma.limbs[0..req_limbs], 0); llmulacc(.add, allocator, rma.limbs, a_limbs, b_limbs); - rma.normalize(math.min(req_limbs, a.limbs.len + b.limbs.len)); + rma.normalize(@min(req_limbs, a.limbs.len + b.limbs.len)); rma.positive = (a.positive == b.positive); rma.truncate(rma.toConst(), signedness, bit_count); } @@ -1211,7 +1211,7 @@ pub const Mutable = struct { /// /// a and b are zero-extended to the longer of a or b. /// - /// Asserts that r has enough limbs to store the result. Upper bound is `math.max(a.limbs.len, b.limbs.len)`. + /// Asserts that r has enough limbs to store the result. Upper bound is `@max(a.limbs.len, b.limbs.len)`. pub fn bitOr(r: *Mutable, a: Const, b: Const) void { // Trivial cases, llsignedor does not support zero. if (a.eqZero()) { @@ -1235,8 +1235,8 @@ pub const Mutable = struct { /// r may alias with a or b. /// /// Asserts that r has enough limbs to store the result. - /// If a or b is positive, the upper bound is `math.min(a.limbs.len, b.limbs.len)`. - /// If a and b are negative, the upper bound is `math.max(a.limbs.len, b.limbs.len) + 1`. + /// If a or b is positive, the upper bound is `@min(a.limbs.len, b.limbs.len)`. + /// If a and b are negative, the upper bound is `@max(a.limbs.len, b.limbs.len) + 1`. pub fn bitAnd(r: *Mutable, a: Const, b: Const) void { // Trivial cases, llsignedand does not support zero. if (a.eqZero()) { @@ -1260,8 +1260,8 @@ pub const Mutable = struct { /// r may alias with a or b. /// /// Asserts that r has enough limbs to store the result. If a and b share the same signedness, the - /// upper bound is `math.max(a.limbs.len, b.limbs.len)`. Otherwise, if either a or b is negative - /// but not both, the upper bound is `math.max(a.limbs.len, b.limbs.len) + 1`. + /// upper bound is `@max(a.limbs.len, b.limbs.len)`. Otherwise, if either a or b is negative + /// but not both, the upper bound is `@max(a.limbs.len, b.limbs.len) + 1`. pub fn bitXor(r: *Mutable, a: Const, b: Const) void { // Trivial cases, because llsignedxor does not support negative zero. if (a.eqZero()) { @@ -1284,7 +1284,7 @@ pub const Mutable = struct { /// rma may alias x or y. /// x and y may alias each other. /// Asserts that `rma` has enough limbs to store the result. Upper bound is - /// `math.min(x.limbs.len, y.limbs.len)`. 
+ /// `@min(x.limbs.len, y.limbs.len)`. /// /// `limbs_buffer` is used for temporary storage during the operation. When this function returns, /// it will have the same length as it had when the function was called. @@ -1546,7 +1546,7 @@ pub const Mutable = struct { if (yi != 0) break i; } else unreachable; - const xy_trailing = math.min(x_trailing, y_trailing); + const xy_trailing = @min(x_trailing, y_trailing); if (y.len - xy_trailing == 1) { const divisor = y.limbs[y.len - 1]; @@ -2589,7 +2589,7 @@ pub const Managed = struct { .allocator = allocator, .metadata = 1, .limbs = block: { - const limbs = try allocator.alloc(Limb, math.max(default_capacity, capacity)); + const limbs = try allocator.alloc(Limb, @max(default_capacity, capacity)); limbs[0] = 0; break :block limbs; }, @@ -2918,7 +2918,7 @@ pub const Managed = struct { /// /// Returns an error if memory could not be allocated. pub fn sub(r: *Managed, a: *const Managed, b: *const Managed) !void { - try r.ensureCapacity(math.max(a.len(), b.len()) + 1); + try r.ensureCapacity(@max(a.len(), b.len()) + 1); var m = r.toMutable(); m.sub(a.toConst(), b.toConst()); r.setMetadata(m.positive, m.len); @@ -3025,11 +3025,11 @@ pub const Managed = struct { } pub fn ensureAddScalarCapacity(r: *Managed, a: Const, scalar: anytype) !void { - try r.ensureCapacity(math.max(a.limbs.len, calcLimbLen(scalar)) + 1); + try r.ensureCapacity(@max(a.limbs.len, calcLimbLen(scalar)) + 1); } pub fn ensureAddCapacity(r: *Managed, a: Const, b: Const) !void { - try r.ensureCapacity(math.max(a.limbs.len, b.limbs.len) + 1); + try r.ensureCapacity(@max(a.limbs.len, b.limbs.len) + 1); } pub fn ensureMulCapacity(rma: *Managed, a: Const, b: Const) !void { @@ -3123,7 +3123,7 @@ pub const Managed = struct { /// /// a and b are zero-extended to the longer of a or b. pub fn bitOr(r: *Managed, a: *const Managed, b: *const Managed) !void { - try r.ensureCapacity(math.max(a.len(), b.len())); + try r.ensureCapacity(@max(a.len(), b.len())); var m = r.toMutable(); m.bitOr(a.toConst(), b.toConst()); r.setMetadata(m.positive, m.len); @@ -3132,9 +3132,9 @@ pub const Managed = struct { /// r = a & b pub fn bitAnd(r: *Managed, a: *const Managed, b: *const Managed) !void { const cap = if (a.isPositive() or b.isPositive()) - math.min(a.len(), b.len()) + @min(a.len(), b.len()) else - math.max(a.len(), b.len()) + 1; + @max(a.len(), b.len()) + 1; try r.ensureCapacity(cap); var m = r.toMutable(); m.bitAnd(a.toConst(), b.toConst()); @@ -3143,7 +3143,7 @@ pub const Managed = struct { /// r = a ^ b pub fn bitXor(r: *Managed, a: *const Managed, b: *const Managed) !void { - var cap = math.max(a.len(), b.len()) + @boolToInt(a.isPositive() != b.isPositive()); + var cap = @max(a.len(), b.len()) + @boolToInt(a.isPositive() != b.isPositive()); try r.ensureCapacity(cap); var m = r.toMutable(); @@ -3156,7 +3156,7 @@ pub const Managed = struct { /// /// rma's allocator is used for temporary storage to boost multiplication performance. pub fn gcd(rma: *Managed, x: *const Managed, y: *const Managed) !void { - try rma.ensureCapacity(math.min(x.len(), y.len())); + try rma.ensureCapacity(@min(x.len(), y.len())); var m = rma.toMutable(); var limbs_buffer = std.ArrayList(Limb).init(rma.allocator); defer limbs_buffer.deinit(); @@ -3356,13 +3356,13 @@ fn llmulaccKaratsuba( // For a1 and b1 we only need `limbs_after_split` limbs. 
const a1 = blk: { var a1 = a[split..]; - a1.len = math.min(llnormalize(a1), limbs_after_split); + a1.len = @min(llnormalize(a1), limbs_after_split); break :blk a1; }; const b1 = blk: { var b1 = b[split..]; - b1.len = math.min(llnormalize(b1), limbs_after_split); + b1.len = @min(llnormalize(b1), limbs_after_split); break :blk b1; }; @@ -3381,10 +3381,10 @@ fn llmulaccKaratsuba( // Compute p2. // Note, we don't need to compute all of p2, just enough limbs to satisfy r. - const p2_limbs = math.min(limbs_after_split, a1.len + b1.len); + const p2_limbs = @min(limbs_after_split, a1.len + b1.len); @memset(tmp[0..p2_limbs], 0); - llmulacc(.add, allocator, tmp[0..p2_limbs], a1[0..math.min(a1.len, p2_limbs)], b1[0..math.min(b1.len, p2_limbs)]); + llmulacc(.add, allocator, tmp[0..p2_limbs], a1[0..@min(a1.len, p2_limbs)], b1[0..@min(b1.len, p2_limbs)]); const p2 = tmp[0..llnormalize(tmp[0..p2_limbs])]; // Add p2 * B to the result. @@ -3392,7 +3392,7 @@ fn llmulaccKaratsuba( // Add p2 * B^2 to the result if required. if (limbs_after_split2 > 0) { - llaccum(op, r[split * 2 ..], p2[0..math.min(p2.len, limbs_after_split2)]); + llaccum(op, r[split * 2 ..], p2[0..@min(p2.len, limbs_after_split2)]); } // Compute p0. @@ -3406,13 +3406,13 @@ fn llmulaccKaratsuba( llaccum(op, r, p0); // Add p0 * B to the result. In this case, we may not need all of it. - llaccum(op, r[split..], p0[0..math.min(limbs_after_split, p0.len)]); + llaccum(op, r[split..], p0[0..@min(limbs_after_split, p0.len)]); // Finally, compute and add p1. // From now on we only need `limbs_after_split` limbs for a0 and b0, since the result of the // following computation will be added * B. - const a0x = a0[0..std.math.min(a0.len, limbs_after_split)]; - const b0x = b0[0..std.math.min(b0.len, limbs_after_split)]; + const a0x = a0[0..@min(a0.len, limbs_after_split)]; + const b0x = b0[0..@min(b0.len, limbs_after_split)]; const j0_sign = llcmp(a0x, a1); const j1_sign = llcmp(b1, b0x); @@ -3544,7 +3544,7 @@ fn llmulLimb(comptime op: AccOp, acc: []Limb, y: []const Limb, xi: Limb) bool { return false; } - const split = std.math.min(y.len, acc.len); + const split = @min(y.len, acc.len); var a_lo = acc[0..split]; var a_hi = acc[split..]; @@ -4023,8 +4023,8 @@ fn llsignedand(r: []Limb, a: []const Limb, a_positive: bool, b: []const Limb, b_ // r may alias. // a and b must not be -0. // Returns `true` when the result is positive. -// If the sign of a and b is equal, then r requires at least `max(a.len, b.len)` limbs are required. -// Otherwise, r requires at least `max(a.len, b.len) + 1` limbs. +// If the sign of a and b is equal, then r requires at least `@max(a.len, b.len)` limbs are required. +// Otherwise, r requires at least `@max(a.len, b.len) + 1` limbs. fn llsignedxor(r: []Limb, a: []const Limb, a_positive: bool, b: []const Limb, b_positive: bool) bool { @setRuntimeSafety(debug_safety); assert(a.len != 0 and b.len != 0); diff --git a/lib/std/math/ldexp.zig b/lib/std/math/ldexp.zig index d2fd8db9b7..8947475159 100644 --- a/lib/std/math/ldexp.zig +++ b/lib/std/math/ldexp.zig @@ -48,7 +48,7 @@ pub fn ldexp(x: anytype, n: i32) @TypeOf(x) { return @bitCast(T, sign_bit); // Severe underflow. 
Return +/- 0 // Result underflowed, we need to shift and round - const shift = @intCast(Log2Int(TBits), math.min(-n, -(exponent + n) + 1)); + const shift = @intCast(Log2Int(TBits), @min(-n, -(exponent + n) + 1)); const exact_tie: bool = @ctz(repr) == shift - 1; var result = repr & mantissa_mask; diff --git a/lib/std/mem.zig b/lib/std/mem.zig index c4ad708887..2f34745a64 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -596,7 +596,7 @@ pub fn sortUnstableContext(a: usize, b: usize, context: anytype) void { /// Compares two slices of numbers lexicographically. O(n). pub fn order(comptime T: type, lhs: []const T, rhs: []const T) math.Order { - const n = math.min(lhs.len, rhs.len); + const n = @min(lhs.len, rhs.len); var i: usize = 0; while (i < n) : (i += 1) { switch (math.order(lhs[i], rhs[i])) { @@ -642,7 +642,7 @@ pub fn eql(comptime T: type, a: []const T, b: []const T) bool { /// Compares two slices and returns the index of the first inequality. /// Returns null if the slices are equal. pub fn indexOfDiff(comptime T: type, a: []const T, b: []const T) ?usize { - const shortest = math.min(a.len, b.len); + const shortest = @min(a.len, b.len); if (a.ptr == b.ptr) return if (a.len == b.len) null else shortest; var index: usize = 0; @@ -3296,7 +3296,7 @@ pub fn min(comptime T: type, slice: []const T) T { assert(slice.len > 0); var best = slice[0]; for (slice[1..]) |item| { - best = math.min(best, item); + best = @min(best, item); } return best; } @@ -3313,7 +3313,7 @@ pub fn max(comptime T: type, slice: []const T) T { assert(slice.len > 0); var best = slice[0]; for (slice[1..]) |item| { - best = math.max(best, item); + best = @max(best, item); } return best; } @@ -3332,8 +3332,8 @@ pub fn minMax(comptime T: type, slice: []const T) struct { min: T, max: T } { var minVal = slice[0]; var maxVal = slice[0]; for (slice[1..]) |item| { - minVal = math.min(minVal, item); - maxVal = math.max(maxVal, item); + minVal = @min(minVal, item); + maxVal = @max(maxVal, item); } return .{ .min = minVal, .max = maxVal }; } diff --git a/lib/std/net.zig b/lib/std/net.zig index 64b13ec544..dfd6fe4a9e 100644 --- a/lib/std/net.zig +++ b/lib/std/net.zig @@ -1482,11 +1482,11 @@ fn getResolvConf(allocator: mem.Allocator, rc: *ResolvConf) !void { error.InvalidCharacter => continue, }; if (mem.eql(u8, name, "ndots")) { - rc.ndots = std.math.min(value, 15); + rc.ndots = @min(value, 15); } else if (mem.eql(u8, name, "attempts")) { - rc.attempts = std.math.min(value, 10); + rc.attempts = @min(value, 10); } else if (mem.eql(u8, name, "timeout")) { - rc.timeout = std.math.min(value, 60); + rc.timeout = @min(value, 60); } } } else if (mem.eql(u8, token, "nameserver")) { @@ -1615,7 +1615,7 @@ fn resMSendRc( } // Wait for a response, or until time to retry - const clamped_timeout = std.math.min(@as(u31, std.math.maxInt(u31)), t1 + retry_interval - t2); + const clamped_timeout = @min(@as(u31, std.math.maxInt(u31)), t1 + retry_interval - t2); const nevents = os.poll(&pfd, clamped_timeout) catch 0; if (nevents == 0) continue; diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig index ef0ec94d3b..e4d6790505 100644 --- a/lib/std/os/linux.zig +++ b/lib/std/os/linux.zig @@ -317,7 +317,7 @@ pub fn getdents(fd: i32, dirp: [*]u8, len: usize) usize { .getdents, @bitCast(usize, @as(isize, fd)), @ptrToInt(dirp), - std.math.min(len, maxInt(c_int)), + @min(len, maxInt(c_int)), ); } @@ -326,7 +326,7 @@ pub fn getdents64(fd: i32, dirp: [*]u8, len: usize) usize { .getdents64, @bitCast(usize, @as(isize, fd)), @ptrToInt(dirp), - 
std.math.min(len, maxInt(c_int)), + @min(len, maxInt(c_int)), ); } diff --git a/lib/std/os/linux/io_uring.zig b/lib/std/os/linux/io_uring.zig index b7467d765f..0610b214d5 100644 --- a/lib/std/os/linux/io_uring.zig +++ b/lib/std/os/linux/io_uring.zig @@ -277,7 +277,7 @@ pub const IO_Uring = struct { fn copy_cqes_ready(self: *IO_Uring, cqes: []linux.io_uring_cqe, wait_nr: u32) u32 { _ = wait_nr; const ready = self.cq_ready(); - const count = std.math.min(cqes.len, ready); + const count = @min(cqes.len, ready); var head = self.cq.head.*; var tail = head +% count; // TODO Optimize this by using 1 or 2 memcpy's (if the tail wraps) rather than a loop. @@ -1093,7 +1093,7 @@ pub const SubmissionQueue = struct { pub fn init(fd: os.fd_t, p: linux.io_uring_params) !SubmissionQueue { assert(fd >= 0); assert((p.features & linux.IORING_FEAT_SINGLE_MMAP) != 0); - const size = std.math.max( + const size = @max( p.sq_off.array + p.sq_entries * @sizeOf(u32), p.cq_off.cqes + p.cq_entries * @sizeOf(linux.io_uring_cqe), ); diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig index e559e48915..389c4bea12 100644 --- a/lib/std/os/windows.zig +++ b/lib/std/os/windows.zig @@ -272,7 +272,7 @@ pub fn RtlGenRandom(output: []u8) RtlGenRandomError!void { const max_read_size: ULONG = maxInt(ULONG); while (total_read < output.len) { - const to_read: ULONG = math.min(buff.len, max_read_size); + const to_read: ULONG = @min(buff.len, max_read_size); if (advapi32.RtlGenRandom(buff.ptr, to_read) == 0) { return unexpectedError(kernel32.GetLastError()); @@ -501,7 +501,7 @@ pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64, io_mode: std.io.Mo return @as(usize, bytes_transferred); } else { while (true) { - const want_read_count = @intCast(DWORD, math.min(@as(DWORD, maxInt(DWORD)), buffer.len)); + const want_read_count: DWORD = @min(@as(DWORD, maxInt(DWORD)), buffer.len); var amt_read: DWORD = undefined; var overlapped_data: OVERLAPPED = undefined; const overlapped: ?*OVERLAPPED = if (offset) |off| blk: { diff --git a/lib/std/pdb.zig b/lib/std/pdb.zig index 5bc836b08e..180507ba71 100644 --- a/lib/std/pdb.zig +++ b/lib/std/pdb.zig @@ -1049,7 +1049,7 @@ const MsfStream = struct { var size: usize = 0; var rem_buffer = buffer; while (size < buffer.len) { - const size_to_read = math.min(self.block_size - offset, rem_buffer.len); + const size_to_read = @min(self.block_size - offset, rem_buffer.len); size += try in.read(rem_buffer[0..size_to_read]); rem_buffer = buffer[size..]; offset += size_to_read; diff --git a/lib/std/rand.zig b/lib/std/rand.zig index 1e9f4051e9..f07562c911 100644 --- a/lib/std/rand.zig +++ b/lib/std/rand.zig @@ -410,7 +410,7 @@ pub const Random = struct { r.uintLessThan(T, sum) else if (comptime std.meta.trait.isFloat(T)) // take care that imprecision doesn't lead to a value slightly greater than sum - std.math.min(r.float(T) * sum, sum - std.math.floatEps(T)) + @min(r.float(T) * sum, sum - std.math.floatEps(T)) else @compileError("weightedIndex does not support proportions of type " ++ @typeName(T)); diff --git a/lib/std/sort/block.zig b/lib/std/sort/block.zig index 6c1be9c6c2..518d148a73 100644 --- a/lib/std/sort/block.zig +++ b/lib/std/sort/block.zig @@ -590,7 +590,7 @@ pub fn block( // whenever we leave an A block behind, we'll need to merge the previous A block with any B blocks that follow it, so track that information as well var lastA = firstA; var lastB = Range.init(0, 0); - var blockB = Range.init(B.start, B.start + math.min(block_size, B.length())); + var blockB = Range.init(B.start, 
B.start + @min(block_size, B.length())); blockA.start += firstA.length(); indexA = buffer1.start; @@ -849,7 +849,7 @@ fn findFirstForward( comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, ) usize { if (range.length() == 0) return range.start; - const skip = math.max(range.length() / unique, @as(usize, 1)); + const skip = @max(range.length() / unique, @as(usize, 1)); var index = range.start + skip; while (lessThan(context, items[index - 1], value)) : (index += skip) { @@ -871,7 +871,7 @@ fn findFirstBackward( comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, ) usize { if (range.length() == 0) return range.start; - const skip = math.max(range.length() / unique, @as(usize, 1)); + const skip = @max(range.length() / unique, @as(usize, 1)); var index = range.end - skip; while (index > range.start and !lessThan(context, items[index - 1], value)) : (index -= skip) { @@ -893,7 +893,7 @@ fn findLastForward( comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, ) usize { if (range.length() == 0) return range.start; - const skip = math.max(range.length() / unique, @as(usize, 1)); + const skip = @max(range.length() / unique, @as(usize, 1)); var index = range.start + skip; while (!lessThan(context, value, items[index - 1])) : (index += skip) { @@ -915,7 +915,7 @@ fn findLastBackward( comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, ) usize { if (range.length() == 0) return range.start; - const skip = math.max(range.length() / unique, @as(usize, 1)); + const skip = @max(range.length() / unique, @as(usize, 1)); var index = range.end - skip; while (index > range.start and lessThan(context, value, items[index - 1])) : (index -= skip) { diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig index 83fa68567f..3930c9714a 100644 --- a/lib/std/zig/render.zig +++ b/lib/std/zig/render.zig @@ -1960,7 +1960,7 @@ fn renderArrayInit( if (!this_contains_newline) { const column = column_counter % row_size; - column_widths[column] = std.math.max(column_widths[column], width); + column_widths[column] = @max(column_widths[column], width); const expr_last_token = tree.lastToken(expr) + 1; const next_expr = section_exprs[i + 1]; @@ -1980,7 +1980,7 @@ fn renderArrayInit( if (!contains_newline) { const column = column_counter % row_size; - column_widths[column] = std.math.max(column_widths[column], width); + column_widths[column] = @max(column_widths[column], width); } } } diff --git a/lib/std/zig/system/NativeTargetInfo.zig b/lib/std/zig/system/NativeTargetInfo.zig index f17356fdcd..cddaea2295 100644 --- a/lib/std/zig/system/NativeTargetInfo.zig +++ b/lib/std/zig/system/NativeTargetInfo.zig @@ -503,7 +503,7 @@ fn glibcVerFromSoFile(file: fs.File) !std.builtin.Version { const shstrtab_off = elfInt(is_64, need_bswap, shstr32.sh_offset, shstr64.sh_offset); const shstrtab_size = elfInt(is_64, need_bswap, shstr32.sh_size, shstr64.sh_size); var strtab_buf: [4096:0]u8 = undefined; - const shstrtab_len = std.math.min(shstrtab_size, strtab_buf.len); + const shstrtab_len = @min(shstrtab_size, strtab_buf.len); const shstrtab_read_len = try preadMin(file, &strtab_buf, shstrtab_off, shstrtab_len); const shstrtab = strtab_buf[0..shstrtab_read_len]; const shnum = elfInt(is_64, need_bswap, hdr32.e_shnum, hdr64.e_shnum); @@ -757,7 +757,7 @@ pub fn abiAndDynamicLinkerFromFile( const shstrtab_off = elfInt(is_64, need_bswap, shstr32.sh_offset, shstr64.sh_offset); const shstrtab_size = elfInt(is_64, need_bswap, shstr32.sh_size, shstr64.sh_size); var strtab_buf: [4096:0]u8 = undefined; - 
const shstrtab_len = std.math.min(shstrtab_size, strtab_buf.len); + const shstrtab_len = @min(shstrtab_size, strtab_buf.len); const shstrtab_read_len = try preadMin(file, &strtab_buf, shstrtab_off, shstrtab_len); const shstrtab = strtab_buf[0..shstrtab_read_len]; @@ -806,7 +806,7 @@ pub fn abiAndDynamicLinkerFromFile( const rpoff_file = ds.offset + rpoff_usize; const rp_max_size = ds.size - rpoff_usize; - const strtab_len = std.math.min(rp_max_size, strtab_buf.len); + const strtab_len = @min(rp_max_size, strtab_buf.len); const strtab_read_len = try preadMin(file, &strtab_buf, rpoff_file, strtab_len); const strtab = strtab_buf[0..strtab_read_len]; |
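For downstream users, the notable part of the lib/std/math.zig hunk above is that the old names are not deleted outright but turned into @compileError stubs, so any remaining reference produces the pointed message "deprecated; use @min instead" rather than an ordinary missing-declaration error. A rough sketch of that pattern, plus the corresponding call-site fix (the helper function is hypothetical, not taken from the patch):

    // Deprecation stub, as in lib/std/math.zig: container-level declarations are
    // evaluated lazily, so the @compileError only fires if something still
    // references math.min or math.max.
    pub const min = @compileError("deprecated; use @min instead");
    pub const max = @compileError("deprecated; use @max instead");

    // Call-site fix: replace `std.math.min(lhs.len, rhs.len)` with the builtin.
    fn commonLen(lhs: []const u8, rhs: []const u8) usize {
        return @min(lhs.len, rhs.len);
    }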
