-rw-r--r--  doc/docgen.zig | 2
-rw-r--r--  lib/build_runner.zig | 63
-rw-r--r--  lib/compiler_rt/divc3.zig | 3
-rw-r--r--  lib/compiler_rt/emutls.zig | 4
-rw-r--r--  lib/std/Build/Cache/DepTokenizer.zig | 2
-rw-r--r--  lib/std/Thread.zig | 6
-rw-r--r--  lib/std/Uri.zig | 4
-rw-r--r--  lib/std/array_hash_map.zig | 6
-rw-r--r--  lib/std/ascii.zig | 2
-rw-r--r--  lib/std/compress/lzma/decode.zig | 2
-rw-r--r--  lib/std/crypto/blake3.zig | 8
-rw-r--r--  lib/std/crypto/ff.zig | 2
-rw-r--r--  lib/std/crypto/ghash_polyval.zig | 2
-rw-r--r--  lib/std/crypto/keccak_p.zig | 4
-rw-r--r--  lib/std/crypto/poly1305.zig | 2
-rw-r--r--  lib/std/crypto/salsa20.zig | 2
-rw-r--r--  lib/std/crypto/scrypt.zig | 4
-rw-r--r--  lib/std/crypto/sha3.zig | 2
-rw-r--r--  lib/std/crypto/siphash.zig | 2
-rw-r--r--  lib/std/debug.zig | 4
-rw-r--r--  lib/std/dynamic_library.zig | 3
-rw-r--r--  lib/std/event/loop.zig | 2
-rw-r--r--  lib/std/fifo.zig | 2
-rw-r--r--  lib/std/fmt.zig | 18
-rw-r--r--  lib/std/hash/wyhash.zig | 2
-rw-r--r--  lib/std/hash_map.zig | 6
-rw-r--r--  lib/std/heap/arena_allocator.zig | 2
-rw-r--r--  lib/std/heap/memory_pool.zig | 4
-rw-r--r--  lib/std/http/protocol.zig | 2
-rw-r--r--  lib/std/io/fixed_buffer_stream.zig | 4
-rw-r--r--  lib/std/io/limited_reader.zig | 2
-rw-r--r--  lib/std/io/reader.zig | 2
-rw-r--r--  lib/std/io/writer.zig | 2
-rw-r--r--  lib/std/math.zig | 103
-rw-r--r--  lib/std/math/big/int.zig | 96
-rw-r--r--  lib/std/math/ldexp.zig | 2
-rw-r--r--  lib/std/mem.zig | 12
-rw-r--r--  lib/std/net.zig | 8
-rw-r--r--  lib/std/os/linux.zig | 4
-rw-r--r--  lib/std/os/linux/io_uring.zig | 4
-rw-r--r--  lib/std/os/windows.zig | 4
-rw-r--r--  lib/std/pdb.zig | 2
-rw-r--r--  lib/std/rand.zig | 2
-rw-r--r--  lib/std/sort/block.zig | 10
-rw-r--r--  lib/std/zig/render.zig | 4
-rw-r--r--  lib/std/zig/system/NativeTargetInfo.zig | 6
-rw-r--r--  src/Autodoc.zig | 4
-rw-r--r--  src/Sema.zig | 260
-rw-r--r--  src/TypedValue.zig | 10
-rw-r--r--  src/arch/x86_64/CodeGen.zig | 4
-rw-r--r--  src/link/Elf.zig | 2
-rw-r--r--  src/link/MachO/CodeSignature.zig | 6
-rw-r--r--  src/link/MachO/Object.zig | 2
-rw-r--r--  src/link/Wasm.zig | 2
-rw-r--r--  src/link/Wasm/Object.zig | 2
-rw-r--r--  src/main.zig | 44
-rw-r--r--  src/translate_c.zig | 2
-rw-r--r--  src/translate_c/ast.zig | 14
-rw-r--r--  src/type.zig | 2
-rw-r--r--  src/value.zig | 22
-rw-r--r--  stage1/zig.h | 37
-rw-r--r--  stage1/zig1.wasm | bin 2455067 -> 2489820 bytes
-rw-r--r--  test/behavior/maximum_minimum.zig | 85
63 files changed, 497 insertions, 435 deletions
diff --git a/doc/docgen.zig b/doc/docgen.zig
index bdbde6f5d2..4a9e33fbdd 100644
--- a/doc/docgen.zig
+++ b/doc/docgen.zig
@@ -276,7 +276,7 @@ fn parseError(tokenizer: *Tokenizer, token: Token, comptime fmt: []const u8, arg
}
}
{
- const caret_count = std.math.min(token.end, loc.line_end) - token.start;
+ const caret_count = @min(token.end, loc.line_end) - token.start;
var i: usize = 0;
while (i < caret_count) : (i += 1) {
print("~", .{});
diff --git a/lib/build_runner.zig b/lib/build_runner.zig
index a09ec2cf1f..a5abbebe16 100644
--- a/lib/build_runner.zig
+++ b/lib/build_runner.zig
@@ -90,7 +90,7 @@ pub fn main() !void {
var install_prefix: ?[]const u8 = null;
var dir_list = std.Build.DirList{};
- var enable_summary: ?bool = null;
+ var summary: ?Summary = null;
var max_rss: usize = 0;
var color: Color = .auto;
@@ -178,6 +178,15 @@ pub fn main() !void {
std.debug.print("Expected [auto|on|off] after {s}, found '{s}'\n\n", .{ arg, next_arg });
usageAndErr(builder, false, stderr_stream);
};
+ } else if (mem.eql(u8, arg, "--summary")) {
+ const next_arg = nextArg(args, &arg_idx) orelse {
+ std.debug.print("Expected [all|failures|none] after {s}\n\n", .{arg});
+ usageAndErr(builder, false, stderr_stream);
+ };
+ summary = std.meta.stringToEnum(Summary, next_arg) orelse {
+ std.debug.print("Expected [all|failures|none] after {s}, found '{s}'\n\n", .{ arg, next_arg });
+ usageAndErr(builder, false, stderr_stream);
+ };
} else if (mem.eql(u8, arg, "--zig-lib-dir")) {
builder.zig_lib_dir = nextArg(args, &arg_idx) orelse {
std.debug.print("Expected argument after {s}\n\n", .{arg});
@@ -234,10 +243,6 @@ pub fn main() !void {
builder.enable_darling = true;
} else if (mem.eql(u8, arg, "-fno-darling")) {
builder.enable_darling = false;
- } else if (mem.eql(u8, arg, "-fsummary")) {
- enable_summary = true;
- } else if (mem.eql(u8, arg, "-fno-summary")) {
- enable_summary = false;
} else if (mem.eql(u8, arg, "-freference-trace")) {
builder.reference_trace = 256;
} else if (mem.startsWith(u8, arg, "-freference-trace=")) {
@@ -302,7 +307,7 @@ pub fn main() !void {
.memory_blocked_steps = std.ArrayList(*Step).init(arena),
.claimed_rss = 0,
- .enable_summary = enable_summary,
+ .summary = summary,
.ttyconf = ttyconf,
.stderr = stderr,
};
@@ -332,7 +337,7 @@ const Run = struct {
memory_blocked_steps: std.ArrayList(*Step),
claimed_rss: usize,
- enable_summary: ?bool,
+ summary: ?Summary,
ttyconf: std.io.tty.Config,
stderr: std.fs.File,
};
@@ -469,12 +474,12 @@ fn runStepNames(
// A proper command line application defaults to silently succeeding.
// The user may request verbose mode if they have a different preference.
- if (failure_count == 0 and run.enable_summary != true) return cleanExit();
+ if (failure_count == 0 and run.summary != Summary.all) return cleanExit();
const ttyconf = run.ttyconf;
const stderr = run.stderr;
- if (run.enable_summary != false) {
+ if (run.summary != Summary.none) {
const total_count = success_count + failure_count + pending_count + skipped_count;
ttyconf.setColor(stderr, .cyan) catch {};
stderr.writeAll("Build Summary:") catch {};
@@ -488,23 +493,32 @@ fn runStepNames(
if (test_fail_count > 0) stderr.writer().print("; {d} failed", .{test_fail_count}) catch {};
if (test_leak_count > 0) stderr.writer().print("; {d} leaked", .{test_leak_count}) catch {};
- if (run.enable_summary == null) {
+ if (run.summary == null) {
ttyconf.setColor(stderr, .dim) catch {};
- stderr.writeAll(" (disable with -fno-summary)") catch {};
+ stderr.writeAll(" (disable with --summary none)") catch {};
ttyconf.setColor(stderr, .reset) catch {};
}
stderr.writeAll("\n") catch {};
+ const failures_only = run.summary != Summary.all;
// Print a fancy tree with build results.
var print_node: PrintNode = .{ .parent = null };
if (step_names.len == 0) {
print_node.last = true;
- printTreeStep(b, b.default_step, stderr, ttyconf, &print_node, &step_stack) catch {};
+ printTreeStep(b, b.default_step, stderr, ttyconf, &print_node, &step_stack, failures_only) catch {};
} else {
+ const last_index = if (!failures_only) b.top_level_steps.count() else blk: {
+ var i: usize = step_names.len;
+ while (i > 0) {
+ i -= 1;
+ if (b.top_level_steps.get(step_names[i]).?.step.state != .success) break :blk i;
+ }
+ break :blk b.top_level_steps.count();
+ };
for (step_names, 0..) |step_name, i| {
const tls = b.top_level_steps.get(step_name).?;
- print_node.last = i + 1 == b.top_level_steps.count();
- printTreeStep(b, &tls.step, stderr, ttyconf, &print_node, &step_stack) catch {};
+ print_node.last = i + 1 == last_index;
+ printTreeStep(b, &tls.step, stderr, ttyconf, &print_node, &step_stack, failures_only) catch {};
}
}
}
@@ -556,8 +570,10 @@ fn printTreeStep(
ttyconf: std.io.tty.Config,
parent_node: *PrintNode,
step_stack: *std.AutoArrayHashMapUnmanaged(*Step, void),
+ failures_only: bool,
) !void {
const first = step_stack.swapRemove(s);
+ if (failures_only and s.state == .success) return;
try printPrefix(parent_node, stderr, ttyconf);
if (!first) try ttyconf.setColor(stderr, .dim);
@@ -688,12 +704,20 @@ fn printTreeStep(
},
}
+ const last_index = if (!failures_only) s.dependencies.items.len -| 1 else blk: {
+ var i: usize = s.dependencies.items.len;
+ while (i > 0) {
+ i -= 1;
+ if (s.dependencies.items[i].state != .success) break :blk i;
+ }
+ break :blk s.dependencies.items.len -| 1;
+ };
for (s.dependencies.items, 0..) |dep, i| {
var print_node: PrintNode = .{
.parent = parent_node,
- .last = i == s.dependencies.items.len - 1,
+ .last = i == last_index,
};
- try printTreeStep(b, dep, stderr, ttyconf, &print_node, step_stack);
+ try printTreeStep(b, dep, stderr, ttyconf, &print_node, step_stack, failures_only);
}
} else {
if (s.dependencies.items.len == 0) {
@@ -948,8 +972,10 @@ fn usage(builder: *std.Build, already_ran_build: bool, out_stream: anytype) !voi
\\ -l, --list-steps Print available steps
\\ --verbose Print commands before executing them
\\ --color [auto|off|on] Enable or disable colored error messages
- \\ -fsummary Print the build summary, even on success
- \\ -fno-summary Omit the build summary, even on failure
+ \\ --summary [mode] Control the printing of the build summary
+ \\ all Print the build summary in its entirety
+ \\ failures (Default) Only print failed steps
+ \\ none Do not print the build summary
\\ -j<N> Limit concurrent jobs (default is to use all CPU cores)
\\ --maxrss <bytes> Limit memory usage (default is to use available memory)
\\
@@ -1025,6 +1051,7 @@ fn cleanExit() void {
}
const Color = enum { auto, off, on };
+const Summary = enum { all, failures, none };
fn get_tty_conf(color: Color, stderr: std.fs.File) std.io.tty.Config {
return switch (color) {
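The build_runner change above replaces the -fsummary/-fno-summary flags with a single --summary [all|failures|none] option, parsed into the new Summary enum via std.meta.stringToEnum. A minimal standalone sketch of that parsing step (the `input` value here is hypothetical, not from the diff):

    const std = @import("std");

    const Summary = enum { all, failures, none };

    test "parse a --summary argument with std.meta.stringToEnum" {
        const input = "failures"; // hypothetical command-line value
        // stringToEnum returns null for anything other than all/failures/none,
        // which is what triggers the usage error in the build runner above.
        const mode = std.meta.stringToEnum(Summary, input) orelse return error.InvalidSummaryMode;
        try std.testing.expect(mode == .failures);
    }
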
diff --git a/lib/compiler_rt/divc3.zig b/lib/compiler_rt/divc3.zig
index 4e4dba2856..c4241c1483 100644
--- a/lib/compiler_rt/divc3.zig
+++ b/lib/compiler_rt/divc3.zig
@@ -3,7 +3,6 @@ const isNan = std.math.isNan;
const isInf = std.math.isInf;
const scalbn = std.math.scalbn;
const ilogb = std.math.ilogb;
-const max = std.math.max;
const fabs = std.math.fabs;
const maxInt = std.math.maxInt;
const minInt = std.math.minInt;
@@ -17,7 +16,7 @@ pub inline fn divc3(comptime T: type, a: T, b: T, c_in: T, d_in: T) Complex(T) {
var d = d_in;
// logbw used to prevent under/over-flow
- const logbw = ilogb(max(fabs(c), fabs(d)));
+ const logbw = ilogb(@max(fabs(c), fabs(d)));
const logbw_finite = logbw != maxInt(i32) and logbw != minInt(i32);
const ilogbw = if (logbw_finite) b: {
c = scalbn(c, -logbw);
diff --git a/lib/compiler_rt/emutls.zig b/lib/compiler_rt/emutls.zig
index 05a2de97a8..47c71efadd 100644
--- a/lib/compiler_rt/emutls.zig
+++ b/lib/compiler_rt/emutls.zig
@@ -49,7 +49,7 @@ const simple_allocator = struct {
/// Allocate a memory chunk.
pub fn advancedAlloc(alignment: u29, size: usize) [*]u8 {
- const minimal_alignment = std.math.max(@alignOf(usize), alignment);
+ const minimal_alignment = @max(@alignOf(usize), alignment);
var aligned_ptr: ?*anyopaque = undefined;
if (std.c.posix_memalign(&aligned_ptr, minimal_alignment, size) != 0) {
@@ -170,7 +170,7 @@ const current_thread_storage = struct {
// make it to contains at least 16 objects (to avoid too much
// reallocation at startup).
- const size = std.math.max(16, index);
+ const size = @max(16, index);
// create a new array and store it.
var array: *ObjectArray = ObjectArray.init(size);
diff --git a/lib/std/Build/Cache/DepTokenizer.zig b/lib/std/Build/Cache/DepTokenizer.zig
index 1a4e2ddb74..0e5224edc0 100644
--- a/lib/std/Build/Cache/DepTokenizer.zig
+++ b/lib/std/Build/Cache/DepTokenizer.zig
@@ -983,7 +983,7 @@ fn hexDump(out: anytype, bytes: []const u8) !void {
try printDecValue(out, offset, 8);
try out.writeAll(":");
try out.writeAll(" ");
- var end1 = std.math.min(offset + n, offset + 8);
+ var end1 = @min(offset + n, offset + 8);
for (bytes[offset..end1]) |b| {
try out.writeAll(" ");
try printHexValue(out, b, 2);
diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig
index ed6a9383e3..76650a9072 100644
--- a/lib/std/Thread.zig
+++ b/lib/std/Thread.zig
@@ -541,7 +541,7 @@ const WindowsThreadImpl = struct {
// Going lower makes it default to that specified in the executable (~1mb).
// Its also fine if the limit here is incorrect as stack size is only a hint.
var stack_size = std.math.cast(u32, config.stack_size) orelse std.math.maxInt(u32);
- stack_size = std.math.max(64 * 1024, stack_size);
+ stack_size = @max(64 * 1024, stack_size);
instance.thread.thread_handle = windows.kernel32.CreateThread(
null,
@@ -690,7 +690,7 @@ const PosixThreadImpl = struct {
defer assert(c.pthread_attr_destroy(&attr) == .SUCCESS);
// Use the same set of parameters used by the libc-less impl.
- const stack_size = std.math.max(config.stack_size, c.PTHREAD_STACK_MIN);
+ const stack_size = @max(config.stack_size, c.PTHREAD_STACK_MIN);
assert(c.pthread_attr_setstacksize(&attr, stack_size) == .SUCCESS);
assert(c.pthread_attr_setguardsize(&attr, std.mem.page_size) == .SUCCESS);
@@ -930,7 +930,7 @@ const LinuxThreadImpl = struct {
var bytes: usize = page_size;
guard_offset = bytes;
- bytes += std.math.max(page_size, config.stack_size);
+ bytes += @max(page_size, config.stack_size);
bytes = std.mem.alignForward(bytes, page_size);
stack_offset = bytes;
diff --git a/lib/std/Uri.zig b/lib/std/Uri.zig
index 7a9755bd28..198ab461ae 100644
--- a/lib/std/Uri.zig
+++ b/lib/std/Uri.zig
@@ -177,13 +177,13 @@ pub fn parseWithoutScheme(text: []const u8) ParseError!Uri {
if (std.mem.lastIndexOf(u8, authority, ":")) |index| {
if (index >= end_of_host) { // if not part of the V6 address field
- end_of_host = std.math.min(end_of_host, index);
+ end_of_host = @min(end_of_host, index);
uri.port = std.fmt.parseInt(u16, authority[index + 1 ..], 10) catch return error.InvalidPort;
}
}
} else if (std.mem.lastIndexOf(u8, authority, ":")) |index| {
if (index >= start_of_host) { // if not part of the userinfo field
- end_of_host = std.math.min(end_of_host, index);
+ end_of_host = @min(end_of_host, index);
uri.port = std.fmt.parseInt(u16, authority[index + 1 ..], 10) catch return error.InvalidPort;
}
}
diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig
index 55b9aac6e4..b46b5c12f0 100644
--- a/lib/std/array_hash_map.zig
+++ b/lib/std/array_hash_map.zig
@@ -815,9 +815,9 @@ pub fn ArrayHashMapUnmanaged(
/// no longer guaranteed that no allocations will be performed.
pub fn capacity(self: Self) usize {
const entry_cap = self.entries.capacity;
- const header = self.index_header orelse return math.min(linear_scan_max, entry_cap);
+ const header = self.index_header orelse return @min(linear_scan_max, entry_cap);
const indexes_cap = header.capacity();
- return math.min(entry_cap, indexes_cap);
+ return @min(entry_cap, indexes_cap);
}
/// Clobbers any existing data. To detect if a put would clobber
@@ -1821,7 +1821,7 @@ fn Index(comptime I: type) type {
/// length * the size of an Index(u32). The index is 8 bytes (3 bits repr)
/// and max_usize + 1 is not representable, so we need to subtract out 4 bits.
const max_representable_index_len = @bitSizeOf(usize) - 4;
-const max_bit_index = math.min(32, max_representable_index_len);
+const max_bit_index = @min(32, max_representable_index_len);
const min_bit_index = 5;
const max_capacity = (1 << max_bit_index) - 1;
const index_capacities = blk: {
diff --git a/lib/std/ascii.zig b/lib/std/ascii.zig
index 941f398f20..e47ef4db65 100644
--- a/lib/std/ascii.zig
+++ b/lib/std/ascii.zig
@@ -422,7 +422,7 @@ test "indexOfIgnoreCase" {
/// Returns the lexicographical order of two slices. O(n).
pub fn orderIgnoreCase(lhs: []const u8, rhs: []const u8) std.math.Order {
- const n = std.math.min(lhs.len, rhs.len);
+ const n = @min(lhs.len, rhs.len);
var i: usize = 0;
while (i < n) : (i += 1) {
switch (std.math.order(toLower(lhs[i]), toLower(rhs[i]))) {
diff --git a/lib/std/compress/lzma/decode.zig b/lib/std/compress/lzma/decode.zig
index dc220d8e87..f539abf8b1 100644
--- a/lib/std/compress/lzma/decode.zig
+++ b/lib/std/compress/lzma/decode.zig
@@ -59,7 +59,7 @@ pub const Params = struct {
const pb = @intCast(u3, props);
const dict_size_provided = try reader.readIntLittle(u32);
- const dict_size = math.max(0x1000, dict_size_provided);
+ const dict_size = @max(0x1000, dict_size_provided);
const unpacked_size = switch (options.unpacked_size) {
.read_from_header => blk: {
diff --git a/lib/std/crypto/blake3.zig b/lib/std/crypto/blake3.zig
index fb580fda13..7ad1511e79 100644
--- a/lib/std/crypto/blake3.zig
+++ b/lib/std/crypto/blake3.zig
@@ -20,7 +20,7 @@ const ChunkIterator = struct {
}
fn next(self: *ChunkIterator) ?[]u8 {
- const next_chunk = self.slice[0..math.min(self.chunk_len, self.slice.len)];
+ const next_chunk = self.slice[0..@min(self.chunk_len, self.slice.len)];
self.slice = self.slice[next_chunk.len..];
return if (next_chunk.len > 0) next_chunk else null;
}
@@ -283,7 +283,7 @@ const ChunkState = struct {
fn fillBlockBuf(self: *ChunkState, input: []const u8) []const u8 {
const want = BLOCK_LEN - self.block_len;
- const take = math.min(want, input.len);
+ const take = @min(want, input.len);
@memcpy(self.block[self.block_len..][0..take], input[0..take]);
self.block_len += @truncate(u8, take);
return input[take..];
@@ -450,7 +450,7 @@ pub const Blake3 = struct {
// Compress input bytes into the current chunk state.
const want = CHUNK_LEN - self.chunk_state.len();
- const take = math.min(want, input.len);
+ const take = @min(want, input.len);
self.chunk_state.update(input[0..take]);
input = input[take..];
}
@@ -663,7 +663,7 @@ fn testBlake3(hasher: *Blake3, input_len: usize, expected_hex: [262]u8) !void {
// Write repeating input pattern to hasher
var input_counter = input_len;
while (input_counter > 0) {
- const update_len = math.min(input_counter, input_pattern.len);
+ const update_len = @min(input_counter, input_pattern.len);
hasher.update(input_pattern[0..update_len]);
input_counter -= update_len;
}
diff --git a/lib/std/crypto/ff.zig b/lib/std/crypto/ff.zig
index 84753ddefb..37e3d1c1b3 100644
--- a/lib/std/crypto/ff.zig
+++ b/lib/std/crypto/ff.zig
@@ -570,7 +570,7 @@ pub fn Modulus(comptime max_bits: comptime_int) type {
var out = self.zero;
var i = x.limbs_count() - 1;
if (self.limbs_count() >= 2) {
- const start = math.min(i, self.limbs_count() - 2);
+ const start = @min(i, self.limbs_count() - 2);
var j = start;
while (true) : (j -= 1) {
out.v.limbs.set(j, x.limbs.get(i));
diff --git a/lib/std/crypto/ghash_polyval.zig b/lib/std/crypto/ghash_polyval.zig
index 46645d710f..2fbff25f72 100644
--- a/lib/std/crypto/ghash_polyval.zig
+++ b/lib/std/crypto/ghash_polyval.zig
@@ -363,7 +363,7 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type {
var mb = m;
if (st.leftover > 0) {
- const want = math.min(block_length - st.leftover, mb.len);
+ const want = @min(block_length - st.leftover, mb.len);
const mc = mb[0..want];
for (mc, 0..) |x, i| {
st.buf[st.leftover + i] = x;
diff --git a/lib/std/crypto/keccak_p.zig b/lib/std/crypto/keccak_p.zig
index 9226f2f6d4..ddc9b1b847 100644
--- a/lib/std/crypto/keccak_p.zig
+++ b/lib/std/crypto/keccak_p.zig
@@ -214,7 +214,7 @@ pub fn State(comptime f: u11, comptime capacity: u11, comptime delim: u8, compti
pub fn absorb(self: *Self, bytes_: []const u8) void {
var bytes = bytes_;
if (self.offset > 0) {
- const left = math.min(rate - self.offset, bytes.len);
+ const left = @min(rate - self.offset, bytes.len);
@memcpy(self.buf[self.offset..][0..left], bytes[0..left]);
self.offset += left;
if (self.offset == rate) {
@@ -249,7 +249,7 @@ pub fn State(comptime f: u11, comptime capacity: u11, comptime delim: u8, compti
pub fn squeeze(self: *Self, out: []u8) void {
var i: usize = 0;
while (i < out.len) : (i += rate) {
- const left = math.min(rate, out.len - i);
+ const left = @min(rate, out.len - i);
self.st.extractBytes(out[i..][0..left]);
self.st.permuteR(rounds);
}
diff --git a/lib/std/crypto/poly1305.zig b/lib/std/crypto/poly1305.zig
index a2873f1145..51e1c2ab24 100644
--- a/lib/std/crypto/poly1305.zig
+++ b/lib/std/crypto/poly1305.zig
@@ -112,7 +112,7 @@ pub const Poly1305 = struct {
// handle leftover
if (st.leftover > 0) {
- const want = std.math.min(block_length - st.leftover, mb.len);
+ const want = @min(block_length - st.leftover, mb.len);
const mc = mb[0..want];
for (mc, 0..) |x, i| {
st.buf[st.leftover + i] = x;
diff --git a/lib/std/crypto/salsa20.zig b/lib/std/crypto/salsa20.zig
index 7f57e6cecb..c8a639ad0b 100644
--- a/lib/std/crypto/salsa20.zig
+++ b/lib/std/crypto/salsa20.zig
@@ -404,7 +404,7 @@ pub const XSalsa20Poly1305 = struct {
debug.assert(c.len == m.len);
const extended = extend(rounds, k, npub);
var block0 = [_]u8{0} ** 64;
- const mlen0 = math.min(32, c.len);
+ const mlen0 = @min(32, c.len);
@memcpy(block0[32..][0..mlen0], c[0..mlen0]);
Salsa20.xor(block0[0..], block0[0..], 0, extended.key, extended.nonce);
var mac = Poly1305.init(block0[0..32]);
diff --git a/lib/std/crypto/scrypt.zig b/lib/std/crypto/scrypt.zig
index b8e8ef55e2..97dd9b95d0 100644
--- a/lib/std/crypto/scrypt.zig
+++ b/lib/std/crypto/scrypt.zig
@@ -143,7 +143,7 @@ pub const Params = struct {
/// Create parameters from ops and mem limits, where mem_limit given in bytes
pub fn fromLimits(ops_limit: u64, mem_limit: usize) Self {
- const ops = math.max(32768, ops_limit);
+ const ops = @max(32768, ops_limit);
const r: u30 = 8;
if (ops < mem_limit / 32) {
const max_n = ops / (r * 4);
@@ -151,7 +151,7 @@ pub const Params = struct {
} else {
const max_n = mem_limit / (@intCast(usize, r) * 128);
const ln = @intCast(u6, math.log2(max_n));
- const max_rp = math.min(0x3fffffff, (ops / 4) / (@as(u64, 1) << ln));
+ const max_rp = @min(0x3fffffff, (ops / 4) / (@as(u64, 1) << ln));
return Self{ .r = r, .p = @intCast(u30, max_rp / @as(u64, r)), .ln = ln };
}
}
diff --git a/lib/std/crypto/sha3.zig b/lib/std/crypto/sha3.zig
index 23f9e65534..0226490881 100644
--- a/lib/std/crypto/sha3.zig
+++ b/lib/std/crypto/sha3.zig
@@ -148,7 +148,7 @@ fn ShakeLike(comptime security_level: u11, comptime delim: u8, comptime rounds:
if (self.offset > 0) {
const left = self.buf.len - self.offset;
if (left > 0) {
- const n = math.min(left, out.len);
+ const n = @min(left, out.len);
@memcpy(out[0..n], self.buf[self.offset..][0..n]);
out = out[n..];
self.offset += n;
diff --git a/lib/std/crypto/siphash.zig b/lib/std/crypto/siphash.zig
index 37d219f868..70f4f2fd53 100644
--- a/lib/std/crypto/siphash.zig
+++ b/lib/std/crypto/siphash.zig
@@ -433,7 +433,7 @@ test "iterative non-divisible update" {
var siphash = Siphash.init(key);
var i: usize = 0;
while (i < end) : (i += 7) {
- siphash.update(buf[i..std.math.min(i + 7, end)]);
+ siphash.update(buf[i..@min(i + 7, end)]);
}
const iterative_hash = siphash.finalInt();
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index ea0d467085..3015c30bfb 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -198,7 +198,7 @@ pub fn captureStackTrace(first_address: ?usize, stack_trace: *std.builtin.StackT
stack_trace.index = 0;
return;
};
- const end_index = math.min(first_index + addrs.len, n);
+ const end_index = @min(first_index + addrs.len, n);
const slice = addr_buf[first_index..end_index];
// We use a for loop here because slice and addrs may alias.
for (slice, 0..) |addr, i| {
@@ -380,7 +380,7 @@ pub fn writeStackTrace(
_ = allocator;
if (builtin.strip_debug_info) return error.MissingDebugInfo;
var frame_index: usize = 0;
- var frames_left: usize = std.math.min(stack_trace.index, stack_trace.instruction_addresses.len);
+ var frames_left: usize = @min(stack_trace.index, stack_trace.instruction_addresses.len);
while (frames_left != 0) : ({
frames_left -= 1;
diff --git a/lib/std/dynamic_library.zig b/lib/std/dynamic_library.zig
index 59ad7429cf..94da2f4d6d 100644
--- a/lib/std/dynamic_library.zig
+++ b/lib/std/dynamic_library.zig
@@ -8,7 +8,6 @@ const elf = std.elf;
const windows = std.os.windows;
const system = std.os.system;
const maxInt = std.math.maxInt;
-const max = std.math.max;
pub const DynLib = switch (builtin.os.tag) {
.linux => if (builtin.link_libc) DlDynlib else ElfDynLib,
@@ -152,7 +151,7 @@ pub const ElfDynLib = struct {
}) {
const ph = @intToPtr(*elf.Phdr, ph_addr);
switch (ph.p_type) {
- elf.PT_LOAD => virt_addr_end = max(virt_addr_end, ph.p_vaddr + ph.p_memsz),
+ elf.PT_LOAD => virt_addr_end = @max(virt_addr_end, ph.p_vaddr + ph.p_memsz),
elf.PT_DYNAMIC => maybe_dynv = @intToPtr([*]usize, elf_addr + ph.p_offset),
else => {},
}
diff --git a/lib/std/event/loop.zig b/lib/std/event/loop.zig
index c8d41d3eb0..bc0162423b 100644
--- a/lib/std/event/loop.zig
+++ b/lib/std/event/loop.zig
@@ -179,7 +179,7 @@ pub const Loop = struct {
// We need at least one of these in case the fs thread wants to use onNextTick
const extra_thread_count = thread_count - 1;
- const resume_node_count = std.math.max(extra_thread_count, 1);
+ const resume_node_count = @max(extra_thread_count, 1);
self.eventfd_resume_nodes = try self.arena.allocator().alloc(
std.atomic.Stack(ResumeNode.EventFd).Node,
resume_node_count,
diff --git a/lib/std/fifo.zig b/lib/std/fifo.zig
index bc88e61d76..535376d38f 100644
--- a/lib/std/fifo.zig
+++ b/lib/std/fifo.zig
@@ -150,7 +150,7 @@ pub fn LinearFifo(
start -= self.buf.len;
return self.buf[start .. start + (self.count - offset)];
} else {
- const end = math.min(self.head + self.count, self.buf.len);
+ const end = @min(self.head + self.count, self.buf.len);
return self.buf[start..end];
}
}
diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig
index 6896d0a7a0..c9d8e611ca 100644
--- a/lib/std/fmt.zig
+++ b/lib/std/fmt.zig
@@ -921,8 +921,8 @@ fn formatSizeImpl(comptime base: comptime_int) type {
const log2 = math.log2(value);
const magnitude = switch (base) {
- 1000 => math.min(log2 / comptime math.log2(1000), mags_si.len - 1),
- 1024 => math.min(log2 / 10, mags_iec.len - 1),
+ 1000 => @min(log2 / comptime math.log2(1000), mags_si.len - 1),
+ 1024 => @min(log2 / 10, mags_iec.len - 1),
else => unreachable,
};
const new_value = lossyCast(f64, value) / math.pow(f64, lossyCast(f64, base), lossyCast(f64, magnitude));
@@ -1103,7 +1103,7 @@ pub fn formatFloatScientific(
var printed: usize = 0;
if (float_decimal.digits.len > 1) {
- const num_digits = math.min(float_decimal.digits.len, precision + 1);
+ const num_digits = @min(float_decimal.digits.len, precision + 1);
try writer.writeAll(float_decimal.digits[1..num_digits]);
printed += num_digits - 1;
}
@@ -1116,7 +1116,7 @@ pub fn formatFloatScientific(
try writer.writeAll(float_decimal.digits[0..1]);
try writer.writeAll(".");
if (float_decimal.digits.len > 1) {
- const num_digits = if (@TypeOf(value) == f32) math.min(@as(usize, 9), float_decimal.digits.len) else float_decimal.digits.len;
+ const num_digits = if (@TypeOf(value) == f32) @min(@as(usize, 9), float_decimal.digits.len) else float_decimal.digits.len;
try writer.writeAll(float_decimal.digits[1..num_digits]);
} else {
@@ -1299,7 +1299,7 @@ pub fn formatFloatDecimal(
var num_digits_whole = if (float_decimal.exp > 0) @intCast(usize, float_decimal.exp) else 0;
// the actual slice into the buffer, we may need to zero-pad between num_digits_whole and this.
- var num_digits_whole_no_pad = math.min(num_digits_whole, float_decimal.digits.len);
+ var num_digits_whole_no_pad = @min(num_digits_whole, float_decimal.digits.len);
if (num_digits_whole > 0) {
// We may have to zero pad, for instance 1e4 requires zero padding.
@@ -1326,7 +1326,7 @@ pub fn formatFloatDecimal(
// Zero-fill until we reach significant digits or run out of precision.
if (float_decimal.exp <= 0) {
const zero_digit_count = @intCast(usize, -float_decimal.exp);
- const zeros_to_print = math.min(zero_digit_count, precision);
+ const zeros_to_print = @min(zero_digit_count, precision);
var i: usize = 0;
while (i < zeros_to_print) : (i += 1) {
@@ -1357,7 +1357,7 @@ pub fn formatFloatDecimal(
var num_digits_whole = if (float_decimal.exp > 0) @intCast(usize, float_decimal.exp) else 0;
// the actual slice into the buffer, we may need to zero-pad between num_digits_whole and this.
- var num_digits_whole_no_pad = math.min(num_digits_whole, float_decimal.digits.len);
+ var num_digits_whole_no_pad = @min(num_digits_whole, float_decimal.digits.len);
if (num_digits_whole > 0) {
// We may have to zero pad, for instance 1e4 requires zero padding.
@@ -1410,12 +1410,12 @@ pub fn formatInt(
// The type must have the same size as `base` or be wider in order for the
// division to work
- const min_int_bits = comptime math.max(value_info.bits, 8);
+ const min_int_bits = comptime @max(value_info.bits, 8);
const MinInt = std.meta.Int(.unsigned, min_int_bits);
const abs_value = math.absCast(int_value);
// The worst case in terms of space needed is base 2, plus 1 for the sign
- var buf: [1 + math.max(value_info.bits, 1)]u8 = undefined;
+ var buf: [1 + @max(@as(comptime_int, value_info.bits), 1)]u8 = undefined;
var a: MinInt = abs_value;
var index: usize = buf.len;
diff --git a/lib/std/hash/wyhash.zig b/lib/std/hash/wyhash.zig
index 3426bca9f4..c36c3fe87c 100644
--- a/lib/std/hash/wyhash.zig
+++ b/lib/std/hash/wyhash.zig
@@ -252,7 +252,7 @@ test "iterative non-divisible update" {
var wy = Wyhash.init(seed);
var i: usize = 0;
while (i < end) : (i += 33) {
- wy.update(buf[i..std.math.min(i + 33, end)]);
+ wy.update(buf[i..@min(i + 33, end)]);
}
const iterative_hash = wy.final();
diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig
index 041d99606e..5b539ddaad 100644
--- a/lib/std/hash_map.zig
+++ b/lib/std/hash_map.zig
@@ -1507,7 +1507,7 @@ pub fn HashMapUnmanaged(
fn grow(self: *Self, allocator: Allocator, new_capacity: Size, ctx: Context) Allocator.Error!void {
@setCold(true);
- const new_cap = std.math.max(new_capacity, minimal_capacity);
+ const new_cap = @max(new_capacity, minimal_capacity);
assert(new_cap > self.capacity());
assert(std.math.isPowerOfTwo(new_cap));
@@ -1540,7 +1540,7 @@ pub fn HashMapUnmanaged(
const header_align = @alignOf(Header);
const key_align = if (@sizeOf(K) == 0) 1 else @alignOf(K);
const val_align = if (@sizeOf(V) == 0) 1 else @alignOf(V);
- const max_align = comptime math.max3(header_align, key_align, val_align);
+ const max_align = comptime @max(header_align, key_align, val_align);
const meta_size = @sizeOf(Header) + new_capacity * @sizeOf(Metadata);
comptime assert(@alignOf(Metadata) == 1);
@@ -1575,7 +1575,7 @@ pub fn HashMapUnmanaged(
const header_align = @alignOf(Header);
const key_align = if (@sizeOf(K) == 0) 1 else @alignOf(K);
const val_align = if (@sizeOf(V) == 0) 1 else @alignOf(V);
- const max_align = comptime math.max3(header_align, key_align, val_align);
+ const max_align = comptime @max(header_align, key_align, val_align);
const cap = self.capacity();
const meta_size = @sizeOf(Header) + cap * @sizeOf(Metadata);
diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig
index c0eeae6e61..c7e0569067 100644
--- a/lib/std/heap/arena_allocator.zig
+++ b/lib/std/heap/arena_allocator.zig
@@ -110,7 +110,7 @@ pub const ArenaAllocator = struct {
// value.
const requested_capacity = switch (mode) {
.retain_capacity => self.queryCapacity(),
- .retain_with_limit => |limit| std.math.min(limit, self.queryCapacity()),
+ .retain_with_limit => |limit| @min(limit, self.queryCapacity()),
.free_all => 0,
};
if (requested_capacity == 0) {
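The arena_allocator hunk above only touches the .retain_with_limit branch of ArenaAllocator.reset, clamping the retained capacity with @min. A minimal sketch of calling that API (assuming std.testing.allocator as the backing allocator):

    const std = @import("std");

    test "ArenaAllocator.reset with retain_with_limit" {
        var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
        defer arena.deinit();
        _ = try arena.allocator().alloc(u8, 1024);
        // Keep at most 256 bytes of capacity for reuse; the code above clamps
        // this limit against the capacity the arena currently holds.
        _ = arena.reset(.{ .retain_with_limit = 256 });
    }
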
diff --git a/lib/std/heap/memory_pool.zig b/lib/std/heap/memory_pool.zig
index ca6eb7f518..3fc7dfbfca 100644
--- a/lib/std/heap/memory_pool.zig
+++ b/lib/std/heap/memory_pool.zig
@@ -40,11 +40,11 @@ pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type
/// Size of the memory pool items. This is not necessarily the same
/// as `@sizeOf(Item)` as the pool also uses the items for internal means.
- pub const item_size = std.math.max(@sizeOf(Node), @sizeOf(Item));
+ pub const item_size = @max(@sizeOf(Node), @sizeOf(Item));
/// Alignment of the memory pool items. This is not necessarily the same
/// as `@alignOf(Item)` as the pool also uses the items for internal means.
- pub const item_alignment = std.math.max(@alignOf(Node), pool_options.alignment orelse 0);
+ pub const item_alignment = @max(@alignOf(Node), pool_options.alignment orelse 0);
const Node = struct {
next: ?*@This(),
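For context on the memory_pool change: item_size and item_alignment take the @max of the item's and the free-list Node's requirements because freed items are reused as list nodes. A small usage sketch (assuming the std.heap.MemoryPool convenience wrapper over MemoryPoolExtra):

    const std = @import("std");

    test "MemoryPool create/destroy round trip" {
        var pool = std.heap.MemoryPool(u64).init(std.testing.allocator);
        defer pool.deinit();
        const item = try pool.create();
        item.* = 42;
        // destroy() pushes the slot onto the free list, which is why each slot
        // must be at least @max(@sizeOf(Node), @sizeOf(Item)) bytes.
        pool.destroy(item);
    }
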
diff --git a/lib/std/http/protocol.zig b/lib/std/http/protocol.zig
index b001b3cddf..b5c2cdfa0c 100644
--- a/lib/std/http/protocol.zig
+++ b/lib/std/http/protocol.zig
@@ -82,7 +82,7 @@ pub const HeadersParser = struct {
/// If the amount returned is less than `bytes.len`, you may assume that the parser is in a content state and the
/// first byte of content is located at `bytes[result]`.
pub fn findHeadersEnd(r: *HeadersParser, bytes: []const u8) u32 {
- const vector_len: comptime_int = comptime std.math.max(std.simd.suggestVectorSize(u8) orelse 1, 8);
+ const vector_len: comptime_int = comptime @max(std.simd.suggestVectorSize(u8) orelse 1, 8);
const len = @intCast(u32, bytes.len);
var index: u32 = 0;
diff --git a/lib/std/io/fixed_buffer_stream.zig b/lib/std/io/fixed_buffer_stream.zig
index c170dd1f74..27b978744c 100644
--- a/lib/std/io/fixed_buffer_stream.zig
+++ b/lib/std/io/fixed_buffer_stream.zig
@@ -76,7 +76,7 @@ pub fn FixedBufferStream(comptime Buffer: type) type {
}
pub fn seekTo(self: *Self, pos: u64) SeekError!void {
- self.pos = if (std.math.cast(usize, pos)) |x| std.math.min(self.buffer.len, x) else self.buffer.len;
+ self.pos = if (std.math.cast(usize, pos)) |x| @min(self.buffer.len, x) else self.buffer.len;
}
pub fn seekBy(self: *Self, amt: i64) SeekError!void {
@@ -91,7 +91,7 @@ pub fn FixedBufferStream(comptime Buffer: type) type {
} else {
const amt_usize = std.math.cast(usize, amt) orelse std.math.maxInt(usize);
const new_pos = std.math.add(usize, self.pos, amt_usize) catch std.math.maxInt(usize);
- self.pos = std.math.min(self.buffer.len, new_pos);
+ self.pos = @min(self.buffer.len, new_pos);
}
}
diff --git a/lib/std/io/limited_reader.zig b/lib/std/io/limited_reader.zig
index aa00af0d09..09d76007da 100644
--- a/lib/std/io/limited_reader.zig
+++ b/lib/std/io/limited_reader.zig
@@ -14,7 +14,7 @@ pub fn LimitedReader(comptime ReaderType: type) type {
const Self = @This();
pub fn read(self: *Self, dest: []u8) Error!usize {
- const max_read = std.math.min(self.bytes_left, dest.len);
+ const max_read = @min(self.bytes_left, dest.len);
const n = try self.inner_reader.read(dest[0..max_read]);
self.bytes_left -= n;
return n;
diff --git a/lib/std/io/reader.zig b/lib/std/io/reader.zig
index 344515d07b..abdca56d3c 100644
--- a/lib/std/io/reader.zig
+++ b/lib/std/io/reader.zig
@@ -325,7 +325,7 @@ pub fn Reader(
var remaining = num_bytes;
while (remaining > 0) {
- const amt = std.math.min(remaining, options.buf_size);
+ const amt = @min(remaining, options.buf_size);
try self.readNoEof(buf[0..amt]);
remaining -= amt;
}
diff --git a/lib/std/io/writer.zig b/lib/std/io/writer.zig
index cfc76de452..d0b7fa11ee 100644
--- a/lib/std/io/writer.zig
+++ b/lib/std/io/writer.zig
@@ -39,7 +39,7 @@ pub fn Writer(
var remaining: usize = n;
while (remaining > 0) {
- const to_write = std.math.min(remaining, bytes.len);
+ const to_write = @min(remaining, bytes.len);
try self.writeAll(bytes[0..to_write]);
remaining -= to_write;
}
diff --git a/lib/std/math.zig b/lib/std/math.zig
index 46a7e40a37..e60e964747 100644
--- a/lib/std/math.zig
+++ b/lib/std/math.zig
@@ -165,7 +165,7 @@ pub fn approxEqRel(comptime T: type, x: T, y: T, tolerance: T) bool {
if (isNan(x) or isNan(y))
return false;
- return @fabs(x - y) <= max(@fabs(x), @fabs(y)) * tolerance;
+ return @fabs(x - y) <= @max(@fabs(x), @fabs(y)) * tolerance;
}
test "approxEqAbs and approxEqRel" {
@@ -434,104 +434,15 @@ pub fn Min(comptime A: type, comptime B: type) type {
return @TypeOf(@as(A, 0) + @as(B, 0));
}
-/// Returns the smaller number. When one parameter's type's full range
-/// fits in the other, the return type is the smaller type.
-pub fn min(x: anytype, y: anytype) Min(@TypeOf(x), @TypeOf(y)) {
- const Result = Min(@TypeOf(x), @TypeOf(y));
- if (x < y) {
- // TODO Zig should allow this as an implicit cast because x is
- // immutable and in this scope it is known to fit in the
- // return type.
- switch (@typeInfo(Result)) {
- .Int => return @intCast(Result, x),
- else => return x,
- }
- } else {
- // TODO Zig should allow this as an implicit cast because y is
- // immutable and in this scope it is known to fit in the
- // return type.
- switch (@typeInfo(Result)) {
- .Int => return @intCast(Result, y),
- else => return y,
- }
- }
-}
-
-test "min" {
- try testing.expect(min(@as(i32, -1), @as(i32, 2)) == -1);
- {
- var a: u16 = 999;
- var b: u32 = 10;
- var result = min(a, b);
- try testing.expect(@TypeOf(result) == u16);
- try testing.expect(result == 10);
- }
- {
- var a: f64 = 10.34;
- var b: f32 = 999.12;
- var result = min(a, b);
- try testing.expect(@TypeOf(result) == f64);
- try testing.expect(result == 10.34);
- }
- {
- var a: i8 = -127;
- var b: i16 = -200;
- var result = min(a, b);
- try testing.expect(@TypeOf(result) == i16);
- try testing.expect(result == -200);
- }
- {
- const a = 10.34;
- var b: f32 = 999.12;
- var result = min(a, b);
- try testing.expect(@TypeOf(result) == f32);
- try testing.expect(result == 10.34);
- }
-}
-
-/// Finds the minimum of three numbers.
-pub fn min3(x: anytype, y: anytype, z: anytype) @TypeOf(x, y, z) {
- return min(x, min(y, z));
-}
-
-test "min3" {
- try testing.expect(min3(@as(i32, 0), @as(i32, 1), @as(i32, 2)) == 0);
- try testing.expect(min3(@as(i32, 0), @as(i32, 2), @as(i32, 1)) == 0);
- try testing.expect(min3(@as(i32, 1), @as(i32, 0), @as(i32, 2)) == 0);
- try testing.expect(min3(@as(i32, 1), @as(i32, 2), @as(i32, 0)) == 0);
- try testing.expect(min3(@as(i32, 2), @as(i32, 0), @as(i32, 1)) == 0);
- try testing.expect(min3(@as(i32, 2), @as(i32, 1), @as(i32, 0)) == 0);
-}
-
-/// Returns the maximum of two numbers. Return type is the one with the
-/// larger range.
-pub fn max(x: anytype, y: anytype) @TypeOf(x, y) {
- return if (x > y) x else y;
-}
-
-test "max" {
- try testing.expect(max(@as(i32, -1), @as(i32, 2)) == 2);
- try testing.expect(max(@as(i32, 2), @as(i32, -1)) == 2);
-}
-
-/// Finds the maximum of three numbers.
-pub fn max3(x: anytype, y: anytype, z: anytype) @TypeOf(x, y, z) {
- return max(x, max(y, z));
-}
-
-test "max3" {
- try testing.expect(max3(@as(i32, 0), @as(i32, 1), @as(i32, 2)) == 2);
- try testing.expect(max3(@as(i32, 0), @as(i32, 2), @as(i32, 1)) == 2);
- try testing.expect(max3(@as(i32, 1), @as(i32, 0), @as(i32, 2)) == 2);
- try testing.expect(max3(@as(i32, 1), @as(i32, 2), @as(i32, 0)) == 2);
- try testing.expect(max3(@as(i32, 2), @as(i32, 0), @as(i32, 1)) == 2);
- try testing.expect(max3(@as(i32, 2), @as(i32, 1), @as(i32, 0)) == 2);
-}
+pub const min = @compileError("deprecated; use @min instead");
+pub const max = @compileError("deprecated; use @max instead");
+pub const min3 = @compileError("deprecated; use @min instead");
+pub const max3 = @compileError("deprecated; use @max instead");
/// Limit val to the inclusive range [lower, upper].
pub fn clamp(val: anytype, lower: anytype, upper: anytype) @TypeOf(val, lower, upper) {
assert(lower <= upper);
- return max(lower, min(val, upper));
+ return @max(lower, @min(val, upper));
}
test "clamp" {
// Within range
@@ -795,7 +706,7 @@ pub fn IntFittingRange(comptime from: comptime_int, comptime to: comptime_int) t
return u0;
}
const signedness: std.builtin.Signedness = if (from < 0) .signed else .unsigned;
- const largest_positive_integer = max(if (from < 0) (-from) - 1 else from, to); // two's complement
+ const largest_positive_integer = @max(if (from < 0) (-from) - 1 else from, to); // two's complement
const base = log2(largest_positive_integer);
const upper = (1 << base) - 1;
var magnitude_bits = if (upper >= largest_positive_integer) base else base + 1;
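The math.zig hunk above removes std.math.min/max (and min3/max3) in favor of the @min/@max builtins, which accept two or more arguments; clamp keeps its public API but is now built on them. A minimal migration sketch:

    const std = @import("std");

    test "@min/@max replace std.math.min/max" {
        const a: u16 = 999;
        const b: u32 = 10;
        try std.testing.expect(@min(a, b) == 10);

        // min3/max3 callers pass the extra argument directly to the builtin.
        try std.testing.expect(@max(@as(i32, 0), @as(i32, 1), @as(i32, 2)) == 2);

        // clamp is unchanged from the caller's point of view.
        try std.testing.expect(std.math.clamp(@as(i32, 7), 0, 5) == 5);
    }
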
diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig
index ec79d843da..487812e1de 100644
--- a/lib/std/math/big/int.zig
+++ b/lib/std/math/big/int.zig
@@ -44,12 +44,12 @@ pub fn calcDivLimbsBufferLen(a_len: usize, b_len: usize) usize {
}
pub fn calcMulLimbsBufferLen(a_len: usize, b_len: usize, aliases: usize) usize {
- return aliases * math.max(a_len, b_len);
+ return aliases * @max(a_len, b_len);
}
pub fn calcMulWrapLimbsBufferLen(bit_count: usize, a_len: usize, b_len: usize, aliases: usize) usize {
const req_limbs = calcTwosCompLimbCount(bit_count);
- return aliases * math.min(req_limbs, math.max(a_len, b_len));
+ return aliases * @min(req_limbs, @max(a_len, b_len));
}
pub fn calcSetStringLimbsBufferLen(base: u8, string_len: usize) usize {
@@ -396,7 +396,7 @@ pub const Mutable = struct {
/// scalar is a primitive integer type.
///
/// Asserts the result fits in `r`. An upper bound on the number of limbs needed by
- /// r is `math.max(a.limbs.len, calcLimbLen(scalar)) + 1`.
+ /// r is `@max(a.limbs.len, calcLimbLen(scalar)) + 1`.
pub fn addScalar(r: *Mutable, a: Const, scalar: anytype) void {
// Normally we could just determine the number of limbs needed with calcLimbLen,
// but that is not comptime-known when scalar is not a comptime_int. Instead, we
@@ -414,11 +414,11 @@ pub const Mutable = struct {
return add(r, a, operand);
}
- /// Base implementation for addition. Adds `max(a.limbs.len, b.limbs.len)` elements from a and b,
+ /// Base implementation for addition. Adds `@max(a.limbs.len, b.limbs.len)` elements from a and b,
/// and returns whether any overflow occurred.
/// r, a and b may be aliases.
///
- /// Asserts r has enough elements to hold the result. The upper bound is `max(a.limbs.len, b.limbs.len)`.
+ /// Asserts r has enough elements to hold the result. The upper bound is `@max(a.limbs.len, b.limbs.len)`.
fn addCarry(r: *Mutable, a: Const, b: Const) bool {
if (a.eqZero()) {
r.copy(b);
@@ -452,12 +452,12 @@ pub const Mutable = struct {
/// r, a and b may be aliases.
///
/// Asserts the result fits in `r`. An upper bound on the number of limbs needed by
- /// r is `math.max(a.limbs.len, b.limbs.len) + 1`.
+ /// r is `@max(a.limbs.len, b.limbs.len) + 1`.
pub fn add(r: *Mutable, a: Const, b: Const) void {
if (r.addCarry(a, b)) {
// Fix up the result. Note that addCarry normalizes by a.limbs.len or b.limbs.len,
// so we need to set the length here.
- const msl = math.max(a.limbs.len, b.limbs.len);
+ const msl = @max(a.limbs.len, b.limbs.len);
// `[add|sub]Carry` normalizes by `msl`, so we need to fix up the result manually here.
// Note, the fact that it normalized means that the intermediary limbs are zero here.
r.len = msl + 1;
@@ -477,12 +477,12 @@ pub const Mutable = struct {
// if an overflow occurred.
const x = Const{
.positive = a.positive,
- .limbs = a.limbs[0..math.min(req_limbs, a.limbs.len)],
+ .limbs = a.limbs[0..@min(req_limbs, a.limbs.len)],
};
const y = Const{
.positive = b.positive,
- .limbs = b.limbs[0..math.min(req_limbs, b.limbs.len)],
+ .limbs = b.limbs[0..@min(req_limbs, b.limbs.len)],
};
var carry_truncated = false;
@@ -492,7 +492,7 @@ pub const Mutable = struct {
// truncate anyway.
// - a and b had less elements than req_limbs, and those were overflowed. This case needs to be handled.
// Note: after this we still might need to wrap.
- const msl = math.max(a.limbs.len, b.limbs.len);
+ const msl = @max(a.limbs.len, b.limbs.len);
if (msl < req_limbs) {
r.limbs[msl] = 1;
r.len = req_limbs;
@@ -522,12 +522,12 @@ pub const Mutable = struct {
// if an overflow occurred.
const x = Const{
.positive = a.positive,
- .limbs = a.limbs[0..math.min(req_limbs, a.limbs.len)],
+ .limbs = a.limbs[0..@min(req_limbs, a.limbs.len)],
};
const y = Const{
.positive = b.positive,
- .limbs = b.limbs[0..math.min(req_limbs, b.limbs.len)],
+ .limbs = b.limbs[0..@min(req_limbs, b.limbs.len)],
};
if (r.addCarry(x, y)) {
@@ -535,7 +535,7 @@ pub const Mutable = struct {
// - We overflowed req_limbs, in which case we need to saturate.
// - a and b had less elements than req_limbs, and those were overflowed.
// Note: In this case, might _also_ need to saturate.
- const msl = math.max(a.limbs.len, b.limbs.len);
+ const msl = @max(a.limbs.len, b.limbs.len);
if (msl < req_limbs) {
r.limbs[msl] = 1;
r.len = req_limbs;
@@ -550,11 +550,11 @@ pub const Mutable = struct {
r.saturate(r.toConst(), signedness, bit_count);
}
- /// Base implementation for subtraction. Subtracts `max(a.limbs.len, b.limbs.len)` elements from a and b,
+ /// Base implementation for subtraction. Subtracts `@max(a.limbs.len, b.limbs.len)` elements from a and b,
/// and returns whether any overflow occurred.
/// r, a and b may be aliases.
///
- /// Asserts r has enough elements to hold the result. The upper bound is `max(a.limbs.len, b.limbs.len)`.
+ /// Asserts r has enough elements to hold the result. The upper bound is `@max(a.limbs.len, b.limbs.len)`.
fn subCarry(r: *Mutable, a: Const, b: Const) bool {
if (a.eqZero()) {
r.copy(b);
@@ -607,7 +607,7 @@ pub const Mutable = struct {
/// r, a and b may be aliases.
///
/// Asserts the result fits in `r`. An upper bound on the number of limbs needed by
- /// r is `math.max(a.limbs.len, b.limbs.len) + 1`. The +1 is not needed if both operands are positive.
+ /// r is `@max(a.limbs.len, b.limbs.len) + 1`. The +1 is not needed if both operands are positive.
pub fn sub(r: *Mutable, a: Const, b: Const) void {
r.add(a, b.negate());
}
@@ -714,7 +714,7 @@ pub const Mutable = struct {
const a_copy = if (rma.limbs.ptr == a.limbs.ptr) blk: {
const start = buf_index;
- const a_len = math.min(req_limbs, a.limbs.len);
+ const a_len = @min(req_limbs, a.limbs.len);
@memcpy(limbs_buffer[buf_index..][0..a_len], a.limbs[0..a_len]);
buf_index += a_len;
break :blk a.toMutable(limbs_buffer[start..buf_index]).toConst();
@@ -722,7 +722,7 @@ pub const Mutable = struct {
const b_copy = if (rma.limbs.ptr == b.limbs.ptr) blk: {
const start = buf_index;
- const b_len = math.min(req_limbs, b.limbs.len);
+ const b_len = @min(req_limbs, b.limbs.len);
@memcpy(limbs_buffer[buf_index..][0..b_len], b.limbs[0..b_len]);
buf_index += b_len;
break :blk a.toMutable(limbs_buffer[start..buf_index]).toConst();
@@ -755,13 +755,13 @@ pub const Mutable = struct {
const req_limbs = calcTwosCompLimbCount(bit_count);
// We can ignore the upper bits here, those results will be discarded anyway.
- const a_limbs = a.limbs[0..math.min(req_limbs, a.limbs.len)];
- const b_limbs = b.limbs[0..math.min(req_limbs, b.limbs.len)];
+ const a_limbs = a.limbs[0..@min(req_limbs, a.limbs.len)];
+ const b_limbs = b.limbs[0..@min(req_limbs, b.limbs.len)];
@memset(rma.limbs[0..req_limbs], 0);
llmulacc(.add, allocator, rma.limbs, a_limbs, b_limbs);
- rma.normalize(math.min(req_limbs, a.limbs.len + b.limbs.len));
+ rma.normalize(@min(req_limbs, a.limbs.len + b.limbs.len));
rma.positive = (a.positive == b.positive);
rma.truncate(rma.toConst(), signedness, bit_count);
}
@@ -1211,7 +1211,7 @@ pub const Mutable = struct {
///
/// a and b are zero-extended to the longer of a or b.
///
- /// Asserts that r has enough limbs to store the result. Upper bound is `math.max(a.limbs.len, b.limbs.len)`.
+ /// Asserts that r has enough limbs to store the result. Upper bound is `@max(a.limbs.len, b.limbs.len)`.
pub fn bitOr(r: *Mutable, a: Const, b: Const) void {
// Trivial cases, llsignedor does not support zero.
if (a.eqZero()) {
@@ -1235,8 +1235,8 @@ pub const Mutable = struct {
/// r may alias with a or b.
///
/// Asserts that r has enough limbs to store the result.
- /// If a or b is positive, the upper bound is `math.min(a.limbs.len, b.limbs.len)`.
- /// If a and b are negative, the upper bound is `math.max(a.limbs.len, b.limbs.len) + 1`.
+ /// If a or b is positive, the upper bound is `@min(a.limbs.len, b.limbs.len)`.
+ /// If a and b are negative, the upper bound is `@max(a.limbs.len, b.limbs.len) + 1`.
pub fn bitAnd(r: *Mutable, a: Const, b: Const) void {
// Trivial cases, llsignedand does not support zero.
if (a.eqZero()) {
@@ -1260,8 +1260,8 @@ pub const Mutable = struct {
/// r may alias with a or b.
///
/// Asserts that r has enough limbs to store the result. If a and b share the same signedness, the
- /// upper bound is `math.max(a.limbs.len, b.limbs.len)`. Otherwise, if either a or b is negative
- /// but not both, the upper bound is `math.max(a.limbs.len, b.limbs.len) + 1`.
+ /// upper bound is `@max(a.limbs.len, b.limbs.len)`. Otherwise, if either a or b is negative
+ /// but not both, the upper bound is `@max(a.limbs.len, b.limbs.len) + 1`.
pub fn bitXor(r: *Mutable, a: Const, b: Const) void {
// Trivial cases, because llsignedxor does not support negative zero.
if (a.eqZero()) {
@@ -1284,7 +1284,7 @@ pub const Mutable = struct {
/// rma may alias x or y.
/// x and y may alias each other.
/// Asserts that `rma` has enough limbs to store the result. Upper bound is
- /// `math.min(x.limbs.len, y.limbs.len)`.
+ /// `@min(x.limbs.len, y.limbs.len)`.
///
/// `limbs_buffer` is used for temporary storage during the operation. When this function returns,
/// it will have the same length as it had when the function was called.
@@ -1546,7 +1546,7 @@ pub const Mutable = struct {
if (yi != 0) break i;
} else unreachable;
- const xy_trailing = math.min(x_trailing, y_trailing);
+ const xy_trailing = @min(x_trailing, y_trailing);
if (y.len - xy_trailing == 1) {
const divisor = y.limbs[y.len - 1];
@@ -2589,7 +2589,7 @@ pub const Managed = struct {
.allocator = allocator,
.metadata = 1,
.limbs = block: {
- const limbs = try allocator.alloc(Limb, math.max(default_capacity, capacity));
+ const limbs = try allocator.alloc(Limb, @max(default_capacity, capacity));
limbs[0] = 0;
break :block limbs;
},
@@ -2918,7 +2918,7 @@ pub const Managed = struct {
///
/// Returns an error if memory could not be allocated.
pub fn sub(r: *Managed, a: *const Managed, b: *const Managed) !void {
- try r.ensureCapacity(math.max(a.len(), b.len()) + 1);
+ try r.ensureCapacity(@max(a.len(), b.len()) + 1);
var m = r.toMutable();
m.sub(a.toConst(), b.toConst());
r.setMetadata(m.positive, m.len);
@@ -3025,11 +3025,11 @@ pub const Managed = struct {
}
pub fn ensureAddScalarCapacity(r: *Managed, a: Const, scalar: anytype) !void {
- try r.ensureCapacity(math.max(a.limbs.len, calcLimbLen(scalar)) + 1);
+ try r.ensureCapacity(@max(a.limbs.len, calcLimbLen(scalar)) + 1);
}
pub fn ensureAddCapacity(r: *Managed, a: Const, b: Const) !void {
- try r.ensureCapacity(math.max(a.limbs.len, b.limbs.len) + 1);
+ try r.ensureCapacity(@max(a.limbs.len, b.limbs.len) + 1);
}
pub fn ensureMulCapacity(rma: *Managed, a: Const, b: Const) !void {
@@ -3123,7 +3123,7 @@ pub const Managed = struct {
///
/// a and b are zero-extended to the longer of a or b.
pub fn bitOr(r: *Managed, a: *const Managed, b: *const Managed) !void {
- try r.ensureCapacity(math.max(a.len(), b.len()));
+ try r.ensureCapacity(@max(a.len(), b.len()));
var m = r.toMutable();
m.bitOr(a.toConst(), b.toConst());
r.setMetadata(m.positive, m.len);
@@ -3132,9 +3132,9 @@ pub const Managed = struct {
/// r = a & b
pub fn bitAnd(r: *Managed, a: *const Managed, b: *const Managed) !void {
const cap = if (a.isPositive() or b.isPositive())
- math.min(a.len(), b.len())
+ @min(a.len(), b.len())
else
- math.max(a.len(), b.len()) + 1;
+ @max(a.len(), b.len()) + 1;
try r.ensureCapacity(cap);
var m = r.toMutable();
m.bitAnd(a.toConst(), b.toConst());
@@ -3143,7 +3143,7 @@ pub const Managed = struct {
/// r = a ^ b
pub fn bitXor(r: *Managed, a: *const Managed, b: *const Managed) !void {
- var cap = math.max(a.len(), b.len()) + @boolToInt(a.isPositive() != b.isPositive());
+ var cap = @max(a.len(), b.len()) + @boolToInt(a.isPositive() != b.isPositive());
try r.ensureCapacity(cap);
var m = r.toMutable();
@@ -3156,7 +3156,7 @@ pub const Managed = struct {
///
/// rma's allocator is used for temporary storage to boost multiplication performance.
pub fn gcd(rma: *Managed, x: *const Managed, y: *const Managed) !void {
- try rma.ensureCapacity(math.min(x.len(), y.len()));
+ try rma.ensureCapacity(@min(x.len(), y.len()));
var m = rma.toMutable();
var limbs_buffer = std.ArrayList(Limb).init(rma.allocator);
defer limbs_buffer.deinit();
@@ -3356,13 +3356,13 @@ fn llmulaccKaratsuba(
// For a1 and b1 we only need `limbs_after_split` limbs.
const a1 = blk: {
var a1 = a[split..];
- a1.len = math.min(llnormalize(a1), limbs_after_split);
+ a1.len = @min(llnormalize(a1), limbs_after_split);
break :blk a1;
};
const b1 = blk: {
var b1 = b[split..];
- b1.len = math.min(llnormalize(b1), limbs_after_split);
+ b1.len = @min(llnormalize(b1), limbs_after_split);
break :blk b1;
};
@@ -3381,10 +3381,10 @@ fn llmulaccKaratsuba(
// Compute p2.
// Note, we don't need to compute all of p2, just enough limbs to satisfy r.
- const p2_limbs = math.min(limbs_after_split, a1.len + b1.len);
+ const p2_limbs = @min(limbs_after_split, a1.len + b1.len);
@memset(tmp[0..p2_limbs], 0);
- llmulacc(.add, allocator, tmp[0..p2_limbs], a1[0..math.min(a1.len, p2_limbs)], b1[0..math.min(b1.len, p2_limbs)]);
+ llmulacc(.add, allocator, tmp[0..p2_limbs], a1[0..@min(a1.len, p2_limbs)], b1[0..@min(b1.len, p2_limbs)]);
const p2 = tmp[0..llnormalize(tmp[0..p2_limbs])];
// Add p2 * B to the result.
@@ -3392,7 +3392,7 @@ fn llmulaccKaratsuba(
// Add p2 * B^2 to the result if required.
if (limbs_after_split2 > 0) {
- llaccum(op, r[split * 2 ..], p2[0..math.min(p2.len, limbs_after_split2)]);
+ llaccum(op, r[split * 2 ..], p2[0..@min(p2.len, limbs_after_split2)]);
}
// Compute p0.
@@ -3406,13 +3406,13 @@ fn llmulaccKaratsuba(
llaccum(op, r, p0);
// Add p0 * B to the result. In this case, we may not need all of it.
- llaccum(op, r[split..], p0[0..math.min(limbs_after_split, p0.len)]);
+ llaccum(op, r[split..], p0[0..@min(limbs_after_split, p0.len)]);
// Finally, compute and add p1.
// From now on we only need `limbs_after_split` limbs for a0 and b0, since the result of the
// following computation will be added * B.
- const a0x = a0[0..std.math.min(a0.len, limbs_after_split)];
- const b0x = b0[0..std.math.min(b0.len, limbs_after_split)];
+ const a0x = a0[0..@min(a0.len, limbs_after_split)];
+ const b0x = b0[0..@min(b0.len, limbs_after_split)];
const j0_sign = llcmp(a0x, a1);
const j1_sign = llcmp(b1, b0x);
@@ -3544,7 +3544,7 @@ fn llmulLimb(comptime op: AccOp, acc: []Limb, y: []const Limb, xi: Limb) bool {
return false;
}
- const split = std.math.min(y.len, acc.len);
+ const split = @min(y.len, acc.len);
var a_lo = acc[0..split];
var a_hi = acc[split..];
@@ -4023,8 +4023,8 @@ fn llsignedand(r: []Limb, a: []const Limb, a_positive: bool, b: []const Limb, b_
// r may alias.
// a and b must not be -0.
// Returns `true` when the result is positive.
-// If the sign of a and b is equal, then r requires at least `max(a.len, b.len)` limbs are required.
-// Otherwise, r requires at least `max(a.len, b.len) + 1` limbs.
+// If the sign of a and b is equal, then r requires at least `@max(a.len, b.len)` limbs are required.
+// Otherwise, r requires at least `@max(a.len, b.len) + 1` limbs.
fn llsignedxor(r: []Limb, a: []const Limb, a_positive: bool, b: []const Limb, b_positive: bool) bool {
@setRuntimeSafety(debug_safety);
assert(a.len != 0 and b.len != 0);
diff --git a/lib/std/math/ldexp.zig b/lib/std/math/ldexp.zig
index d2fd8db9b7..8947475159 100644
--- a/lib/std/math/ldexp.zig
+++ b/lib/std/math/ldexp.zig
@@ -48,7 +48,7 @@ pub fn ldexp(x: anytype, n: i32) @TypeOf(x) {
return @bitCast(T, sign_bit); // Severe underflow. Return +/- 0
// Result underflowed, we need to shift and round
- const shift = @intCast(Log2Int(TBits), math.min(-n, -(exponent + n) + 1));
+ const shift = @intCast(Log2Int(TBits), @min(-n, -(exponent + n) + 1));
const exact_tie: bool = @ctz(repr) == shift - 1;
var result = repr & mantissa_mask;
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index c4ad708887..2f34745a64 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -596,7 +596,7 @@ pub fn sortUnstableContext(a: usize, b: usize, context: anytype) void {
/// Compares two slices of numbers lexicographically. O(n).
pub fn order(comptime T: type, lhs: []const T, rhs: []const T) math.Order {
- const n = math.min(lhs.len, rhs.len);
+ const n = @min(lhs.len, rhs.len);
var i: usize = 0;
while (i < n) : (i += 1) {
switch (math.order(lhs[i], rhs[i])) {
@@ -642,7 +642,7 @@ pub fn eql(comptime T: type, a: []const T, b: []const T) bool {
/// Compares two slices and returns the index of the first inequality.
/// Returns null if the slices are equal.
pub fn indexOfDiff(comptime T: type, a: []const T, b: []const T) ?usize {
- const shortest = math.min(a.len, b.len);
+ const shortest = @min(a.len, b.len);
if (a.ptr == b.ptr)
return if (a.len == b.len) null else shortest;
var index: usize = 0;
@@ -3296,7 +3296,7 @@ pub fn min(comptime T: type, slice: []const T) T {
assert(slice.len > 0);
var best = slice[0];
for (slice[1..]) |item| {
- best = math.min(best, item);
+ best = @min(best, item);
}
return best;
}
@@ -3313,7 +3313,7 @@ pub fn max(comptime T: type, slice: []const T) T {
assert(slice.len > 0);
var best = slice[0];
for (slice[1..]) |item| {
- best = math.max(best, item);
+ best = @max(best, item);
}
return best;
}
@@ -3332,8 +3332,8 @@ pub fn minMax(comptime T: type, slice: []const T) struct { min: T, max: T } {
var minVal = slice[0];
var maxVal = slice[0];
for (slice[1..]) |item| {
- minVal = math.min(minVal, item);
- maxVal = math.max(maxVal, item);
+ minVal = @min(minVal, item);
+ maxVal = @max(maxVal, item);
}
return .{ .min = minVal, .max = maxVal };
}
diff --git a/lib/std/net.zig b/lib/std/net.zig
index 64b13ec544..dfd6fe4a9e 100644
--- a/lib/std/net.zig
+++ b/lib/std/net.zig
@@ -1482,11 +1482,11 @@ fn getResolvConf(allocator: mem.Allocator, rc: *ResolvConf) !void {
error.InvalidCharacter => continue,
};
if (mem.eql(u8, name, "ndots")) {
- rc.ndots = std.math.min(value, 15);
+ rc.ndots = @min(value, 15);
} else if (mem.eql(u8, name, "attempts")) {
- rc.attempts = std.math.min(value, 10);
+ rc.attempts = @min(value, 10);
} else if (mem.eql(u8, name, "timeout")) {
- rc.timeout = std.math.min(value, 60);
+ rc.timeout = @min(value, 60);
}
}
} else if (mem.eql(u8, token, "nameserver")) {
@@ -1615,7 +1615,7 @@ fn resMSendRc(
}
// Wait for a response, or until time to retry
- const clamped_timeout = std.math.min(@as(u31, std.math.maxInt(u31)), t1 + retry_interval - t2);
+ const clamped_timeout = @min(@as(u31, std.math.maxInt(u31)), t1 + retry_interval - t2);
const nevents = os.poll(&pfd, clamped_timeout) catch 0;
if (nevents == 0) continue;
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index ef0ec94d3b..e4d6790505 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -317,7 +317,7 @@ pub fn getdents(fd: i32, dirp: [*]u8, len: usize) usize {
.getdents,
@bitCast(usize, @as(isize, fd)),
@ptrToInt(dirp),
- std.math.min(len, maxInt(c_int)),
+ @min(len, maxInt(c_int)),
);
}
@@ -326,7 +326,7 @@ pub fn getdents64(fd: i32, dirp: [*]u8, len: usize) usize {
.getdents64,
@bitCast(usize, @as(isize, fd)),
@ptrToInt(dirp),
- std.math.min(len, maxInt(c_int)),
+ @min(len, maxInt(c_int)),
);
}
diff --git a/lib/std/os/linux/io_uring.zig b/lib/std/os/linux/io_uring.zig
index b7467d765f..0610b214d5 100644
--- a/lib/std/os/linux/io_uring.zig
+++ b/lib/std/os/linux/io_uring.zig
@@ -277,7 +277,7 @@ pub const IO_Uring = struct {
fn copy_cqes_ready(self: *IO_Uring, cqes: []linux.io_uring_cqe, wait_nr: u32) u32 {
_ = wait_nr;
const ready = self.cq_ready();
- const count = std.math.min(cqes.len, ready);
+ const count = @min(cqes.len, ready);
var head = self.cq.head.*;
var tail = head +% count;
// TODO Optimize this by using 1 or 2 memcpy's (if the tail wraps) rather than a loop.
@@ -1093,7 +1093,7 @@ pub const SubmissionQueue = struct {
pub fn init(fd: os.fd_t, p: linux.io_uring_params) !SubmissionQueue {
assert(fd >= 0);
assert((p.features & linux.IORING_FEAT_SINGLE_MMAP) != 0);
- const size = std.math.max(
+ const size = @max(
p.sq_off.array + p.sq_entries * @sizeOf(u32),
p.cq_off.cqes + p.cq_entries * @sizeOf(linux.io_uring_cqe),
);
diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig
index e559e48915..389c4bea12 100644
--- a/lib/std/os/windows.zig
+++ b/lib/std/os/windows.zig
@@ -272,7 +272,7 @@ pub fn RtlGenRandom(output: []u8) RtlGenRandomError!void {
const max_read_size: ULONG = maxInt(ULONG);
while (total_read < output.len) {
- const to_read: ULONG = math.min(buff.len, max_read_size);
+ const to_read: ULONG = @min(buff.len, max_read_size);
if (advapi32.RtlGenRandom(buff.ptr, to_read) == 0) {
return unexpectedError(kernel32.GetLastError());
@@ -501,7 +501,7 @@ pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64, io_mode: std.io.Mo
return @as(usize, bytes_transferred);
} else {
while (true) {
- const want_read_count = @intCast(DWORD, math.min(@as(DWORD, maxInt(DWORD)), buffer.len));
+ const want_read_count: DWORD = @min(@as(DWORD, maxInt(DWORD)), buffer.len);
var amt_read: DWORD = undefined;
var overlapped_data: OVERLAPPED = undefined;
const overlapped: ?*OVERLAPPED = if (offset) |off| blk: {
diff --git a/lib/std/pdb.zig b/lib/std/pdb.zig
index 5bc836b08e..180507ba71 100644
--- a/lib/std/pdb.zig
+++ b/lib/std/pdb.zig
@@ -1049,7 +1049,7 @@ const MsfStream = struct {
var size: usize = 0;
var rem_buffer = buffer;
while (size < buffer.len) {
- const size_to_read = math.min(self.block_size - offset, rem_buffer.len);
+ const size_to_read = @min(self.block_size - offset, rem_buffer.len);
size += try in.read(rem_buffer[0..size_to_read]);
rem_buffer = buffer[size..];
offset += size_to_read;
diff --git a/lib/std/rand.zig b/lib/std/rand.zig
index 1e9f4051e9..f07562c911 100644
--- a/lib/std/rand.zig
+++ b/lib/std/rand.zig
@@ -410,7 +410,7 @@ pub const Random = struct {
r.uintLessThan(T, sum)
else if (comptime std.meta.trait.isFloat(T))
// take care that imprecision doesn't lead to a value slightly greater than sum
- std.math.min(r.float(T) * sum, sum - std.math.floatEps(T))
+ @min(r.float(T) * sum, sum - std.math.floatEps(T))
else
@compileError("weightedIndex does not support proportions of type " ++ @typeName(T));
diff --git a/lib/std/sort/block.zig b/lib/std/sort/block.zig
index 6c1be9c6c2..518d148a73 100644
--- a/lib/std/sort/block.zig
+++ b/lib/std/sort/block.zig
@@ -590,7 +590,7 @@ pub fn block(
// whenever we leave an A block behind, we'll need to merge the previous A block with any B blocks that follow it, so track that information as well
var lastA = firstA;
var lastB = Range.init(0, 0);
- var blockB = Range.init(B.start, B.start + math.min(block_size, B.length()));
+ var blockB = Range.init(B.start, B.start + @min(block_size, B.length()));
blockA.start += firstA.length();
indexA = buffer1.start;
@@ -849,7 +849,7 @@ fn findFirstForward(
comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
) usize {
if (range.length() == 0) return range.start;
- const skip = math.max(range.length() / unique, @as(usize, 1));
+ const skip = @max(range.length() / unique, @as(usize, 1));
var index = range.start + skip;
while (lessThan(context, items[index - 1], value)) : (index += skip) {
@@ -871,7 +871,7 @@ fn findFirstBackward(
comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
) usize {
if (range.length() == 0) return range.start;
- const skip = math.max(range.length() / unique, @as(usize, 1));
+ const skip = @max(range.length() / unique, @as(usize, 1));
var index = range.end - skip;
while (index > range.start and !lessThan(context, items[index - 1], value)) : (index -= skip) {
@@ -893,7 +893,7 @@ fn findLastForward(
comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
) usize {
if (range.length() == 0) return range.start;
- const skip = math.max(range.length() / unique, @as(usize, 1));
+ const skip = @max(range.length() / unique, @as(usize, 1));
var index = range.start + skip;
while (!lessThan(context, value, items[index - 1])) : (index += skip) {
@@ -915,7 +915,7 @@ fn findLastBackward(
comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
) usize {
if (range.length() == 0) return range.start;
- const skip = math.max(range.length() / unique, @as(usize, 1));
+ const skip = @max(range.length() / unique, @as(usize, 1));
var index = range.end - skip;
while (index > range.start and lessThan(context, value, items[index - 1])) : (index -= skip) {
diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig
index 83fa68567f..3930c9714a 100644
--- a/lib/std/zig/render.zig
+++ b/lib/std/zig/render.zig
@@ -1960,7 +1960,7 @@ fn renderArrayInit(
if (!this_contains_newline) {
const column = column_counter % row_size;
- column_widths[column] = std.math.max(column_widths[column], width);
+ column_widths[column] = @max(column_widths[column], width);
const expr_last_token = tree.lastToken(expr) + 1;
const next_expr = section_exprs[i + 1];
@@ -1980,7 +1980,7 @@ fn renderArrayInit(
if (!contains_newline) {
const column = column_counter % row_size;
- column_widths[column] = std.math.max(column_widths[column], width);
+ column_widths[column] = @max(column_widths[column], width);
}
}
}
diff --git a/lib/std/zig/system/NativeTargetInfo.zig b/lib/std/zig/system/NativeTargetInfo.zig
index f17356fdcd..cddaea2295 100644
--- a/lib/std/zig/system/NativeTargetInfo.zig
+++ b/lib/std/zig/system/NativeTargetInfo.zig
@@ -503,7 +503,7 @@ fn glibcVerFromSoFile(file: fs.File) !std.builtin.Version {
const shstrtab_off = elfInt(is_64, need_bswap, shstr32.sh_offset, shstr64.sh_offset);
const shstrtab_size = elfInt(is_64, need_bswap, shstr32.sh_size, shstr64.sh_size);
var strtab_buf: [4096:0]u8 = undefined;
- const shstrtab_len = std.math.min(shstrtab_size, strtab_buf.len);
+ const shstrtab_len = @min(shstrtab_size, strtab_buf.len);
const shstrtab_read_len = try preadMin(file, &strtab_buf, shstrtab_off, shstrtab_len);
const shstrtab = strtab_buf[0..shstrtab_read_len];
const shnum = elfInt(is_64, need_bswap, hdr32.e_shnum, hdr64.e_shnum);
@@ -757,7 +757,7 @@ pub fn abiAndDynamicLinkerFromFile(
const shstrtab_off = elfInt(is_64, need_bswap, shstr32.sh_offset, shstr64.sh_offset);
const shstrtab_size = elfInt(is_64, need_bswap, shstr32.sh_size, shstr64.sh_size);
var strtab_buf: [4096:0]u8 = undefined;
- const shstrtab_len = std.math.min(shstrtab_size, strtab_buf.len);
+ const shstrtab_len = @min(shstrtab_size, strtab_buf.len);
const shstrtab_read_len = try preadMin(file, &strtab_buf, shstrtab_off, shstrtab_len);
const shstrtab = strtab_buf[0..shstrtab_read_len];
@@ -806,7 +806,7 @@ pub fn abiAndDynamicLinkerFromFile(
const rpoff_file = ds.offset + rpoff_usize;
const rp_max_size = ds.size - rpoff_usize;
- const strtab_len = std.math.min(rp_max_size, strtab_buf.len);
+ const strtab_len = @min(rp_max_size, strtab_buf.len);
const strtab_read_len = try preadMin(file, &strtab_buf, rpoff_file, strtab_len);
const strtab = strtab_buf[0..strtab_read_len];
diff --git a/src/Autodoc.zig b/src/Autodoc.zig
index 055b8fd989..b34432fe26 100644
--- a/src/Autodoc.zig
+++ b/src/Autodoc.zig
@@ -1494,8 +1494,6 @@ fn walkInstruction(
.frame_type,
.frame_size,
.ptr_to_int,
- .min,
- .max,
.bit_not,
// @check
.clz,
@@ -1546,6 +1544,8 @@ fn walkInstruction(
.offset_of,
.splat,
.reduce,
+ .min,
+ .max,
=> {
const pl_node = data[inst_index].pl_node;
const extra = file.zir.extraData(Zir.Inst.Bin, pl_node.payload_index);
diff --git a/src/Sema.zig b/src/Sema.zig
index c32df9e12a..36fe5a6ee8 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -22367,9 +22367,9 @@ fn analyzeShuffle(
// to it up to the length of the longer vector. This recursion terminates
// in 1 call because these calls to analyzeShuffle guarantee a_len == b_len.
if (a_len != b_len) {
- const min_len = std.math.min(a_len, b_len);
+ const min_len = @min(a_len, b_len);
const max_src = if (a_len > b_len) a_src else b_src;
- const max_len = try sema.usizeCast(block, max_src, std.math.max(a_len, b_len));
+ const max_len = try sema.usizeCast(block, max_src, @max(a_len, b_len));
const expand_mask_values = try sema.arena.alloc(InternPool.Index, max_len);
for (@intCast(usize, 0)..@intCast(usize, min_len)) |i| {
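Editorial note: the comment above reflects the language rule that `@shuffle` accepts operands of different lengths, with Sema padding the shorter one. A hedged user-level illustration (not taken from this commit's tests):

const std = @import("std");

test "@shuffle with operands of different lengths" {
    const a: @Vector(2, u8) = .{ 1, 2 };
    const b: @Vector(4, u8) = .{ 10, 20, 30, 40 };
    // Non-negative mask entries select from a; -1 selects b[0], -4 selects b[3].
    const r = @shuffle(u8, a, b, @Vector(3, i32){ 0, -1, -4 });
    try std.testing.expect(r[0] == 1 and r[1] == 10 and r[2] == 40);
}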
@@ -22984,104 +22984,127 @@ fn analyzeMinMax(
else => @compileError("unreachable"),
};
- // First, find all comptime-known arguments, and get their min/max
+ // The set of runtime-known operands. Set up in the loop below.
var runtime_known = try std.DynamicBitSet.initFull(sema.arena, operands.len);
+ // The current minmax value - initially this will always be comptime-known, then we'll add
+ // runtime values into the mix later.
var cur_minmax: ?Air.Inst.Ref = null;
var cur_minmax_src: LazySrcLoc = undefined; // defined if cur_minmax not null
+ // The current known scalar bounds of the value.
+ var bounds_status: enum {
+ unknown, // We've only seen undef comptime_ints so far, so do not know the bounds.
+ defined, // We've seen only integers, so the bounds are defined.
+ non_integral, // There are floats in the mix, so the bounds aren't defined.
+ } = .unknown;
+ var cur_min_scalar: Value = undefined;
+ var cur_max_scalar: Value = undefined;
+
+ // First, find all comptime-known arguments, and get their min/max
+
for (operands, operand_srcs, 0..) |operand, operand_src, operand_idx| {
// Resolve the value now to avoid redundant calls to `checkSimdBinOp` - we'll have to call
// it in the runtime path anyway since the result type may have been refined
- const uncasted_operand_val = (try sema.resolveMaybeUndefVal(operand)) orelse continue;
- if (cur_minmax) |cur| {
- const simd_op = try sema.checkSimdBinOp(block, src, cur, operand, cur_minmax_src, operand_src);
- const cur_val = simd_op.lhs_val.?; // cur_minmax is comptime-known
- const operand_val = simd_op.rhs_val.?; // we checked the operand was resolvable above
-
- runtime_known.unset(operand_idx);
+ const unresolved_uncoerced_val = try sema.resolveMaybeUndefVal(operand) orelse continue;
+ const uncoerced_val = try sema.resolveLazyValue(unresolved_uncoerced_val);
+
+ runtime_known.unset(operand_idx);
+
+ switch (bounds_status) {
+ .unknown, .defined => refine_bounds: {
+ const ty = sema.typeOf(operand);
+ if (!ty.scalarType(mod).isInt(mod) and !ty.scalarType(mod).eql(Type.comptime_int, mod)) {
+ bounds_status = .non_integral;
+ break :refine_bounds;
+ }
+ const scalar_bounds: ?[2]Value = bounds: {
+ if (!ty.isVector(mod)) break :bounds try uncoerced_val.intValueBounds(mod);
+ var cur_bounds: [2]Value = try Value.intValueBounds(try uncoerced_val.elemValue(mod, 0), mod) orelse break :bounds null;
+ const len = try sema.usizeCast(block, src, ty.vectorLen(mod));
+ for (1..len) |i| {
+ const elem = try uncoerced_val.elemValue(mod, i);
+ const elem_bounds = try elem.intValueBounds(mod) orelse break :bounds null;
+ cur_bounds = .{
+ Value.numberMin(elem_bounds[0], cur_bounds[0], mod),
+ Value.numberMax(elem_bounds[1], cur_bounds[1], mod),
+ };
+ }
+ break :bounds cur_bounds;
+ };
+ if (scalar_bounds) |bounds| {
+ if (bounds_status == .unknown) {
+ cur_min_scalar = bounds[0];
+ cur_max_scalar = bounds[1];
+ bounds_status = .defined;
+ } else {
+ cur_min_scalar = opFunc(cur_min_scalar, bounds[0], mod);
+ cur_max_scalar = opFunc(cur_max_scalar, bounds[1], mod);
+ }
+ }
+ },
+ .non_integral => {},
+ }
- if (cur_val.isUndef(mod)) continue; // result is also undef
- if (operand_val.isUndef(mod)) {
- cur_minmax = try sema.addConstUndef(simd_op.result_ty);
- continue;
- }
+ const cur = cur_minmax orelse {
+ cur_minmax = operand;
+ cur_minmax_src = operand_src;
+ continue;
+ };
- const resolved_cur_val = try sema.resolveLazyValue(cur_val);
- const resolved_operand_val = try sema.resolveLazyValue(operand_val);
+ const simd_op = try sema.checkSimdBinOp(block, src, cur, operand, cur_minmax_src, operand_src);
+ const cur_val = try sema.resolveLazyValue(simd_op.lhs_val.?); // cur_minmax is comptime-known
+ const operand_val = try sema.resolveLazyValue(simd_op.rhs_val.?); // we checked the operand was resolvable above
- const vec_len = simd_op.len orelse {
- const result_val = opFunc(resolved_cur_val, resolved_operand_val, mod);
- cur_minmax = try sema.addConstant(simd_op.result_ty, result_val);
- continue;
- };
- const elems = try sema.arena.alloc(InternPool.Index, vec_len);
- for (elems, 0..) |*elem, i| {
- const lhs_elem_val = try resolved_cur_val.elemValue(mod, i);
- const rhs_elem_val = try resolved_operand_val.elemValue(mod, i);
- elem.* = try opFunc(lhs_elem_val, rhs_elem_val, mod).intern(simd_op.scalar_ty, mod);
- }
- cur_minmax = try sema.addConstant(simd_op.result_ty, (try mod.intern(.{ .aggregate = .{
- .ty = simd_op.result_ty.toIntern(),
- .storage = .{ .elems = elems },
- } })).toValue());
- } else {
- runtime_known.unset(operand_idx);
- cur_minmax = try sema.addConstant(sema.typeOf(operand), uncasted_operand_val);
- cur_minmax_src = operand_src;
+ const vec_len = simd_op.len orelse {
+ const result_val = opFunc(cur_val, operand_val, mod);
+ cur_minmax = try sema.addConstant(simd_op.result_ty, result_val);
+ continue;
+ };
+ const elems = try sema.arena.alloc(InternPool.Index, vec_len);
+ for (elems, 0..) |*elem, i| {
+ const lhs_elem_val = try cur_val.elemValue(mod, i);
+ const rhs_elem_val = try operand_val.elemValue(mod, i);
+ const uncoerced_elem = opFunc(lhs_elem_val, rhs_elem_val, mod);
+ elem.* = (try mod.getCoerced(uncoerced_elem, simd_op.scalar_ty)).toIntern();
}
+ cur_minmax = try sema.addConstant(simd_op.result_ty, (try mod.intern(.{ .aggregate = .{
+ .ty = simd_op.result_ty.toIntern(),
+ .storage = .{ .elems = elems },
+ } })).toValue());
}
const opt_runtime_idx = runtime_known.findFirstSet();
- const comptime_refined_ty: ?Type = if (cur_minmax) |ct_minmax_ref| refined: {
- // Refine the comptime-known result type based on the operation
+ if (cur_minmax) |ct_minmax_ref| refine: {
+ // Refine the comptime-known result type based on the bounds. This isn't strictly necessary
+ // in the runtime case, since we'll refine the type again later, but keeping things as small
+ // as possible will allow us to emit more optimal AIR (if all the runtime operands have
+ // smaller types than the non-refined comptime type).
+
const val = (try sema.resolveMaybeUndefVal(ct_minmax_ref)).?;
const orig_ty = sema.typeOf(ct_minmax_ref);
- if (opt_runtime_idx == null and orig_ty.eql(Type.comptime_int, mod)) {
+ if (opt_runtime_idx == null and orig_ty.scalarType(mod).eql(Type.comptime_int, mod)) {
// If all arguments were `comptime_int`, and there are no runtime args, we'll preserve that type
- break :refined orig_ty;
+ break :refine;
}
- const refined_ty = if (orig_ty.zigTypeTag(mod) == .Vector) blk: {
- const elem_ty = orig_ty.childType(mod);
- const len = orig_ty.vectorLen(mod);
-
- if (len == 0) break :blk orig_ty;
- if (elem_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats
+ // We can't refine float types
+ if (orig_ty.scalarType(mod).isAnyFloat()) break :refine;
- var cur_min: Value = try val.elemValue(mod, 0);
- var cur_max: Value = cur_min;
- for (1..len) |idx| {
- const elem_val = try val.elemValue(mod, idx);
- if (elem_val.isUndef(mod)) break :blk orig_ty; // can't refine undef
- if (Value.order(elem_val, cur_min, mod).compare(.lt)) cur_min = elem_val;
- if (Value.order(elem_val, cur_max, mod).compare(.gt)) cur_max = elem_val;
- }
+ assert(bounds_status == .defined); // there was a non-comptime-int integral comptime-known arg
- const refined_elem_ty = try mod.intFittingRange(cur_min, cur_max);
- break :blk try mod.vectorType(.{
- .len = len,
- .child = refined_elem_ty.toIntern(),
- });
- } else blk: {
- if (orig_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats
- if (val.isUndef(mod)) break :blk orig_ty; // can't refine undef
- break :blk try mod.intFittingRange(val, val);
- };
+ const refined_scalar_ty = try mod.intFittingRange(cur_min_scalar, cur_max_scalar);
+ const refined_ty = if (orig_ty.isVector(mod)) try mod.vectorType(.{
+ .len = orig_ty.vectorLen(mod),
+ .child = refined_scalar_ty.toIntern(),
+ }) else refined_scalar_ty;
- // Apply the refined type to the current value - this isn't strictly necessary in the
- // runtime case since we'll refine again afterwards, but keeping things as small as possible
- // will allow us to emit more optimal AIR (if all the runtime operands have smaller types
- // than the non-refined comptime type).
- if (!refined_ty.eql(orig_ty, mod)) {
- if (std.debug.runtime_safety) {
- assert(try sema.intFitsInType(val, refined_ty, null));
- }
- cur_minmax = try sema.coerceInMemory(val, refined_ty);
+ // Apply the refined type to the current value
+ if (std.debug.runtime_safety) {
+ assert(try sema.intFitsInType(val, refined_ty, null));
}
-
- break :refined refined_ty;
- } else null;
+ cur_minmax = try sema.coerceInMemory(val, refined_ty);
+ }
const runtime_idx = opt_runtime_idx orelse return cur_minmax.?;
const runtime_src = operand_srcs[runtime_idx];
@@ -23102,6 +23125,11 @@ fn analyzeMinMax(
cur_minmax = operands[0];
cur_minmax_src = runtime_src;
runtime_known.unset(0); // don't look at this operand in the loop below
+ const scalar_ty = sema.typeOf(cur_minmax.?).scalarType(mod);
+ if (scalar_ty.isInt(mod)) {
+ cur_min_scalar = try scalar_ty.minInt(mod, scalar_ty);
+ cur_max_scalar = try scalar_ty.maxInt(mod, scalar_ty);
+ }
}
var it = runtime_known.iterator(.{});
@@ -23112,49 +23140,49 @@ fn analyzeMinMax(
const rhs_src = operand_srcs[idx];
const simd_op = try sema.checkSimdBinOp(block, src, lhs, rhs, lhs_src, rhs_src);
if (known_undef) {
- cur_minmax = try sema.addConstant(simd_op.result_ty, Value.undef);
+ cur_minmax = try sema.addConstUndef(simd_op.result_ty);
} else {
cur_minmax = try block.addBinOp(air_tag, simd_op.lhs, simd_op.rhs);
}
+ // Compute the bounds of this type
+ switch (bounds_status) {
+ .unknown, .defined => refine_bounds: {
+ const scalar_ty = sema.typeOf(rhs).scalarType(mod);
+ if (scalar_ty.isAnyFloat()) {
+ bounds_status = .non_integral;
+ break :refine_bounds;
+ }
+ const scalar_min = try scalar_ty.minInt(mod, scalar_ty);
+ const scalar_max = try scalar_ty.maxInt(mod, scalar_ty);
+ if (bounds_status == .unknown) {
+ cur_min_scalar = scalar_min;
+ cur_max_scalar = scalar_max;
+ bounds_status = .defined;
+ } else {
+ cur_min_scalar = opFunc(cur_min_scalar, scalar_min, mod);
+ cur_max_scalar = opFunc(cur_max_scalar, scalar_max, mod);
+ }
+ },
+ .non_integral => {},
+ }
}
- if (comptime_refined_ty) |comptime_ty| refine: {
- // Finally, refine the type based on the comptime-known bound.
- if (known_undef) break :refine; // can't refine undef
- const unrefined_ty = sema.typeOf(cur_minmax.?);
- const is_vector = unrefined_ty.zigTypeTag(mod) == .Vector;
- const comptime_elem_ty = if (is_vector) comptime_ty.childType(mod) else comptime_ty;
- const unrefined_elem_ty = if (is_vector) unrefined_ty.childType(mod) else unrefined_ty;
-
- if (unrefined_elem_ty.isAnyFloat()) break :refine; // we can't refine floats
-
- // Compute the final bounds based on the runtime type and the comptime-known bound type
- const min_val = switch (air_tag) {
- .min => try unrefined_elem_ty.minInt(mod, unrefined_elem_ty),
- .max => try comptime_elem_ty.minInt(mod, comptime_elem_ty), // @max(ct, rt) >= ct
- else => unreachable,
- };
- const max_val = switch (air_tag) {
- .min => try comptime_elem_ty.maxInt(mod, comptime_elem_ty), // @min(ct, rt) <= ct
- .max => try unrefined_elem_ty.maxInt(mod, unrefined_elem_ty),
- else => unreachable,
- };
-
- // Find the smallest type which can contain these bounds
- const final_elem_ty = try mod.intFittingRange(min_val, max_val);
-
- const final_ty = if (is_vector)
- try mod.vectorType(.{
- .len = unrefined_ty.vectorLen(mod),
- .child = final_elem_ty.toIntern(),
- })
- else
- final_elem_ty;
+ // Finally, refine the type based on the known bounds.
+ const unrefined_ty = sema.typeOf(cur_minmax.?);
+ if (unrefined_ty.scalarType(mod).isAnyFloat()) {
+ // We can't refine floats, so we're done.
+ return cur_minmax.?;
+ }
+ assert(bounds_status == .defined); // there were integral runtime operands
+ const refined_scalar_ty = try mod.intFittingRange(cur_min_scalar, cur_max_scalar);
+ const refined_ty = if (unrefined_ty.isVector(mod)) try mod.vectorType(.{
+ .len = unrefined_ty.vectorLen(mod),
+ .child = refined_scalar_ty.toIntern(),
+ }) else refined_scalar_ty;
- if (!final_ty.eql(unrefined_ty, mod)) {
- // We've reduced the type - cast the result down
- return block.addTyOp(.intcast, final_ty, cur_minmax.?);
- }
+ if (!refined_ty.eql(unrefined_ty, mod)) {
+ // We've reduced the type - cast the result down
+ return block.addTyOp(.intcast, refined_ty, cur_minmax.?);
}
return cur_minmax.?;
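Editorial note: the net effect of the bounds tracking above is visible in the result type of `@min`/`@max`: each operand contributes scalar bounds (its value if comptime-known, its type's range otherwise), those bounds are combined with the same min/max operation, and the result type is `intFittingRange` of the combined bounds. A small user-level sketch, consistent with the behavior tests added at the end of this diff:

const std = @import("std");

test "result type follows the combined bounds" {
    var x: u16 = 123;
    var y: u32 = 456;
    // @min can never exceed the smaller operand type's maximum, so the result
    // type shrinks to u16; @max keeps the widest bound, u32.
    comptime std.debug.assert(@TypeOf(@min(x, y)) == u16);
    comptime std.debug.assert(@TypeOf(@max(x, y)) == u32);
    try std.testing.expect(@min(x, y) == 123);
    try std.testing.expect(@max(x, y) == 456);
}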
@@ -31273,7 +31301,7 @@ fn cmpNumeric(
}
const dest_ty = if (dest_float_type) |ft| ft else blk: {
- const max_bits = std.math.max(lhs_bits, rhs_bits);
+ const max_bits = @max(lhs_bits, rhs_bits);
const casted_bits = std.math.cast(u16, max_bits) orelse return sema.fail(block, src, "{d} exceeds maximum integer bit count", .{max_bits});
const signedness: std.builtin.Signedness = if (dest_int_is_signed) .signed else .unsigned;
break :blk try mod.intType(signedness, casted_bits);
@@ -35800,7 +35828,7 @@ fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value {
const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
const limbs = try sema.arena.alloc(
std.math.big.Limb,
- std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
+ @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
);
var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.add(lhs_bigint, rhs_bigint);
@@ -35890,7 +35918,7 @@ fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value {
const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
const limbs = try sema.arena.alloc(
std.math.big.Limb,
- std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
+ @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
);
var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.sub(lhs_bigint, rhs_bigint);
diff --git a/src/TypedValue.zig b/src/TypedValue.zig
index 9d3fb67d1f..93454710dc 100644
--- a/src/TypedValue.zig
+++ b/src/TypedValue.zig
@@ -111,7 +111,7 @@ pub fn print(
.val = val.castTag(.repeated).?.data,
};
const len = ty.arrayLen(mod);
- const max_len = std.math.min(len, max_aggregate_items);
+ const max_len = @min(len, max_aggregate_items);
while (i < max_len) : (i += 1) {
if (i != 0) try writer.writeAll(", ");
try print(elem_tv, writer, level - 1, mod);
@@ -130,7 +130,7 @@ pub fn print(
const len = payload.len.toUnsignedInt(mod);
if (elem_ty.eql(Type.u8, mod)) str: {
- const max_len = @intCast(usize, std.math.min(len, max_string_len));
+ const max_len: usize = @min(len, max_string_len);
var buf: [max_string_len]u8 = undefined;
var i: u32 = 0;
@@ -149,7 +149,7 @@ pub fn print(
try writer.writeAll(".{ ");
- const max_len = std.math.min(len, max_aggregate_items);
+ const max_len = @min(len, max_aggregate_items);
var i: u32 = 0;
while (i < max_len) : (i += 1) {
if (i != 0) try writer.writeAll(", ");
@@ -455,7 +455,7 @@ fn printAggregate(
const len = ty.arrayLen(mod);
if (elem_ty.eql(Type.u8, mod)) str: {
- const max_len = @intCast(usize, std.math.min(len, max_string_len));
+ const max_len: usize = @min(len, max_string_len);
var buf: [max_string_len]u8 = undefined;
var i: u32 = 0;
@@ -471,7 +471,7 @@ fn printAggregate(
try writer.writeAll(".{ ");
- const max_len = std.math.min(len, max_aggregate_items);
+ const max_len = @min(len, max_aggregate_items);
var i: u32 = 0;
while (i < max_len) : (i += 1) {
if (i != 0) try writer.writeAll(", ");
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index f1669256c8..6e13a55008 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -2912,7 +2912,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
const dst_info = dst_ty.intInfo(mod);
const src_ty = try mod.intType(dst_info.signedness, switch (tag) {
else => unreachable,
- .mul, .mulwrap => math.max3(
+ .mul, .mulwrap => @max(
self.activeIntBits(bin_op.lhs),
self.activeIntBits(bin_op.rhs),
dst_info.bits / 2,
@@ -3354,7 +3354,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const lhs_active_bits = self.activeIntBits(bin_op.lhs);
const rhs_active_bits = self.activeIntBits(bin_op.rhs);
- const src_bits = math.max3(lhs_active_bits, rhs_active_bits, dst_info.bits / 2);
+ const src_bits = @max(lhs_active_bits, rhs_active_bits, dst_info.bits / 2);
const src_ty = try mod.intType(dst_info.signedness, src_bits);
const lhs = try self.resolveInst(bin_op.lhs);
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 273bfc7c9b..15ba9ebecc 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -2327,7 +2327,7 @@ fn allocateAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignme
self.debug_aranges_section_dirty = true;
}
}
- shdr.sh_addralign = math.max(shdr.sh_addralign, alignment);
+ shdr.sh_addralign = @max(shdr.sh_addralign, alignment);
// This function can also reallocate an atom.
// In this case we need to "unplug" it from its previous location before
diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig
index 59b3e50b07..4709560ba7 100644
--- a/src/link/MachO/CodeSignature.zig
+++ b/src/link/MachO/CodeSignature.zig
@@ -99,7 +99,7 @@ const CodeDirectory = struct {
fn addSpecialHash(self: *CodeDirectory, index: u32, hash: [hash_size]u8) void {
assert(index > 0);
- self.inner.nSpecialSlots = std.math.max(self.inner.nSpecialSlots, index);
+ self.inner.nSpecialSlots = @max(self.inner.nSpecialSlots, index);
self.special_slots[index - 1] = hash;
}
@@ -426,11 +426,11 @@ pub fn estimateSize(self: CodeSignature, file_size: u64) u32 {
var n_special_slots: u32 = 0;
if (self.requirements) |req| {
ssize += @sizeOf(macho.BlobIndex) + req.size();
- n_special_slots = std.math.max(n_special_slots, req.slotType());
+ n_special_slots = @max(n_special_slots, req.slotType());
}
if (self.entitlements) |ent| {
ssize += @sizeOf(macho.BlobIndex) + ent.size() + hash_size;
- n_special_slots = std.math.max(n_special_slots, ent.slotType());
+ n_special_slots = @max(n_special_slots, ent.slotType());
}
if (self.signature) |sig| {
ssize += @sizeOf(macho.BlobIndex) + sig.size();
diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig
index b218fdbd2d..105a806075 100644
--- a/src/link/MachO/Object.zig
+++ b/src/link/MachO/Object.zig
@@ -530,7 +530,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void {
sect.addr + sect.size - addr;
const atom_align = if (addr > 0)
- math.min(@ctz(addr), sect.@"align")
+ @min(@ctz(addr), sect.@"align")
else
sect.@"align";
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index fdac7dfa63..5126033995 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -2027,7 +2027,7 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void {
};
const segment: *Segment = &wasm.segments.items[final_index];
- segment.alignment = std.math.max(segment.alignment, atom.alignment);
+ segment.alignment = @max(segment.alignment, atom.alignment);
try wasm.appendAtomAtIndex(final_index, atom_index);
}
diff --git a/src/link/Wasm/Object.zig b/src/link/Wasm/Object.zig
index 363648971a..33f54dece5 100644
--- a/src/link/Wasm/Object.zig
+++ b/src/link/Wasm/Object.zig
@@ -979,7 +979,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
const segment: *Wasm.Segment = &wasm_bin.segments.items[final_index];
if (relocatable_data.type == .data) { //code section and debug sections are 1-byte aligned
- segment.alignment = std.math.max(segment.alignment, atom.alignment);
+ segment.alignment = @max(segment.alignment, atom.alignment);
}
try wasm_bin.appendAtomAtIndex(final_index, atom_index);
diff --git a/src/main.zig b/src/main.zig
index 5d666840c0..b245b357ca 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -4000,8 +4000,8 @@ pub const usage_libc =
\\ Parse a libc installation text file and validate it.
\\
\\Options:
- \\ -h, --help Print this help and exit
- \\ -target [name] <arch><sub>-<os>-<abi> see the targets command
+ \\ -h, --help Print this help and exit
+ \\ -target [name] <arch><sub>-<os>-<abi> see the targets command
\\
;
@@ -4068,7 +4068,7 @@ pub const usage_init =
\\ directory.
\\
\\Options:
- \\ -h, --help Print this help and exit
+ \\ -h, --help Print this help and exit
\\
\\
;
@@ -4166,16 +4166,18 @@ pub const usage_build =
\\ Build a project from build.zig.
\\
\\Options:
- \\ -freference-trace[=num] How many lines of reference trace should be shown per compile error
- \\ -fno-reference-trace Disable reference trace
- \\ -fsummary Print the build summary, even on success
- \\ -fno-summary Omit the build summary, even on failure
- \\ --build-file [file] Override path to build.zig
- \\ --cache-dir [path] Override path to local Zig cache directory
- \\ --global-cache-dir [path] Override path to global Zig cache directory
- \\ --zig-lib-dir [arg] Override path to Zig lib directory
- \\ --build-runner [file] Override path to build runner
- \\ -h, --help Print this help and exit
+ \\ -freference-trace[=num] How many lines of reference trace should be shown per compile error
+ \\ -fno-reference-trace Disable reference trace
+ \\ --summary [mode] Control the printing of the build summary
+ \\ all Print the build summary in its entirety
+ \\ failures (Default) Only print failed steps
+ \\ none Do not print the build summary
+ \\ --build-file [file] Override path to build.zig
+ \\ --cache-dir [path] Override path to local Zig cache directory
+ \\ --global-cache-dir [path] Override path to global Zig cache directory
+ \\ --zig-lib-dir [arg] Override path to Zig lib directory
+ \\ --build-runner [file] Override path to build runner
+ \\ -h, --help Print this help and exit
\\
;
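Editorial note: going by the option text above, an invocation such as `zig build --summary all` would print the summary in its entirety, while the default `failures` mode lists only the steps that failed (example inferred from the usage text, not taken from the patch).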
@@ -4576,13 +4578,13 @@ pub const usage_fmt =
\\ recursively.
\\
\\Options:
- \\ -h, --help Print this help and exit
- \\ --color [auto|off|on] Enable or disable colored error messages
- \\ --stdin Format code from stdin; output to stdout
- \\ --check List non-conforming files and exit with an error
- \\ if the list is non-empty
- \\ --ast-check Run zig ast-check on every file
- \\ --exclude [file] Exclude file or directory from formatting
+ \\ -h, --help Print this help and exit
+ \\ --color [auto|off|on] Enable or disable colored error messages
+ \\ --stdin Format code from stdin; output to stdout
+ \\ --check List non-conforming files and exit with an error
+ \\ if the list is non-empty
+ \\ --ast-check Run zig ast-check on every file
+ \\ --exclude [file] Exclude file or directory from formatting
\\
\\
;
@@ -5391,7 +5393,7 @@ fn gimmeMoreOfThoseSweetSweetFileDescriptors() void {
// setrlimit() now returns with errno set to EINVAL in places that historically succeeded.
// It no longer accepts "rlim_cur = RLIM.INFINITY" for RLIM.NOFILE.
// Use "rlim_cur = min(OPEN_MAX, rlim_max)".
- lim.max = std.math.min(std.os.darwin.OPEN_MAX, lim.max);
+ lim.max = @min(std.os.darwin.OPEN_MAX, lim.max);
}
if (lim.cur == lim.max) return;
diff --git a/src/translate_c.zig b/src/translate_c.zig
index 8cc2d1856c..67176ff74b 100644
--- a/src/translate_c.zig
+++ b/src/translate_c.zig
@@ -2400,7 +2400,7 @@ fn transStringLiteralInitializer(
if (array_size == 0) return Tag.empty_array.create(c.arena, elem_type);
- const num_inits = math.min(str_length, array_size);
+ const num_inits = @min(str_length, array_size);
const init_node = if (num_inits > 0) blk: {
if (is_narrow) {
// "string literal".* or string literal"[0..num_inits].*
diff --git a/src/translate_c/ast.zig b/src/translate_c/ast.zig
index 6c6bbf28bd..443c56a84a 100644
--- a/src/translate_c/ast.zig
+++ b/src/translate_c/ast.zig
@@ -1824,7 +1824,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
},
.switch_prong => {
const payload = node.castTag(.switch_prong).?.data;
- var items = try c.gpa.alloc(NodeIndex, std.math.max(payload.cases.len, 1));
+ var items = try c.gpa.alloc(NodeIndex, @max(payload.cases.len, 1));
defer c.gpa.free(items);
items[0] = 0;
for (payload.cases, 0..) |item, i| {
@@ -1973,7 +1973,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
const payload = node.castTag(.tuple).?.data;
_ = try c.addToken(.period, ".");
const l_brace = try c.addToken(.l_brace, "{");
- var inits = try c.gpa.alloc(NodeIndex, std.math.max(payload.len, 2));
+ var inits = try c.gpa.alloc(NodeIndex, @max(payload.len, 2));
defer c.gpa.free(inits);
inits[0] = 0;
inits[1] = 0;
@@ -2007,7 +2007,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
const payload = node.castTag(.container_init_dot).?.data;
_ = try c.addToken(.period, ".");
const l_brace = try c.addToken(.l_brace, "{");
- var inits = try c.gpa.alloc(NodeIndex, std.math.max(payload.len, 2));
+ var inits = try c.gpa.alloc(NodeIndex, @max(payload.len, 2));
defer c.gpa.free(inits);
inits[0] = 0;
inits[1] = 0;
@@ -2046,7 +2046,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
const lhs = try renderNode(c, payload.lhs);
const l_brace = try c.addToken(.l_brace, "{");
- var inits = try c.gpa.alloc(NodeIndex, std.math.max(payload.inits.len, 1));
+ var inits = try c.gpa.alloc(NodeIndex, @max(payload.inits.len, 1));
defer c.gpa.free(inits);
inits[0] = 0;
for (payload.inits, 0..) |init, i| {
@@ -2102,7 +2102,7 @@ fn renderRecord(c: *Context, node: Node) !NodeIndex {
const num_vars = payload.variables.len;
const num_funcs = payload.functions.len;
const total_members = payload.fields.len + num_vars + num_funcs;
- const members = try c.gpa.alloc(NodeIndex, std.math.max(total_members, 2));
+ const members = try c.gpa.alloc(NodeIndex, @max(total_members, 2));
defer c.gpa.free(members);
members[0] = 0;
members[1] = 0;
@@ -2195,7 +2195,7 @@ fn renderFieldAccess(c: *Context, lhs: NodeIndex, field_name: []const u8) !NodeI
fn renderArrayInit(c: *Context, lhs: NodeIndex, inits: []const Node) !NodeIndex {
const l_brace = try c.addToken(.l_brace, "{");
- var rendered = try c.gpa.alloc(NodeIndex, std.math.max(inits.len, 1));
+ var rendered = try c.gpa.alloc(NodeIndex, @max(inits.len, 1));
defer c.gpa.free(rendered);
rendered[0] = 0;
for (inits, 0..) |init, i| {
@@ -2904,7 +2904,7 @@ fn renderMacroFunc(c: *Context, node: Node) !NodeIndex {
fn renderParams(c: *Context, params: []Payload.Param, is_var_args: bool) !std.ArrayList(NodeIndex) {
_ = try c.addToken(.l_paren, "(");
- var rendered = try std.ArrayList(NodeIndex).initCapacity(c.gpa, std.math.max(params.len, 1));
+ var rendered = try std.ArrayList(NodeIndex).initCapacity(c.gpa, @max(params.len, 1));
errdefer rendered.deinit();
for (params, 0..) |param, i| {
diff --git a/src/type.zig b/src/type.zig
index 22523a7141..bb82a50682 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -1633,7 +1633,7 @@ pub const Type = struct {
const len = array_type.len + @boolToInt(array_type.sentinel != .none);
if (len == 0) return 0;
const elem_ty = array_type.child.toType();
- const elem_size = std.math.max(elem_ty.abiAlignment(mod), elem_ty.abiSize(mod));
+ const elem_size = @max(elem_ty.abiAlignment(mod), elem_ty.abiSize(mod));
if (elem_size == 0) return 0;
const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, opt_sema);
return (len - 1) * 8 * elem_size + elem_bit_size;
diff --git a/src/value.zig b/src/value.zig
index d3f15121b8..8590aa8872 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -2458,7 +2458,7 @@ pub const Value = struct {
const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
const limbs = try arena.alloc(
std.math.big.Limb,
- std.math.max(
+ @max(
// For the saturate
std.math.big.int.calcTwosCompLimbCount(info.bits),
lhs_bigint.limbs.len + rhs_bigint.limbs.len,
@@ -2572,7 +2572,7 @@ pub const Value = struct {
const limbs = try arena.alloc(
std.math.big.Limb,
// + 1 for negatives
- std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
+ @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitAnd(lhs_bigint, rhs_bigint);
@@ -2638,7 +2638,7 @@ pub const Value = struct {
const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
const limbs = try arena.alloc(
std.math.big.Limb,
- std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
+ @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitOr(lhs_bigint, rhs_bigint);
@@ -2677,7 +2677,7 @@ pub const Value = struct {
const limbs = try arena.alloc(
std.math.big.Limb,
// + 1 for negatives
- std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
+ @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitXor(lhs_bigint, rhs_bigint);
@@ -4146,6 +4146,20 @@ pub const Value = struct {
return val.toIntern() == .generic_poison;
}
+ /// For an integer (comptime or fixed-width) `val`, returns the comptime-known bounds of the value.
+ /// If `val` is not undef, the bounds are both `val`.
+ /// If `val` is undef and has a fixed-width type, the bounds are the bounds of the type.
+ /// If `val` is undef and is a `comptime_int`, returns null.
+ pub fn intValueBounds(val: Value, mod: *Module) !?[2]Value {
+ if (!val.isUndef(mod)) return .{ val, val };
+ const ty = mod.intern_pool.typeOf(val.toIntern());
+ if (ty == .comptime_int_type) return null;
+ return .{
+ try ty.toType().minInt(mod, ty.toType()),
+ try ty.toType().maxInt(mod, ty.toType()),
+ };
+ }
+
/// This type is not copyable since it may contain pointers to its inner data.
pub const Payload = struct {
tag: Tag,
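Editorial sketch: since `intValueBounds` reports `[val, val]` for a defined comptime-known integer, a comptime operand can tighten the `@min`/`@max` result type well below either operand's type. A hedged user-level example (not from this commit's tests):

const std = @import("std");

test "comptime-known value refines the result type" {
    var x: u32 = 123;
    // The comptime operand bounds the minimum at 255, so the result type is
    // intFittingRange(0, 255), i.e. u8.
    comptime std.debug.assert(@TypeOf(@min(x, 255)) == u8);
    try std.testing.expect(@min(x, 255) == 123);
}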
diff --git a/stage1/zig.h b/stage1/zig.h
index 34407d8cda..f0c6f4178b 100644
--- a/stage1/zig.h
+++ b/stage1/zig.h
@@ -487,14 +487,14 @@ typedef ptrdiff_t intptr_t;
zig_basic_operator(uint##w##_t, div_floor_u##w, /) \
\
static inline int##w##_t zig_div_floor_i##w(int##w##_t lhs, int##w##_t rhs) { \
- return lhs / rhs - (((lhs ^ rhs) & (lhs % rhs)) < INT##w##_C(0)); \
+ return lhs / rhs + (lhs % rhs != INT##w##_C(0) ? zig_shr_i##w(lhs ^ rhs, UINT8_C(w) - UINT8_C(1)) : INT##w##_C(0)); \
} \
\
zig_basic_operator(uint##w##_t, mod_u##w, %) \
\
static inline int##w##_t zig_mod_i##w(int##w##_t lhs, int##w##_t rhs) { \
int##w##_t rem = lhs % rhs; \
- return rem + (((lhs ^ rhs) & rem) < INT##w##_C(0) ? rhs : INT##w##_C(0)); \
+ return rem + (rem != INT##w##_C(0) ? rhs & zig_shr_i##w(lhs ^ rhs, UINT8_C(w) - UINT8_C(1)) : INT##w##_C(0)); \
} \
\
static inline uint##w##_t zig_shlw_u##w(uint##w##_t lhs, uint8_t rhs, uint8_t bits) { \
@@ -1078,7 +1078,7 @@ static inline int64_t zig_bit_reverse_i64(int64_t val, uint8_t bits) {
uint##w##_t temp = val - ((val >> 1) & (UINT##w##_MAX / 3)); \
temp = (temp & (UINT##w##_MAX / 5)) + ((temp >> 2) & (UINT##w##_MAX / 5)); \
temp = (temp + (temp >> 4)) & (UINT##w##_MAX / 17); \
- return temp * (UINT##w##_MAX / 255) >> (w - 8); \
+ return temp * (UINT##w##_MAX / 255) >> (UINT8_C(w) - UINT8_C(8)); \
} \
\
zig_builtin_popcount_common(w)
@@ -1298,15 +1298,6 @@ static inline zig_i128 zig_rem_i128(zig_i128 lhs, zig_i128 rhs) {
return lhs % rhs;
}
-static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) {
- return zig_div_trunc_i128(lhs, rhs) - (((lhs ^ rhs) & zig_rem_i128(lhs, rhs)) < zig_make_i128(0, 0));
-}
-
-static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) {
- zig_i128 rem = zig_rem_i128(lhs, rhs);
- return rem + (((lhs ^ rhs) & rem) < zig_make_i128(0, 0) ? rhs : zig_make_i128(0, 0));
-}
-
#else /* zig_has_int128 */
static inline zig_u128 zig_not_u128(zig_u128 val, uint8_t bits) {
@@ -1394,20 +1385,26 @@ static zig_i128 zig_rem_i128(zig_i128 lhs, zig_i128 rhs) {
return __modti3(lhs, rhs);
}
-static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) {
- zig_i128 rem = zig_rem_i128(lhs, rhs);
- return zig_add_i128(rem, ((lhs.hi ^ rhs.hi) & rem.hi) < INT64_C(0) ? rhs : zig_make_i128(0, 0));
-}
+#endif /* zig_has_int128 */
+
+#define zig_div_floor_u128 zig_div_trunc_u128
static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) {
- return zig_sub_i128(zig_div_trunc_i128(lhs, rhs), zig_make_i128(0, zig_cmp_i128(zig_and_i128(zig_xor_i128(lhs, rhs), zig_rem_i128(lhs, rhs)), zig_make_i128(0, 0)) < INT32_C(0)));
+ zig_i128 rem = zig_rem_i128(lhs, rhs);
+ int64_t mask = zig_or_u64((uint64_t)zig_hi_i128(rem), zig_lo_i128(rem)) != UINT64_C(0)
+ ? zig_shr_i64(zig_xor_i64(zig_hi_i128(lhs), zig_hi_i128(rhs)), UINT8_C(63)) : INT64_C(0);
+ return zig_add_i128(zig_div_trunc_i128(lhs, rhs), zig_make_i128(mask, (uint64_t)mask));
}
-#endif /* zig_has_int128 */
-
-#define zig_div_floor_u128 zig_div_trunc_u128
#define zig_mod_u128 zig_rem_u128
+static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) {
+ zig_i128 rem = zig_rem_i128(lhs, rhs);
+ int64_t mask = zig_or_u64((uint64_t)zig_hi_i128(rem), zig_lo_i128(rem)) != UINT64_C(0)
+ ? zig_shr_i64(zig_xor_i64(zig_hi_i128(lhs), zig_hi_i128(rhs)), UINT8_C(63)) : INT64_C(0);
+ return zig_add_i128(rem, zig_and_i128(rhs, zig_make_i128(mask, (uint64_t)mask)));
+}
+
static inline zig_u128 zig_min_u128(zig_u128 lhs, zig_u128 rhs) {
return zig_cmp_u128(lhs, rhs) < INT32_C(0) ? lhs : rhs;
}
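Editorial note: the reworked helpers above derive floored division and modulo from their truncated counterparts. When the remainder is nonzero and the operand signs differ, the sign mask `(lhs ^ rhs) >> (w-1)` is -1, which subtracts one from the quotient and adds `rhs` back into the remainder. A sketch of the same identity in Zig (checked against the builtin `@divFloor`/`@mod`), not part of this patch:

const std = @import("std");

fn divFloorViaTrunc(lhs: i32, rhs: i32) i32 {
    const q = @divTrunc(lhs, rhs);
    const r = @rem(lhs, rhs);
    const signs_differ = (lhs ^ rhs) < 0;
    // Floored quotient is one below the truncated quotient exactly when the
    // division was inexact and the operands have opposite signs.
    return if (r != 0 and signs_differ) q - 1 else q;
}

fn modViaRem(lhs: i32, rhs: i32) i32 {
    const r = @rem(lhs, rhs);
    const signs_differ = (lhs ^ rhs) < 0;
    // Floored modulo adds rhs back in the same inexact, opposite-sign case.
    return if (r != 0 and signs_differ) r + rhs else r;
}

test "floored div/mod identities" {
    try std.testing.expectEqual(@divFloor(@as(i32, -7), 2), divFloorViaTrunc(-7, 2)); // -4
    try std.testing.expectEqual(@mod(@as(i32, -7), 2), modViaRem(-7, 2)); // 1
}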
diff --git a/stage1/zig1.wasm b/stage1/zig1.wasm
index 600e273ec1..96feefddde 100644
--- a/stage1/zig1.wasm
+++ b/stage1/zig1.wasm
Binary files differ
diff --git a/test/behavior/maximum_minimum.zig b/test/behavior/maximum_minimum.zig
index 7d382e961f..8d1153638d 100644
--- a/test/behavior/maximum_minimum.zig
+++ b/test/behavior/maximum_minimum.zig
@@ -1,6 +1,7 @@
const std = @import("std");
const builtin = @import("builtin");
const mem = std.mem;
+const assert = std.debug.assert;
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
@@ -210,3 +211,87 @@ test "@min/@max on comptime_int" {
try expectEqual(-2, min);
try expectEqual(2, max);
}
+
+test "@min/@max notices bounds from types" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+
+ var x: u16 = 123;
+ var y: u32 = 456;
+ var z: u8 = 10;
+
+ const min = @min(x, y, z);
+ const max = @max(x, y, z);
+
+ comptime assert(@TypeOf(min) == u8);
+ comptime assert(@TypeOf(max) == u32);
+
+ try expectEqual(z, min);
+ try expectEqual(y, max);
+}
+
+test "@min/@max notices bounds from vector types" {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+
+ var x: @Vector(2, u16) = .{ 30, 67 };
+ var y: @Vector(2, u32) = .{ 20, 500 };
+ var z: @Vector(2, u8) = .{ 60, 15 };
+
+ const min = @min(x, y, z);
+ const max = @max(x, y, z);
+
+ comptime assert(@TypeOf(min) == @Vector(2, u8));
+ comptime assert(@TypeOf(max) == @Vector(2, u32));
+
+ try expectEqual(@Vector(2, u8){ 20, 15 }, min);
+ try expectEqual(@Vector(2, u32){ 60, 500 }, max);
+}
+
+test "@min/@max notices bounds from types when comptime-known value is undef" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+
+ var x: u32 = 1_000_000;
+ const y: u16 = undefined;
+ // y is comptime-known, but is undef, so bounds cannot be refined using its value
+
+ const min = @min(x, y);
+ const max = @max(x, y);
+
+ comptime assert(@TypeOf(min) == u16);
+ comptime assert(@TypeOf(max) == u32);
+
+ // Cannot assert values as one was undefined
+}
+
+test "@min/@max notices bounds from vector types when element of comptime-known vector is undef" {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+
+ var x: @Vector(2, u32) = .{ 1_000_000, 12345 };
+ const y: @Vector(2, u16) = .{ 10, undefined };
+ // y is comptime-known, but an element is undef, so bounds cannot be refined using its value
+
+ const min = @min(x, y);
+ const max = @max(x, y);
+
+ comptime assert(@TypeOf(min) == @Vector(2, u16));
+ comptime assert(@TypeOf(max) == @Vector(2, u32));
+
+ try expectEqual(@as(u16, 10), min[0]);
+ try expectEqual(@as(u32, 1_000_000), max[0]);
+ // Cannot assert values at index 1 as one was undefined
+}