From 94e30a756edc4c2182168dabd97d481b8aec0ff2 Mon Sep 17 00:00:00 2001 From: Linus Groh Date: Sun, 30 Apr 2023 18:02:08 +0100 Subject: std: fix a bunch of typos The majority of these are in comments, some in doc comments which might affect the generated documentation, and a few in parameter names - nothing that should be breaking, however. --- lib/std/os/linux/bpf/btf.zig | 6 +++--- lib/std/os/linux/seccomp.zig | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'lib/std/os/linux') diff --git a/lib/std/os/linux/bpf/btf.zig b/lib/std/os/linux/bpf/btf.zig index 7b85a618b3..39d25014da 100644 --- a/lib/std/os/linux/bpf/btf.zig +++ b/lib/std/os/linux/bpf/btf.zig @@ -109,7 +109,7 @@ pub const Enum64 = extern struct { val_hi32: i32, }; -/// array kind is followd by this struct +/// array kind is followed by this struct pub const Array = extern struct { typ: u32, index_type: u32, @@ -149,13 +149,13 @@ pub const FuncLinkage = enum { external, }; -/// var kind is followd by a single Var struct to describe additional +/// var kind is followed by a single Var struct to describe additional /// information related to the variable such as its linkage pub const Var = extern struct { linkage: u32, }; -/// datasec kind is followed by multible VarSecInfo to describe all Var kind +/// datasec kind is followed by multiple VarSecInfo to describe all Var kind /// types it contains along with it's in-section offset as well as size. pub const VarSecInfo = extern struct { typ: u32, diff --git a/lib/std/os/linux/seccomp.zig b/lib/std/os/linux/seccomp.zig index 23dbb6ee38..f10cb84aa0 100644 --- a/lib/std/os/linux/seccomp.zig +++ b/lib/std/os/linux/seccomp.zig @@ -65,7 +65,7 @@ //! //! Unfortunately, there is no easy solution for issue 5. The most reliable //! strategy is to keep testing; test newer Zig versions, different libcs, -//! different distros, and design your filter to accomidate all of them. +//! different distros, and design your filter to accommodate all of them. //! Alternatively, you could inject a filter at runtime. Since filters are //! preserved across execve(2), a filter could be setup before executing your //! program, without your program having any knowledge of this happening. 
This -- cgit v1.2.3 From 855493bb8b395970921494d3a11ccfeaac30c2dc Mon Sep 17 00:00:00 2001 From: Tw Date: Fri, 28 Apr 2023 16:06:56 +0800 Subject: bpf: correct return type of ringbuf_output helper Signed-off-by: Tw --- lib/std/os/linux/bpf/helpers.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib/std/os/linux') diff --git a/lib/std/os/linux/bpf/helpers.zig b/lib/std/os/linux/bpf/helpers.zig index dafd4de69f..6084b01e6c 100644 --- a/lib/std/os/linux/bpf/helpers.zig +++ b/lib/std/os/linux/bpf/helpers.zig @@ -143,7 +143,7 @@ pub const seq_printf = @intToPtr(*const fn (m: *kern.SeqFile, fmt: ?*const u8, f pub const seq_write = @intToPtr(*const fn (m: *kern.SeqFile, data: ?*const u8, len: u32) c_long, 127); pub const sk_cgroup_id = @intToPtr(*const fn (sk: *kern.BpfSock) u64, 128); pub const sk_ancestor_cgroup_id = @intToPtr(*const fn (sk: *kern.BpfSock, ancestor_level: c_long) u64, 129); -pub const ringbuf_output = @intToPtr(*const fn (ringbuf: ?*anyopaque, data: ?*anyopaque, size: u64, flags: u64) ?*anyopaque, 130); +pub const ringbuf_output = @intToPtr(*const fn (ringbuf: ?*anyopaque, data: ?*anyopaque, size: u64, flags: u64) c_long, 130); pub const ringbuf_reserve = @intToPtr(*const fn (ringbuf: ?*anyopaque, size: u64, flags: u64) ?*anyopaque, 131); pub const ringbuf_submit = @intToPtr(*const fn (data: ?*anyopaque, flags: u64) void, 132); pub const ringbuf_discard = @intToPtr(*const fn (data: ?*anyopaque, flags: u64) void, 133); -- cgit v1.2.3 From f0fdaf32d3b802e9db16a0753d9ff49a8667089b Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Wed, 10 May 2023 16:26:54 +0300 Subject: fix incorrect use of mutable pointers to temporary values --- lib/std/os/linux/x86.zig | 2 +- src/Module.zig | 2 +- src/link/MachO/Atom.zig | 2 +- test/behavior/array.zig | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) (limited to 'lib/std/os/linux') diff --git a/lib/std/os/linux/x86.zig b/lib/std/os/linux/x86.zig index 2e67fa6b5b..c9274e11ee 100644 --- a/lib/std/os/linux/x86.zig +++ b/lib/std/os/linux/x86.zig @@ -108,7 +108,7 @@ pub fn syscall6( ); } -pub fn socketcall(call: usize, args: [*]usize) usize { +pub fn socketcall(call: usize, args: [*]const usize) usize { return asm volatile ("int $0x80" : [ret] "={eax}" (-> usize), : [number] "{eax}" (@enumToInt(SYS.socketcall)), diff --git a/src/Module.zig b/src/Module.zig index b0c18def78..6d1a5acb09 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6098,7 +6098,7 @@ pub const PeerTypeCandidateSrc = union(enum) { none: void, /// When we want to know the the src of candidate i, look up at /// index i in this slice - override: []?LazySrcLoc, + override: []const ?LazySrcLoc, /// resolvePeerTypes originates from a @TypeOf(...) call typeof_builtin_call_node_offset: i32, diff --git a/src/link/MachO/Atom.zig b/src/link/MachO/Atom.zig index fb05595b7d..970371e455 100644 --- a/src/link/MachO/Atom.zig +++ b/src/link/MachO/Atom.zig @@ -116,7 +116,7 @@ pub fn addRelocation(macho_file: *MachO, atom_index: Index, reloc: Relocation) ! 
return addRelocations(macho_file, atom_index, &[_]Relocation{reloc}); } -pub fn addRelocations(macho_file: *MachO, atom_index: Index, relocs: []Relocation) !void { +pub fn addRelocations(macho_file: *MachO, atom_index: Index, relocs: []const Relocation) !void { const gpa = macho_file.base.allocator; const gop = try macho_file.relocs.getOrPut(gpa, atom_index); if (!gop.found_existing) { diff --git a/test/behavior/array.zig b/test/behavior/array.zig index 6cf0ab9e1d..a54aaa898e 100644 --- a/test/behavior/array.zig +++ b/test/behavior/array.zig @@ -667,7 +667,7 @@ test "array init of container level array variable" { test "runtime initialized sentinel-terminated array literal" { var c: u16 = 300; const f = &[_:0x9999]u16{c}; - const g = @ptrCast(*[4]u8, f); + const g = @ptrCast(*const [4]u8, f); try std.testing.expect(g[2] == 0x99); try std.testing.expect(g[3] == 0x99); } -- cgit v1.2.3 From 4f914c8414c5220de145b141103ea71e01b23bda Mon Sep 17 00:00:00 2001 From: bfredl Date: Mon, 5 Jun 2023 15:56:25 +0200 Subject: bpf: expose "syscall" program type and F_SLEEPABLE flag --- lib/std/os/linux/bpf.zig | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) (limited to 'lib/std/os/linux') diff --git a/lib/std/os/linux/bpf.zig b/lib/std/os/linux/bpf.zig index 63b669acd5..9f9e253547 100644 --- a/lib/std/os/linux/bpf.zig +++ b/lib/std/os/linux/bpf.zig @@ -167,6 +167,13 @@ pub const F_ANY_ALIGNMENT = 0x2; /// will regress tests to expose bugs. pub const F_TEST_RND_HI32 = 0x4; +/// If BPF_F_SLEEPABLE is used in BPF_PROG_LOAD command, the verifier will +/// restrict map and helper usage for such programs. Sleepable BPF programs can +/// only be attached to hooks where kernel execution context allows sleeping. +/// Such programs are allowed to use helpers that may sleep like +/// bpf_copy_from_user(). 
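As an aside, the new flag is meant to be passed through the extra `flags` argument this commit adds to prog_load further down. A rough sketch of that usage follows; the program type, license string and helper name are illustrative assumptions, not anything from the patch.

const std = @import("std");
const bpf = std.os.linux.bpf;

// Hypothetical helper, not part of the patch: load a sleepable program of the
// new "syscall" type. Only F_SLEEPABLE, ProgType.syscall and the prog_load
// signature come from this commit; everything else is an assumption.
fn loadSleepableProgram(insns: []const bpf.Insn) !std.os.fd_t {
    // The trailing argument is the new prog_flags value.
    return bpf.prog_load(.syscall, insns, null, "GPL", 0, bpf.F_SLEEPABLE);
}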
+pub const F_SLEEPABLE = 0x10; + /// When BPF ldimm64's insn[0].src_reg != 0 then this can have two extensions: /// insn[0].src_reg: BPF_PSEUDO_MAP_FD BPF_PSEUDO_MAP_VALUE /// insn[0].imm: map fd map fd @@ -1134,6 +1141,10 @@ pub const ProgType = enum(u32) { /// context type: bpf_sk_lookup sk_lookup, + + /// context type: void * + syscall, + _, }; @@ -1649,6 +1660,7 @@ pub fn prog_load( log: ?*Log, license: []const u8, kern_version: u32, + flags: u32, ) !fd_t { var attr = Attr{ .prog_load = std.mem.zeroes(ProgLoadAttr), @@ -1659,6 +1671,7 @@ pub fn prog_load( attr.prog_load.insn_cnt = @intCast(u32, insns.len); attr.prog_load.license = @ptrToInt(license.ptr); attr.prog_load.kern_version = kern_version; + attr.prog_load.prog_flags = flags; if (log) |l| { attr.prog_load.log_buf = @ptrToInt(l.buf.ptr); @@ -1688,8 +1701,8 @@ test "prog_load" { Insn.exit(), }; - const prog = try prog_load(.socket_filter, &good_prog, null, "MIT", 0); + const prog = try prog_load(.socket_filter, &good_prog, null, "MIT", 0, 0); defer std.os.close(prog); - try expectError(error.UnsafeProgram, prog_load(.socket_filter, &bad_prog, null, "MIT", 0)); + try expectError(error.UnsafeProgram, prog_load(.socket_filter, &bad_prog, null, "MIT", 0, 0)); } -- cgit v1.2.3 From 259315606827620daaabf82b479e59ee710097cd Mon Sep 17 00:00:00 2001 From: r00ster91 Date: Fri, 2 Jun 2023 22:02:45 -0400 Subject: migration: std.math.{min, min3, max, max3} -> `@min` & `@max` --- doc/docgen.zig | 2 +- lib/compiler_rt/divc3.zig | 3 +- lib/compiler_rt/emutls.zig | 4 +- lib/std/Build/Cache/DepTokenizer.zig | 2 +- lib/std/Thread.zig | 6 +- lib/std/Uri.zig | 4 +- lib/std/array_hash_map.zig | 6 +- lib/std/ascii.zig | 2 +- lib/std/compress/lzma/decode.zig | 2 +- lib/std/crypto/blake3.zig | 8 +-- lib/std/crypto/ff.zig | 2 +- lib/std/crypto/ghash_polyval.zig | 2 +- lib/std/crypto/keccak_p.zig | 4 +- lib/std/crypto/poly1305.zig | 2 +- lib/std/crypto/salsa20.zig | 2 +- lib/std/crypto/scrypt.zig | 4 +- lib/std/crypto/sha3.zig | 2 +- lib/std/crypto/siphash.zig | 2 +- lib/std/debug.zig | 4 +- lib/std/dynamic_library.zig | 3 +- lib/std/event/loop.zig | 2 +- lib/std/fifo.zig | 2 +- lib/std/fmt.zig | 18 +++--- lib/std/hash/wyhash.zig | 2 +- lib/std/hash_map.zig | 6 +- lib/std/heap/arena_allocator.zig | 2 +- lib/std/heap/memory_pool.zig | 4 +- lib/std/http/protocol.zig | 2 +- lib/std/io/fixed_buffer_stream.zig | 4 +- lib/std/io/limited_reader.zig | 2 +- lib/std/io/reader.zig | 2 +- lib/std/io/writer.zig | 2 +- lib/std/math.zig | 103 +++----------------------------- lib/std/math/big/int.zig | 96 ++++++++++++++--------------- lib/std/math/ldexp.zig | 2 +- lib/std/mem.zig | 12 ++-- lib/std/net.zig | 8 +-- lib/std/os/linux.zig | 4 +- lib/std/os/linux/io_uring.zig | 4 +- lib/std/os/windows.zig | 4 +- lib/std/pdb.zig | 2 +- lib/std/rand.zig | 2 +- lib/std/sort/block.zig | 10 ++-- lib/std/zig/render.zig | 4 +- lib/std/zig/system/NativeTargetInfo.zig | 6 +- src/Sema.zig | 10 ++-- src/TypedValue.zig | 10 ++-- src/arch/x86_64/CodeGen.zig | 4 +- src/link/Elf.zig | 2 +- src/link/MachO/CodeSignature.zig | 6 +- src/link/MachO/Object.zig | 2 +- src/link/Wasm.zig | 2 +- src/link/Wasm/Object.zig | 2 +- src/main.zig | 2 +- src/translate_c.zig | 2 +- src/translate_c/ast.zig | 14 ++--- src/type.zig | 2 +- src/value.zig | 8 +-- 58 files changed, 173 insertions(+), 264 deletions(-) (limited to 'lib/std/os/linux') diff --git a/doc/docgen.zig b/doc/docgen.zig index bdbde6f5d2..4a9e33fbdd 100644 --- a/doc/docgen.zig +++ b/doc/docgen.zig @@ -276,7 +276,7 @@ fn parseError(tokenizer: 
*Tokenizer, token: Token, comptime fmt: []const u8, arg } } { - const caret_count = std.math.min(token.end, loc.line_end) - token.start; + const caret_count = @min(token.end, loc.line_end) - token.start; var i: usize = 0; while (i < caret_count) : (i += 1) { print("~", .{}); diff --git a/lib/compiler_rt/divc3.zig b/lib/compiler_rt/divc3.zig index 4e4dba2856..c4241c1483 100644 --- a/lib/compiler_rt/divc3.zig +++ b/lib/compiler_rt/divc3.zig @@ -3,7 +3,6 @@ const isNan = std.math.isNan; const isInf = std.math.isInf; const scalbn = std.math.scalbn; const ilogb = std.math.ilogb; -const max = std.math.max; const fabs = std.math.fabs; const maxInt = std.math.maxInt; const minInt = std.math.minInt; @@ -17,7 +16,7 @@ pub inline fn divc3(comptime T: type, a: T, b: T, c_in: T, d_in: T) Complex(T) { var d = d_in; // logbw used to prevent under/over-flow - const logbw = ilogb(max(fabs(c), fabs(d))); + const logbw = ilogb(@max(fabs(c), fabs(d))); const logbw_finite = logbw != maxInt(i32) and logbw != minInt(i32); const ilogbw = if (logbw_finite) b: { c = scalbn(c, -logbw); diff --git a/lib/compiler_rt/emutls.zig b/lib/compiler_rt/emutls.zig index 05a2de97a8..47c71efadd 100644 --- a/lib/compiler_rt/emutls.zig +++ b/lib/compiler_rt/emutls.zig @@ -49,7 +49,7 @@ const simple_allocator = struct { /// Allocate a memory chunk. pub fn advancedAlloc(alignment: u29, size: usize) [*]u8 { - const minimal_alignment = std.math.max(@alignOf(usize), alignment); + const minimal_alignment = @max(@alignOf(usize), alignment); var aligned_ptr: ?*anyopaque = undefined; if (std.c.posix_memalign(&aligned_ptr, minimal_alignment, size) != 0) { @@ -170,7 +170,7 @@ const current_thread_storage = struct { // make it to contains at least 16 objects (to avoid too much // reallocation at startup). - const size = std.math.max(16, index); + const size = @max(16, index); // create a new array and store it. var array: *ObjectArray = ObjectArray.init(size); diff --git a/lib/std/Build/Cache/DepTokenizer.zig b/lib/std/Build/Cache/DepTokenizer.zig index 1a4e2ddb74..0e5224edc0 100644 --- a/lib/std/Build/Cache/DepTokenizer.zig +++ b/lib/std/Build/Cache/DepTokenizer.zig @@ -983,7 +983,7 @@ fn hexDump(out: anytype, bytes: []const u8) !void { try printDecValue(out, offset, 8); try out.writeAll(":"); try out.writeAll(" "); - var end1 = std.math.min(offset + n, offset + 8); + var end1 = @min(offset + n, offset + 8); for (bytes[offset..end1]) |b| { try out.writeAll(" "); try printHexValue(out, b, 2); diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig index ed6a9383e3..76650a9072 100644 --- a/lib/std/Thread.zig +++ b/lib/std/Thread.zig @@ -541,7 +541,7 @@ const WindowsThreadImpl = struct { // Going lower makes it default to that specified in the executable (~1mb). // Its also fine if the limit here is incorrect as stack size is only a hint. var stack_size = std.math.cast(u32, config.stack_size) orelse std.math.maxInt(u32); - stack_size = std.math.max(64 * 1024, stack_size); + stack_size = @max(64 * 1024, stack_size); instance.thread.thread_handle = windows.kernel32.CreateThread( null, @@ -690,7 +690,7 @@ const PosixThreadImpl = struct { defer assert(c.pthread_attr_destroy(&attr) == .SUCCESS); // Use the same set of parameters used by the libc-less impl. 
- const stack_size = std.math.max(config.stack_size, c.PTHREAD_STACK_MIN); + const stack_size = @max(config.stack_size, c.PTHREAD_STACK_MIN); assert(c.pthread_attr_setstacksize(&attr, stack_size) == .SUCCESS); assert(c.pthread_attr_setguardsize(&attr, std.mem.page_size) == .SUCCESS); @@ -930,7 +930,7 @@ const LinuxThreadImpl = struct { var bytes: usize = page_size; guard_offset = bytes; - bytes += std.math.max(page_size, config.stack_size); + bytes += @max(page_size, config.stack_size); bytes = std.mem.alignForward(bytes, page_size); stack_offset = bytes; diff --git a/lib/std/Uri.zig b/lib/std/Uri.zig index 7a9755bd28..198ab461ae 100644 --- a/lib/std/Uri.zig +++ b/lib/std/Uri.zig @@ -177,13 +177,13 @@ pub fn parseWithoutScheme(text: []const u8) ParseError!Uri { if (std.mem.lastIndexOf(u8, authority, ":")) |index| { if (index >= end_of_host) { // if not part of the V6 address field - end_of_host = std.math.min(end_of_host, index); + end_of_host = @min(end_of_host, index); uri.port = std.fmt.parseInt(u16, authority[index + 1 ..], 10) catch return error.InvalidPort; } } } else if (std.mem.lastIndexOf(u8, authority, ":")) |index| { if (index >= start_of_host) { // if not part of the userinfo field - end_of_host = std.math.min(end_of_host, index); + end_of_host = @min(end_of_host, index); uri.port = std.fmt.parseInt(u16, authority[index + 1 ..], 10) catch return error.InvalidPort; } } diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig index 55b9aac6e4..b46b5c12f0 100644 --- a/lib/std/array_hash_map.zig +++ b/lib/std/array_hash_map.zig @@ -815,9 +815,9 @@ pub fn ArrayHashMapUnmanaged( /// no longer guaranteed that no allocations will be performed. pub fn capacity(self: Self) usize { const entry_cap = self.entries.capacity; - const header = self.index_header orelse return math.min(linear_scan_max, entry_cap); + const header = self.index_header orelse return @min(linear_scan_max, entry_cap); const indexes_cap = header.capacity(); - return math.min(entry_cap, indexes_cap); + return @min(entry_cap, indexes_cap); } /// Clobbers any existing data. To detect if a put would clobber @@ -1821,7 +1821,7 @@ fn Index(comptime I: type) type { /// length * the size of an Index(u32). The index is 8 bytes (3 bits repr) /// and max_usize + 1 is not representable, so we need to subtract out 4 bits. const max_representable_index_len = @bitSizeOf(usize) - 4; -const max_bit_index = math.min(32, max_representable_index_len); +const max_bit_index = @min(32, max_representable_index_len); const min_bit_index = 5; const max_capacity = (1 << max_bit_index) - 1; const index_capacities = blk: { diff --git a/lib/std/ascii.zig b/lib/std/ascii.zig index 941f398f20..e47ef4db65 100644 --- a/lib/std/ascii.zig +++ b/lib/std/ascii.zig @@ -422,7 +422,7 @@ test "indexOfIgnoreCase" { /// Returns the lexicographical order of two slices. O(n). 
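A short usage sketch (not taken from the patch) of the function below:

const std = @import("std");

test "orderIgnoreCase usage sketch" {
    try std.testing.expect(std.ascii.orderIgnoreCase("Hello", "hello") == .eq);
    try std.testing.expect(std.ascii.orderIgnoreCase("abc", "abd") == .lt);
    try std.testing.expect(std.ascii.orderIgnoreCase("abcd", "ABC") == .gt);
}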
pub fn orderIgnoreCase(lhs: []const u8, rhs: []const u8) std.math.Order { - const n = std.math.min(lhs.len, rhs.len); + const n = @min(lhs.len, rhs.len); var i: usize = 0; while (i < n) : (i += 1) { switch (std.math.order(toLower(lhs[i]), toLower(rhs[i]))) { diff --git a/lib/std/compress/lzma/decode.zig b/lib/std/compress/lzma/decode.zig index dc220d8e87..f539abf8b1 100644 --- a/lib/std/compress/lzma/decode.zig +++ b/lib/std/compress/lzma/decode.zig @@ -59,7 +59,7 @@ pub const Params = struct { const pb = @intCast(u3, props); const dict_size_provided = try reader.readIntLittle(u32); - const dict_size = math.max(0x1000, dict_size_provided); + const dict_size = @max(0x1000, dict_size_provided); const unpacked_size = switch (options.unpacked_size) { .read_from_header => blk: { diff --git a/lib/std/crypto/blake3.zig b/lib/std/crypto/blake3.zig index fb580fda13..7ad1511e79 100644 --- a/lib/std/crypto/blake3.zig +++ b/lib/std/crypto/blake3.zig @@ -20,7 +20,7 @@ const ChunkIterator = struct { } fn next(self: *ChunkIterator) ?[]u8 { - const next_chunk = self.slice[0..math.min(self.chunk_len, self.slice.len)]; + const next_chunk = self.slice[0..@min(self.chunk_len, self.slice.len)]; self.slice = self.slice[next_chunk.len..]; return if (next_chunk.len > 0) next_chunk else null; } @@ -283,7 +283,7 @@ const ChunkState = struct { fn fillBlockBuf(self: *ChunkState, input: []const u8) []const u8 { const want = BLOCK_LEN - self.block_len; - const take = math.min(want, input.len); + const take = @min(want, input.len); @memcpy(self.block[self.block_len..][0..take], input[0..take]); self.block_len += @truncate(u8, take); return input[take..]; @@ -450,7 +450,7 @@ pub const Blake3 = struct { // Compress input bytes into the current chunk state. const want = CHUNK_LEN - self.chunk_state.len(); - const take = math.min(want, input.len); + const take = @min(want, input.len); self.chunk_state.update(input[0..take]); input = input[take..]; } @@ -663,7 +663,7 @@ fn testBlake3(hasher: *Blake3, input_len: usize, expected_hex: [262]u8) !void { // Write repeating input pattern to hasher var input_counter = input_len; while (input_counter > 0) { - const update_len = math.min(input_counter, input_pattern.len); + const update_len = @min(input_counter, input_pattern.len); hasher.update(input_pattern[0..update_len]); input_counter -= update_len; } diff --git a/lib/std/crypto/ff.zig b/lib/std/crypto/ff.zig index 84753ddefb..37e3d1c1b3 100644 --- a/lib/std/crypto/ff.zig +++ b/lib/std/crypto/ff.zig @@ -570,7 +570,7 @@ pub fn Modulus(comptime max_bits: comptime_int) type { var out = self.zero; var i = x.limbs_count() - 1; if (self.limbs_count() >= 2) { - const start = math.min(i, self.limbs_count() - 2); + const start = @min(i, self.limbs_count() - 2); var j = start; while (true) : (j -= 1) { out.v.limbs.set(j, x.limbs.get(i)); diff --git a/lib/std/crypto/ghash_polyval.zig b/lib/std/crypto/ghash_polyval.zig index 46645d710f..2fbff25f72 100644 --- a/lib/std/crypto/ghash_polyval.zig +++ b/lib/std/crypto/ghash_polyval.zig @@ -363,7 +363,7 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type { var mb = m; if (st.leftover > 0) { - const want = math.min(block_length - st.leftover, mb.len); + const want = @min(block_length - st.leftover, mb.len); const mc = mb[0..want]; for (mc, 0..) 
|x, i| { st.buf[st.leftover + i] = x; diff --git a/lib/std/crypto/keccak_p.zig b/lib/std/crypto/keccak_p.zig index 9226f2f6d4..ddc9b1b847 100644 --- a/lib/std/crypto/keccak_p.zig +++ b/lib/std/crypto/keccak_p.zig @@ -214,7 +214,7 @@ pub fn State(comptime f: u11, comptime capacity: u11, comptime delim: u8, compti pub fn absorb(self: *Self, bytes_: []const u8) void { var bytes = bytes_; if (self.offset > 0) { - const left = math.min(rate - self.offset, bytes.len); + const left = @min(rate - self.offset, bytes.len); @memcpy(self.buf[self.offset..][0..left], bytes[0..left]); self.offset += left; if (self.offset == rate) { @@ -249,7 +249,7 @@ pub fn State(comptime f: u11, comptime capacity: u11, comptime delim: u8, compti pub fn squeeze(self: *Self, out: []u8) void { var i: usize = 0; while (i < out.len) : (i += rate) { - const left = math.min(rate, out.len - i); + const left = @min(rate, out.len - i); self.st.extractBytes(out[i..][0..left]); self.st.permuteR(rounds); } diff --git a/lib/std/crypto/poly1305.zig b/lib/std/crypto/poly1305.zig index a2873f1145..51e1c2ab24 100644 --- a/lib/std/crypto/poly1305.zig +++ b/lib/std/crypto/poly1305.zig @@ -112,7 +112,7 @@ pub const Poly1305 = struct { // handle leftover if (st.leftover > 0) { - const want = std.math.min(block_length - st.leftover, mb.len); + const want = @min(block_length - st.leftover, mb.len); const mc = mb[0..want]; for (mc, 0..) |x, i| { st.buf[st.leftover + i] = x; diff --git a/lib/std/crypto/salsa20.zig b/lib/std/crypto/salsa20.zig index 7f57e6cecb..c8a639ad0b 100644 --- a/lib/std/crypto/salsa20.zig +++ b/lib/std/crypto/salsa20.zig @@ -404,7 +404,7 @@ pub const XSalsa20Poly1305 = struct { debug.assert(c.len == m.len); const extended = extend(rounds, k, npub); var block0 = [_]u8{0} ** 64; - const mlen0 = math.min(32, c.len); + const mlen0 = @min(32, c.len); @memcpy(block0[32..][0..mlen0], c[0..mlen0]); Salsa20.xor(block0[0..], block0[0..], 0, extended.key, extended.nonce); var mac = Poly1305.init(block0[0..32]); diff --git a/lib/std/crypto/scrypt.zig b/lib/std/crypto/scrypt.zig index b8e8ef55e2..97dd9b95d0 100644 --- a/lib/std/crypto/scrypt.zig +++ b/lib/std/crypto/scrypt.zig @@ -143,7 +143,7 @@ pub const Params = struct { /// Create parameters from ops and mem limits, where mem_limit given in bytes pub fn fromLimits(ops_limit: u64, mem_limit: usize) Self { - const ops = math.max(32768, ops_limit); + const ops = @max(32768, ops_limit); const r: u30 = 8; if (ops < mem_limit / 32) { const max_n = ops / (r * 4); @@ -151,7 +151,7 @@ pub const Params = struct { } else { const max_n = mem_limit / (@intCast(usize, r) * 128); const ln = @intCast(u6, math.log2(max_n)); - const max_rp = math.min(0x3fffffff, (ops / 4) / (@as(u64, 1) << ln)); + const max_rp = @min(0x3fffffff, (ops / 4) / (@as(u64, 1) << ln)); return Self{ .r = r, .p = @intCast(u30, max_rp / @as(u64, r)), .ln = ln }; } } diff --git a/lib/std/crypto/sha3.zig b/lib/std/crypto/sha3.zig index 23f9e65534..0226490881 100644 --- a/lib/std/crypto/sha3.zig +++ b/lib/std/crypto/sha3.zig @@ -148,7 +148,7 @@ fn ShakeLike(comptime security_level: u11, comptime delim: u8, comptime rounds: if (self.offset > 0) { const left = self.buf.len - self.offset; if (left > 0) { - const n = math.min(left, out.len); + const n = @min(left, out.len); @memcpy(out[0..n], self.buf[self.offset..][0..n]); out = out[n..]; self.offset += n; diff --git a/lib/std/crypto/siphash.zig b/lib/std/crypto/siphash.zig index 37d219f868..70f4f2fd53 100644 --- a/lib/std/crypto/siphash.zig +++ b/lib/std/crypto/siphash.zig @@ 
-433,7 +433,7 @@ test "iterative non-divisible update" { var siphash = Siphash.init(key); var i: usize = 0; while (i < end) : (i += 7) { - siphash.update(buf[i..std.math.min(i + 7, end)]); + siphash.update(buf[i..@min(i + 7, end)]); } const iterative_hash = siphash.finalInt(); diff --git a/lib/std/debug.zig b/lib/std/debug.zig index ea0d467085..3015c30bfb 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -198,7 +198,7 @@ pub fn captureStackTrace(first_address: ?usize, stack_trace: *std.builtin.StackT stack_trace.index = 0; return; }; - const end_index = math.min(first_index + addrs.len, n); + const end_index = @min(first_index + addrs.len, n); const slice = addr_buf[first_index..end_index]; // We use a for loop here because slice and addrs may alias. for (slice, 0..) |addr, i| { @@ -380,7 +380,7 @@ pub fn writeStackTrace( _ = allocator; if (builtin.strip_debug_info) return error.MissingDebugInfo; var frame_index: usize = 0; - var frames_left: usize = std.math.min(stack_trace.index, stack_trace.instruction_addresses.len); + var frames_left: usize = @min(stack_trace.index, stack_trace.instruction_addresses.len); while (frames_left != 0) : ({ frames_left -= 1; diff --git a/lib/std/dynamic_library.zig b/lib/std/dynamic_library.zig index 59ad7429cf..94da2f4d6d 100644 --- a/lib/std/dynamic_library.zig +++ b/lib/std/dynamic_library.zig @@ -8,7 +8,6 @@ const elf = std.elf; const windows = std.os.windows; const system = std.os.system; const maxInt = std.math.maxInt; -const max = std.math.max; pub const DynLib = switch (builtin.os.tag) { .linux => if (builtin.link_libc) DlDynlib else ElfDynLib, @@ -152,7 +151,7 @@ pub const ElfDynLib = struct { }) { const ph = @intToPtr(*elf.Phdr, ph_addr); switch (ph.p_type) { - elf.PT_LOAD => virt_addr_end = max(virt_addr_end, ph.p_vaddr + ph.p_memsz), + elf.PT_LOAD => virt_addr_end = @max(virt_addr_end, ph.p_vaddr + ph.p_memsz), elf.PT_DYNAMIC => maybe_dynv = @intToPtr([*]usize, elf_addr + ph.p_offset), else => {}, } diff --git a/lib/std/event/loop.zig b/lib/std/event/loop.zig index c8d41d3eb0..bc0162423b 100644 --- a/lib/std/event/loop.zig +++ b/lib/std/event/loop.zig @@ -179,7 +179,7 @@ pub const Loop = struct { // We need at least one of these in case the fs thread wants to use onNextTick const extra_thread_count = thread_count - 1; - const resume_node_count = std.math.max(extra_thread_count, 1); + const resume_node_count = @max(extra_thread_count, 1); self.eventfd_resume_nodes = try self.arena.allocator().alloc( std.atomic.Stack(ResumeNode.EventFd).Node, resume_node_count, diff --git a/lib/std/fifo.zig b/lib/std/fifo.zig index bc88e61d76..535376d38f 100644 --- a/lib/std/fifo.zig +++ b/lib/std/fifo.zig @@ -150,7 +150,7 @@ pub fn LinearFifo( start -= self.buf.len; return self.buf[start .. 
start + (self.count - offset)]; } else { - const end = math.min(self.head + self.count, self.buf.len); + const end = @min(self.head + self.count, self.buf.len); return self.buf[start..end]; } } diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig index 6896d0a7a0..c9d8e611ca 100644 --- a/lib/std/fmt.zig +++ b/lib/std/fmt.zig @@ -921,8 +921,8 @@ fn formatSizeImpl(comptime base: comptime_int) type { const log2 = math.log2(value); const magnitude = switch (base) { - 1000 => math.min(log2 / comptime math.log2(1000), mags_si.len - 1), - 1024 => math.min(log2 / 10, mags_iec.len - 1), + 1000 => @min(log2 / comptime math.log2(1000), mags_si.len - 1), + 1024 => @min(log2 / 10, mags_iec.len - 1), else => unreachable, }; const new_value = lossyCast(f64, value) / math.pow(f64, lossyCast(f64, base), lossyCast(f64, magnitude)); @@ -1103,7 +1103,7 @@ pub fn formatFloatScientific( var printed: usize = 0; if (float_decimal.digits.len > 1) { - const num_digits = math.min(float_decimal.digits.len, precision + 1); + const num_digits = @min(float_decimal.digits.len, precision + 1); try writer.writeAll(float_decimal.digits[1..num_digits]); printed += num_digits - 1; } @@ -1116,7 +1116,7 @@ pub fn formatFloatScientific( try writer.writeAll(float_decimal.digits[0..1]); try writer.writeAll("."); if (float_decimal.digits.len > 1) { - const num_digits = if (@TypeOf(value) == f32) math.min(@as(usize, 9), float_decimal.digits.len) else float_decimal.digits.len; + const num_digits = if (@TypeOf(value) == f32) @min(@as(usize, 9), float_decimal.digits.len) else float_decimal.digits.len; try writer.writeAll(float_decimal.digits[1..num_digits]); } else { @@ -1299,7 +1299,7 @@ pub fn formatFloatDecimal( var num_digits_whole = if (float_decimal.exp > 0) @intCast(usize, float_decimal.exp) else 0; // the actual slice into the buffer, we may need to zero-pad between num_digits_whole and this. - var num_digits_whole_no_pad = math.min(num_digits_whole, float_decimal.digits.len); + var num_digits_whole_no_pad = @min(num_digits_whole, float_decimal.digits.len); if (num_digits_whole > 0) { // We may have to zero pad, for instance 1e4 requires zero padding. @@ -1326,7 +1326,7 @@ pub fn formatFloatDecimal( // Zero-fill until we reach significant digits or run out of precision. if (float_decimal.exp <= 0) { const zero_digit_count = @intCast(usize, -float_decimal.exp); - const zeros_to_print = math.min(zero_digit_count, precision); + const zeros_to_print = @min(zero_digit_count, precision); var i: usize = 0; while (i < zeros_to_print) : (i += 1) { @@ -1357,7 +1357,7 @@ pub fn formatFloatDecimal( var num_digits_whole = if (float_decimal.exp > 0) @intCast(usize, float_decimal.exp) else 0; // the actual slice into the buffer, we may need to zero-pad between num_digits_whole and this. - var num_digits_whole_no_pad = math.min(num_digits_whole, float_decimal.digits.len); + var num_digits_whole_no_pad = @min(num_digits_whole, float_decimal.digits.len); if (num_digits_whole > 0) { // We may have to zero pad, for instance 1e4 requires zero padding. 
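The fmt.zig hunks are typical of the rest of this migration commit: std.math.min/max (and min3/max3) become the @min/@max builtins, which also accept more than two operands. A small illustrative test, not taken from any of the patches:

const std = @import("std");

test "@min and @max replace std.math.min and friends" {
    const a: u16 = 999;
    const b: u32 = 10;
    try std.testing.expect(@min(a, b) == 10); // previously std.math.min(a, b)
    try std.testing.expect(@max(a, b) == 999); // previously std.math.max(a, b)
    // min3/max3 have no direct counterpart; the builtins take any number of operands.
    try std.testing.expect(@min(3, 1, 2) == 1);
    try std.testing.expect(@max(3, 1, 2) == 3);
}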
@@ -1410,12 +1410,12 @@ pub fn formatInt( // The type must have the same size as `base` or be wider in order for the // division to work - const min_int_bits = comptime math.max(value_info.bits, 8); + const min_int_bits = comptime @max(value_info.bits, 8); const MinInt = std.meta.Int(.unsigned, min_int_bits); const abs_value = math.absCast(int_value); // The worst case in terms of space needed is base 2, plus 1 for the sign - var buf: [1 + math.max(value_info.bits, 1)]u8 = undefined; + var buf: [1 + @max(@as(comptime_int, value_info.bits), 1)]u8 = undefined; var a: MinInt = abs_value; var index: usize = buf.len; diff --git a/lib/std/hash/wyhash.zig b/lib/std/hash/wyhash.zig index 3426bca9f4..c36c3fe87c 100644 --- a/lib/std/hash/wyhash.zig +++ b/lib/std/hash/wyhash.zig @@ -252,7 +252,7 @@ test "iterative non-divisible update" { var wy = Wyhash.init(seed); var i: usize = 0; while (i < end) : (i += 33) { - wy.update(buf[i..std.math.min(i + 33, end)]); + wy.update(buf[i..@min(i + 33, end)]); } const iterative_hash = wy.final(); diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig index 041d99606e..5b539ddaad 100644 --- a/lib/std/hash_map.zig +++ b/lib/std/hash_map.zig @@ -1507,7 +1507,7 @@ pub fn HashMapUnmanaged( fn grow(self: *Self, allocator: Allocator, new_capacity: Size, ctx: Context) Allocator.Error!void { @setCold(true); - const new_cap = std.math.max(new_capacity, minimal_capacity); + const new_cap = @max(new_capacity, minimal_capacity); assert(new_cap > self.capacity()); assert(std.math.isPowerOfTwo(new_cap)); @@ -1540,7 +1540,7 @@ pub fn HashMapUnmanaged( const header_align = @alignOf(Header); const key_align = if (@sizeOf(K) == 0) 1 else @alignOf(K); const val_align = if (@sizeOf(V) == 0) 1 else @alignOf(V); - const max_align = comptime math.max3(header_align, key_align, val_align); + const max_align = comptime @max(header_align, key_align, val_align); const meta_size = @sizeOf(Header) + new_capacity * @sizeOf(Metadata); comptime assert(@alignOf(Metadata) == 1); @@ -1575,7 +1575,7 @@ pub fn HashMapUnmanaged( const header_align = @alignOf(Header); const key_align = if (@sizeOf(K) == 0) 1 else @alignOf(K); const val_align = if (@sizeOf(V) == 0) 1 else @alignOf(V); - const max_align = comptime math.max3(header_align, key_align, val_align); + const max_align = comptime @max(header_align, key_align, val_align); const cap = self.capacity(); const meta_size = @sizeOf(Header) + cap * @sizeOf(Metadata); diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig index c0eeae6e61..c7e0569067 100644 --- a/lib/std/heap/arena_allocator.zig +++ b/lib/std/heap/arena_allocator.zig @@ -110,7 +110,7 @@ pub const ArenaAllocator = struct { // value. const requested_capacity = switch (mode) { .retain_capacity => self.queryCapacity(), - .retain_with_limit => |limit| std.math.min(limit, self.queryCapacity()), + .retain_with_limit => |limit| @min(limit, self.queryCapacity()), .free_all => 0, }; if (requested_capacity == 0) { diff --git a/lib/std/heap/memory_pool.zig b/lib/std/heap/memory_pool.zig index ca6eb7f518..3fc7dfbfca 100644 --- a/lib/std/heap/memory_pool.zig +++ b/lib/std/heap/memory_pool.zig @@ -40,11 +40,11 @@ pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type /// Size of the memory pool items. This is not necessarily the same /// as `@sizeOf(Item)` as the pool also uses the items for internal means. 
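To make the comment above concrete, a small sketch, assuming the pool is reachable as std.heap.MemoryPool; the test itself is an illustration, not from the patch:

const std = @import("std");

test "pool item size accounts for the internal free-list node" {
    // Even for a 1-byte item the pool needs room for its Node (a single
    // optional pointer), so item_size is at least pointer-sized.
    const Pool = std.heap.MemoryPool(u8);
    try std.testing.expect(Pool.item_size >= @sizeOf(u8));
    try std.testing.expect(Pool.item_size >= @sizeOf(usize));
}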
- pub const item_size = std.math.max(@sizeOf(Node), @sizeOf(Item)); + pub const item_size = @max(@sizeOf(Node), @sizeOf(Item)); /// Alignment of the memory pool items. This is not necessarily the same /// as `@alignOf(Item)` as the pool also uses the items for internal means. - pub const item_alignment = std.math.max(@alignOf(Node), pool_options.alignment orelse 0); + pub const item_alignment = @max(@alignOf(Node), pool_options.alignment orelse 0); const Node = struct { next: ?*@This(), diff --git a/lib/std/http/protocol.zig b/lib/std/http/protocol.zig index b001b3cddf..b5c2cdfa0c 100644 --- a/lib/std/http/protocol.zig +++ b/lib/std/http/protocol.zig @@ -82,7 +82,7 @@ pub const HeadersParser = struct { /// If the amount returned is less than `bytes.len`, you may assume that the parser is in a content state and the /// first byte of content is located at `bytes[result]`. pub fn findHeadersEnd(r: *HeadersParser, bytes: []const u8) u32 { - const vector_len: comptime_int = comptime std.math.max(std.simd.suggestVectorSize(u8) orelse 1, 8); + const vector_len: comptime_int = comptime @max(std.simd.suggestVectorSize(u8) orelse 1, 8); const len = @intCast(u32, bytes.len); var index: u32 = 0; diff --git a/lib/std/io/fixed_buffer_stream.zig b/lib/std/io/fixed_buffer_stream.zig index c170dd1f74..27b978744c 100644 --- a/lib/std/io/fixed_buffer_stream.zig +++ b/lib/std/io/fixed_buffer_stream.zig @@ -76,7 +76,7 @@ pub fn FixedBufferStream(comptime Buffer: type) type { } pub fn seekTo(self: *Self, pos: u64) SeekError!void { - self.pos = if (std.math.cast(usize, pos)) |x| std.math.min(self.buffer.len, x) else self.buffer.len; + self.pos = if (std.math.cast(usize, pos)) |x| @min(self.buffer.len, x) else self.buffer.len; } pub fn seekBy(self: *Self, amt: i64) SeekError!void { @@ -91,7 +91,7 @@ pub fn FixedBufferStream(comptime Buffer: type) type { } else { const amt_usize = std.math.cast(usize, amt) orelse std.math.maxInt(usize); const new_pos = std.math.add(usize, self.pos, amt_usize) catch std.math.maxInt(usize); - self.pos = std.math.min(self.buffer.len, new_pos); + self.pos = @min(self.buffer.len, new_pos); } } diff --git a/lib/std/io/limited_reader.zig b/lib/std/io/limited_reader.zig index aa00af0d09..09d76007da 100644 --- a/lib/std/io/limited_reader.zig +++ b/lib/std/io/limited_reader.zig @@ -14,7 +14,7 @@ pub fn LimitedReader(comptime ReaderType: type) type { const Self = @This(); pub fn read(self: *Self, dest: []u8) Error!usize { - const max_read = std.math.min(self.bytes_left, dest.len); + const max_read = @min(self.bytes_left, dest.len); const n = try self.inner_reader.read(dest[0..max_read]); self.bytes_left -= n; return n; diff --git a/lib/std/io/reader.zig b/lib/std/io/reader.zig index 344515d07b..abdca56d3c 100644 --- a/lib/std/io/reader.zig +++ b/lib/std/io/reader.zig @@ -325,7 +325,7 @@ pub fn Reader( var remaining = num_bytes; while (remaining > 0) { - const amt = std.math.min(remaining, options.buf_size); + const amt = @min(remaining, options.buf_size); try self.readNoEof(buf[0..amt]); remaining -= amt; } diff --git a/lib/std/io/writer.zig b/lib/std/io/writer.zig index cfc76de452..d0b7fa11ee 100644 --- a/lib/std/io/writer.zig +++ b/lib/std/io/writer.zig @@ -39,7 +39,7 @@ pub fn Writer( var remaining: usize = n; while (remaining > 0) { - const to_write = std.math.min(remaining, bytes.len); + const to_write = @min(remaining, bytes.len); try self.writeAll(bytes[0..to_write]); remaining -= to_write; } diff --git a/lib/std/math.zig b/lib/std/math.zig index 46a7e40a37..e60e964747 100644 --- 
a/lib/std/math.zig +++ b/lib/std/math.zig @@ -165,7 +165,7 @@ pub fn approxEqRel(comptime T: type, x: T, y: T, tolerance: T) bool { if (isNan(x) or isNan(y)) return false; - return @fabs(x - y) <= max(@fabs(x), @fabs(y)) * tolerance; + return @fabs(x - y) <= @max(@fabs(x), @fabs(y)) * tolerance; } test "approxEqAbs and approxEqRel" { @@ -434,104 +434,15 @@ pub fn Min(comptime A: type, comptime B: type) type { return @TypeOf(@as(A, 0) + @as(B, 0)); } -/// Returns the smaller number. When one parameter's type's full range -/// fits in the other, the return type is the smaller type. -pub fn min(x: anytype, y: anytype) Min(@TypeOf(x), @TypeOf(y)) { - const Result = Min(@TypeOf(x), @TypeOf(y)); - if (x < y) { - // TODO Zig should allow this as an implicit cast because x is - // immutable and in this scope it is known to fit in the - // return type. - switch (@typeInfo(Result)) { - .Int => return @intCast(Result, x), - else => return x, - } - } else { - // TODO Zig should allow this as an implicit cast because y is - // immutable and in this scope it is known to fit in the - // return type. - switch (@typeInfo(Result)) { - .Int => return @intCast(Result, y), - else => return y, - } - } -} - -test "min" { - try testing.expect(min(@as(i32, -1), @as(i32, 2)) == -1); - { - var a: u16 = 999; - var b: u32 = 10; - var result = min(a, b); - try testing.expect(@TypeOf(result) == u16); - try testing.expect(result == 10); - } - { - var a: f64 = 10.34; - var b: f32 = 999.12; - var result = min(a, b); - try testing.expect(@TypeOf(result) == f64); - try testing.expect(result == 10.34); - } - { - var a: i8 = -127; - var b: i16 = -200; - var result = min(a, b); - try testing.expect(@TypeOf(result) == i16); - try testing.expect(result == -200); - } - { - const a = 10.34; - var b: f32 = 999.12; - var result = min(a, b); - try testing.expect(@TypeOf(result) == f32); - try testing.expect(result == 10.34); - } -} - -/// Finds the minimum of three numbers. -pub fn min3(x: anytype, y: anytype, z: anytype) @TypeOf(x, y, z) { - return min(x, min(y, z)); -} - -test "min3" { - try testing.expect(min3(@as(i32, 0), @as(i32, 1), @as(i32, 2)) == 0); - try testing.expect(min3(@as(i32, 0), @as(i32, 2), @as(i32, 1)) == 0); - try testing.expect(min3(@as(i32, 1), @as(i32, 0), @as(i32, 2)) == 0); - try testing.expect(min3(@as(i32, 1), @as(i32, 2), @as(i32, 0)) == 0); - try testing.expect(min3(@as(i32, 2), @as(i32, 0), @as(i32, 1)) == 0); - try testing.expect(min3(@as(i32, 2), @as(i32, 1), @as(i32, 0)) == 0); -} - -/// Returns the maximum of two numbers. Return type is the one with the -/// larger range. -pub fn max(x: anytype, y: anytype) @TypeOf(x, y) { - return if (x > y) x else y; -} - -test "max" { - try testing.expect(max(@as(i32, -1), @as(i32, 2)) == 2); - try testing.expect(max(@as(i32, 2), @as(i32, -1)) == 2); -} - -/// Finds the maximum of three numbers. 
-pub fn max3(x: anytype, y: anytype, z: anytype) @TypeOf(x, y, z) { - return max(x, max(y, z)); -} - -test "max3" { - try testing.expect(max3(@as(i32, 0), @as(i32, 1), @as(i32, 2)) == 2); - try testing.expect(max3(@as(i32, 0), @as(i32, 2), @as(i32, 1)) == 2); - try testing.expect(max3(@as(i32, 1), @as(i32, 0), @as(i32, 2)) == 2); - try testing.expect(max3(@as(i32, 1), @as(i32, 2), @as(i32, 0)) == 2); - try testing.expect(max3(@as(i32, 2), @as(i32, 0), @as(i32, 1)) == 2); - try testing.expect(max3(@as(i32, 2), @as(i32, 1), @as(i32, 0)) == 2); -} +pub const min = @compileError("deprecated; use @min instead"); +pub const max = @compileError("deprecated; use @max instead"); +pub const min3 = @compileError("deprecated; use @min instead"); +pub const max3 = @compileError("deprecated; use @max instead"); /// Limit val to the inclusive range [lower, upper]. pub fn clamp(val: anytype, lower: anytype, upper: anytype) @TypeOf(val, lower, upper) { assert(lower <= upper); - return max(lower, min(val, upper)); + return @max(lower, @min(val, upper)); } test "clamp" { // Within range @@ -795,7 +706,7 @@ pub fn IntFittingRange(comptime from: comptime_int, comptime to: comptime_int) t return u0; } const signedness: std.builtin.Signedness = if (from < 0) .signed else .unsigned; - const largest_positive_integer = max(if (from < 0) (-from) - 1 else from, to); // two's complement + const largest_positive_integer = @max(if (from < 0) (-from) - 1 else from, to); // two's complement const base = log2(largest_positive_integer); const upper = (1 << base) - 1; var magnitude_bits = if (upper >= largest_positive_integer) base else base + 1; diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig index ec79d843da..487812e1de 100644 --- a/lib/std/math/big/int.zig +++ b/lib/std/math/big/int.zig @@ -44,12 +44,12 @@ pub fn calcDivLimbsBufferLen(a_len: usize, b_len: usize) usize { } pub fn calcMulLimbsBufferLen(a_len: usize, b_len: usize, aliases: usize) usize { - return aliases * math.max(a_len, b_len); + return aliases * @max(a_len, b_len); } pub fn calcMulWrapLimbsBufferLen(bit_count: usize, a_len: usize, b_len: usize, aliases: usize) usize { const req_limbs = calcTwosCompLimbCount(bit_count); - return aliases * math.min(req_limbs, math.max(a_len, b_len)); + return aliases * @min(req_limbs, @max(a_len, b_len)); } pub fn calcSetStringLimbsBufferLen(base: u8, string_len: usize) usize { @@ -396,7 +396,7 @@ pub const Mutable = struct { /// scalar is a primitive integer type. /// /// Asserts the result fits in `r`. An upper bound on the number of limbs needed by - /// r is `math.max(a.limbs.len, calcLimbLen(scalar)) + 1`. + /// r is `@max(a.limbs.len, calcLimbLen(scalar)) + 1`. pub fn addScalar(r: *Mutable, a: Const, scalar: anytype) void { // Normally we could just determine the number of limbs needed with calcLimbLen, // but that is not comptime-known when scalar is not a comptime_int. Instead, we @@ -414,11 +414,11 @@ pub const Mutable = struct { return add(r, a, operand); } - /// Base implementation for addition. Adds `max(a.limbs.len, b.limbs.len)` elements from a and b, + /// Base implementation for addition. Adds `@max(a.limbs.len, b.limbs.len)` elements from a and b, /// and returns whether any overflow occurred. /// r, a and b may be aliases. /// - /// Asserts r has enough elements to hold the result. The upper bound is `max(a.limbs.len, b.limbs.len)`. + /// Asserts r has enough elements to hold the result. The upper bound is `@max(a.limbs.len, b.limbs.len)`. 
fn addCarry(r: *Mutable, a: Const, b: Const) bool { if (a.eqZero()) { r.copy(b); @@ -452,12 +452,12 @@ pub const Mutable = struct { /// r, a and b may be aliases. /// /// Asserts the result fits in `r`. An upper bound on the number of limbs needed by - /// r is `math.max(a.limbs.len, b.limbs.len) + 1`. + /// r is `@max(a.limbs.len, b.limbs.len) + 1`. pub fn add(r: *Mutable, a: Const, b: Const) void { if (r.addCarry(a, b)) { // Fix up the result. Note that addCarry normalizes by a.limbs.len or b.limbs.len, // so we need to set the length here. - const msl = math.max(a.limbs.len, b.limbs.len); + const msl = @max(a.limbs.len, b.limbs.len); // `[add|sub]Carry` normalizes by `msl`, so we need to fix up the result manually here. // Note, the fact that it normalized means that the intermediary limbs are zero here. r.len = msl + 1; @@ -477,12 +477,12 @@ pub const Mutable = struct { // if an overflow occurred. const x = Const{ .positive = a.positive, - .limbs = a.limbs[0..math.min(req_limbs, a.limbs.len)], + .limbs = a.limbs[0..@min(req_limbs, a.limbs.len)], }; const y = Const{ .positive = b.positive, - .limbs = b.limbs[0..math.min(req_limbs, b.limbs.len)], + .limbs = b.limbs[0..@min(req_limbs, b.limbs.len)], }; var carry_truncated = false; @@ -492,7 +492,7 @@ pub const Mutable = struct { // truncate anyway. // - a and b had less elements than req_limbs, and those were overflowed. This case needs to be handled. // Note: after this we still might need to wrap. - const msl = math.max(a.limbs.len, b.limbs.len); + const msl = @max(a.limbs.len, b.limbs.len); if (msl < req_limbs) { r.limbs[msl] = 1; r.len = req_limbs; @@ -522,12 +522,12 @@ pub const Mutable = struct { // if an overflow occurred. const x = Const{ .positive = a.positive, - .limbs = a.limbs[0..math.min(req_limbs, a.limbs.len)], + .limbs = a.limbs[0..@min(req_limbs, a.limbs.len)], }; const y = Const{ .positive = b.positive, - .limbs = b.limbs[0..math.min(req_limbs, b.limbs.len)], + .limbs = b.limbs[0..@min(req_limbs, b.limbs.len)], }; if (r.addCarry(x, y)) { @@ -535,7 +535,7 @@ pub const Mutable = struct { // - We overflowed req_limbs, in which case we need to saturate. // - a and b had less elements than req_limbs, and those were overflowed. // Note: In this case, might _also_ need to saturate. - const msl = math.max(a.limbs.len, b.limbs.len); + const msl = @max(a.limbs.len, b.limbs.len); if (msl < req_limbs) { r.limbs[msl] = 1; r.len = req_limbs; @@ -550,11 +550,11 @@ pub const Mutable = struct { r.saturate(r.toConst(), signedness, bit_count); } - /// Base implementation for subtraction. Subtracts `max(a.limbs.len, b.limbs.len)` elements from a and b, + /// Base implementation for subtraction. Subtracts `@max(a.limbs.len, b.limbs.len)` elements from a and b, /// and returns whether any overflow occurred. /// r, a and b may be aliases. /// - /// Asserts r has enough elements to hold the result. The upper bound is `max(a.limbs.len, b.limbs.len)`. + /// Asserts r has enough elements to hold the result. The upper bound is `@max(a.limbs.len, b.limbs.len)`. fn subCarry(r: *Mutable, a: Const, b: Const) bool { if (a.eqZero()) { r.copy(b); @@ -607,7 +607,7 @@ pub const Mutable = struct { /// r, a and b may be aliases. /// /// Asserts the result fits in `r`. An upper bound on the number of limbs needed by - /// r is `math.max(a.limbs.len, b.limbs.len) + 1`. The +1 is not needed if both operands are positive. + /// r is `@max(a.limbs.len, b.limbs.len) + 1`. The +1 is not needed if both operands are positive. 
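The capacity contracts reworded in these doc comments are what the Managed wrapper handles automatically; a rough sketch of relying on that, with arbitrary values and a helper name chosen only for illustration:

const std = @import("std");
const big = std.math.big.int;

// Illustrative helper, not from the patch: the caller never sizes limbs by
// hand, since Managed.add reserves @max(a.len(), b.len()) + 1 limbs itself
// (see ensureAddCapacity further down).
fn sumInto(allocator: std.mem.Allocator, x: i64, y: i64) !big.Managed {
    var a = try big.Managed.initSet(allocator, x);
    defer a.deinit();
    var b = try big.Managed.initSet(allocator, y);
    defer b.deinit();
    var r = try big.Managed.init(allocator);
    errdefer r.deinit();
    try r.add(&a, &b);
    return r;
}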
pub fn sub(r: *Mutable, a: Const, b: Const) void { r.add(a, b.negate()); } @@ -714,7 +714,7 @@ pub const Mutable = struct { const a_copy = if (rma.limbs.ptr == a.limbs.ptr) blk: { const start = buf_index; - const a_len = math.min(req_limbs, a.limbs.len); + const a_len = @min(req_limbs, a.limbs.len); @memcpy(limbs_buffer[buf_index..][0..a_len], a.limbs[0..a_len]); buf_index += a_len; break :blk a.toMutable(limbs_buffer[start..buf_index]).toConst(); @@ -722,7 +722,7 @@ pub const Mutable = struct { const b_copy = if (rma.limbs.ptr == b.limbs.ptr) blk: { const start = buf_index; - const b_len = math.min(req_limbs, b.limbs.len); + const b_len = @min(req_limbs, b.limbs.len); @memcpy(limbs_buffer[buf_index..][0..b_len], b.limbs[0..b_len]); buf_index += b_len; break :blk a.toMutable(limbs_buffer[start..buf_index]).toConst(); @@ -755,13 +755,13 @@ pub const Mutable = struct { const req_limbs = calcTwosCompLimbCount(bit_count); // We can ignore the upper bits here, those results will be discarded anyway. - const a_limbs = a.limbs[0..math.min(req_limbs, a.limbs.len)]; - const b_limbs = b.limbs[0..math.min(req_limbs, b.limbs.len)]; + const a_limbs = a.limbs[0..@min(req_limbs, a.limbs.len)]; + const b_limbs = b.limbs[0..@min(req_limbs, b.limbs.len)]; @memset(rma.limbs[0..req_limbs], 0); llmulacc(.add, allocator, rma.limbs, a_limbs, b_limbs); - rma.normalize(math.min(req_limbs, a.limbs.len + b.limbs.len)); + rma.normalize(@min(req_limbs, a.limbs.len + b.limbs.len)); rma.positive = (a.positive == b.positive); rma.truncate(rma.toConst(), signedness, bit_count); } @@ -1211,7 +1211,7 @@ pub const Mutable = struct { /// /// a and b are zero-extended to the longer of a or b. /// - /// Asserts that r has enough limbs to store the result. Upper bound is `math.max(a.limbs.len, b.limbs.len)`. + /// Asserts that r has enough limbs to store the result. Upper bound is `@max(a.limbs.len, b.limbs.len)`. pub fn bitOr(r: *Mutable, a: Const, b: Const) void { // Trivial cases, llsignedor does not support zero. if (a.eqZero()) { @@ -1235,8 +1235,8 @@ pub const Mutable = struct { /// r may alias with a or b. /// /// Asserts that r has enough limbs to store the result. - /// If a or b is positive, the upper bound is `math.min(a.limbs.len, b.limbs.len)`. - /// If a and b are negative, the upper bound is `math.max(a.limbs.len, b.limbs.len) + 1`. + /// If a or b is positive, the upper bound is `@min(a.limbs.len, b.limbs.len)`. + /// If a and b are negative, the upper bound is `@max(a.limbs.len, b.limbs.len) + 1`. pub fn bitAnd(r: *Mutable, a: Const, b: Const) void { // Trivial cases, llsignedand does not support zero. if (a.eqZero()) { @@ -1260,8 +1260,8 @@ pub const Mutable = struct { /// r may alias with a or b. /// /// Asserts that r has enough limbs to store the result. If a and b share the same signedness, the - /// upper bound is `math.max(a.limbs.len, b.limbs.len)`. Otherwise, if either a or b is negative - /// but not both, the upper bound is `math.max(a.limbs.len, b.limbs.len) + 1`. + /// upper bound is `@max(a.limbs.len, b.limbs.len)`. Otherwise, if either a or b is negative + /// but not both, the upper bound is `@max(a.limbs.len, b.limbs.len) + 1`. pub fn bitXor(r: *Mutable, a: Const, b: Const) void { // Trivial cases, because llsignedxor does not support negative zero. if (a.eqZero()) { @@ -1284,7 +1284,7 @@ pub const Mutable = struct { /// rma may alias x or y. /// x and y may alias each other. /// Asserts that `rma` has enough limbs to store the result. Upper bound is - /// `math.min(x.limbs.len, y.limbs.len)`. 
+ /// `@min(x.limbs.len, y.limbs.len)`. /// /// `limbs_buffer` is used for temporary storage during the operation. When this function returns, /// it will have the same length as it had when the function was called. @@ -1546,7 +1546,7 @@ pub const Mutable = struct { if (yi != 0) break i; } else unreachable; - const xy_trailing = math.min(x_trailing, y_trailing); + const xy_trailing = @min(x_trailing, y_trailing); if (y.len - xy_trailing == 1) { const divisor = y.limbs[y.len - 1]; @@ -2589,7 +2589,7 @@ pub const Managed = struct { .allocator = allocator, .metadata = 1, .limbs = block: { - const limbs = try allocator.alloc(Limb, math.max(default_capacity, capacity)); + const limbs = try allocator.alloc(Limb, @max(default_capacity, capacity)); limbs[0] = 0; break :block limbs; }, @@ -2918,7 +2918,7 @@ pub const Managed = struct { /// /// Returns an error if memory could not be allocated. pub fn sub(r: *Managed, a: *const Managed, b: *const Managed) !void { - try r.ensureCapacity(math.max(a.len(), b.len()) + 1); + try r.ensureCapacity(@max(a.len(), b.len()) + 1); var m = r.toMutable(); m.sub(a.toConst(), b.toConst()); r.setMetadata(m.positive, m.len); @@ -3025,11 +3025,11 @@ pub const Managed = struct { } pub fn ensureAddScalarCapacity(r: *Managed, a: Const, scalar: anytype) !void { - try r.ensureCapacity(math.max(a.limbs.len, calcLimbLen(scalar)) + 1); + try r.ensureCapacity(@max(a.limbs.len, calcLimbLen(scalar)) + 1); } pub fn ensureAddCapacity(r: *Managed, a: Const, b: Const) !void { - try r.ensureCapacity(math.max(a.limbs.len, b.limbs.len) + 1); + try r.ensureCapacity(@max(a.limbs.len, b.limbs.len) + 1); } pub fn ensureMulCapacity(rma: *Managed, a: Const, b: Const) !void { @@ -3123,7 +3123,7 @@ pub const Managed = struct { /// /// a and b are zero-extended to the longer of a or b. pub fn bitOr(r: *Managed, a: *const Managed, b: *const Managed) !void { - try r.ensureCapacity(math.max(a.len(), b.len())); + try r.ensureCapacity(@max(a.len(), b.len())); var m = r.toMutable(); m.bitOr(a.toConst(), b.toConst()); r.setMetadata(m.positive, m.len); @@ -3132,9 +3132,9 @@ pub const Managed = struct { /// r = a & b pub fn bitAnd(r: *Managed, a: *const Managed, b: *const Managed) !void { const cap = if (a.isPositive() or b.isPositive()) - math.min(a.len(), b.len()) + @min(a.len(), b.len()) else - math.max(a.len(), b.len()) + 1; + @max(a.len(), b.len()) + 1; try r.ensureCapacity(cap); var m = r.toMutable(); m.bitAnd(a.toConst(), b.toConst()); @@ -3143,7 +3143,7 @@ pub const Managed = struct { /// r = a ^ b pub fn bitXor(r: *Managed, a: *const Managed, b: *const Managed) !void { - var cap = math.max(a.len(), b.len()) + @boolToInt(a.isPositive() != b.isPositive()); + var cap = @max(a.len(), b.len()) + @boolToInt(a.isPositive() != b.isPositive()); try r.ensureCapacity(cap); var m = r.toMutable(); @@ -3156,7 +3156,7 @@ pub const Managed = struct { /// /// rma's allocator is used for temporary storage to boost multiplication performance. pub fn gcd(rma: *Managed, x: *const Managed, y: *const Managed) !void { - try rma.ensureCapacity(math.min(x.len(), y.len())); + try rma.ensureCapacity(@min(x.len(), y.len())); var m = rma.toMutable(); var limbs_buffer = std.ArrayList(Limb).init(rma.allocator); defer limbs_buffer.deinit(); @@ -3356,13 +3356,13 @@ fn llmulaccKaratsuba( // For a1 and b1 we only need `limbs_after_split` limbs. 
const a1 = blk: { var a1 = a[split..]; - a1.len = math.min(llnormalize(a1), limbs_after_split); + a1.len = @min(llnormalize(a1), limbs_after_split); break :blk a1; }; const b1 = blk: { var b1 = b[split..]; - b1.len = math.min(llnormalize(b1), limbs_after_split); + b1.len = @min(llnormalize(b1), limbs_after_split); break :blk b1; }; @@ -3381,10 +3381,10 @@ fn llmulaccKaratsuba( // Compute p2. // Note, we don't need to compute all of p2, just enough limbs to satisfy r. - const p2_limbs = math.min(limbs_after_split, a1.len + b1.len); + const p2_limbs = @min(limbs_after_split, a1.len + b1.len); @memset(tmp[0..p2_limbs], 0); - llmulacc(.add, allocator, tmp[0..p2_limbs], a1[0..math.min(a1.len, p2_limbs)], b1[0..math.min(b1.len, p2_limbs)]); + llmulacc(.add, allocator, tmp[0..p2_limbs], a1[0..@min(a1.len, p2_limbs)], b1[0..@min(b1.len, p2_limbs)]); const p2 = tmp[0..llnormalize(tmp[0..p2_limbs])]; // Add p2 * B to the result. @@ -3392,7 +3392,7 @@ fn llmulaccKaratsuba( // Add p2 * B^2 to the result if required. if (limbs_after_split2 > 0) { - llaccum(op, r[split * 2 ..], p2[0..math.min(p2.len, limbs_after_split2)]); + llaccum(op, r[split * 2 ..], p2[0..@min(p2.len, limbs_after_split2)]); } // Compute p0. @@ -3406,13 +3406,13 @@ fn llmulaccKaratsuba( llaccum(op, r, p0); // Add p0 * B to the result. In this case, we may not need all of it. - llaccum(op, r[split..], p0[0..math.min(limbs_after_split, p0.len)]); + llaccum(op, r[split..], p0[0..@min(limbs_after_split, p0.len)]); // Finally, compute and add p1. // From now on we only need `limbs_after_split` limbs for a0 and b0, since the result of the // following computation will be added * B. - const a0x = a0[0..std.math.min(a0.len, limbs_after_split)]; - const b0x = b0[0..std.math.min(b0.len, limbs_after_split)]; + const a0x = a0[0..@min(a0.len, limbs_after_split)]; + const b0x = b0[0..@min(b0.len, limbs_after_split)]; const j0_sign = llcmp(a0x, a1); const j1_sign = llcmp(b1, b0x); @@ -3544,7 +3544,7 @@ fn llmulLimb(comptime op: AccOp, acc: []Limb, y: []const Limb, xi: Limb) bool { return false; } - const split = std.math.min(y.len, acc.len); + const split = @min(y.len, acc.len); var a_lo = acc[0..split]; var a_hi = acc[split..]; @@ -4023,8 +4023,8 @@ fn llsignedand(r: []Limb, a: []const Limb, a_positive: bool, b: []const Limb, b_ // r may alias. // a and b must not be -0. // Returns `true` when the result is positive. -// If the sign of a and b is equal, then r requires at least `max(a.len, b.len)` limbs are required. -// Otherwise, r requires at least `max(a.len, b.len) + 1` limbs. +// If the sign of a and b is equal, then r requires at least `@max(a.len, b.len)` limbs are required. +// Otherwise, r requires at least `@max(a.len, b.len) + 1` limbs. fn llsignedxor(r: []Limb, a: []const Limb, a_positive: bool, b: []const Limb, b_positive: bool) bool { @setRuntimeSafety(debug_safety); assert(a.len != 0 and b.len != 0); diff --git a/lib/std/math/ldexp.zig b/lib/std/math/ldexp.zig index d2fd8db9b7..8947475159 100644 --- a/lib/std/math/ldexp.zig +++ b/lib/std/math/ldexp.zig @@ -48,7 +48,7 @@ pub fn ldexp(x: anytype, n: i32) @TypeOf(x) { return @bitCast(T, sign_bit); // Severe underflow. 
Return +/- 0 // Result underflowed, we need to shift and round - const shift = @intCast(Log2Int(TBits), math.min(-n, -(exponent + n) + 1)); + const shift = @intCast(Log2Int(TBits), @min(-n, -(exponent + n) + 1)); const exact_tie: bool = @ctz(repr) == shift - 1; var result = repr & mantissa_mask; diff --git a/lib/std/mem.zig b/lib/std/mem.zig index c4ad708887..2f34745a64 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -596,7 +596,7 @@ pub fn sortUnstableContext(a: usize, b: usize, context: anytype) void { /// Compares two slices of numbers lexicographically. O(n). pub fn order(comptime T: type, lhs: []const T, rhs: []const T) math.Order { - const n = math.min(lhs.len, rhs.len); + const n = @min(lhs.len, rhs.len); var i: usize = 0; while (i < n) : (i += 1) { switch (math.order(lhs[i], rhs[i])) { @@ -642,7 +642,7 @@ pub fn eql(comptime T: type, a: []const T, b: []const T) bool { /// Compares two slices and returns the index of the first inequality. /// Returns null if the slices are equal. pub fn indexOfDiff(comptime T: type, a: []const T, b: []const T) ?usize { - const shortest = math.min(a.len, b.len); + const shortest = @min(a.len, b.len); if (a.ptr == b.ptr) return if (a.len == b.len) null else shortest; var index: usize = 0; @@ -3296,7 +3296,7 @@ pub fn min(comptime T: type, slice: []const T) T { assert(slice.len > 0); var best = slice[0]; for (slice[1..]) |item| { - best = math.min(best, item); + best = @min(best, item); } return best; } @@ -3313,7 +3313,7 @@ pub fn max(comptime T: type, slice: []const T) T { assert(slice.len > 0); var best = slice[0]; for (slice[1..]) |item| { - best = math.max(best, item); + best = @max(best, item); } return best; } @@ -3332,8 +3332,8 @@ pub fn minMax(comptime T: type, slice: []const T) struct { min: T, max: T } { var minVal = slice[0]; var maxVal = slice[0]; for (slice[1..]) |item| { - minVal = math.min(minVal, item); - maxVal = math.max(maxVal, item); + minVal = @min(minVal, item); + maxVal = @max(maxVal, item); } return .{ .min = minVal, .max = maxVal }; } diff --git a/lib/std/net.zig b/lib/std/net.zig index 64b13ec544..dfd6fe4a9e 100644 --- a/lib/std/net.zig +++ b/lib/std/net.zig @@ -1482,11 +1482,11 @@ fn getResolvConf(allocator: mem.Allocator, rc: *ResolvConf) !void { error.InvalidCharacter => continue, }; if (mem.eql(u8, name, "ndots")) { - rc.ndots = std.math.min(value, 15); + rc.ndots = @min(value, 15); } else if (mem.eql(u8, name, "attempts")) { - rc.attempts = std.math.min(value, 10); + rc.attempts = @min(value, 10); } else if (mem.eql(u8, name, "timeout")) { - rc.timeout = std.math.min(value, 60); + rc.timeout = @min(value, 60); } } } else if (mem.eql(u8, token, "nameserver")) { @@ -1615,7 +1615,7 @@ fn resMSendRc( } // Wait for a response, or until time to retry - const clamped_timeout = std.math.min(@as(u31, std.math.maxInt(u31)), t1 + retry_interval - t2); + const clamped_timeout = @min(@as(u31, std.math.maxInt(u31)), t1 + retry_interval - t2); const nevents = os.poll(&pfd, clamped_timeout) catch 0; if (nevents == 0) continue; diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig index ef0ec94d3b..e4d6790505 100644 --- a/lib/std/os/linux.zig +++ b/lib/std/os/linux.zig @@ -317,7 +317,7 @@ pub fn getdents(fd: i32, dirp: [*]u8, len: usize) usize { .getdents, @bitCast(usize, @as(isize, fd)), @ptrToInt(dirp), - std.math.min(len, maxInt(c_int)), + @min(len, maxInt(c_int)), ); } @@ -326,7 +326,7 @@ pub fn getdents64(fd: i32, dirp: [*]u8, len: usize) usize { .getdents64, @bitCast(usize, @as(isize, fd)), @ptrToInt(dirp), - 
std.math.min(len, maxInt(c_int)), + @min(len, maxInt(c_int)), ); } diff --git a/lib/std/os/linux/io_uring.zig b/lib/std/os/linux/io_uring.zig index b7467d765f..0610b214d5 100644 --- a/lib/std/os/linux/io_uring.zig +++ b/lib/std/os/linux/io_uring.zig @@ -277,7 +277,7 @@ pub const IO_Uring = struct { fn copy_cqes_ready(self: *IO_Uring, cqes: []linux.io_uring_cqe, wait_nr: u32) u32 { _ = wait_nr; const ready = self.cq_ready(); - const count = std.math.min(cqes.len, ready); + const count = @min(cqes.len, ready); var head = self.cq.head.*; var tail = head +% count; // TODO Optimize this by using 1 or 2 memcpy's (if the tail wraps) rather than a loop. @@ -1093,7 +1093,7 @@ pub const SubmissionQueue = struct { pub fn init(fd: os.fd_t, p: linux.io_uring_params) !SubmissionQueue { assert(fd >= 0); assert((p.features & linux.IORING_FEAT_SINGLE_MMAP) != 0); - const size = std.math.max( + const size = @max( p.sq_off.array + p.sq_entries * @sizeOf(u32), p.cq_off.cqes + p.cq_entries * @sizeOf(linux.io_uring_cqe), ); diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig index e559e48915..389c4bea12 100644 --- a/lib/std/os/windows.zig +++ b/lib/std/os/windows.zig @@ -272,7 +272,7 @@ pub fn RtlGenRandom(output: []u8) RtlGenRandomError!void { const max_read_size: ULONG = maxInt(ULONG); while (total_read < output.len) { - const to_read: ULONG = math.min(buff.len, max_read_size); + const to_read: ULONG = @min(buff.len, max_read_size); if (advapi32.RtlGenRandom(buff.ptr, to_read) == 0) { return unexpectedError(kernel32.GetLastError()); @@ -501,7 +501,7 @@ pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64, io_mode: std.io.Mo return @as(usize, bytes_transferred); } else { while (true) { - const want_read_count = @intCast(DWORD, math.min(@as(DWORD, maxInt(DWORD)), buffer.len)); + const want_read_count: DWORD = @min(@as(DWORD, maxInt(DWORD)), buffer.len); var amt_read: DWORD = undefined; var overlapped_data: OVERLAPPED = undefined; const overlapped: ?*OVERLAPPED = if (offset) |off| blk: { diff --git a/lib/std/pdb.zig b/lib/std/pdb.zig index 5bc836b08e..180507ba71 100644 --- a/lib/std/pdb.zig +++ b/lib/std/pdb.zig @@ -1049,7 +1049,7 @@ const MsfStream = struct { var size: usize = 0; var rem_buffer = buffer; while (size < buffer.len) { - const size_to_read = math.min(self.block_size - offset, rem_buffer.len); + const size_to_read = @min(self.block_size - offset, rem_buffer.len); size += try in.read(rem_buffer[0..size_to_read]); rem_buffer = buffer[size..]; offset += size_to_read; diff --git a/lib/std/rand.zig b/lib/std/rand.zig index 1e9f4051e9..f07562c911 100644 --- a/lib/std/rand.zig +++ b/lib/std/rand.zig @@ -410,7 +410,7 @@ pub const Random = struct { r.uintLessThan(T, sum) else if (comptime std.meta.trait.isFloat(T)) // take care that imprecision doesn't lead to a value slightly greater than sum - std.math.min(r.float(T) * sum, sum - std.math.floatEps(T)) + @min(r.float(T) * sum, sum - std.math.floatEps(T)) else @compileError("weightedIndex does not support proportions of type " ++ @typeName(T)); diff --git a/lib/std/sort/block.zig b/lib/std/sort/block.zig index 6c1be9c6c2..518d148a73 100644 --- a/lib/std/sort/block.zig +++ b/lib/std/sort/block.zig @@ -590,7 +590,7 @@ pub fn block( // whenever we leave an A block behind, we'll need to merge the previous A block with any B blocks that follow it, so track that information as well var lastA = firstA; var lastB = Range.init(0, 0); - var blockB = Range.init(B.start, B.start + math.min(block_size, B.length())); + var blockB = Range.init(B.start, 
B.start + @min(block_size, B.length())); blockA.start += firstA.length(); indexA = buffer1.start; @@ -849,7 +849,7 @@ fn findFirstForward( comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, ) usize { if (range.length() == 0) return range.start; - const skip = math.max(range.length() / unique, @as(usize, 1)); + const skip = @max(range.length() / unique, @as(usize, 1)); var index = range.start + skip; while (lessThan(context, items[index - 1], value)) : (index += skip) { @@ -871,7 +871,7 @@ fn findFirstBackward( comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, ) usize { if (range.length() == 0) return range.start; - const skip = math.max(range.length() / unique, @as(usize, 1)); + const skip = @max(range.length() / unique, @as(usize, 1)); var index = range.end - skip; while (index > range.start and !lessThan(context, items[index - 1], value)) : (index -= skip) { @@ -893,7 +893,7 @@ fn findLastForward( comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, ) usize { if (range.length() == 0) return range.start; - const skip = math.max(range.length() / unique, @as(usize, 1)); + const skip = @max(range.length() / unique, @as(usize, 1)); var index = range.start + skip; while (!lessThan(context, value, items[index - 1])) : (index += skip) { @@ -915,7 +915,7 @@ fn findLastBackward( comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, ) usize { if (range.length() == 0) return range.start; - const skip = math.max(range.length() / unique, @as(usize, 1)); + const skip = @max(range.length() / unique, @as(usize, 1)); var index = range.end - skip; while (index > range.start and lessThan(context, value, items[index - 1])) : (index -= skip) { diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig index 83fa68567f..3930c9714a 100644 --- a/lib/std/zig/render.zig +++ b/lib/std/zig/render.zig @@ -1960,7 +1960,7 @@ fn renderArrayInit( if (!this_contains_newline) { const column = column_counter % row_size; - column_widths[column] = std.math.max(column_widths[column], width); + column_widths[column] = @max(column_widths[column], width); const expr_last_token = tree.lastToken(expr) + 1; const next_expr = section_exprs[i + 1]; @@ -1980,7 +1980,7 @@ fn renderArrayInit( if (!contains_newline) { const column = column_counter % row_size; - column_widths[column] = std.math.max(column_widths[column], width); + column_widths[column] = @max(column_widths[column], width); } } } diff --git a/lib/std/zig/system/NativeTargetInfo.zig b/lib/std/zig/system/NativeTargetInfo.zig index f17356fdcd..cddaea2295 100644 --- a/lib/std/zig/system/NativeTargetInfo.zig +++ b/lib/std/zig/system/NativeTargetInfo.zig @@ -503,7 +503,7 @@ fn glibcVerFromSoFile(file: fs.File) !std.builtin.Version { const shstrtab_off = elfInt(is_64, need_bswap, shstr32.sh_offset, shstr64.sh_offset); const shstrtab_size = elfInt(is_64, need_bswap, shstr32.sh_size, shstr64.sh_size); var strtab_buf: [4096:0]u8 = undefined; - const shstrtab_len = std.math.min(shstrtab_size, strtab_buf.len); + const shstrtab_len = @min(shstrtab_size, strtab_buf.len); const shstrtab_read_len = try preadMin(file, &strtab_buf, shstrtab_off, shstrtab_len); const shstrtab = strtab_buf[0..shstrtab_read_len]; const shnum = elfInt(is_64, need_bswap, hdr32.e_shnum, hdr64.e_shnum); @@ -757,7 +757,7 @@ pub fn abiAndDynamicLinkerFromFile( const shstrtab_off = elfInt(is_64, need_bswap, shstr32.sh_offset, shstr64.sh_offset); const shstrtab_size = elfInt(is_64, need_bswap, shstr32.sh_size, shstr64.sh_size); var strtab_buf: [4096:0]u8 = undefined; - 
const shstrtab_len = std.math.min(shstrtab_size, strtab_buf.len); + const shstrtab_len = @min(shstrtab_size, strtab_buf.len); const shstrtab_read_len = try preadMin(file, &strtab_buf, shstrtab_off, shstrtab_len); const shstrtab = strtab_buf[0..shstrtab_read_len]; @@ -806,7 +806,7 @@ pub fn abiAndDynamicLinkerFromFile( const rpoff_file = ds.offset + rpoff_usize; const rp_max_size = ds.size - rpoff_usize; - const strtab_len = std.math.min(rp_max_size, strtab_buf.len); + const strtab_len = @min(rp_max_size, strtab_buf.len); const strtab_read_len = try preadMin(file, &strtab_buf, rpoff_file, strtab_len); const strtab = strtab_buf[0..strtab_read_len]; diff --git a/src/Sema.zig b/src/Sema.zig index 99ebd044f9..36fe5a6ee8 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -22367,9 +22367,9 @@ fn analyzeShuffle( // to it up to the length of the longer vector. This recursion terminates // in 1 call because these calls to analyzeShuffle guarantee a_len == b_len. if (a_len != b_len) { - const min_len = std.math.min(a_len, b_len); + const min_len = @min(a_len, b_len); const max_src = if (a_len > b_len) a_src else b_src; - const max_len = try sema.usizeCast(block, max_src, std.math.max(a_len, b_len)); + const max_len = try sema.usizeCast(block, max_src, @max(a_len, b_len)); const expand_mask_values = try sema.arena.alloc(InternPool.Index, max_len); for (@intCast(usize, 0)..@intCast(usize, min_len)) |i| { @@ -31301,7 +31301,7 @@ fn cmpNumeric( } const dest_ty = if (dest_float_type) |ft| ft else blk: { - const max_bits = std.math.max(lhs_bits, rhs_bits); + const max_bits = @max(lhs_bits, rhs_bits); const casted_bits = std.math.cast(u16, max_bits) orelse return sema.fail(block, src, "{d} exceeds maximum integer bit count", .{max_bits}); const signedness: std.builtin.Signedness = if (dest_int_is_signed) .signed else .unsigned; break :blk try mod.intType(signedness, casted_bits); @@ -35828,7 +35828,7 @@ fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value { const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); const limbs = try sema.arena.alloc( std.math.big.Limb, - std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, + @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, ); var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.add(lhs_bigint, rhs_bigint); @@ -35918,7 +35918,7 @@ fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value { const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); const limbs = try sema.arena.alloc( std.math.big.Limb, - std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, + @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, ); var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.sub(lhs_bigint, rhs_bigint); diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 9d3fb67d1f..93454710dc 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -111,7 +111,7 @@ pub fn print( .val = val.castTag(.repeated).?.data, }; const len = ty.arrayLen(mod); - const max_len = std.math.min(len, max_aggregate_items); + const max_len = @min(len, max_aggregate_items); while (i < max_len) : (i += 1) { if (i != 0) try writer.writeAll(", "); try print(elem_tv, writer, level - 1, mod); @@ -130,7 +130,7 @@ pub fn print( const len = payload.len.toUnsignedInt(mod); if (elem_ty.eql(Type.u8, mod)) str: { - const max_len = @intCast(usize, std.math.min(len, max_string_len)); + const 
max_len: usize = @min(len, max_string_len); var buf: [max_string_len]u8 = undefined; var i: u32 = 0; @@ -149,7 +149,7 @@ pub fn print( try writer.writeAll(".{ "); - const max_len = std.math.min(len, max_aggregate_items); + const max_len = @min(len, max_aggregate_items); var i: u32 = 0; while (i < max_len) : (i += 1) { if (i != 0) try writer.writeAll(", "); @@ -455,7 +455,7 @@ fn printAggregate( const len = ty.arrayLen(mod); if (elem_ty.eql(Type.u8, mod)) str: { - const max_len = @intCast(usize, std.math.min(len, max_string_len)); + const max_len: usize = @min(len, max_string_len); var buf: [max_string_len]u8 = undefined; var i: u32 = 0; @@ -471,7 +471,7 @@ fn printAggregate( try writer.writeAll(".{ "); - const max_len = std.math.min(len, max_aggregate_items); + const max_len = @min(len, max_aggregate_items); var i: u32 = 0; while (i < max_len) : (i += 1) { if (i != 0) try writer.writeAll(", "); diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index a1b57516ee..6d98ecce4f 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2907,7 +2907,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { const dst_info = dst_ty.intInfo(mod); const src_ty = try mod.intType(dst_info.signedness, switch (tag) { else => unreachable, - .mul, .mulwrap => math.max3( + .mul, .mulwrap => @max( self.activeIntBits(bin_op.lhs), self.activeIntBits(bin_op.rhs), dst_info.bits / 2, @@ -3349,7 +3349,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const lhs_active_bits = self.activeIntBits(bin_op.lhs); const rhs_active_bits = self.activeIntBits(bin_op.rhs); - const src_bits = math.max3(lhs_active_bits, rhs_active_bits, dst_info.bits / 2); + const src_bits = @max(lhs_active_bits, rhs_active_bits, dst_info.bits / 2); const src_ty = try mod.intType(dst_info.signedness, src_bits); const lhs = try self.resolveInst(bin_op.lhs); diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 409eca6e7a..0863a22fac 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2326,7 +2326,7 @@ fn allocateAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignme self.debug_aranges_section_dirty = true; } } - shdr.sh_addralign = math.max(shdr.sh_addralign, alignment); + shdr.sh_addralign = @max(shdr.sh_addralign, alignment); // This function can also reallocate an atom. 
// In this case we need to "unplug" it from its previous location before diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig index 59b3e50b07..4709560ba7 100644 --- a/src/link/MachO/CodeSignature.zig +++ b/src/link/MachO/CodeSignature.zig @@ -99,7 +99,7 @@ const CodeDirectory = struct { fn addSpecialHash(self: *CodeDirectory, index: u32, hash: [hash_size]u8) void { assert(index > 0); - self.inner.nSpecialSlots = std.math.max(self.inner.nSpecialSlots, index); + self.inner.nSpecialSlots = @max(self.inner.nSpecialSlots, index); self.special_slots[index - 1] = hash; } @@ -426,11 +426,11 @@ pub fn estimateSize(self: CodeSignature, file_size: u64) u32 { var n_special_slots: u32 = 0; if (self.requirements) |req| { ssize += @sizeOf(macho.BlobIndex) + req.size(); - n_special_slots = std.math.max(n_special_slots, req.slotType()); + n_special_slots = @max(n_special_slots, req.slotType()); } if (self.entitlements) |ent| { ssize += @sizeOf(macho.BlobIndex) + ent.size() + hash_size; - n_special_slots = std.math.max(n_special_slots, ent.slotType()); + n_special_slots = @max(n_special_slots, ent.slotType()); } if (self.signature) |sig| { ssize += @sizeOf(macho.BlobIndex) + sig.size(); diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig index b218fdbd2d..105a806075 100644 --- a/src/link/MachO/Object.zig +++ b/src/link/MachO/Object.zig @@ -530,7 +530,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void { sect.addr + sect.size - addr; const atom_align = if (addr > 0) - math.min(@ctz(addr), sect.@"align") + @min(@ctz(addr), sect.@"align") else sect.@"align"; diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index fdac7dfa63..5126033995 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -2027,7 +2027,7 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void { }; const segment: *Segment = &wasm.segments.items[final_index]; - segment.alignment = std.math.max(segment.alignment, atom.alignment); + segment.alignment = @max(segment.alignment, atom.alignment); try wasm.appendAtomAtIndex(final_index, atom_index); } diff --git a/src/link/Wasm/Object.zig b/src/link/Wasm/Object.zig index 363648971a..33f54dece5 100644 --- a/src/link/Wasm/Object.zig +++ b/src/link/Wasm/Object.zig @@ -979,7 +979,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b const segment: *Wasm.Segment = &wasm_bin.segments.items[final_index]; if (relocatable_data.type == .data) { //code section and debug sections are 1-byte aligned - segment.alignment = std.math.max(segment.alignment, atom.alignment); + segment.alignment = @max(segment.alignment, atom.alignment); } try wasm_bin.appendAtomAtIndex(final_index, atom_index); diff --git a/src/main.zig b/src/main.zig index 5d666840c0..aedca80d26 100644 --- a/src/main.zig +++ b/src/main.zig @@ -5391,7 +5391,7 @@ fn gimmeMoreOfThoseSweetSweetFileDescriptors() void { // setrlimit() now returns with errno set to EINVAL in places that historically succeeded. // It no longer accepts "rlim_cur = RLIM.INFINITY" for RLIM.NOFILE. // Use "rlim_cur = min(OPEN_MAX, rlim_max)". 
- lim.max = std.math.min(std.os.darwin.OPEN_MAX, lim.max); + lim.max = @min(std.os.darwin.OPEN_MAX, lim.max); } if (lim.cur == lim.max) return; diff --git a/src/translate_c.zig b/src/translate_c.zig index 8cc2d1856c..67176ff74b 100644 --- a/src/translate_c.zig +++ b/src/translate_c.zig @@ -2400,7 +2400,7 @@ fn transStringLiteralInitializer( if (array_size == 0) return Tag.empty_array.create(c.arena, elem_type); - const num_inits = math.min(str_length, array_size); + const num_inits = @min(str_length, array_size); const init_node = if (num_inits > 0) blk: { if (is_narrow) { // "string literal".* or string literal"[0..num_inits].* diff --git a/src/translate_c/ast.zig b/src/translate_c/ast.zig index 6c6bbf28bd..443c56a84a 100644 --- a/src/translate_c/ast.zig +++ b/src/translate_c/ast.zig @@ -1824,7 +1824,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex { }, .switch_prong => { const payload = node.castTag(.switch_prong).?.data; - var items = try c.gpa.alloc(NodeIndex, std.math.max(payload.cases.len, 1)); + var items = try c.gpa.alloc(NodeIndex, @max(payload.cases.len, 1)); defer c.gpa.free(items); items[0] = 0; for (payload.cases, 0..) |item, i| { @@ -1973,7 +1973,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex { const payload = node.castTag(.tuple).?.data; _ = try c.addToken(.period, "."); const l_brace = try c.addToken(.l_brace, "{"); - var inits = try c.gpa.alloc(NodeIndex, std.math.max(payload.len, 2)); + var inits = try c.gpa.alloc(NodeIndex, @max(payload.len, 2)); defer c.gpa.free(inits); inits[0] = 0; inits[1] = 0; @@ -2007,7 +2007,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex { const payload = node.castTag(.container_init_dot).?.data; _ = try c.addToken(.period, "."); const l_brace = try c.addToken(.l_brace, "{"); - var inits = try c.gpa.alloc(NodeIndex, std.math.max(payload.len, 2)); + var inits = try c.gpa.alloc(NodeIndex, @max(payload.len, 2)); defer c.gpa.free(inits); inits[0] = 0; inits[1] = 0; @@ -2046,7 +2046,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex { const lhs = try renderNode(c, payload.lhs); const l_brace = try c.addToken(.l_brace, "{"); - var inits = try c.gpa.alloc(NodeIndex, std.math.max(payload.inits.len, 1)); + var inits = try c.gpa.alloc(NodeIndex, @max(payload.inits.len, 1)); defer c.gpa.free(inits); inits[0] = 0; for (payload.inits, 0..) |init, i| { @@ -2102,7 +2102,7 @@ fn renderRecord(c: *Context, node: Node) !NodeIndex { const num_vars = payload.variables.len; const num_funcs = payload.functions.len; const total_members = payload.fields.len + num_vars + num_funcs; - const members = try c.gpa.alloc(NodeIndex, std.math.max(total_members, 2)); + const members = try c.gpa.alloc(NodeIndex, @max(total_members, 2)); defer c.gpa.free(members); members[0] = 0; members[1] = 0; @@ -2195,7 +2195,7 @@ fn renderFieldAccess(c: *Context, lhs: NodeIndex, field_name: []const u8) !NodeI fn renderArrayInit(c: *Context, lhs: NodeIndex, inits: []const Node) !NodeIndex { const l_brace = try c.addToken(.l_brace, "{"); - var rendered = try c.gpa.alloc(NodeIndex, std.math.max(inits.len, 1)); + var rendered = try c.gpa.alloc(NodeIndex, @max(inits.len, 1)); defer c.gpa.free(rendered); rendered[0] = 0; for (inits, 0..) 
|init, i| { @@ -2904,7 +2904,7 @@ fn renderMacroFunc(c: *Context, node: Node) !NodeIndex { fn renderParams(c: *Context, params: []Payload.Param, is_var_args: bool) !std.ArrayList(NodeIndex) { _ = try c.addToken(.l_paren, "("); - var rendered = try std.ArrayList(NodeIndex).initCapacity(c.gpa, std.math.max(params.len, 1)); + var rendered = try std.ArrayList(NodeIndex).initCapacity(c.gpa, @max(params.len, 1)); errdefer rendered.deinit(); for (params, 0..) |param, i| { diff --git a/src/type.zig b/src/type.zig index 22523a7141..bb82a50682 100644 --- a/src/type.zig +++ b/src/type.zig @@ -1633,7 +1633,7 @@ pub const Type = struct { const len = array_type.len + @boolToInt(array_type.sentinel != .none); if (len == 0) return 0; const elem_ty = array_type.child.toType(); - const elem_size = std.math.max(elem_ty.abiAlignment(mod), elem_ty.abiSize(mod)); + const elem_size = @max(elem_ty.abiAlignment(mod), elem_ty.abiSize(mod)); if (elem_size == 0) return 0; const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, opt_sema); return (len - 1) * 8 * elem_size + elem_bit_size; diff --git a/src/value.zig b/src/value.zig index 85204e2b10..8590aa8872 100644 --- a/src/value.zig +++ b/src/value.zig @@ -2458,7 +2458,7 @@ pub const Value = struct { const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try arena.alloc( std.math.big.Limb, - std.math.max( + @max( // For the saturate std.math.big.int.calcTwosCompLimbCount(info.bits), lhs_bigint.limbs.len + rhs_bigint.limbs.len, @@ -2572,7 +2572,7 @@ pub const Value = struct { const limbs = try arena.alloc( std.math.big.Limb, // + 1 for negatives - std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, + @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, ); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitAnd(lhs_bigint, rhs_bigint); @@ -2638,7 +2638,7 @@ pub const Value = struct { const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try arena.alloc( std.math.big.Limb, - std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len), + @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len), ); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitOr(lhs_bigint, rhs_bigint); @@ -2677,7 +2677,7 @@ pub const Value = struct { const limbs = try arena.alloc( std.math.big.Limb, // + 1 for negatives - std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, + @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, ); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitXor(lhs_bigint, rhs_bigint); -- cgit v1.2.3 From d41111d7ef531f6f55a19c56205d6d2f1134c224 Mon Sep 17 00:00:00 2001 From: Motiejus Jakštys Date: Fri, 9 Jun 2023 16:02:18 -0700 Subject: mem: rename align*Generic to mem.align* Anecdote 1: The generic version is way more popular than the non-generic one in Zig codebase: git grep -w alignForward | wc -l 56 git grep -w alignForwardGeneric | wc -l 149 git grep -w alignBackward | wc -l 6 git grep -w alignBackwardGeneric | wc -l 15 Anecdote 2: In my project (turbonss) that does much arithmetic and alignment I exclusively use the Generic functions. Anecdote 3: we used only the Generic versions in the Macho Man's linker workshop. 
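As a quick illustration (a sketch for readers, not part of the patch below), call sites after
the rename take the integer type as the first parameter; the expected values here are taken
from the updated std.mem tests in the diff:

    const std = @import("std");

    test "renamed alignment helpers" {
        // The old two-argument form `alignForward(addr, alignment)` is gone; the
        // generic form now owns the short name, with the type moved to the front.
        try std.testing.expect(std.mem.alignForward(usize, 17, 8) == 24);
        try std.testing.expect(std.mem.alignForward(u64, 9, 8) == 16);
        try std.testing.expect(std.mem.alignBackward(usize, 17, 8) == 16);
        // `alignForwardGeneric` and `alignBackwardGeneric` are now @compileError
        // stubs that point at the renamed functions.
    }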
--- lib/std/Thread.zig | 8 ++--- lib/std/dynamic_library.zig | 4 +-- lib/std/hash_map.zig | 12 +++---- lib/std/heap.zig | 8 ++--- lib/std/heap/PageAllocator.zig | 12 +++---- lib/std/heap/WasmPageAllocator.zig | 6 ++-- lib/std/heap/arena_allocator.zig | 2 +- lib/std/heap/general_purpose_allocator.zig | 1 + lib/std/mem.zig | 56 ++++++++++++------------------ lib/std/mem/Allocator.zig | 4 +-- lib/std/meta/trailer_flags.zig | 6 ++-- lib/std/os/linux/tls.zig | 8 ++--- lib/std/os/uefi/pool_allocator.zig | 4 +-- lib/std/tar.zig | 2 +- lib/std/target.zig | 2 +- lib/std/testing.zig | 2 +- src/Module.zig | 10 +++--- src/arch/aarch64/CodeGen.zig | 6 ++-- src/arch/arm/CodeGen.zig | 10 +++--- src/arch/arm/abi.zig | 2 +- src/arch/riscv64/CodeGen.zig | 2 +- src/arch/sparc64/CodeGen.zig | 4 +-- src/arch/wasm/CodeGen.zig | 8 ++--- src/arch/x86_64/CodeGen.zig | 10 +++--- src/codegen.zig | 8 ++--- src/codegen/llvm.zig | 44 +++++++++++------------ src/codegen/spirv.zig | 4 +-- src/link/Coff.zig | 30 ++++++++-------- src/link/Dwarf.zig | 2 +- src/link/Elf.zig | 10 +++--- src/link/MachO.zig | 50 +++++++++++++------------- src/link/MachO/CodeSignature.zig | 8 ++--- src/link/MachO/DebugSymbols.zig | 18 +++++----- src/link/MachO/load_commands.zig | 8 ++--- src/link/MachO/thunks.zig | 6 ++-- src/link/MachO/zld.zig | 34 +++++++++--------- src/link/Wasm.zig | 16 ++++----- src/objcopy.zig | 6 ++-- src/type.zig | 22 ++++++------ 39 files changed, 223 insertions(+), 232 deletions(-) (limited to 'lib/std/os/linux') diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig index 76650a9072..d7bcbee66f 100644 --- a/lib/std/Thread.zig +++ b/lib/std/Thread.zig @@ -931,18 +931,18 @@ const LinuxThreadImpl = struct { guard_offset = bytes; bytes += @max(page_size, config.stack_size); - bytes = std.mem.alignForward(bytes, page_size); + bytes = std.mem.alignForward(usize, bytes, page_size); stack_offset = bytes; - bytes = std.mem.alignForward(bytes, linux.tls.tls_image.alloc_align); + bytes = std.mem.alignForward(usize, bytes, linux.tls.tls_image.alloc_align); tls_offset = bytes; bytes += linux.tls.tls_image.alloc_size; - bytes = std.mem.alignForward(bytes, @alignOf(Instance)); + bytes = std.mem.alignForward(usize, bytes, @alignOf(Instance)); instance_offset = bytes; bytes += @sizeOf(Instance); - bytes = std.mem.alignForward(bytes, page_size); + bytes = std.mem.alignForward(usize, bytes, page_size); break :blk bytes; }; diff --git a/lib/std/dynamic_library.zig b/lib/std/dynamic_library.zig index 94da2f4d6d..928d0cc9c3 100644 --- a/lib/std/dynamic_library.zig +++ b/lib/std/dynamic_library.zig @@ -124,7 +124,7 @@ pub const ElfDynLib = struct { // corresponding to the actual LOAD sections. 
const file_bytes = try os.mmap( null, - mem.alignForward(size, mem.page_size), + mem.alignForward(usize, size, mem.page_size), os.PROT.READ, os.MAP.PRIVATE, fd, @@ -187,7 +187,7 @@ pub const ElfDynLib = struct { // extra nonsense mapped before/after the VirtAddr,MemSiz const aligned_addr = (base + ph.p_vaddr) & ~(@as(usize, mem.page_size) - 1); const extra_bytes = (base + ph.p_vaddr) - aligned_addr; - const extended_memsz = mem.alignForward(ph.p_memsz + extra_bytes, mem.page_size); + const extended_memsz = mem.alignForward(usize, ph.p_memsz + extra_bytes, mem.page_size); const ptr = @intToPtr([*]align(mem.page_size) u8, aligned_addr); const prot = elfToMmapProt(ph.p_flags); if ((ph.p_flags & elf.PF_W) == 0) { diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig index 5b539ddaad..8c05dfeca5 100644 --- a/lib/std/hash_map.zig +++ b/lib/std/hash_map.zig @@ -1545,13 +1545,13 @@ pub fn HashMapUnmanaged( const meta_size = @sizeOf(Header) + new_capacity * @sizeOf(Metadata); comptime assert(@alignOf(Metadata) == 1); - const keys_start = std.mem.alignForward(meta_size, key_align); + const keys_start = std.mem.alignForward(usize, meta_size, key_align); const keys_end = keys_start + new_capacity * @sizeOf(K); - const vals_start = std.mem.alignForward(keys_end, val_align); + const vals_start = std.mem.alignForward(usize, keys_end, val_align); const vals_end = vals_start + new_capacity * @sizeOf(V); - const total_size = std.mem.alignForward(vals_end, max_align); + const total_size = std.mem.alignForward(usize, vals_end, max_align); const slice = try allocator.alignedAlloc(u8, max_align, total_size); const ptr = @ptrToInt(slice.ptr); @@ -1581,13 +1581,13 @@ pub fn HashMapUnmanaged( const meta_size = @sizeOf(Header) + cap * @sizeOf(Metadata); comptime assert(@alignOf(Metadata) == 1); - const keys_start = std.mem.alignForward(meta_size, key_align); + const keys_start = std.mem.alignForward(usize, meta_size, key_align); const keys_end = keys_start + cap * @sizeOf(K); - const vals_start = std.mem.alignForward(keys_end, val_align); + const vals_start = std.mem.alignForward(usize, keys_end, val_align); const vals_end = vals_start + cap * @sizeOf(V); - const total_size = std.mem.alignForward(vals_end, max_align); + const total_size = std.mem.alignForward(usize, vals_end, max_align); const slice = @intToPtr([*]align(max_align) u8, @ptrToInt(self.header()))[0..total_size]; allocator.free(slice); diff --git a/lib/std/heap.zig b/lib/std/heap.zig index 7d2a66df1e..7b4bf3af21 100644 --- a/lib/std/heap.zig +++ b/lib/std/heap.zig @@ -83,7 +83,7 @@ const CAllocator = struct { // the aligned address. 
var unaligned_ptr = @ptrCast([*]u8, c.malloc(len + alignment - 1 + @sizeOf(usize)) orelse return null); const unaligned_addr = @ptrToInt(unaligned_ptr); - const aligned_addr = mem.alignForward(unaligned_addr + @sizeOf(usize), alignment); + const aligned_addr = mem.alignForward(usize, unaligned_addr + @sizeOf(usize), alignment); var aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr); getHeader(aligned_ptr).* = unaligned_ptr; @@ -249,7 +249,7 @@ pub const wasm_allocator = Allocator{ /// Verifies that the adjusted length will still map to the full length pub fn alignPageAllocLen(full_len: usize, len: usize) usize { const aligned_len = mem.alignAllocLen(full_len, len); - assert(mem.alignForward(aligned_len, mem.page_size) == full_len); + assert(mem.alignForward(usize, aligned_len, mem.page_size) == full_len); return aligned_len; } @@ -307,7 +307,7 @@ pub const HeapAllocator = switch (builtin.os.tag) { }; const ptr = os.windows.kernel32.HeapAlloc(heap_handle, 0, amt) orelse return null; const root_addr = @ptrToInt(ptr); - const aligned_addr = mem.alignForward(root_addr, ptr_align); + const aligned_addr = mem.alignForward(usize, root_addr, ptr_align); const buf = @intToPtr([*]u8, aligned_addr)[0..n]; getRecordPtr(buf).* = root_addr; return buf.ptr; @@ -840,7 +840,7 @@ pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void { // which is 16 pages, hence the 32. This test may require to increase // the size of the allocations feeding the `allocator` parameter if they // fail, because of this high over-alignment we want to have. - while (@ptrToInt(slice.ptr) == mem.alignForward(@ptrToInt(slice.ptr), mem.page_size * 32)) { + while (@ptrToInt(slice.ptr) == mem.alignForward(usize, @ptrToInt(slice.ptr), mem.page_size * 32)) { try stuff_to_free.append(slice); slice = try allocator.alignedAlloc(u8, 16, alloc_size); } diff --git a/lib/std/heap/PageAllocator.zig b/lib/std/heap/PageAllocator.zig index 2c8146caf3..5da570fa42 100644 --- a/lib/std/heap/PageAllocator.zig +++ b/lib/std/heap/PageAllocator.zig @@ -17,7 +17,7 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 { _ = log2_align; assert(n > 0); if (n > maxInt(usize) - (mem.page_size - 1)) return null; - const aligned_len = mem.alignForward(n, mem.page_size); + const aligned_len = mem.alignForward(usize, n, mem.page_size); if (builtin.os.tag == .windows) { const w = os.windows; @@ -54,14 +54,14 @@ fn resize( ) bool { _ = log2_buf_align; _ = return_address; - const new_size_aligned = mem.alignForward(new_size, mem.page_size); + const new_size_aligned = mem.alignForward(usize, new_size, mem.page_size); if (builtin.os.tag == .windows) { const w = os.windows; if (new_size <= buf_unaligned.len) { const base_addr = @ptrToInt(buf_unaligned.ptr); const old_addr_end = base_addr + buf_unaligned.len; - const new_addr_end = mem.alignForward(base_addr + new_size, mem.page_size); + const new_addr_end = mem.alignForward(usize, base_addr + new_size, mem.page_size); if (old_addr_end > new_addr_end) { // For shrinking that is not releasing, we will only // decommit the pages not needed anymore. 
@@ -73,14 +73,14 @@ fn resize( } return true; } - const old_size_aligned = mem.alignForward(buf_unaligned.len, mem.page_size); + const old_size_aligned = mem.alignForward(usize, buf_unaligned.len, mem.page_size); if (new_size_aligned <= old_size_aligned) { return true; } return false; } - const buf_aligned_len = mem.alignForward(buf_unaligned.len, mem.page_size); + const buf_aligned_len = mem.alignForward(usize, buf_unaligned.len, mem.page_size); if (new_size_aligned == buf_aligned_len) return true; @@ -103,7 +103,7 @@ fn free(_: *anyopaque, slice: []u8, log2_buf_align: u8, return_address: usize) v if (builtin.os.tag == .windows) { os.windows.VirtualFree(slice.ptr, 0, os.windows.MEM_RELEASE); } else { - const buf_aligned_len = mem.alignForward(slice.len, mem.page_size); + const buf_aligned_len = mem.alignForward(usize, slice.len, mem.page_size); const ptr = @alignCast(mem.page_size, slice.ptr); os.munmap(ptr[0..buf_aligned_len]); } diff --git a/lib/std/heap/WasmPageAllocator.zig b/lib/std/heap/WasmPageAllocator.zig index 1370af022c..63ae226196 100644 --- a/lib/std/heap/WasmPageAllocator.zig +++ b/lib/std/heap/WasmPageAllocator.zig @@ -100,7 +100,7 @@ fn extendedOffset() usize { } fn nPages(memsize: usize) usize { - return mem.alignForward(memsize, mem.page_size) / mem.page_size; + return mem.alignForward(usize, memsize, mem.page_size) / mem.page_size; } fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, ra: usize) ?[*]u8 { @@ -170,7 +170,7 @@ fn resize( _ = ctx; _ = log2_buf_align; _ = return_address; - const aligned_len = mem.alignForward(buf.len, mem.page_size); + const aligned_len = mem.alignForward(usize, buf.len, mem.page_size); if (new_len > aligned_len) return false; const current_n = nPages(aligned_len); const new_n = nPages(new_len); @@ -190,7 +190,7 @@ fn free( _ = ctx; _ = log2_buf_align; _ = return_address; - const aligned_len = mem.alignForward(buf.len, mem.page_size); + const aligned_len = mem.alignForward(usize, buf.len, mem.page_size); const current_n = nPages(aligned_len); const base = nPages(@ptrToInt(buf.ptr)); freePages(base, base + current_n); diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig index c7e0569067..f858510bcf 100644 --- a/lib/std/heap/arena_allocator.zig +++ b/lib/std/heap/arena_allocator.zig @@ -186,7 +186,7 @@ pub const ArenaAllocator = struct { const cur_alloc_buf = @ptrCast([*]u8, cur_node)[0..cur_node.data]; const cur_buf = cur_alloc_buf[@sizeOf(BufNode)..]; const addr = @ptrToInt(cur_buf.ptr) + self.state.end_index; - const adjusted_addr = mem.alignForward(addr, ptr_align); + const adjusted_addr = mem.alignForward(usize, addr, ptr_align); const adjusted_index = self.state.end_index + (adjusted_addr - addr); const new_end_index = adjusted_index + n; diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig index ef88787fc6..51b6c1744f 100644 --- a/lib/std/heap/general_purpose_allocator.zig +++ b/lib/std/heap/general_purpose_allocator.zig @@ -309,6 +309,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { fn bucketStackFramesStart(size_class: usize) usize { return mem.alignForward( + usize, @sizeOf(BucketHeader) + usedBitsCount(size_class), @alignOf(usize), ); diff --git a/lib/std/mem.zig b/lib/std/mem.zig index 87f436d156..23e24b0c09 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -4213,23 +4213,17 @@ test "sliceAsBytes preserves pointer attributes" { /// Round an address up to the next (or current) aligned address. 
/// The alignment must be a power of 2 and greater than 0. /// Asserts that rounding up the address does not cause integer overflow. -pub fn alignForward(addr: usize, alignment: usize) usize { - return alignForwardGeneric(usize, addr, alignment); +pub fn alignForward(comptime T: type, addr: T, alignment: T) T { + assert(isValidAlignGeneric(T, alignment)); + return alignBackward(T, addr + (alignment - 1), alignment); } pub fn alignForwardLog2(addr: usize, log2_alignment: u8) usize { const alignment = @as(usize, 1) << @intCast(math.Log2Int(usize), log2_alignment); - return alignForward(addr, alignment); + return alignForward(usize, addr, alignment); } -/// Round an address up to the next (or current) aligned address. -/// The alignment must be a power of 2 and greater than 0. -/// Asserts that rounding up the address does not cause integer overflow. -pub fn alignForwardGeneric(comptime T: type, addr: T, alignment: T) T { - assert(alignment > 0); - assert(std.math.isPowerOfTwo(alignment)); - return alignBackwardGeneric(T, addr + (alignment - 1), alignment); -} +pub const alignForwardGeneric = @compileError("renamed to alignForward"); /// Force an evaluation of the expression; this tries to prevent /// the compiler from optimizing the computation away even if the @@ -4322,38 +4316,32 @@ test "doNotOptimizeAway" { } test "alignForward" { - try testing.expect(alignForward(1, 1) == 1); - try testing.expect(alignForward(2, 1) == 2); - try testing.expect(alignForward(1, 2) == 2); - try testing.expect(alignForward(2, 2) == 2); - try testing.expect(alignForward(3, 2) == 4); - try testing.expect(alignForward(4, 2) == 4); - try testing.expect(alignForward(7, 8) == 8); - try testing.expect(alignForward(8, 8) == 8); - try testing.expect(alignForward(9, 8) == 16); - try testing.expect(alignForward(15, 8) == 16); - try testing.expect(alignForward(16, 8) == 16); - try testing.expect(alignForward(17, 8) == 24); + try testing.expect(alignForward(usize, 1, 1) == 1); + try testing.expect(alignForward(usize, 2, 1) == 2); + try testing.expect(alignForward(usize, 1, 2) == 2); + try testing.expect(alignForward(usize, 2, 2) == 2); + try testing.expect(alignForward(usize, 3, 2) == 4); + try testing.expect(alignForward(usize, 4, 2) == 4); + try testing.expect(alignForward(usize, 7, 8) == 8); + try testing.expect(alignForward(usize, 8, 8) == 8); + try testing.expect(alignForward(usize, 9, 8) == 16); + try testing.expect(alignForward(usize, 15, 8) == 16); + try testing.expect(alignForward(usize, 16, 8) == 16); + try testing.expect(alignForward(usize, 17, 8) == 24); } /// Round an address down to the previous (or current) aligned address. /// Unlike `alignBackward`, `alignment` can be any positive number, not just a power of 2. pub fn alignBackwardAnyAlign(i: usize, alignment: usize) usize { if (isValidAlign(alignment)) - return alignBackward(i, alignment); + return alignBackward(usize, i, alignment); assert(alignment != 0); return i - @mod(i, alignment); } /// Round an address down to the previous (or current) aligned address. /// The alignment must be a power of 2 and greater than 0. -pub fn alignBackward(addr: usize, alignment: usize) usize { - return alignBackwardGeneric(usize, addr, alignment); -} - -/// Round an address down to the previous (or current) aligned address. -/// The alignment must be a power of 2 and greater than 0. 
-pub fn alignBackwardGeneric(comptime T: type, addr: T, alignment: T) T { +pub fn alignBackward(comptime T: type, addr: T, alignment: T) T { assert(isValidAlignGeneric(T, alignment)); // 000010000 // example alignment // 000001111 // subtract 1 @@ -4361,6 +4349,8 @@ pub fn alignBackwardGeneric(comptime T: type, addr: T, alignment: T) T { return addr & ~(alignment - 1); } +pub const alignBackwardGeneric = @compileError("renamed to alignBackward"); + /// Returns whether `alignment` is a valid alignment, meaning it is /// a positive power of 2. pub fn isValidAlign(alignment: usize) bool { @@ -4391,7 +4381,7 @@ pub fn isAligned(addr: usize, alignment: usize) bool { } pub fn isAlignedGeneric(comptime T: type, addr: T, alignment: T) bool { - return alignBackwardGeneric(T, addr, alignment) == addr; + return alignBackward(T, addr, alignment) == addr; } test "isAligned" { @@ -4439,7 +4429,7 @@ pub fn alignInBytes(bytes: []u8, comptime new_alignment: usize) ?[]align(new_ali const begin_address = @ptrToInt(bytes.ptr); const end_address = begin_address + bytes.len; - const begin_address_aligned = mem.alignForward(begin_address, new_alignment); + const begin_address_aligned = mem.alignForward(usize, begin_address, new_alignment); const new_length = std.math.sub(usize, end_address, begin_address_aligned) catch |e| switch (e) { error.Overflow => return null, }; diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig index 5110534ed4..4a1ff86721 100644 --- a/lib/std/mem/Allocator.zig +++ b/lib/std/mem/Allocator.zig @@ -208,7 +208,7 @@ pub fn allocAdvancedWithRetAddr( comptime assert(a <= mem.page_size); if (n == 0) { - const ptr = comptime std.mem.alignBackward(math.maxInt(usize), a); + const ptr = comptime std.mem.alignBackward(usize, math.maxInt(usize), a); return @intToPtr([*]align(a) T, ptr)[0..0]; } @@ -267,7 +267,7 @@ pub fn reallocAdvanced( } if (new_n == 0) { self.free(old_mem); - const ptr = comptime std.mem.alignBackward(math.maxInt(usize), Slice.alignment); + const ptr = comptime std.mem.alignBackward(usize, math.maxInt(usize), Slice.alignment); return @intToPtr([*]align(Slice.alignment) T, ptr)[0..0]; } diff --git a/lib/std/meta/trailer_flags.zig b/lib/std/meta/trailer_flags.zig index 0c43a5ff28..a4d83dcbb3 100644 --- a/lib/std/meta/trailer_flags.zig +++ b/lib/std/meta/trailer_flags.zig @@ -105,9 +105,9 @@ pub fn TrailerFlags(comptime Fields: type) type { const active = (self.bits & (1 << i)) != 0; if (i == @enumToInt(field)) { assert(active); - return mem.alignForwardGeneric(usize, off, @alignOf(field_info.type)); + return mem.alignForward(usize, off, @alignOf(field_info.type)); } else if (active) { - off = mem.alignForwardGeneric(usize, off, @alignOf(field_info.type)); + off = mem.alignForward(usize, off, @alignOf(field_info.type)); off += @sizeOf(field_info.type); } } @@ -123,7 +123,7 @@ pub fn TrailerFlags(comptime Fields: type) type { if (@sizeOf(field.type) == 0) continue; if ((self.bits & (1 << i)) != 0) { - off = mem.alignForwardGeneric(usize, off, @alignOf(field.type)); + off = mem.alignForward(usize, off, @alignOf(field.type)); off += @sizeOf(field.type); } } diff --git a/lib/std/os/linux/tls.zig b/lib/std/os/linux/tls.zig index 311e5609e8..d765e403c8 100644 --- a/lib/std/os/linux/tls.zig +++ b/lib/std/os/linux/tls.zig @@ -233,7 +233,7 @@ fn initTLS(phdrs: []elf.Phdr) void { l += tls_align_factor - delta; l += @sizeOf(CustomData); tcb_offset = l; - l += mem.alignForward(tls_tcb_size, tls_align_factor); + l += mem.alignForward(usize, tls_tcb_size, tls_align_factor); 
data_offset = l; l += tls_data_alloc_size; break :blk l; @@ -241,14 +241,14 @@ fn initTLS(phdrs: []elf.Phdr) void { .VariantII => blk: { var l: usize = 0; data_offset = l; - l += mem.alignForward(tls_data_alloc_size, tls_align_factor); + l += mem.alignForward(usize, tls_data_alloc_size, tls_align_factor); // The thread pointer is aligned to p_align tcb_offset = l; l += tls_tcb_size; // The CustomData structure is right after the TCB with no padding // in between so it can be easily found l += @sizeOf(CustomData); - l = mem.alignForward(l, @alignOf(DTV)); + l = mem.alignForward(usize, l, @alignOf(DTV)); dtv_offset = l; l += @sizeOf(DTV); break :blk l; @@ -329,7 +329,7 @@ pub fn initStaticTLS(phdrs: []elf.Phdr) void { // Make sure the slice is correctly aligned. const begin_addr = @ptrToInt(alloc_tls_area.ptr); - const begin_aligned_addr = mem.alignForward(begin_addr, tls_image.alloc_align); + const begin_aligned_addr = mem.alignForward(usize, begin_addr, tls_image.alloc_align); const start = begin_aligned_addr - begin_addr; break :blk alloc_tls_area[start .. start + tls_image.alloc_size]; }; diff --git a/lib/std/os/uefi/pool_allocator.zig b/lib/std/os/uefi/pool_allocator.zig index 8f26aac32c..00b8941974 100644 --- a/lib/std/os/uefi/pool_allocator.zig +++ b/lib/std/os/uefi/pool_allocator.zig @@ -24,7 +24,7 @@ const UefiPoolAllocator = struct { const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align); - const metadata_len = mem.alignForward(@sizeOf(usize), ptr_align); + const metadata_len = mem.alignForward(usize, @sizeOf(usize), ptr_align); const full_len = metadata_len + len; @@ -32,7 +32,7 @@ const UefiPoolAllocator = struct { if (uefi.system_table.boot_services.?.allocatePool(uefi.efi_pool_memory_type, full_len, &unaligned_ptr) != .Success) return null; const unaligned_addr = @ptrToInt(unaligned_ptr); - const aligned_addr = mem.alignForward(unaligned_addr + @sizeOf(usize), ptr_align); + const aligned_addr = mem.alignForward(usize, unaligned_addr + @sizeOf(usize), ptr_align); var aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr); getHeader(aligned_ptr).* = unaligned_ptr; diff --git a/lib/std/tar.zig b/lib/std/tar.zig index c570c8e09c..14a9ce5d3f 100644 --- a/lib/std/tar.zig +++ b/lib/std/tar.zig @@ -116,7 +116,7 @@ pub fn pipeToFileSystem(dir: std.fs.Dir, reader: anytype, options: Options) !voi const header: Header = .{ .bytes = buffer[start..][0..512] }; start += 512; const file_size = try header.fileSize(); - const rounded_file_size = std.mem.alignForwardGeneric(u64, file_size, 512); + const rounded_file_size = std.mem.alignForward(u64, file_size, 512); const pad_len = @intCast(usize, rounded_file_size - file_size); const unstripped_file_name = try header.fullFileName(&file_name_buffer); switch (header.fileType()) { diff --git a/lib/std/target.zig b/lib/std/target.zig index 15bb65cd4b..4c7bcfc37a 100644 --- a/lib/std/target.zig +++ b/lib/std/target.zig @@ -1944,7 +1944,7 @@ pub const Target = struct { 16 => 2, 32 => 4, 64 => 8, - 80 => @intCast(u16, mem.alignForward(10, c_type_alignment(t, .longdouble))), + 80 => @intCast(u16, mem.alignForward(usize, 10, c_type_alignment(t, .longdouble))), 128 => 16, else => unreachable, }, diff --git a/lib/std/testing.zig b/lib/std/testing.zig index 6b1e0bb640..bbb0905121 100644 --- a/lib/std/testing.zig +++ b/lib/std/testing.zig @@ -305,7 +305,7 @@ pub fn expectEqualSlices(comptime T: type, expected: []const T, actual: []const var window_start: usize = 0; if (@max(actual.len, expected.len) > max_window_size) { const 
alignment = if (T == u8) 16 else 2; - window_start = std.mem.alignBackward(diff_index - @min(diff_index, alignment), alignment); + window_start = std.mem.alignBackward(usize, diff_index - @min(diff_index, alignment), alignment); } const expected_window = expected[window_start..@min(expected.len, window_start + max_window_size)]; const expected_truncated = window_start + expected_window.len < expected.len; diff --git a/src/Module.zig b/src/Module.zig index 8c5a86652d..8d9f9593dd 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1293,7 +1293,7 @@ pub const Union = struct { payload_align = @max(payload_align, 1); if (!have_tag or !u.tag_ty.hasRuntimeBits(mod)) { return .{ - .abi_size = std.mem.alignForwardGeneric(u64, payload_size, payload_align), + .abi_size = std.mem.alignForward(u64, payload_size, payload_align), .abi_align = payload_align, .most_aligned_field = most_aligned_field, .most_aligned_field_size = most_aligned_field_size, @@ -1314,18 +1314,18 @@ pub const Union = struct { if (tag_align >= payload_align) { // {Tag, Payload} size += tag_size; - size = std.mem.alignForwardGeneric(u64, size, payload_align); + size = std.mem.alignForward(u64, size, payload_align); size += payload_size; const prev_size = size; - size = std.mem.alignForwardGeneric(u64, size, tag_align); + size = std.mem.alignForward(u64, size, tag_align); padding = @intCast(u32, size - prev_size); } else { // {Payload, Tag} size += payload_size; - size = std.mem.alignForwardGeneric(u64, size, tag_align); + size = std.mem.alignForward(u64, size, tag_align); size += tag_size; const prev_size = size; - size = std.mem.alignForwardGeneric(u64, size, payload_align); + size = std.mem.alignForward(u64, size, payload_align); padding = @intCast(u32, size - prev_size); } return .{ diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index dd752555b7..1355f96231 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -566,7 +566,7 @@ fn gen(self: *Self) !void { // Backpatch stack offset const total_stack_size = self.max_end_stack + self.saved_regs_stack_space; - const aligned_total_stack_end = mem.alignForwardGeneric(u32, total_stack_size, self.stack_align); + const aligned_total_stack_end = mem.alignForward(u32, total_stack_size, self.stack_align); const stack_size = aligned_total_stack_end - self.saved_regs_stack_space; self.max_end_stack = stack_size; if (math.cast(u12, stack_size)) |size| { @@ -1011,7 +1011,7 @@ fn allocMem( std.math.ceilPowerOfTwoAssert(u32, abi_size); // TODO find a free slot instead of always appending - const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, adjusted_align) + abi_size; + const offset = mem.alignForward(u32, self.next_stack_offset, adjusted_align) + abi_size; self.next_stack_offset = offset; self.max_end_stack = @max(self.max_end_stack, self.next_stack_offset); @@ -6328,7 +6328,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { const param_size = @intCast(u32, ty.toType().abiSize(mod)); const param_alignment = ty.toType().abiAlignment(mod); - stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment); + stack_offset = std.mem.alignForward(u32, stack_offset, param_alignment); result.args[i] = .{ .stack_argument_offset = stack_offset }; stack_offset += param_size; } else { diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 69a156999b..a2a5a3d4d3 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -560,7 +560,7 @@ fn gen(self: *Self) !void { // 
Backpatch stack offset const total_stack_size = self.max_end_stack + self.saved_regs_stack_space; - const aligned_total_stack_end = mem.alignForwardGeneric(u32, total_stack_size, self.stack_align); + const aligned_total_stack_end = mem.alignForward(u32, total_stack_size, self.stack_align); const stack_size = aligned_total_stack_end - self.saved_regs_stack_space; self.max_end_stack = stack_size; self.mir_instructions.set(sub_reloc, .{ @@ -991,7 +991,7 @@ fn allocMem( assert(abi_align > 0); // TODO find a free slot instead of always appending - const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align) + abi_size; + const offset = mem.alignForward(u32, self.next_stack_offset, abi_align) + abi_size; self.next_stack_offset = offset; self.max_end_stack = @max(self.max_end_stack, self.next_stack_offset); @@ -6214,7 +6214,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { for (fn_info.param_types, 0..) |ty, i| { if (ty.toType().abiAlignment(mod) == 8) - ncrn = std.mem.alignForwardGeneric(usize, ncrn, 2); + ncrn = std.mem.alignForward(usize, ncrn, 2); const param_size = @intCast(u32, ty.toType().abiSize(mod)); if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) { @@ -6229,7 +6229,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } else { ncrn = 4; if (ty.toType().abiAlignment(mod) == 8) - nsaa = std.mem.alignForwardGeneric(u32, nsaa, 8); + nsaa = std.mem.alignForward(u32, nsaa, 8); result.args[i] = .{ .stack_argument_offset = nsaa }; nsaa += param_size; @@ -6267,7 +6267,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { const param_size = @intCast(u32, ty.toType().abiSize(mod)); const param_alignment = ty.toType().abiAlignment(mod); - stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment); + stack_offset = std.mem.alignForward(u32, stack_offset, param_alignment); result.args[i] = .{ .stack_argument_offset = stack_offset }; stack_offset += param_size; } else { diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig index e4a07f22bf..a4a4fe472b 100644 --- a/src/arch/arm/abi.zig +++ b/src/arch/arm/abi.zig @@ -13,7 +13,7 @@ pub const Class = union(enum) { i64_array: u8, fn arrSize(total_size: u64, arr_size: u64) Class { - const count = @intCast(u8, std.mem.alignForwardGeneric(u64, total_size, arr_size) / arr_size); + const count = @intCast(u8, std.mem.alignForward(u64, total_size, arr_size) / arr_size); if (arr_size == 32) { return .{ .i32_array = count }; } else { diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 809c388532..c6ac3255c6 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -792,7 +792,7 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u if (abi_align > self.stack_align) self.stack_align = abi_align; // TODO find a free slot instead of always appending - const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align); + const offset = mem.alignForward(u32, self.next_stack_offset, abi_align); self.next_stack_offset = offset + abi_size; if (self.next_stack_offset > self.max_end_stack) self.max_end_stack = self.next_stack_offset; diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index b660126604..e339794fd4 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -423,7 +423,7 @@ fn gen(self: *Self) !void { // Backpatch stack offset const total_stack_size = self.max_end_stack + 
abi.stack_reserved_area; - const stack_size = mem.alignForwardGeneric(u32, total_stack_size, self.stack_align); + const stack_size = mem.alignForward(u32, total_stack_size, self.stack_align); if (math.cast(i13, stack_size)) |size| { self.mir_instructions.set(save_inst, .{ .tag = .save, @@ -2781,7 +2781,7 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u if (abi_align > self.stack_align) self.stack_align = abi_align; // TODO find a free slot instead of always appending - const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align) + abi_size; + const offset = mem.alignForward(u32, self.next_stack_offset, abi_align) + abi_size; self.next_stack_offset = offset; if (self.next_stack_offset > self.max_end_stack) self.max_end_stack = self.next_stack_offset; diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index aa44dc2bc8..495ca7f6dd 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1286,7 +1286,7 @@ fn genFunc(func: *CodeGen) InnerError!void { // store stack pointer so we can restore it when we return from the function try prologue.append(.{ .tag = .local_tee, .data = .{ .label = func.initial_stack_value.local.value } }); // get the total stack size - const aligned_stack = std.mem.alignForwardGeneric(u32, func.stack_size, func.stack_alignment); + const aligned_stack = std.mem.alignForward(u32, func.stack_size, func.stack_alignment); try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(i32, aligned_stack) } }); // substract it from the current stack pointer try prologue.append(.{ .tag = .i32_sub, .data = .{ .tag = {} } }); @@ -1531,7 +1531,7 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue { func.stack_alignment = abi_align; } - const offset = std.mem.alignForwardGeneric(u32, func.stack_size, abi_align); + const offset = std.mem.alignForward(u32, func.stack_size, abi_align); defer func.stack_size = offset + abi_size; return WValue{ .stack_offset = .{ .value = offset, .references = 1 } }; @@ -1564,7 +1564,7 @@ fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue { func.stack_alignment = abi_alignment; } - const offset = std.mem.alignForwardGeneric(u32, func.stack_size, abi_alignment); + const offset = std.mem.alignForward(u32, func.stack_size, abi_alignment); defer func.stack_size = offset + abi_size; return WValue{ .stack_offset = .{ .value = offset, .references = 1 } }; @@ -2975,7 +2975,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue if (layout.payload_align > layout.tag_align) break :blk 0; // tag is stored first so calculate offset from where payload starts - break :blk @intCast(u32, std.mem.alignForwardGeneric(u64, layout.tag_size, layout.tag_align)); + break :blk @intCast(u32, std.mem.alignForward(u64, layout.tag_size, layout.tag_align)); }, }, .Pointer => switch (parent_ty.ptrSize(mod)) { diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 6e13a55008..a33faecca3 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2150,7 +2150,7 @@ fn setFrameLoc( const frame_i = @enumToInt(frame_index); if (aligned) { const alignment = @as(i32, 1) << self.frame_allocs.items(.abi_align)[frame_i]; - offset.* = mem.alignForwardGeneric(i32, offset.*, alignment); + offset.* = mem.alignForward(i32, offset.*, alignment); } self.frame_locs.set(frame_i, .{ .base = base, .disp = offset.* }); offset.* += self.frame_allocs.items(.abi_size)[frame_i]; @@ -2207,7 +2207,7 @@ fn computeFrameLayout(self: *Self) 
!FrameLayout { self.setFrameLoc(.stack_frame, .rsp, &rsp_offset, true); for (stack_frame_order) |frame_index| self.setFrameLoc(frame_index, .rsp, &rsp_offset, true); rsp_offset += stack_frame_align_offset; - rsp_offset = mem.alignForwardGeneric(i32, rsp_offset, @as(i32, 1) << needed_align); + rsp_offset = mem.alignForward(i32, rsp_offset, @as(i32, 1) << needed_align); rsp_offset -= stack_frame_align_offset; frame_size[@enumToInt(FrameIndex.call_frame)] = @intCast(u31, rsp_offset - frame_offset[@enumToInt(FrameIndex.stack_frame)]); @@ -11807,7 +11807,7 @@ fn resolveCallingConventionValues( const param_size = @intCast(u31, ty.abiSize(mod)); const param_align = @intCast(u31, ty.abiAlignment(mod)); result.stack_byte_count = - mem.alignForwardGeneric(u31, result.stack_byte_count, param_align); + mem.alignForward(u31, result.stack_byte_count, param_align); arg.* = .{ .load_frame = .{ .index = stack_frame_base, .off = result.stack_byte_count, @@ -11847,7 +11847,7 @@ fn resolveCallingConventionValues( const param_size = @intCast(u31, ty.abiSize(mod)); const param_align = @intCast(u31, ty.abiAlignment(mod)); result.stack_byte_count = - mem.alignForwardGeneric(u31, result.stack_byte_count, param_align); + mem.alignForward(u31, result.stack_byte_count, param_align); arg.* = .{ .load_frame = .{ .index = stack_frame_base, .off = result.stack_byte_count, @@ -11858,7 +11858,7 @@ fn resolveCallingConventionValues( else => return self.fail("TODO implement function parameters and return values for {} on x86_64", .{cc}), } - result.stack_byte_count = mem.alignForwardGeneric(u31, result.stack_byte_count, result.stack_align); + result.stack_byte_count = mem.alignForward(u31, result.stack_byte_count, result.stack_align); return result; } diff --git a/src/codegen.zig b/src/codegen.zig index 6145d8778b..430562fe9b 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -290,7 +290,7 @@ pub fn generateSymbol( .fail => |em| return .{ .fail = em }, } const unpadded_end = code.items.len - begin; - const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align); + const padded_end = mem.alignForward(u64, unpadded_end, abi_align); const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow; if (padding > 0) { @@ -303,7 +303,7 @@ pub fn generateSymbol( const begin = code.items.len; try code.writer().writeInt(u16, err_val, endian); const unpadded_end = code.items.len - begin; - const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align); + const padded_end = mem.alignForward(u64, unpadded_end, abi_align); const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow; if (padding > 0) { @@ -1020,7 +1020,7 @@ pub fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u64 { if (payload_align >= error_align or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return 0; } else { - return mem.alignForwardGeneric(u64, Type.anyerror.abiSize(mod), payload_align); + return mem.alignForward(u64, Type.anyerror.abiSize(mod), payload_align); } } @@ -1029,7 +1029,7 @@ pub fn errUnionErrorOffset(payload_ty: Type, mod: *Module) u64 { const payload_align = payload_ty.abiAlignment(mod); const error_align = Type.anyerror.abiAlignment(mod); if (payload_align >= error_align and payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - return mem.alignForwardGeneric(u64, payload_ty.abiSize(mod), error_align); + return mem.alignForward(u64, payload_ty.abiSize(mod), error_align); } else { return 0; } diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 
47be4148d3..11cd752000 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1633,7 +1633,7 @@ pub const Object = struct { var offset: u64 = 0; offset += ptr_size; - offset = std.mem.alignForwardGeneric(u64, offset, len_align); + offset = std.mem.alignForward(u64, offset, len_align); const len_offset = offset; const fields: [2]*llvm.DIType = .{ @@ -1801,7 +1801,7 @@ pub const Object = struct { var offset: u64 = 0; offset += payload_size; - offset = std.mem.alignForwardGeneric(u64, offset, non_null_align); + offset = std.mem.alignForward(u64, offset, non_null_align); const non_null_offset = offset; const fields: [2]*llvm.DIType = .{ @@ -1888,12 +1888,12 @@ pub const Object = struct { error_index = 0; payload_index = 1; error_offset = 0; - payload_offset = std.mem.alignForwardGeneric(u64, error_size, payload_align); + payload_offset = std.mem.alignForward(u64, error_size, payload_align); } else { payload_index = 0; error_index = 1; payload_offset = 0; - error_offset = std.mem.alignForwardGeneric(u64, payload_size, error_align); + error_offset = std.mem.alignForward(u64, payload_size, error_align); } var fields: [2]*llvm.DIType = undefined; @@ -1995,7 +1995,7 @@ pub const Object = struct { const field_size = field_ty.toType().abiSize(mod); const field_align = field_ty.toType().abiAlignment(mod); - const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align); + const field_offset = std.mem.alignForward(u64, offset, field_align); offset = field_offset + field_size; const field_name = if (tuple.names.len != 0) @@ -2086,7 +2086,7 @@ pub const Object = struct { const field = field_and_index.field; const field_size = field.ty.abiSize(mod); const field_align = field.alignment(mod, layout); - const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align); + const field_offset = std.mem.alignForward(u64, offset, field_align); offset = field_offset + field_size; const field_name = mod.intern_pool.stringToSlice(fields.keys()[field_and_index.index]); @@ -2242,10 +2242,10 @@ pub const Object = struct { var payload_offset: u64 = undefined; if (layout.tag_align >= layout.payload_align) { tag_offset = 0; - payload_offset = std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align); + payload_offset = std.mem.alignForward(u64, layout.tag_size, layout.payload_align); } else { payload_offset = 0; - tag_offset = std.mem.alignForwardGeneric(u64, layout.payload_size, layout.tag_align); + tag_offset = std.mem.alignForward(u64, layout.payload_size, layout.tag_align); } const tag_di = dib.createMemberType( @@ -2861,9 +2861,9 @@ pub const DeclGen = struct { fields_buf[0] = llvm_error_type; fields_buf[1] = llvm_payload_type; const payload_end = - std.mem.alignForwardGeneric(u64, error_size, payload_align) + + std.mem.alignForward(u64, error_size, payload_align) + payload_size; - const abi_size = std.mem.alignForwardGeneric(u64, payload_end, error_align); + const abi_size = std.mem.alignForward(u64, payload_end, error_align); const padding = @intCast(c_uint, abi_size - payload_end); if (padding == 0) { return dg.context.structType(&fields_buf, 2, .False); @@ -2874,9 +2874,9 @@ pub const DeclGen = struct { fields_buf[0] = llvm_payload_type; fields_buf[1] = llvm_error_type; const error_end = - std.mem.alignForwardGeneric(u64, payload_size, error_align) + + std.mem.alignForward(u64, payload_size, error_align) + error_size; - const abi_size = std.mem.alignForwardGeneric(u64, error_end, payload_align); + const abi_size = std.mem.alignForward(u64, error_end, payload_align); 
const padding = @intCast(c_uint, abi_size - error_end); if (padding == 0) { return dg.context.structType(&fields_buf, 2, .False); @@ -2910,7 +2910,7 @@ pub const DeclGen = struct { const field_align = field_ty.toType().abiAlignment(mod); big_align = @max(big_align, field_align); const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + offset = std.mem.alignForward(u64, offset, field_align); const padding_len = offset - prev_offset; if (padding_len > 0) { @@ -2924,7 +2924,7 @@ pub const DeclGen = struct { } { const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); + offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); @@ -2979,7 +2979,7 @@ pub const DeclGen = struct { field_align < field_ty_align; big_align = @max(big_align, field_align); const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + offset = std.mem.alignForward(u64, offset, field_align); const padding_len = offset - prev_offset; if (padding_len > 0) { @@ -2993,7 +2993,7 @@ pub const DeclGen = struct { } { const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); + offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); @@ -3552,7 +3552,7 @@ pub const DeclGen = struct { const field_align = field_ty.toType().abiAlignment(mod); big_align = @max(big_align, field_align); const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + offset = std.mem.alignForward(u64, offset, field_align); const padding_len = offset - prev_offset; if (padding_len > 0) { @@ -3575,7 +3575,7 @@ pub const DeclGen = struct { } { const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); + offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); @@ -3650,7 +3650,7 @@ pub const DeclGen = struct { const field_align = field.alignment(mod, struct_obj.layout); big_align = @max(big_align, field_align); const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + offset = std.mem.alignForward(u64, offset, field_align); const padding_len = offset - prev_offset; if (padding_len > 0) { @@ -3673,7 +3673,7 @@ pub const DeclGen = struct { } { const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); + offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); @@ -10274,7 +10274,7 @@ fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField { const field_align = field_ty.toType().abiAlignment(mod); big_align = @max(big_align, field_align); const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + offset = std.mem.alignForward(u64, offset, field_align); const padding_len = offset - prev_offset; if (padding_len > 0) { @@ -10308,7 +10308,7 @@ fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField { const field_align = field.alignment(mod, layout); 
big_align = @max(big_align, field_align); const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + offset = std.mem.alignForward(u64, offset, field_align); const padding_len = offset - prev_offset; if (padding_len > 0) { diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 4fd91aded4..dc1f23dad4 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -472,12 +472,12 @@ pub const DeclGen = struct { try self.initializers.append(result_id); self.partial_word.len = 0; - self.size = std.mem.alignForwardGeneric(u32, self.size, @sizeOf(Word)); + self.size = std.mem.alignForward(u32, self.size, @sizeOf(Word)); } /// Fill the buffer with undefined values until the size is aligned to `align`. fn fillToAlign(self: *@This(), alignment: u32) !void { - const target_size = std.mem.alignForwardGeneric(u32, self.size, alignment); + const target_size = std.mem.alignForward(u32, self.size, alignment); try self.addUndef(target_size - self.size); } diff --git a/src/link/Coff.zig b/src/link/Coff.zig index f7785858dd..202bb71e9b 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -437,10 +437,10 @@ fn allocateSection(self: *Coff, name: []const u8, size: u32, flags: coff.Section const vaddr = blk: { if (index == 0) break :blk self.page_size; const prev_header = self.sections.items(.header)[index - 1]; - break :blk mem.alignForwardGeneric(u32, prev_header.virtual_address + prev_header.virtual_size, self.page_size); + break :blk mem.alignForward(u32, prev_header.virtual_address + prev_header.virtual_size, self.page_size); }; // We commit more memory than needed upfront so that we don't have to reallocate too soon. - const memsz = mem.alignForwardGeneric(u32, size, self.page_size) * 100; + const memsz = mem.alignForward(u32, size, self.page_size) * 100; log.debug("found {s} free space 0x{x} to 0x{x} (0x{x} - 0x{x})", .{ name, off, @@ -505,8 +505,8 @@ fn growSection(self: *Coff, sect_id: u32, needed_size: u32) !void { fn growSectionVirtualMemory(self: *Coff, sect_id: u32, needed_size: u32) !void { const header = &self.sections.items(.header)[sect_id]; const increased_size = padToIdeal(needed_size); - const old_aligned_end = header.virtual_address + mem.alignForwardGeneric(u32, header.virtual_size, self.page_size); - const new_aligned_end = header.virtual_address + mem.alignForwardGeneric(u32, increased_size, self.page_size); + const old_aligned_end = header.virtual_address + mem.alignForward(u32, header.virtual_size, self.page_size); + const new_aligned_end = header.virtual_address + mem.alignForward(u32, increased_size, self.page_size); const diff = new_aligned_end - old_aligned_end; log.debug("growing {s} in virtual memory by {x}", .{ self.getSectionName(header), diff }); @@ -567,7 +567,7 @@ fn allocateAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignme const ideal_capacity_end_vaddr = math.add(u32, sym.value, ideal_capacity) catch ideal_capacity; const capacity_end_vaddr = sym.value + capacity; const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity; - const new_start_vaddr = mem.alignBackwardGeneric(u32, new_start_vaddr_unaligned, alignment); + const new_start_vaddr = mem.alignBackward(u32, new_start_vaddr_unaligned, alignment); if (new_start_vaddr < ideal_capacity_end_vaddr) { // Additional bookkeeping here to notice if this free list node // should be deleted because the atom that it points to has grown to take up @@ -596,11 +596,11 @@ fn allocateAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: 
u32, alignme const last_symbol = last.getSymbol(self); const ideal_capacity = if (header.isCode()) padToIdeal(last.size) else last.size; const ideal_capacity_end_vaddr = last_symbol.value + ideal_capacity; - const new_start_vaddr = mem.alignForwardGeneric(u32, ideal_capacity_end_vaddr, alignment); + const new_start_vaddr = mem.alignForward(u32, ideal_capacity_end_vaddr, alignment); atom_placement = last_index; break :blk new_start_vaddr; } else { - break :blk mem.alignForwardGeneric(u32, header.virtual_address, alignment); + break :blk mem.alignForward(u32, header.virtual_address, alignment); } }; @@ -722,7 +722,7 @@ pub fn createAtom(self: *Coff) !Atom.Index { fn growAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignment: u32) !u32 { const atom = self.getAtom(atom_index); const sym = atom.getSymbol(self); - const align_ok = mem.alignBackwardGeneric(u32, sym.value, alignment) == sym.value; + const align_ok = mem.alignBackward(u32, sym.value, alignment) == sym.value; const need_realloc = !align_ok or new_atom_size > atom.capacity(self); if (!need_realloc) return sym.value; return self.allocateAtom(atom_index, new_atom_size, alignment); @@ -1798,7 +1798,7 @@ fn writeBaseRelocations(self: *Coff) !void { for (offsets.items) |offset| { const rva = sym.value + offset; - const page = mem.alignBackwardGeneric(u32, rva, self.page_size); + const page = mem.alignBackward(u32, rva, self.page_size); const gop = try page_table.getOrPut(page); if (!gop.found_existing) { gop.value_ptr.* = std.ArrayList(coff.BaseRelocation).init(gpa); @@ -1819,7 +1819,7 @@ fn writeBaseRelocations(self: *Coff) !void { if (sym.section_number == .UNDEFINED) continue; const rva = @intCast(u32, header.virtual_address + index * self.ptr_width.size()); - const page = mem.alignBackwardGeneric(u32, rva, self.page_size); + const page = mem.alignBackward(u32, rva, self.page_size); const gop = try page_table.getOrPut(page); if (!gop.found_existing) { gop.value_ptr.* = std.ArrayList(coff.BaseRelocation).init(gpa); @@ -1907,7 +1907,7 @@ fn writeImportTables(self: *Coff) !void { lookup_table_size += @intCast(u32, itable.entries.items.len + 1) * @sizeOf(coff.ImportLookupEntry64.ByName); for (itable.entries.items) |entry| { const sym_name = self.getSymbolName(entry); - names_table_size += 2 + mem.alignForwardGeneric(u32, @intCast(u32, sym_name.len + 1), 2); + names_table_size += 2 + mem.alignForward(u32, @intCast(u32, sym_name.len + 1), 2); } dll_names_size += @intCast(u32, lib_name.len + ext.len + 1); } @@ -2102,7 +2102,7 @@ fn writeHeader(self: *Coff) !void { }; const subsystem: coff.Subsystem = .WINDOWS_CUI; const size_of_image: u32 = self.getSizeOfImage(); - const size_of_headers: u32 = mem.alignForwardGeneric(u32, self.getSizeOfHeaders(), default_file_alignment); + const size_of_headers: u32 = mem.alignForward(u32, self.getSizeOfHeaders(), default_file_alignment); const image_base = self.getImageBase(); const base_of_code = self.sections.get(self.text_section_index.?).header.virtual_address; @@ -2247,7 +2247,7 @@ fn allocatedSize(self: *Coff, start: u32) u32 { fn findFreeSpace(self: *Coff, object_size: u32, min_alignment: u32) u32 { var start: u32 = 0; while (self.detectAllocCollision(start, object_size)) |item_end| { - start = mem.alignForwardGeneric(u32, item_end, min_alignment); + start = mem.alignForward(u32, item_end, min_alignment); } return start; } @@ -2294,9 +2294,9 @@ inline fn getSectionHeadersOffset(self: Coff) u32 { } inline fn getSizeOfImage(self: Coff) u32 { - var image_size: u32 = 
mem.alignForwardGeneric(u32, self.getSizeOfHeaders(), self.page_size); + var image_size: u32 = mem.alignForward(u32, self.getSizeOfHeaders(), self.page_size); for (self.sections.items(.header)) |header| { - image_size += mem.alignForwardGeneric(u32, header.virtual_size, self.page_size); + image_size += mem.alignForward(u32, header.virtual_size, self.page_size); } return image_size; } diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index b9b7772260..3cb1c213e9 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -2152,7 +2152,7 @@ pub fn writeDbgAranges(self: *Dwarf, addr: u64, size: u64) !void { di_buf.appendAssumeCapacity(0); // segment_selector_size const end_header_offset = di_buf.items.len; - const begin_entries_offset = mem.alignForward(end_header_offset, ptr_width_bytes * 2); + const begin_entries_offset = mem.alignForward(usize, end_header_offset, ptr_width_bytes * 2); di_buf.appendNTimesAssumeCapacity(0, begin_entries_offset - end_header_offset); // Currently only one compilation unit is supported, so the address range is simply diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 15ba9ebecc..e0d0dfc75f 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -439,7 +439,7 @@ pub fn allocatedSize(self: *Elf, start: u64) u64 { pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u32) u64 { var start: u64 = 0; while (self.detectAllocCollision(start, object_size)) |item_end| { - start = mem.alignForwardGeneric(u64, item_end, min_alignment); + start = mem.alignForward(u64, item_end, min_alignment); } return start; } @@ -1173,7 +1173,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node phdr_table.p_offset = self.findFreeSpace(needed_size, @intCast(u32, phdr_table.p_align)); } - phdr_table_load.p_offset = mem.alignBackwardGeneric(u64, phdr_table.p_offset, phdr_table_load.p_align); + phdr_table_load.p_offset = mem.alignBackward(u64, phdr_table.p_offset, phdr_table_load.p_align); const load_align_offset = phdr_table.p_offset - phdr_table_load.p_offset; phdr_table_load.p_filesz = load_align_offset + needed_size; phdr_table_load.p_memsz = load_align_offset + needed_size; @@ -2215,7 +2215,7 @@ fn shrinkAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64) void { fn growAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignment: u64) !u64 { const atom = self.getAtom(atom_index); const sym = atom.getSymbol(self); - const align_ok = mem.alignBackwardGeneric(u64, sym.st_value, alignment) == sym.st_value; + const align_ok = mem.alignBackward(u64, sym.st_value, alignment) == sym.st_value; const need_realloc = !align_ok or new_block_size > atom.capacity(self); if (!need_realloc) return sym.st_value; return self.allocateAtom(atom_index, new_block_size, alignment); @@ -2269,7 +2269,7 @@ fn allocateAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignme const ideal_capacity_end_vaddr = std.math.add(u64, big_atom_sym.st_value, ideal_capacity) catch ideal_capacity; const capacity_end_vaddr = big_atom_sym.st_value + capacity; const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity; - const new_start_vaddr = mem.alignBackwardGeneric(u64, new_start_vaddr_unaligned, alignment); + const new_start_vaddr = mem.alignBackward(u64, new_start_vaddr_unaligned, alignment); if (new_start_vaddr < ideal_capacity_end_vaddr) { // Additional bookkeeping here to notice if this free list node // should be deleted because the block that it points to has grown to take up @@ -2298,7 +2298,7 @@ fn 
allocateAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignme const last_sym = last.getSymbol(self); const ideal_capacity = padToIdeal(last_sym.st_size); const ideal_capacity_end_vaddr = last_sym.st_value + ideal_capacity; - const new_start_vaddr = mem.alignForwardGeneric(u64, ideal_capacity_end_vaddr, alignment); + const new_start_vaddr = mem.alignForward(u64, ideal_capacity_end_vaddr, alignment); // Set up the metadata to be updated, after errors are no longer possible. atom_placement = last_index; break :blk new_start_vaddr; diff --git a/src/link/MachO.zig b/src/link/MachO.zig index a3f67bc70a..024fe1f8d9 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1777,7 +1777,7 @@ fn shrinkAtom(self: *MachO, atom_index: Atom.Index, new_block_size: u64) void { fn growAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: u64) !u64 { const atom = self.getAtom(atom_index); const sym = atom.getSymbol(self); - const align_ok = mem.alignBackwardGeneric(u64, sym.n_value, alignment) == sym.n_value; + const align_ok = mem.alignBackward(u64, sym.n_value, alignment) == sym.n_value; const need_realloc = !align_ok or new_atom_size > atom.capacity(self); if (!need_realloc) return sym.n_value; return self.allocateAtom(atom_index, new_atom_size, alignment); @@ -2598,7 +2598,7 @@ fn populateMissingMetadata(self: *MachO) !void { // The first __TEXT segment is immovable and covers MachO header and load commands. self.header_segment_cmd_index = @intCast(u8, self.segments.items.len); const ideal_size = @max(self.base.options.headerpad_size orelse 0, default_headerpad_size); - const needed_size = mem.alignForwardGeneric(u64, padToIdeal(ideal_size), self.page_size); + const needed_size = mem.alignForward(u64, padToIdeal(ideal_size), self.page_size); log.debug("found __TEXT segment (header-only) free space 0x{x} to 0x{x}", .{ 0, needed_size }); @@ -2735,7 +2735,7 @@ fn populateMissingMetadata(self: *MachO) !void { fn calcPagezeroSize(self: *MachO) u64 { const pagezero_vmsize = self.base.options.pagezero_size orelse default_pagezero_vmsize; - const aligned_pagezero_vmsize = mem.alignBackwardGeneric(u64, pagezero_vmsize, self.page_size); + const aligned_pagezero_vmsize = mem.alignBackward(u64, pagezero_vmsize, self.page_size); if (self.base.options.output_mode == .Lib) return 0; if (aligned_pagezero_vmsize == 0) return 0; if (aligned_pagezero_vmsize != pagezero_vmsize) { @@ -2759,10 +2759,10 @@ fn allocateSection(self: *MachO, segname: []const u8, sectname: []const u8, opts const section_id = @intCast(u8, self.sections.slice().len); const vmaddr = blk: { const prev_segment = self.segments.items[segment_id - 1]; - break :blk mem.alignForwardGeneric(u64, prev_segment.vmaddr + prev_segment.vmsize, self.page_size); + break :blk mem.alignForward(u64, prev_segment.vmaddr + prev_segment.vmsize, self.page_size); }; // We commit more memory than needed upfront so that we don't have to reallocate too soon. 
- const vmsize = mem.alignForwardGeneric(u64, opts.size, self.page_size); + const vmsize = mem.alignForward(u64, opts.size, self.page_size); const off = self.findFreeSpace(opts.size, self.page_size); log.debug("found {s},{s} free space 0x{x} to 0x{x} (0x{x} - 0x{x})", .{ @@ -2790,8 +2790,8 @@ fn allocateSection(self: *MachO, segname: []const u8, sectname: []const u8, opts var section = macho.section_64{ .sectname = makeStaticString(sectname), .segname = makeStaticString(segname), - .addr = mem.alignForwardGeneric(u64, vmaddr, opts.alignment), - .offset = mem.alignForwardGeneric(u32, @intCast(u32, off), opts.alignment), + .addr = mem.alignForward(u64, vmaddr, opts.alignment), + .offset = mem.alignForward(u32, @intCast(u32, off), opts.alignment), .size = opts.size, .@"align" = math.log2(opts.alignment), .flags = opts.flags, @@ -2846,8 +2846,8 @@ fn growSection(self: *MachO, sect_id: u8, needed_size: u64) !void { } header.size = needed_size; - segment.filesize = mem.alignForwardGeneric(u64, needed_size, self.page_size); - segment.vmsize = mem.alignForwardGeneric(u64, needed_size, self.page_size); + segment.filesize = mem.alignForward(u64, needed_size, self.page_size); + segment.vmsize = mem.alignForward(u64, needed_size, self.page_size); } fn growSectionVirtualMemory(self: *MachO, sect_id: u8, needed_size: u64) !void { @@ -2855,7 +2855,7 @@ fn growSectionVirtualMemory(self: *MachO, sect_id: u8, needed_size: u64) !void { const segment = self.getSegmentPtr(sect_id); const increased_size = padToIdeal(needed_size); const old_aligned_end = segment.vmaddr + segment.vmsize; - const new_aligned_end = segment.vmaddr + mem.alignForwardGeneric(u64, increased_size, self.page_size); + const new_aligned_end = segment.vmaddr + mem.alignForward(u64, increased_size, self.page_size); const diff = new_aligned_end - old_aligned_end; log.debug("shifting every segment after {s},{s} in virtual memory by {x}", .{ header.segName(), @@ -2927,7 +2927,7 @@ fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignm const ideal_capacity_end_vaddr = math.add(u64, sym.n_value, ideal_capacity) catch ideal_capacity; const capacity_end_vaddr = sym.n_value + capacity; const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity; - const new_start_vaddr = mem.alignBackwardGeneric(u64, new_start_vaddr_unaligned, alignment); + const new_start_vaddr = mem.alignBackward(u64, new_start_vaddr_unaligned, alignment); if (new_start_vaddr < ideal_capacity_end_vaddr) { // Additional bookkeeping here to notice if this free list node // should be deleted because the atom that it points to has grown to take up @@ -2956,11 +2956,11 @@ fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignm const last_symbol = last.getSymbol(self); const ideal_capacity = if (requires_padding) padToIdeal(last.size) else last.size; const ideal_capacity_end_vaddr = last_symbol.n_value + ideal_capacity; - const new_start_vaddr = mem.alignForwardGeneric(u64, ideal_capacity_end_vaddr, alignment); + const new_start_vaddr = mem.alignForward(u64, ideal_capacity_end_vaddr, alignment); atom_placement = last_index; break :blk new_start_vaddr; } else { - break :blk mem.alignForwardGeneric(u64, segment.vmaddr, alignment); + break :blk mem.alignForward(u64, segment.vmaddr, alignment); } }; @@ -3034,17 +3034,17 @@ fn writeLinkeditSegmentData(self: *MachO) !void { for (self.segments.items, 0..) |segment, id| { if (self.linkedit_segment_cmd_index.? 
== @intCast(u8, id)) continue; if (seg.vmaddr < segment.vmaddr + segment.vmsize) { - seg.vmaddr = mem.alignForwardGeneric(u64, segment.vmaddr + segment.vmsize, self.page_size); + seg.vmaddr = mem.alignForward(u64, segment.vmaddr + segment.vmsize, self.page_size); } if (seg.fileoff < segment.fileoff + segment.filesize) { - seg.fileoff = mem.alignForwardGeneric(u64, segment.fileoff + segment.filesize, self.page_size); + seg.fileoff = mem.alignForward(u64, segment.fileoff + segment.filesize, self.page_size); } } try self.writeDyldInfoData(); try self.writeSymtabs(); - seg.vmsize = mem.alignForwardGeneric(u64, seg.filesize, self.page_size); + seg.vmsize = mem.alignForward(u64, seg.filesize, self.page_size); } fn collectRebaseDataFromTableSection(self: *MachO, sect_id: u8, rebase: *Rebase, table: anytype) !void { @@ -3236,17 +3236,17 @@ fn writeDyldInfoData(self: *MachO) !void { assert(mem.isAlignedGeneric(u64, link_seg.fileoff, @alignOf(u64))); const rebase_off = link_seg.fileoff; const rebase_size = rebase.size(); - const rebase_size_aligned = mem.alignForwardGeneric(u64, rebase_size, @alignOf(u64)); + const rebase_size_aligned = mem.alignForward(u64, rebase_size, @alignOf(u64)); log.debug("writing rebase info from 0x{x} to 0x{x}", .{ rebase_off, rebase_off + rebase_size_aligned }); const bind_off = rebase_off + rebase_size_aligned; const bind_size = bind.size(); - const bind_size_aligned = mem.alignForwardGeneric(u64, bind_size, @alignOf(u64)); + const bind_size_aligned = mem.alignForward(u64, bind_size, @alignOf(u64)); log.debug("writing bind info from 0x{x} to 0x{x}", .{ bind_off, bind_off + bind_size_aligned }); const lazy_bind_off = bind_off + bind_size_aligned; const lazy_bind_size = lazy_bind.size(); - const lazy_bind_size_aligned = mem.alignForwardGeneric(u64, lazy_bind_size, @alignOf(u64)); + const lazy_bind_size_aligned = mem.alignForward(u64, lazy_bind_size, @alignOf(u64)); log.debug("writing lazy bind info from 0x{x} to 0x{x}", .{ lazy_bind_off, lazy_bind_off + lazy_bind_size_aligned, @@ -3254,7 +3254,7 @@ fn writeDyldInfoData(self: *MachO) !void { const export_off = lazy_bind_off + lazy_bind_size_aligned; const export_size = trie.size; - const export_size_aligned = mem.alignForwardGeneric(u64, export_size, @alignOf(u64)); + const export_size_aligned = mem.alignForward(u64, export_size, @alignOf(u64)); log.debug("writing export trie from 0x{x} to 0x{x}", .{ export_off, export_off + export_size_aligned }); const needed_size = math.cast(usize, export_off + export_size_aligned - rebase_off) orelse @@ -3412,7 +3412,7 @@ fn writeStrtab(self: *MachO) !void { const offset = seg.fileoff + seg.filesize; assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64))); const needed_size = self.strtab.buffer.items.len; - const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64)); + const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64)); seg.filesize = offset + needed_size_aligned - seg.fileoff; log.debug("writing string table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned }); @@ -3447,7 +3447,7 @@ fn writeDysymtab(self: *MachO, ctx: SymtabCtx) !void { const offset = seg.fileoff + seg.filesize; assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64))); const needed_size = nindirectsyms * @sizeOf(u32); - const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64)); + const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64)); seg.filesize = offset + needed_size_aligned - seg.fileoff; 
log.debug("writing indirect symbol table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned }); @@ -3514,10 +3514,10 @@ fn writeCodeSignaturePadding(self: *MachO, code_sig: *CodeSignature) !void { const seg = self.getLinkeditSegmentPtr(); // Code signature data has to be 16-bytes aligned for Apple tools to recognize the file // https://github.com/opensource-apple/cctools/blob/fdb4825f303fd5c0751be524babd32958181b3ed/libstuff/checkout.c#L271 - const offset = mem.alignForwardGeneric(u64, seg.fileoff + seg.filesize, 16); + const offset = mem.alignForward(u64, seg.fileoff + seg.filesize, 16); const needed_size = code_sig.estimateSize(offset); seg.filesize = offset + needed_size - seg.fileoff; - seg.vmsize = mem.alignForwardGeneric(u64, seg.filesize, self.page_size); + seg.vmsize = mem.alignForward(u64, seg.filesize, self.page_size); log.debug("writing code signature padding from 0x{x} to 0x{x}", .{ offset, offset + needed_size }); // Pad out the space. We need to do this to calculate valid hashes for everything in the file // except for code signature data. @@ -3630,7 +3630,7 @@ fn allocatedSize(self: *MachO, start: u64) u64 { fn findFreeSpace(self: *MachO, object_size: u64, min_alignment: u32) u64 { var start: u64 = 0; while (self.detectAllocCollision(start, object_size)) |item_end| { - start = mem.alignForwardGeneric(u64, item_end, min_alignment); + start = mem.alignForward(u64, item_end, min_alignment); } return start; } diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig index 4709560ba7..02511dbe29 100644 --- a/src/link/MachO/CodeSignature.zig +++ b/src/link/MachO/CodeSignature.zig @@ -282,7 +282,7 @@ pub fn writeAdhocSignature( self.code_directory.inner.execSegFlags = if (opts.output_mode == .Exe) macho.CS_EXECSEG_MAIN_BINARY else 0; self.code_directory.inner.codeLimit = opts.file_size; - const total_pages = @intCast(u32, mem.alignForward(opts.file_size, self.page_size) / self.page_size); + const total_pages = @intCast(u32, mem.alignForward(usize, opts.file_size, self.page_size) / self.page_size); try self.code_directory.code_slots.ensureTotalCapacityPrecise(gpa, total_pages); self.code_directory.code_slots.items.len = total_pages; @@ -357,7 +357,7 @@ fn parallelHash( ) !void { var wg: WaitGroup = .{}; - const total_num_chunks = mem.alignForward(file_size, self.page_size) / self.page_size; + const total_num_chunks = mem.alignForward(usize, file_size, self.page_size) / self.page_size; assert(self.code_directory.code_slots.items.len >= total_num_chunks); const buffer = try gpa.alloc(u8, self.page_size * total_num_chunks); @@ -421,7 +421,7 @@ pub fn size(self: CodeSignature) u32 { pub fn estimateSize(self: CodeSignature, file_size: u64) u32 { var ssize: u64 = @sizeOf(macho.SuperBlob) + @sizeOf(macho.BlobIndex) + self.code_directory.size(); // Approx code slots - const total_pages = mem.alignForwardGeneric(u64, file_size, self.page_size) / self.page_size; + const total_pages = mem.alignForward(u64, file_size, self.page_size) / self.page_size; ssize += total_pages * hash_size; var n_special_slots: u32 = 0; if (self.requirements) |req| { @@ -436,7 +436,7 @@ pub fn estimateSize(self: CodeSignature, file_size: u64) u32 { ssize += @sizeOf(macho.BlobIndex) + sig.size(); } ssize += n_special_slots * hash_size; - return @intCast(u32, mem.alignForwardGeneric(u64, ssize, @sizeOf(u64))); + return @intCast(u32, mem.alignForward(u64, ssize, @sizeOf(u64))); } pub fn clear(self: *CodeSignature, allocator: Allocator) void { diff --git a/src/link/MachO/DebugSymbols.zig 
b/src/link/MachO/DebugSymbols.zig index 24a0c9ea34..fdb8c9c816 100644 --- a/src/link/MachO/DebugSymbols.zig +++ b/src/link/MachO/DebugSymbols.zig @@ -68,7 +68,7 @@ pub fn populateMissingMetadata(self: *DebugSymbols) !void { const off = @intCast(u64, self.page_size); const ideal_size: u16 = 200 + 128 + 160 + 250; - const needed_size = mem.alignForwardGeneric(u64, padToIdeal(ideal_size), self.page_size); + const needed_size = mem.alignForward(u64, padToIdeal(ideal_size), self.page_size); log.debug("found __DWARF segment free space 0x{x} to 0x{x}", .{ off, off + needed_size }); @@ -213,7 +213,7 @@ fn findFreeSpace(self: *DebugSymbols, object_size: u64, min_alignment: u64) u64 const segment = self.getDwarfSegmentPtr(); var offset: u64 = segment.fileoff; while (self.detectAllocCollision(offset, object_size)) |item_end| { - offset = mem.alignForwardGeneric(u64, item_end, min_alignment); + offset = mem.alignForward(u64, item_end, min_alignment); } return offset; } @@ -355,18 +355,18 @@ fn finalizeDwarfSegment(self: *DebugSymbols, macho_file: *MachO) void { file_size = @max(file_size, header.offset + header.size); } - const aligned_size = mem.alignForwardGeneric(u64, file_size, self.page_size); + const aligned_size = mem.alignForward(u64, file_size, self.page_size); dwarf_segment.vmaddr = base_vmaddr; dwarf_segment.filesize = aligned_size; dwarf_segment.vmsize = aligned_size; const linkedit = self.getLinkeditSegmentPtr(); - linkedit.vmaddr = mem.alignForwardGeneric( + linkedit.vmaddr = mem.alignForward( u64, dwarf_segment.vmaddr + aligned_size, self.page_size, ); - linkedit.fileoff = mem.alignForwardGeneric( + linkedit.fileoff = mem.alignForward( u64, dwarf_segment.fileoff + aligned_size, self.page_size, @@ -458,7 +458,7 @@ fn writeLinkeditSegmentData(self: *DebugSymbols, macho_file: *MachO) !void { try self.writeStrtab(); const seg = &self.segments.items[self.linkedit_segment_cmd_index.?]; - const aligned_size = mem.alignForwardGeneric(u64, seg.filesize, self.page_size); + const aligned_size = mem.alignForward(u64, seg.filesize, self.page_size); seg.vmsize = aligned_size; } @@ -497,7 +497,7 @@ fn writeSymtab(self: *DebugSymbols, macho_file: *MachO) !void { const nsyms = nlocals + nexports; const seg = &self.segments.items[self.linkedit_segment_cmd_index.?]; - const offset = mem.alignForwardGeneric(u64, seg.fileoff, @alignOf(macho.nlist_64)); + const offset = mem.alignForward(u64, seg.fileoff, @alignOf(macho.nlist_64)); const needed_size = nsyms * @sizeOf(macho.nlist_64); seg.filesize = offset + needed_size - seg.fileoff; @@ -522,8 +522,8 @@ fn writeStrtab(self: *DebugSymbols) !void { const seg = &self.segments.items[self.linkedit_segment_cmd_index.?]; const symtab_size = @intCast(u32, self.symtab_cmd.nsyms * @sizeOf(macho.nlist_64)); - const offset = mem.alignForwardGeneric(u64, self.symtab_cmd.symoff + symtab_size, @alignOf(u64)); - const needed_size = mem.alignForwardGeneric(u64, self.strtab.buffer.items.len, @alignOf(u64)); + const offset = mem.alignForward(u64, self.symtab_cmd.symoff + symtab_size, @alignOf(u64)); + const needed_size = mem.alignForward(u64, self.strtab.buffer.items.len, @alignOf(u64)); seg.filesize = offset + needed_size - seg.fileoff; self.symtab_cmd.stroff = @intCast(u32, offset); diff --git a/src/link/MachO/load_commands.zig b/src/link/MachO/load_commands.zig index 228a1ccfaf..5111f53f2a 100644 --- a/src/link/MachO/load_commands.zig +++ b/src/link/MachO/load_commands.zig @@ -17,7 +17,7 @@ pub const default_dyld_path: [*:0]const u8 = "/usr/lib/dyld"; fn 
calcInstallNameLen(cmd_size: u64, name: []const u8, assume_max_path_len: bool) u64 { const darwin_path_max = 1024; const name_len = if (assume_max_path_len) darwin_path_max else name.len + 1; - return mem.alignForwardGeneric(u64, cmd_size + name_len, @alignOf(u64)); + return mem.alignForward(u64, cmd_size + name_len, @alignOf(u64)); } const CalcLCsSizeCtx = struct { @@ -149,7 +149,7 @@ pub fn calcNumOfLCs(lc_buffer: []const u8) u32 { pub fn writeDylinkerLC(lc_writer: anytype) !void { const name_len = mem.sliceTo(default_dyld_path, 0).len; - const cmdsize = @intCast(u32, mem.alignForwardGeneric( + const cmdsize = @intCast(u32, mem.alignForward( u64, @sizeOf(macho.dylinker_command) + name_len, @sizeOf(u64), @@ -176,7 +176,7 @@ const WriteDylibLCCtx = struct { fn writeDylibLC(ctx: WriteDylibLCCtx, lc_writer: anytype) !void { const name_len = ctx.name.len + 1; - const cmdsize = @intCast(u32, mem.alignForwardGeneric( + const cmdsize = @intCast(u32, mem.alignForward( u64, @sizeOf(macho.dylib_command) + name_len, @sizeOf(u64), @@ -253,7 +253,7 @@ pub fn writeRpathLCs(gpa: Allocator, options: *const link.Options, lc_writer: an while (try it.next()) |rpath| { const rpath_len = rpath.len + 1; - const cmdsize = @intCast(u32, mem.alignForwardGeneric( + const cmdsize = @intCast(u32, mem.alignForward( u64, @sizeOf(macho.rpath_command) + rpath_len, @sizeOf(u64), diff --git a/src/link/MachO/thunks.zig b/src/link/MachO/thunks.zig index 48d1faac6b..7895190005 100644 --- a/src/link/MachO/thunks.zig +++ b/src/link/MachO/thunks.zig @@ -109,7 +109,7 @@ pub fn createThunks(zld: *Zld, sect_id: u8) !void { while (true) { const atom = zld.getAtom(group_end); - offset = mem.alignForwardGeneric(u64, offset, try math.powi(u32, 2, atom.alignment)); + offset = mem.alignForward(u64, offset, try math.powi(u32, 2, atom.alignment)); const sym = zld.getSymbolPtr(atom.getSymbolWithLoc()); sym.n_value = offset; @@ -153,7 +153,7 @@ pub fn createThunks(zld: *Zld, sect_id: u8) !void { } else break; } - offset = mem.alignForwardGeneric(u64, offset, Thunk.getAlignment()); + offset = mem.alignForward(u64, offset, Thunk.getAlignment()); allocateThunk(zld, thunk_index, offset, header); offset += zld.thunks.items[thunk_index].getSize(); @@ -193,7 +193,7 @@ fn allocateThunk( var offset = base_offset; while (true) { const atom = zld.getAtom(atom_index); - offset = mem.alignForwardGeneric(u64, offset, Thunk.getAlignment()); + offset = mem.alignForward(u64, offset, Thunk.getAlignment()); const sym = zld.getSymbolPtr(atom.getSymbolWithLoc()); sym.n_value = offset; diff --git a/src/link/MachO/zld.zig b/src/link/MachO/zld.zig index 4f7e615c79..7902d67d87 100644 --- a/src/link/MachO/zld.zig +++ b/src/link/MachO/zld.zig @@ -1207,7 +1207,7 @@ pub const Zld = struct { fn createSegments(self: *Zld) !void { const pagezero_vmsize = self.options.pagezero_size orelse MachO.default_pagezero_vmsize; - const aligned_pagezero_vmsize = mem.alignBackwardGeneric(u64, pagezero_vmsize, self.page_size); + const aligned_pagezero_vmsize = mem.alignBackward(u64, pagezero_vmsize, self.page_size); if (self.options.output_mode != .Lib and aligned_pagezero_vmsize > 0) { if (aligned_pagezero_vmsize != pagezero_vmsize) { log.warn("requested __PAGEZERO size (0x{x}) is not page aligned", .{pagezero_vmsize}); @@ -1466,7 +1466,7 @@ pub const Zld = struct { while (true) { const atom = self.getAtom(atom_index); const atom_alignment = try math.powi(u32, 2, atom.alignment); - const atom_offset = mem.alignForwardGeneric(u64, header.size, atom_alignment); + const atom_offset = 
mem.alignForward(u64, header.size, atom_alignment); const padding = atom_offset - header.size; const sym = self.getSymbolPtr(atom.getSymbolWithLoc()); @@ -1534,7 +1534,7 @@ pub const Zld = struct { const slice = self.sections.slice(); for (slice.items(.header)[indexes.start..indexes.end], 0..) |*header, sect_id| { const alignment = try math.powi(u32, 2, header.@"align"); - const start_aligned = mem.alignForwardGeneric(u64, start, alignment); + const start_aligned = mem.alignForward(u64, start, alignment); const n_sect = @intCast(u8, indexes.start + sect_id + 1); header.offset = if (header.isZerofill()) @@ -1598,8 +1598,8 @@ pub const Zld = struct { segment.vmsize = start; } - segment.filesize = mem.alignForwardGeneric(u64, segment.filesize, self.page_size); - segment.vmsize = mem.alignForwardGeneric(u64, segment.vmsize, self.page_size); + segment.filesize = mem.alignForward(u64, segment.filesize, self.page_size); + segment.vmsize = mem.alignForward(u64, segment.vmsize, self.page_size); } const InitSectionOpts = struct { @@ -1709,7 +1709,7 @@ pub const Zld = struct { try self.writeSymtabs(); const seg = self.getLinkeditSegmentPtr(); - seg.vmsize = mem.alignForwardGeneric(u64, seg.filesize, self.page_size); + seg.vmsize = mem.alignForward(u64, seg.filesize, self.page_size); } fn collectRebaseDataFromContainer( @@ -2112,17 +2112,17 @@ pub const Zld = struct { assert(mem.isAlignedGeneric(u64, link_seg.fileoff, @alignOf(u64))); const rebase_off = link_seg.fileoff; const rebase_size = rebase.size(); - const rebase_size_aligned = mem.alignForwardGeneric(u64, rebase_size, @alignOf(u64)); + const rebase_size_aligned = mem.alignForward(u64, rebase_size, @alignOf(u64)); log.debug("writing rebase info from 0x{x} to 0x{x}", .{ rebase_off, rebase_off + rebase_size_aligned }); const bind_off = rebase_off + rebase_size_aligned; const bind_size = bind.size(); - const bind_size_aligned = mem.alignForwardGeneric(u64, bind_size, @alignOf(u64)); + const bind_size_aligned = mem.alignForward(u64, bind_size, @alignOf(u64)); log.debug("writing bind info from 0x{x} to 0x{x}", .{ bind_off, bind_off + bind_size_aligned }); const lazy_bind_off = bind_off + bind_size_aligned; const lazy_bind_size = lazy_bind.size(); - const lazy_bind_size_aligned = mem.alignForwardGeneric(u64, lazy_bind_size, @alignOf(u64)); + const lazy_bind_size_aligned = mem.alignForward(u64, lazy_bind_size, @alignOf(u64)); log.debug("writing lazy bind info from 0x{x} to 0x{x}", .{ lazy_bind_off, lazy_bind_off + lazy_bind_size_aligned, @@ -2130,7 +2130,7 @@ pub const Zld = struct { const export_off = lazy_bind_off + lazy_bind_size_aligned; const export_size = trie.size; - const export_size_aligned = mem.alignForwardGeneric(u64, export_size, @alignOf(u64)); + const export_size_aligned = mem.alignForward(u64, export_size, @alignOf(u64)); log.debug("writing export trie from 0x{x} to 0x{x}", .{ export_off, export_off + export_size_aligned }); const needed_size = math.cast(usize, export_off + export_size_aligned - rebase_off) orelse @@ -2268,7 +2268,7 @@ pub const Zld = struct { const offset = link_seg.fileoff + link_seg.filesize; assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64))); const needed_size = buffer.items.len; - const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64)); + const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64)); const padding = math.cast(usize, needed_size_aligned - needed_size) orelse return error.Overflow; if (padding > 0) { try buffer.ensureUnusedCapacity(padding); @@ 
-2347,7 +2347,7 @@ pub const Zld = struct { const offset = seg.fileoff + seg.filesize; assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64))); const needed_size = out_dice.items.len * @sizeOf(macho.data_in_code_entry); - const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64)); + const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64)); seg.filesize = offset + needed_size_aligned - seg.fileoff; const buffer = try self.gpa.alloc(u8, math.cast(usize, needed_size_aligned) orelse return error.Overflow); @@ -2480,7 +2480,7 @@ pub const Zld = struct { const offset = seg.fileoff + seg.filesize; assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64))); const needed_size = self.strtab.buffer.items.len; - const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64)); + const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64)); seg.filesize = offset + needed_size_aligned - seg.fileoff; log.debug("writing string table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned }); @@ -2515,7 +2515,7 @@ pub const Zld = struct { const offset = seg.fileoff + seg.filesize; assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64))); const needed_size = nindirectsyms * @sizeOf(u32); - const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64)); + const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64)); seg.filesize = offset + needed_size_aligned - seg.fileoff; log.debug("writing indirect symbol table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned }); @@ -2690,7 +2690,7 @@ pub const Zld = struct { for (subsections[0..count]) |cut| { const size = cut.end - cut.start; - const num_chunks = mem.alignForward(size, chunk_size) / chunk_size; + const num_chunks = mem.alignForward(usize, size, chunk_size) / chunk_size; var i: usize = 0; while (i < num_chunks) : (i += 1) { @@ -2725,10 +2725,10 @@ pub const Zld = struct { const seg = self.getLinkeditSegmentPtr(); // Code signature data has to be 16-bytes aligned for Apple tools to recognize the file // https://github.com/opensource-apple/cctools/blob/fdb4825f303fd5c0751be524babd32958181b3ed/libstuff/checkout.c#L271 - const offset = mem.alignForwardGeneric(u64, seg.fileoff + seg.filesize, 16); + const offset = mem.alignForward(u64, seg.fileoff + seg.filesize, 16); const needed_size = code_sig.estimateSize(offset); seg.filesize = offset + needed_size - seg.fileoff; - seg.vmsize = mem.alignForwardGeneric(u64, seg.filesize, self.page_size); + seg.vmsize = mem.alignForward(u64, seg.filesize, self.page_size); log.debug("writing code signature padding from 0x{x} to 0x{x}", .{ offset, offset + needed_size }); // Pad out the space. We need to do this to calculate valid hashes for everything in the file // except for code signature data. 
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index f911074473..2d2930be8c 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -2118,7 +2118,7 @@ fn allocateAtoms(wasm: *Wasm) !void { } } } - offset = std.mem.alignForwardGeneric(u32, offset, atom.alignment); + offset = std.mem.alignForward(u32, offset, atom.alignment); atom.offset = offset; log.debug("Atom '{s}' allocated from 0x{x:0>8} to 0x{x:0>8} size={d}", .{ symbol_loc.getName(wasm), @@ -2129,7 +2129,7 @@ fn allocateAtoms(wasm: *Wasm) !void { offset += atom.size; atom_index = atom.prev orelse break; } - segment.size = std.mem.alignForwardGeneric(u32, offset, segment.alignment); + segment.size = std.mem.alignForward(u32, offset, segment.alignment); } } @@ -2731,7 +2731,7 @@ fn setupMemory(wasm: *Wasm) !void { const is_obj = wasm.base.options.output_mode == .Obj; if (place_stack_first and !is_obj) { - memory_ptr = std.mem.alignForwardGeneric(u64, memory_ptr, stack_alignment); + memory_ptr = std.mem.alignForward(u64, memory_ptr, stack_alignment); memory_ptr += stack_size; // We always put the stack pointer global at index 0 wasm.wasm_globals.items[0].init.i32_const = @bitCast(i32, @intCast(u32, memory_ptr)); @@ -2741,7 +2741,7 @@ fn setupMemory(wasm: *Wasm) !void { var data_seg_it = wasm.data_segments.iterator(); while (data_seg_it.next()) |entry| { const segment = &wasm.segments.items[entry.value_ptr.*]; - memory_ptr = std.mem.alignForwardGeneric(u64, memory_ptr, segment.alignment); + memory_ptr = std.mem.alignForward(u64, memory_ptr, segment.alignment); // set TLS-related symbols if (mem.eql(u8, entry.key_ptr.*, ".tdata")) { @@ -2779,7 +2779,7 @@ fn setupMemory(wasm: *Wasm) !void { // create the memory init flag which is used by the init memory function if (wasm.base.options.shared_memory and wasm.hasPassiveInitializationSegments()) { // align to pointer size - memory_ptr = mem.alignForwardGeneric(u64, memory_ptr, 4); + memory_ptr = mem.alignForward(u64, memory_ptr, 4); const loc = try wasm.createSyntheticSymbol("__wasm_init_memory_flag", .data); const sym = loc.getSymbol(wasm); sym.virtual_address = @intCast(u32, memory_ptr); @@ -2787,7 +2787,7 @@ fn setupMemory(wasm: *Wasm) !void { } if (!place_stack_first and !is_obj) { - memory_ptr = std.mem.alignForwardGeneric(u64, memory_ptr, stack_alignment); + memory_ptr = std.mem.alignForward(u64, memory_ptr, stack_alignment); memory_ptr += stack_size; wasm.wasm_globals.items[0].init.i32_const = @bitCast(i32, @intCast(u32, memory_ptr)); } @@ -2796,7 +2796,7 @@ fn setupMemory(wasm: *Wasm) !void { // We must set its virtual address so it can be used in relocations. if (wasm.findGlobalSymbol("__heap_base")) |loc| { const symbol = loc.getSymbol(wasm); - symbol.virtual_address = @intCast(u32, mem.alignForwardGeneric(u64, memory_ptr, heap_alignment)); + symbol.virtual_address = @intCast(u32, mem.alignForward(u64, memory_ptr, heap_alignment)); } // Setup the max amount of pages @@ -2818,7 +2818,7 @@ fn setupMemory(wasm: *Wasm) !void { } memory_ptr = initial_memory; } - memory_ptr = mem.alignForwardGeneric(u64, memory_ptr, std.wasm.page_size); + memory_ptr = mem.alignForward(u64, memory_ptr, std.wasm.page_size); // In case we do not import memory, but define it ourselves, // set the minimum amount of pages on the memory section. 
wasm.memories.limits.min = @intCast(u32, memory_ptr / page_size); diff --git a/src/objcopy.zig b/src/objcopy.zig index c5d0e8dcb3..014208cc0d 100644 --- a/src/objcopy.zig +++ b/src/objcopy.zig @@ -1024,7 +1024,7 @@ fn ElfFile(comptime is_64: bool) type { dest.sh_size = @intCast(Elf_OffSize, data.len); const addralign = if (src.sh_addralign == 0 or dest.sh_type == elf.SHT_NOBITS) 1 else src.sh_addralign; - dest.sh_offset = std.mem.alignForwardGeneric(Elf_OffSize, eof_offset, addralign); + dest.sh_offset = std.mem.alignForward(Elf_OffSize, eof_offset, addralign); if (src.sh_offset != dest.sh_offset and section.segment != null and update.action != .empty and dest.sh_type != elf.SHT_NOTE) { if (src.sh_offset > dest.sh_offset) { dest.sh_offset = src.sh_offset; // add padding to avoid modifing the program segments @@ -1085,7 +1085,7 @@ fn ElfFile(comptime is_64: bool) type { // add a ".gnu_debuglink" section if (options.debuglink) |link| { const payload = payload: { - const crc_offset = std.mem.alignForward(link.name.len + 1, 4); + const crc_offset = std.mem.alignForward(usize, link.name.len + 1, 4); const buf = try allocator.alignedAlloc(u8, 4, crc_offset + 4); @memcpy(buf[0..link.name.len], link.name); @memset(buf[link.name.len..crc_offset], 0); @@ -1117,7 +1117,7 @@ fn ElfFile(comptime is_64: bool) type { // write the section header at the tail { - const offset = std.mem.alignForwardGeneric(Elf_OffSize, eof_offset, @alignOf(Elf_Shdr)); + const offset = std.mem.alignForward(Elf_OffSize, eof_offset, @alignOf(Elf_Shdr)); const data = std.mem.sliceAsBytes(updated_section_header); assert(data.len == @as(usize, updated_elf_header.e_shentsize) * new_shnum); diff --git a/src/type.zig b/src/type.zig index bb82a50682..1c3435dafd 100644 --- a/src/type.zig +++ b/src/type.zig @@ -1339,7 +1339,7 @@ pub const Type = struct { .storage = .{ .lazy_size = ty.toIntern() }, } })).toValue() }, }; - const result = std.mem.alignForwardGeneric(u32, total_bytes, alignment); + const result = std.mem.alignForward(u32, total_bytes, alignment); return AbiSizeAdvanced{ .scalar = result }; }, @@ -1380,14 +1380,14 @@ pub const Type = struct { var size: u64 = 0; if (code_align > payload_align) { size += code_size; - size = std.mem.alignForwardGeneric(u64, size, payload_align); + size = std.mem.alignForward(u64, size, payload_align); size += payload_size; - size = std.mem.alignForwardGeneric(u64, size, code_align); + size = std.mem.alignForward(u64, size, code_align); } else { size += payload_size; - size = std.mem.alignForwardGeneric(u64, size, code_align); + size = std.mem.alignForward(u64, size, code_align); size += code_size; - size = std.mem.alignForwardGeneric(u64, size, payload_align); + size = std.mem.alignForward(u64, size, payload_align); } return AbiSizeAdvanced{ .scalar = size }; }, @@ -1595,7 +1595,7 @@ pub const Type = struct { fn intAbiSize(bits: u16, target: Target) u64 { const alignment = intAbiAlignment(bits, target); - return std.mem.alignForwardGeneric(u64, @intCast(u16, (@as(u17, bits) + 7) / 8), alignment); + return std.mem.alignForward(u64, @intCast(u16, (@as(u17, bits) + 7) / 8), alignment); } fn intAbiAlignment(bits: u16, target: Target) u32 { @@ -3194,7 +3194,7 @@ pub const Type = struct { const field_align = field.alignment(mod, it.struct_obj.layout); it.big_align = @max(it.big_align, field_align); - const field_offset = std.mem.alignForwardGeneric(u64, it.offset, field_align); + const field_offset = std.mem.alignForward(u64, it.offset, field_align); it.offset = field_offset + field.ty.abiSize(mod); 
return FieldOffset{ .field = i, .offset = field_offset }; } @@ -3223,7 +3223,7 @@ pub const Type = struct { return field_offset.offset; } - return std.mem.alignForwardGeneric(u64, it.offset, @max(it.big_align, 1)); + return std.mem.alignForward(u64, it.offset, @max(it.big_align, 1)); }, .anon_struct_type => |tuple| { @@ -3239,11 +3239,11 @@ pub const Type = struct { const field_align = field_ty.toType().abiAlignment(mod); big_align = @max(big_align, field_align); - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + offset = std.mem.alignForward(u64, offset, field_align); if (i == index) return offset; offset += field_ty.toType().abiSize(mod); } - offset = std.mem.alignForwardGeneric(u64, offset, @max(big_align, 1)); + offset = std.mem.alignForward(u64, offset, @max(big_align, 1)); return offset; }, @@ -3254,7 +3254,7 @@ pub const Type = struct { const layout = union_obj.getLayout(mod, true); if (layout.tag_align >= layout.payload_align) { // {Tag, Payload} - return std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align); + return std.mem.alignForward(u64, layout.tag_size, layout.payload_align); } else { // {Payload, Tag} return 0; -- cgit v1.2.3