From bd3360e03d89fe947e3728ccacd4274653926376 Mon Sep 17 00:00:00 2001 From: dweiller <4678790+dweiller@users.noreplay.github.com> Date: Tue, 2 May 2023 22:08:54 +1000 Subject: convert s[start..start+len] to s[start..][0..len] --- lib/std/testing.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib/std/testing.zig') diff --git a/lib/std/testing.zig b/lib/std/testing.zig index 37e15ff08b..2857ebdbd3 100644 --- a/lib/std/testing.zig +++ b/lib/std/testing.zig @@ -941,7 +941,7 @@ fn printIndicatorLine(source: []const u8, indicator_index: usize) void { fn printWithVisibleNewlines(source: []const u8) void { var i: usize = 0; while (std.mem.indexOfScalar(u8, source[i..], '\n')) |nl| : (i += nl + 1) { - printLine(source[i .. i + nl]); + printLine(source[i..][0..nl]); } print("{s}␃\n", .{source[i..]}); // End of Text symbol (ETX) } -- cgit v1.2.3 From 39c2eee285f820282dedba4404cac1009a5ae2d6 Mon Sep 17 00:00:00 2001 From: Linus Groh Date: Sat, 20 May 2023 22:30:02 +0100 Subject: std.debug: Rename TTY.Color enum values to snake case --- lib/build_runner.zig | 60 +++++++++++++++++++++---------------------- lib/std/Build.zig | 12 ++++----- lib/std/debug.zig | 62 ++++++++++++++++++++++----------------------- lib/std/testing.zig | 8 +++--- lib/std/zig/ErrorBundle.zig | 32 +++++++++++------------ 5 files changed, 87 insertions(+), 87 deletions(-) (limited to 'lib/std/testing.zig') diff --git a/lib/build_runner.zig b/lib/build_runner.zig index 42903b82f3..7eec164871 100644 --- a/lib/build_runner.zig +++ b/lib/build_runner.zig @@ -476,9 +476,9 @@ fn runStepNames( if (run.enable_summary != false) { const total_count = success_count + failure_count + pending_count + skipped_count; - ttyconf.setColor(stderr, .Cyan) catch {}; + ttyconf.setColor(stderr, .cyan) catch {}; stderr.writeAll("Build Summary:") catch {}; - ttyconf.setColor(stderr, .Reset) catch {}; + ttyconf.setColor(stderr, .reset) catch {}; stderr.writer().print(" {d}/{d} steps succeeded", .{ 
success_count, total_count }) catch {}; if (skipped_count > 0) stderr.writer().print("; {d} skipped", .{skipped_count}) catch {}; if (failure_count > 0) stderr.writer().print("; {d} failed", .{failure_count}) catch {}; @@ -489,9 +489,9 @@ fn runStepNames( if (test_leak_count > 0) stderr.writer().print("; {d} leaked", .{test_leak_count}) catch {}; if (run.enable_summary == null) { - ttyconf.setColor(stderr, .Dim) catch {}; + ttyconf.setColor(stderr, .dim) catch {}; stderr.writeAll(" (disable with -fno-summary)") catch {}; - ttyconf.setColor(stderr, .Reset) catch {}; + ttyconf.setColor(stderr, .reset) catch {}; } stderr.writeAll("\n") catch {}; @@ -560,7 +560,7 @@ fn printTreeStep( const first = step_stack.swapRemove(s); try printPrefix(parent_node, stderr, ttyconf); - if (!first) try ttyconf.setColor(stderr, .Dim); + if (!first) try ttyconf.setColor(stderr, .dim); if (parent_node.parent != null) { if (parent_node.last) { try stderr.writeAll(switch (ttyconf) { @@ -586,28 +586,28 @@ fn printTreeStep( .running => unreachable, .dependency_failure => { - try ttyconf.setColor(stderr, .Dim); + try ttyconf.setColor(stderr, .dim); try stderr.writeAll(" transitive failure\n"); - try ttyconf.setColor(stderr, .Reset); + try ttyconf.setColor(stderr, .reset); }, .success => { - try ttyconf.setColor(stderr, .Green); + try ttyconf.setColor(stderr, .green); if (s.result_cached) { try stderr.writeAll(" cached"); } else if (s.test_results.test_count > 0) { const pass_count = s.test_results.passCount(); try stderr.writer().print(" {d} passed", .{pass_count}); if (s.test_results.skip_count > 0) { - try ttyconf.setColor(stderr, .Yellow); + try ttyconf.setColor(stderr, .yellow); try stderr.writer().print(" {d} skipped", .{s.test_results.skip_count}); } } else { try stderr.writeAll(" success"); } - try ttyconf.setColor(stderr, .Reset); + try ttyconf.setColor(stderr, .reset); if (s.result_duration_ns) |ns| { - try ttyconf.setColor(stderr, .Dim); + try ttyconf.setColor(stderr, .dim); if (ns 
>= std.time.ns_per_min) { try stderr.writer().print(" {d}m", .{ns / std.time.ns_per_min}); } else if (ns >= std.time.ns_per_s) { @@ -619,11 +619,11 @@ fn printTreeStep( } else { try stderr.writer().print(" {d}ns", .{ns}); } - try ttyconf.setColor(stderr, .Reset); + try ttyconf.setColor(stderr, .reset); } if (s.result_peak_rss != 0) { const rss = s.result_peak_rss; - try ttyconf.setColor(stderr, .Dim); + try ttyconf.setColor(stderr, .dim); if (rss >= 1000_000_000) { try stderr.writer().print(" MaxRSS:{d}G", .{rss / 1000_000_000}); } else if (rss >= 1000_000) { @@ -633,57 +633,57 @@ fn printTreeStep( } else { try stderr.writer().print(" MaxRSS:{d}B", .{rss}); } - try ttyconf.setColor(stderr, .Reset); + try ttyconf.setColor(stderr, .reset); } try stderr.writeAll("\n"); }, .skipped => { - try ttyconf.setColor(stderr, .Yellow); + try ttyconf.setColor(stderr, .yellow); try stderr.writeAll(" skipped\n"); - try ttyconf.setColor(stderr, .Reset); + try ttyconf.setColor(stderr, .reset); }, .failure => { if (s.result_error_bundle.errorMessageCount() > 0) { - try ttyconf.setColor(stderr, .Red); + try ttyconf.setColor(stderr, .red); try stderr.writer().print(" {d} errors\n", .{ s.result_error_bundle.errorMessageCount(), }); - try ttyconf.setColor(stderr, .Reset); + try ttyconf.setColor(stderr, .reset); } else if (!s.test_results.isSuccess()) { try stderr.writer().print(" {d}/{d} passed", .{ s.test_results.passCount(), s.test_results.test_count, }); if (s.test_results.fail_count > 0) { try stderr.writeAll(", "); - try ttyconf.setColor(stderr, .Red); + try ttyconf.setColor(stderr, .red); try stderr.writer().print("{d} failed", .{ s.test_results.fail_count, }); - try ttyconf.setColor(stderr, .Reset); + try ttyconf.setColor(stderr, .reset); } if (s.test_results.skip_count > 0) { try stderr.writeAll(", "); - try ttyconf.setColor(stderr, .Yellow); + try ttyconf.setColor(stderr, .yellow); try stderr.writer().print("{d} skipped", .{ s.test_results.skip_count, }); - try 
ttyconf.setColor(stderr, .Reset); + try ttyconf.setColor(stderr, .reset); } if (s.test_results.leak_count > 0) { try stderr.writeAll(", "); - try ttyconf.setColor(stderr, .Red); + try ttyconf.setColor(stderr, .red); try stderr.writer().print("{d} leaked", .{ s.test_results.leak_count, }); - try ttyconf.setColor(stderr, .Reset); + try ttyconf.setColor(stderr, .reset); } try stderr.writeAll("\n"); } else { - try ttyconf.setColor(stderr, .Red); + try ttyconf.setColor(stderr, .red); try stderr.writeAll(" failure\n"); - try ttyconf.setColor(stderr, .Reset); + try ttyconf.setColor(stderr, .reset); } }, } @@ -703,7 +703,7 @@ fn printTreeStep( s.dependencies.items.len, }); } - try ttyconf.setColor(stderr, .Reset); + try ttyconf.setColor(stderr, .reset); } } @@ -819,13 +819,13 @@ fn workerMakeOneStep( for (s.result_error_msgs.items) |msg| { // Sometimes it feels like you just can't catch a break. Finally, // with Zig, you can. - ttyconf.setColor(stderr, .Bold) catch break; + ttyconf.setColor(stderr, .bold) catch break; stderr.writeAll(s.owner.dep_prefix) catch break; stderr.writeAll(s.name) catch break; stderr.writeAll(": ") catch break; - ttyconf.setColor(stderr, .Red) catch break; + ttyconf.setColor(stderr, .red) catch break; stderr.writeAll("error: ") catch break; - ttyconf.setColor(stderr, .Reset) catch break; + ttyconf.setColor(stderr, .reset) catch break; stderr.writeAll(msg) catch break; stderr.writeAll("\n") catch break; } diff --git a/lib/std/Build.zig b/lib/std/Build.zig index d97a5c5d7a..b36e815f72 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -1713,9 +1713,9 @@ fn dumpBadGetPathHelp( }); const tty_config = std.debug.detectTTYConfig(stderr); - tty_config.setColor(w, .Red) catch {}; + tty_config.setColor(w, .red) catch {}; try stderr.writeAll(" The step was created by this stack trace:\n"); - tty_config.setColor(w, .Reset) catch {}; + tty_config.setColor(w, .reset) catch {}; const debug_info = std.debug.getSelfDebugInfo() catch |err| { try 
w.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{@errorName(err)}); @@ -1727,9 +1727,9 @@ fn dumpBadGetPathHelp( return; }; if (asking_step) |as| { - tty_config.setColor(w, .Red) catch {}; + tty_config.setColor(w, .red) catch {}; try stderr.writeAll(" The step that is missing a dependency on the above step was created by this stack trace:\n"); - tty_config.setColor(w, .Reset) catch {}; + tty_config.setColor(w, .reset) catch {}; std.debug.writeStackTrace(as.getStackTrace(), w, ally, debug_info, tty_config) catch |err| { try stderr.writer().print("Unable to dump stack trace: {s}\n", .{@errorName(err)}); @@ -1737,9 +1737,9 @@ fn dumpBadGetPathHelp( }; } - tty_config.setColor(w, .Red) catch {}; + tty_config.setColor(w, .red) catch {}; try stderr.writeAll(" Hope that helps. Proceeding to panic.\n"); - tty_config.setColor(w, .Reset) catch {}; + tty_config.setColor(w, .reset) catch {}; } /// Allocates a new string for assigning a value to a named macro. diff --git a/lib/std/debug.zig b/lib/std/debug.zig index 005c2b5404..d98cf8f27d 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -421,9 +421,9 @@ pub fn writeStackTrace( if (stack_trace.index > stack_trace.instruction_addresses.len) { const dropped_frames = stack_trace.index - stack_trace.instruction_addresses.len; - tty_config.setColor(out_stream, .Bold) catch {}; + tty_config.setColor(out_stream, .bold) catch {}; try out_stream.print("({d} additional stack frames skipped...)\n", .{dropped_frames}); - tty_config.setColor(out_stream, .Reset) catch {}; + tty_config.setColor(out_stream, .reset) catch {}; } } @@ -655,14 +655,14 @@ pub fn writeCurrentStackTraceWindows( /// for debugging purposes, such as coloring text, etc. 
pub const TTY = struct { pub const Color = enum { - Red, - Green, - Yellow, - Cyan, - White, - Dim, - Bold, - Reset, + red, + green, + yellow, + cyan, + white, + dim, + bold, + reset, }; pub const Config = union(enum) { @@ -680,26 +680,26 @@ pub const TTY = struct { .no_color => return, .escape_codes => { const color_string = switch (color) { - .Red => "\x1b[31;1m", - .Green => "\x1b[32;1m", - .Yellow => "\x1b[33;1m", - .Cyan => "\x1b[36;1m", - .White => "\x1b[37;1m", - .Bold => "\x1b[1m", - .Dim => "\x1b[2m", - .Reset => "\x1b[0m", + .red => "\x1b[31;1m", + .green => "\x1b[32;1m", + .yellow => "\x1b[33;1m", + .cyan => "\x1b[36;1m", + .white => "\x1b[37;1m", + .bold => "\x1b[1m", + .dim => "\x1b[2m", + .reset => "\x1b[0m", }; try out_stream.writeAll(color_string); }, .windows_api => |ctx| if (native_os == .windows) { const attributes = switch (color) { - .Red => windows.FOREGROUND_RED | windows.FOREGROUND_INTENSITY, - .Green => windows.FOREGROUND_GREEN | windows.FOREGROUND_INTENSITY, - .Yellow => windows.FOREGROUND_RED | windows.FOREGROUND_GREEN | windows.FOREGROUND_INTENSITY, - .Cyan => windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY, - .White, .Bold => windows.FOREGROUND_RED | windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY, - .Dim => windows.FOREGROUND_INTENSITY, - .Reset => ctx.reset_attributes, + .red => windows.FOREGROUND_RED | windows.FOREGROUND_INTENSITY, + .green => windows.FOREGROUND_GREEN | windows.FOREGROUND_INTENSITY, + .yellow => windows.FOREGROUND_RED | windows.FOREGROUND_GREEN | windows.FOREGROUND_INTENSITY, + .cyan => windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY, + .white, .bold => windows.FOREGROUND_RED | windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY, + .dim => windows.FOREGROUND_INTENSITY, + .reset => ctx.reset_attributes, }; try windows.SetConsoleTextAttribute(ctx.handle, attributes); } else { @@ -831,7 
+831,7 @@ fn printLineInfo( comptime printLineFromFile: anytype, ) !void { nosuspend { - try tty_config.setColor(out_stream, .Bold); + try tty_config.setColor(out_stream, .bold); if (line_info) |*li| { try out_stream.print("{s}:{d}:{d}", .{ li.file_name, li.line, li.column }); @@ -839,11 +839,11 @@ fn printLineInfo( try out_stream.writeAll("???:?:?"); } - try tty_config.setColor(out_stream, .Reset); + try tty_config.setColor(out_stream, .reset); try out_stream.writeAll(": "); - try tty_config.setColor(out_stream, .Dim); + try tty_config.setColor(out_stream, .dim); try out_stream.print("0x{x} in {s} ({s})", .{ address, symbol_name, compile_unit_name }); - try tty_config.setColor(out_stream, .Reset); + try tty_config.setColor(out_stream, .reset); try out_stream.writeAll("\n"); // Show the matching source code line if possible @@ -854,9 +854,9 @@ fn printLineInfo( const space_needed = @intCast(usize, li.column - 1); try out_stream.writeByteNTimes(' ', space_needed); - try tty_config.setColor(out_stream, .Green); + try tty_config.setColor(out_stream, .green); try out_stream.writeAll("^"); - try tty_config.setColor(out_stream, .Reset); + try tty_config.setColor(out_stream, .reset); } try out_stream.writeAll("\n"); } else |err| switch (err) { diff --git a/lib/std/testing.zig b/lib/std/testing.zig index 2857ebdbd3..8576ec0c83 100644 --- a/lib/std/testing.zig +++ b/lib/std/testing.zig @@ -387,9 +387,9 @@ fn SliceDiffer(comptime T: type) type { for (self.expected, 0..) 
|value, i| { var full_index = self.start_index + i; const diff = if (i < self.actual.len) !std.meta.eql(self.actual[i], value) else true; - if (diff) try self.ttyconf.setColor(writer, .Red); + if (diff) try self.ttyconf.setColor(writer, .red); try writer.print("[{}]: {any}\n", .{ full_index, value }); - if (diff) try self.ttyconf.setColor(writer, .Reset); + if (diff) try self.ttyconf.setColor(writer, .reset); } } }; @@ -427,9 +427,9 @@ const BytesDiffer = struct { } fn writeByteDiff(self: BytesDiffer, writer: anytype, comptime fmt: []const u8, byte: u8, diff: bool) !void { - if (diff) try self.ttyconf.setColor(writer, .Red); + if (diff) try self.ttyconf.setColor(writer, .red); try writer.print(fmt, .{byte}); - if (diff) try self.ttyconf.setColor(writer, .Reset); + if (diff) try self.ttyconf.setColor(writer, .reset); } const ChunkIterator = struct { diff --git a/lib/std/zig/ErrorBundle.zig b/lib/std/zig/ErrorBundle.zig index ffe748203e..f74d82273a 100644 --- a/lib/std/zig/ErrorBundle.zig +++ b/lib/std/zig/ErrorBundle.zig @@ -163,7 +163,7 @@ pub fn renderToStdErr(eb: ErrorBundle, options: RenderOptions) void { pub fn renderToWriter(eb: ErrorBundle, options: RenderOptions, writer: anytype) anyerror!void { for (eb.getMessages()) |err_msg| { - try renderErrorMessageToWriter(eb, options, err_msg, writer, "error", .Red, 0); + try renderErrorMessageToWriter(eb, options, err_msg, writer, "error", .red, 0); } if (options.include_log_text) { @@ -191,7 +191,7 @@ fn renderErrorMessageToWriter( if (err_msg.src_loc != .none) { const src = eb.extraData(SourceLocation, @enumToInt(err_msg.src_loc)); try counting_stderr.writeByteNTimes(' ', indent); - try ttyconf.setColor(stderr, .Bold); + try ttyconf.setColor(stderr, .bold); try counting_stderr.print("{s}:{d}:{d}: ", .{ eb.nullTerminatedString(src.data.src_path), src.data.line + 1, @@ -203,17 +203,17 @@ fn renderErrorMessageToWriter( // This is the length of the part before the error message: // e.g. 
"file.zig:4:5: error: " const prefix_len = @intCast(usize, counting_stderr.context.bytes_written); - try ttyconf.setColor(stderr, .Reset); - try ttyconf.setColor(stderr, .Bold); + try ttyconf.setColor(stderr, .reset); + try ttyconf.setColor(stderr, .bold); if (err_msg.count == 1) { try writeMsg(eb, err_msg, stderr, prefix_len); try stderr.writeByte('\n'); } else { try writeMsg(eb, err_msg, stderr, prefix_len); - try ttyconf.setColor(stderr, .Dim); + try ttyconf.setColor(stderr, .dim); try stderr.print(" ({d} times)\n", .{err_msg.count}); } - try ttyconf.setColor(stderr, .Reset); + try ttyconf.setColor(stderr, .reset); if (src.data.source_line != 0 and options.include_source_line) { const line = eb.nullTerminatedString(src.data.source_line); for (line) |b| switch (b) { @@ -226,19 +226,19 @@ fn renderErrorMessageToWriter( // -1 since span.main includes the caret const after_caret = src.data.span_end - src.data.span_main -| 1; try stderr.writeByteNTimes(' ', src.data.column - before_caret); - try ttyconf.setColor(stderr, .Green); + try ttyconf.setColor(stderr, .green); try stderr.writeByteNTimes('~', before_caret); try stderr.writeByte('^'); try stderr.writeByteNTimes('~', after_caret); try stderr.writeByte('\n'); - try ttyconf.setColor(stderr, .Reset); + try ttyconf.setColor(stderr, .reset); } for (eb.getNotes(err_msg_index)) |note| { - try renderErrorMessageToWriter(eb, options, note, stderr, "note", .Cyan, indent); + try renderErrorMessageToWriter(eb, options, note, stderr, "note", .cyan, indent); } if (src.data.reference_trace_len > 0 and options.include_reference_trace) { - try ttyconf.setColor(stderr, .Reset); - try ttyconf.setColor(stderr, .Dim); + try ttyconf.setColor(stderr, .reset); + try ttyconf.setColor(stderr, .dim); try stderr.print("referenced by:\n", .{}); var ref_index = src.end; for (0..src.data.reference_trace_len) |_| { @@ -266,25 +266,25 @@ fn renderErrorMessageToWriter( } } try stderr.writeByte('\n'); - try ttyconf.setColor(stderr, .Reset); + try 
ttyconf.setColor(stderr, .reset); } } else { try ttyconf.setColor(stderr, color); try stderr.writeByteNTimes(' ', indent); try stderr.writeAll(kind); try stderr.writeAll(": "); - try ttyconf.setColor(stderr, .Reset); + try ttyconf.setColor(stderr, .reset); const msg = eb.nullTerminatedString(err_msg.msg); if (err_msg.count == 1) { try stderr.print("{s}\n", .{msg}); } else { try stderr.print("{s}", .{msg}); - try ttyconf.setColor(stderr, .Dim); + try ttyconf.setColor(stderr, .dim); try stderr.print(" ({d} times)\n", .{err_msg.count}); } - try ttyconf.setColor(stderr, .Reset); + try ttyconf.setColor(stderr, .reset); for (eb.getNotes(err_msg_index)) |note| { - try renderErrorMessageToWriter(eb, options, note, stderr, "note", .Cyan, indent + 4); + try renderErrorMessageToWriter(eb, options, note, stderr, "note", .cyan, indent + 4); } } } -- cgit v1.2.3 From 0f6fa3f20b3b28958921bd63a9a9d96468455e9c Mon Sep 17 00:00:00 2001 From: Linus Groh Date: Sun, 21 May 2023 14:27:28 +0100 Subject: std: Move std.debug.{TTY.Config,detectTTYConfig} to std.io.tty Also get rid of the TTY wrapper struct, which was exclusively used as a namespace - this is done by the tty.zig root struct now. detectTTYConfig has been renamed to just detectConfig, which is enough given the new namespace. Additionally, a doc comment had been added.
--- lib/build_runner.zig | 12 ++-- lib/std/Build.zig | 2 +- lib/std/Build/Step.zig | 2 +- lib/std/builtin.zig | 2 +- lib/std/debug.zig | 137 ++++---------------------------------------- lib/std/io.zig | 2 + lib/std/io/tty.zig | 121 ++++++++++++++++++++++++++++++++++++++ lib/std/testing.zig | 8 +-- lib/std/zig/ErrorBundle.zig | 4 +- src/main.zig | 4 +- test/src/Cases.zig | 2 +- 11 files changed, 152 insertions(+), 144 deletions(-) create mode 100644 lib/std/io/tty.zig (limited to 'lib/std/testing.zig') diff --git a/lib/build_runner.zig b/lib/build_runner.zig index 7eec164871..a09ec2cf1f 100644 --- a/lib/build_runner.zig +++ b/lib/build_runner.zig @@ -333,7 +333,7 @@ const Run = struct { claimed_rss: usize, enable_summary: ?bool, - ttyconf: std.debug.TTY.Config, + ttyconf: std.io.tty.Config, stderr: std.fs.File, }; @@ -535,7 +535,7 @@ const PrintNode = struct { last: bool = false, }; -fn printPrefix(node: *PrintNode, stderr: std.fs.File, ttyconf: std.debug.TTY.Config) !void { +fn printPrefix(node: *PrintNode, stderr: std.fs.File, ttyconf: std.io.tty.Config) !void { const parent = node.parent orelse return; if (parent.parent == null) return; try printPrefix(parent, stderr, ttyconf); @@ -553,7 +553,7 @@ fn printTreeStep( b: *std.Build, s: *Step, stderr: std.fs.File, - ttyconf: std.debug.TTY.Config, + ttyconf: std.io.tty.Config, parent_node: *PrintNode, step_stack: *std.AutoArrayHashMapUnmanaged(*Step, void), ) !void { @@ -1026,15 +1026,15 @@ fn cleanExit() void { const Color = enum { auto, off, on }; -fn get_tty_conf(color: Color, stderr: std.fs.File) std.debug.TTY.Config { +fn get_tty_conf(color: Color, stderr: std.fs.File) std.io.tty.Config { return switch (color) { - .auto => std.debug.detectTTYConfig(stderr), + .auto => std.io.tty.detectConfig(stderr), .on => .escape_codes, .off => .no_color, }; } -fn renderOptions(ttyconf: std.debug.TTY.Config) std.zig.ErrorBundle.RenderOptions { +fn renderOptions(ttyconf: std.io.tty.Config) std.zig.ErrorBundle.RenderOptions { 
return .{ .ttyconf = ttyconf, .include_source_line = ttyconf != .no_color, diff --git a/lib/std/Build.zig b/lib/std/Build.zig index b36e815f72..bb642b5e66 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -1712,7 +1712,7 @@ fn dumpBadGetPathHelp( s.name, }); - const tty_config = std.debug.detectTTYConfig(stderr); + const tty_config = std.io.tty.detectConfig(stderr); tty_config.setColor(w, .red) catch {}; try stderr.writeAll(" The step was created by this stack trace:\n"); tty_config.setColor(w, .reset) catch {}; diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index 40c88df2b9..a0d7a6a296 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -237,7 +237,7 @@ pub fn dump(step: *Step) void { const stderr = std.io.getStdErr(); const w = stderr.writer(); - const tty_config = std.debug.detectTTYConfig(stderr); + const tty_config = std.io.tty.detectConfig(stderr); const debug_info = std.debug.getSelfDebugInfo() catch |err| { w.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{ @errorName(err), diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig index 56fab05d88..710aaefd5a 100644 --- a/lib/std/builtin.zig +++ b/lib/std/builtin.zig @@ -51,7 +51,7 @@ pub const StackTrace = struct { const debug_info = std.debug.getSelfDebugInfo() catch |err| { return writer.print("\nUnable to print stack trace: Unable to open debug info: {s}\n", .{@errorName(err)}); }; - const tty_config = std.debug.detectTTYConfig(std.io.getStdErr()); + const tty_config = std.io.tty.detectConfig(std.io.getStdErr()); try writer.writeAll("\n"); std.debug.writeStackTrace(self, writer, arena.allocator(), debug_info, tty_config) catch |err| { try writer.print("Unable to print stack trace: {s}\n", .{@errorName(err)}); diff --git a/lib/std/debug.zig b/lib/std/debug.zig index d98cf8f27d..08407023d6 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -5,7 +5,6 @@ const mem = std.mem; const io = std.io; const os = std.os; const fs = std.fs; -const 
process = std.process; const testing = std.testing; const elf = std.elf; const DW = std.dwarf; @@ -109,31 +108,6 @@ pub fn getSelfDebugInfo() !*DebugInfo { } } -pub fn detectTTYConfig(file: std.fs.File) TTY.Config { - if (builtin.os.tag == .wasi) { - // Per https://github.com/WebAssembly/WASI/issues/162 ANSI codes - // aren't currently supported. - return .no_color; - } else if (process.hasEnvVarConstant("ZIG_DEBUG_COLOR")) { - return .escape_codes; - } else if (process.hasEnvVarConstant("NO_COLOR")) { - return .no_color; - } else if (file.supportsAnsiEscapeCodes()) { - return .escape_codes; - } else if (native_os == .windows and file.isTty()) { - var info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined; - if (windows.kernel32.GetConsoleScreenBufferInfo(file.handle, &info) != windows.TRUE) { - // TODO: Should this return an error instead? - return .no_color; - } - return .{ .windows_api = .{ - .handle = file.handle, - .reset_attributes = info.wAttributes, - } }; - } - return .no_color; -} - /// Tries to print the current stack trace to stderr, unbuffered, and ignores any error returned. 
/// TODO multithreaded awareness pub fn dumpCurrentStackTrace(start_addr: ?usize) void { @@ -154,7 +128,7 @@ pub fn dumpCurrentStackTrace(start_addr: ?usize) void { stderr.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{@errorName(err)}) catch return; return; }; - writeCurrentStackTrace(stderr, debug_info, detectTTYConfig(io.getStdErr()), start_addr) catch |err| { + writeCurrentStackTrace(stderr, debug_info, io.tty.detectConfig(io.getStdErr()), start_addr) catch |err| { stderr.print("Unable to dump stack trace: {s}\n", .{@errorName(err)}) catch return; return; }; @@ -182,7 +156,7 @@ pub fn dumpStackTraceFromBase(bp: usize, ip: usize) void { stderr.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{@errorName(err)}) catch return; return; }; - const tty_config = detectTTYConfig(io.getStdErr()); + const tty_config = io.tty.detectConfig(io.getStdErr()); if (native_os == .windows) { writeCurrentStackTraceWindows(stderr, debug_info, tty_config, ip) catch return; return; @@ -265,7 +239,7 @@ pub fn dumpStackTrace(stack_trace: std.builtin.StackTrace) void { stderr.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{@errorName(err)}) catch return; return; }; - writeStackTrace(stack_trace, stderr, getDebugInfoAllocator(), debug_info, detectTTYConfig(io.getStdErr())) catch |err| { + writeStackTrace(stack_trace, stderr, getDebugInfoAllocator(), debug_info, io.tty.detectConfig(io.getStdErr())) catch |err| { stderr.print("Unable to dump stack trace: {s}\n", .{@errorName(err)}) catch return; return; }; @@ -403,7 +377,7 @@ pub fn writeStackTrace( out_stream: anytype, allocator: mem.Allocator, debug_info: *DebugInfo, - tty_config: TTY.Config, + tty_config: io.tty.Config, ) !void { _ = allocator; if (builtin.strip_debug_info) return error.MissingDebugInfo; @@ -562,7 +536,7 @@ pub const StackIterator = struct { pub fn writeCurrentStackTrace( out_stream: anytype, debug_info: *DebugInfo, - tty_config: TTY.Config, + 
tty_config: io.tty.Config, start_addr: ?usize, ) !void { if (native_os == .windows) { @@ -634,7 +608,7 @@ pub noinline fn walkStackWindows(addresses: []usize) usize { pub fn writeCurrentStackTraceWindows( out_stream: anytype, debug_info: *DebugInfo, - tty_config: TTY.Config, + tty_config: io.tty.Config, start_addr: ?usize, ) !void { var addr_buf: [1024]usize = undefined; @@ -651,95 +625,6 @@ pub fn writeCurrentStackTraceWindows( } } -/// Provides simple functionality for manipulating the terminal in some way, -/// for debugging purposes, such as coloring text, etc. -pub const TTY = struct { - pub const Color = enum { - red, - green, - yellow, - cyan, - white, - dim, - bold, - reset, - }; - - pub const Config = union(enum) { - no_color, - escape_codes, - windows_api: if (native_os == .windows) WindowsContext else void, - - pub const WindowsContext = struct { - handle: File.Handle, - reset_attributes: u16, - }; - - pub fn setColor(conf: Config, out_stream: anytype, color: Color) !void { - nosuspend switch (conf) { - .no_color => return, - .escape_codes => { - const color_string = switch (color) { - .red => "\x1b[31;1m", - .green => "\x1b[32;1m", - .yellow => "\x1b[33;1m", - .cyan => "\x1b[36;1m", - .white => "\x1b[37;1m", - .bold => "\x1b[1m", - .dim => "\x1b[2m", - .reset => "\x1b[0m", - }; - try out_stream.writeAll(color_string); - }, - .windows_api => |ctx| if (native_os == .windows) { - const attributes = switch (color) { - .red => windows.FOREGROUND_RED | windows.FOREGROUND_INTENSITY, - .green => windows.FOREGROUND_GREEN | windows.FOREGROUND_INTENSITY, - .yellow => windows.FOREGROUND_RED | windows.FOREGROUND_GREEN | windows.FOREGROUND_INTENSITY, - .cyan => windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY, - .white, .bold => windows.FOREGROUND_RED | windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY, - .dim => windows.FOREGROUND_INTENSITY, - .reset => ctx.reset_attributes, - }; - try 
windows.SetConsoleTextAttribute(ctx.handle, attributes); - } else { - unreachable; - }, - }; - } - - pub fn writeDEC(conf: Config, writer: anytype, codepoint: u8) !void { - const bytes = switch (conf) { - .no_color, .windows_api => switch (codepoint) { - 0x50...0x5e => @as(*const [1]u8, &codepoint), - 0x6a => "+", // ┘ - 0x6b => "+", // ┐ - 0x6c => "+", // ┌ - 0x6d => "+", // └ - 0x6e => "+", // ┼ - 0x71 => "-", // ─ - 0x74 => "+", // ├ - 0x75 => "+", // ┤ - 0x76 => "+", // ┴ - 0x77 => "+", // ┬ - 0x78 => "|", // │ - else => " ", // TODO - }, - .escape_codes => switch (codepoint) { - // Here we avoid writing the DEC beginning sequence and - // ending sequence in separate syscalls by putting the - // beginning and ending sequence into the same string - // literals, to prevent terminals ending up in bad states - // in case a crash happens between syscalls. - inline 0x50...0x7f => |x| "\x1B\x28\x30" ++ [1]u8{x} ++ "\x1B\x28\x42", - else => unreachable, - }, - }; - return writer.writeAll(bytes); - } - }; -}; - fn machoSearchSymbols(symbols: []const MachoSymbol, address: usize) ?*const MachoSymbol { var min: usize = 0; var max: usize = symbols.len - 1; @@ -785,7 +670,7 @@ test "machoSearchSymbols" { try testing.expectEqual(&symbols[2], machoSearchSymbols(&symbols, 5000).?); } -fn printUnknownSource(debug_info: *DebugInfo, out_stream: anytype, address: usize, tty_config: TTY.Config) !void { +fn printUnknownSource(debug_info: *DebugInfo, out_stream: anytype, address: usize, tty_config: io.tty.Config) !void { const module_name = debug_info.getModuleNameForAddress(address); return printLineInfo( out_stream, @@ -798,7 +683,7 @@ fn printUnknownSource(debug_info: *DebugInfo, out_stream: anytype, address: usiz ); } -pub fn printSourceAtAddress(debug_info: *DebugInfo, out_stream: anytype, address: usize, tty_config: TTY.Config) !void { +pub fn printSourceAtAddress(debug_info: *DebugInfo, out_stream: anytype, address: usize, tty_config: io.tty.Config) !void { const module = 
debug_info.getModuleForAddress(address) catch |err| switch (err) { error.MissingDebugInfo, error.InvalidDebugInfo => return printUnknownSource(debug_info, out_stream, address, tty_config), else => return err, @@ -827,7 +712,7 @@ fn printLineInfo( address: usize, symbol_name: []const u8, compile_unit_name: []const u8, - tty_config: TTY.Config, + tty_config: io.tty.Config, comptime printLineFromFile: anytype, ) !void { nosuspend { @@ -2193,7 +2078,7 @@ test "manage resources correctly" { const writer = std.io.null_writer; var di = try openSelfDebugInfo(testing.allocator); defer di.deinit(); - try printSourceAtAddress(&di, writer, showMyTrace(), detectTTYConfig(std.io.getStdErr())); + try printSourceAtAddress(&di, writer, showMyTrace(), io.tty.detectConfig(std.io.getStdErr())); } noinline fn showMyTrace() usize { @@ -2253,7 +2138,7 @@ pub fn ConfigurableTrace(comptime size: usize, comptime stack_frame_count: usize pub fn dump(t: @This()) void { if (!enabled) return; - const tty_config = detectTTYConfig(std.io.getStdErr()); + const tty_config = io.tty.detectConfig(std.io.getStdErr()); const stderr = io.getStdErr().writer(); const end = @min(t.index, size); const debug_info = getSelfDebugInfo() catch |err| { diff --git a/lib/std/io.zig b/lib/std/io.zig index d95997f853..f6d893c7dd 100644 --- a/lib/std/io.zig +++ b/lib/std/io.zig @@ -155,6 +155,8 @@ pub const BufferedAtomicFile = @import("io/buffered_atomic_file.zig").BufferedAt pub const StreamSource = @import("io/stream_source.zig").StreamSource; +pub const tty = @import("io/tty.zig"); + /// A Writer that doesn't write to anything. 
pub const null_writer = @as(NullWriter, .{ .context = {} }); diff --git a/lib/std/io/tty.zig b/lib/std/io/tty.zig new file mode 100644 index 0000000000..ea1c52db00 --- /dev/null +++ b/lib/std/io/tty.zig @@ -0,0 +1,121 @@ +const std = @import("std"); +const builtin = @import("builtin"); +const File = std.fs.File; +const process = std.process; +const windows = std.os.windows; +const native_os = builtin.os.tag; + +/// Detect suitable TTY configuration options for the given file (commonly stdout/stderr). +/// This includes feature checks for ANSI escape codes and the Windows console API, as well as +/// respecting the `NO_COLOR` environment variable. +pub fn detectConfig(file: File) Config { + if (builtin.os.tag == .wasi) { + // Per https://github.com/WebAssembly/WASI/issues/162 ANSI codes + // aren't currently supported. + return .no_color; + } else if (process.hasEnvVarConstant("ZIG_DEBUG_COLOR")) { + return .escape_codes; + } else if (process.hasEnvVarConstant("NO_COLOR")) { + return .no_color; + } else if (file.supportsAnsiEscapeCodes()) { + return .escape_codes; + } else if (native_os == .windows and file.isTty()) { + var info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined; + if (windows.kernel32.GetConsoleScreenBufferInfo(file.handle, &info) != windows.TRUE) { + // TODO: Should this return an error instead? + return .no_color; + } + return .{ .windows_api = .{ + .handle = file.handle, + .reset_attributes = info.wAttributes, + } }; + } + return .no_color; +} + +pub const Color = enum { + red, + green, + yellow, + cyan, + white, + dim, + bold, + reset, +}; + +/// Provides simple functionality for manipulating the terminal in some way, +/// such as coloring text, etc. 
+pub const Config = union(enum) { + no_color, + escape_codes, + windows_api: if (native_os == .windows) WindowsContext else void, + + pub const WindowsContext = struct { + handle: File.Handle, + reset_attributes: u16, + }; + + pub fn setColor(conf: Config, out_stream: anytype, color: Color) !void { + nosuspend switch (conf) { + .no_color => return, + .escape_codes => { + const color_string = switch (color) { + .red => "\x1b[31;1m", + .green => "\x1b[32;1m", + .yellow => "\x1b[33;1m", + .cyan => "\x1b[36;1m", + .white => "\x1b[37;1m", + .bold => "\x1b[1m", + .dim => "\x1b[2m", + .reset => "\x1b[0m", + }; + try out_stream.writeAll(color_string); + }, + .windows_api => |ctx| if (native_os == .windows) { + const attributes = switch (color) { + .red => windows.FOREGROUND_RED | windows.FOREGROUND_INTENSITY, + .green => windows.FOREGROUND_GREEN | windows.FOREGROUND_INTENSITY, + .yellow => windows.FOREGROUND_RED | windows.FOREGROUND_GREEN | windows.FOREGROUND_INTENSITY, + .cyan => windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY, + .white, .bold => windows.FOREGROUND_RED | windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY, + .dim => windows.FOREGROUND_INTENSITY, + .reset => ctx.reset_attributes, + }; + try windows.SetConsoleTextAttribute(ctx.handle, attributes); + } else { + unreachable; + }, + }; + } + + pub fn writeDEC(conf: Config, writer: anytype, codepoint: u8) !void { + const bytes = switch (conf) { + .no_color, .windows_api => switch (codepoint) { + 0x50...0x5e => @as(*const [1]u8, &codepoint), + 0x6a => "+", // ┘ + 0x6b => "+", // ┐ + 0x6c => "+", // ┌ + 0x6d => "+", // └ + 0x6e => "+", // ┼ + 0x71 => "-", // ─ + 0x74 => "+", // ├ + 0x75 => "+", // ┤ + 0x76 => "+", // ┴ + 0x77 => "+", // ┬ + 0x78 => "|", // │ + else => " ", // TODO + }, + .escape_codes => switch (codepoint) { + // Here we avoid writing the DEC beginning sequence and + // ending sequence in separate syscalls by putting the + // 
beginning and ending sequence into the same string + // literals, to prevent terminals ending up in bad states + // in case a crash happens between syscalls. + inline 0x50...0x7f => |x| "\x1B\x28\x30" ++ [1]u8{x} ++ "\x1B\x28\x42", + else => unreachable, + }, + }; + return writer.writeAll(bytes); + } +}; diff --git a/lib/std/testing.zig b/lib/std/testing.zig index 8576ec0c83..7986c50eaf 100644 --- a/lib/std/testing.zig +++ b/lib/std/testing.zig @@ -279,7 +279,7 @@ test "expectApproxEqRel" { /// This function is intended to be used only in tests. When the two slices are not /// equal, prints diagnostics to stderr to show exactly how they are not equal (with /// the differences highlighted in red), then returns a test failure error. -/// The colorized output is optional and controlled by the return of `std.debug.detectTTYConfig()`. +/// The colorized output is optional and controlled by the return of `std.io.tty.detectConfig()`. /// If your inputs are UTF-8 encoded strings, consider calling `expectEqualStrings` instead. 
pub fn expectEqualSlices(comptime T: type, expected: []const T, actual: []const T) !void { if (expected.ptr == actual.ptr and expected.len == actual.len) { @@ -312,7 +312,7 @@ pub fn expectEqualSlices(comptime T: type, expected: []const T, actual: []const const actual_window = actual[window_start..@min(actual.len, window_start + max_window_size)]; const actual_truncated = window_start + actual_window.len < actual.len; - const ttyconf = std.debug.detectTTYConfig(std.io.getStdErr()); + const ttyconf = std.io.tty.detectConfig(std.io.getStdErr()); var differ = if (T == u8) BytesDiffer{ .expected = expected_window, .actual = actual_window, @@ -379,7 +379,7 @@ fn SliceDiffer(comptime T: type) type { start_index: usize, expected: []const T, actual: []const T, - ttyconf: std.debug.TTY.Config, + ttyconf: std.io.tty.Config, const Self = @This(); @@ -398,7 +398,7 @@ fn SliceDiffer(comptime T: type) type { const BytesDiffer = struct { expected: []const u8, actual: []const u8, - ttyconf: std.debug.TTY.Config, + ttyconf: std.io.tty.Config, pub fn write(self: BytesDiffer, writer: anytype) !void { var expected_iterator = ChunkIterator{ .bytes = self.expected }; diff --git a/lib/std/zig/ErrorBundle.zig b/lib/std/zig/ErrorBundle.zig index f74d82273a..46b5799807 100644 --- a/lib/std/zig/ErrorBundle.zig +++ b/lib/std/zig/ErrorBundle.zig @@ -148,7 +148,7 @@ pub fn nullTerminatedString(eb: ErrorBundle, index: usize) [:0]const u8 { } pub const RenderOptions = struct { - ttyconf: std.debug.TTY.Config, + ttyconf: std.io.tty.Config, include_reference_trace: bool = true, include_source_line: bool = true, include_log_text: bool = true, @@ -181,7 +181,7 @@ fn renderErrorMessageToWriter( err_msg_index: MessageIndex, stderr: anytype, kind: []const u8, - color: std.debug.TTY.Color, + color: std.io.tty.Color, indent: usize, ) anyerror!void { const ttyconf = options.ttyconf; diff --git a/src/main.zig b/src/main.zig index 650741e5e4..afda88cebd 100644 --- a/src/main.zig +++ b/src/main.zig @@ -6044,9 
+6044,9 @@ const ClangSearchSanitizer = struct { }; }; -fn get_tty_conf(color: Color) std.debug.TTY.Config { +fn get_tty_conf(color: Color) std.io.tty.Config { return switch (color) { - .auto => std.debug.detectTTYConfig(std.io.getStdErr()), + .auto => std.io.tty.detectConfig(std.io.getStdErr()), .on => .escape_codes, .off => .no_color, }; diff --git a/test/src/Cases.zig b/test/src/Cases.zig index 63dd2fd3da..08568d0dd6 100644 --- a/test/src/Cases.zig +++ b/test/src/Cases.zig @@ -1354,7 +1354,7 @@ fn runOneCase( defer all_errors.deinit(allocator); if (all_errors.errorMessageCount() > 0) { all_errors.renderToStdErr(.{ - .ttyconf = std.debug.detectTTYConfig(std.io.getStdErr()), + .ttyconf = std.io.tty.detectConfig(std.io.getStdErr()), }); // TODO print generated C code return error.UnexpectedCompileErrors; -- cgit v1.2.3 From 4976b58ab16069f8d3267b69ed030f29685c1abe Mon Sep 17 00:00:00 2001 From: mlugg Date: Mon, 29 May 2023 05:07:17 +0100 Subject: Prevent analysis of functions only referenced at comptime The idea here is that there are two ways we can reference a function at runtime: * Through a direct call, i.e. where the function is comptime-known * Through a function pointer This means we can easily perform a form of rudimentary escape analysis on functions. If we ever see a `decl_ref` or `ref` of a function, we have a function pointer, which could "leak" into runtime code, so we emit the function; but for a plain `decl_val`, there's no need to. This change means that `comptime { _ = f; }` no longer forces a function to be emitted, which was used for some things (mainly tests). These use sites have been replaced with `_ = &f;`, which still triggers analysis of the function body, since you're taking a pointer to the function. 
Resolves: #6256 Resolves: #15353 --- lib/compiler_rt/clear_cache.zig | 2 +- lib/std/fmt.zig | 2 +- lib/std/fs.zig | 10 +-- lib/std/hash_map.zig | 2 +- lib/std/multi_array_list.zig | 4 +- lib/std/os/test.zig | 2 +- lib/std/testing.zig | 4 +- src/Compilation.zig | 7 ++ src/Module.zig | 74 +++++++++++++++++----- src/Sema.zig | 41 ++++++++++-- src/type.zig | 2 +- src/value.zig | 2 +- test/behavior/sizeof_and_typeof.zig | 2 +- .../closure_get_depends_on_failed_decl.zig | 2 +- ...og_of_tagged_enum_doesnt_crash_the_compiler.zig | 5 +- test/cases/compile_errors/compile_log.zig | 11 ++-- test/cases/compile_errors/dereference_slice.zig | 2 +- .../extern_function_with_comptime_parameter.zig | 6 +- .../invalid_address_space_coercion.zig | 2 +- ...ss_space_when_taking_address_of_dereference.zig | 2 +- .../noalias_on_non_pointer_param.zig | 4 +- .../pointer_with_different_address_spaces.zig | 2 +- .../pointers_with_different_address_spaces.zig | 2 +- .../compile_errors/slice_sentinel_mismatch-2.zig | 2 +- ...r_access_chaining_pointer_to_optional_array.zig | 2 +- ...paces_pointer_access_chaining_array_pointer.zig | 2 +- ...ress_spaces_pointer_access_chaining_complex.zig | 2 +- ...aces_pointer_access_chaining_struct_pointer.zig | 2 +- ...hough_multiple_pointers_with_address_spaces.zig | 2 +- test/cases/llvm/pointer_keeps_address_space.zig | 2 +- ...ss_space_when_taking_address_of_dereference.zig | 2 +- ...c_address_space_coerces_to_implicit_pointer.zig | 2 +- test/link/wasm/type/build.zig | 4 +- 33 files changed, 149 insertions(+), 65 deletions(-) (limited to 'lib/std/testing.zig') diff --git a/lib/compiler_rt/clear_cache.zig b/lib/compiler_rt/clear_cache.zig index 93e6846ae5..5038c4061a 100644 --- a/lib/compiler_rt/clear_cache.zig +++ b/lib/compiler_rt/clear_cache.zig @@ -12,7 +12,7 @@ pub const panic = @import("common.zig").panic; // specified range. 
comptime { - _ = clear_cache; + _ = &clear_cache; } fn clear_cache(start: usize, end: usize) callconv(.C) void { diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig index ac1f86cce5..be6ebf20ac 100644 --- a/lib/std/fmt.zig +++ b/lib/std/fmt.zig @@ -1959,7 +1959,7 @@ pub const parseFloat = @import("fmt/parse_float.zig").parseFloat; pub const ParseFloatError = @import("fmt/parse_float.zig").ParseFloatError; test { - _ = parseFloat; + _ = &parseFloat; } pub fn charToDigit(c: u8, radix: u8) (error{InvalidCharacter}!u8) { diff --git a/lib/std/fs.zig b/lib/std/fs.zig index 6ab2dbaa7f..746bfde383 100644 --- a/lib/std/fs.zig +++ b/lib/std/fs.zig @@ -3150,12 +3150,12 @@ fn copy_file(fd_in: os.fd_t, fd_out: os.fd_t, maybe_size: ?u64) CopyFileRawError test { if (builtin.os.tag != .wasi) { - _ = makeDirAbsolute; - _ = makeDirAbsoluteZ; - _ = copyFileAbsolute; - _ = updateFileAbsolute; + _ = &makeDirAbsolute; + _ = &makeDirAbsoluteZ; + _ = ©FileAbsolute; + _ = &updateFileAbsolute; } - _ = Dir.copyFile; + _ = &Dir.copyFile; _ = @import("fs/test.zig"); _ = @import("fs/path.zig"); _ = @import("fs/file.zig"); diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig index 91f5682831..50ff2f0c94 100644 --- a/lib/std/hash_map.zig +++ b/lib/std/hash_map.zig @@ -1605,7 +1605,7 @@ pub fn HashMapUnmanaged( comptime { if (builtin.mode == .Debug) { - _ = dbHelper; + _ = &dbHelper; } } }; diff --git a/lib/std/multi_array_list.zig b/lib/std/multi_array_list.zig index 44e226be33..e9011c3c63 100644 --- a/lib/std/multi_array_list.zig +++ b/lib/std/multi_array_list.zig @@ -532,8 +532,8 @@ pub fn MultiArrayList(comptime T: type) type { comptime { if (builtin.mode == .Debug) { - _ = dbHelper; - _ = Slice.dbHelper; + _ = &dbHelper; + _ = &Slice.dbHelper; } } }; diff --git a/lib/std/os/test.zig b/lib/std/os/test.zig index f694ea277a..e7b66c0d55 100644 --- a/lib/std/os/test.zig +++ b/lib/std/os/test.zig @@ -704,7 +704,7 @@ test "signalfd" { .linux, .solaris => {}, else => return error.SkipZigTest, } - _ = 
os.signalfd; + _ = &os.signalfd; } test "sync" { diff --git a/lib/std/testing.zig b/lib/std/testing.zig index 7986c50eaf..fa131122bb 100644 --- a/lib/std/testing.zig +++ b/lib/std/testing.zig @@ -1116,7 +1116,7 @@ pub fn checkAllAllocationFailures(backing_allocator: std.mem.Allocator, comptime pub fn refAllDecls(comptime T: type) void { if (!builtin.is_test) return; inline for (comptime std.meta.declarations(T)) |decl| { - if (decl.is_pub) _ = @field(T, decl.name); + if (decl.is_pub) _ = &@field(T, decl.name); } } @@ -1132,7 +1132,7 @@ pub fn refAllDeclsRecursive(comptime T: type) void { else => {}, } } - _ = @field(T, decl.name); + _ = &@field(T, decl.name); } } } diff --git a/src/Compilation.zig b/src/Compilation.zig index cc2e2a916b..956948dd86 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -3193,6 +3193,13 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => return, }; + const decl = module.declPtr(decl_index); + if (decl.kind == .@"test" and comp.bin_file.options.is_test) { + // Tests are always emitted in test binaries. The decl_refs are created by + // Module.populateTestFunctions, but this will not queue body analysis, so do + // that now. + try module.ensureFuncBodyAnalysisQueued(decl.val.castTag(.function).?.data); + } }, .update_embed_file => |embed_file| { const named_frame = tracy.namedFrame("update_embed_file"); diff --git a/src/Module.zig b/src/Module.zig index 61843f5a8f..59ee21d8cf 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1638,6 +1638,10 @@ pub const Fn = struct { inferred_error_sets: InferredErrorSetList = .{}, pub const Analysis = enum { + /// This function has not yet undergone analysis, because we have not + /// seen a potential runtime call. It may be analyzed in future. + none, + /// Analysis for this function has been queued, but not yet completed. 
queued, /// This function intentionally only has ZIR generated because it is marked /// inline, which means no runtime version of the function will be generated. @@ -4323,7 +4327,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { .complete, .codegen_failure_retryable => { switch (func.state) { .sema_failure, .dependency_failure => return error.AnalysisFail, - .queued => {}, + .none, .queued => {}, .in_progress => unreachable, .inline_only => unreachable, // don't queue work for this .success => return, @@ -4426,6 +4430,60 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { } } +/// Ensure this function's body is or will be analyzed and emitted. This should +/// be called whenever a potential runtime call of a function is seen. +/// +/// The caller is responsible for ensuring the function decl itself is already +/// analyzed, and for ensuring it can exist at runtime (see +/// `sema.fnHasRuntimeBits`). This function does *not* guarantee that the body +/// will be analyzed when it returns: for that, see `ensureFuncBodyAnalyzed`. +pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func: *Fn) !void { + const decl_index = func.owner_decl; + const decl = mod.declPtr(decl_index); + + switch (decl.analysis) { + .unreferenced => unreachable, + .in_progress => unreachable, + .outdated => unreachable, + + .file_failure, + .sema_failure, + .liveness_failure, + .codegen_failure, + .dependency_failure, + .sema_failure_retryable, + .codegen_failure_retryable, + // The function analysis failed, but we've already emitted an error for + // that. The callee doesn't need the function to be analyzed right now, + // so its analysis can safely continue. + => return, + + .complete => {}, + } + + assert(decl.has_tv); + + switch (func.state) { + .none => {}, + .queued => return, + // As above, we don't need to forward errors here. 
+ .sema_failure, .dependency_failure => return, + .in_progress => return, + .inline_only => unreachable, // don't queue work for this + .success => return, + } + + // Decl itself is safely analyzed, and body analysis is not yet queued + + try mod.comp.work_queue.writeItem(.{ .codegen_func = func }); + if (mod.emit_h != null) { + // TODO: we ideally only want to do this if the function's type changed + // since the last update + try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl_index }); + } + func.state = .queued; +} + pub fn updateEmbedFile(mod: *Module, embed_file: *EmbedFile) SemaError!void { const tracy = trace(@src()); defer tracy.end(); @@ -4733,20 +4791,6 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { decl.analysis = .complete; decl.generation = mod.generation; - const has_runtime_bits = try sema.fnHasRuntimeBits(decl.ty); - - if (has_runtime_bits) { - // We don't fully codegen the decl until later, but we do need to reserve a global - // offset table index for it. This allows us to codegen decls out of dependency - // order, increasing how many computations can be done in parallel. 
- try mod.comp.work_queue.writeItem(.{ .codegen_func = func }); - if (type_changed and mod.emit_h != null) { - try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl_index }); - } - } else if (!prev_is_inline and prev_type_has_bits) { - mod.comp.bin_file.freeDecl(decl_index); - } - const is_inline = decl.ty.fnCallingConvention() == .Inline; if (decl.is_exported) { const export_src: LazySrcLoc = .{ .token_offset = @boolToInt(decl.is_pub) }; diff --git a/src/Sema.zig b/src/Sema.zig index 4ae928d243..bd146c68fc 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2452,6 +2452,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE .@"align" = iac.data.alignment, .@"addrspace" = addr_space, }); + try sema.maybeQueueFuncBodyAnalysis(iac.data.decl_index); return sema.addConstant( ptr_ty, try Value.Tag.decl_ref_mut.create(sema.arena, .{ @@ -3709,6 +3710,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com const final_ptr_ty_inst = try sema.addType(final_ptr_ty); sema.air_instructions.items(.data)[ptr_inst].ty_pl.ty = final_ptr_ty_inst; + try sema.maybeQueueFuncBodyAnalysis(decl_index); if (var_is_mut) { sema.air_values.items[value_index] = try Value.Tag.decl_ref_mut.create(sema.arena, .{ .decl_index = decl_index, @@ -3809,6 +3811,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com // Even though we reuse the constant instruction, we still remove it from the // block so that codegen does not see it. block.instructions.shrinkRetainingCapacity(search_index); + try sema.maybeQueueFuncBodyAnalysis(new_decl_index); sema.air_values.items[value_index] = try Value.Tag.decl_ref.create(sema.arena, new_decl_index); // if bitcast ty ref needs to be made const, make_ptr_const // ZIR handles it later, so we can just use the ty ref here. 
@@ -5747,6 +5750,7 @@ pub fn analyzeExport( // This decl is alive no matter what, since it's being exported mod.markDeclAlive(exported_decl); + try sema.maybeQueueFuncBodyAnalysis(exported_decl_index); const gpa = mod.gpa; @@ -7068,6 +7072,12 @@ fn analyzeCall( sema.owner_func.?.calls_or_awaits_errorable_fn = true; } + if (try sema.resolveMaybeUndefVal(func)) |func_val| { + if (func_val.castTag(.function)) |func_obj| { + try sema.mod.ensureFuncBodyAnalysisQueued(func_obj.data); + } + } + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Call).Struct.fields.len + args.len); const func_inst = try block.addInst(.{ @@ -7585,6 +7595,8 @@ fn instantiateGenericCall( sema.owner_func.?.calls_or_awaits_errorable_fn = true; } + try sema.mod.ensureFuncBodyAnalysisQueued(callee); + try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Call).Struct.fields.len + runtime_args_len); const result = try block.addInst(.{ @@ -9143,7 +9155,7 @@ fn funcCommon( } const is_inline = fn_ty.fnCallingConvention() == .Inline; - const anal_state: Module.Fn.Analysis = if (is_inline) .inline_only else .queued; + const anal_state: Module.Fn.Analysis = if (is_inline) .inline_only else .none; const comptime_args: ?[*]TypedValue = if (sema.comptime_args_fn_inst == func_inst) blk: { break :blk if (sema.comptime_args.len == 0) null else sema.comptime_args.ptr; @@ -24279,9 +24291,7 @@ fn fieldCallBind( if (concrete_ty.getNamespace()) |namespace| { if (try sema.namespaceLookup(block, src, namespace, field_name)) |decl_idx| { try sema.addReferencedBy(block, src, decl_idx); - const inst = try sema.analyzeDeclRef(decl_idx); - - const decl_val = try sema.analyzeLoad(block, src, inst, src); + const decl_val = try sema.analyzeDeclVal(block, src, decl_idx); const decl_type = sema.typeOf(decl_val); if (decl_type.zigTypeTag() == .Fn and decl_type.fnParamLen() >= 1) @@ -28911,7 +28921,7 @@ fn analyzeDeclVal( if (sema.decl_val_table.get(decl_index)) |result| { return result; } - const decl_ref = 
try sema.analyzeDeclRef(decl_index); + const decl_ref = try sema.analyzeDeclRefInner(decl_index, false); const result = try sema.analyzeLoad(block, src, decl_ref, src); if (Air.refToIndex(result)) |index| { if (sema.air_instructions.items(.tag)[index] == .constant and !block.is_typeof) { @@ -28970,6 +28980,7 @@ fn refValue(sema: *Sema, block: *Block, ty: Type, val: Value) !Value { try val.copy(anon_decl.arena()), 0, // default alignment ); + try sema.maybeQueueFuncBodyAnalysis(decl); try sema.mod.declareDeclDependency(sema.owner_decl_index, decl); return try Value.Tag.decl_ref.create(sema.arena, decl); } @@ -28982,6 +28993,14 @@ fn optRefValue(sema: *Sema, block: *Block, ty: Type, opt_val: ?Value) !Value { } fn analyzeDeclRef(sema: *Sema, decl_index: Decl.Index) CompileError!Air.Inst.Ref { + return sema.analyzeDeclRefInner(decl_index, true); +} + +/// Analyze a reference to the decl at the given index. Ensures the underlying decl is analyzed, but +/// only triggers analysis for function bodies if `analyze_fn_body` is true. If it's possible for a +/// decl_ref to end up in runtime code, the function body must be analyzed: `analyzeDeclRef` wraps +/// this function with `analyze_fn_body` set to true. 
+fn analyzeDeclRefInner(sema: *Sema, decl_index: Decl.Index, analyze_fn_body: bool) CompileError!Air.Inst.Ref { try sema.mod.declareDeclDependency(sema.owner_decl_index, decl_index); try sema.ensureDeclAnalyzed(decl_index); @@ -28997,6 +29016,9 @@ fn analyzeDeclRef(sema: *Sema, decl_index: Decl.Index) CompileError!Air.Inst.Ref }); return sema.addConstant(ty, try Value.Tag.decl_ref.create(sema.arena, decl_index)); } + if (analyze_fn_body) { + try sema.maybeQueueFuncBodyAnalysis(decl_index); + } return sema.addConstant( try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = decl_tv.ty, @@ -29008,6 +29030,15 @@ fn analyzeDeclRef(sema: *Sema, decl_index: Decl.Index) CompileError!Air.Inst.Ref ); } +fn maybeQueueFuncBodyAnalysis(sema: *Sema, decl_index: Decl.Index) !void { + const decl = sema.mod.declPtr(decl_index); + const tv = try decl.typedValue(); + if (tv.ty.zigTypeTag() != .Fn) return; + if (!try sema.fnHasRuntimeBits(tv.ty)) return; + const func = tv.val.castTag(.function) orelse return; // undef or extern_fn + try sema.mod.ensureFuncBodyAnalysisQueued(func.data); +} + fn analyzeRef( sema: *Sema, block: *Block, diff --git a/src/type.zig b/src/type.zig index 4023b5ba66..e5b41e717b 100644 --- a/src/type.zig +++ b/src/type.zig @@ -6802,7 +6802,7 @@ pub const Type = extern union { comptime { if (builtin.mode == .Debug) { - _ = dbHelper; + _ = &dbHelper; } } }; diff --git a/src/value.zig b/src/value.zig index b18ba3d834..af2d7b1ca2 100644 --- a/src/value.zig +++ b/src/value.zig @@ -5709,7 +5709,7 @@ pub const Value = extern union { comptime { if (builtin.mode == .Debug) { - _ = dbHelper; + _ = &dbHelper; } } }; diff --git a/test/behavior/sizeof_and_typeof.zig b/test/behavior/sizeof_and_typeof.zig index e463e51753..6f7d420646 100644 --- a/test/behavior/sizeof_and_typeof.zig +++ b/test/behavior/sizeof_and_typeof.zig @@ -48,7 +48,7 @@ fn fn1(alpha: bool) void { } test "lazy @sizeOf result is checked for definedness" { - _ = fn1; + _ = &fn1; } const A = struct { diff --git 
a/test/cases/compile_errors/closure_get_depends_on_failed_decl.zig b/test/cases/compile_errors/closure_get_depends_on_failed_decl.zig index ccdbf67713..be451d2fc8 100644 --- a/test/cases/compile_errors/closure_get_depends_on_failed_decl.zig +++ b/test/cases/compile_errors/closure_get_depends_on_failed_decl.zig @@ -3,7 +3,7 @@ pub inline fn instanceRequestAdapter() void {} pub inline fn requestAdapter( comptime callbackArg: fn () callconv(.Inline) void, ) void { - _ = (struct { + _ = &(struct { pub fn callback() callconv(.C) void { callbackArg(); } diff --git a/test/cases/compile_errors/compileLog_of_tagged_enum_doesnt_crash_the_compiler.zig b/test/cases/compile_errors/compileLog_of_tagged_enum_doesnt_crash_the_compiler.zig index 55676f9230..f7de8129b7 100644 --- a/test/cases/compile_errors/compileLog_of_tagged_enum_doesnt_crash_the_compiler.zig +++ b/test/cases/compile_errors/compileLog_of_tagged_enum_doesnt_crash_the_compiler.zig @@ -1,5 +1,5 @@ const Bar = union(enum(u32)) { - X: i32 = 1 + X: i32 = 1, }; fn testCompileLog(x: Bar) void { @@ -7,7 +7,8 @@ fn testCompileLog(x: Bar) void { } pub export fn entry() void { - comptime testCompileLog(Bar{.X = 123}); + comptime testCompileLog(Bar{ .X = 123 }); + _ = &testCompileLog; } // error diff --git a/test/cases/compile_errors/compile_log.zig b/test/cases/compile_errors/compile_log.zig index e1ea460dc3..444d091017 100644 --- a/test/cases/compile_errors/compile_log.zig +++ b/test/cases/compile_errors/compile_log.zig @@ -1,10 +1,11 @@ export fn foo() void { - comptime bar(12, "hi",); + comptime bar(12, "hi"); + _ = &bar; } fn bar(a: i32, b: []const u8) void { - @compileLog("begin",); + @compileLog("begin"); @compileLog("a", a, "b", b); - @compileLog("end",); + @compileLog("end"); } export fn baz() void { const S = struct { a: u32 }; @@ -15,8 +16,8 @@ export fn baz() void { // backend=llvm // target=native // -// :5:5: error: found compile log statement -// :11:5: note: also here +// :6:5: error: found compile log 
statement +// :12:5: note: also here // // Compile Log Output: // @as(*const [5:0]u8, "begin") diff --git a/test/cases/compile_errors/dereference_slice.zig b/test/cases/compile_errors/dereference_slice.zig index 7dba3b55d8..55d6078b22 100644 --- a/test/cases/compile_errors/dereference_slice.zig +++ b/test/cases/compile_errors/dereference_slice.zig @@ -2,7 +2,7 @@ fn entry(x: []i32) i32 { return x.*; } comptime { - _ = entry; + _ = &entry; } // error diff --git a/test/cases/compile_errors/extern_function_with_comptime_parameter.zig b/test/cases/compile_errors/extern_function_with_comptime_parameter.zig index 58f15f7fab..8ade9ca2aa 100644 --- a/test/cases/compile_errors/extern_function_with_comptime_parameter.zig +++ b/test/cases/compile_errors/extern_function_with_comptime_parameter.zig @@ -4,9 +4,9 @@ fn f() i32 { } pub extern fn entry1(b: u32, comptime a: [2]u8, c: i32) void; pub extern fn entry2(b: u32, noalias a: anytype, i43) void; -comptime { _ = f; } -comptime { _ = entry1; } -comptime { _ = entry2; } +comptime { _ = &f; } +comptime { _ = &entry1; } +comptime { _ = &entry2; } // error // backend=stage2 diff --git a/test/cases/compile_errors/invalid_address_space_coercion.zig b/test/cases/compile_errors/invalid_address_space_coercion.zig index 4633b12e0f..baf37cbe37 100644 --- a/test/cases/compile_errors/invalid_address_space_coercion.zig +++ b/test/cases/compile_errors/invalid_address_space_coercion.zig @@ -2,7 +2,7 @@ fn entry(a: *addrspace(.gs) i32) *i32 { return a; } pub fn main() void { - _ = entry; + _ = &entry; } // error diff --git a/test/cases/compile_errors/invalid_pointer_keeps_address_space_when_taking_address_of_dereference.zig b/test/cases/compile_errors/invalid_pointer_keeps_address_space_when_taking_address_of_dereference.zig index 4d7b3c627b..c59238d6a7 100644 --- a/test/cases/compile_errors/invalid_pointer_keeps_address_space_when_taking_address_of_dereference.zig +++ 
b/test/cases/compile_errors/invalid_pointer_keeps_address_space_when_taking_address_of_dereference.zig @@ -2,7 +2,7 @@ fn entry(a: *addrspace(.gs) i32) *i32 { return &a.*; } pub fn main() void { - _ = entry; + _ = &entry; } // error diff --git a/test/cases/compile_errors/noalias_on_non_pointer_param.zig b/test/cases/compile_errors/noalias_on_non_pointer_param.zig index 806808820f..65e6e141ce 100644 --- a/test/cases/compile_errors/noalias_on_non_pointer_param.zig +++ b/test/cases/compile_errors/noalias_on_non_pointer_param.zig @@ -2,10 +2,10 @@ fn f(noalias x: i32) void { _ = x; } export fn entry() void { f(1234); } fn generic(comptime T: type, noalias _: [*]T, noalias _: [*]const T, _: usize) void {} -comptime { _ = generic; } +comptime { _ = &generic; } fn slice(noalias _: []u8) void {} -comptime { _ = slice; } +comptime { _ = &slice; } // error // backend=stage2 diff --git a/test/cases/compile_errors/pointer_with_different_address_spaces.zig b/test/cases/compile_errors/pointer_with_different_address_spaces.zig index 2bbea3d3b6..7a434cfa8e 100644 --- a/test/cases/compile_errors/pointer_with_different_address_spaces.zig +++ b/test/cases/compile_errors/pointer_with_different_address_spaces.zig @@ -2,7 +2,7 @@ fn entry(a: *addrspace(.gs) i32) *addrspace(.fs) i32 { return a; } export fn entry2() void { - _ = entry; + _ = &entry; } // error diff --git a/test/cases/compile_errors/pointers_with_different_address_spaces.zig b/test/cases/compile_errors/pointers_with_different_address_spaces.zig index e952da2af5..44b1ef8722 100644 --- a/test/cases/compile_errors/pointers_with_different_address_spaces.zig +++ b/test/cases/compile_errors/pointers_with_different_address_spaces.zig @@ -2,7 +2,7 @@ fn entry(a: ?*addrspace(.gs) i32) *i32 { return a.?; } pub fn main() void { - _ = entry; + _ = &entry; } // error diff --git a/test/cases/compile_errors/slice_sentinel_mismatch-2.zig b/test/cases/compile_errors/slice_sentinel_mismatch-2.zig index 3cc5ac4c39..ea34805e32 100644 --- 
a/test/cases/compile_errors/slice_sentinel_mismatch-2.zig +++ b/test/cases/compile_errors/slice_sentinel_mismatch-2.zig @@ -2,7 +2,7 @@ fn foo() [:0]u8 { var x: []u8 = undefined; return x; } -comptime { _ = foo; } +comptime { _ = &foo; } // error // backend=stage2 diff --git a/test/cases/llvm/address_space_pointer_access_chaining_pointer_to_optional_array.zig b/test/cases/llvm/address_space_pointer_access_chaining_pointer_to_optional_array.zig index 00d4a7ecc9..9ee3fa4de4 100644 --- a/test/cases/llvm/address_space_pointer_access_chaining_pointer_to_optional_array.zig +++ b/test/cases/llvm/address_space_pointer_access_chaining_pointer_to_optional_array.zig @@ -2,7 +2,7 @@ fn entry(a: *addrspace(.gs) ?[1]i32) *addrspace(.gs) i32 { return &a.*.?[0]; } pub fn main() void { - _ = entry; + _ = &entry; } // compile diff --git a/test/cases/llvm/address_spaces_pointer_access_chaining_array_pointer.zig b/test/cases/llvm/address_spaces_pointer_access_chaining_array_pointer.zig index f23498e955..da90f3ee1d 100644 --- a/test/cases/llvm/address_spaces_pointer_access_chaining_array_pointer.zig +++ b/test/cases/llvm/address_spaces_pointer_access_chaining_array_pointer.zig @@ -2,7 +2,7 @@ fn entry(a: *addrspace(.gs) [1]i32) *addrspace(.gs) i32 { return &a[0]; } pub fn main() void { - _ = entry; + _ = &entry; } // compile diff --git a/test/cases/llvm/address_spaces_pointer_access_chaining_complex.zig b/test/cases/llvm/address_spaces_pointer_access_chaining_complex.zig index 4f54f38e6b..5be74a0ea5 100644 --- a/test/cases/llvm/address_spaces_pointer_access_chaining_complex.zig +++ b/test/cases/llvm/address_spaces_pointer_access_chaining_complex.zig @@ -3,7 +3,7 @@ fn entry(a: *addrspace(.gs) [1]A) *addrspace(.gs) i32 { return &a[0].a.?[0]; } pub fn main() void { - _ = entry; + _ = &entry; } // compile diff --git a/test/cases/llvm/address_spaces_pointer_access_chaining_struct_pointer.zig b/test/cases/llvm/address_spaces_pointer_access_chaining_struct_pointer.zig index 
84695cb35b..d3182b4745 100644 --- a/test/cases/llvm/address_spaces_pointer_access_chaining_struct_pointer.zig +++ b/test/cases/llvm/address_spaces_pointer_access_chaining_struct_pointer.zig @@ -3,7 +3,7 @@ fn entry(a: *addrspace(.gs) A) *addrspace(.gs) i32 { return &a.a; } pub fn main() void { - _ = entry; + _ = &entry; } // compile diff --git a/test/cases/llvm/dereferencing_though_multiple_pointers_with_address_spaces.zig b/test/cases/llvm/dereferencing_though_multiple_pointers_with_address_spaces.zig index badab821d3..a4b5d1372a 100644 --- a/test/cases/llvm/dereferencing_though_multiple_pointers_with_address_spaces.zig +++ b/test/cases/llvm/dereferencing_though_multiple_pointers_with_address_spaces.zig @@ -2,7 +2,7 @@ fn entry(a: *addrspace(.fs) *addrspace(.gs) *i32) *i32 { return a.*.*; } pub fn main() void { - _ = entry; + _ = &entry; } // compile diff --git a/test/cases/llvm/pointer_keeps_address_space.zig b/test/cases/llvm/pointer_keeps_address_space.zig index f894c96d7b..fa1a11a0c5 100644 --- a/test/cases/llvm/pointer_keeps_address_space.zig +++ b/test/cases/llvm/pointer_keeps_address_space.zig @@ -2,7 +2,7 @@ fn entry(a: *addrspace(.gs) i32) *addrspace(.gs) i32 { return a; } pub fn main() void { - _ = entry; + _ = &entry; } // compile diff --git a/test/cases/llvm/pointer_keeps_address_space_when_taking_address_of_dereference.zig b/test/cases/llvm/pointer_keeps_address_space_when_taking_address_of_dereference.zig index b5803a3076..1fade5ce54 100644 --- a/test/cases/llvm/pointer_keeps_address_space_when_taking_address_of_dereference.zig +++ b/test/cases/llvm/pointer_keeps_address_space_when_taking_address_of_dereference.zig @@ -2,7 +2,7 @@ fn entry(a: *addrspace(.gs) i32) *addrspace(.gs) i32 { return &a.*; } pub fn main() void { - _ = entry; + _ = &entry; } // compile diff --git a/test/cases/llvm/pointer_to_explicit_generic_address_space_coerces_to_implicit_pointer.zig b/test/cases/llvm/pointer_to_explicit_generic_address_space_coerces_to_implicit_pointer.zig 
index b3c0116983..287bc54a18 100644 --- a/test/cases/llvm/pointer_to_explicit_generic_address_space_coerces_to_implicit_pointer.zig +++ b/test/cases/llvm/pointer_to_explicit_generic_address_space_coerces_to_implicit_pointer.zig @@ -2,7 +2,7 @@ fn entry(a: *addrspace(.generic) i32) *i32 { return a; } pub fn main() void { - _ = entry; + _ = &entry; } // compile diff --git a/test/link/wasm/type/build.zig b/test/link/wasm/type/build.zig index 7a779f6c21..72a9261750 100644 --- a/test/link/wasm/type/build.zig +++ b/test/link/wasm/type/build.zig @@ -26,10 +26,10 @@ fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.Optimize const check_lib = lib.checkObject(); check_lib.checkStart("Section type"); - // only 3 entries, although we have more functions. + // only 2 entries, although we have more functions. // This is to test functions with the same function signature // have their types deduplicated. - check_lib.checkNext("entries 3"); + check_lib.checkNext("entries 2"); check_lib.checkNext("params 1"); check_lib.checkNext("type i32"); check_lib.checkNext("returns 1"); -- cgit v1.2.3 From d04a262a3d7e68920dec2a6becc86bf79d81452c Mon Sep 17 00:00:00 2001 From: Eric Joldasov Date: Sun, 4 Jun 2023 23:09:17 +0600 Subject: std.math: hard deprecate obsolete constants (soft deprecated in 0.10) Followup to 5b8ac9821dd25c3e5282130b4d93d6c5b7debb08. 
Signed-off-by: Eric Joldasov --- lib/compiler_rt/powiXf2_test.zig | 465 ++++++++++++++++++++------------------- lib/std/math.zig | 159 +++++++++---- lib/std/rand.zig | 2 +- lib/std/testing.zig | 2 +- test/behavior/pointers.zig | 6 +- 5 files changed, 361 insertions(+), 273 deletions(-) (limited to 'lib/std/testing.zig') diff --git a/lib/compiler_rt/powiXf2_test.zig b/lib/compiler_rt/powiXf2_test.zig index b1f9d2b538..5f7828c3e3 100644 --- a/lib/compiler_rt/powiXf2_test.zig +++ b/lib/compiler_rt/powiXf2_test.zig @@ -32,17 +32,18 @@ fn test__powixf2(a: f80, b: i32, expected: f80) !void { } test "powihf2" { + const inf_f16 = math.inf(f16); try test__powisf2(0, 0, 1); try test__powihf2(1, 0, 1); try test__powihf2(1.5, 0, 1); try test__powihf2(2, 0, 1); - try test__powihf2(math.inf_f16, 0, 1); + try test__powihf2(inf_f16, 0, 1); try test__powihf2(-0.0, 0, 1); try test__powihf2(-1, 0, 1); try test__powihf2(-1.5, 0, 1); try test__powihf2(-2, 0, 1); - try test__powihf2(-math.inf_f16, 0, 1); + try test__powihf2(-inf_f16, 0, 1); try test__powihf2(0, 1, 0); try test__powihf2(0, 2, 0); @@ -65,35 +66,35 @@ test "powihf2" { try test__powihf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 1); try test__powihf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFF)), 1); - try test__powihf2(math.inf_f16, 1, math.inf_f16); - try test__powihf2(math.inf_f16, 2, math.inf_f16); - try test__powihf2(math.inf_f16, 3, math.inf_f16); - try test__powihf2(math.inf_f16, 4, math.inf_f16); - try test__powihf2(math.inf_f16, @bitCast(i32, @as(u32, 0x7FFFFFFE)), math.inf_f16); - try test__powihf2(math.inf_f16, @bitCast(i32, @as(u32, 0x7FFFFFFF)), math.inf_f16); - - try test__powihf2(-math.inf_f16, 1, -math.inf_f16); - try test__powihf2(-math.inf_f16, 2, math.inf_f16); - try test__powihf2(-math.inf_f16, 3, -math.inf_f16); - try test__powihf2(-math.inf_f16, 4, math.inf_f16); - try test__powihf2(-math.inf_f16, @bitCast(i32, @as(u32, 0x7FFFFFFE)), math.inf_f16); - try test__powihf2(-math.inf_f16, @bitCast(i32, @as(u32, 
0x7FFFFFFF)), -math.inf_f16); + try test__powihf2(inf_f16, 1, inf_f16); + try test__powihf2(inf_f16, 2, inf_f16); + try test__powihf2(inf_f16, 3, inf_f16); + try test__powihf2(inf_f16, 4, inf_f16); + try test__powihf2(inf_f16, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f16); + try test__powihf2(inf_f16, @bitCast(i32, @as(u32, 0x7FFFFFFF)), inf_f16); + + try test__powihf2(-inf_f16, 1, -inf_f16); + try test__powihf2(-inf_f16, 2, inf_f16); + try test__powihf2(-inf_f16, 3, -inf_f16); + try test__powihf2(-inf_f16, 4, inf_f16); + try test__powihf2(-inf_f16, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f16); + try test__powihf2(-inf_f16, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -inf_f16); // - try test__powihf2(0, -1, math.inf_f16); - try test__powihf2(0, -2, math.inf_f16); - try test__powihf2(0, -3, math.inf_f16); - try test__powihf2(0, -4, math.inf_f16); - try test__powihf2(0, @bitCast(i32, @as(u32, 0x80000002)), math.inf_f16); // 0 ^ anything = +inf - try test__powihf2(0, @bitCast(i32, @as(u32, 0x80000001)), math.inf_f16); - try test__powihf2(0, @bitCast(i32, @as(u32, 0x80000000)), math.inf_f16); - - try test__powihf2(-0.0, -1, -math.inf_f16); - try test__powihf2(-0.0, -2, math.inf_f16); - try test__powihf2(-0.0, -3, -math.inf_f16); - try test__powihf2(-0.0, -4, math.inf_f16); - try test__powihf2(-0.0, @bitCast(i32, @as(u32, 0x80000002)), math.inf_f16); // -0 ^ anything even = +inf - try test__powihf2(-0.0, @bitCast(i32, @as(u32, 0x80000001)), -math.inf_f16); // -0 ^ anything odd = -inf - try test__powihf2(-0.0, @bitCast(i32, @as(u32, 0x80000000)), math.inf_f16); + try test__powihf2(0, -1, inf_f16); + try test__powihf2(0, -2, inf_f16); + try test__powihf2(0, -3, inf_f16); + try test__powihf2(0, -4, inf_f16); + try test__powihf2(0, @bitCast(i32, @as(u32, 0x80000002)), inf_f16); // 0 ^ anything = +inf + try test__powihf2(0, @bitCast(i32, @as(u32, 0x80000001)), inf_f16); + try test__powihf2(0, @bitCast(i32, @as(u32, 0x80000000)), inf_f16); + + try test__powihf2(-0.0, -1, -inf_f16); 
+ try test__powihf2(-0.0, -2, inf_f16); + try test__powihf2(-0.0, -3, -inf_f16); + try test__powihf2(-0.0, -4, inf_f16); + try test__powihf2(-0.0, @bitCast(i32, @as(u32, 0x80000002)), inf_f16); // -0 ^ anything even = +inf + try test__powihf2(-0.0, @bitCast(i32, @as(u32, 0x80000001)), -inf_f16); // -0 ^ anything odd = -inf + try test__powihf2(-0.0, @bitCast(i32, @as(u32, 0x80000000)), inf_f16); try test__powihf2(1, -1, 1); try test__powihf2(1, -2, 1); @@ -103,21 +104,21 @@ test "powihf2" { try test__powihf2(1, @bitCast(i32, @as(u32, 0x80000001)), 1); try test__powihf2(1, @bitCast(i32, @as(u32, 0x80000000)), 1); - try test__powihf2(math.inf_f16, -1, 0); - try test__powihf2(math.inf_f16, -2, 0); - try test__powihf2(math.inf_f16, -3, 0); - try test__powihf2(math.inf_f16, -4, 0); - try test__powihf2(math.inf_f16, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powihf2(math.inf_f16, @bitCast(i32, @as(u32, 0x80000001)), 0); - try test__powihf2(math.inf_f16, @bitCast(i32, @as(u32, 0x80000000)), 0); + try test__powihf2(inf_f16, -1, 0); + try test__powihf2(inf_f16, -2, 0); + try test__powihf2(inf_f16, -3, 0); + try test__powihf2(inf_f16, -4, 0); + try test__powihf2(inf_f16, @bitCast(i32, @as(u32, 0x80000002)), 0); + try test__powihf2(inf_f16, @bitCast(i32, @as(u32, 0x80000001)), 0); + try test__powihf2(inf_f16, @bitCast(i32, @as(u32, 0x80000000)), 0); // - try test__powihf2(-math.inf_f16, -1, -0.0); - try test__powihf2(-math.inf_f16, -2, 0); - try test__powihf2(-math.inf_f16, -3, -0.0); - try test__powihf2(-math.inf_f16, -4, 0); - try test__powihf2(-math.inf_f16, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powihf2(-math.inf_f16, @bitCast(i32, @as(u32, 0x80000001)), -0.0); - try test__powihf2(-math.inf_f16, @bitCast(i32, @as(u32, 0x80000000)), 0); + try test__powihf2(-inf_f16, -1, -0.0); + try test__powihf2(-inf_f16, -2, 0); + try test__powihf2(-inf_f16, -3, -0.0); + try test__powihf2(-inf_f16, -4, 0); + try test__powihf2(-inf_f16, @bitCast(i32, @as(u32, 
0x80000002)), 0); + try test__powihf2(-inf_f16, @bitCast(i32, @as(u32, 0x80000001)), -0.0); + try test__powihf2(-inf_f16, @bitCast(i32, @as(u32, 0x80000000)), 0); try test__powihf2(2, 10, 1024.0); try test__powihf2(-2, 10, 1024.0); @@ -128,8 +129,8 @@ test "powihf2" { try test__powihf2(-2, 14, 16384.0); try test__powihf2(2, 15, 32768.0); try test__powihf2(-2, 15, -32768.0); - try test__powihf2(2, 16, math.inf_f16); - try test__powihf2(-2, 16, math.inf_f16); + try test__powihf2(2, 16, inf_f16); + try test__powihf2(-2, 16, inf_f16); try test__powihf2(2, -13, 1.0 / 8192.0); try test__powihf2(-2, -13, -1.0 / 8192.0); @@ -140,17 +141,18 @@ test "powihf2" { } test "powisf2" { + const inf_f32 = math.inf(f32); try test__powisf2(0, 0, 1); try test__powisf2(1, 0, 1); try test__powisf2(1.5, 0, 1); try test__powisf2(2, 0, 1); - try test__powisf2(math.inf_f32, 0, 1); + try test__powisf2(inf_f32, 0, 1); try test__powisf2(-0.0, 0, 1); try test__powisf2(-1, 0, 1); try test__powisf2(-1.5, 0, 1); try test__powisf2(-2, 0, 1); - try test__powisf2(-math.inf_f32, 0, 1); + try test__powisf2(-inf_f32, 0, 1); try test__powisf2(0, 1, 0); try test__powisf2(0, 2, 0); @@ -173,35 +175,35 @@ test "powisf2" { try test__powisf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 1); try test__powisf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFF)), 1); - try test__powisf2(math.inf_f32, 1, math.inf_f32); - try test__powisf2(math.inf_f32, 2, math.inf_f32); - try test__powisf2(math.inf_f32, 3, math.inf_f32); - try test__powisf2(math.inf_f32, 4, math.inf_f32); - try test__powisf2(math.inf_f32, @bitCast(i32, @as(u32, 0x7FFFFFFE)), math.inf_f32); - try test__powisf2(math.inf_f32, @bitCast(i32, @as(u32, 0x7FFFFFFF)), math.inf_f32); - - try test__powisf2(-math.inf_f32, 1, -math.inf_f32); - try test__powisf2(-math.inf_f32, 2, math.inf_f32); - try test__powisf2(-math.inf_f32, 3, -math.inf_f32); - try test__powisf2(-math.inf_f32, 4, math.inf_f32); - try test__powisf2(-math.inf_f32, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 
math.inf_f32); - try test__powisf2(-math.inf_f32, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -math.inf_f32); - - try test__powisf2(0, -1, math.inf_f32); - try test__powisf2(0, -2, math.inf_f32); - try test__powisf2(0, -3, math.inf_f32); - try test__powisf2(0, -4, math.inf_f32); - try test__powisf2(0, @bitCast(i32, @as(u32, 0x80000002)), math.inf_f32); - try test__powisf2(0, @bitCast(i32, @as(u32, 0x80000001)), math.inf_f32); - try test__powisf2(0, @bitCast(i32, @as(u32, 0x80000000)), math.inf_f32); - - try test__powisf2(-0.0, -1, -math.inf_f32); - try test__powisf2(-0.0, -2, math.inf_f32); - try test__powisf2(-0.0, -3, -math.inf_f32); - try test__powisf2(-0.0, -4, math.inf_f32); - try test__powisf2(-0.0, @bitCast(i32, @as(u32, 0x80000002)), math.inf_f32); - try test__powisf2(-0.0, @bitCast(i32, @as(u32, 0x80000001)), -math.inf_f32); - try test__powisf2(-0.0, @bitCast(i32, @as(u32, 0x80000000)), math.inf_f32); + try test__powisf2(inf_f32, 1, inf_f32); + try test__powisf2(inf_f32, 2, inf_f32); + try test__powisf2(inf_f32, 3, inf_f32); + try test__powisf2(inf_f32, 4, inf_f32); + try test__powisf2(inf_f32, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f32); + try test__powisf2(inf_f32, @bitCast(i32, @as(u32, 0x7FFFFFFF)), inf_f32); + + try test__powisf2(-inf_f32, 1, -inf_f32); + try test__powisf2(-inf_f32, 2, inf_f32); + try test__powisf2(-inf_f32, 3, -inf_f32); + try test__powisf2(-inf_f32, 4, inf_f32); + try test__powisf2(-inf_f32, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f32); + try test__powisf2(-inf_f32, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -inf_f32); + + try test__powisf2(0, -1, inf_f32); + try test__powisf2(0, -2, inf_f32); + try test__powisf2(0, -3, inf_f32); + try test__powisf2(0, -4, inf_f32); + try test__powisf2(0, @bitCast(i32, @as(u32, 0x80000002)), inf_f32); + try test__powisf2(0, @bitCast(i32, @as(u32, 0x80000001)), inf_f32); + try test__powisf2(0, @bitCast(i32, @as(u32, 0x80000000)), inf_f32); + + try test__powisf2(-0.0, -1, -inf_f32); + try test__powisf2(-0.0, 
-2, inf_f32); + try test__powisf2(-0.0, -3, -inf_f32); + try test__powisf2(-0.0, -4, inf_f32); + try test__powisf2(-0.0, @bitCast(i32, @as(u32, 0x80000002)), inf_f32); + try test__powisf2(-0.0, @bitCast(i32, @as(u32, 0x80000001)), -inf_f32); + try test__powisf2(-0.0, @bitCast(i32, @as(u32, 0x80000000)), inf_f32); try test__powisf2(1, -1, 1); try test__powisf2(1, -2, 1); @@ -211,21 +213,21 @@ test "powisf2" { try test__powisf2(1, @bitCast(i32, @as(u32, 0x80000001)), 1); try test__powisf2(1, @bitCast(i32, @as(u32, 0x80000000)), 1); - try test__powisf2(math.inf_f32, -1, 0); - try test__powisf2(math.inf_f32, -2, 0); - try test__powisf2(math.inf_f32, -3, 0); - try test__powisf2(math.inf_f32, -4, 0); - try test__powisf2(math.inf_f32, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powisf2(math.inf_f32, @bitCast(i32, @as(u32, 0x80000001)), 0); - try test__powisf2(math.inf_f32, @bitCast(i32, @as(u32, 0x80000000)), 0); - - try test__powisf2(-math.inf_f32, -1, -0.0); - try test__powisf2(-math.inf_f32, -2, 0); - try test__powisf2(-math.inf_f32, -3, -0.0); - try test__powisf2(-math.inf_f32, -4, 0); - try test__powisf2(-math.inf_f32, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powisf2(-math.inf_f32, @bitCast(i32, @as(u32, 0x80000001)), -0.0); - try test__powisf2(-math.inf_f32, @bitCast(i32, @as(u32, 0x80000000)), 0); + try test__powisf2(inf_f32, -1, 0); + try test__powisf2(inf_f32, -2, 0); + try test__powisf2(inf_f32, -3, 0); + try test__powisf2(inf_f32, -4, 0); + try test__powisf2(inf_f32, @bitCast(i32, @as(u32, 0x80000002)), 0); + try test__powisf2(inf_f32, @bitCast(i32, @as(u32, 0x80000001)), 0); + try test__powisf2(inf_f32, @bitCast(i32, @as(u32, 0x80000000)), 0); + + try test__powisf2(-inf_f32, -1, -0.0); + try test__powisf2(-inf_f32, -2, 0); + try test__powisf2(-inf_f32, -3, -0.0); + try test__powisf2(-inf_f32, -4, 0); + try test__powisf2(-inf_f32, @bitCast(i32, @as(u32, 0x80000002)), 0); + try test__powisf2(-inf_f32, @bitCast(i32, @as(u32, 0x80000001)), 
-0.0); + try test__powisf2(-inf_f32, @bitCast(i32, @as(u32, 0x80000000)), 0); try test__powisf2(2.0, 10, 1024.0); try test__powisf2(-2, 10, 1024.0); @@ -244,17 +246,18 @@ test "powisf2" { } test "powidf2" { + const inf_f64 = math.inf(f64); try test__powidf2(0, 0, 1); try test__powidf2(1, 0, 1); try test__powidf2(1.5, 0, 1); try test__powidf2(2, 0, 1); - try test__powidf2(math.inf_f64, 0, 1); + try test__powidf2(inf_f64, 0, 1); try test__powidf2(-0.0, 0, 1); try test__powidf2(-1, 0, 1); try test__powidf2(-1.5, 0, 1); try test__powidf2(-2, 0, 1); - try test__powidf2(-math.inf_f64, 0, 1); + try test__powidf2(-inf_f64, 0, 1); try test__powidf2(0, 1, 0); try test__powidf2(0, 2, 0); @@ -277,35 +280,35 @@ test "powidf2" { try test__powidf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 1); try test__powidf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFF)), 1); - try test__powidf2(math.inf_f64, 1, math.inf_f64); - try test__powidf2(math.inf_f64, 2, math.inf_f64); - try test__powidf2(math.inf_f64, 3, math.inf_f64); - try test__powidf2(math.inf_f64, 4, math.inf_f64); - try test__powidf2(math.inf_f64, @bitCast(i32, @as(u32, 0x7FFFFFFE)), math.inf_f64); - try test__powidf2(math.inf_f64, @bitCast(i32, @as(u32, 0x7FFFFFFF)), math.inf_f64); - - try test__powidf2(-math.inf_f64, 1, -math.inf_f64); - try test__powidf2(-math.inf_f64, 2, math.inf_f64); - try test__powidf2(-math.inf_f64, 3, -math.inf_f64); - try test__powidf2(-math.inf_f64, 4, math.inf_f64); - try test__powidf2(-math.inf_f64, @bitCast(i32, @as(u32, 0x7FFFFFFE)), math.inf_f64); - try test__powidf2(-math.inf_f64, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -math.inf_f64); - - try test__powidf2(0, -1, math.inf_f64); - try test__powidf2(0, -2, math.inf_f64); - try test__powidf2(0, -3, math.inf_f64); - try test__powidf2(0, -4, math.inf_f64); - try test__powidf2(0, @bitCast(i32, @as(u32, 0x80000002)), math.inf_f64); - try test__powidf2(0, @bitCast(i32, @as(u32, 0x80000001)), math.inf_f64); - try test__powidf2(0, @bitCast(i32, @as(u32, 0x80000000)), 
math.inf_f64); - - try test__powidf2(-0.0, -1, -math.inf_f64); - try test__powidf2(-0.0, -2, math.inf_f64); - try test__powidf2(-0.0, -3, -math.inf_f64); - try test__powidf2(-0.0, -4, math.inf_f64); - try test__powidf2(-0.0, @bitCast(i32, @as(u32, 0x80000002)), math.inf_f64); - try test__powidf2(-0.0, @bitCast(i32, @as(u32, 0x80000001)), -math.inf_f64); - try test__powidf2(-0.0, @bitCast(i32, @as(u32, 0x80000000)), math.inf_f64); + try test__powidf2(inf_f64, 1, inf_f64); + try test__powidf2(inf_f64, 2, inf_f64); + try test__powidf2(inf_f64, 3, inf_f64); + try test__powidf2(inf_f64, 4, inf_f64); + try test__powidf2(inf_f64, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f64); + try test__powidf2(inf_f64, @bitCast(i32, @as(u32, 0x7FFFFFFF)), inf_f64); + + try test__powidf2(-inf_f64, 1, -inf_f64); + try test__powidf2(-inf_f64, 2, inf_f64); + try test__powidf2(-inf_f64, 3, -inf_f64); + try test__powidf2(-inf_f64, 4, inf_f64); + try test__powidf2(-inf_f64, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f64); + try test__powidf2(-inf_f64, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -inf_f64); + + try test__powidf2(0, -1, inf_f64); + try test__powidf2(0, -2, inf_f64); + try test__powidf2(0, -3, inf_f64); + try test__powidf2(0, -4, inf_f64); + try test__powidf2(0, @bitCast(i32, @as(u32, 0x80000002)), inf_f64); + try test__powidf2(0, @bitCast(i32, @as(u32, 0x80000001)), inf_f64); + try test__powidf2(0, @bitCast(i32, @as(u32, 0x80000000)), inf_f64); + + try test__powidf2(-0.0, -1, -inf_f64); + try test__powidf2(-0.0, -2, inf_f64); + try test__powidf2(-0.0, -3, -inf_f64); + try test__powidf2(-0.0, -4, inf_f64); + try test__powidf2(-0.0, @bitCast(i32, @as(u32, 0x80000002)), inf_f64); + try test__powidf2(-0.0, @bitCast(i32, @as(u32, 0x80000001)), -inf_f64); + try test__powidf2(-0.0, @bitCast(i32, @as(u32, 0x80000000)), inf_f64); try test__powidf2(1, -1, 1); try test__powidf2(1, -2, 1); @@ -315,21 +318,21 @@ test "powidf2" { try test__powidf2(1, @bitCast(i32, @as(u32, 0x80000001)), 1); try 
test__powidf2(1, @bitCast(i32, @as(u32, 0x80000000)), 1); - try test__powidf2(math.inf_f64, -1, 0); - try test__powidf2(math.inf_f64, -2, 0); - try test__powidf2(math.inf_f64, -3, 0); - try test__powidf2(math.inf_f64, -4, 0); - try test__powidf2(math.inf_f64, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powidf2(math.inf_f64, @bitCast(i32, @as(u32, 0x80000001)), 0); - try test__powidf2(math.inf_f64, @bitCast(i32, @as(u32, 0x80000000)), 0); - - try test__powidf2(-math.inf_f64, -1, -0.0); - try test__powidf2(-math.inf_f64, -2, 0); - try test__powidf2(-math.inf_f64, -3, -0.0); - try test__powidf2(-math.inf_f64, -4, 0); - try test__powidf2(-math.inf_f64, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powidf2(-math.inf_f64, @bitCast(i32, @as(u32, 0x80000001)), -0.0); - try test__powidf2(-math.inf_f64, @bitCast(i32, @as(u32, 0x80000000)), 0); + try test__powidf2(inf_f64, -1, 0); + try test__powidf2(inf_f64, -2, 0); + try test__powidf2(inf_f64, -3, 0); + try test__powidf2(inf_f64, -4, 0); + try test__powidf2(inf_f64, @bitCast(i32, @as(u32, 0x80000002)), 0); + try test__powidf2(inf_f64, @bitCast(i32, @as(u32, 0x80000001)), 0); + try test__powidf2(inf_f64, @bitCast(i32, @as(u32, 0x80000000)), 0); + + try test__powidf2(-inf_f64, -1, -0.0); + try test__powidf2(-inf_f64, -2, 0); + try test__powidf2(-inf_f64, -3, -0.0); + try test__powidf2(-inf_f64, -4, 0); + try test__powidf2(-inf_f64, @bitCast(i32, @as(u32, 0x80000002)), 0); + try test__powidf2(-inf_f64, @bitCast(i32, @as(u32, 0x80000001)), -0.0); + try test__powidf2(-inf_f64, @bitCast(i32, @as(u32, 0x80000000)), 0); try test__powidf2(2, 10, 1024.0); try test__powidf2(-2, 10, 1024.0); @@ -348,17 +351,18 @@ test "powidf2" { } test "powitf2" { + const inf_f128 = math.inf(f128); try test__powitf2(0, 0, 1); try test__powitf2(1, 0, 1); try test__powitf2(1.5, 0, 1); try test__powitf2(2, 0, 1); - try test__powitf2(math.inf_f128, 0, 1); + try test__powitf2(inf_f128, 0, 1); try test__powitf2(-0.0, 0, 1); try 
test__powitf2(-1, 0, 1); try test__powitf2(-1.5, 0, 1); try test__powitf2(-2, 0, 1); - try test__powitf2(-math.inf_f128, 0, 1); + try test__powitf2(-inf_f128, 0, 1); try test__powitf2(0, 1, 0); try test__powitf2(0, 2, 0); @@ -381,35 +385,35 @@ test "powitf2" { try test__powitf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 1); try test__powitf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFF)), 1); - try test__powitf2(math.inf_f128, 1, math.inf_f128); - try test__powitf2(math.inf_f128, 2, math.inf_f128); - try test__powitf2(math.inf_f128, 3, math.inf_f128); - try test__powitf2(math.inf_f128, 4, math.inf_f128); - try test__powitf2(math.inf_f128, @bitCast(i32, @as(u32, 0x7FFFFFFE)), math.inf_f128); - try test__powitf2(math.inf_f128, @bitCast(i32, @as(u32, 0x7FFFFFFF)), math.inf_f128); - - try test__powitf2(-math.inf_f128, 1, -math.inf_f128); - try test__powitf2(-math.inf_f128, 2, math.inf_f128); - try test__powitf2(-math.inf_f128, 3, -math.inf_f128); - try test__powitf2(-math.inf_f128, 4, math.inf_f128); - try test__powitf2(-math.inf_f128, @bitCast(i32, @as(u32, 0x7FFFFFFE)), math.inf_f128); - try test__powitf2(-math.inf_f128, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -math.inf_f128); - - try test__powitf2(0, -1, math.inf_f128); - try test__powitf2(0, -2, math.inf_f128); - try test__powitf2(0, -3, math.inf_f128); - try test__powitf2(0, -4, math.inf_f128); - try test__powitf2(0, @bitCast(i32, @as(u32, 0x80000002)), math.inf_f128); - try test__powitf2(0, @bitCast(i32, @as(u32, 0x80000001)), math.inf_f128); - try test__powitf2(0, @bitCast(i32, @as(u32, 0x80000000)), math.inf_f128); - - try test__powitf2(-0.0, -1, -math.inf_f128); - try test__powitf2(-0.0, -2, math.inf_f128); - try test__powitf2(-0.0, -3, -math.inf_f128); - try test__powitf2(-0.0, -4, math.inf_f128); - try test__powitf2(-0.0, @bitCast(i32, @as(u32, 0x80000002)), math.inf_f128); - try test__powitf2(-0.0, @bitCast(i32, @as(u32, 0x80000001)), -math.inf_f128); - try test__powitf2(-0.0, @bitCast(i32, @as(u32, 0x80000000)), 
math.inf_f128); + try test__powitf2(inf_f128, 1, inf_f128); + try test__powitf2(inf_f128, 2, inf_f128); + try test__powitf2(inf_f128, 3, inf_f128); + try test__powitf2(inf_f128, 4, inf_f128); + try test__powitf2(inf_f128, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f128); + try test__powitf2(inf_f128, @bitCast(i32, @as(u32, 0x7FFFFFFF)), inf_f128); + + try test__powitf2(-inf_f128, 1, -inf_f128); + try test__powitf2(-inf_f128, 2, inf_f128); + try test__powitf2(-inf_f128, 3, -inf_f128); + try test__powitf2(-inf_f128, 4, inf_f128); + try test__powitf2(-inf_f128, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f128); + try test__powitf2(-inf_f128, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -inf_f128); + + try test__powitf2(0, -1, inf_f128); + try test__powitf2(0, -2, inf_f128); + try test__powitf2(0, -3, inf_f128); + try test__powitf2(0, -4, inf_f128); + try test__powitf2(0, @bitCast(i32, @as(u32, 0x80000002)), inf_f128); + try test__powitf2(0, @bitCast(i32, @as(u32, 0x80000001)), inf_f128); + try test__powitf2(0, @bitCast(i32, @as(u32, 0x80000000)), inf_f128); + + try test__powitf2(-0.0, -1, -inf_f128); + try test__powitf2(-0.0, -2, inf_f128); + try test__powitf2(-0.0, -3, -inf_f128); + try test__powitf2(-0.0, -4, inf_f128); + try test__powitf2(-0.0, @bitCast(i32, @as(u32, 0x80000002)), inf_f128); + try test__powitf2(-0.0, @bitCast(i32, @as(u32, 0x80000001)), -inf_f128); + try test__powitf2(-0.0, @bitCast(i32, @as(u32, 0x80000000)), inf_f128); try test__powitf2(1, -1, 1); try test__powitf2(1, -2, 1); @@ -419,21 +423,21 @@ test "powitf2" { try test__powitf2(1, @bitCast(i32, @as(u32, 0x80000001)), 1); try test__powitf2(1, @bitCast(i32, @as(u32, 0x80000000)), 1); - try test__powitf2(math.inf_f128, -1, 0); - try test__powitf2(math.inf_f128, -2, 0); - try test__powitf2(math.inf_f128, -3, 0); - try test__powitf2(math.inf_f128, -4, 0); - try test__powitf2(math.inf_f128, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powitf2(math.inf_f128, @bitCast(i32, @as(u32, 0x80000001)), 0); - try 
test__powitf2(math.inf_f128, @bitCast(i32, @as(u32, 0x80000000)), 0); - - try test__powitf2(-math.inf_f128, -1, -0.0); - try test__powitf2(-math.inf_f128, -2, 0); - try test__powitf2(-math.inf_f128, -3, -0.0); - try test__powitf2(-math.inf_f128, -4, 0); - try test__powitf2(-math.inf_f128, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powitf2(-math.inf_f128, @bitCast(i32, @as(u32, 0x80000001)), -0.0); - try test__powitf2(-math.inf_f128, @bitCast(i32, @as(u32, 0x80000000)), 0); + try test__powitf2(inf_f128, -1, 0); + try test__powitf2(inf_f128, -2, 0); + try test__powitf2(inf_f128, -3, 0); + try test__powitf2(inf_f128, -4, 0); + try test__powitf2(inf_f128, @bitCast(i32, @as(u32, 0x80000002)), 0); + try test__powitf2(inf_f128, @bitCast(i32, @as(u32, 0x80000001)), 0); + try test__powitf2(inf_f128, @bitCast(i32, @as(u32, 0x80000000)), 0); + + try test__powitf2(-inf_f128, -1, -0.0); + try test__powitf2(-inf_f128, -2, 0); + try test__powitf2(-inf_f128, -3, -0.0); + try test__powitf2(-inf_f128, -4, 0); + try test__powitf2(-inf_f128, @bitCast(i32, @as(u32, 0x80000002)), 0); + try test__powitf2(-inf_f128, @bitCast(i32, @as(u32, 0x80000001)), -0.0); + try test__powitf2(-inf_f128, @bitCast(i32, @as(u32, 0x80000000)), 0); try test__powitf2(2, 10, 1024.0); try test__powitf2(-2, 10, 1024.0); @@ -452,17 +456,18 @@ test "powitf2" { } test "powixf2" { + const inf_f80 = math.inf(f80); try test__powixf2(0, 0, 1); try test__powixf2(1, 0, 1); try test__powixf2(1.5, 0, 1); try test__powixf2(2, 0, 1); - try test__powixf2(math.inf_f80, 0, 1); + try test__powixf2(inf_f80, 0, 1); try test__powixf2(-0.0, 0, 1); try test__powixf2(-1, 0, 1); try test__powixf2(-1.5, 0, 1); try test__powixf2(-2, 0, 1); - try test__powixf2(-math.inf_f80, 0, 1); + try test__powixf2(-inf_f80, 0, 1); try test__powixf2(0, 1, 0); try test__powixf2(0, 2, 0); @@ -485,35 +490,35 @@ test "powixf2" { try test__powixf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 1); try test__powixf2(1, @bitCast(i32, @as(u32, 
0x7FFFFFFF)), 1); - try test__powixf2(math.inf_f80, 1, math.inf_f80); - try test__powixf2(math.inf_f80, 2, math.inf_f80); - try test__powixf2(math.inf_f80, 3, math.inf_f80); - try test__powixf2(math.inf_f80, 4, math.inf_f80); - try test__powixf2(math.inf_f80, @bitCast(i32, @as(u32, 0x7FFFFFFE)), math.inf_f80); - try test__powixf2(math.inf_f80, @bitCast(i32, @as(u32, 0x7FFFFFFF)), math.inf_f80); - - try test__powixf2(-math.inf_f80, 1, -math.inf_f80); - try test__powixf2(-math.inf_f80, 2, math.inf_f80); - try test__powixf2(-math.inf_f80, 3, -math.inf_f80); - try test__powixf2(-math.inf_f80, 4, math.inf_f80); - try test__powixf2(-math.inf_f80, @bitCast(i32, @as(u32, 0x7FFFFFFE)), math.inf_f80); - try test__powixf2(-math.inf_f80, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -math.inf_f80); - - try test__powixf2(0, -1, math.inf_f80); - try test__powixf2(0, -2, math.inf_f80); - try test__powixf2(0, -3, math.inf_f80); - try test__powixf2(0, -4, math.inf_f80); - try test__powixf2(0, @bitCast(i32, @as(u32, 0x80000002)), math.inf_f80); - try test__powixf2(0, @bitCast(i32, @as(u32, 0x80000001)), math.inf_f80); - try test__powixf2(0, @bitCast(i32, @as(u32, 0x80000000)), math.inf_f80); - - try test__powixf2(-0.0, -1, -math.inf_f80); - try test__powixf2(-0.0, -2, math.inf_f80); - try test__powixf2(-0.0, -3, -math.inf_f80); - try test__powixf2(-0.0, -4, math.inf_f80); - try test__powixf2(-0.0, @bitCast(i32, @as(u32, 0x80000002)), math.inf_f80); - try test__powixf2(-0.0, @bitCast(i32, @as(u32, 0x80000001)), -math.inf_f80); - try test__powixf2(-0.0, @bitCast(i32, @as(u32, 0x80000000)), math.inf_f80); + try test__powixf2(inf_f80, 1, inf_f80); + try test__powixf2(inf_f80, 2, inf_f80); + try test__powixf2(inf_f80, 3, inf_f80); + try test__powixf2(inf_f80, 4, inf_f80); + try test__powixf2(inf_f80, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f80); + try test__powixf2(inf_f80, @bitCast(i32, @as(u32, 0x7FFFFFFF)), inf_f80); + + try test__powixf2(-inf_f80, 1, -inf_f80); + try test__powixf2(-inf_f80, 
2, inf_f80); + try test__powixf2(-inf_f80, 3, -inf_f80); + try test__powixf2(-inf_f80, 4, inf_f80); + try test__powixf2(-inf_f80, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f80); + try test__powixf2(-inf_f80, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -inf_f80); + + try test__powixf2(0, -1, inf_f80); + try test__powixf2(0, -2, inf_f80); + try test__powixf2(0, -3, inf_f80); + try test__powixf2(0, -4, inf_f80); + try test__powixf2(0, @bitCast(i32, @as(u32, 0x80000002)), inf_f80); + try test__powixf2(0, @bitCast(i32, @as(u32, 0x80000001)), inf_f80); + try test__powixf2(0, @bitCast(i32, @as(u32, 0x80000000)), inf_f80); + + try test__powixf2(-0.0, -1, -inf_f80); + try test__powixf2(-0.0, -2, inf_f80); + try test__powixf2(-0.0, -3, -inf_f80); + try test__powixf2(-0.0, -4, inf_f80); + try test__powixf2(-0.0, @bitCast(i32, @as(u32, 0x80000002)), inf_f80); + try test__powixf2(-0.0, @bitCast(i32, @as(u32, 0x80000001)), -inf_f80); + try test__powixf2(-0.0, @bitCast(i32, @as(u32, 0x80000000)), inf_f80); try test__powixf2(1, -1, 1); try test__powixf2(1, -2, 1); @@ -523,21 +528,21 @@ test "powixf2" { try test__powixf2(1, @bitCast(i32, @as(u32, 0x80000001)), 1); try test__powixf2(1, @bitCast(i32, @as(u32, 0x80000000)), 1); - try test__powixf2(math.inf_f80, -1, 0); - try test__powixf2(math.inf_f80, -2, 0); - try test__powixf2(math.inf_f80, -3, 0); - try test__powixf2(math.inf_f80, -4, 0); - try test__powixf2(math.inf_f80, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powixf2(math.inf_f80, @bitCast(i32, @as(u32, 0x80000001)), 0); - try test__powixf2(math.inf_f80, @bitCast(i32, @as(u32, 0x80000000)), 0); - - try test__powixf2(-math.inf_f80, -1, -0.0); - try test__powixf2(-math.inf_f80, -2, 0); - try test__powixf2(-math.inf_f80, -3, -0.0); - try test__powixf2(-math.inf_f80, -4, 0); - try test__powixf2(-math.inf_f80, @bitCast(i32, @as(u32, 0x80000002)), 0); - try test__powixf2(-math.inf_f80, @bitCast(i32, @as(u32, 0x80000001)), -0.0); - try test__powixf2(-math.inf_f80, @bitCast(i32, 
@as(u32, 0x80000000)), 0); + try test__powixf2(inf_f80, -1, 0); + try test__powixf2(inf_f80, -2, 0); + try test__powixf2(inf_f80, -3, 0); + try test__powixf2(inf_f80, -4, 0); + try test__powixf2(inf_f80, @bitCast(i32, @as(u32, 0x80000002)), 0); + try test__powixf2(inf_f80, @bitCast(i32, @as(u32, 0x80000001)), 0); + try test__powixf2(inf_f80, @bitCast(i32, @as(u32, 0x80000000)), 0); + + try test__powixf2(-inf_f80, -1, -0.0); + try test__powixf2(-inf_f80, -2, 0); + try test__powixf2(-inf_f80, -3, -0.0); + try test__powixf2(-inf_f80, -4, 0); + try test__powixf2(-inf_f80, @bitCast(i32, @as(u32, 0x80000002)), 0); + try test__powixf2(-inf_f80, @bitCast(i32, @as(u32, 0x80000001)), -0.0); + try test__powixf2(-inf_f80, @bitCast(i32, @as(u32, 0x80000000)), 0); try test__powixf2(2, 10, 1024.0); try test__powixf2(-2, 10, 1024.0); diff --git a/lib/std/math.zig b/lib/std/math.zig index 8bd7c364f9..d85cefdefb 100644 --- a/lib/std/math.zig +++ b/lib/std/math.zig @@ -48,43 +48,41 @@ pub const floatMax = @import("math/float.zig").floatMax; pub const floatEps = @import("math/float.zig").floatEps; pub const inf = @import("math/float.zig").inf; -// TODO Replace with @compileError("deprecated for foobar") after 0.10.0 is released. 
-pub const f16_true_min: comptime_float = floatTrueMin(f16); // prev: 0.000000059604644775390625 -pub const f32_true_min: comptime_float = floatTrueMin(f32); // prev: 1.40129846432481707092e-45 -pub const f64_true_min: comptime_float = floatTrueMin(f64); // prev: 4.94065645841246544177e-324 -pub const f80_true_min = floatTrueMin(f80); // prev: make_f80(.{ .fraction = 1, .exp = 0 }) -pub const f128_true_min = floatTrueMin(f128); // prev: @bitCast(f128, @as(u128, 0x00000000000000000000000000000001)) -pub const f16_min: comptime_float = floatMin(f16); // prev: 0.00006103515625 -pub const f32_min: comptime_float = floatMin(f32); // prev: 1.17549435082228750797e-38 -pub const f64_min: comptime_float = floatMin(f64); // prev: 2.2250738585072014e-308 -pub const f80_min = floatMin(f80); // prev: make_f80(.{ .fraction = 0x8000000000000000, .exp = 1 }) -pub const f128_min = floatMin(f128); // prev: @bitCast(f128, @as(u128, 0x00010000000000000000000000000000)) -pub const f16_max: comptime_float = floatMax(f16); // prev: 65504 -pub const f32_max: comptime_float = floatMax(f32); // prev: 3.40282346638528859812e+38 -pub const f64_max: comptime_float = floatMax(f64); // prev: 1.79769313486231570815e+308 -pub const f80_max = floatMax(f80); // prev: make_f80(.{ .fraction = 0xFFFFFFFFFFFFFFFF, .exp = 0x7FFE }) -pub const f128_max = floatMax(f128); // prev: @bitCast(f128, @as(u128, 0x7FFEFFFFFFFFFFFFFFFFFFFFFFFFFFFF)) -pub const f16_epsilon: comptime_float = floatEps(f16); // prev: 0.0009765625 -pub const f32_epsilon: comptime_float = floatEps(f32); // prev: 1.1920928955078125e-07 -pub const f64_epsilon: comptime_float = floatEps(f64); // prev: 2.22044604925031308085e-16 -pub const f80_epsilon = floatEps(f80); // prev: make_f80(.{ .fraction = 0x8000000000000000, .exp = 0x3FC0 }) -pub const f128_epsilon = floatEps(f128); // prev: @bitCast(f128, @as(u128, 0x3F8F0000000000000000000000000000)) -pub const f16_toint: comptime_float = 1.0 / f16_epsilon; // same as before -pub const 
f32_toint: comptime_float = 1.0 / f32_epsilon; // same as before -pub const f64_toint: comptime_float = 1.0 / f64_epsilon; // same as before -pub const f80_toint = 1.0 / f80_epsilon; // same as before -pub const f128_toint = 1.0 / f128_epsilon; // same as before -pub const inf_u16 = @bitCast(u16, inf_f16); // prev: @as(u16, 0x7C00) -pub const inf_f16 = inf(f16); // prev: @bitCast(f16, inf_u16) -pub const inf_u32 = @bitCast(u32, inf_f32); // prev: @as(u32, 0x7F800000) -pub const inf_f32 = inf(f32); // prev: @bitCast(f32, inf_u32) -pub const inf_u64 = @bitCast(u64, inf_f64); // prev: @as(u64, 0x7FF << 52) -pub const inf_f64 = inf(f64); // prev: @bitCast(f64, inf_u64) -pub const inf_f80 = inf(f80); // prev: make_f80(F80{ .fraction = 0x8000000000000000, .exp = 0x7fff }) -pub const inf_u128 = @bitCast(u128, inf_f128); // prev: @as(u128, 0x7fff0000000000000000000000000000) -pub const inf_f128 = inf(f128); // prev: @bitCast(f128, inf_u128) -pub const epsilon = floatEps; -// End of "soft deprecated" section +pub const f16_true_min = @compileError("Deprecated: use `floatTrueMin(f16)` instead"); +pub const f32_true_min = @compileError("Deprecated: use `floatTrueMin(f32)` instead"); +pub const f64_true_min = @compileError("Deprecated: use `floatTrueMin(f64)` instead"); +pub const f80_true_min = @compileError("Deprecated: use `floatTrueMin(f80)` instead"); +pub const f128_true_min = @compileError("Deprecated: use `floatTrueMin(f128)` instead"); +pub const f16_min = @compileError("Deprecated: use `floatMin(f16)` instead"); +pub const f32_min = @compileError("Deprecated: use `floatMin(f32)` instead"); +pub const f64_min = @compileError("Deprecated: use `floatMin(f64)` instead"); +pub const f80_min = @compileError("Deprecated: use `floatMin(f80)` instead"); +pub const f128_min = @compileError("Deprecated: use `floatMin(f128)` instead"); +pub const f16_max = @compileError("Deprecated: use `floatMax(f16)` instead"); +pub const f32_max = @compileError("Deprecated: use 
`floatMax(f32)` instead"); +pub const f64_max = @compileError("Deprecated: use `floatMax(f64)` instead"); +pub const f80_max = @compileError("Deprecated: use `floatMax(f80)` instead"); +pub const f128_max = @compileError("Deprecated: use `floatMax(f128)` instead"); +pub const f16_epsilon = @compileError("Deprecated: use `floatEps(f16)` instead"); +pub const f32_epsilon = @compileError("Deprecated: use `floatEps(f32)` instead"); +pub const f64_epsilon = @compileError("Deprecated: use `floatEps(f64)` instead"); +pub const f80_epsilon = @compileError("Deprecated: use `floatEps(f80)` instead"); +pub const f128_epsilon = @compileError("Deprecated: use `floatEps(f128)` instead"); +pub const f16_toint = @compileError("Deprecated: use `1.0 / floatEps(f16)` instead"); +pub const f32_toint = @compileError("Deprecated: use `1.0 / floatEps(f32)` instead"); +pub const f64_toint = @compileError("Deprecated: use `1.0 / floatEps(f64)` instead"); +pub const f80_toint = @compileError("Deprecated: use `1.0 / floatEps(f80)` instead"); +pub const f128_toint = @compileError("Deprecated: use `1.0 / floatEps(f128)` instead"); +pub const inf_u16 = @compileError("Deprecated: use `@bitCast(u16, inf(f16))` instead"); +pub const inf_f16 = @compileError("Deprecated: use `inf(f16)` instead"); +pub const inf_u32 = @compileError("Deprecated: use `@bitCast(u32, inf(f32))` instead"); +pub const inf_f32 = @compileError("Deprecated: use `inf(f32)` instead"); +pub const inf_u64 = @compileError("Deprecated: use `@bitCast(u64, inf(f64))` instead"); +pub const inf_f64 = @compileError("Deprecated: use `inf(f64)` instead"); +pub const inf_f80 = @compileError("Deprecated: use `inf(f80)` instead"); +pub const inf_u128 = @compileError("Deprecated: use `@bitCast(u128, inf(f128))` instead"); +pub const inf_f128 = @compileError("Deprecated: use `inf(f128)` instead"); +pub const epsilon = @compileError("Deprecated: use `floatEps` instead"); pub const nan_u16 = @as(u16, 0x7C01); pub const nan_f16 = @bitCast(f16, 
nan_u16); @@ -329,7 +327,92 @@ pub const Complex = complex.Complex; pub const big = @import("math/big.zig"); test { - std.testing.refAllDecls(@This()); + _ = floatExponentBits; + _ = floatMantissaBits; + _ = floatFractionalBits; + _ = floatExponentMin; + _ = floatExponentMax; + _ = floatTrueMin; + _ = floatMin; + _ = floatMax; + _ = floatEps; + _ = inf; + + _ = nan_u16; + _ = nan_f16; + + _ = qnan_u16; + _ = qnan_f16; + + _ = nan_u32; + _ = nan_f32; + + _ = qnan_u32; + _ = qnan_f32; + + _ = nan_u64; + _ = nan_f64; + + _ = qnan_u64; + _ = qnan_f64; + + _ = nan_f80; + _ = qnan_f80; + + _ = nan_u128; + _ = nan_f128; + + _ = qnan_u128; + _ = qnan_f128; + + _ = nan; + _ = snan; + + _ = isNan; + _ = isSignalNan; + _ = frexp; + _ = Frexp; + _ = modf; + _ = modf32_result; + _ = modf64_result; + _ = copysign; + _ = isFinite; + _ = isInf; + _ = isPositiveInf; + _ = isNegativeInf; + _ = isNormal; + _ = signbit; + _ = scalbn; + _ = ldexp; + _ = pow; + _ = powi; + _ = sqrt; + _ = cbrt; + _ = acos; + _ = asin; + _ = atan; + _ = atan2; + _ = hypot; + _ = expm1; + _ = ilogb; + _ = ln; + _ = log; + _ = log2; + _ = log10; + _ = log10_int; + _ = log1p; + _ = asinh; + _ = acosh; + _ = atanh; + _ = sinh; + _ = cosh; + _ = tanh; + _ = gcd; + + _ = complex; + _ = Complex; + + _ = big; } /// Given two types, returns the smallest one which is capable of holding the diff --git a/lib/std/rand.zig b/lib/std/rand.zig index 204409c10e..ad19baa546 100644 --- a/lib/std/rand.zig +++ b/lib/std/rand.zig @@ -410,7 +410,7 @@ pub const Random = struct { r.uintLessThan(T, sum) else if (comptime std.meta.trait.isFloat(T)) // take care that imprecision doesn't lead to a value slightly greater than sum - std.math.min(r.float(T) * sum, sum - std.math.epsilon(T)) + std.math.min(r.float(T) * sum, sum - std.math.floatEps(T)) else @compileError("weightedIndex does not support proportions of type " ++ @typeName(T)); diff --git a/lib/std/testing.zig b/lib/std/testing.zig index fa131122bb..6b1e0bb640 100644 --- 
a/lib/std/testing.zig +++ b/lib/std/testing.zig @@ -263,7 +263,7 @@ pub fn expectApproxEqRel(expected: anytype, actual: @TypeOf(expected), tolerance test "expectApproxEqRel" { inline for ([_]type{ f16, f32, f64, f128 }) |T| { - const eps_value = comptime math.epsilon(T); + const eps_value = comptime math.floatEps(T); const sqrt_eps_value = comptime @sqrt(eps_value); const pos_x: T = 12.0; diff --git a/test/behavior/pointers.zig b/test/behavior/pointers.zig index 70bc6ad47e..d343c5dbc4 100644 --- a/test/behavior/pointers.zig +++ b/test/behavior/pointers.zig @@ -357,9 +357,9 @@ test "pointer sentinel with +inf" { const S = struct { fn doTheTest() !void { - const inf = std.math.inf_f32; - var ptr: [*:inf]const f32 = &[_:inf]f32{ 1.1, 2.2, 3.3, 4.4 }; - try expect(ptr[4] == inf); // TODO this should be comptime try expect, see #3731 + const inf_f32 = comptime std.math.inf(f32); + var ptr: [*:inf_f32]const f32 = &[_:inf_f32]f32{ 1.1, 2.2, 3.3, 4.4 }; + try expect(ptr[4] == inf_f32); // TODO this should be comptime try expect, see #3731 } }; try S.doTheTest(); -- cgit v1.2.3 From d41111d7ef531f6f55a19c56205d6d2f1134c224 Mon Sep 17 00:00:00 2001 From: Motiejus Jakštys Date: Fri, 9 Jun 2023 16:02:18 -0700 Subject: mem: rename align*Generic to mem.align* Anecdote 1: The generic version is way more popular than the non-generic one in Zig codebase: git grep -w alignForward | wc -l 56 git grep -w alignForwardGeneric | wc -l 149 git grep -w alignBackward | wc -l 6 git grep -w alignBackwardGeneric | wc -l 15 Anecdote 2: In my project (turbonss) that does much arithmetic and alignment I exclusively use the Generic functions. Anecdote 3: we used only the Generic versions in the Macho Man's linker workshop. 
--- lib/std/Thread.zig | 8 ++--- lib/std/dynamic_library.zig | 4 +-- lib/std/hash_map.zig | 12 +++---- lib/std/heap.zig | 8 ++--- lib/std/heap/PageAllocator.zig | 12 +++---- lib/std/heap/WasmPageAllocator.zig | 6 ++-- lib/std/heap/arena_allocator.zig | 2 +- lib/std/heap/general_purpose_allocator.zig | 1 + lib/std/mem.zig | 56 ++++++++++++------------------ lib/std/mem/Allocator.zig | 4 +-- lib/std/meta/trailer_flags.zig | 6 ++-- lib/std/os/linux/tls.zig | 8 ++--- lib/std/os/uefi/pool_allocator.zig | 4 +-- lib/std/tar.zig | 2 +- lib/std/target.zig | 2 +- lib/std/testing.zig | 2 +- src/Module.zig | 10 +++--- src/arch/aarch64/CodeGen.zig | 6 ++-- src/arch/arm/CodeGen.zig | 10 +++--- src/arch/arm/abi.zig | 2 +- src/arch/riscv64/CodeGen.zig | 2 +- src/arch/sparc64/CodeGen.zig | 4 +-- src/arch/wasm/CodeGen.zig | 8 ++--- src/arch/x86_64/CodeGen.zig | 10 +++--- src/codegen.zig | 8 ++--- src/codegen/llvm.zig | 44 +++++++++++------------ src/codegen/spirv.zig | 4 +-- src/link/Coff.zig | 30 ++++++++-------- src/link/Dwarf.zig | 2 +- src/link/Elf.zig | 10 +++--- src/link/MachO.zig | 50 +++++++++++++------------- src/link/MachO/CodeSignature.zig | 8 ++--- src/link/MachO/DebugSymbols.zig | 18 +++++----- src/link/MachO/load_commands.zig | 8 ++--- src/link/MachO/thunks.zig | 6 ++-- src/link/MachO/zld.zig | 34 +++++++++--------- src/link/Wasm.zig | 16 ++++----- src/objcopy.zig | 6 ++-- src/type.zig | 22 ++++++------ 39 files changed, 223 insertions(+), 232 deletions(-) (limited to 'lib/std/testing.zig') diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig index 76650a9072..d7bcbee66f 100644 --- a/lib/std/Thread.zig +++ b/lib/std/Thread.zig @@ -931,18 +931,18 @@ const LinuxThreadImpl = struct { guard_offset = bytes; bytes += @max(page_size, config.stack_size); - bytes = std.mem.alignForward(bytes, page_size); + bytes = std.mem.alignForward(usize, bytes, page_size); stack_offset = bytes; - bytes = std.mem.alignForward(bytes, linux.tls.tls_image.alloc_align); + bytes = 
std.mem.alignForward(usize, bytes, linux.tls.tls_image.alloc_align); tls_offset = bytes; bytes += linux.tls.tls_image.alloc_size; - bytes = std.mem.alignForward(bytes, @alignOf(Instance)); + bytes = std.mem.alignForward(usize, bytes, @alignOf(Instance)); instance_offset = bytes; bytes += @sizeOf(Instance); - bytes = std.mem.alignForward(bytes, page_size); + bytes = std.mem.alignForward(usize, bytes, page_size); break :blk bytes; }; diff --git a/lib/std/dynamic_library.zig b/lib/std/dynamic_library.zig index 94da2f4d6d..928d0cc9c3 100644 --- a/lib/std/dynamic_library.zig +++ b/lib/std/dynamic_library.zig @@ -124,7 +124,7 @@ pub const ElfDynLib = struct { // corresponding to the actual LOAD sections. const file_bytes = try os.mmap( null, - mem.alignForward(size, mem.page_size), + mem.alignForward(usize, size, mem.page_size), os.PROT.READ, os.MAP.PRIVATE, fd, @@ -187,7 +187,7 @@ pub const ElfDynLib = struct { // extra nonsense mapped before/after the VirtAddr,MemSiz const aligned_addr = (base + ph.p_vaddr) & ~(@as(usize, mem.page_size) - 1); const extra_bytes = (base + ph.p_vaddr) - aligned_addr; - const extended_memsz = mem.alignForward(ph.p_memsz + extra_bytes, mem.page_size); + const extended_memsz = mem.alignForward(usize, ph.p_memsz + extra_bytes, mem.page_size); const ptr = @intToPtr([*]align(mem.page_size) u8, aligned_addr); const prot = elfToMmapProt(ph.p_flags); if ((ph.p_flags & elf.PF_W) == 0) { diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig index 5b539ddaad..8c05dfeca5 100644 --- a/lib/std/hash_map.zig +++ b/lib/std/hash_map.zig @@ -1545,13 +1545,13 @@ pub fn HashMapUnmanaged( const meta_size = @sizeOf(Header) + new_capacity * @sizeOf(Metadata); comptime assert(@alignOf(Metadata) == 1); - const keys_start = std.mem.alignForward(meta_size, key_align); + const keys_start = std.mem.alignForward(usize, meta_size, key_align); const keys_end = keys_start + new_capacity * @sizeOf(K); - const vals_start = std.mem.alignForward(keys_end, val_align); + 
const vals_start = std.mem.alignForward(usize, keys_end, val_align); const vals_end = vals_start + new_capacity * @sizeOf(V); - const total_size = std.mem.alignForward(vals_end, max_align); + const total_size = std.mem.alignForward(usize, vals_end, max_align); const slice = try allocator.alignedAlloc(u8, max_align, total_size); const ptr = @ptrToInt(slice.ptr); @@ -1581,13 +1581,13 @@ pub fn HashMapUnmanaged( const meta_size = @sizeOf(Header) + cap * @sizeOf(Metadata); comptime assert(@alignOf(Metadata) == 1); - const keys_start = std.mem.alignForward(meta_size, key_align); + const keys_start = std.mem.alignForward(usize, meta_size, key_align); const keys_end = keys_start + cap * @sizeOf(K); - const vals_start = std.mem.alignForward(keys_end, val_align); + const vals_start = std.mem.alignForward(usize, keys_end, val_align); const vals_end = vals_start + cap * @sizeOf(V); - const total_size = std.mem.alignForward(vals_end, max_align); + const total_size = std.mem.alignForward(usize, vals_end, max_align); const slice = @intToPtr([*]align(max_align) u8, @ptrToInt(self.header()))[0..total_size]; allocator.free(slice); diff --git a/lib/std/heap.zig b/lib/std/heap.zig index 7d2a66df1e..7b4bf3af21 100644 --- a/lib/std/heap.zig +++ b/lib/std/heap.zig @@ -83,7 +83,7 @@ const CAllocator = struct { // the aligned address. 
var unaligned_ptr = @ptrCast([*]u8, c.malloc(len + alignment - 1 + @sizeOf(usize)) orelse return null); const unaligned_addr = @ptrToInt(unaligned_ptr); - const aligned_addr = mem.alignForward(unaligned_addr + @sizeOf(usize), alignment); + const aligned_addr = mem.alignForward(usize, unaligned_addr + @sizeOf(usize), alignment); var aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr); getHeader(aligned_ptr).* = unaligned_ptr; @@ -249,7 +249,7 @@ pub const wasm_allocator = Allocator{ /// Verifies that the adjusted length will still map to the full length pub fn alignPageAllocLen(full_len: usize, len: usize) usize { const aligned_len = mem.alignAllocLen(full_len, len); - assert(mem.alignForward(aligned_len, mem.page_size) == full_len); + assert(mem.alignForward(usize, aligned_len, mem.page_size) == full_len); return aligned_len; } @@ -307,7 +307,7 @@ pub const HeapAllocator = switch (builtin.os.tag) { }; const ptr = os.windows.kernel32.HeapAlloc(heap_handle, 0, amt) orelse return null; const root_addr = @ptrToInt(ptr); - const aligned_addr = mem.alignForward(root_addr, ptr_align); + const aligned_addr = mem.alignForward(usize, root_addr, ptr_align); const buf = @intToPtr([*]u8, aligned_addr)[0..n]; getRecordPtr(buf).* = root_addr; return buf.ptr; @@ -840,7 +840,7 @@ pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void { // which is 16 pages, hence the 32. This test may require to increase // the size of the allocations feeding the `allocator` parameter if they // fail, because of this high over-alignment we want to have. 
- while (@ptrToInt(slice.ptr) == mem.alignForward(@ptrToInt(slice.ptr), mem.page_size * 32)) { + while (@ptrToInt(slice.ptr) == mem.alignForward(usize, @ptrToInt(slice.ptr), mem.page_size * 32)) { try stuff_to_free.append(slice); slice = try allocator.alignedAlloc(u8, 16, alloc_size); } diff --git a/lib/std/heap/PageAllocator.zig b/lib/std/heap/PageAllocator.zig index 2c8146caf3..5da570fa42 100644 --- a/lib/std/heap/PageAllocator.zig +++ b/lib/std/heap/PageAllocator.zig @@ -17,7 +17,7 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 { _ = log2_align; assert(n > 0); if (n > maxInt(usize) - (mem.page_size - 1)) return null; - const aligned_len = mem.alignForward(n, mem.page_size); + const aligned_len = mem.alignForward(usize, n, mem.page_size); if (builtin.os.tag == .windows) { const w = os.windows; @@ -54,14 +54,14 @@ fn resize( ) bool { _ = log2_buf_align; _ = return_address; - const new_size_aligned = mem.alignForward(new_size, mem.page_size); + const new_size_aligned = mem.alignForward(usize, new_size, mem.page_size); if (builtin.os.tag == .windows) { const w = os.windows; if (new_size <= buf_unaligned.len) { const base_addr = @ptrToInt(buf_unaligned.ptr); const old_addr_end = base_addr + buf_unaligned.len; - const new_addr_end = mem.alignForward(base_addr + new_size, mem.page_size); + const new_addr_end = mem.alignForward(usize, base_addr + new_size, mem.page_size); if (old_addr_end > new_addr_end) { // For shrinking that is not releasing, we will only // decommit the pages not needed anymore. 
@@ -73,14 +73,14 @@ fn resize( } return true; } - const old_size_aligned = mem.alignForward(buf_unaligned.len, mem.page_size); + const old_size_aligned = mem.alignForward(usize, buf_unaligned.len, mem.page_size); if (new_size_aligned <= old_size_aligned) { return true; } return false; } - const buf_aligned_len = mem.alignForward(buf_unaligned.len, mem.page_size); + const buf_aligned_len = mem.alignForward(usize, buf_unaligned.len, mem.page_size); if (new_size_aligned == buf_aligned_len) return true; @@ -103,7 +103,7 @@ fn free(_: *anyopaque, slice: []u8, log2_buf_align: u8, return_address: usize) v if (builtin.os.tag == .windows) { os.windows.VirtualFree(slice.ptr, 0, os.windows.MEM_RELEASE); } else { - const buf_aligned_len = mem.alignForward(slice.len, mem.page_size); + const buf_aligned_len = mem.alignForward(usize, slice.len, mem.page_size); const ptr = @alignCast(mem.page_size, slice.ptr); os.munmap(ptr[0..buf_aligned_len]); } diff --git a/lib/std/heap/WasmPageAllocator.zig b/lib/std/heap/WasmPageAllocator.zig index 1370af022c..63ae226196 100644 --- a/lib/std/heap/WasmPageAllocator.zig +++ b/lib/std/heap/WasmPageAllocator.zig @@ -100,7 +100,7 @@ fn extendedOffset() usize { } fn nPages(memsize: usize) usize { - return mem.alignForward(memsize, mem.page_size) / mem.page_size; + return mem.alignForward(usize, memsize, mem.page_size) / mem.page_size; } fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, ra: usize) ?[*]u8 { @@ -170,7 +170,7 @@ fn resize( _ = ctx; _ = log2_buf_align; _ = return_address; - const aligned_len = mem.alignForward(buf.len, mem.page_size); + const aligned_len = mem.alignForward(usize, buf.len, mem.page_size); if (new_len > aligned_len) return false; const current_n = nPages(aligned_len); const new_n = nPages(new_len); @@ -190,7 +190,7 @@ fn free( _ = ctx; _ = log2_buf_align; _ = return_address; - const aligned_len = mem.alignForward(buf.len, mem.page_size); + const aligned_len = mem.alignForward(usize, buf.len, mem.page_size); const 
current_n = nPages(aligned_len); const base = nPages(@ptrToInt(buf.ptr)); freePages(base, base + current_n); diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig index c7e0569067..f858510bcf 100644 --- a/lib/std/heap/arena_allocator.zig +++ b/lib/std/heap/arena_allocator.zig @@ -186,7 +186,7 @@ pub const ArenaAllocator = struct { const cur_alloc_buf = @ptrCast([*]u8, cur_node)[0..cur_node.data]; const cur_buf = cur_alloc_buf[@sizeOf(BufNode)..]; const addr = @ptrToInt(cur_buf.ptr) + self.state.end_index; - const adjusted_addr = mem.alignForward(addr, ptr_align); + const adjusted_addr = mem.alignForward(usize, addr, ptr_align); const adjusted_index = self.state.end_index + (adjusted_addr - addr); const new_end_index = adjusted_index + n; diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig index ef88787fc6..51b6c1744f 100644 --- a/lib/std/heap/general_purpose_allocator.zig +++ b/lib/std/heap/general_purpose_allocator.zig @@ -309,6 +309,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { fn bucketStackFramesStart(size_class: usize) usize { return mem.alignForward( + usize, @sizeOf(BucketHeader) + usedBitsCount(size_class), @alignOf(usize), ); diff --git a/lib/std/mem.zig b/lib/std/mem.zig index 87f436d156..23e24b0c09 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -4213,23 +4213,17 @@ test "sliceAsBytes preserves pointer attributes" { /// Round an address up to the next (or current) aligned address. /// The alignment must be a power of 2 and greater than 0. /// Asserts that rounding up the address does not cause integer overflow. 
-pub fn alignForward(addr: usize, alignment: usize) usize { - return alignForwardGeneric(usize, addr, alignment); +pub fn alignForward(comptime T: type, addr: T, alignment: T) T { + assert(isValidAlignGeneric(T, alignment)); + return alignBackward(T, addr + (alignment - 1), alignment); } pub fn alignForwardLog2(addr: usize, log2_alignment: u8) usize { const alignment = @as(usize, 1) << @intCast(math.Log2Int(usize), log2_alignment); - return alignForward(addr, alignment); + return alignForward(usize, addr, alignment); } -/// Round an address up to the next (or current) aligned address. -/// The alignment must be a power of 2 and greater than 0. -/// Asserts that rounding up the address does not cause integer overflow. -pub fn alignForwardGeneric(comptime T: type, addr: T, alignment: T) T { - assert(alignment > 0); - assert(std.math.isPowerOfTwo(alignment)); - return alignBackwardGeneric(T, addr + (alignment - 1), alignment); -} +pub const alignForwardGeneric = @compileError("renamed to alignForward"); /// Force an evaluation of the expression; this tries to prevent /// the compiler from optimizing the computation away even if the @@ -4322,38 +4316,32 @@ test "doNotOptimizeAway" { } test "alignForward" { - try testing.expect(alignForward(1, 1) == 1); - try testing.expect(alignForward(2, 1) == 2); - try testing.expect(alignForward(1, 2) == 2); - try testing.expect(alignForward(2, 2) == 2); - try testing.expect(alignForward(3, 2) == 4); - try testing.expect(alignForward(4, 2) == 4); - try testing.expect(alignForward(7, 8) == 8); - try testing.expect(alignForward(8, 8) == 8); - try testing.expect(alignForward(9, 8) == 16); - try testing.expect(alignForward(15, 8) == 16); - try testing.expect(alignForward(16, 8) == 16); - try testing.expect(alignForward(17, 8) == 24); + try testing.expect(alignForward(usize, 1, 1) == 1); + try testing.expect(alignForward(usize, 2, 1) == 2); + try testing.expect(alignForward(usize, 1, 2) == 2); + try testing.expect(alignForward(usize, 2, 
2) == 2); + try testing.expect(alignForward(usize, 3, 2) == 4); + try testing.expect(alignForward(usize, 4, 2) == 4); + try testing.expect(alignForward(usize, 7, 8) == 8); + try testing.expect(alignForward(usize, 8, 8) == 8); + try testing.expect(alignForward(usize, 9, 8) == 16); + try testing.expect(alignForward(usize, 15, 8) == 16); + try testing.expect(alignForward(usize, 16, 8) == 16); + try testing.expect(alignForward(usize, 17, 8) == 24); } /// Round an address down to the previous (or current) aligned address. /// Unlike `alignBackward`, `alignment` can be any positive number, not just a power of 2. pub fn alignBackwardAnyAlign(i: usize, alignment: usize) usize { if (isValidAlign(alignment)) - return alignBackward(i, alignment); + return alignBackward(usize, i, alignment); assert(alignment != 0); return i - @mod(i, alignment); } /// Round an address down to the previous (or current) aligned address. /// The alignment must be a power of 2 and greater than 0. -pub fn alignBackward(addr: usize, alignment: usize) usize { - return alignBackwardGeneric(usize, addr, alignment); -} - -/// Round an address down to the previous (or current) aligned address. -/// The alignment must be a power of 2 and greater than 0. -pub fn alignBackwardGeneric(comptime T: type, addr: T, alignment: T) T { +pub fn alignBackward(comptime T: type, addr: T, alignment: T) T { assert(isValidAlignGeneric(T, alignment)); // 000010000 // example alignment // 000001111 // subtract 1 @@ -4361,6 +4349,8 @@ pub fn alignBackwardGeneric(comptime T: type, addr: T, alignment: T) T { return addr & ~(alignment - 1); } +pub const alignBackwardGeneric = @compileError("renamed to alignBackward"); + /// Returns whether `alignment` is a valid alignment, meaning it is /// a positive power of 2. 
pub fn isValidAlign(alignment: usize) bool { @@ -4391,7 +4381,7 @@ pub fn isAligned(addr: usize, alignment: usize) bool { } pub fn isAlignedGeneric(comptime T: type, addr: T, alignment: T) bool { - return alignBackwardGeneric(T, addr, alignment) == addr; + return alignBackward(T, addr, alignment) == addr; } test "isAligned" { @@ -4439,7 +4429,7 @@ pub fn alignInBytes(bytes: []u8, comptime new_alignment: usize) ?[]align(new_ali const begin_address = @ptrToInt(bytes.ptr); const end_address = begin_address + bytes.len; - const begin_address_aligned = mem.alignForward(begin_address, new_alignment); + const begin_address_aligned = mem.alignForward(usize, begin_address, new_alignment); const new_length = std.math.sub(usize, end_address, begin_address_aligned) catch |e| switch (e) { error.Overflow => return null, }; diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig index 5110534ed4..4a1ff86721 100644 --- a/lib/std/mem/Allocator.zig +++ b/lib/std/mem/Allocator.zig @@ -208,7 +208,7 @@ pub fn allocAdvancedWithRetAddr( comptime assert(a <= mem.page_size); if (n == 0) { - const ptr = comptime std.mem.alignBackward(math.maxInt(usize), a); + const ptr = comptime std.mem.alignBackward(usize, math.maxInt(usize), a); return @intToPtr([*]align(a) T, ptr)[0..0]; } @@ -267,7 +267,7 @@ pub fn reallocAdvanced( } if (new_n == 0) { self.free(old_mem); - const ptr = comptime std.mem.alignBackward(math.maxInt(usize), Slice.alignment); + const ptr = comptime std.mem.alignBackward(usize, math.maxInt(usize), Slice.alignment); return @intToPtr([*]align(Slice.alignment) T, ptr)[0..0]; } diff --git a/lib/std/meta/trailer_flags.zig b/lib/std/meta/trailer_flags.zig index 0c43a5ff28..a4d83dcbb3 100644 --- a/lib/std/meta/trailer_flags.zig +++ b/lib/std/meta/trailer_flags.zig @@ -105,9 +105,9 @@ pub fn TrailerFlags(comptime Fields: type) type { const active = (self.bits & (1 << i)) != 0; if (i == @enumToInt(field)) { assert(active); - return mem.alignForwardGeneric(usize, off, 
@alignOf(field_info.type)); + return mem.alignForward(usize, off, @alignOf(field_info.type)); } else if (active) { - off = mem.alignForwardGeneric(usize, off, @alignOf(field_info.type)); + off = mem.alignForward(usize, off, @alignOf(field_info.type)); off += @sizeOf(field_info.type); } } @@ -123,7 +123,7 @@ pub fn TrailerFlags(comptime Fields: type) type { if (@sizeOf(field.type) == 0) continue; if ((self.bits & (1 << i)) != 0) { - off = mem.alignForwardGeneric(usize, off, @alignOf(field.type)); + off = mem.alignForward(usize, off, @alignOf(field.type)); off += @sizeOf(field.type); } } diff --git a/lib/std/os/linux/tls.zig b/lib/std/os/linux/tls.zig index 311e5609e8..d765e403c8 100644 --- a/lib/std/os/linux/tls.zig +++ b/lib/std/os/linux/tls.zig @@ -233,7 +233,7 @@ fn initTLS(phdrs: []elf.Phdr) void { l += tls_align_factor - delta; l += @sizeOf(CustomData); tcb_offset = l; - l += mem.alignForward(tls_tcb_size, tls_align_factor); + l += mem.alignForward(usize, tls_tcb_size, tls_align_factor); data_offset = l; l += tls_data_alloc_size; break :blk l; @@ -241,14 +241,14 @@ fn initTLS(phdrs: []elf.Phdr) void { .VariantII => blk: { var l: usize = 0; data_offset = l; - l += mem.alignForward(tls_data_alloc_size, tls_align_factor); + l += mem.alignForward(usize, tls_data_alloc_size, tls_align_factor); // The thread pointer is aligned to p_align tcb_offset = l; l += tls_tcb_size; // The CustomData structure is right after the TCB with no padding // in between so it can be easily found l += @sizeOf(CustomData); - l = mem.alignForward(l, @alignOf(DTV)); + l = mem.alignForward(usize, l, @alignOf(DTV)); dtv_offset = l; l += @sizeOf(DTV); break :blk l; @@ -329,7 +329,7 @@ pub fn initStaticTLS(phdrs: []elf.Phdr) void { // Make sure the slice is correctly aligned. 
const begin_addr = @ptrToInt(alloc_tls_area.ptr); - const begin_aligned_addr = mem.alignForward(begin_addr, tls_image.alloc_align); + const begin_aligned_addr = mem.alignForward(usize, begin_addr, tls_image.alloc_align); const start = begin_aligned_addr - begin_addr; break :blk alloc_tls_area[start .. start + tls_image.alloc_size]; }; diff --git a/lib/std/os/uefi/pool_allocator.zig b/lib/std/os/uefi/pool_allocator.zig index 8f26aac32c..00b8941974 100644 --- a/lib/std/os/uefi/pool_allocator.zig +++ b/lib/std/os/uefi/pool_allocator.zig @@ -24,7 +24,7 @@ const UefiPoolAllocator = struct { const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align); - const metadata_len = mem.alignForward(@sizeOf(usize), ptr_align); + const metadata_len = mem.alignForward(usize, @sizeOf(usize), ptr_align); const full_len = metadata_len + len; @@ -32,7 +32,7 @@ const UefiPoolAllocator = struct { if (uefi.system_table.boot_services.?.allocatePool(uefi.efi_pool_memory_type, full_len, &unaligned_ptr) != .Success) return null; const unaligned_addr = @ptrToInt(unaligned_ptr); - const aligned_addr = mem.alignForward(unaligned_addr + @sizeOf(usize), ptr_align); + const aligned_addr = mem.alignForward(usize, unaligned_addr + @sizeOf(usize), ptr_align); var aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr); getHeader(aligned_ptr).* = unaligned_ptr; diff --git a/lib/std/tar.zig b/lib/std/tar.zig index c570c8e09c..14a9ce5d3f 100644 --- a/lib/std/tar.zig +++ b/lib/std/tar.zig @@ -116,7 +116,7 @@ pub fn pipeToFileSystem(dir: std.fs.Dir, reader: anytype, options: Options) !voi const header: Header = .{ .bytes = buffer[start..][0..512] }; start += 512; const file_size = try header.fileSize(); - const rounded_file_size = std.mem.alignForwardGeneric(u64, file_size, 512); + const rounded_file_size = std.mem.alignForward(u64, file_size, 512); const pad_len = @intCast(usize, rounded_file_size - file_size); const unstripped_file_name = try 
header.fullFileName(&file_name_buffer); switch (header.fileType()) { diff --git a/lib/std/target.zig b/lib/std/target.zig index 15bb65cd4b..4c7bcfc37a 100644 --- a/lib/std/target.zig +++ b/lib/std/target.zig @@ -1944,7 +1944,7 @@ pub const Target = struct { 16 => 2, 32 => 4, 64 => 8, - 80 => @intCast(u16, mem.alignForward(10, c_type_alignment(t, .longdouble))), + 80 => @intCast(u16, mem.alignForward(usize, 10, c_type_alignment(t, .longdouble))), 128 => 16, else => unreachable, }, diff --git a/lib/std/testing.zig b/lib/std/testing.zig index 6b1e0bb640..bbb0905121 100644 --- a/lib/std/testing.zig +++ b/lib/std/testing.zig @@ -305,7 +305,7 @@ pub fn expectEqualSlices(comptime T: type, expected: []const T, actual: []const var window_start: usize = 0; if (@max(actual.len, expected.len) > max_window_size) { const alignment = if (T == u8) 16 else 2; - window_start = std.mem.alignBackward(diff_index - @min(diff_index, alignment), alignment); + window_start = std.mem.alignBackward(usize, diff_index - @min(diff_index, alignment), alignment); } const expected_window = expected[window_start..@min(expected.len, window_start + max_window_size)]; const expected_truncated = window_start + expected_window.len < expected.len; diff --git a/src/Module.zig b/src/Module.zig index 8c5a86652d..8d9f9593dd 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1293,7 +1293,7 @@ pub const Union = struct { payload_align = @max(payload_align, 1); if (!have_tag or !u.tag_ty.hasRuntimeBits(mod)) { return .{ - .abi_size = std.mem.alignForwardGeneric(u64, payload_size, payload_align), + .abi_size = std.mem.alignForward(u64, payload_size, payload_align), .abi_align = payload_align, .most_aligned_field = most_aligned_field, .most_aligned_field_size = most_aligned_field_size, @@ -1314,18 +1314,18 @@ pub const Union = struct { if (tag_align >= payload_align) { // {Tag, Payload} size += tag_size; - size = std.mem.alignForwardGeneric(u64, size, payload_align); + size = std.mem.alignForward(u64, size, 
payload_align); size += payload_size; const prev_size = size; - size = std.mem.alignForwardGeneric(u64, size, tag_align); + size = std.mem.alignForward(u64, size, tag_align); padding = @intCast(u32, size - prev_size); } else { // {Payload, Tag} size += payload_size; - size = std.mem.alignForwardGeneric(u64, size, tag_align); + size = std.mem.alignForward(u64, size, tag_align); size += tag_size; const prev_size = size; - size = std.mem.alignForwardGeneric(u64, size, payload_align); + size = std.mem.alignForward(u64, size, payload_align); padding = @intCast(u32, size - prev_size); } return .{ diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index dd752555b7..1355f96231 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -566,7 +566,7 @@ fn gen(self: *Self) !void { // Backpatch stack offset const total_stack_size = self.max_end_stack + self.saved_regs_stack_space; - const aligned_total_stack_end = mem.alignForwardGeneric(u32, total_stack_size, self.stack_align); + const aligned_total_stack_end = mem.alignForward(u32, total_stack_size, self.stack_align); const stack_size = aligned_total_stack_end - self.saved_regs_stack_space; self.max_end_stack = stack_size; if (math.cast(u12, stack_size)) |size| { @@ -1011,7 +1011,7 @@ fn allocMem( std.math.ceilPowerOfTwoAssert(u32, abi_size); // TODO find a free slot instead of always appending - const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, adjusted_align) + abi_size; + const offset = mem.alignForward(u32, self.next_stack_offset, adjusted_align) + abi_size; self.next_stack_offset = offset; self.max_end_stack = @max(self.max_end_stack, self.next_stack_offset); @@ -6328,7 +6328,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { const param_size = @intCast(u32, ty.toType().abiSize(mod)); const param_alignment = ty.toType().abiAlignment(mod); - stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment); + 
stack_offset = std.mem.alignForward(u32, stack_offset, param_alignment); result.args[i] = .{ .stack_argument_offset = stack_offset }; stack_offset += param_size; } else { diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 69a156999b..a2a5a3d4d3 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -560,7 +560,7 @@ fn gen(self: *Self) !void { // Backpatch stack offset const total_stack_size = self.max_end_stack + self.saved_regs_stack_space; - const aligned_total_stack_end = mem.alignForwardGeneric(u32, total_stack_size, self.stack_align); + const aligned_total_stack_end = mem.alignForward(u32, total_stack_size, self.stack_align); const stack_size = aligned_total_stack_end - self.saved_regs_stack_space; self.max_end_stack = stack_size; self.mir_instructions.set(sub_reloc, .{ @@ -991,7 +991,7 @@ fn allocMem( assert(abi_align > 0); // TODO find a free slot instead of always appending - const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align) + abi_size; + const offset = mem.alignForward(u32, self.next_stack_offset, abi_align) + abi_size; self.next_stack_offset = offset; self.max_end_stack = @max(self.max_end_stack, self.next_stack_offset); @@ -6214,7 +6214,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { for (fn_info.param_types, 0..) 
|ty, i| { if (ty.toType().abiAlignment(mod) == 8) - ncrn = std.mem.alignForwardGeneric(usize, ncrn, 2); + ncrn = std.mem.alignForward(usize, ncrn, 2); const param_size = @intCast(u32, ty.toType().abiSize(mod)); if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) { @@ -6229,7 +6229,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } else { ncrn = 4; if (ty.toType().abiAlignment(mod) == 8) - nsaa = std.mem.alignForwardGeneric(u32, nsaa, 8); + nsaa = std.mem.alignForward(u32, nsaa, 8); result.args[i] = .{ .stack_argument_offset = nsaa }; nsaa += param_size; @@ -6267,7 +6267,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { const param_size = @intCast(u32, ty.toType().abiSize(mod)); const param_alignment = ty.toType().abiAlignment(mod); - stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment); + stack_offset = std.mem.alignForward(u32, stack_offset, param_alignment); result.args[i] = .{ .stack_argument_offset = stack_offset }; stack_offset += param_size; } else { diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig index e4a07f22bf..a4a4fe472b 100644 --- a/src/arch/arm/abi.zig +++ b/src/arch/arm/abi.zig @@ -13,7 +13,7 @@ pub const Class = union(enum) { i64_array: u8, fn arrSize(total_size: u64, arr_size: u64) Class { - const count = @intCast(u8, std.mem.alignForwardGeneric(u64, total_size, arr_size) / arr_size); + const count = @intCast(u8, std.mem.alignForward(u64, total_size, arr_size) / arr_size); if (arr_size == 32) { return .{ .i32_array = count }; } else { diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 809c388532..c6ac3255c6 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -792,7 +792,7 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u if (abi_align > self.stack_align) self.stack_align = abi_align; // TODO find a free slot instead of always appending - const 
offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align); + const offset = mem.alignForward(u32, self.next_stack_offset, abi_align); self.next_stack_offset = offset + abi_size; if (self.next_stack_offset > self.max_end_stack) self.max_end_stack = self.next_stack_offset; diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index b660126604..e339794fd4 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -423,7 +423,7 @@ fn gen(self: *Self) !void { // Backpatch stack offset const total_stack_size = self.max_end_stack + abi.stack_reserved_area; - const stack_size = mem.alignForwardGeneric(u32, total_stack_size, self.stack_align); + const stack_size = mem.alignForward(u32, total_stack_size, self.stack_align); if (math.cast(i13, stack_size)) |size| { self.mir_instructions.set(save_inst, .{ .tag = .save, @@ -2781,7 +2781,7 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u if (abi_align > self.stack_align) self.stack_align = abi_align; // TODO find a free slot instead of always appending - const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align) + abi_size; + const offset = mem.alignForward(u32, self.next_stack_offset, abi_align) + abi_size; self.next_stack_offset = offset; if (self.next_stack_offset > self.max_end_stack) self.max_end_stack = self.next_stack_offset; diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index aa44dc2bc8..495ca7f6dd 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1286,7 +1286,7 @@ fn genFunc(func: *CodeGen) InnerError!void { // store stack pointer so we can restore it when we return from the function try prologue.append(.{ .tag = .local_tee, .data = .{ .label = func.initial_stack_value.local.value } }); // get the total stack size - const aligned_stack = std.mem.alignForwardGeneric(u32, func.stack_size, func.stack_alignment); + const aligned_stack = std.mem.alignForward(u32, 
func.stack_size, func.stack_alignment); try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(i32, aligned_stack) } }); // substract it from the current stack pointer try prologue.append(.{ .tag = .i32_sub, .data = .{ .tag = {} } }); @@ -1531,7 +1531,7 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue { func.stack_alignment = abi_align; } - const offset = std.mem.alignForwardGeneric(u32, func.stack_size, abi_align); + const offset = std.mem.alignForward(u32, func.stack_size, abi_align); defer func.stack_size = offset + abi_size; return WValue{ .stack_offset = .{ .value = offset, .references = 1 } }; @@ -1564,7 +1564,7 @@ fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue { func.stack_alignment = abi_alignment; } - const offset = std.mem.alignForwardGeneric(u32, func.stack_size, abi_alignment); + const offset = std.mem.alignForward(u32, func.stack_size, abi_alignment); defer func.stack_size = offset + abi_size; return WValue{ .stack_offset = .{ .value = offset, .references = 1 } }; @@ -2975,7 +2975,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue if (layout.payload_align > layout.tag_align) break :blk 0; // tag is stored first so calculate offset from where payload starts - break :blk @intCast(u32, std.mem.alignForwardGeneric(u64, layout.tag_size, layout.tag_align)); + break :blk @intCast(u32, std.mem.alignForward(u64, layout.tag_size, layout.tag_align)); }, }, .Pointer => switch (parent_ty.ptrSize(mod)) { diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 6e13a55008..a33faecca3 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2150,7 +2150,7 @@ fn setFrameLoc( const frame_i = @enumToInt(frame_index); if (aligned) { const alignment = @as(i32, 1) << self.frame_allocs.items(.abi_align)[frame_i]; - offset.* = mem.alignForwardGeneric(i32, offset.*, alignment); + offset.* = mem.alignForward(i32, offset.*, alignment); } self.frame_locs.set(frame_i, .{ 
.base = base, .disp = offset.* }); offset.* += self.frame_allocs.items(.abi_size)[frame_i]; @@ -2207,7 +2207,7 @@ fn computeFrameLayout(self: *Self) !FrameLayout { self.setFrameLoc(.stack_frame, .rsp, &rsp_offset, true); for (stack_frame_order) |frame_index| self.setFrameLoc(frame_index, .rsp, &rsp_offset, true); rsp_offset += stack_frame_align_offset; - rsp_offset = mem.alignForwardGeneric(i32, rsp_offset, @as(i32, 1) << needed_align); + rsp_offset = mem.alignForward(i32, rsp_offset, @as(i32, 1) << needed_align); rsp_offset -= stack_frame_align_offset; frame_size[@enumToInt(FrameIndex.call_frame)] = @intCast(u31, rsp_offset - frame_offset[@enumToInt(FrameIndex.stack_frame)]); @@ -11807,7 +11807,7 @@ fn resolveCallingConventionValues( const param_size = @intCast(u31, ty.abiSize(mod)); const param_align = @intCast(u31, ty.abiAlignment(mod)); result.stack_byte_count = - mem.alignForwardGeneric(u31, result.stack_byte_count, param_align); + mem.alignForward(u31, result.stack_byte_count, param_align); arg.* = .{ .load_frame = .{ .index = stack_frame_base, .off = result.stack_byte_count, @@ -11847,7 +11847,7 @@ fn resolveCallingConventionValues( const param_size = @intCast(u31, ty.abiSize(mod)); const param_align = @intCast(u31, ty.abiAlignment(mod)); result.stack_byte_count = - mem.alignForwardGeneric(u31, result.stack_byte_count, param_align); + mem.alignForward(u31, result.stack_byte_count, param_align); arg.* = .{ .load_frame = .{ .index = stack_frame_base, .off = result.stack_byte_count, @@ -11858,7 +11858,7 @@ fn resolveCallingConventionValues( else => return self.fail("TODO implement function parameters and return values for {} on x86_64", .{cc}), } - result.stack_byte_count = mem.alignForwardGeneric(u31, result.stack_byte_count, result.stack_align); + result.stack_byte_count = mem.alignForward(u31, result.stack_byte_count, result.stack_align); return result; } diff --git a/src/codegen.zig b/src/codegen.zig index 6145d8778b..430562fe9b 100644 --- a/src/codegen.zig 
+++ b/src/codegen.zig @@ -290,7 +290,7 @@ pub fn generateSymbol( .fail => |em| return .{ .fail = em }, } const unpadded_end = code.items.len - begin; - const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align); + const padded_end = mem.alignForward(u64, unpadded_end, abi_align); const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow; if (padding > 0) { @@ -303,7 +303,7 @@ pub fn generateSymbol( const begin = code.items.len; try code.writer().writeInt(u16, err_val, endian); const unpadded_end = code.items.len - begin; - const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align); + const padded_end = mem.alignForward(u64, unpadded_end, abi_align); const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow; if (padding > 0) { @@ -1020,7 +1020,7 @@ pub fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u64 { if (payload_align >= error_align or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return 0; } else { - return mem.alignForwardGeneric(u64, Type.anyerror.abiSize(mod), payload_align); + return mem.alignForward(u64, Type.anyerror.abiSize(mod), payload_align); } } @@ -1029,7 +1029,7 @@ pub fn errUnionErrorOffset(payload_ty: Type, mod: *Module) u64 { const payload_align = payload_ty.abiAlignment(mod); const error_align = Type.anyerror.abiAlignment(mod); if (payload_align >= error_align and payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - return mem.alignForwardGeneric(u64, payload_ty.abiSize(mod), error_align); + return mem.alignForward(u64, payload_ty.abiSize(mod), error_align); } else { return 0; } diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 47be4148d3..11cd752000 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1633,7 +1633,7 @@ pub const Object = struct { var offset: u64 = 0; offset += ptr_size; - offset = std.mem.alignForwardGeneric(u64, offset, len_align); + offset = std.mem.alignForward(u64, offset, len_align); const 
len_offset = offset; const fields: [2]*llvm.DIType = .{ @@ -1801,7 +1801,7 @@ pub const Object = struct { var offset: u64 = 0; offset += payload_size; - offset = std.mem.alignForwardGeneric(u64, offset, non_null_align); + offset = std.mem.alignForward(u64, offset, non_null_align); const non_null_offset = offset; const fields: [2]*llvm.DIType = .{ @@ -1888,12 +1888,12 @@ pub const Object = struct { error_index = 0; payload_index = 1; error_offset = 0; - payload_offset = std.mem.alignForwardGeneric(u64, error_size, payload_align); + payload_offset = std.mem.alignForward(u64, error_size, payload_align); } else { payload_index = 0; error_index = 1; payload_offset = 0; - error_offset = std.mem.alignForwardGeneric(u64, payload_size, error_align); + error_offset = std.mem.alignForward(u64, payload_size, error_align); } var fields: [2]*llvm.DIType = undefined; @@ -1995,7 +1995,7 @@ pub const Object = struct { const field_size = field_ty.toType().abiSize(mod); const field_align = field_ty.toType().abiAlignment(mod); - const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align); + const field_offset = std.mem.alignForward(u64, offset, field_align); offset = field_offset + field_size; const field_name = if (tuple.names.len != 0) @@ -2086,7 +2086,7 @@ pub const Object = struct { const field = field_and_index.field; const field_size = field.ty.abiSize(mod); const field_align = field.alignment(mod, layout); - const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align); + const field_offset = std.mem.alignForward(u64, offset, field_align); offset = field_offset + field_size; const field_name = mod.intern_pool.stringToSlice(fields.keys()[field_and_index.index]); @@ -2242,10 +2242,10 @@ pub const Object = struct { var payload_offset: u64 = undefined; if (layout.tag_align >= layout.payload_align) { tag_offset = 0; - payload_offset = std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align); + payload_offset = std.mem.alignForward(u64, 
layout.tag_size, layout.payload_align); } else { payload_offset = 0; - tag_offset = std.mem.alignForwardGeneric(u64, layout.payload_size, layout.tag_align); + tag_offset = std.mem.alignForward(u64, layout.payload_size, layout.tag_align); } const tag_di = dib.createMemberType( @@ -2861,9 +2861,9 @@ pub const DeclGen = struct { fields_buf[0] = llvm_error_type; fields_buf[1] = llvm_payload_type; const payload_end = - std.mem.alignForwardGeneric(u64, error_size, payload_align) + + std.mem.alignForward(u64, error_size, payload_align) + payload_size; - const abi_size = std.mem.alignForwardGeneric(u64, payload_end, error_align); + const abi_size = std.mem.alignForward(u64, payload_end, error_align); const padding = @intCast(c_uint, abi_size - payload_end); if (padding == 0) { return dg.context.structType(&fields_buf, 2, .False); @@ -2874,9 +2874,9 @@ pub const DeclGen = struct { fields_buf[0] = llvm_payload_type; fields_buf[1] = llvm_error_type; const error_end = - std.mem.alignForwardGeneric(u64, payload_size, error_align) + + std.mem.alignForward(u64, payload_size, error_align) + error_size; - const abi_size = std.mem.alignForwardGeneric(u64, error_end, payload_align); + const abi_size = std.mem.alignForward(u64, error_end, payload_align); const padding = @intCast(c_uint, abi_size - error_end); if (padding == 0) { return dg.context.structType(&fields_buf, 2, .False); @@ -2910,7 +2910,7 @@ pub const DeclGen = struct { const field_align = field_ty.toType().abiAlignment(mod); big_align = @max(big_align, field_align); const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + offset = std.mem.alignForward(u64, offset, field_align); const padding_len = offset - prev_offset; if (padding_len > 0) { @@ -2924,7 +2924,7 @@ pub const DeclGen = struct { } { const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); + offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; 
if (padding_len > 0) { const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); @@ -2979,7 +2979,7 @@ pub const DeclGen = struct { field_align < field_ty_align; big_align = @max(big_align, field_align); const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + offset = std.mem.alignForward(u64, offset, field_align); const padding_len = offset - prev_offset; if (padding_len > 0) { @@ -2993,7 +2993,7 @@ pub const DeclGen = struct { } { const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); + offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); @@ -3552,7 +3552,7 @@ pub const DeclGen = struct { const field_align = field_ty.toType().abiAlignment(mod); big_align = @max(big_align, field_align); const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + offset = std.mem.alignForward(u64, offset, field_align); const padding_len = offset - prev_offset; if (padding_len > 0) { @@ -3575,7 +3575,7 @@ pub const DeclGen = struct { } { const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); + offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); @@ -3650,7 +3650,7 @@ pub const DeclGen = struct { const field_align = field.alignment(mod, struct_obj.layout); big_align = @max(big_align, field_align); const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + offset = std.mem.alignForward(u64, offset, field_align); const padding_len = offset - prev_offset; if (padding_len > 0) { @@ -3673,7 +3673,7 @@ pub const DeclGen = struct { } { const prev_offset = offset; - offset = 
std.mem.alignForwardGeneric(u64, offset, big_align); + offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); @@ -10274,7 +10274,7 @@ fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField { const field_align = field_ty.toType().abiAlignment(mod); big_align = @max(big_align, field_align); const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + offset = std.mem.alignForward(u64, offset, field_align); const padding_len = offset - prev_offset; if (padding_len > 0) { @@ -10308,7 +10308,7 @@ fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField { const field_align = field.alignment(mod, layout); big_align = @max(big_align, field_align); const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + offset = std.mem.alignForward(u64, offset, field_align); const padding_len = offset - prev_offset; if (padding_len > 0) { diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 4fd91aded4..dc1f23dad4 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -472,12 +472,12 @@ pub const DeclGen = struct { try self.initializers.append(result_id); self.partial_word.len = 0; - self.size = std.mem.alignForwardGeneric(u32, self.size, @sizeOf(Word)); + self.size = std.mem.alignForward(u32, self.size, @sizeOf(Word)); } /// Fill the buffer with undefined values until the size is aligned to `align`. 
fn fillToAlign(self: *@This(), alignment: u32) !void { - const target_size = std.mem.alignForwardGeneric(u32, self.size, alignment); + const target_size = std.mem.alignForward(u32, self.size, alignment); try self.addUndef(target_size - self.size); } diff --git a/src/link/Coff.zig b/src/link/Coff.zig index f7785858dd..202bb71e9b 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -437,10 +437,10 @@ fn allocateSection(self: *Coff, name: []const u8, size: u32, flags: coff.Section const vaddr = blk: { if (index == 0) break :blk self.page_size; const prev_header = self.sections.items(.header)[index - 1]; - break :blk mem.alignForwardGeneric(u32, prev_header.virtual_address + prev_header.virtual_size, self.page_size); + break :blk mem.alignForward(u32, prev_header.virtual_address + prev_header.virtual_size, self.page_size); }; // We commit more memory than needed upfront so that we don't have to reallocate too soon. - const memsz = mem.alignForwardGeneric(u32, size, self.page_size) * 100; + const memsz = mem.alignForward(u32, size, self.page_size) * 100; log.debug("found {s} free space 0x{x} to 0x{x} (0x{x} - 0x{x})", .{ name, off, @@ -505,8 +505,8 @@ fn growSection(self: *Coff, sect_id: u32, needed_size: u32) !void { fn growSectionVirtualMemory(self: *Coff, sect_id: u32, needed_size: u32) !void { const header = &self.sections.items(.header)[sect_id]; const increased_size = padToIdeal(needed_size); - const old_aligned_end = header.virtual_address + mem.alignForwardGeneric(u32, header.virtual_size, self.page_size); - const new_aligned_end = header.virtual_address + mem.alignForwardGeneric(u32, increased_size, self.page_size); + const old_aligned_end = header.virtual_address + mem.alignForward(u32, header.virtual_size, self.page_size); + const new_aligned_end = header.virtual_address + mem.alignForward(u32, increased_size, self.page_size); const diff = new_aligned_end - old_aligned_end; log.debug("growing {s} in virtual memory by {x}", .{ self.getSectionName(header), 
diff }); @@ -567,7 +567,7 @@ fn allocateAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignme const ideal_capacity_end_vaddr = math.add(u32, sym.value, ideal_capacity) catch ideal_capacity; const capacity_end_vaddr = sym.value + capacity; const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity; - const new_start_vaddr = mem.alignBackwardGeneric(u32, new_start_vaddr_unaligned, alignment); + const new_start_vaddr = mem.alignBackward(u32, new_start_vaddr_unaligned, alignment); if (new_start_vaddr < ideal_capacity_end_vaddr) { // Additional bookkeeping here to notice if this free list node // should be deleted because the atom that it points to has grown to take up @@ -596,11 +596,11 @@ fn allocateAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignme const last_symbol = last.getSymbol(self); const ideal_capacity = if (header.isCode()) padToIdeal(last.size) else last.size; const ideal_capacity_end_vaddr = last_symbol.value + ideal_capacity; - const new_start_vaddr = mem.alignForwardGeneric(u32, ideal_capacity_end_vaddr, alignment); + const new_start_vaddr = mem.alignForward(u32, ideal_capacity_end_vaddr, alignment); atom_placement = last_index; break :blk new_start_vaddr; } else { - break :blk mem.alignForwardGeneric(u32, header.virtual_address, alignment); + break :blk mem.alignForward(u32, header.virtual_address, alignment); } }; @@ -722,7 +722,7 @@ pub fn createAtom(self: *Coff) !Atom.Index { fn growAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignment: u32) !u32 { const atom = self.getAtom(atom_index); const sym = atom.getSymbol(self); - const align_ok = mem.alignBackwardGeneric(u32, sym.value, alignment) == sym.value; + const align_ok = mem.alignBackward(u32, sym.value, alignment) == sym.value; const need_realloc = !align_ok or new_atom_size > atom.capacity(self); if (!need_realloc) return sym.value; return self.allocateAtom(atom_index, new_atom_size, alignment); @@ -1798,7 +1798,7 @@ fn 
writeBaseRelocations(self: *Coff) !void { for (offsets.items) |offset| { const rva = sym.value + offset; - const page = mem.alignBackwardGeneric(u32, rva, self.page_size); + const page = mem.alignBackward(u32, rva, self.page_size); const gop = try page_table.getOrPut(page); if (!gop.found_existing) { gop.value_ptr.* = std.ArrayList(coff.BaseRelocation).init(gpa); @@ -1819,7 +1819,7 @@ fn writeBaseRelocations(self: *Coff) !void { if (sym.section_number == .UNDEFINED) continue; const rva = @intCast(u32, header.virtual_address + index * self.ptr_width.size()); - const page = mem.alignBackwardGeneric(u32, rva, self.page_size); + const page = mem.alignBackward(u32, rva, self.page_size); const gop = try page_table.getOrPut(page); if (!gop.found_existing) { gop.value_ptr.* = std.ArrayList(coff.BaseRelocation).init(gpa); @@ -1907,7 +1907,7 @@ fn writeImportTables(self: *Coff) !void { lookup_table_size += @intCast(u32, itable.entries.items.len + 1) * @sizeOf(coff.ImportLookupEntry64.ByName); for (itable.entries.items) |entry| { const sym_name = self.getSymbolName(entry); - names_table_size += 2 + mem.alignForwardGeneric(u32, @intCast(u32, sym_name.len + 1), 2); + names_table_size += 2 + mem.alignForward(u32, @intCast(u32, sym_name.len + 1), 2); } dll_names_size += @intCast(u32, lib_name.len + ext.len + 1); } @@ -2102,7 +2102,7 @@ fn writeHeader(self: *Coff) !void { }; const subsystem: coff.Subsystem = .WINDOWS_CUI; const size_of_image: u32 = self.getSizeOfImage(); - const size_of_headers: u32 = mem.alignForwardGeneric(u32, self.getSizeOfHeaders(), default_file_alignment); + const size_of_headers: u32 = mem.alignForward(u32, self.getSizeOfHeaders(), default_file_alignment); const image_base = self.getImageBase(); const base_of_code = self.sections.get(self.text_section_index.?).header.virtual_address; @@ -2247,7 +2247,7 @@ fn allocatedSize(self: *Coff, start: u32) u32 { fn findFreeSpace(self: *Coff, object_size: u32, min_alignment: u32) u32 { var start: u32 = 0; while 
(self.detectAllocCollision(start, object_size)) |item_end| { - start = mem.alignForwardGeneric(u32, item_end, min_alignment); + start = mem.alignForward(u32, item_end, min_alignment); } return start; } @@ -2294,9 +2294,9 @@ inline fn getSectionHeadersOffset(self: Coff) u32 { } inline fn getSizeOfImage(self: Coff) u32 { - var image_size: u32 = mem.alignForwardGeneric(u32, self.getSizeOfHeaders(), self.page_size); + var image_size: u32 = mem.alignForward(u32, self.getSizeOfHeaders(), self.page_size); for (self.sections.items(.header)) |header| { - image_size += mem.alignForwardGeneric(u32, header.virtual_size, self.page_size); + image_size += mem.alignForward(u32, header.virtual_size, self.page_size); } return image_size; } diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index b9b7772260..3cb1c213e9 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -2152,7 +2152,7 @@ pub fn writeDbgAranges(self: *Dwarf, addr: u64, size: u64) !void { di_buf.appendAssumeCapacity(0); // segment_selector_size const end_header_offset = di_buf.items.len; - const begin_entries_offset = mem.alignForward(end_header_offset, ptr_width_bytes * 2); + const begin_entries_offset = mem.alignForward(usize, end_header_offset, ptr_width_bytes * 2); di_buf.appendNTimesAssumeCapacity(0, begin_entries_offset - end_header_offset); // Currently only one compilation unit is supported, so the address range is simply diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 15ba9ebecc..e0d0dfc75f 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -439,7 +439,7 @@ pub fn allocatedSize(self: *Elf, start: u64) u64 { pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u32) u64 { var start: u64 = 0; while (self.detectAllocCollision(start, object_size)) |item_end| { - start = mem.alignForwardGeneric(u64, item_end, min_alignment); + start = mem.alignForward(u64, item_end, min_alignment); } return start; } @@ -1173,7 +1173,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, 
prog_node: *std.Progress.Node phdr_table.p_offset = self.findFreeSpace(needed_size, @intCast(u32, phdr_table.p_align)); } - phdr_table_load.p_offset = mem.alignBackwardGeneric(u64, phdr_table.p_offset, phdr_table_load.p_align); + phdr_table_load.p_offset = mem.alignBackward(u64, phdr_table.p_offset, phdr_table_load.p_align); const load_align_offset = phdr_table.p_offset - phdr_table_load.p_offset; phdr_table_load.p_filesz = load_align_offset + needed_size; phdr_table_load.p_memsz = load_align_offset + needed_size; @@ -2215,7 +2215,7 @@ fn shrinkAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64) void { fn growAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignment: u64) !u64 { const atom = self.getAtom(atom_index); const sym = atom.getSymbol(self); - const align_ok = mem.alignBackwardGeneric(u64, sym.st_value, alignment) == sym.st_value; + const align_ok = mem.alignBackward(u64, sym.st_value, alignment) == sym.st_value; const need_realloc = !align_ok or new_block_size > atom.capacity(self); if (!need_realloc) return sym.st_value; return self.allocateAtom(atom_index, new_block_size, alignment); @@ -2269,7 +2269,7 @@ fn allocateAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignme const ideal_capacity_end_vaddr = std.math.add(u64, big_atom_sym.st_value, ideal_capacity) catch ideal_capacity; const capacity_end_vaddr = big_atom_sym.st_value + capacity; const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity; - const new_start_vaddr = mem.alignBackwardGeneric(u64, new_start_vaddr_unaligned, alignment); + const new_start_vaddr = mem.alignBackward(u64, new_start_vaddr_unaligned, alignment); if (new_start_vaddr < ideal_capacity_end_vaddr) { // Additional bookkeeping here to notice if this free list node // should be deleted because the block that it points to has grown to take up @@ -2298,7 +2298,7 @@ fn allocateAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignme const last_sym = 
last.getSymbol(self); const ideal_capacity = padToIdeal(last_sym.st_size); const ideal_capacity_end_vaddr = last_sym.st_value + ideal_capacity; - const new_start_vaddr = mem.alignForwardGeneric(u64, ideal_capacity_end_vaddr, alignment); + const new_start_vaddr = mem.alignForward(u64, ideal_capacity_end_vaddr, alignment); // Set up the metadata to be updated, after errors are no longer possible. atom_placement = last_index; break :blk new_start_vaddr; diff --git a/src/link/MachO.zig b/src/link/MachO.zig index a3f67bc70a..024fe1f8d9 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1777,7 +1777,7 @@ fn shrinkAtom(self: *MachO, atom_index: Atom.Index, new_block_size: u64) void { fn growAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: u64) !u64 { const atom = self.getAtom(atom_index); const sym = atom.getSymbol(self); - const align_ok = mem.alignBackwardGeneric(u64, sym.n_value, alignment) == sym.n_value; + const align_ok = mem.alignBackward(u64, sym.n_value, alignment) == sym.n_value; const need_realloc = !align_ok or new_atom_size > atom.capacity(self); if (!need_realloc) return sym.n_value; return self.allocateAtom(atom_index, new_atom_size, alignment); @@ -2598,7 +2598,7 @@ fn populateMissingMetadata(self: *MachO) !void { // The first __TEXT segment is immovable and covers MachO header and load commands. 
self.header_segment_cmd_index = @intCast(u8, self.segments.items.len); const ideal_size = @max(self.base.options.headerpad_size orelse 0, default_headerpad_size); - const needed_size = mem.alignForwardGeneric(u64, padToIdeal(ideal_size), self.page_size); + const needed_size = mem.alignForward(u64, padToIdeal(ideal_size), self.page_size); log.debug("found __TEXT segment (header-only) free space 0x{x} to 0x{x}", .{ 0, needed_size }); @@ -2735,7 +2735,7 @@ fn populateMissingMetadata(self: *MachO) !void { fn calcPagezeroSize(self: *MachO) u64 { const pagezero_vmsize = self.base.options.pagezero_size orelse default_pagezero_vmsize; - const aligned_pagezero_vmsize = mem.alignBackwardGeneric(u64, pagezero_vmsize, self.page_size); + const aligned_pagezero_vmsize = mem.alignBackward(u64, pagezero_vmsize, self.page_size); if (self.base.options.output_mode == .Lib) return 0; if (aligned_pagezero_vmsize == 0) return 0; if (aligned_pagezero_vmsize != pagezero_vmsize) { @@ -2759,10 +2759,10 @@ fn allocateSection(self: *MachO, segname: []const u8, sectname: []const u8, opts const section_id = @intCast(u8, self.sections.slice().len); const vmaddr = blk: { const prev_segment = self.segments.items[segment_id - 1]; - break :blk mem.alignForwardGeneric(u64, prev_segment.vmaddr + prev_segment.vmsize, self.page_size); + break :blk mem.alignForward(u64, prev_segment.vmaddr + prev_segment.vmsize, self.page_size); }; // We commit more memory than needed upfront so that we don't have to reallocate too soon. 
- const vmsize = mem.alignForwardGeneric(u64, opts.size, self.page_size); + const vmsize = mem.alignForward(u64, opts.size, self.page_size); const off = self.findFreeSpace(opts.size, self.page_size); log.debug("found {s},{s} free space 0x{x} to 0x{x} (0x{x} - 0x{x})", .{ @@ -2790,8 +2790,8 @@ fn allocateSection(self: *MachO, segname: []const u8, sectname: []const u8, opts var section = macho.section_64{ .sectname = makeStaticString(sectname), .segname = makeStaticString(segname), - .addr = mem.alignForwardGeneric(u64, vmaddr, opts.alignment), - .offset = mem.alignForwardGeneric(u32, @intCast(u32, off), opts.alignment), + .addr = mem.alignForward(u64, vmaddr, opts.alignment), + .offset = mem.alignForward(u32, @intCast(u32, off), opts.alignment), .size = opts.size, .@"align" = math.log2(opts.alignment), .flags = opts.flags, @@ -2846,8 +2846,8 @@ fn growSection(self: *MachO, sect_id: u8, needed_size: u64) !void { } header.size = needed_size; - segment.filesize = mem.alignForwardGeneric(u64, needed_size, self.page_size); - segment.vmsize = mem.alignForwardGeneric(u64, needed_size, self.page_size); + segment.filesize = mem.alignForward(u64, needed_size, self.page_size); + segment.vmsize = mem.alignForward(u64, needed_size, self.page_size); } fn growSectionVirtualMemory(self: *MachO, sect_id: u8, needed_size: u64) !void { @@ -2855,7 +2855,7 @@ fn growSectionVirtualMemory(self: *MachO, sect_id: u8, needed_size: u64) !void { const segment = self.getSegmentPtr(sect_id); const increased_size = padToIdeal(needed_size); const old_aligned_end = segment.vmaddr + segment.vmsize; - const new_aligned_end = segment.vmaddr + mem.alignForwardGeneric(u64, increased_size, self.page_size); + const new_aligned_end = segment.vmaddr + mem.alignForward(u64, increased_size, self.page_size); const diff = new_aligned_end - old_aligned_end; log.debug("shifting every segment after {s},{s} in virtual memory by {x}", .{ header.segName(), @@ -2927,7 +2927,7 @@ fn allocateAtom(self: *MachO, 
atom_index: Atom.Index, new_atom_size: u64, alignm const ideal_capacity_end_vaddr = math.add(u64, sym.n_value, ideal_capacity) catch ideal_capacity; const capacity_end_vaddr = sym.n_value + capacity; const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity; - const new_start_vaddr = mem.alignBackwardGeneric(u64, new_start_vaddr_unaligned, alignment); + const new_start_vaddr = mem.alignBackward(u64, new_start_vaddr_unaligned, alignment); if (new_start_vaddr < ideal_capacity_end_vaddr) { // Additional bookkeeping here to notice if this free list node // should be deleted because the atom that it points to has grown to take up @@ -2956,11 +2956,11 @@ fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignm const last_symbol = last.getSymbol(self); const ideal_capacity = if (requires_padding) padToIdeal(last.size) else last.size; const ideal_capacity_end_vaddr = last_symbol.n_value + ideal_capacity; - const new_start_vaddr = mem.alignForwardGeneric(u64, ideal_capacity_end_vaddr, alignment); + const new_start_vaddr = mem.alignForward(u64, ideal_capacity_end_vaddr, alignment); atom_placement = last_index; break :blk new_start_vaddr; } else { - break :blk mem.alignForwardGeneric(u64, segment.vmaddr, alignment); + break :blk mem.alignForward(u64, segment.vmaddr, alignment); } }; @@ -3034,17 +3034,17 @@ fn writeLinkeditSegmentData(self: *MachO) !void { for (self.segments.items, 0..) |segment, id| { if (self.linkedit_segment_cmd_index.? 
== @intCast(u8, id)) continue; if (seg.vmaddr < segment.vmaddr + segment.vmsize) { - seg.vmaddr = mem.alignForwardGeneric(u64, segment.vmaddr + segment.vmsize, self.page_size); + seg.vmaddr = mem.alignForward(u64, segment.vmaddr + segment.vmsize, self.page_size); } if (seg.fileoff < segment.fileoff + segment.filesize) { - seg.fileoff = mem.alignForwardGeneric(u64, segment.fileoff + segment.filesize, self.page_size); + seg.fileoff = mem.alignForward(u64, segment.fileoff + segment.filesize, self.page_size); } } try self.writeDyldInfoData(); try self.writeSymtabs(); - seg.vmsize = mem.alignForwardGeneric(u64, seg.filesize, self.page_size); + seg.vmsize = mem.alignForward(u64, seg.filesize, self.page_size); } fn collectRebaseDataFromTableSection(self: *MachO, sect_id: u8, rebase: *Rebase, table: anytype) !void { @@ -3236,17 +3236,17 @@ fn writeDyldInfoData(self: *MachO) !void { assert(mem.isAlignedGeneric(u64, link_seg.fileoff, @alignOf(u64))); const rebase_off = link_seg.fileoff; const rebase_size = rebase.size(); - const rebase_size_aligned = mem.alignForwardGeneric(u64, rebase_size, @alignOf(u64)); + const rebase_size_aligned = mem.alignForward(u64, rebase_size, @alignOf(u64)); log.debug("writing rebase info from 0x{x} to 0x{x}", .{ rebase_off, rebase_off + rebase_size_aligned }); const bind_off = rebase_off + rebase_size_aligned; const bind_size = bind.size(); - const bind_size_aligned = mem.alignForwardGeneric(u64, bind_size, @alignOf(u64)); + const bind_size_aligned = mem.alignForward(u64, bind_size, @alignOf(u64)); log.debug("writing bind info from 0x{x} to 0x{x}", .{ bind_off, bind_off + bind_size_aligned }); const lazy_bind_off = bind_off + bind_size_aligned; const lazy_bind_size = lazy_bind.size(); - const lazy_bind_size_aligned = mem.alignForwardGeneric(u64, lazy_bind_size, @alignOf(u64)); + const lazy_bind_size_aligned = mem.alignForward(u64, lazy_bind_size, @alignOf(u64)); log.debug("writing lazy bind info from 0x{x} to 0x{x}", .{ lazy_bind_off, 
lazy_bind_off + lazy_bind_size_aligned, @@ -3254,7 +3254,7 @@ fn writeDyldInfoData(self: *MachO) !void { const export_off = lazy_bind_off + lazy_bind_size_aligned; const export_size = trie.size; - const export_size_aligned = mem.alignForwardGeneric(u64, export_size, @alignOf(u64)); + const export_size_aligned = mem.alignForward(u64, export_size, @alignOf(u64)); log.debug("writing export trie from 0x{x} to 0x{x}", .{ export_off, export_off + export_size_aligned }); const needed_size = math.cast(usize, export_off + export_size_aligned - rebase_off) orelse @@ -3412,7 +3412,7 @@ fn writeStrtab(self: *MachO) !void { const offset = seg.fileoff + seg.filesize; assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64))); const needed_size = self.strtab.buffer.items.len; - const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64)); + const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64)); seg.filesize = offset + needed_size_aligned - seg.fileoff; log.debug("writing string table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned }); @@ -3447,7 +3447,7 @@ fn writeDysymtab(self: *MachO, ctx: SymtabCtx) !void { const offset = seg.fileoff + seg.filesize; assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64))); const needed_size = nindirectsyms * @sizeOf(u32); - const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64)); + const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64)); seg.filesize = offset + needed_size_aligned - seg.fileoff; log.debug("writing indirect symbol table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned }); @@ -3514,10 +3514,10 @@ fn writeCodeSignaturePadding(self: *MachO, code_sig: *CodeSignature) !void { const seg = self.getLinkeditSegmentPtr(); // Code signature data has to be 16-bytes aligned for Apple tools to recognize the file // 
https://github.com/opensource-apple/cctools/blob/fdb4825f303fd5c0751be524babd32958181b3ed/libstuff/checkout.c#L271 - const offset = mem.alignForwardGeneric(u64, seg.fileoff + seg.filesize, 16); + const offset = mem.alignForward(u64, seg.fileoff + seg.filesize, 16); const needed_size = code_sig.estimateSize(offset); seg.filesize = offset + needed_size - seg.fileoff; - seg.vmsize = mem.alignForwardGeneric(u64, seg.filesize, self.page_size); + seg.vmsize = mem.alignForward(u64, seg.filesize, self.page_size); log.debug("writing code signature padding from 0x{x} to 0x{x}", .{ offset, offset + needed_size }); // Pad out the space. We need to do this to calculate valid hashes for everything in the file // except for code signature data. @@ -3630,7 +3630,7 @@ fn allocatedSize(self: *MachO, start: u64) u64 { fn findFreeSpace(self: *MachO, object_size: u64, min_alignment: u32) u64 { var start: u64 = 0; while (self.detectAllocCollision(start, object_size)) |item_end| { - start = mem.alignForwardGeneric(u64, item_end, min_alignment); + start = mem.alignForward(u64, item_end, min_alignment); } return start; } diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig index 4709560ba7..02511dbe29 100644 --- a/src/link/MachO/CodeSignature.zig +++ b/src/link/MachO/CodeSignature.zig @@ -282,7 +282,7 @@ pub fn writeAdhocSignature( self.code_directory.inner.execSegFlags = if (opts.output_mode == .Exe) macho.CS_EXECSEG_MAIN_BINARY else 0; self.code_directory.inner.codeLimit = opts.file_size; - const total_pages = @intCast(u32, mem.alignForward(opts.file_size, self.page_size) / self.page_size); + const total_pages = @intCast(u32, mem.alignForward(usize, opts.file_size, self.page_size) / self.page_size); try self.code_directory.code_slots.ensureTotalCapacityPrecise(gpa, total_pages); self.code_directory.code_slots.items.len = total_pages; @@ -357,7 +357,7 @@ fn parallelHash( ) !void { var wg: WaitGroup = .{}; - const total_num_chunks = mem.alignForward(file_size, 
self.page_size) / self.page_size; + const total_num_chunks = mem.alignForward(usize, file_size, self.page_size) / self.page_size; assert(self.code_directory.code_slots.items.len >= total_num_chunks); const buffer = try gpa.alloc(u8, self.page_size * total_num_chunks); @@ -421,7 +421,7 @@ pub fn size(self: CodeSignature) u32 { pub fn estimateSize(self: CodeSignature, file_size: u64) u32 { var ssize: u64 = @sizeOf(macho.SuperBlob) + @sizeOf(macho.BlobIndex) + self.code_directory.size(); // Approx code slots - const total_pages = mem.alignForwardGeneric(u64, file_size, self.page_size) / self.page_size; + const total_pages = mem.alignForward(u64, file_size, self.page_size) / self.page_size; ssize += total_pages * hash_size; var n_special_slots: u32 = 0; if (self.requirements) |req| { @@ -436,7 +436,7 @@ pub fn estimateSize(self: CodeSignature, file_size: u64) u32 { ssize += @sizeOf(macho.BlobIndex) + sig.size(); } ssize += n_special_slots * hash_size; - return @intCast(u32, mem.alignForwardGeneric(u64, ssize, @sizeOf(u64))); + return @intCast(u32, mem.alignForward(u64, ssize, @sizeOf(u64))); } pub fn clear(self: *CodeSignature, allocator: Allocator) void { diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig index 24a0c9ea34..fdb8c9c816 100644 --- a/src/link/MachO/DebugSymbols.zig +++ b/src/link/MachO/DebugSymbols.zig @@ -68,7 +68,7 @@ pub fn populateMissingMetadata(self: *DebugSymbols) !void { const off = @intCast(u64, self.page_size); const ideal_size: u16 = 200 + 128 + 160 + 250; - const needed_size = mem.alignForwardGeneric(u64, padToIdeal(ideal_size), self.page_size); + const needed_size = mem.alignForward(u64, padToIdeal(ideal_size), self.page_size); log.debug("found __DWARF segment free space 0x{x} to 0x{x}", .{ off, off + needed_size }); @@ -213,7 +213,7 @@ fn findFreeSpace(self: *DebugSymbols, object_size: u64, min_alignment: u64) u64 const segment = self.getDwarfSegmentPtr(); var offset: u64 = segment.fileoff; while 
(self.detectAllocCollision(offset, object_size)) |item_end| { - offset = mem.alignForwardGeneric(u64, item_end, min_alignment); + offset = mem.alignForward(u64, item_end, min_alignment); } return offset; } @@ -355,18 +355,18 @@ fn finalizeDwarfSegment(self: *DebugSymbols, macho_file: *MachO) void { file_size = @max(file_size, header.offset + header.size); } - const aligned_size = mem.alignForwardGeneric(u64, file_size, self.page_size); + const aligned_size = mem.alignForward(u64, file_size, self.page_size); dwarf_segment.vmaddr = base_vmaddr; dwarf_segment.filesize = aligned_size; dwarf_segment.vmsize = aligned_size; const linkedit = self.getLinkeditSegmentPtr(); - linkedit.vmaddr = mem.alignForwardGeneric( + linkedit.vmaddr = mem.alignForward( u64, dwarf_segment.vmaddr + aligned_size, self.page_size, ); - linkedit.fileoff = mem.alignForwardGeneric( + linkedit.fileoff = mem.alignForward( u64, dwarf_segment.fileoff + aligned_size, self.page_size, @@ -458,7 +458,7 @@ fn writeLinkeditSegmentData(self: *DebugSymbols, macho_file: *MachO) !void { try self.writeStrtab(); const seg = &self.segments.items[self.linkedit_segment_cmd_index.?]; - const aligned_size = mem.alignForwardGeneric(u64, seg.filesize, self.page_size); + const aligned_size = mem.alignForward(u64, seg.filesize, self.page_size); seg.vmsize = aligned_size; } @@ -497,7 +497,7 @@ fn writeSymtab(self: *DebugSymbols, macho_file: *MachO) !void { const nsyms = nlocals + nexports; const seg = &self.segments.items[self.linkedit_segment_cmd_index.?]; - const offset = mem.alignForwardGeneric(u64, seg.fileoff, @alignOf(macho.nlist_64)); + const offset = mem.alignForward(u64, seg.fileoff, @alignOf(macho.nlist_64)); const needed_size = nsyms * @sizeOf(macho.nlist_64); seg.filesize = offset + needed_size - seg.fileoff; @@ -522,8 +522,8 @@ fn writeStrtab(self: *DebugSymbols) !void { const seg = &self.segments.items[self.linkedit_segment_cmd_index.?]; const symtab_size = @intCast(u32, self.symtab_cmd.nsyms * 
@sizeOf(macho.nlist_64)); - const offset = mem.alignForwardGeneric(u64, self.symtab_cmd.symoff + symtab_size, @alignOf(u64)); - const needed_size = mem.alignForwardGeneric(u64, self.strtab.buffer.items.len, @alignOf(u64)); + const offset = mem.alignForward(u64, self.symtab_cmd.symoff + symtab_size, @alignOf(u64)); + const needed_size = mem.alignForward(u64, self.strtab.buffer.items.len, @alignOf(u64)); seg.filesize = offset + needed_size - seg.fileoff; self.symtab_cmd.stroff = @intCast(u32, offset); diff --git a/src/link/MachO/load_commands.zig b/src/link/MachO/load_commands.zig index 228a1ccfaf..5111f53f2a 100644 --- a/src/link/MachO/load_commands.zig +++ b/src/link/MachO/load_commands.zig @@ -17,7 +17,7 @@ pub const default_dyld_path: [*:0]const u8 = "/usr/lib/dyld"; fn calcInstallNameLen(cmd_size: u64, name: []const u8, assume_max_path_len: bool) u64 { const darwin_path_max = 1024; const name_len = if (assume_max_path_len) darwin_path_max else name.len + 1; - return mem.alignForwardGeneric(u64, cmd_size + name_len, @alignOf(u64)); + return mem.alignForward(u64, cmd_size + name_len, @alignOf(u64)); } const CalcLCsSizeCtx = struct { @@ -149,7 +149,7 @@ pub fn calcNumOfLCs(lc_buffer: []const u8) u32 { pub fn writeDylinkerLC(lc_writer: anytype) !void { const name_len = mem.sliceTo(default_dyld_path, 0).len; - const cmdsize = @intCast(u32, mem.alignForwardGeneric( + const cmdsize = @intCast(u32, mem.alignForward( u64, @sizeOf(macho.dylinker_command) + name_len, @sizeOf(u64), @@ -176,7 +176,7 @@ const WriteDylibLCCtx = struct { fn writeDylibLC(ctx: WriteDylibLCCtx, lc_writer: anytype) !void { const name_len = ctx.name.len + 1; - const cmdsize = @intCast(u32, mem.alignForwardGeneric( + const cmdsize = @intCast(u32, mem.alignForward( u64, @sizeOf(macho.dylib_command) + name_len, @sizeOf(u64), @@ -253,7 +253,7 @@ pub fn writeRpathLCs(gpa: Allocator, options: *const link.Options, lc_writer: an while (try it.next()) |rpath| { const rpath_len = rpath.len + 1; - const 
cmdsize = @intCast(u32, mem.alignForwardGeneric( + const cmdsize = @intCast(u32, mem.alignForward( u64, @sizeOf(macho.rpath_command) + rpath_len, @sizeOf(u64), diff --git a/src/link/MachO/thunks.zig b/src/link/MachO/thunks.zig index 48d1faac6b..7895190005 100644 --- a/src/link/MachO/thunks.zig +++ b/src/link/MachO/thunks.zig @@ -109,7 +109,7 @@ pub fn createThunks(zld: *Zld, sect_id: u8) !void { while (true) { const atom = zld.getAtom(group_end); - offset = mem.alignForwardGeneric(u64, offset, try math.powi(u32, 2, atom.alignment)); + offset = mem.alignForward(u64, offset, try math.powi(u32, 2, atom.alignment)); const sym = zld.getSymbolPtr(atom.getSymbolWithLoc()); sym.n_value = offset; @@ -153,7 +153,7 @@ pub fn createThunks(zld: *Zld, sect_id: u8) !void { } else break; } - offset = mem.alignForwardGeneric(u64, offset, Thunk.getAlignment()); + offset = mem.alignForward(u64, offset, Thunk.getAlignment()); allocateThunk(zld, thunk_index, offset, header); offset += zld.thunks.items[thunk_index].getSize(); @@ -193,7 +193,7 @@ fn allocateThunk( var offset = base_offset; while (true) { const atom = zld.getAtom(atom_index); - offset = mem.alignForwardGeneric(u64, offset, Thunk.getAlignment()); + offset = mem.alignForward(u64, offset, Thunk.getAlignment()); const sym = zld.getSymbolPtr(atom.getSymbolWithLoc()); sym.n_value = offset; diff --git a/src/link/MachO/zld.zig b/src/link/MachO/zld.zig index 4f7e615c79..7902d67d87 100644 --- a/src/link/MachO/zld.zig +++ b/src/link/MachO/zld.zig @@ -1207,7 +1207,7 @@ pub const Zld = struct { fn createSegments(self: *Zld) !void { const pagezero_vmsize = self.options.pagezero_size orelse MachO.default_pagezero_vmsize; - const aligned_pagezero_vmsize = mem.alignBackwardGeneric(u64, pagezero_vmsize, self.page_size); + const aligned_pagezero_vmsize = mem.alignBackward(u64, pagezero_vmsize, self.page_size); if (self.options.output_mode != .Lib and aligned_pagezero_vmsize > 0) { if (aligned_pagezero_vmsize != pagezero_vmsize) { 
log.warn("requested __PAGEZERO size (0x{x}) is not page aligned", .{pagezero_vmsize}); @@ -1466,7 +1466,7 @@ pub const Zld = struct { while (true) { const atom = self.getAtom(atom_index); const atom_alignment = try math.powi(u32, 2, atom.alignment); - const atom_offset = mem.alignForwardGeneric(u64, header.size, atom_alignment); + const atom_offset = mem.alignForward(u64, header.size, atom_alignment); const padding = atom_offset - header.size; const sym = self.getSymbolPtr(atom.getSymbolWithLoc()); @@ -1534,7 +1534,7 @@ pub const Zld = struct { const slice = self.sections.slice(); for (slice.items(.header)[indexes.start..indexes.end], 0..) |*header, sect_id| { const alignment = try math.powi(u32, 2, header.@"align"); - const start_aligned = mem.alignForwardGeneric(u64, start, alignment); + const start_aligned = mem.alignForward(u64, start, alignment); const n_sect = @intCast(u8, indexes.start + sect_id + 1); header.offset = if (header.isZerofill()) @@ -1598,8 +1598,8 @@ pub const Zld = struct { segment.vmsize = start; } - segment.filesize = mem.alignForwardGeneric(u64, segment.filesize, self.page_size); - segment.vmsize = mem.alignForwardGeneric(u64, segment.vmsize, self.page_size); + segment.filesize = mem.alignForward(u64, segment.filesize, self.page_size); + segment.vmsize = mem.alignForward(u64, segment.vmsize, self.page_size); } const InitSectionOpts = struct { @@ -1709,7 +1709,7 @@ pub const Zld = struct { try self.writeSymtabs(); const seg = self.getLinkeditSegmentPtr(); - seg.vmsize = mem.alignForwardGeneric(u64, seg.filesize, self.page_size); + seg.vmsize = mem.alignForward(u64, seg.filesize, self.page_size); } fn collectRebaseDataFromContainer( @@ -2112,17 +2112,17 @@ pub const Zld = struct { assert(mem.isAlignedGeneric(u64, link_seg.fileoff, @alignOf(u64))); const rebase_off = link_seg.fileoff; const rebase_size = rebase.size(); - const rebase_size_aligned = mem.alignForwardGeneric(u64, rebase_size, @alignOf(u64)); + const rebase_size_aligned = 
mem.alignForward(u64, rebase_size, @alignOf(u64)); log.debug("writing rebase info from 0x{x} to 0x{x}", .{ rebase_off, rebase_off + rebase_size_aligned }); const bind_off = rebase_off + rebase_size_aligned; const bind_size = bind.size(); - const bind_size_aligned = mem.alignForwardGeneric(u64, bind_size, @alignOf(u64)); + const bind_size_aligned = mem.alignForward(u64, bind_size, @alignOf(u64)); log.debug("writing bind info from 0x{x} to 0x{x}", .{ bind_off, bind_off + bind_size_aligned }); const lazy_bind_off = bind_off + bind_size_aligned; const lazy_bind_size = lazy_bind.size(); - const lazy_bind_size_aligned = mem.alignForwardGeneric(u64, lazy_bind_size, @alignOf(u64)); + const lazy_bind_size_aligned = mem.alignForward(u64, lazy_bind_size, @alignOf(u64)); log.debug("writing lazy bind info from 0x{x} to 0x{x}", .{ lazy_bind_off, lazy_bind_off + lazy_bind_size_aligned, @@ -2130,7 +2130,7 @@ pub const Zld = struct { const export_off = lazy_bind_off + lazy_bind_size_aligned; const export_size = trie.size; - const export_size_aligned = mem.alignForwardGeneric(u64, export_size, @alignOf(u64)); + const export_size_aligned = mem.alignForward(u64, export_size, @alignOf(u64)); log.debug("writing export trie from 0x{x} to 0x{x}", .{ export_off, export_off + export_size_aligned }); const needed_size = math.cast(usize, export_off + export_size_aligned - rebase_off) orelse @@ -2268,7 +2268,7 @@ pub const Zld = struct { const offset = link_seg.fileoff + link_seg.filesize; assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64))); const needed_size = buffer.items.len; - const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64)); + const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64)); const padding = math.cast(usize, needed_size_aligned - needed_size) orelse return error.Overflow; if (padding > 0) { try buffer.ensureUnusedCapacity(padding); @@ -2347,7 +2347,7 @@ pub const Zld = struct { const offset = seg.fileoff + seg.filesize; 
assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64))); const needed_size = out_dice.items.len * @sizeOf(macho.data_in_code_entry); - const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64)); + const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64)); seg.filesize = offset + needed_size_aligned - seg.fileoff; const buffer = try self.gpa.alloc(u8, math.cast(usize, needed_size_aligned) orelse return error.Overflow); @@ -2480,7 +2480,7 @@ pub const Zld = struct { const offset = seg.fileoff + seg.filesize; assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64))); const needed_size = self.strtab.buffer.items.len; - const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64)); + const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64)); seg.filesize = offset + needed_size_aligned - seg.fileoff; log.debug("writing string table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned }); @@ -2515,7 +2515,7 @@ pub const Zld = struct { const offset = seg.fileoff + seg.filesize; assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64))); const needed_size = nindirectsyms * @sizeOf(u32); - const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64)); + const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64)); seg.filesize = offset + needed_size_aligned - seg.fileoff; log.debug("writing indirect symbol table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned }); @@ -2690,7 +2690,7 @@ pub const Zld = struct { for (subsections[0..count]) |cut| { const size = cut.end - cut.start; - const num_chunks = mem.alignForward(size, chunk_size) / chunk_size; + const num_chunks = mem.alignForward(usize, size, chunk_size) / chunk_size; var i: usize = 0; while (i < num_chunks) : (i += 1) { @@ -2725,10 +2725,10 @@ pub const Zld = struct { const seg = self.getLinkeditSegmentPtr(); // Code signature data has to be 16-bytes aligned for Apple 
tools to recognize the file // https://github.com/opensource-apple/cctools/blob/fdb4825f303fd5c0751be524babd32958181b3ed/libstuff/checkout.c#L271 - const offset = mem.alignForwardGeneric(u64, seg.fileoff + seg.filesize, 16); + const offset = mem.alignForward(u64, seg.fileoff + seg.filesize, 16); const needed_size = code_sig.estimateSize(offset); seg.filesize = offset + needed_size - seg.fileoff; - seg.vmsize = mem.alignForwardGeneric(u64, seg.filesize, self.page_size); + seg.vmsize = mem.alignForward(u64, seg.filesize, self.page_size); log.debug("writing code signature padding from 0x{x} to 0x{x}", .{ offset, offset + needed_size }); // Pad out the space. We need to do this to calculate valid hashes for everything in the file // except for code signature data. diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index f911074473..2d2930be8c 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -2118,7 +2118,7 @@ fn allocateAtoms(wasm: *Wasm) !void { } } } - offset = std.mem.alignForwardGeneric(u32, offset, atom.alignment); + offset = std.mem.alignForward(u32, offset, atom.alignment); atom.offset = offset; log.debug("Atom '{s}' allocated from 0x{x:0>8} to 0x{x:0>8} size={d}", .{ symbol_loc.getName(wasm), @@ -2129,7 +2129,7 @@ fn allocateAtoms(wasm: *Wasm) !void { offset += atom.size; atom_index = atom.prev orelse break; } - segment.size = std.mem.alignForwardGeneric(u32, offset, segment.alignment); + segment.size = std.mem.alignForward(u32, offset, segment.alignment); } } @@ -2731,7 +2731,7 @@ fn setupMemory(wasm: *Wasm) !void { const is_obj = wasm.base.options.output_mode == .Obj; if (place_stack_first and !is_obj) { - memory_ptr = std.mem.alignForwardGeneric(u64, memory_ptr, stack_alignment); + memory_ptr = std.mem.alignForward(u64, memory_ptr, stack_alignment); memory_ptr += stack_size; // We always put the stack pointer global at index 0 wasm.wasm_globals.items[0].init.i32_const = @bitCast(i32, @intCast(u32, memory_ptr)); @@ -2741,7 +2741,7 @@ fn 
setupMemory(wasm: *Wasm) !void { var data_seg_it = wasm.data_segments.iterator(); while (data_seg_it.next()) |entry| { const segment = &wasm.segments.items[entry.value_ptr.*]; - memory_ptr = std.mem.alignForwardGeneric(u64, memory_ptr, segment.alignment); + memory_ptr = std.mem.alignForward(u64, memory_ptr, segment.alignment); // set TLS-related symbols if (mem.eql(u8, entry.key_ptr.*, ".tdata")) { @@ -2779,7 +2779,7 @@ fn setupMemory(wasm: *Wasm) !void { // create the memory init flag which is used by the init memory function if (wasm.base.options.shared_memory and wasm.hasPassiveInitializationSegments()) { // align to pointer size - memory_ptr = mem.alignForwardGeneric(u64, memory_ptr, 4); + memory_ptr = mem.alignForward(u64, memory_ptr, 4); const loc = try wasm.createSyntheticSymbol("__wasm_init_memory_flag", .data); const sym = loc.getSymbol(wasm); sym.virtual_address = @intCast(u32, memory_ptr); @@ -2787,7 +2787,7 @@ fn setupMemory(wasm: *Wasm) !void { } if (!place_stack_first and !is_obj) { - memory_ptr = std.mem.alignForwardGeneric(u64, memory_ptr, stack_alignment); + memory_ptr = std.mem.alignForward(u64, memory_ptr, stack_alignment); memory_ptr += stack_size; wasm.wasm_globals.items[0].init.i32_const = @bitCast(i32, @intCast(u32, memory_ptr)); } @@ -2796,7 +2796,7 @@ fn setupMemory(wasm: *Wasm) !void { // We must set its virtual address so it can be used in relocations. 
if (wasm.findGlobalSymbol("__heap_base")) |loc| { const symbol = loc.getSymbol(wasm); - symbol.virtual_address = @intCast(u32, mem.alignForwardGeneric(u64, memory_ptr, heap_alignment)); + symbol.virtual_address = @intCast(u32, mem.alignForward(u64, memory_ptr, heap_alignment)); } // Setup the max amount of pages @@ -2818,7 +2818,7 @@ fn setupMemory(wasm: *Wasm) !void { } memory_ptr = initial_memory; } - memory_ptr = mem.alignForwardGeneric(u64, memory_ptr, std.wasm.page_size); + memory_ptr = mem.alignForward(u64, memory_ptr, std.wasm.page_size); // In case we do not import memory, but define it ourselves, // set the minimum amount of pages on the memory section. wasm.memories.limits.min = @intCast(u32, memory_ptr / page_size); diff --git a/src/objcopy.zig b/src/objcopy.zig index c5d0e8dcb3..014208cc0d 100644 --- a/src/objcopy.zig +++ b/src/objcopy.zig @@ -1024,7 +1024,7 @@ fn ElfFile(comptime is_64: bool) type { dest.sh_size = @intCast(Elf_OffSize, data.len); const addralign = if (src.sh_addralign == 0 or dest.sh_type == elf.SHT_NOBITS) 1 else src.sh_addralign; - dest.sh_offset = std.mem.alignForwardGeneric(Elf_OffSize, eof_offset, addralign); + dest.sh_offset = std.mem.alignForward(Elf_OffSize, eof_offset, addralign); if (src.sh_offset != dest.sh_offset and section.segment != null and update.action != .empty and dest.sh_type != elf.SHT_NOTE) { if (src.sh_offset > dest.sh_offset) { dest.sh_offset = src.sh_offset; // add padding to avoid modifing the program segments @@ -1085,7 +1085,7 @@ fn ElfFile(comptime is_64: bool) type { // add a ".gnu_debuglink" section if (options.debuglink) |link| { const payload = payload: { - const crc_offset = std.mem.alignForward(link.name.len + 1, 4); + const crc_offset = std.mem.alignForward(usize, link.name.len + 1, 4); const buf = try allocator.alignedAlloc(u8, 4, crc_offset + 4); @memcpy(buf[0..link.name.len], link.name); @memset(buf[link.name.len..crc_offset], 0); @@ -1117,7 +1117,7 @@ fn ElfFile(comptime is_64: bool) type { // 
write the section header at the tail { - const offset = std.mem.alignForwardGeneric(Elf_OffSize, eof_offset, @alignOf(Elf_Shdr)); + const offset = std.mem.alignForward(Elf_OffSize, eof_offset, @alignOf(Elf_Shdr)); const data = std.mem.sliceAsBytes(updated_section_header); assert(data.len == @as(usize, updated_elf_header.e_shentsize) * new_shnum); diff --git a/src/type.zig b/src/type.zig index bb82a50682..1c3435dafd 100644 --- a/src/type.zig +++ b/src/type.zig @@ -1339,7 +1339,7 @@ pub const Type = struct { .storage = .{ .lazy_size = ty.toIntern() }, } })).toValue() }, }; - const result = std.mem.alignForwardGeneric(u32, total_bytes, alignment); + const result = std.mem.alignForward(u32, total_bytes, alignment); return AbiSizeAdvanced{ .scalar = result }; }, @@ -1380,14 +1380,14 @@ pub const Type = struct { var size: u64 = 0; if (code_align > payload_align) { size += code_size; - size = std.mem.alignForwardGeneric(u64, size, payload_align); + size = std.mem.alignForward(u64, size, payload_align); size += payload_size; - size = std.mem.alignForwardGeneric(u64, size, code_align); + size = std.mem.alignForward(u64, size, code_align); } else { size += payload_size; - size = std.mem.alignForwardGeneric(u64, size, code_align); + size = std.mem.alignForward(u64, size, code_align); size += code_size; - size = std.mem.alignForwardGeneric(u64, size, payload_align); + size = std.mem.alignForward(u64, size, payload_align); } return AbiSizeAdvanced{ .scalar = size }; }, @@ -1595,7 +1595,7 @@ pub const Type = struct { fn intAbiSize(bits: u16, target: Target) u64 { const alignment = intAbiAlignment(bits, target); - return std.mem.alignForwardGeneric(u64, @intCast(u16, (@as(u17, bits) + 7) / 8), alignment); + return std.mem.alignForward(u64, @intCast(u16, (@as(u17, bits) + 7) / 8), alignment); } fn intAbiAlignment(bits: u16, target: Target) u32 { @@ -3194,7 +3194,7 @@ pub const Type = struct { const field_align = field.alignment(mod, it.struct_obj.layout); it.big_align = 
@max(it.big_align, field_align); - const field_offset = std.mem.alignForwardGeneric(u64, it.offset, field_align); + const field_offset = std.mem.alignForward(u64, it.offset, field_align); it.offset = field_offset + field.ty.abiSize(mod); return FieldOffset{ .field = i, .offset = field_offset }; } @@ -3223,7 +3223,7 @@ pub const Type = struct { return field_offset.offset; } - return std.mem.alignForwardGeneric(u64, it.offset, @max(it.big_align, 1)); + return std.mem.alignForward(u64, it.offset, @max(it.big_align, 1)); }, .anon_struct_type => |tuple| { @@ -3239,11 +3239,11 @@ pub const Type = struct { const field_align = field_ty.toType().abiAlignment(mod); big_align = @max(big_align, field_align); - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + offset = std.mem.alignForward(u64, offset, field_align); if (i == index) return offset; offset += field_ty.toType().abiSize(mod); } - offset = std.mem.alignForwardGeneric(u64, offset, @max(big_align, 1)); + offset = std.mem.alignForward(u64, offset, @max(big_align, 1)); return offset; }, @@ -3254,7 +3254,7 @@ pub const Type = struct { const layout = union_obj.getLayout(mod, true); if (layout.tag_align >= layout.payload_align) { // {Tag, Payload} - return std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align); + return std.mem.alignForward(u64, layout.tag_size, layout.payload_align); } else { // {Payload, Tag} return 0; -- cgit v1.2.3