From 10a4c2269d110d636e7817677fb50c6f418bff34 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 29 Apr 2023 17:57:23 -0400 Subject: x86_64: enable normal start/test_runner logic on more targets --- test/src/Cases.zig | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'test/src') diff --git a/test/src/Cases.zig b/test/src/Cases.zig index defe248fe6..4b023f45b0 100644 --- a/test/src/Cases.zig +++ b/test/src/Cases.zig @@ -396,6 +396,12 @@ fn addFromDirInner( // Other backends don't support new liveness format continue; } + if (backend == .stage2 and target.getOsTag() == .macos and + target.getCpuArch() == .x86_64 and builtin.cpu.arch == .aarch64) + { + // Rosetta has issues with ZLD + continue; + } const next = ctx.cases.items.len; try ctx.cases.append(.{ -- cgit v1.2.3 From 3f3b1a6808113fd5f9b2cec1033009cbb17dc969 Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Wed, 3 May 2023 11:49:55 +0300 Subject: std.Build: use Step.* instead of *Step Follow up to 13eb7251d37759bd47403db304c6120c706fe353 --- build.zig | 12 +- lib/init-exe/build.zig | 2 +- lib/std/Build.zig | 150 +++++++++++++------------ lib/std/Build/Step.zig | 67 ++++++++--- lib/std/Build/Step/CheckFile.zig | 12 +- lib/std/Build/Step/CheckObject.zig | 28 ++--- lib/std/Build/Step/Compile.zig | 168 ++++++++++++++-------------- lib/std/Build/Step/ConfigHeader.zig | 26 ++--- lib/std/Build/Step/Fmt.zig | 8 +- lib/std/Build/Step/InstallArtifact.zig | 17 ++- lib/std/Build/Step/InstallFile.zig | 8 +- lib/std/Build/Step/ObjCopy.zig | 17 ++- lib/std/Build/Step/Options.zig | 25 ++--- lib/std/Build/Step/RemoveDir.zig | 8 +- lib/std/Build/Step/Run.zig | 108 +++++++++--------- lib/std/Build/Step/TranslateC.zig | 26 ++--- lib/std/Build/Step/WriteFile.zig | 38 +++---- test/link/macho/dead_strip/build.zig | 2 +- test/link/macho/dead_strip_dylibs/build.zig | 2 +- test/link/macho/headerpad/build.zig | 2 +- test/link/macho/search_strategy/build.zig | 2 +- test/link/macho/unwind_info/build.zig | 2 +- 
test/link/macho/uuid/build.zig | 3 +- test/src/Cases.zig | 2 +- test/src/StackTrace.zig | 2 +- test/standalone/install_raw_hex/build.zig | 1 - test/tests.zig | 2 +- 27 files changed, 379 insertions(+), 361 deletions(-) (limited to 'test/src') diff --git a/build.zig b/build.zig index 78345ac9e4..208d06fe1d 100644 --- a/build.zig +++ b/build.zig @@ -533,7 +533,7 @@ fn addCompilerStep( b: *std.Build, optimize: std.builtin.OptimizeMode, target: std.zig.CrossTarget, -) *std.Build.CompileStep { +) *std.Build.Step.Compile { const exe = b.addExecutable(.{ .name = "zig", .root_source_file = .{ .path = "src/main.zig" }, @@ -561,7 +561,7 @@ const exe_cflags = [_][]const u8{ fn addCmakeCfgOptionsToExe( b: *std.Build, cfg: CMakeConfig, - exe: *std.Build.CompileStep, + exe: *std.Build.Step.Compile, use_zig_libcxx: bool, ) !void { if (exe.target.isDarwin()) { @@ -640,7 +640,7 @@ fn addCmakeCfgOptionsToExe( } } -fn addStaticLlvmOptionsToExe(exe: *std.Build.CompileStep) !void { +fn addStaticLlvmOptionsToExe(exe: *std.Build.Step.Compile) !void { // Adds the Zig C++ sources which both stage1 and stage2 need. 
// // We need this because otherwise zig_clang_cc1_main.cpp ends up pulling @@ -679,7 +679,7 @@ fn addStaticLlvmOptionsToExe(exe: *std.Build.CompileStep) !void { fn addCxxKnownPath( b: *std.Build, ctx: CMakeConfig, - exe: *std.Build.CompileStep, + exe: *std.Build.Step.Compile, objname: []const u8, errtxt: ?[]const u8, need_cpp_includes: bool, @@ -709,7 +709,7 @@ fn addCxxKnownPath( } } -fn addCMakeLibraryList(exe: *std.Build.CompileStep, list: []const u8) void { +fn addCMakeLibraryList(exe: *std.Build.Step.Compile, list: []const u8) void { var it = mem.tokenize(u8, list, ";"); while (it.next()) |lib| { if (mem.startsWith(u8, lib, "-l")) { @@ -723,7 +723,7 @@ fn addCMakeLibraryList(exe: *std.Build.CompileStep, list: []const u8) void { } const CMakeConfig = struct { - llvm_linkage: std.Build.CompileStep.Linkage, + llvm_linkage: std.Build.Step.Compile.Linkage, cmake_binary_dir: []const u8, cmake_prefix_path: []const u8, cmake_static_library_prefix: []const u8, diff --git a/lib/init-exe/build.zig b/lib/init-exe/build.zig index abf8654f0f..1221984190 100644 --- a/lib/init-exe/build.zig +++ b/lib/init-exe/build.zig @@ -29,7 +29,7 @@ pub fn build(b: *std.Build) void { // step when running `zig build`). b.installArtifact(exe); - // This *creates* a RunStep in the build graph, to be executed when another + // This *creates* a Run step in the build graph, to be executed when another // step is evaluated that depends on it. The next line below will establish // such a dependency. const run_cmd = b.addRunArtifact(exe); diff --git a/lib/std/Build.zig b/lib/std/Build.zig index bda50112b6..ca55d23937 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -21,27 +21,41 @@ const Build = @This(); pub const Cache = @import("Build/Cache.zig"); -/// deprecated: use `CompileStep`. -pub const LibExeObjStep = CompileStep; +/// deprecated: use `Step.Compile`. +pub const LibExeObjStep = Step.Compile; /// deprecated: use `Build`. 
pub const Builder = Build; -/// deprecated: use `InstallDirStep.Options` -pub const InstallDirectoryOptions = InstallDirStep.Options; +/// deprecated: use `Step.InstallDir.Options` +pub const InstallDirectoryOptions = Step.InstallDir.Options; pub const Step = @import("Build/Step.zig"); +/// deprecated: use `Step.CheckFile`. pub const CheckFileStep = @import("Build/Step/CheckFile.zig"); +/// deprecated: use `Step.CheckObject`. pub const CheckObjectStep = @import("Build/Step/CheckObject.zig"); +/// deprecated: use `Step.ConfigHeader`. pub const ConfigHeaderStep = @import("Build/Step/ConfigHeader.zig"); +/// deprecated: use `Step.Fmt`. pub const FmtStep = @import("Build/Step/Fmt.zig"); +/// deprecated: use `Step.InstallArtifact`. pub const InstallArtifactStep = @import("Build/Step/InstallArtifact.zig"); +/// deprecated: use `Step.InstallDir`. pub const InstallDirStep = @import("Build/Step/InstallDir.zig"); +/// deprecated: use `Step.InstallFile`. pub const InstallFileStep = @import("Build/Step/InstallFile.zig"); +/// deprecated: use `Step.ObjCopy`. pub const ObjCopyStep = @import("Build/Step/ObjCopy.zig"); +/// deprecated: use `Step.Compile`. pub const CompileStep = @import("Build/Step/Compile.zig"); +/// deprecated: use `Step.Options`. pub const OptionsStep = @import("Build/Step/Options.zig"); +/// deprecated: use `Step.RemoveDir`. pub const RemoveDirStep = @import("Build/Step/RemoveDir.zig"); +/// deprecated: use `Step.Run`. pub const RunStep = @import("Build/Step/Run.zig"); +/// deprecated: use `Step.TranslateC`. pub const TranslateCStep = @import("Build/Step/TranslateC.zig"); +/// deprecated: use `Step.WriteFile`. 
pub const WriteFileStep = @import("Build/Step/WriteFile.zig"); install_tls: TopLevelStep, @@ -442,8 +456,8 @@ pub fn resolveInstallPrefix(self: *Build, install_prefix: ?[]const u8, dir_list: self.h_dir = self.pathJoin(&h_list); } -pub fn addOptions(self: *Build) *OptionsStep { - return OptionsStep.create(self); +pub fn addOptions(self: *Build) *Step.Options { + return Step.Options.create(self); } pub const ExecutableOptions = struct { @@ -452,7 +466,7 @@ pub const ExecutableOptions = struct { version: ?std.builtin.Version = null, target: CrossTarget = .{}, optimize: std.builtin.Mode = .Debug, - linkage: ?CompileStep.Linkage = null, + linkage: ?Step.Compile.Linkage = null, max_rss: usize = 0, link_libc: ?bool = null, single_threaded: ?bool = null, @@ -460,8 +474,8 @@ pub const ExecutableOptions = struct { use_lld: ?bool = null, }; -pub fn addExecutable(b: *Build, options: ExecutableOptions) *CompileStep { - return CompileStep.create(b, .{ +pub fn addExecutable(b: *Build, options: ExecutableOptions) *Step.Compile { + return Step.Compile.create(b, .{ .name = options.name, .root_source_file = options.root_source_file, .version = options.version, @@ -489,8 +503,8 @@ pub const ObjectOptions = struct { use_lld: ?bool = null, }; -pub fn addObject(b: *Build, options: ObjectOptions) *CompileStep { - return CompileStep.create(b, .{ +pub fn addObject(b: *Build, options: ObjectOptions) *Step.Compile { + return Step.Compile.create(b, .{ .name = options.name, .root_source_file = options.root_source_file, .target = options.target, @@ -517,8 +531,8 @@ pub const SharedLibraryOptions = struct { use_lld: ?bool = null, }; -pub fn addSharedLibrary(b: *Build, options: SharedLibraryOptions) *CompileStep { - return CompileStep.create(b, .{ +pub fn addSharedLibrary(b: *Build, options: SharedLibraryOptions) *Step.Compile { + return Step.Compile.create(b, .{ .name = options.name, .root_source_file = options.root_source_file, .kind = .lib, @@ -547,8 +561,8 @@ pub const StaticLibraryOptions = 
struct { use_lld: ?bool = null, }; -pub fn addStaticLibrary(b: *Build, options: StaticLibraryOptions) *CompileStep { - return CompileStep.create(b, .{ +pub fn addStaticLibrary(b: *Build, options: StaticLibraryOptions) *Step.Compile { + return Step.Compile.create(b, .{ .name = options.name, .root_source_file = options.root_source_file, .kind = .lib, @@ -579,8 +593,8 @@ pub const TestOptions = struct { use_lld: ?bool = null, }; -pub fn addTest(b: *Build, options: TestOptions) *CompileStep { - return CompileStep.create(b, .{ +pub fn addTest(b: *Build, options: TestOptions) *Step.Compile { + return Step.Compile.create(b, .{ .name = options.name, .kind = .@"test", .root_source_file = options.root_source_file, @@ -604,8 +618,8 @@ pub const AssemblyOptions = struct { max_rss: usize = 0, }; -pub fn addAssembly(b: *Build, options: AssemblyOptions) *CompileStep { - const obj_step = CompileStep.create(b, .{ +pub fn addAssembly(b: *Build, options: AssemblyOptions) *Step.Compile { + const obj_step = Step.Compile.create(b, .{ .name = options.name, .kind = .obj, .root_source_file = null, @@ -657,25 +671,25 @@ fn moduleDependenciesToArrayHashMap(arena: Allocator, deps: []const ModuleDepend return result; } -/// Initializes a RunStep with argv, which must at least have the path to the +/// Initializes a `Step.Run` with argv, which must at least have the path to the /// executable. More command line arguments can be added with `addArg`, /// `addArgs`, and `addArtifactArg`. /// Be careful using this function, as it introduces a system dependency. -/// To run an executable built with zig build, see `CompileStep.run`. -pub fn addSystemCommand(self: *Build, argv: []const []const u8) *RunStep { +/// To run an executable built with zig build, see `Step.Compile.run`. 
+pub fn addSystemCommand(self: *Build, argv: []const []const u8) *Step.Run { assert(argv.len >= 1); - const run_step = RunStep.create(self, self.fmt("run {s}", .{argv[0]})); + const run_step = Step.Run.create(self, self.fmt("run {s}", .{argv[0]})); run_step.addArgs(argv); return run_step; } -/// Creates a `RunStep` with an executable built with `addExecutable`. -/// Add command line arguments with methods of `RunStep`. -pub fn addRunArtifact(b: *Build, exe: *CompileStep) *RunStep { +/// Creates a `Step.Run` with an executable built with `addExecutable`. +/// Add command line arguments with methods of `Step.Run`. +pub fn addRunArtifact(b: *Build, exe: *Step.Compile) *Step.Run { // It doesn't have to be native. We catch that if you actually try to run it. // Consider that this is declarative; the run step may not be run unless a user // option is supplied. - const run_step = RunStep.create(b, b.fmt("run {s}", .{exe.name})); + const run_step = Step.Run.create(b, b.fmt("run {s}", .{exe.name})); run_step.addArtifactArg(exe); if (exe.kind == .@"test") { @@ -696,14 +710,14 @@ pub fn addRunArtifact(b: *Build, exe: *CompileStep) *RunStep { /// when an option found in the input file is missing from `values`. 
pub fn addConfigHeader( b: *Build, - options: ConfigHeaderStep.Options, + options: Step.ConfigHeader.Options, values: anytype, -) *ConfigHeaderStep { +) *Step.ConfigHeader { var options_copy = options; if (options_copy.first_ret_addr == null) options_copy.first_ret_addr = @returnAddress(); - const config_header_step = ConfigHeaderStep.create(b, options_copy); + const config_header_step = Step.ConfigHeader.create(b, options_copy); config_header_step.addValues(values); return config_header_step; } @@ -734,28 +748,28 @@ pub fn dupePath(self: *Build, bytes: []const u8) []u8 { return the_copy; } -pub fn addWriteFile(self: *Build, file_path: []const u8, data: []const u8) *WriteFileStep { +pub fn addWriteFile(self: *Build, file_path: []const u8, data: []const u8) *Step.WriteFile { const write_file_step = self.addWriteFiles(); write_file_step.add(file_path, data); return write_file_step; } -pub fn addWriteFiles(b: *Build) *WriteFileStep { - return WriteFileStep.create(b); +pub fn addWriteFiles(b: *Build) *Step.WriteFile { + return Step.WriteFile.create(b); } -pub fn addRemoveDirTree(self: *Build, dir_path: []const u8) *RemoveDirStep { - const remove_dir_step = self.allocator.create(RemoveDirStep) catch @panic("OOM"); - remove_dir_step.* = RemoveDirStep.init(self, dir_path); +pub fn addRemoveDirTree(self: *Build, dir_path: []const u8) *Step.RemoveDir { + const remove_dir_step = self.allocator.create(Step.RemoveDir) catch @panic("OOM"); + remove_dir_step.* = Step.RemoveDir.init(self, dir_path); return remove_dir_step; } -pub fn addFmt(b: *Build, options: FmtStep.Options) *FmtStep { - return FmtStep.create(b, options); +pub fn addFmt(b: *Build, options: Step.Fmt.Options) *Step.Fmt { + return Step.Fmt.create(b, options); } -pub fn addTranslateC(self: *Build, options: TranslateCStep.Options) *TranslateCStep { - return TranslateCStep.create(self, options); +pub fn addTranslateC(self: *Build, options: Step.TranslateC.Options) *Step.TranslateC { + return 
Step.TranslateC.create(self, options); } pub fn getInstallStep(self: *Build) *Step { @@ -1213,12 +1227,12 @@ fn printCmd(ally: Allocator, cwd: ?[]const u8, argv: []const []const u8) void { std.debug.print("{s}\n", .{text}); } -pub fn installArtifact(self: *Build, artifact: *CompileStep) void { +pub fn installArtifact(self: *Build, artifact: *Step.Compile) void { self.getInstallStep().dependOn(&self.addInstallArtifact(artifact).step); } -pub fn addInstallArtifact(self: *Build, artifact: *CompileStep) *InstallArtifactStep { - return InstallArtifactStep.create(self, artifact); +pub fn addInstallArtifact(self: *Build, artifact: *Step.Compile) *Step.InstallArtifact { + return Step.InstallArtifact.create(self, artifact); } ///`dest_rel_path` is relative to prefix path @@ -1240,26 +1254,26 @@ pub fn installLibFile(self: *Build, src_path: []const u8, dest_rel_path: []const self.getInstallStep().dependOn(&self.addInstallFileWithDir(.{ .path = src_path }, .lib, dest_rel_path).step); } -pub fn addObjCopy(b: *Build, source: FileSource, options: ObjCopyStep.Options) *ObjCopyStep { - return ObjCopyStep.create(b, source, options); +pub fn addObjCopy(b: *Build, source: FileSource, options: Step.ObjCopy.Options) *Step.ObjCopy { + return Step.ObjCopy.create(b, source, options); } ///`dest_rel_path` is relative to install prefix path -pub fn addInstallFile(self: *Build, source: FileSource, dest_rel_path: []const u8) *InstallFileStep { +pub fn addInstallFile(self: *Build, source: FileSource, dest_rel_path: []const u8) *Step.InstallFile { return self.addInstallFileWithDir(source.dupe(self), .prefix, dest_rel_path); } ///`dest_rel_path` is relative to bin path -pub fn addInstallBinFile(self: *Build, source: FileSource, dest_rel_path: []const u8) *InstallFileStep { +pub fn addInstallBinFile(self: *Build, source: FileSource, dest_rel_path: []const u8) *Step.InstallFile { return self.addInstallFileWithDir(source.dupe(self), .bin, dest_rel_path); } ///`dest_rel_path` is relative to lib path 
-pub fn addInstallLibFile(self: *Build, source: FileSource, dest_rel_path: []const u8) *InstallFileStep { +pub fn addInstallLibFile(self: *Build, source: FileSource, dest_rel_path: []const u8) *Step.InstallFile { return self.addInstallFileWithDir(source.dupe(self), .lib, dest_rel_path); } -pub fn addInstallHeaderFile(b: *Build, src_path: []const u8, dest_rel_path: []const u8) *InstallFileStep { +pub fn addInstallHeaderFile(b: *Build, src_path: []const u8, dest_rel_path: []const u8) *Step.InstallFile { return b.addInstallFileWithDir(.{ .path = src_path }, .header, dest_rel_path); } @@ -1268,22 +1282,22 @@ pub fn addInstallFileWithDir( source: FileSource, install_dir: InstallDir, dest_rel_path: []const u8, -) *InstallFileStep { - return InstallFileStep.create(self, source.dupe(self), install_dir, dest_rel_path); +) *Step.InstallFile { + return Step.InstallFile.create(self, source.dupe(self), install_dir, dest_rel_path); } -pub fn addInstallDirectory(self: *Build, options: InstallDirectoryOptions) *InstallDirStep { - const install_step = self.allocator.create(InstallDirStep) catch @panic("OOM"); - install_step.* = InstallDirStep.init(self, options); +pub fn addInstallDirectory(self: *Build, options: InstallDirectoryOptions) *Step.InstallDir { + const install_step = self.allocator.create(Step.InstallDir) catch @panic("OOM"); + install_step.* = Step.InstallDir.init(self, options); return install_step; } pub fn addCheckFile( b: *Build, file_source: FileSource, - options: CheckFileStep.Options, -) *CheckFileStep { - return CheckFileStep.create(b, file_source, options); + options: Step.CheckFile.Options, +) *Step.CheckFile { + return Step.CheckFile.create(b, file_source, options); } pub fn pushInstalledFile(self: *Build, dir: InstallDir, dest_rel_path: []const u8) void { @@ -1453,10 +1467,10 @@ pub fn getInstallPath(self: *Build, dir: InstallDir, dest_rel_path: []const u8) pub const Dependency = struct { builder: *Build, - pub fn artifact(d: *Dependency, name: []const u8) 
*CompileStep { - var found: ?*CompileStep = null; + pub fn artifact(d: *Dependency, name: []const u8) *Step.Compile { + var found: ?*Step.Compile = null; for (d.builder.install_tls.step.dependencies.items) |dep_step| { - const inst = dep_step.cast(InstallArtifactStep) orelse continue; + const inst = dep_step.cast(Step.InstallArtifact) orelse continue; if (mem.eql(u8, inst.artifact.name, name)) { if (found != null) panic("artifact name '{s}' is ambiguous", .{name}); found = inst.artifact; @@ -1464,7 +1478,7 @@ pub const Dependency = struct { } return found orelse { for (d.builder.install_tls.step.dependencies.items) |dep_step| { - const inst = dep_step.cast(InstallArtifactStep) orelse continue; + const inst = dep_step.cast(Step.InstallArtifact) orelse continue; log.info("available artifact: '{s}'", .{inst.artifact.name}); } panic("unable to find artifact '{s}'", .{name}); @@ -1808,17 +1822,5 @@ pub fn hex64(x: u64) [16]u8 { } test { - _ = CheckFileStep; - _ = CheckObjectStep; - _ = FmtStep; - _ = InstallArtifactStep; - _ = InstallDirStep; - _ = InstallFileStep; - _ = ObjCopyStep; - _ = CompileStep; - _ = OptionsStep; - _ = RemoveDirStep; - _ = RunStep; - _ = TranslateCStep; - _ = WriteFileStep; + _ = Step; } diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index bdb500d99c..40c88df2b9 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -94,26 +94,41 @@ pub const Id = enum { pub fn Type(comptime id: Id) type { return switch (id) { .top_level => Build.TopLevelStep, - .compile => Build.CompileStep, - .install_artifact => Build.InstallArtifactStep, - .install_file => Build.InstallFileStep, - .install_dir => Build.InstallDirStep, - .remove_dir => Build.RemoveDirStep, - .fmt => Build.FmtStep, - .translate_c => Build.TranslateCStep, - .write_file => Build.WriteFileStep, - .run => Build.RunStep, - .check_file => Build.CheckFileStep, - .check_object => Build.CheckObjectStep, - .config_header => Build.ConfigHeaderStep, - .objcopy => 
Build.ObjCopyStep, - .options => Build.OptionsStep, + .compile => Compile, + .install_artifact => InstallArtifact, + .install_file => InstallFile, + .install_dir => InstallDir, + .remove_dir => RemoveDir, + .fmt => Fmt, + .translate_c => TranslateC, + .write_file => WriteFile, + .run => Run, + .check_file => CheckFile, + .check_object => CheckObject, + .config_header => ConfigHeader, + .objcopy => ObjCopy, + .options => Options, .custom => @compileError("no type available for custom step"), }; } }; -pub const Options = struct { +pub const CheckFile = @import("Step/CheckFile.zig"); +pub const CheckObject = @import("Step/CheckObject.zig"); +pub const ConfigHeader = @import("Step/ConfigHeader.zig"); +pub const Fmt = @import("Step/Fmt.zig"); +pub const InstallArtifact = @import("Step/InstallArtifact.zig"); +pub const InstallDir = @import("Step/InstallDir.zig"); +pub const InstallFile = @import("Step/InstallFile.zig"); +pub const ObjCopy = @import("Step/ObjCopy.zig"); +pub const Compile = @import("Step/Compile.zig"); +pub const Options = @import("Step/Options.zig"); +pub const RemoveDir = @import("Step/RemoveDir.zig"); +pub const Run = @import("Step/Run.zig"); +pub const TranslateC = @import("Step/TranslateC.zig"); +pub const WriteFile = @import("Step/WriteFile.zig"); + +pub const StepOptions = struct { id: Id, name: []const u8, owner: *Build, @@ -122,7 +137,7 @@ pub const Options = struct { max_rss: usize = 0, }; -pub fn init(options: Options) Step { +pub fn init(options: StepOptions) Step { const arena = options.owner.allocator; var addresses = [1]usize{0} ** n_debug_stack_frames; @@ -387,8 +402,8 @@ pub fn evalZigProcess( s.result_duration_ns = timer.read(); s.result_peak_rss = child.resource_usage_statistics.getMaxRss() orelse 0; - // Special handling for CompileStep that is expecting compile errors. - if (s.cast(Build.CompileStep)) |compile| switch (term) { + // Special handling for Compile step that is expecting compile errors. 
+ if (s.cast(Compile)) |compile| switch (term) { .Exited => { // Note that the exit code may be 0 in this case due to the // compiler server protocol. @@ -535,3 +550,19 @@ pub fn writeManifest(s: *Step, man: *std.Build.Cache.Manifest) !void { }; } } + +test { + _ = CheckFile; + _ = CheckObject; + _ = Fmt; + _ = InstallArtifact; + _ = InstallDir; + _ = InstallFile; + _ = ObjCopy; + _ = Compile; + _ = Options; + _ = RemoveDir; + _ = Run; + _ = TranslateC; + _ = WriteFile; +} diff --git a/lib/std/Build/Step/CheckFile.zig b/lib/std/Build/Step/CheckFile.zig index ad8b1a25f0..dc359b5654 100644 --- a/lib/std/Build/Step/CheckFile.zig +++ b/lib/std/Build/Step/CheckFile.zig @@ -1,8 +1,8 @@ //! Fail the build step if a file does not match certain checks. //! TODO: make this more flexible, supporting more kinds of checks. //! TODO: generalize the code in std.testing.expectEqualStrings and make this -//! CheckFileStep produce those helpful diagnostics when there is not a match. -const CheckFileStep = @This(); +//! CheckFile step produce those helpful diagnostics when there is not a match. 
+const CheckFile = @This(); const std = @import("std"); const Step = std.Build.Step; const fs = std.fs; @@ -25,8 +25,8 @@ pub fn create( owner: *std.Build, source: std.Build.FileSource, options: Options, -) *CheckFileStep { - const self = owner.allocator.create(CheckFileStep) catch @panic("OOM"); +) *CheckFile { + const self = owner.allocator.create(CheckFile) catch @panic("OOM"); self.* = .{ .step = Step.init(.{ .id = .check_file, @@ -42,14 +42,14 @@ pub fn create( return self; } -pub fn setName(self: *CheckFileStep, name: []const u8) void { +pub fn setName(self: *CheckFile, name: []const u8) void { self.step.name = name; } fn make(step: *Step, prog_node: *std.Progress.Node) !void { _ = prog_node; const b = step.owner; - const self = @fieldParentPtr(CheckFileStep, "step", step); + const self = @fieldParentPtr(CheckFile, "step", step); const src_path = self.source.getPath(b); const contents = fs.cwd().readFileAlloc(b.allocator, src_path, self.max_bytes) catch |err| { diff --git a/lib/std/Build/Step/CheckObject.zig b/lib/std/Build/Step/CheckObject.zig index 431f74eccc..c77dc3de36 100644 --- a/lib/std/Build/Step/CheckObject.zig +++ b/lib/std/Build/Step/CheckObject.zig @@ -6,7 +6,7 @@ const math = std.math; const mem = std.mem; const testing = std.testing; -const CheckObjectStep = @This(); +const CheckObject = @This(); const Allocator = mem.Allocator; const Step = std.Build.Step; @@ -24,9 +24,9 @@ pub fn create( owner: *std.Build, source: std.Build.FileSource, obj_format: std.Target.ObjectFormat, -) *CheckObjectStep { +) *CheckObject { const gpa = owner.allocator; - const self = gpa.create(CheckObjectStep) catch @panic("OOM"); + const self = gpa.create(CheckObject) catch @panic("OOM"); self.* = .{ .step = Step.init(.{ .id = .check_file, @@ -47,11 +47,11 @@ pub fn create( /// TODO this doesn't actually compare, and there's no apparent reason for it /// to depend on the check object step. 
I don't see why this function should exist, /// the caller could just add the run step directly. -pub fn runAndCompare(self: *CheckObjectStep) *std.Build.RunStep { +pub fn runAndCompare(self: *CheckObject) *std.Build.Step.Run { const dependencies_len = self.step.dependencies.items.len; assert(dependencies_len > 0); const exe_step = self.step.dependencies.items[dependencies_len - 1]; - const exe = exe_step.cast(std.Build.CompileStep).?; + const exe = exe_step.cast(std.Build.Step.Compile).?; const run = self.step.owner.addRunArtifact(exe); run.skip_foreign_checks = true; run.step.dependOn(&self.step); @@ -274,15 +274,15 @@ const Check = struct { }; /// Creates a new sequence of actions with `phrase` as the first anchor searched phrase. -pub fn checkStart(self: *CheckObjectStep, phrase: []const u8) void { +pub fn checkStart(self: *CheckObject, phrase: []const u8) void { var new_check = Check.create(self.step.owner.allocator); new_check.match(.{ .string = self.step.owner.dupe(phrase) }); self.checks.append(new_check) catch @panic("OOM"); } -/// Adds another searched phrase to the latest created Check with `CheckObjectStep.checkStart(...)`. +/// Adds another searched phrase to the latest created Check with `CheckObject.checkStart(...)`. /// Asserts at least one check already exists. -pub fn checkNext(self: *CheckObjectStep, phrase: []const u8) void { +pub fn checkNext(self: *CheckObject, phrase: []const u8) void { assert(self.checks.items.len > 0); const last = &self.checks.items[self.checks.items.len - 1]; last.match(.{ .string = self.step.owner.dupe(phrase) }); @@ -291,7 +291,7 @@ pub fn checkNext(self: *CheckObjectStep, phrase: []const u8) void { /// Like `checkNext()` but takes an additional argument `FileSource` which will be /// resolved to a full search query in `make()`. 
pub fn checkNextFileSource( - self: *CheckObjectStep, + self: *CheckObject, phrase: []const u8, file_source: std.Build.FileSource, ) void { @@ -300,10 +300,10 @@ pub fn checkNextFileSource( last.match(.{ .string = self.step.owner.dupe(phrase), .file_source = file_source }); } -/// Adds another searched phrase to the latest created Check with `CheckObjectStep.checkStart(...)` +/// Adds another searched phrase to the latest created Check with `CheckObject.checkStart(...)` /// however ensures there is no matching phrase in the output. /// Asserts at least one check already exists. -pub fn checkNotPresent(self: *CheckObjectStep, phrase: []const u8) void { +pub fn checkNotPresent(self: *CheckObject, phrase: []const u8) void { assert(self.checks.items.len > 0); const last = &self.checks.items[self.checks.items.len - 1]; last.notPresent(.{ .string = self.step.owner.dupe(phrase) }); @@ -312,7 +312,7 @@ pub fn checkNotPresent(self: *CheckObjectStep, phrase: []const u8) void { /// Creates a new check checking specifically symbol table parsed and dumped from the object /// file. /// Issuing this check will force parsing and dumping of the symbol table. -pub fn checkInSymtab(self: *CheckObjectStep) void { +pub fn checkInSymtab(self: *CheckObject) void { self.dump_symtab = true; const symtab_label = switch (self.obj_format) { .macho => MachODumper.symtab_label, @@ -325,7 +325,7 @@ pub fn checkInSymtab(self: *CheckObjectStep) void { /// on the extracted variables. It will then compare the reduced program with the value of /// the expected variable. 
pub fn checkComputeCompare( - self: *CheckObjectStep, + self: *CheckObject, program: []const u8, expected: ComputeCompareExpected, ) void { @@ -338,7 +338,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void { _ = prog_node; const b = step.owner; const gpa = b.allocator; - const self = @fieldParentPtr(CheckObjectStep, "step", step); + const self = @fieldParentPtr(CheckObject, "step", step); const src_path = self.source.getPath(b); const contents = fs.cwd().readFileAllocOptions( diff --git a/lib/std/Build/Step/Compile.zig b/lib/std/Build/Step/Compile.zig index 7627c4e6d0..2371f49daf 100644 --- a/lib/std/Build/Step/Compile.zig +++ b/lib/std/Build/Step/Compile.zig @@ -18,14 +18,8 @@ const ExecError = std.Build.ExecError; const Module = std.Build.Module; const VcpkgRoot = std.Build.VcpkgRoot; const InstallDir = std.Build.InstallDir; -const InstallArtifactStep = std.Build.InstallArtifactStep; const GeneratedFile = std.Build.GeneratedFile; -const ObjCopyStep = std.Build.ObjCopyStep; -const CheckObjectStep = std.Build.CheckObjectStep; -const RunStep = std.Build.RunStep; -const OptionsStep = std.Build.OptionsStep; -const ConfigHeaderStep = std.Build.ConfigHeaderStep; -const CompileStep = @This(); +const Compile = @This(); pub const base_id: Step.Id = .compile; @@ -211,8 +205,8 @@ want_lto: ?bool = null, use_llvm: ?bool, use_lld: ?bool, -/// This is an advanced setting that can change the intent of this CompileStep. -/// If this slice has nonzero length, it means that this CompileStep exists to +/// This is an advanced setting that can change the intent of this Compile step. +/// If this slice has nonzero length, it means that this Compile step exists to /// check for compile errors and return *success* if they match, and failure /// otherwise. 
expect_errors: []const []const u8 = &.{}, @@ -242,7 +236,7 @@ pub const CSourceFile = struct { pub const LinkObject = union(enum) { static_path: FileSource, - other_step: *CompileStep, + other_step: *Compile, system_lib: SystemLib, assembly_file: FileSource, c_source_file: *CSourceFile, @@ -273,8 +267,8 @@ const FrameworkLinkInfo = struct { pub const IncludeDir = union(enum) { raw_path: []const u8, raw_path_system: []const u8, - other_step: *CompileStep, - config_header_step: *ConfigHeaderStep, + other_step: *Compile, + config_header_step: *Step.ConfigHeader, }; pub const Options = struct { @@ -319,7 +313,7 @@ pub const EmitOption = union(enum) { } }; -pub fn create(owner: *std.Build, options: Options) *CompileStep { +pub fn create(owner: *std.Build, options: Options) *Compile { const name = owner.dupe(options.name); const root_src: ?FileSource = if (options.root_source_file) |rsrc| rsrc.dupe(owner) else null; if (mem.indexOf(u8, name, "/") != null or mem.indexOf(u8, name, "\\") != null) { @@ -361,8 +355,8 @@ pub fn create(owner: *std.Build, options: Options) *CompileStep { .version = options.version, }) catch @panic("OOM"); - const self = owner.allocator.create(CompileStep) catch @panic("OOM"); - self.* = CompileStep{ + const self = owner.allocator.create(Compile) catch @panic("OOM"); + self.* = Compile{ .strip = null, .unwind_tables = null, .verbose_link = false, @@ -459,7 +453,7 @@ pub fn create(owner: *std.Build, options: Options) *CompileStep { return self; } -pub fn installHeader(cs: *CompileStep, src_path: []const u8, dest_rel_path: []const u8) void { +pub fn installHeader(cs: *Compile, src_path: []const u8, dest_rel_path: []const u8) void { const b = cs.step.owner; const install_file = b.addInstallHeaderFile(src_path, dest_rel_path); b.getInstallStep().dependOn(&install_file.step); @@ -472,8 +466,8 @@ pub const InstallConfigHeaderOptions = struct { }; pub fn installConfigHeader( - cs: *CompileStep, - config_header: *ConfigHeaderStep, + cs: *Compile, + 
config_header: *Step.ConfigHeader, options: InstallConfigHeaderOptions, ) void { const dest_rel_path = options.dest_rel_path orelse config_header.include_path; @@ -489,7 +483,7 @@ pub fn installConfigHeader( } pub fn installHeadersDirectory( - a: *CompileStep, + a: *Compile, src_dir_path: []const u8, dest_rel_path: []const u8, ) void { @@ -501,8 +495,8 @@ pub fn installHeadersDirectory( } pub fn installHeadersDirectoryOptions( - cs: *CompileStep, - options: std.Build.InstallDirStep.Options, + cs: *Compile, + options: std.Build.Step.InstallDir.Options, ) void { const b = cs.step.owner; const install_dir = b.addInstallDirectory(options); @@ -510,7 +504,7 @@ pub fn installHeadersDirectoryOptions( cs.installed_headers.append(&install_dir.step) catch @panic("OOM"); } -pub fn installLibraryHeaders(cs: *CompileStep, l: *CompileStep) void { +pub fn installLibraryHeaders(cs: *Compile, l: *Compile) void { assert(l.kind == .lib); const b = cs.step.owner; const install_step = b.getInstallStep(); @@ -533,7 +527,7 @@ pub fn installLibraryHeaders(cs: *CompileStep, l: *CompileStep) void { cs.installed_headers.appendSlice(l.installed_headers.items) catch @panic("OOM"); } -pub fn addObjCopy(cs: *CompileStep, options: ObjCopyStep.Options) *ObjCopyStep { +pub fn addObjCopy(cs: *Compile, options: Step.ObjCopy.Options) *Step.ObjCopy { const b = cs.step.owner; var copy = options; if (copy.basename == null) { @@ -554,34 +548,34 @@ pub const run = @compileError("deprecated; use std.Build.addRunArtifact"); /// which is undesirable when installing an artifact provided by a dependency package. 
pub const install = @compileError("deprecated; use std.Build.installArtifact"); -pub fn checkObject(self: *CompileStep) *CheckObjectStep { - return CheckObjectStep.create(self.step.owner, self.getOutputSource(), self.target_info.target.ofmt); +pub fn checkObject(self: *Compile) *Step.CheckObject { + return Step.CheckObject.create(self.step.owner, self.getOutputSource(), self.target_info.target.ofmt); } -pub fn setLinkerScriptPath(self: *CompileStep, source: FileSource) void { +pub fn setLinkerScriptPath(self: *Compile, source: FileSource) void { const b = self.step.owner; self.linker_script = source.dupe(b); source.addStepDependencies(&self.step); } -pub fn forceUndefinedSymbol(self: *CompileStep, symbol_name: []const u8) void { +pub fn forceUndefinedSymbol(self: *Compile, symbol_name: []const u8) void { const b = self.step.owner; self.force_undefined_symbols.put(b.dupe(symbol_name), {}) catch @panic("OOM"); } -pub fn linkFramework(self: *CompileStep, framework_name: []const u8) void { +pub fn linkFramework(self: *Compile, framework_name: []const u8) void { const b = self.step.owner; self.frameworks.put(b.dupe(framework_name), .{}) catch @panic("OOM"); } -pub fn linkFrameworkNeeded(self: *CompileStep, framework_name: []const u8) void { +pub fn linkFrameworkNeeded(self: *Compile, framework_name: []const u8) void { const b = self.step.owner; self.frameworks.put(b.dupe(framework_name), .{ .needed = true, }) catch @panic("OOM"); } -pub fn linkFrameworkWeak(self: *CompileStep, framework_name: []const u8) void { +pub fn linkFrameworkWeak(self: *Compile, framework_name: []const u8) void { const b = self.step.owner; self.frameworks.put(b.dupe(framework_name), .{ .weak = true, @@ -589,7 +583,7 @@ pub fn linkFrameworkWeak(self: *CompileStep, framework_name: []const u8) void { } /// Returns whether the library, executable, or object depends on a particular system library. 
-pub fn dependsOnSystemLibrary(self: CompileStep, name: []const u8) bool { +pub fn dependsOnSystemLibrary(self: Compile, name: []const u8) bool { if (isLibCLibrary(name)) { return self.is_linking_libc; } @@ -605,51 +599,51 @@ pub fn dependsOnSystemLibrary(self: CompileStep, name: []const u8) bool { return false; } -pub fn linkLibrary(self: *CompileStep, lib: *CompileStep) void { +pub fn linkLibrary(self: *Compile, lib: *Compile) void { assert(lib.kind == .lib); self.linkLibraryOrObject(lib); } -pub fn isDynamicLibrary(self: *CompileStep) bool { +pub fn isDynamicLibrary(self: *Compile) bool { return self.kind == .lib and self.linkage == Linkage.dynamic; } -pub fn isStaticLibrary(self: *CompileStep) bool { +pub fn isStaticLibrary(self: *Compile) bool { return self.kind == .lib and self.linkage != Linkage.dynamic; } -pub fn producesPdbFile(self: *CompileStep) bool { +pub fn producesPdbFile(self: *Compile) bool { if (!self.target.isWindows() and !self.target.isUefi()) return false; if (self.target.getObjectFormat() == .c) return false; if (self.strip == true) return false; return self.isDynamicLibrary() or self.kind == .exe or self.kind == .@"test"; } -pub fn linkLibC(self: *CompileStep) void { +pub fn linkLibC(self: *Compile) void { self.is_linking_libc = true; } -pub fn linkLibCpp(self: *CompileStep) void { +pub fn linkLibCpp(self: *Compile) void { self.is_linking_libcpp = true; } /// If the value is omitted, it is set to 1. /// `name` and `value` need not live longer than the function call. -pub fn defineCMacro(self: *CompileStep, name: []const u8, value: ?[]const u8) void { +pub fn defineCMacro(self: *Compile, name: []const u8, value: ?[]const u8) void { const b = self.step.owner; const macro = std.Build.constructCMacro(b.allocator, name, value); self.c_macros.append(macro) catch @panic("OOM"); } /// name_and_value looks like [name]=[value]. If the value is omitted, it is set to 1. 
-pub fn defineCMacroRaw(self: *CompileStep, name_and_value: []const u8) void { +pub fn defineCMacroRaw(self: *Compile, name_and_value: []const u8) void { const b = self.step.owner; self.c_macros.append(b.dupe(name_and_value)) catch @panic("OOM"); } /// This one has no integration with anything, it just puts -lname on the command line. /// Prefer to use `linkSystemLibrary` instead. -pub fn linkSystemLibraryName(self: *CompileStep, name: []const u8) void { +pub fn linkSystemLibraryName(self: *Compile, name: []const u8) void { const b = self.step.owner; self.link_objects.append(.{ .system_lib = .{ @@ -663,7 +657,7 @@ pub fn linkSystemLibraryName(self: *CompileStep, name: []const u8) void { /// This one has no integration with anything, it just puts -needed-lname on the command line. /// Prefer to use `linkSystemLibraryNeeded` instead. -pub fn linkSystemLibraryNeededName(self: *CompileStep, name: []const u8) void { +pub fn linkSystemLibraryNeededName(self: *Compile, name: []const u8) void { const b = self.step.owner; self.link_objects.append(.{ .system_lib = .{ @@ -677,7 +671,7 @@ pub fn linkSystemLibraryNeededName(self: *CompileStep, name: []const u8) void { /// Darwin-only. This one has no integration with anything, it just puts -weak-lname on the /// command line. Prefer to use `linkSystemLibraryWeak` instead. -pub fn linkSystemLibraryWeakName(self: *CompileStep, name: []const u8) void { +pub fn linkSystemLibraryWeakName(self: *Compile, name: []const u8) void { const b = self.step.owner; self.link_objects.append(.{ .system_lib = .{ @@ -691,7 +685,7 @@ pub fn linkSystemLibraryWeakName(self: *CompileStep, name: []const u8) void { /// This links against a system library, exclusively using pkg-config to find the library. /// Prefer to use `linkSystemLibrary` instead. 
-pub fn linkSystemLibraryPkgConfigOnly(self: *CompileStep, lib_name: []const u8) void { +pub fn linkSystemLibraryPkgConfigOnly(self: *Compile, lib_name: []const u8) void { const b = self.step.owner; self.link_objects.append(.{ .system_lib = .{ @@ -705,7 +699,7 @@ pub fn linkSystemLibraryPkgConfigOnly(self: *CompileStep, lib_name: []const u8) /// This links against a system library, exclusively using pkg-config to find the library. /// Prefer to use `linkSystemLibraryNeeded` instead. -pub fn linkSystemLibraryNeededPkgConfigOnly(self: *CompileStep, lib_name: []const u8) void { +pub fn linkSystemLibraryNeededPkgConfigOnly(self: *Compile, lib_name: []const u8) void { const b = self.step.owner; self.link_objects.append(.{ .system_lib = .{ @@ -719,7 +713,7 @@ pub fn linkSystemLibraryNeededPkgConfigOnly(self: *CompileStep, lib_name: []cons /// Run pkg-config for the given library name and parse the output, returning the arguments /// that should be passed to zig to link the given library. -fn runPkgConfig(self: *CompileStep, lib_name: []const u8) ![]const []const u8 { +fn runPkgConfig(self: *Compile, lib_name: []const u8) ![]const []const u8 { const b = self.step.owner; const pkg_name = match: { // First we have to map the library name to pkg config name. 
Unfortunately, @@ -813,19 +807,19 @@ fn runPkgConfig(self: *CompileStep, lib_name: []const u8) ![]const []const u8 { return zig_args.toOwnedSlice(); } -pub fn linkSystemLibrary(self: *CompileStep, name: []const u8) void { +pub fn linkSystemLibrary(self: *Compile, name: []const u8) void { self.linkSystemLibraryInner(name, .{}); } -pub fn linkSystemLibraryNeeded(self: *CompileStep, name: []const u8) void { +pub fn linkSystemLibraryNeeded(self: *Compile, name: []const u8) void { self.linkSystemLibraryInner(name, .{ .needed = true }); } -pub fn linkSystemLibraryWeak(self: *CompileStep, name: []const u8) void { +pub fn linkSystemLibraryWeak(self: *Compile, name: []const u8) void { self.linkSystemLibraryInner(name, .{ .weak = true }); } -fn linkSystemLibraryInner(self: *CompileStep, name: []const u8, opts: struct { +fn linkSystemLibraryInner(self: *Compile, name: []const u8, opts: struct { needed: bool = false, weak: bool = false, }) void { @@ -850,7 +844,7 @@ fn linkSystemLibraryInner(self: *CompileStep, name: []const u8, opts: struct { } /// Handy when you have many C/C++ source files and want them all to have the same flags. 
-pub fn addCSourceFiles(self: *CompileStep, files: []const []const u8, flags: []const []const u8) void { +pub fn addCSourceFiles(self: *Compile, files: []const []const u8, flags: []const []const u8) void { const b = self.step.owner; const c_source_files = b.allocator.create(CSourceFiles) catch @panic("OOM"); @@ -864,14 +858,14 @@ pub fn addCSourceFiles(self: *CompileStep, files: []const []const u8, flags: []c self.link_objects.append(.{ .c_source_files = c_source_files }) catch @panic("OOM"); } -pub fn addCSourceFile(self: *CompileStep, file: []const u8, flags: []const []const u8) void { +pub fn addCSourceFile(self: *Compile, file: []const u8, flags: []const []const u8) void { self.addCSourceFileSource(.{ .args = flags, .source = .{ .path = file }, }); } -pub fn addCSourceFileSource(self: *CompileStep, source: CSourceFile) void { +pub fn addCSourceFileSource(self: *Compile, source: CSourceFile) void { const b = self.step.owner; const c_source_file = b.allocator.create(CSourceFile) catch @panic("OOM"); c_source_file.* = source.dupe(b); @@ -879,85 +873,85 @@ pub fn addCSourceFileSource(self: *CompileStep, source: CSourceFile) void { source.source.addStepDependencies(&self.step); } -pub fn setVerboseLink(self: *CompileStep, value: bool) void { +pub fn setVerboseLink(self: *Compile, value: bool) void { self.verbose_link = value; } -pub fn setVerboseCC(self: *CompileStep, value: bool) void { +pub fn setVerboseCC(self: *Compile, value: bool) void { self.verbose_cc = value; } -pub fn overrideZigLibDir(self: *CompileStep, dir_path: []const u8) void { +pub fn overrideZigLibDir(self: *Compile, dir_path: []const u8) void { const b = self.step.owner; self.zig_lib_dir = b.dupePath(dir_path); } -pub fn setMainPkgPath(self: *CompileStep, dir_path: []const u8) void { +pub fn setMainPkgPath(self: *Compile, dir_path: []const u8) void { const b = self.step.owner; self.main_pkg_path = b.dupePath(dir_path); } -pub fn setLibCFile(self: *CompileStep, libc_file: ?FileSource) void { +pub 
fn setLibCFile(self: *Compile, libc_file: ?FileSource) void { const b = self.step.owner; self.libc_file = if (libc_file) |f| f.dupe(b) else null; } /// Returns the generated executable, library or object file. /// To run an executable built with zig build, use `run`, or create an install step and invoke it. -pub fn getOutputSource(self: *CompileStep) FileSource { +pub fn getOutputSource(self: *Compile) FileSource { return .{ .generated = &self.output_path_source }; } -pub fn getOutputDirectorySource(self: *CompileStep) FileSource { +pub fn getOutputDirectorySource(self: *Compile) FileSource { return .{ .generated = &self.output_dirname_source }; } /// Returns the generated import library. This function can only be called for libraries. -pub fn getOutputLibSource(self: *CompileStep) FileSource { +pub fn getOutputLibSource(self: *Compile) FileSource { assert(self.kind == .lib); return .{ .generated = &self.output_lib_path_source }; } /// Returns the generated header file. /// This function can only be called for libraries or object files which have `emit_h` set. -pub fn getOutputHSource(self: *CompileStep) FileSource { +pub fn getOutputHSource(self: *Compile) FileSource { assert(self.kind != .exe and self.kind != .@"test"); assert(self.emit_h); return .{ .generated = &self.output_h_path_source }; } /// Returns the generated PDB file. This function can only be called for Windows and UEFI. -pub fn getOutputPdbSource(self: *CompileStep) FileSource { +pub fn getOutputPdbSource(self: *Compile) FileSource { // TODO: Is this right? Isn't PDB for *any* PE/COFF file? 
assert(self.target.isWindows() or self.target.isUefi()); return .{ .generated = &self.output_pdb_path_source }; } -pub fn addAssemblyFile(self: *CompileStep, path: []const u8) void { +pub fn addAssemblyFile(self: *Compile, path: []const u8) void { const b = self.step.owner; self.link_objects.append(.{ .assembly_file = .{ .path = b.dupe(path) }, }) catch @panic("OOM"); } -pub fn addAssemblyFileSource(self: *CompileStep, source: FileSource) void { +pub fn addAssemblyFileSource(self: *Compile, source: FileSource) void { const b = self.step.owner; const source_duped = source.dupe(b); self.link_objects.append(.{ .assembly_file = source_duped }) catch @panic("OOM"); source_duped.addStepDependencies(&self.step); } -pub fn addObjectFile(self: *CompileStep, source_file: []const u8) void { +pub fn addObjectFile(self: *Compile, source_file: []const u8) void { self.addObjectFileSource(.{ .path = source_file }); } -pub fn addObjectFileSource(self: *CompileStep, source: FileSource) void { +pub fn addObjectFileSource(self: *Compile, source: FileSource) void { const b = self.step.owner; self.link_objects.append(.{ .static_path = source.dupe(b) }) catch @panic("OOM"); source.addStepDependencies(&self.step); } -pub fn addObject(self: *CompileStep, obj: *CompileStep) void { +pub fn addObject(self: *Compile, obj: *Compile) void { assert(obj.kind == .obj); self.linkLibraryOrObject(obj); } @@ -967,54 +961,54 @@ pub const addIncludeDir = @compileError("deprecated; use addIncludePath"); pub const addLibPath = @compileError("deprecated, use addLibraryPath"); pub const addFrameworkDir = @compileError("deprecated, use addFrameworkPath"); -pub fn addSystemIncludePath(self: *CompileStep, path: []const u8) void { +pub fn addSystemIncludePath(self: *Compile, path: []const u8) void { const b = self.step.owner; self.include_dirs.append(IncludeDir{ .raw_path_system = b.dupe(path) }) catch @panic("OOM"); } -pub fn addIncludePath(self: *CompileStep, path: []const u8) void { +pub fn 
addIncludePath(self: *Compile, path: []const u8) void { const b = self.step.owner; self.include_dirs.append(IncludeDir{ .raw_path = b.dupe(path) }) catch @panic("OOM"); } -pub fn addConfigHeader(self: *CompileStep, config_header: *ConfigHeaderStep) void { +pub fn addConfigHeader(self: *Compile, config_header: *Step.ConfigHeader) void { self.step.dependOn(&config_header.step); self.include_dirs.append(.{ .config_header_step = config_header }) catch @panic("OOM"); } -pub fn addLibraryPath(self: *CompileStep, path: []const u8) void { +pub fn addLibraryPath(self: *Compile, path: []const u8) void { const b = self.step.owner; self.lib_paths.append(.{ .path = b.dupe(path) }) catch @panic("OOM"); } -pub fn addLibraryPathDirectorySource(self: *CompileStep, directory_source: FileSource) void { +pub fn addLibraryPathDirectorySource(self: *Compile, directory_source: FileSource) void { self.lib_paths.append(directory_source) catch @panic("OOM"); directory_source.addStepDependencies(&self.step); } -pub fn addRPath(self: *CompileStep, path: []const u8) void { +pub fn addRPath(self: *Compile, path: []const u8) void { const b = self.step.owner; self.rpaths.append(.{ .path = b.dupe(path) }) catch @panic("OOM"); } -pub fn addRPathDirectorySource(self: *CompileStep, directory_source: FileSource) void { +pub fn addRPathDirectorySource(self: *Compile, directory_source: FileSource) void { self.rpaths.append(directory_source) catch @panic("OOM"); directory_source.addStepDependencies(&self.step); } -pub fn addFrameworkPath(self: *CompileStep, dir_path: []const u8) void { +pub fn addFrameworkPath(self: *Compile, dir_path: []const u8) void { const b = self.step.owner; self.framework_dirs.append(.{ .path = b.dupe(dir_path) }) catch @panic("OOM"); } -pub fn addFrameworkPathDirectorySource(self: *CompileStep, directory_source: FileSource) void { +pub fn addFrameworkPathDirectorySource(self: *Compile, directory_source: FileSource) void { self.framework_dirs.append(directory_source) catch 
@panic("OOM"); directory_source.addStepDependencies(&self.step); } /// Adds a module to be used with `@import` and exposing it in the current /// package's module table using `name`. -pub fn addModule(cs: *CompileStep, name: []const u8, module: *Module) void { +pub fn addModule(cs: *Compile, name: []const u8, module: *Module) void { const b = cs.step.owner; cs.modules.put(b.dupe(name), module) catch @panic("OOM"); @@ -1025,17 +1019,17 @@ pub fn addModule(cs: *CompileStep, name: []const u8, module: *Module) void { /// Adds a module to be used with `@import` without exposing it in the current /// package's module table. -pub fn addAnonymousModule(cs: *CompileStep, name: []const u8, options: std.Build.CreateModuleOptions) void { +pub fn addAnonymousModule(cs: *Compile, name: []const u8, options: std.Build.CreateModuleOptions) void { const b = cs.step.owner; const module = b.createModule(options); return addModule(cs, name, module); } -pub fn addOptions(cs: *CompileStep, module_name: []const u8, options: *OptionsStep) void { +pub fn addOptions(cs: *Compile, module_name: []const u8, options: *Step.Options) void { addModule(cs, module_name, options.createModule()); } -fn addRecursiveBuildDeps(cs: *CompileStep, module: *Module, done: *std.AutoHashMap(*Module, void)) !void { +fn addRecursiveBuildDeps(cs: *Compile, module: *Module, done: *std.AutoHashMap(*Module, void)) !void { if (done.contains(module)) return; try done.put(module, {}); module.source_file.addStepDependencies(&cs.step); @@ -1046,7 +1040,7 @@ fn addRecursiveBuildDeps(cs: *CompileStep, module: *Module, done: *std.AutoHashM /// If Vcpkg was found on the system, it will be added to include and lib /// paths for the specified target. 
-pub fn addVcpkgPaths(self: *CompileStep, linkage: CompileStep.Linkage) !void { +pub fn addVcpkgPaths(self: *Compile, linkage: Compile.Linkage) !void { const b = self.step.owner; // Ideally in the Unattempted case we would call the function recursively // after findVcpkgRoot and have only one switch statement, but the compiler @@ -1082,7 +1076,7 @@ pub fn addVcpkgPaths(self: *CompileStep, linkage: CompileStep.Linkage) !void { } } -pub fn setExecCmd(self: *CompileStep, args: []const ?[]const u8) void { +pub fn setExecCmd(self: *Compile, args: []const ?[]const u8) void { const b = self.step.owner; assert(self.kind == .@"test"); const duped_args = b.allocator.alloc(?[]u8, args.len) catch @panic("OOM"); @@ -1092,7 +1086,7 @@ pub fn setExecCmd(self: *CompileStep, args: []const ?[]const u8) void { self.exec_cmd_args = duped_args; } -fn linkLibraryOrObject(self: *CompileStep, other: *CompileStep) void { +fn linkLibraryOrObject(self: *Compile, other: *Compile) void { self.step.dependOn(&other.step); self.link_objects.append(.{ .other_step = other }) catch @panic("OOM"); self.include_dirs.append(.{ .other_step = other }) catch @panic("OOM"); @@ -1103,7 +1097,7 @@ fn linkLibraryOrObject(self: *CompileStep, other: *CompileStep) void { } fn appendModuleArgs( - cs: *CompileStep, + cs: *Compile, zig_args: *ArrayList([]const u8), ) error{OutOfMemory}!void { const b = cs.step.owner; @@ -1214,7 +1208,7 @@ fn constructDepString( fn make(step: *Step, prog_node: *std.Progress.Node) !void { const b = step.owner; - const self = @fieldParentPtr(CompileStep, "step", step); + const self = @fieldParentPtr(Compile, "step", step); if (self.root_src == null and self.link_objects.items.len == 0) { return step.fail("the linker needs one or more objects to link", .{}); @@ -2088,7 +2082,7 @@ const TransitiveDeps = struct { } } - fn addInner(td: *TransitiveDeps, other: *CompileStep, dyn: bool) !void { + fn addInner(td: *TransitiveDeps, other: *Compile, dyn: bool) !void { // Inherit dependency on 
libc and libc++ td.is_linking_libcpp = td.is_linking_libcpp or other.is_linking_libcpp; td.is_linking_libc = td.is_linking_libc or other.is_linking_libc; @@ -2128,7 +2122,7 @@ const TransitiveDeps = struct { } }; -fn checkCompileErrors(self: *CompileStep) !void { +fn checkCompileErrors(self: *Compile) !void { // Clear this field so that it does not get printed by the build runner. const actual_eb = self.step.result_error_bundle; self.step.result_error_bundle = std.zig.ErrorBundle.empty; diff --git a/lib/std/Build/Step/ConfigHeader.zig b/lib/std/Build/Step/ConfigHeader.zig index 6bfe28ae62..a17784c96a 100644 --- a/lib/std/Build/Step/ConfigHeader.zig +++ b/lib/std/Build/Step/ConfigHeader.zig @@ -1,5 +1,5 @@ const std = @import("std"); -const ConfigHeaderStep = @This(); +const ConfigHeader = @This(); const Step = std.Build.Step; pub const Style = union(enum) { @@ -48,8 +48,8 @@ pub const Options = struct { first_ret_addr: ?usize = null, }; -pub fn create(owner: *std.Build, options: Options) *ConfigHeaderStep { - const self = owner.allocator.create(ConfigHeaderStep) catch @panic("OOM"); +pub fn create(owner: *std.Build, options: Options) *ConfigHeader { + const self = owner.allocator.create(ConfigHeader) catch @panic("OOM"); var include_path: []const u8 = "config.h"; @@ -93,21 +93,21 @@ pub fn create(owner: *std.Build, options: Options) *ConfigHeaderStep { return self; } -pub fn addValues(self: *ConfigHeaderStep, values: anytype) void { +pub fn addValues(self: *ConfigHeader, values: anytype) void { return addValuesInner(self, values) catch @panic("OOM"); } -pub fn getFileSource(self: *ConfigHeaderStep) std.Build.FileSource { +pub fn getFileSource(self: *ConfigHeader) std.Build.FileSource { return .{ .generated = &self.output_file }; } -fn addValuesInner(self: *ConfigHeaderStep, values: anytype) !void { +fn addValuesInner(self: *ConfigHeader, values: anytype) !void { inline for (@typeInfo(@TypeOf(values)).Struct.fields) |field| { try putValue(self, field.name, 
field.type, @field(values, field.name)); } } -fn putValue(self: *ConfigHeaderStep, field_name: []const u8, comptime T: type, v: T) !void { +fn putValue(self: *ConfigHeader, field_name: []const u8, comptime T: type, v: T) !void { switch (@typeInfo(T)) { .Null => { try self.values.put(field_name, .undef); @@ -151,31 +151,31 @@ fn putValue(self: *ConfigHeaderStep, field_name: []const u8, comptime T: type, v else => {}, } - @compileError("unsupported ConfigHeaderStep value type: " ++ @typeName(T)); + @compileError("unsupported ConfigHeader value type: " ++ @typeName(T)); }, - else => @compileError("unsupported ConfigHeaderStep value type: " ++ @typeName(T)), + else => @compileError("unsupported ConfigHeader value type: " ++ @typeName(T)), } } fn make(step: *Step, prog_node: *std.Progress.Node) !void { _ = prog_node; const b = step.owner; - const self = @fieldParentPtr(ConfigHeaderStep, "step", step); + const self = @fieldParentPtr(ConfigHeader, "step", step); const gpa = b.allocator; const arena = b.allocator; var man = b.cache.obtain(); defer man.deinit(); - // Random bytes to make ConfigHeaderStep unique. Refresh this with new - // random bytes when ConfigHeaderStep implementation is modified in a + // Random bytes to make ConfigHeader unique. Refresh this with new + // random bytes when ConfigHeader implementation is modified in a // non-backwards-compatible way. man.hash.add(@as(u32, 0xdef08d23)); var output = std.ArrayList(u8).init(gpa); defer output.deinit(); - const header_text = "This file was generated by ConfigHeaderStep using the Zig Build System."; + const header_text = "This file was generated by ConfigHeader using the Zig Build System."; const c_generated_line = "/* " ++ header_text ++ " */\n"; const asm_generated_line = "; " ++ header_text ++ "\n"; diff --git a/lib/std/Build/Step/Fmt.zig b/lib/std/Build/Step/Fmt.zig index 23d5d9e3ff..8e8cc51c0d 100644 --- a/lib/std/Build/Step/Fmt.zig +++ b/lib/std/Build/Step/Fmt.zig @@ -3,7 +3,7 @@ //! 
* Check mode: fail the step if a non-conforming file is found. const std = @import("std"); const Step = std.Build.Step; -const FmtStep = @This(); +const Fmt = @This(); step: Step, paths: []const []const u8, @@ -19,8 +19,8 @@ pub const Options = struct { check: bool = false, }; -pub fn create(owner: *std.Build, options: Options) *FmtStep { - const self = owner.allocator.create(FmtStep) catch @panic("OOM"); +pub fn create(owner: *std.Build, options: Options) *Fmt { + const self = owner.allocator.create(Fmt) catch @panic("OOM"); const name = if (options.check) "zig fmt --check" else "zig fmt"; self.* = .{ .step = Step.init(.{ @@ -47,7 +47,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void { const b = step.owner; const arena = b.allocator; - const self = @fieldParentPtr(FmtStep, "step", step); + const self = @fieldParentPtr(Fmt, "step", step); var argv: std.ArrayListUnmanaged([]const u8) = .{}; try argv.ensureUnusedCapacity(arena, 2 + 1 + self.paths.len + 2 * self.exclude_paths.len); diff --git a/lib/std/Build/Step/InstallArtifact.zig b/lib/std/Build/Step/InstallArtifact.zig index fa357a9ae9..9552a44440 100644 --- a/lib/std/Build/Step/InstallArtifact.zig +++ b/lib/std/Build/Step/InstallArtifact.zig @@ -1,24 +1,23 @@ const std = @import("std"); const Step = std.Build.Step; -const CompileStep = std.Build.CompileStep; const InstallDir = std.Build.InstallDir; -const InstallArtifactStep = @This(); +const InstallArtifact = @This(); const fs = std.fs; pub const base_id = .install_artifact; step: Step, -artifact: *CompileStep, +artifact: *Step.Compile, dest_dir: InstallDir, pdb_dir: ?InstallDir, h_dir: ?InstallDir, /// If non-null, adds additional path components relative to dest_dir, and -/// overrides the basename of the CompileStep. +/// overrides the basename of the Compile step. 
dest_sub_path: ?[]const u8, -pub fn create(owner: *std.Build, artifact: *CompileStep) *InstallArtifactStep { - const self = owner.allocator.create(InstallArtifactStep) catch @panic("OOM"); - self.* = InstallArtifactStep{ +pub fn create(owner: *std.Build, artifact: *Step.Compile) *InstallArtifact { + const self = owner.allocator.create(InstallArtifact) catch @panic("OOM"); + self.* = InstallArtifact{ .step = Step.init(.{ .id = base_id, .name = owner.fmt("install {s}", .{artifact.name}), @@ -66,7 +65,7 @@ pub fn create(owner: *std.Build, artifact: *CompileStep) *InstallArtifactStep { fn make(step: *Step, prog_node: *std.Progress.Node) !void { _ = prog_node; - const self = @fieldParentPtr(InstallArtifactStep, "step", step); + const self = @fieldParentPtr(InstallArtifact, "step", step); const src_builder = self.artifact.step.owner; const dest_builder = step.owner; @@ -90,7 +89,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void { self.artifact.version != null and self.artifact.target.wantSharedLibSymLinks()) { - try CompileStep.doAtomicSymLinks(step, full_dest_path, self.artifact.major_only_filename.?, self.artifact.name_only_filename.?); + try Step.Compile.doAtomicSymLinks(step, full_dest_path, self.artifact.major_only_filename.?, self.artifact.name_only_filename.?); } if (self.artifact.isDynamicLibrary() and self.artifact.target.isWindows() and diff --git a/lib/std/Build/Step/InstallFile.zig b/lib/std/Build/Step/InstallFile.zig index b6b66fd1e0..784685dc3a 100644 --- a/lib/std/Build/Step/InstallFile.zig +++ b/lib/std/Build/Step/InstallFile.zig @@ -2,7 +2,7 @@ const std = @import("std"); const Step = std.Build.Step; const FileSource = std.Build.FileSource; const InstallDir = std.Build.InstallDir; -const InstallFileStep = @This(); +const InstallFile = @This(); const assert = std.debug.assert; pub const base_id = .install_file; @@ -20,10 +20,10 @@ pub fn create( source: FileSource, dir: InstallDir, dest_rel_path: []const u8, -) *InstallFileStep { +) 
*InstallFile { assert(dest_rel_path.len != 0); owner.pushInstalledFile(dir, dest_rel_path); - const self = owner.allocator.create(InstallFileStep) catch @panic("OOM"); + const self = owner.allocator.create(InstallFile) catch @panic("OOM"); self.* = .{ .step = Step.init(.{ .id = base_id, @@ -43,7 +43,7 @@ pub fn create( fn make(step: *Step, prog_node: *std.Progress.Node) !void { _ = prog_node; const src_builder = step.owner; - const self = @fieldParentPtr(InstallFileStep, "step", step); + const self = @fieldParentPtr(InstallFile, "step", step); const dest_builder = self.dest_builder; const full_src_path = self.source.getPath2(src_builder, step); const full_dest_path = dest_builder.getInstallPath(self.dir, self.dest_rel_path); diff --git a/lib/std/Build/Step/ObjCopy.zig b/lib/std/Build/Step/ObjCopy.zig index 608c56591f..a8a0dafaef 100644 --- a/lib/std/Build/Step/ObjCopy.zig +++ b/lib/std/Build/Step/ObjCopy.zig @@ -1,12 +1,11 @@ const std = @import("std"); -const ObjCopyStep = @This(); +const ObjCopy = @This(); const Allocator = std.mem.Allocator; const ArenaAllocator = std.heap.ArenaAllocator; const ArrayListUnmanaged = std.ArrayListUnmanaged; const File = std.fs.File; const InstallDir = std.Build.InstallDir; -const CompileStep = std.Build.CompileStep; const Step = std.Build.Step; const elf = std.elf; const fs = std.fs; @@ -40,9 +39,9 @@ pub fn create( owner: *std.Build, file_source: std.Build.FileSource, options: Options, -) *ObjCopyStep { - const self = owner.allocator.create(ObjCopyStep) catch @panic("OOM"); - self.* = ObjCopyStep{ +) *ObjCopy { + const self = owner.allocator.create(ObjCopy) catch @panic("OOM"); + self.* = ObjCopy{ .step = Step.init(.{ .id = base_id, .name = owner.fmt("objcopy {s}", .{file_source.getDisplayName()}), @@ -61,19 +60,19 @@ pub fn create( return self; } -pub fn getOutputSource(self: *const ObjCopyStep) std.Build.FileSource { +pub fn getOutputSource(self: *const ObjCopy) std.Build.FileSource { return .{ .generated = &self.output_file }; 
} fn make(step: *Step, prog_node: *std.Progress.Node) !void { const b = step.owner; - const self = @fieldParentPtr(ObjCopyStep, "step", step); + const self = @fieldParentPtr(ObjCopy, "step", step); var man = b.cache.obtain(); defer man.deinit(); - // Random bytes to make ObjCopyStep unique. Refresh this with new random - // bytes when ObjCopyStep implementation is modified incompatibly. + // Random bytes to make ObjCopy unique. Refresh this with new random + // bytes when ObjCopy implementation is modified incompatibly. man.hash.add(@as(u32, 0xe18b7baf)); const full_src_path = self.file_source.getPath(b); diff --git a/lib/std/Build/Step/Options.zig b/lib/std/Build/Step/Options.zig index 101c284cf0..cc7152a81e 100644 --- a/lib/std/Build/Step/Options.zig +++ b/lib/std/Build/Step/Options.zig @@ -3,10 +3,9 @@ const builtin = @import("builtin"); const fs = std.fs; const Step = std.Build.Step; const GeneratedFile = std.Build.GeneratedFile; -const CompileStep = std.Build.CompileStep; const FileSource = std.Build.FileSource; -const OptionsStep = @This(); +const Options = @This(); pub const base_id = .options; @@ -17,8 +16,8 @@ contents: std.ArrayList(u8), artifact_args: std.ArrayList(OptionArtifactArg), file_source_args: std.ArrayList(OptionFileSourceArg), -pub fn create(owner: *std.Build) *OptionsStep { - const self = owner.allocator.create(OptionsStep) catch @panic("OOM"); +pub fn create(owner: *std.Build) *Options { + const self = owner.allocator.create(Options) catch @panic("OOM"); self.* = .{ .step = Step.init(.{ .id = base_id, @@ -36,11 +35,11 @@ pub fn create(owner: *std.Build) *OptionsStep { return self; } -pub fn addOption(self: *OptionsStep, comptime T: type, name: []const u8, value: T) void { +pub fn addOption(self: *Options, comptime T: type, name: []const u8, value: T) void { return addOptionFallible(self, T, name, value) catch @panic("unhandled error"); } -fn addOptionFallible(self: *OptionsStep, comptime T: type, name: []const u8, value: T) !void { +fn 
addOptionFallible(self: *Options, comptime T: type, name: []const u8, value: T) !void { const out = self.contents.writer(); switch (T) { []const []const u8 => { @@ -189,7 +188,7 @@ fn printLiteral(out: anytype, val: anytype, indent: u8) !void { /// The value is the path in the cache dir. /// Adds a dependency automatically. pub fn addOptionFileSource( - self: *OptionsStep, + self: *Options, name: []const u8, source: FileSource, ) void { @@ -202,19 +201,19 @@ pub fn addOptionFileSource( /// The value is the path in the cache dir. /// Adds a dependency automatically. -pub fn addOptionArtifact(self: *OptionsStep, name: []const u8, artifact: *CompileStep) void { +pub fn addOptionArtifact(self: *Options, name: []const u8, artifact: *Step.Compile) void { self.artifact_args.append(.{ .name = self.step.owner.dupe(name), .artifact = artifact }) catch @panic("OOM"); self.step.dependOn(&artifact.step); } -pub fn createModule(self: *OptionsStep) *std.Build.Module { +pub fn createModule(self: *Options) *std.Build.Module { return self.step.owner.createModule(.{ .source_file = self.getSource(), .dependencies = &.{}, }); } -pub fn getSource(self: *OptionsStep) FileSource { +pub fn getSource(self: *Options) FileSource { return .{ .generated = &self.generated_file }; } @@ -223,7 +222,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void { _ = prog_node; const b = step.owner; - const self = @fieldParentPtr(OptionsStep, "step", step); + const self = @fieldParentPtr(Options, "step", step); for (self.artifact_args.items) |item| { self.addOption( @@ -314,7 +313,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void { const OptionArtifactArg = struct { name: []const u8, - artifact: *CompileStep, + artifact: *Step.Compile, }; const OptionFileSourceArg = struct { @@ -322,7 +321,7 @@ const OptionFileSourceArg = struct { source: FileSource, }; -test "OptionsStep" { +test Options { if (builtin.os.tag == .wasi) return error.SkipZigTest; var arena = 
std.heap.ArenaAllocator.init(std.testing.allocator); diff --git a/lib/std/Build/Step/RemoveDir.zig b/lib/std/Build/Step/RemoveDir.zig index 59025a7e91..7666dd2a7d 100644 --- a/lib/std/Build/Step/RemoveDir.zig +++ b/lib/std/Build/Step/RemoveDir.zig @@ -1,15 +1,15 @@ const std = @import("std"); const fs = std.fs; const Step = std.Build.Step; -const RemoveDirStep = @This(); +const RemoveDir = @This(); pub const base_id = .remove_dir; step: Step, dir_path: []const u8, -pub fn init(owner: *std.Build, dir_path: []const u8) RemoveDirStep { - return RemoveDirStep{ +pub fn init(owner: *std.Build, dir_path: []const u8) RemoveDir { + return RemoveDir{ .step = Step.init(.{ .id = .remove_dir, .name = owner.fmt("RemoveDir {s}", .{dir_path}), @@ -26,7 +26,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void { _ = prog_node; const b = step.owner; - const self = @fieldParentPtr(RemoveDirStep, "step", step); + const self = @fieldParentPtr(RemoveDir, "step", step); b.build_root.handle.deleteTree(self.dir_path) catch |err| { if (b.build_root.path) |base| { diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index 4e973cfd98..c506e23f90 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -1,8 +1,6 @@ const std = @import("std"); const builtin = @import("builtin"); const Step = std.Build.Step; -const CompileStep = std.Build.CompileStep; -const WriteFileStep = std.Build.WriteFileStep; const fs = std.fs; const mem = std.mem; const process = std.process; @@ -12,7 +10,7 @@ const Allocator = mem.Allocator; const ExecError = std.Build.ExecError; const assert = std.debug.assert; -const RunStep = @This(); +const Run = @This(); pub const base_id: Step.Id = .run; @@ -29,12 +27,12 @@ cwd: ?[]const u8, /// Override this field to modify the environment, or use setEnvironmentVariable env_map: ?*EnvMap, -/// Configures whether the RunStep is considered to have side-effects, and also -/// whether the RunStep will inherit stdio streams, forwarding them 
to the +/// Configures whether the Run step is considered to have side-effects, and also +/// whether the Run step will inherit stdio streams, forwarding them to the /// parent process, in which case will require a global lock to prevent other /// steps from interfering with stdio while the subprocess associated with this -/// RunStep is running. -/// If the RunStep is determined to not have side-effects, then execution will +/// Run step is running. +/// If the Run step is determined to not have side-effects, then execution will /// be skipped if all output files are up-to-date and input files are /// unchanged. stdio: StdIo = .infer_from_args, @@ -42,9 +40,9 @@ stdio: StdIo = .infer_from_args, stdin: ?[]const u8 = null, /// Additional file paths relative to build.zig that, when modified, indicate -/// that the RunStep should be re-executed. -/// If the RunStep is determined to have side-effects, this field is ignored -/// and the RunStep is always executed when it appears in the build graph. +/// that the Run step should be re-executed. +/// If the Run step is determined to have side-effects, this field is ignored +/// and the Run step is always executed when it appears in the build graph. extra_file_dependencies: []const []const u8 = &.{}, /// After adding an output argument, this step will by default rename itself @@ -52,14 +50,14 @@ extra_file_dependencies: []const []const u8 = &.{}, /// This can be disabled by setting this to false. rename_step_with_output_arg: bool = true, -/// If this is true, a RunStep which is configured to check the output of the +/// If this is true, a Run step which is configured to check the output of the /// executed binary will not fail the build if the binary cannot be executed /// due to being for a foreign binary to the host system which is running the /// build graph. 
/// Command-line arguments such as -fqemu and -fwasmtime may affect whether a /// binary is detected as foreign, as well as system configuration such as /// Rosetta (macOS) and binfmt_misc (Linux). -/// If this RunStep is considered to have side-effects, then this flag does +/// If this Run step is considered to have side-effects, then this flag does /// nothing. skip_foreign_checks: bool = false, @@ -73,18 +71,18 @@ captured_stderr: ?*Output = null, has_side_effects: bool = false, pub const StdIo = union(enum) { - /// Whether the RunStep has side-effects will be determined by whether or not one + /// Whether the Run step has side-effects will be determined by whether or not one /// of the args is an output file (added with `addOutputFileArg`). - /// If the RunStep is determined to have side-effects, this is the same as `inherit`. + /// If the Run step is determined to have side-effects, this is the same as `inherit`. /// The step will fail if the subprocess crashes or returns a non-zero exit code. infer_from_args, - /// Causes the RunStep to be considered to have side-effects, and therefore + /// Causes the Run step to be considered to have side-effects, and therefore /// always execute when it appears in the build graph. /// It also means that this step will obtain a global lock to prevent other /// steps from running in the meantime. /// The step will fail if the subprocess crashes or returns a non-zero exit code. inherit, - /// Causes the RunStep to be considered to *not* have side-effects. The + /// Causes the Run step to be considered to *not* have side-effects. The /// process will be re-executed if any of the input dependencies are /// modified. The exit code and standard I/O streams will be checked for /// certain conditions, and the step will succeed or fail based on these @@ -92,7 +90,7 @@ pub const StdIo = union(enum) { /// Note that an explicit check for exit code 0 needs to be added to this /// list if such a check is desirable. 
check: std.ArrayList(Check), - /// This RunStep is running a zig unit test binary and will communicate + /// This Run step is running a zig unit test binary and will communicate /// extra metadata over the IPC protocol. zig_test, @@ -106,7 +104,7 @@ pub const StdIo = union(enum) { }; pub const Arg = union(enum) { - artifact: *CompileStep, + artifact: *Step.Compile, file_source: std.Build.FileSource, directory_source: std.Build.FileSource, bytes: []u8, @@ -119,8 +117,8 @@ pub const Output = struct { basename: []const u8, }; -pub fn create(owner: *std.Build, name: []const u8) *RunStep { - const self = owner.allocator.create(RunStep) catch @panic("OOM"); +pub fn create(owner: *std.Build, name: []const u8) *Run { + const self = owner.allocator.create(Run) catch @panic("OOM"); self.* = .{ .step = Step.init(.{ .id = base_id, @@ -135,17 +133,17 @@ pub fn create(owner: *std.Build, name: []const u8) *RunStep { return self; } -pub fn setName(self: *RunStep, name: []const u8) void { +pub fn setName(self: *Run, name: []const u8) void { self.step.name = name; self.rename_step_with_output_arg = false; } -pub fn enableTestRunnerMode(rs: *RunStep) void { +pub fn enableTestRunnerMode(rs: *Run) void { rs.stdio = .zig_test; rs.addArgs(&.{"--listen=-"}); } -pub fn addArtifactArg(self: *RunStep, artifact: *CompileStep) void { +pub fn addArtifactArg(self: *Run, artifact: *Step.Compile) void { self.argv.append(Arg{ .artifact = artifact }) catch @panic("OOM"); self.step.dependOn(&artifact.step); } @@ -153,12 +151,12 @@ pub fn addArtifactArg(self: *RunStep, artifact: *CompileStep) void { /// This provides file path as a command line argument to the command being /// run, and returns a FileSource which can be used as inputs to other APIs /// throughout the build system. 
-pub fn addOutputFileArg(rs: *RunStep, basename: []const u8) std.Build.FileSource { +pub fn addOutputFileArg(rs: *Run, basename: []const u8) std.Build.FileSource { return addPrefixedOutputFileArg(rs, "", basename); } pub fn addPrefixedOutputFileArg( - rs: *RunStep, + rs: *Run, prefix: []const u8, basename: []const u8, ) std.Build.FileSource { @@ -179,38 +177,38 @@ pub fn addPrefixedOutputFileArg( return .{ .generated = &output.generated_file }; } -pub fn addFileSourceArg(self: *RunStep, file_source: std.Build.FileSource) void { +pub fn addFileSourceArg(self: *Run, file_source: std.Build.FileSource) void { self.argv.append(.{ .file_source = file_source.dupe(self.step.owner), }) catch @panic("OOM"); file_source.addStepDependencies(&self.step); } -pub fn addDirectorySourceArg(self: *RunStep, directory_source: std.Build.FileSource) void { +pub fn addDirectorySourceArg(self: *Run, directory_source: std.Build.FileSource) void { self.argv.append(.{ .directory_source = directory_source.dupe(self.step.owner), }) catch @panic("OOM"); directory_source.addStepDependencies(&self.step); } -pub fn addArg(self: *RunStep, arg: []const u8) void { +pub fn addArg(self: *Run, arg: []const u8) void { self.argv.append(.{ .bytes = self.step.owner.dupe(arg) }) catch @panic("OOM"); } -pub fn addArgs(self: *RunStep, args: []const []const u8) void { +pub fn addArgs(self: *Run, args: []const []const u8) void { for (args) |arg| { self.addArg(arg); } } -pub fn clearEnvironment(self: *RunStep) void { +pub fn clearEnvironment(self: *Run) void { const b = self.step.owner; const new_env_map = b.allocator.create(EnvMap) catch @panic("OOM"); new_env_map.* = EnvMap.init(b.allocator); self.env_map = new_env_map; } -pub fn addPathDir(self: *RunStep, search_path: []const u8) void { +pub fn addPathDir(self: *Run, search_path: []const u8) void { const b = self.step.owner; const env_map = getEnvMapInternal(self); @@ -225,11 +223,11 @@ pub fn addPathDir(self: *RunStep, search_path: []const u8) void { } } -pub 
fn getEnvMap(self: *RunStep) *EnvMap { +pub fn getEnvMap(self: *Run) *EnvMap { return getEnvMapInternal(self); } -fn getEnvMapInternal(self: *RunStep) *EnvMap { +fn getEnvMapInternal(self: *Run) *EnvMap { const arena = self.step.owner.allocator; return self.env_map orelse { const env_map = arena.create(EnvMap) catch @panic("OOM"); @@ -239,25 +237,25 @@ fn getEnvMapInternal(self: *RunStep) *EnvMap { }; } -pub fn setEnvironmentVariable(self: *RunStep, key: []const u8, value: []const u8) void { +pub fn setEnvironmentVariable(self: *Run, key: []const u8, value: []const u8) void { const b = self.step.owner; const env_map = self.getEnvMap(); env_map.put(b.dupe(key), b.dupe(value)) catch @panic("unhandled error"); } -pub fn removeEnvironmentVariable(self: *RunStep, key: []const u8) void { +pub fn removeEnvironmentVariable(self: *Run, key: []const u8) void { self.getEnvMap().remove(key); } /// Adds a check for exact stderr match. Does not add any other checks. -pub fn expectStdErrEqual(self: *RunStep, bytes: []const u8) void { +pub fn expectStdErrEqual(self: *Run, bytes: []const u8) void { const new_check: StdIo.Check = .{ .expect_stderr_exact = self.step.owner.dupe(bytes) }; self.addCheck(new_check); } /// Adds a check for exact stdout match as well as a check for exit code 0, if /// there is not already an expected termination check. 
-pub fn expectStdOutEqual(self: *RunStep, bytes: []const u8) void { +pub fn expectStdOutEqual(self: *Run, bytes: []const u8) void { const new_check: StdIo.Check = .{ .expect_stdout_exact = self.step.owner.dupe(bytes) }; self.addCheck(new_check); if (!self.hasTermCheck()) { @@ -265,12 +263,12 @@ pub fn expectStdOutEqual(self: *RunStep, bytes: []const u8) void { } } -pub fn expectExitCode(self: *RunStep, code: u8) void { +pub fn expectExitCode(self: *Run, code: u8) void { const new_check: StdIo.Check = .{ .expect_term = .{ .Exited = code } }; self.addCheck(new_check); } -pub fn hasTermCheck(self: RunStep) bool { +pub fn hasTermCheck(self: Run) bool { for (self.stdio.check.items) |check| switch (check) { .expect_term => return true, else => continue, @@ -278,18 +276,18 @@ pub fn hasTermCheck(self: RunStep) bool { return false; } -pub fn addCheck(self: *RunStep, new_check: StdIo.Check) void { +pub fn addCheck(self: *Run, new_check: StdIo.Check) void { switch (self.stdio) { .infer_from_args => { self.stdio = .{ .check = std.ArrayList(StdIo.Check).init(self.step.owner.allocator) }; self.stdio.check.append(new_check) catch @panic("OOM"); }, .check => |*checks| checks.append(new_check) catch @panic("OOM"), - else => @panic("illegal call to addCheck: conflicting helper method calls. Suggest to directly set stdio field of RunStep instead"), + else => @panic("illegal call to addCheck: conflicting helper method calls. 
Suggest to directly set stdio field of Run instead"), } } -pub fn captureStdErr(self: *RunStep) std.Build.FileSource { +pub fn captureStdErr(self: *Run) std.Build.FileSource { assert(self.stdio != .inherit); if (self.captured_stderr) |output| return .{ .generated = &output.generated_file }; @@ -304,7 +302,7 @@ pub fn captureStdErr(self: *RunStep) std.Build.FileSource { return .{ .generated = &output.generated_file }; } -pub fn captureStdOut(self: *RunStep) std.Build.FileSource { +pub fn captureStdOut(self: *Run) std.Build.FileSource { assert(self.stdio != .inherit); if (self.captured_stdout) |output| return .{ .generated = &output.generated_file }; @@ -319,8 +317,8 @@ pub fn captureStdOut(self: *RunStep) std.Build.FileSource { return .{ .generated = &output.generated_file }; } -/// Returns whether the RunStep has side effects *other than* updating the output arguments. -fn hasSideEffects(self: RunStep) bool { +/// Returns whether the Run step has side effects *other than* updating the output arguments. 
+fn hasSideEffects(self: Run) bool { if (self.has_side_effects) return true; return switch (self.stdio) { .infer_from_args => !self.hasAnyOutputArgs(), @@ -330,7 +328,7 @@ fn hasSideEffects(self: RunStep) bool { }; } -fn hasAnyOutputArgs(self: RunStep) bool { +fn hasAnyOutputArgs(self: Run) bool { if (self.captured_stdout != null) return true; if (self.captured_stderr != null) return true; for (self.argv.items) |arg| switch (arg) { @@ -371,7 +369,7 @@ fn checksContainStderr(checks: []const StdIo.Check) bool { fn make(step: *Step, prog_node: *std.Progress.Node) !void { const b = step.owner; const arena = b.allocator; - const self = @fieldParentPtr(RunStep, "step", step); + const self = @fieldParentPtr(Run, "step", step); const has_side_effects = self.hasSideEffects(); var argv_list = ArrayList([]const u8).init(arena); @@ -541,7 +539,7 @@ fn termMatches(expected: ?std.process.Child.Term, actual: std.process.Child.Term } fn runCommand( - self: *RunStep, + self: *Run, argv: []const []const u8, has_side_effects: bool, digest: ?*const [std.Build.Cache.hex_digest_len]u8, @@ -567,7 +565,7 @@ fn runCommand( // FileNotFound: can happen with a wrong dynamic linker path if (err == error.InvalidExe or err == error.FileNotFound) interpret: { // TODO: learn the target from the binary directly rather than from - // relying on it being a CompileStep. This will make this logic + // relying on it being a Compile step. This will make this logic // work even for the edge case that the binary was produced by a // third party. 
const exe = switch (self.argv.items[0]) { @@ -862,7 +860,7 @@ const ChildProcResult = struct { }; fn spawnChildAndCollect( - self: *RunStep, + self: *Run, argv: []const []const u8, has_side_effects: bool, prog_node: *std.Progress.Node, @@ -936,7 +934,7 @@ const StdIoResult = struct { }; fn evalZigTest( - self: *RunStep, + self: *Run, child: *std.process.Child, prog_node: *std.Progress.Node, ) !StdIoResult { @@ -1121,7 +1119,7 @@ fn sendRunTestMessage(file: std.fs.File, index: u32) !void { try file.writeAll(full_msg); } -fn evalGeneric(self: *RunStep, child: *std.process.Child) !StdIoResult { +fn evalGeneric(self: *Run, child: *std.process.Child) !StdIoResult { const arena = self.step.owner.allocator; if (self.stdin) |stdin| { @@ -1188,7 +1186,7 @@ fn evalGeneric(self: *RunStep, child: *std.process.Child) !StdIoResult { }; } -fn addPathForDynLibs(self: *RunStep, artifact: *CompileStep) void { +fn addPathForDynLibs(self: *Run, artifact: *Step.Compile) void { const b = self.step.owner; for (artifact.link_objects.items) |link_object| { switch (link_object) { @@ -1204,10 +1202,10 @@ fn addPathForDynLibs(self: *RunStep, artifact: *CompileStep) void { } fn failForeign( - self: *RunStep, + self: *Run, suggested_flag: []const u8, argv0: []const u8, - exe: *CompileStep, + exe: *Step.Compile, ) error{ MakeFailed, MakeSkipped, OutOfMemory } { switch (self.stdio) { .check, .zig_test => { diff --git a/lib/std/Build/Step/TranslateC.zig b/lib/std/Build/Step/TranslateC.zig index 86727ea2f0..0c7ddc4720 100644 --- a/lib/std/Build/Step/TranslateC.zig +++ b/lib/std/Build/Step/TranslateC.zig @@ -1,12 +1,10 @@ const std = @import("std"); const Step = std.Build.Step; -const CompileStep = std.Build.CompileStep; -const CheckFileStep = std.Build.CheckFileStep; const fs = std.fs; const mem = std.mem; const CrossTarget = std.zig.CrossTarget; -const TranslateCStep = @This(); +const TranslateC = @This(); pub const base_id = .translate_c; @@ -25,10 +23,10 @@ pub const Options = struct { optimize: 
std.builtin.OptimizeMode, }; -pub fn create(owner: *std.Build, options: Options) *TranslateCStep { - const self = owner.allocator.create(TranslateCStep) catch @panic("OOM"); +pub fn create(owner: *std.Build, options: Options) *TranslateC { + const self = owner.allocator.create(TranslateC) catch @panic("OOM"); const source = options.source_file.dupe(owner); - self.* = TranslateCStep{ + self.* = TranslateC{ .step = Step.init(.{ .id = .translate_c, .name = "translate-c", @@ -52,11 +50,11 @@ pub const AddExecutableOptions = struct { version: ?std.builtin.Version = null, target: ?CrossTarget = null, optimize: ?std.builtin.Mode = null, - linkage: ?CompileStep.Linkage = null, + linkage: ?Step.Compile.Linkage = null, }; /// Creates a step to build an executable from the translated source. -pub fn addExecutable(self: *TranslateCStep, options: AddExecutableOptions) *CompileStep { +pub fn addExecutable(self: *TranslateC, options: AddExecutableOptions) *Step.Compile { return self.step.owner.addExecutable(.{ .root_source_file = .{ .generated = &self.output_file }, .name = options.name orelse "translated_c", @@ -67,12 +65,12 @@ pub fn addExecutable(self: *TranslateCStep, options: AddExecutableOptions) *Comp }); } -pub fn addIncludeDir(self: *TranslateCStep, include_dir: []const u8) void { +pub fn addIncludeDir(self: *TranslateC, include_dir: []const u8) void { self.include_dirs.append(self.step.owner.dupePath(include_dir)) catch @panic("OOM"); } -pub fn addCheckFile(self: *TranslateCStep, expected_matches: []const []const u8) *CheckFileStep { - return CheckFileStep.create( +pub fn addCheckFile(self: *TranslateC, expected_matches: []const []const u8) *Step.CheckFile { + return Step.CheckFile.create( self.step.owner, .{ .generated = &self.output_file }, .{ .expected_matches = expected_matches }, @@ -81,19 +79,19 @@ pub fn addCheckFile(self: *TranslateCStep, expected_matches: []const []const u8) /// If the value is omitted, it is set to 1. 
/// `name` and `value` need not live longer than the function call. -pub fn defineCMacro(self: *TranslateCStep, name: []const u8, value: ?[]const u8) void { +pub fn defineCMacro(self: *TranslateC, name: []const u8, value: ?[]const u8) void { const macro = std.Build.constructCMacro(self.step.owner.allocator, name, value); self.c_macros.append(macro) catch @panic("OOM"); } /// name_and_value looks like [name]=[value]. If the value is omitted, it is set to 1. -pub fn defineCMacroRaw(self: *TranslateCStep, name_and_value: []const u8) void { +pub fn defineCMacroRaw(self: *TranslateC, name_and_value: []const u8) void { self.c_macros.append(self.step.owner.dupe(name_and_value)) catch @panic("OOM"); } fn make(step: *Step, prog_node: *std.Progress.Node) !void { const b = step.owner; - const self = @fieldParentPtr(TranslateCStep, "step", step); + const self = @fieldParentPtr(TranslateC, "step", step); var argv_list = std.ArrayList([]const u8).init(b.allocator); try argv_list.append(b.zig_exe); diff --git a/lib/std/Build/Step/WriteFile.zig b/lib/std/Build/Step/WriteFile.zig index 68f7c37c6c..0d817e7430 100644 --- a/lib/std/Build/Step/WriteFile.zig +++ b/lib/std/Build/Step/WriteFile.zig @@ -1,4 +1,4 @@ -//! WriteFileStep is primarily used to create a directory in an appropriate +//! WriteFile is primarily used to create a directory in an appropriate //! location inside the local cache which has a set of files that have either //! been generated during the build, or are copied from the source package. //! 
@@ -12,7 +12,7 @@ const std = @import("std"); const Step = std.Build.Step; const fs = std.fs; const ArrayList = std.ArrayList; -const WriteFileStep = @This(); +const WriteFile = @This(); step: Step, /// The elements here are pointers because we need stable pointers for the @@ -39,8 +39,8 @@ pub const Contents = union(enum) { copy: std.Build.FileSource, }; -pub fn create(owner: *std.Build) *WriteFileStep { - const wf = owner.allocator.create(WriteFileStep) catch @panic("OOM"); +pub fn create(owner: *std.Build) *WriteFile { + const wf = owner.allocator.create(WriteFile) catch @panic("OOM"); wf.* = .{ .step = Step.init(.{ .id = .write_file, @@ -55,7 +55,7 @@ pub fn create(owner: *std.Build) *WriteFileStep { return wf; } -pub fn add(wf: *WriteFileStep, sub_path: []const u8, bytes: []const u8) void { +pub fn add(wf: *WriteFile, sub_path: []const u8, bytes: []const u8) void { const b = wf.step.owner; const gpa = b.allocator; const file = gpa.create(File) catch @panic("OOM"); @@ -72,11 +72,11 @@ pub fn add(wf: *WriteFileStep, sub_path: []const u8, bytes: []const u8) void { /// Place the file into the generated directory within the local cache, /// along with all the rest of the files added to this step. The parameter /// here is the destination path relative to the local cache directory -/// associated with this WriteFileStep. It may be a basename, or it may +/// associated with this WriteFile. It may be a basename, or it may /// include sub-directories, in which case this step will ensure the /// required sub-path exists. /// This is the option expected to be used most commonly with `addCopyFile`. 
-pub fn addCopyFile(wf: *WriteFileStep, source: std.Build.FileSource, sub_path: []const u8) void { +pub fn addCopyFile(wf: *WriteFile, source: std.Build.FileSource, sub_path: []const u8) void { const b = wf.step.owner; const gpa = b.allocator; const file = gpa.create(File) catch @panic("OOM"); @@ -97,7 +97,7 @@ pub fn addCopyFile(wf: *WriteFileStep, source: std.Build.FileSource, sub_path: [ /// run by a developer with intent to modify source files and then commit /// those changes to version control. /// A file added this way is not available with `getFileSource`. -pub fn addCopyFileToSource(wf: *WriteFileStep, source: std.Build.FileSource, sub_path: []const u8) void { +pub fn addCopyFileToSource(wf: *WriteFile, source: std.Build.FileSource, sub_path: []const u8) void { const b = wf.step.owner; wf.output_source_files.append(b.allocator, .{ .contents = .{ .copy = source }, @@ -112,7 +112,7 @@ pub fn addCopyFileToSource(wf: *WriteFileStep, source: std.Build.FileSource, sub /// run by a developer with intent to modify source files and then commit /// those changes to version control. /// A file added this way is not available with `getFileSource`. -pub fn addBytesToSource(wf: *WriteFileStep, bytes: []const u8, sub_path: []const u8) void { +pub fn addBytesToSource(wf: *WriteFile, bytes: []const u8, sub_path: []const u8) void { const b = wf.step.owner; wf.output_source_files.append(b.allocator, .{ .contents = .{ .bytes = bytes }, @@ -121,7 +121,7 @@ pub fn addBytesToSource(wf: *WriteFileStep, bytes: []const u8, sub_path: []const } /// Gets a file source for the given sub_path. If the file does not exist, returns `null`. 
-pub fn getFileSource(wf: *WriteFileStep, sub_path: []const u8) ?std.Build.FileSource { +pub fn getFileSource(wf: *WriteFile, sub_path: []const u8) ?std.Build.FileSource { for (wf.files.items) |file| { if (std.mem.eql(u8, file.sub_path, sub_path)) { return .{ .generated = &file.generated_file }; @@ -131,12 +131,12 @@ pub fn getFileSource(wf: *WriteFileStep, sub_path: []const u8) ?std.Build.FileSo } /// Returns a `FileSource` representing the base directory that contains all the -/// files from this `WriteFileStep`. -pub fn getDirectorySource(wf: *WriteFileStep) std.Build.FileSource { +/// files from this `WriteFile`. +pub fn getDirectorySource(wf: *WriteFile) std.Build.FileSource { return .{ .generated = &wf.generated_directory }; } -fn maybeUpdateName(wf: *WriteFileStep) void { +fn maybeUpdateName(wf: *WriteFile) void { if (wf.files.items.len == 1) { // First time adding a file; update name. if (std.mem.eql(u8, wf.step.name, "WriteFile")) { @@ -148,10 +148,10 @@ fn maybeUpdateName(wf: *WriteFileStep) void { fn make(step: *Step, prog_node: *std.Progress.Node) !void { _ = prog_node; const b = step.owner; - const wf = @fieldParentPtr(WriteFileStep, "step", step); + const wf = @fieldParentPtr(WriteFile, "step", step); // Writing to source files is kind of an extra capability of this - // WriteFileStep - arguably it should be a different step. But anyway here + // WriteFile - arguably it should be a different step. But anyway here // it is, it happens unconditionally and does not interact with the other // files here. var any_miss = false; @@ -194,14 +194,14 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void { // the data to a file would probably be very fast - but as a way to find a canonical // location to put build artifacts. - // If, for example, a hard-coded path was used as the location to put WriteFileStep - // files, then two WriteFileSteps executing in parallel might clobber each other. 
+ // If, for example, a hard-coded path was used as the location to put WriteFile + // files, then two WriteFiles executing in parallel might clobber each other. var man = b.cache.obtain(); defer man.deinit(); - // Random bytes to make WriteFileStep unique. Refresh this with - // new random bytes when WriteFileStep implementation is modified + // Random bytes to make WriteFile unique. Refresh this with + // new random bytes when WriteFile implementation is modified // in a non-backwards-compatible way. man.hash.add(@as(u32, 0xd767ee59)); diff --git a/test/link/macho/dead_strip/build.zig b/test/link/macho/dead_strip/build.zig index 4c739b3d8c..9d00bad9e0 100644 --- a/test/link/macho/dead_strip/build.zig +++ b/test/link/macho/dead_strip/build.zig @@ -42,7 +42,7 @@ fn createScenario( optimize: std.builtin.OptimizeMode, target: std.zig.CrossTarget, name: []const u8, -) *std.Build.CompileStep { +) *std.Build.Step.Compile { const exe = b.addExecutable(.{ .name = name, .optimize = optimize, diff --git a/test/link/macho/dead_strip_dylibs/build.zig b/test/link/macho/dead_strip_dylibs/build.zig index 47e53f853e..ec073e183a 100644 --- a/test/link/macho/dead_strip_dylibs/build.zig +++ b/test/link/macho/dead_strip_dylibs/build.zig @@ -46,7 +46,7 @@ fn createScenario( b: *std.Build, optimize: std.builtin.OptimizeMode, name: []const u8, -) *std.Build.CompileStep { +) *std.Build.Step.Compile { const exe = b.addExecutable(.{ .name = name, .optimize = optimize, diff --git a/test/link/macho/headerpad/build.zig b/test/link/macho/headerpad/build.zig index 22cfcc90ec..99edfe72fa 100644 --- a/test/link/macho/headerpad/build.zig +++ b/test/link/macho/headerpad/build.zig @@ -104,7 +104,7 @@ fn simpleExe( b: *std.Build, optimize: std.builtin.OptimizeMode, name: []const u8, -) *std.Build.CompileStep { +) *std.Build.Step.Compile { const exe = b.addExecutable(.{ .name = name, .optimize = optimize, diff --git a/test/link/macho/search_strategy/build.zig 
b/test/link/macho/search_strategy/build.zig index 4777629c8b..853c471969 100644 --- a/test/link/macho/search_strategy/build.zig +++ b/test/link/macho/search_strategy/build.zig @@ -46,7 +46,7 @@ fn createScenario( optimize: std.builtin.OptimizeMode, target: std.zig.CrossTarget, name: []const u8, -) *std.Build.CompileStep { +) *std.Build.Step.Compile { const static = b.addStaticLibrary(.{ .name = name, .optimize = optimize, diff --git a/test/link/macho/unwind_info/build.zig b/test/link/macho/unwind_info/build.zig index 4ace2a4e96..96b5f6cacc 100644 --- a/test/link/macho/unwind_info/build.zig +++ b/test/link/macho/unwind_info/build.zig @@ -65,7 +65,7 @@ fn createScenario( optimize: std.builtin.OptimizeMode, target: std.zig.CrossTarget, name: []const u8, -) *std.Build.CompileStep { +) *std.Build.Step.Compile { const exe = b.addExecutable(.{ .name = name, .optimize = optimize, diff --git a/test/link/macho/uuid/build.zig b/test/link/macho/uuid/build.zig index df58aeacb7..0072825f46 100644 --- a/test/link/macho/uuid/build.zig +++ b/test/link/macho/uuid/build.zig @@ -1,5 +1,4 @@ const std = @import("std"); -const CompileStep = std.Build.CompileStep; const FileSource = std.Build.FileSource; const Step = std.Build.Step; @@ -60,7 +59,7 @@ fn simpleDylib( b: *std.Build, optimize: std.builtin.OptimizeMode, target: std.zig.CrossTarget, -) *std.Build.CompileStep { +) *std.Build.Step.Compile { const dylib = b.addSharedLibrary(.{ .name = "test", .version = .{ .major = 1, .minor = 0 }, diff --git a/test/src/Cases.zig b/test/src/Cases.zig index 4b023f45b0..0451079a0e 100644 --- a/test/src/Cases.zig +++ b/test/src/Cases.zig @@ -465,7 +465,7 @@ pub fn lowerToBuildSteps( parent_step: *std.Build.Step, opt_test_filter: ?[]const u8, cases_dir_path: []const u8, - incremental_exe: *std.Build.CompileStep, + incremental_exe: *std.Build.Step.Compile, ) void { for (self.incremental_cases.items) |incr_case| { if (opt_test_filter) |test_filter| { diff --git a/test/src/StackTrace.zig 
b/test/src/StackTrace.zig index c32720a210..0d0b7155e6 100644 --- a/test/src/StackTrace.zig +++ b/test/src/StackTrace.zig @@ -3,7 +3,7 @@ step: *Step, test_index: usize, test_filter: ?[]const u8, optimize_modes: []const OptimizeMode, -check_exe: *std.Build.CompileStep, +check_exe: *std.Build.Step.Compile, const Expect = [@typeInfo(OptimizeMode).Enum.fields.len][]const u8; diff --git a/test/standalone/install_raw_hex/build.zig b/test/standalone/install_raw_hex/build.zig index b34bb01378..c05490a3e5 100644 --- a/test/standalone/install_raw_hex/build.zig +++ b/test/standalone/install_raw_hex/build.zig @@ -1,6 +1,5 @@ const builtin = @import("builtin"); const std = @import("std"); -const CheckFileStep = std.Build.CheckFileStep; pub fn build(b: *std.Build) void { const test_step = b.step("test", "Test it"); diff --git a/test/tests.zig b/test/tests.zig index 7ec1aaaa65..641914aabe 100644 --- a/test/tests.zig +++ b/test/tests.zig @@ -1132,7 +1132,7 @@ pub fn addCases( b: *std.Build, parent_step: *Step, opt_test_filter: ?[]const u8, - check_case_exe: *std.Build.CompileStep, + check_case_exe: *std.Build.Step.Compile, ) !void { const arena = b.allocator; const gpa = b.allocator; -- cgit v1.2.3 From ce9f3ec990cd556f2a9d06a6db2bb53e97a61172 Mon Sep 17 00:00:00 2001 From: Ryan Liptak Date: Thu, 4 May 2023 18:34:41 -0700 Subject: Fix SplitIterator and TokenIterator type instantiation --- src/main.zig | 2 +- test/src/Cases.zig | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'test/src') diff --git a/src/main.zig b/src/main.zig index a680a5d89e..c61e3f3719 100644 --- a/src/main.zig +++ b/src/main.zig @@ -3531,7 +3531,7 @@ fn serveUpdateResults(s: *Server, comp: *Compilation) !void { } const ModuleDepIterator = struct { - split: mem.SplitIterator(u8), + split: mem.SplitIterator(u8, .scalar), fn init(deps_str: []const u8) ModuleDepIterator { return .{ .split = mem.split(u8, deps_str, ",") }; diff --git a/test/src/Cases.zig b/test/src/Cases.zig index 
0451079a0e..68ecebc7bd 100644 --- a/test/src/Cases.zig +++ b/test/src/Cases.zig @@ -795,7 +795,7 @@ const TestManifest = struct { }; const TrailingIterator = struct { - inner: std.mem.TokenIterator(u8), + inner: std.mem.TokenIterator(u8, .any), fn next(self: *TrailingIterator) ?[]const u8 { const next_inner = self.inner.next() orelse return null; @@ -805,7 +805,7 @@ const TestManifest = struct { fn ConfigValueIterator(comptime T: type) type { return struct { - inner: std.mem.SplitIterator(u8), + inner: std.mem.SplitIterator(u8, .scalar), fn next(self: *@This()) !?T { const next_raw = self.inner.next() orelse return null; -- cgit v1.2.3 From 815e53b147a321d0bdb47dc008aa8181f57175ac Mon Sep 17 00:00:00 2001 From: Ryan Liptak Date: Thu, 4 May 2023 18:05:40 -0700 Subject: Update all std.mem.tokenize calls to their appropriate function Everywhere that can now use `tokenizeScalar` should get a nice little performance boost. --- build.zig | 8 ++++---- lib/std/Build.zig | 2 +- lib/std/Build/Cache.zig | 4 ++-- lib/std/Build/Step/CheckObject.zig | 8 ++++---- lib/std/Build/Step/Compile.zig | 6 +++--- lib/std/Build/Step/ConfigHeader.zig | 4 ++-- lib/std/child_process.zig | 4 ++-- lib/std/fs.zig | 2 +- lib/std/fs/path.zig | 26 ++++++++++++------------ lib/std/http/Client.zig | 4 ++-- lib/std/http/Server.zig | 4 ++-- lib/std/net.zig | 6 +++--- lib/std/os.zig | 2 +- lib/std/process.zig | 2 +- lib/std/zig/system/NativePaths.zig | 10 ++++----- lib/std/zig/system/NativeTargetInfo.zig | 4 ++-- src/arch/x86_64/CodeGen.zig | 6 +++--- src/glibc.zig | 2 +- src/libc_installation.zig | 8 ++++---- src/link/Plan9.zig | 2 +- src/print_zir.zig | 2 +- test/behavior/bugs/6456.zig | 2 +- test/src/Cases.zig | 4 ++-- tools/generate_linux_syscalls.zig | 36 ++++++++++++++++----------------- 24 files changed, 79 insertions(+), 79 deletions(-) (limited to 'test/src') diff --git a/build.zig b/build.zig index 208d06fe1d..21b323df56 100644 --- a/build.zig +++ b/build.zig @@ -284,7 +284,7 @@ pub fn 
build(b: *std.Build) !void { // That means we also have to rely on stage1 compiled c++ files. We parse config.h to find // the information passed on to us from cmake. if (cfg.cmake_prefix_path.len > 0) { - var it = mem.tokenize(u8, cfg.cmake_prefix_path, ";"); + var it = mem.tokenizeScalar(u8, cfg.cmake_prefix_path, ';'); while (it.next()) |path| { b.addSearchPrefix(path); } @@ -687,7 +687,7 @@ fn addCxxKnownPath( if (!std.process.can_spawn) return error.RequiredLibraryNotFound; const path_padded = b.exec(&.{ ctx.cxx_compiler, b.fmt("-print-file-name={s}", .{objname}) }); - var tokenizer = mem.tokenize(u8, path_padded, "\r\n"); + var tokenizer = mem.tokenizeAny(u8, path_padded, "\r\n"); const path_unpadded = tokenizer.next().?; if (mem.eql(u8, path_unpadded, objname)) { if (errtxt) |msg| { @@ -710,7 +710,7 @@ fn addCxxKnownPath( } fn addCMakeLibraryList(exe: *std.Build.Step.Compile, list: []const u8) void { - var it = mem.tokenize(u8, list, ";"); + var it = mem.tokenizeScalar(u8, list, ';'); while (it.next()) |lib| { if (mem.startsWith(u8, lib, "-l")) { exe.linkSystemLibrary(lib["-l".len..]); @@ -855,7 +855,7 @@ fn parseConfigH(b: *std.Build, config_h_text: []const u8) ?CMakeConfig { // .prefix = ZIG_LLVM_LINK_MODE parsed manually below }; - var lines_it = mem.tokenize(u8, config_h_text, "\r\n"); + var lines_it = mem.tokenizeAny(u8, config_h_text, "\r\n"); while (lines_it.next()) |line| { inline for (mappings) |mapping| { if (mem.startsWith(u8, line, mapping.prefix)) { diff --git a/lib/std/Build.zig b/lib/std/Build.zig index ca55d23937..4ab5db5c70 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -1358,7 +1358,7 @@ pub fn findProgram(self: *Build, names: []const []const u8, paths: []const []con if (fs.path.isAbsolute(name)) { return name; } - var it = mem.tokenize(u8, PATH, &[_]u8{fs.path.delimiter}); + var it = mem.tokenizeScalar(u8, PATH, fs.path.delimiter); while (it.next()) |path| { const full_path = self.pathJoin(&.{ path, diff --git 
a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index 17429c0370..7709e5e26c 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -434,7 +434,7 @@ pub const Manifest = struct { const input_file_count = self.files.items.len; var any_file_changed = false; - var line_iter = mem.tokenize(u8, file_contents, "\n"); + var line_iter = mem.tokenizeScalar(u8, file_contents, '\n'); var idx: usize = 0; if (if (line_iter.next()) |line| !std.mem.eql(u8, line, manifest_header) else true) { if (try self.upgradeToExclusiveLock()) continue; @@ -463,7 +463,7 @@ pub const Manifest = struct { break :blk new; }; - var iter = mem.tokenize(u8, line, " "); + var iter = mem.tokenizeScalar(u8, line, ' '); const size = iter.next() orelse return error.InvalidFormat; const inode = iter.next() orelse return error.InvalidFormat; const mtime_nsec_str = iter.next() orelse return error.InvalidFormat; diff --git a/lib/std/Build/Step/CheckObject.zig b/lib/std/Build/Step/CheckObject.zig index c77dc3de36..24ebfef388 100644 --- a/lib/std/Build/Step/CheckObject.zig +++ b/lib/std/Build/Step/CheckObject.zig @@ -103,8 +103,8 @@ const Action = struct { assert(act.tag == .match or act.tag == .not_present); const phrase = act.phrase.resolve(b, step); var candidate_var: ?struct { name: []const u8, value: u64 } = null; - var hay_it = mem.tokenize(u8, mem.trim(u8, haystack, " "), " "); - var needle_it = mem.tokenize(u8, mem.trim(u8, phrase, " "), " "); + var hay_it = mem.tokenizeScalar(u8, mem.trim(u8, haystack, " "), ' '); + var needle_it = mem.tokenizeScalar(u8, mem.trim(u8, phrase, " "), ' '); while (needle_it.next()) |needle_tok| { const hay_tok = hay_it.next() orelse return false; @@ -155,7 +155,7 @@ const Action = struct { var op_stack = std.ArrayList(enum { add, sub, mod, mul }).init(gpa); var values = std.ArrayList(u64).init(gpa); - var it = mem.tokenize(u8, phrase, " "); + var it = mem.tokenizeScalar(u8, phrase, ' '); while (it.next()) |next| { if (mem.eql(u8, next, "+")) { try 
op_stack.append(.add); @@ -365,7 +365,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void { var vars = std.StringHashMap(u64).init(gpa); for (self.checks.items) |chk| { - var it = mem.tokenize(u8, output, "\r\n"); + var it = mem.tokenizeAny(u8, output, "\r\n"); for (chk.actions.items) |act| { switch (act.tag) { .match => { diff --git a/lib/std/Build/Step/Compile.zig b/lib/std/Build/Step/Compile.zig index 2371f49daf..6a05adc1a6 100644 --- a/lib/std/Build/Step/Compile.zig +++ b/lib/std/Build/Step/Compile.zig @@ -777,7 +777,7 @@ fn runPkgConfig(self: *Compile, lib_name: []const u8) ![]const []const u8 { var zig_args = ArrayList([]const u8).init(b.allocator); defer zig_args.deinit(); - var it = mem.tokenize(u8, stdout, " \r\n\t"); + var it = mem.tokenizeAny(u8, stdout, " \r\n\t"); while (it.next()) |tok| { if (mem.eql(u8, tok, "-I")) { const dir = it.next() orelse return error.PkgConfigInvalidOutput; @@ -2017,10 +2017,10 @@ fn execPkgConfigList(self: *std.Build, out_code: *u8) (PkgConfigError || ExecErr const stdout = try self.execAllowFail(&[_][]const u8{ "pkg-config", "--list-all" }, out_code, .Ignore); var list = ArrayList(PkgConfigPkg).init(self.allocator); errdefer list.deinit(); - var line_it = mem.tokenize(u8, stdout, "\r\n"); + var line_it = mem.tokenizeAny(u8, stdout, "\r\n"); while (line_it.next()) |line| { if (mem.trim(u8, line, " \t").len == 0) continue; - var tok_it = mem.tokenize(u8, line, " \t"); + var tok_it = mem.tokenizeAny(u8, line, " \t"); try list.append(PkgConfigPkg{ .name = tok_it.next() orelse return error.PkgConfigInvalidOutput, .desc = tok_it.rest(), diff --git a/lib/std/Build/Step/ConfigHeader.zig b/lib/std/Build/Step/ConfigHeader.zig index f6939e0e38..cd97367218 100644 --- a/lib/std/Build/Step/ConfigHeader.zig +++ b/lib/std/Build/Step/ConfigHeader.zig @@ -257,7 +257,7 @@ fn render_autoconf( try output.appendSlice("\n"); continue; } - var it = std.mem.tokenize(u8, line[1..], " \t\r"); + var it = std.mem.tokenizeAny(u8, line[1..], " 
\t\r"); const undef = it.next().?; if (!std.mem.eql(u8, undef, "undef")) { try output.appendSlice(line); @@ -304,7 +304,7 @@ fn render_cmake( try output.appendSlice("\n"); continue; } - var it = std.mem.tokenize(u8, line[1..], " \t\r"); + var it = std.mem.tokenizeAny(u8, line[1..], " \t\r"); const cmakedefine = it.next().?; if (!std.mem.eql(u8, cmakedefine, "cmakedefine") and !std.mem.eql(u8, cmakedefine, "cmakedefine01")) diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig index daaa1689bc..d94f5ea000 100644 --- a/lib/std/child_process.zig +++ b/lib/std/child_process.zig @@ -850,7 +850,7 @@ pub const ChildProcess = struct { return original_err; } - var it = mem.tokenize(u16, PATH, &[_]u16{';'}); + var it = mem.tokenizeScalar(u16, PATH, ';'); while (it.next()) |search_path| { dir_buf.clearRetainingCapacity(); try dir_buf.appendSlice(self.allocator, search_path); @@ -1067,7 +1067,7 @@ fn windowsCreateProcessPathExt( // Now we know that at least *a* file matching the wildcard exists, we can loop // through PATHEXT in order and exec any that exist - var ext_it = mem.tokenize(u16, pathext, &[_]u16{';'}); + var ext_it = mem.tokenizeScalar(u16, pathext, ';'); while (ext_it.next()) |ext| { if (!windowsCreateProcessSupportsExtension(ext)) continue; diff --git a/lib/std/fs.zig b/lib/std/fs.zig index 7327a3a913..5aeea8a4aa 100644 --- a/lib/std/fs.zig +++ b/lib/std/fs.zig @@ -3021,7 +3021,7 @@ pub fn selfExePath(out_buffer: []u8) SelfExePathError![]u8 { } else if (argv0.len != 0) { // argv[0] is not empty (and not a path): search it inside PATH const PATH = std.os.getenvZ("PATH") orelse return error.FileNotFound; - var path_it = mem.tokenize(u8, PATH, &[_]u8{path.delimiter}); + var path_it = mem.tokenizeScalar(u8, PATH, path.delimiter); while (path_it.next()) |a_path| { var resolved_path_buf: [MAX_PATH_BYTES - 1:0]u8 = undefined; const resolved_path = std.fmt.bufPrintZ(&resolved_path_buf, "{s}/{s}", .{ diff --git a/lib/std/fs/path.zig b/lib/std/fs/path.zig 
index 4c320ae5cf..e7a28a7615 100644 --- a/lib/std/fs/path.zig +++ b/lib/std/fs/path.zig @@ -358,7 +358,7 @@ pub fn windowsParsePath(path: []const u8) WindowsPath { return relative_path; } - var it = mem.tokenize(u8, path, &[_]u8{this_sep}); + var it = mem.tokenizeScalar(u8, path, this_sep); _ = (it.next() orelse return relative_path); _ = (it.next() orelse return relative_path); return WindowsPath{ @@ -420,8 +420,8 @@ fn networkShareServersEql(ns1: []const u8, ns2: []const u8) bool { const sep1 = ns1[0]; const sep2 = ns2[0]; - var it1 = mem.tokenize(u8, ns1, &[_]u8{sep1}); - var it2 = mem.tokenize(u8, ns2, &[_]u8{sep2}); + var it1 = mem.tokenizeScalar(u8, ns1, sep1); + var it2 = mem.tokenizeScalar(u8, ns2, sep2); // TODO ASCII is wrong, we actually need full unicode support to compare paths. return ascii.eqlIgnoreCase(it1.next().?, it2.next().?); @@ -441,8 +441,8 @@ fn compareDiskDesignators(kind: WindowsPath.Kind, p1: []const u8, p2: []const u8 const sep1 = p1[0]; const sep2 = p2[0]; - var it1 = mem.tokenize(u8, p1, &[_]u8{sep1}); - var it2 = mem.tokenize(u8, p2, &[_]u8{sep2}); + var it1 = mem.tokenizeScalar(u8, p1, sep1); + var it2 = mem.tokenizeScalar(u8, p2, sep2); // TODO ASCII is wrong, we actually need full unicode support to compare paths. return ascii.eqlIgnoreCase(it1.next().?, it2.next().?) 
and ascii.eqlIgnoreCase(it1.next().?, it2.next().?); @@ -535,7 +535,7 @@ pub fn resolveWindows(allocator: Allocator, paths: []const []const u8) ![]u8 { break :l disk_designator.len; }, .NetworkShare => { - var it = mem.tokenize(u8, paths[first_index], "/\\"); + var it = mem.tokenizeAny(u8, paths[first_index], "/\\"); const server_name = it.next().?; const other_name = it.next().?; @@ -570,7 +570,7 @@ pub fn resolveWindows(allocator: Allocator, paths: []const []const u8) ![]u8 { if (!correct_disk_designator) { continue; } - var it = mem.tokenize(u8, p[parsed.disk_designator.len..], "/\\"); + var it = mem.tokenizeAny(u8, p[parsed.disk_designator.len..], "/\\"); while (it.next()) |component| { if (mem.eql(u8, component, ".")) { continue; @@ -657,7 +657,7 @@ pub fn resolvePosix(allocator: Allocator, paths: []const []const u8) Allocator.E negative_count = 0; result.clearRetainingCapacity(); } - var it = mem.tokenize(u8, p, "/"); + var it = mem.tokenizeScalar(u8, p, '/'); while (it.next()) |component| { if (mem.eql(u8, component, ".")) { continue; @@ -1078,8 +1078,8 @@ pub fn relativeWindows(allocator: Allocator, from: []const u8, to: []const u8) ! return resolved_to; } - var from_it = mem.tokenize(u8, resolved_from, "/\\"); - var to_it = mem.tokenize(u8, resolved_to, "/\\"); + var from_it = mem.tokenizeAny(u8, resolved_from, "/\\"); + var to_it = mem.tokenizeAny(u8, resolved_to, "/\\"); while (true) { const from_component = from_it.next() orelse return allocator.dupe(u8, to_it.rest()); const to_rest = to_it.rest(); @@ -1102,7 +1102,7 @@ pub fn relativeWindows(allocator: Allocator, from: []const u8, to: []const u8) ! 
result_index += 3; } - var rest_it = mem.tokenize(u8, to_rest, "/\\"); + var rest_it = mem.tokenizeAny(u8, to_rest, "/\\"); while (rest_it.next()) |to_component| { result[result_index] = '\\'; result_index += 1; @@ -1124,8 +1124,8 @@ pub fn relativePosix(allocator: Allocator, from: []const u8, to: []const u8) ![] const resolved_to = try resolvePosix(allocator, &[_][]const u8{ cwd, to }); defer allocator.free(resolved_to); - var from_it = mem.tokenize(u8, resolved_from, "/"); - var to_it = mem.tokenize(u8, resolved_to, "/"); + var from_it = mem.tokenizeScalar(u8, resolved_from, '/'); + var to_it = mem.tokenizeScalar(u8, resolved_to, '/'); while (true) { const from_component = from_it.next() orelse return allocator.dupe(u8, to_it.rest()); const to_rest = to_it.rest(); diff --git a/lib/std/http/Client.zig b/lib/std/http/Client.zig index 023bdd28bc..5626864ceb 100644 --- a/lib/std/http/Client.zig +++ b/lib/std/http/Client.zig @@ -386,7 +386,7 @@ pub const Response = struct { }; pub fn parse(res: *Response, bytes: []const u8, trailing: bool) ParseError!void { - var it = mem.tokenize(u8, bytes[0 .. bytes.len - 4], "\r\n"); + var it = mem.tokenizeAny(u8, bytes[0 .. bytes.len - 4], "\r\n"); const first_line = it.next() orelse return error.HttpHeadersInvalid; if (first_line.len < 12) @@ -412,7 +412,7 @@ pub const Response = struct { else => {}, } - var line_it = mem.tokenize(u8, line, ": "); + var line_it = mem.tokenizeAny(u8, line, ": "); const header_name = line_it.next() orelse return error.HttpHeadersInvalid; const header_value = line_it.rest(); diff --git a/lib/std/http/Server.zig b/lib/std/http/Server.zig index 6b5db6725f..51ab6c086b 100644 --- a/lib/std/http/Server.zig +++ b/lib/std/http/Server.zig @@ -231,7 +231,7 @@ pub const Request = struct { }; pub fn parse(req: *Request, bytes: []const u8) ParseError!void { - var it = mem.tokenize(u8, bytes[0 .. bytes.len - 4], "\r\n"); + var it = mem.tokenizeAny(u8, bytes[0 .. 
bytes.len - 4], "\r\n"); const first_line = it.next() orelse return error.HttpHeadersInvalid; if (first_line.len < 10) @@ -265,7 +265,7 @@ pub const Request = struct { else => {}, } - var line_it = mem.tokenize(u8, line, ": "); + var line_it = mem.tokenizeAny(u8, line, ": "); const header_name = line_it.next() orelse return error.HttpHeadersInvalid; const header_value = line_it.rest(); diff --git a/lib/std/net.zig b/lib/std/net.zig index 57e50a7349..4360cc29f4 100644 --- a/lib/std/net.zig +++ b/lib/std/net.zig @@ -1266,7 +1266,7 @@ fn linuxLookupNameFromHosts( var split_it = mem.split(u8, line, "#"); const no_comment_line = split_it.first(); - var line_it = mem.tokenize(u8, no_comment_line, " \t"); + var line_it = mem.tokenizeAny(u8, no_comment_line, " \t"); const ip_text = line_it.next() orelse continue; var first_name_text: ?[]const u8 = null; while (line_it.next()) |name_text| { @@ -1346,7 +1346,7 @@ fn linuxLookupNameFromDnsSearch( @memcpy(canon.items, canon_name); try canon.append('.'); - var tok_it = mem.tokenize(u8, search, " \t"); + var tok_it = mem.tokenizeAny(u8, search, " \t"); while (tok_it.next()) |tok| { canon.shrinkRetainingCapacity(canon_name.len + 1); try canon.appendSlice(tok); @@ -1468,7 +1468,7 @@ fn getResolvConf(allocator: mem.Allocator, rc: *ResolvConf) !void { var split = mem.split(u8, line, "#"); break :no_comment_line split.first(); }; - var line_it = mem.tokenize(u8, no_comment_line, " \t"); + var line_it = mem.tokenizeAny(u8, no_comment_line, " \t"); const token = line_it.next() orelse continue; if (mem.eql(u8, token, "options")) { diff --git a/lib/std/os.zig b/lib/std/os.zig index 779e913230..eac79690b5 100644 --- a/lib/std/os.zig +++ b/lib/std/os.zig @@ -1878,7 +1878,7 @@ pub fn execvpeZ_expandArg0( // Use of MAX_PATH_BYTES here is valid as the path_buf will be passed // directly to the operating system in execveZ. 
var path_buf: [MAX_PATH_BYTES]u8 = undefined; - var it = mem.tokenize(u8, PATH, ":"); + var it = mem.tokenizeScalar(u8, PATH, ':'); var seen_eacces = false; var err: ExecveError = error.FileNotFound; diff --git a/lib/std/process.zig b/lib/std/process.zig index 504f9075eb..c33fd92db6 100644 --- a/lib/std/process.zig +++ b/lib/std/process.zig @@ -1200,7 +1200,7 @@ fn totalSystemMemoryLinux() !usize { var buf: [50]u8 = undefined; const amt = try file.read(&buf); if (amt != 50) return error.Unexpected; - var it = std.mem.tokenize(u8, buf[0..amt], " \n"); + var it = std.mem.tokenizeAny(u8, buf[0..amt], " \n"); const label = it.next().?; if (!std.mem.eql(u8, label, "MemTotal:")) return error.Unexpected; const int_text = it.next() orelse return error.Unexpected; diff --git a/lib/std/zig/system/NativePaths.zig b/lib/std/zig/system/NativePaths.zig index 70c795b0cf..368e3e062d 100644 --- a/lib/std/zig/system/NativePaths.zig +++ b/lib/std/zig/system/NativePaths.zig @@ -31,7 +31,7 @@ pub fn detect(allocator: Allocator, native_info: NativeTargetInfo) !NativePaths defer allocator.free(nix_cflags_compile); is_nix = true; - var it = mem.tokenize(u8, nix_cflags_compile, " "); + var it = mem.tokenizeScalar(u8, nix_cflags_compile, ' '); while (true) { const word = it.next() orelse break; if (mem.eql(u8, word, "-isystem")) { @@ -62,7 +62,7 @@ pub fn detect(allocator: Allocator, native_info: NativeTargetInfo) !NativePaths defer allocator.free(nix_ldflags); is_nix = true; - var it = mem.tokenize(u8, nix_ldflags, " "); + var it = mem.tokenizeScalar(u8, nix_ldflags, ' '); while (true) { const word = it.next() orelse break; if (mem.eql(u8, word, "-rpath")) { @@ -147,21 +147,21 @@ pub fn detect(allocator: Allocator, native_info: NativeTargetInfo) !NativePaths // We use os.getenv here since this part won't be executed on // windows, to get rid of unnecessary error handling. 
if (std.os.getenv("C_INCLUDE_PATH")) |c_include_path| { - var it = mem.tokenize(u8, c_include_path, ":"); + var it = mem.tokenizeScalar(u8, c_include_path, ':'); while (it.next()) |dir| { try self.addIncludeDir(dir); } } if (std.os.getenv("CPLUS_INCLUDE_PATH")) |cplus_include_path| { - var it = mem.tokenize(u8, cplus_include_path, ":"); + var it = mem.tokenizeScalar(u8, cplus_include_path, ':'); while (it.next()) |dir| { try self.addIncludeDir(dir); } } if (std.os.getenv("LIBRARY_PATH")) |library_path| { - var it = mem.tokenize(u8, library_path, ":"); + var it = mem.tokenizeScalar(u8, library_path, ':'); while (it.next()) |dir| { try self.addLibDir(dir); } diff --git a/lib/std/zig/system/NativeTargetInfo.zig b/lib/std/zig/system/NativeTargetInfo.zig index 539ad96365..808a1bda8d 100644 --- a/lib/std/zig/system/NativeTargetInfo.zig +++ b/lib/std/zig/system/NativeTargetInfo.zig @@ -354,7 +354,7 @@ fn detectAbiAndDynamicLinker( const newline = mem.indexOfScalar(u8, buffer[0..len], '\n') orelse break :blk file; const line = buffer[0..newline]; if (!mem.startsWith(u8, line, "#!")) break :blk file; - var it = mem.tokenize(u8, line[2..], " "); + var it = mem.tokenizeScalar(u8, line[2..], ' '); file_name = it.next() orelse return defaultAbiAndDynamicLinker(cpu, os, cross_target); file.close(); } @@ -811,7 +811,7 @@ pub fn abiAndDynamicLinkerFromFile( const strtab = strtab_buf[0..strtab_read_len]; const rpath_list = mem.sliceTo(strtab, 0); - var it = mem.tokenize(u8, rpath_list, ":"); + var it = mem.tokenizeScalar(u8, rpath_list, ':'); while (it.next()) |rpath| { if (glibcVerFromRPath(rpath)) |ver| { result.target.os.version_range.linux.glibc = ver; diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 2dc1cc8ee4..be09a33bde 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -8409,9 +8409,9 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { } const asm_source = 
mem.sliceAsBytes(self.air.extra[extra_i..])[0..extra.data.source_len]; - var line_it = mem.tokenize(u8, asm_source, "\n\r;"); + var line_it = mem.tokenizeAny(u8, asm_source, "\n\r;"); while (line_it.next()) |line| { - var mnem_it = mem.tokenize(u8, line, " \t"); + var mnem_it = mem.tokenizeAny(u8, line, " \t"); const mnem_str = mnem_it.next() orelse continue; if (mem.startsWith(u8, mnem_str, "#")) continue; @@ -8435,7 +8435,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { return self.fail("Invalid mnemonic: '{s}'", .{mnem_str}); } }; - var op_it = mem.tokenize(u8, mnem_it.rest(), ","); + var op_it = mem.tokenizeScalar(u8, mnem_it.rest(), ','); var ops = [1]encoder.Instruction.Operand{.none} ** 4; for (&ops) |*op| { const op_str = mem.trim(u8, op_it.next() orelse break, " \t"); diff --git a/src/glibc.zig b/src/glibc.zig index 327e4f4bb9..00787381f4 100644 --- a/src/glibc.zig +++ b/src/glibc.zig @@ -109,7 +109,7 @@ pub fn loadMetaData(gpa: Allocator, contents: []const u8) LoadMetaDataError!*ABI const target_name = mem.sliceTo(contents[index..], 0); index += target_name.len + 1; - var component_it = mem.tokenize(u8, target_name, "-"); + var component_it = mem.tokenizeScalar(u8, target_name, '-'); const arch_name = component_it.next() orelse { log.err("abilists: expected arch name", .{}); return error.ZigInstallationCorrupt; diff --git a/src/libc_installation.zig b/src/libc_installation.zig index da877e1291..a62da6b9c7 100644 --- a/src/libc_installation.zig +++ b/src/libc_installation.zig @@ -60,7 +60,7 @@ pub const LibCInstallation = struct { const contents = try std.fs.cwd().readFileAlloc(allocator, libc_file, std.math.maxInt(usize)); defer allocator.free(contents); - var it = std.mem.tokenize(u8, contents, "\n"); + var it = std.mem.tokenizeScalar(u8, contents, '\n'); while (it.next()) |line| { if (line.len == 0 or line[0] == '#') continue; var line_it = std.mem.split(u8, line, "="); @@ -293,7 +293,7 @@ pub const LibCInstallation = struct { }, } - var it = 
std.mem.tokenize(u8, exec_res.stderr, "\n\r"); + var it = std.mem.tokenizeAny(u8, exec_res.stderr, "\n\r"); var search_paths = std.ArrayList([]const u8).init(allocator); defer search_paths.deinit(); while (it.next()) |line| { @@ -613,7 +613,7 @@ fn ccPrintFileName(args: CCPrintFileNameOptions) ![:0]u8 { }, } - var it = std.mem.tokenize(u8, exec_res.stdout, "\n\r"); + var it = std.mem.tokenizeAny(u8, exec_res.stdout, "\n\r"); const line = it.next() orelse return error.LibCRuntimeNotFound; // When this command fails, it returns exit code 0 and duplicates the input file name. // So we detect failure by checking if the output matches exactly the input. @@ -692,7 +692,7 @@ fn appendCcExe(args: *std.ArrayList([]const u8), skip_cc_env_var: bool) !void { return; }; // Respect space-separated flags to the C compiler. - var it = std.mem.tokenize(u8, cc_env_var, " "); + var it = std.mem.tokenizeScalar(u8, cc_env_var, ' '); while (it.next()) |arg| { try args.append(arg); } diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index bef06d1c87..f8ac4e09c1 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -264,7 +264,7 @@ fn putFn(self: *Plan9, decl_index: Module.Decl.Index, out: FnDeclOutput) !void { fn addPathComponents(self: *Plan9, path: []const u8, a: *std.ArrayList(u8)) !void { const sep = std.fs.path.sep; - var it = std.mem.tokenize(u8, path, &.{sep}); + var it = std.mem.tokenizeScalar(u8, path, sep); while (it.next()) |component| { if (self.file_segments.get(component)) |num| { try a.writer().writeIntBig(u16, num); diff --git a/src/print_zir.zig b/src/print_zir.zig index f5e84fcf5b..6ded52ae9f 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -2581,7 +2581,7 @@ const Writer = struct { fn writeDocComment(self: *Writer, stream: anytype, doc_comment_index: u32) !void { if (doc_comment_index != 0) { const doc_comment = self.code.nullTerminatedString(doc_comment_index); - var it = std.mem.tokenize(u8, doc_comment, "\n"); + var it = std.mem.tokenizeScalar(u8, 
doc_comment, '\n'); while (it.next()) |doc_line| { try stream.writeByteNTimes(' ', self.indent); try stream.print("///{s}\n", .{doc_line}); diff --git a/test/behavior/bugs/6456.zig b/test/behavior/bugs/6456.zig index 1eef9c7f75..297c9c7423 100644 --- a/test/behavior/bugs/6456.zig +++ b/test/behavior/bugs/6456.zig @@ -18,7 +18,7 @@ test "issue 6456" { comptime { var fields: []const StructField = &[0]StructField{}; - var it = std.mem.tokenize(u8, text, "\n"); + var it = std.mem.tokenizeScalar(u8, text, '\n'); while (it.next()) |name| { fields = fields ++ &[_]StructField{StructField{ .alignment = 0, diff --git a/test/src/Cases.zig b/test/src/Cases.zig index 68ecebc7bd..aa5369af93 100644 --- a/test/src/Cases.zig +++ b/test/src/Cases.zig @@ -846,7 +846,7 @@ const TestManifest = struct { const actual_start = start orelse return error.MissingTestManifest; const manifest_bytes = bytes[actual_start..end]; - var it = std.mem.tokenize(u8, manifest_bytes, "\r\n"); + var it = std.mem.tokenizeAny(u8, manifest_bytes, "\r\n"); // First line is the test type const tt: Type = blk: { @@ -923,7 +923,7 @@ const TestManifest = struct { fn trailing(self: TestManifest) TrailingIterator { return .{ - .inner = std.mem.tokenize(u8, self.trailing_bytes, "\r\n"), + .inner = std.mem.tokenizeAny(u8, self.trailing_bytes, "\r\n"), }; } diff --git a/tools/generate_linux_syscalls.zig b/tools/generate_linux_syscalls.zig index 11b18ae3bf..32e287b434 100644 --- a/tools/generate_linux_syscalls.zig +++ b/tools/generate_linux_syscalls.zig @@ -51,11 +51,11 @@ pub fn main() !void { try writer.writeAll("pub const X86 = enum(usize) {\n"); const table = try linux_dir.readFile("arch/x86/entry/syscalls/syscall_32.tbl", buf); - var lines = mem.tokenize(u8, table, "\n"); + var lines = mem.tokenizeScalar(u8, table, '\n'); while (lines.next()) |line| { if (line[0] == '#') continue; - var fields = mem.tokenize(u8, line, " \t"); + var fields = mem.tokenizeAny(u8, line, " \t"); const number = fields.next() orelse 
return error.Incomplete; // abi is always i386 _ = fields.next() orelse return error.Incomplete; @@ -70,11 +70,11 @@ pub fn main() !void { try writer.writeAll("pub const X64 = enum(usize) {\n"); const table = try linux_dir.readFile("arch/x86/entry/syscalls/syscall_64.tbl", buf); - var lines = mem.tokenize(u8, table, "\n"); + var lines = mem.tokenizeScalar(u8, table, '\n'); while (lines.next()) |line| { if (line[0] == '#') continue; - var fields = mem.tokenize(u8, line, " \t"); + var fields = mem.tokenizeAny(u8, line, " \t"); const number = fields.next() orelse return error.Incomplete; const abi = fields.next() orelse return error.Incomplete; // The x32 abi syscalls are always at the end. @@ -96,11 +96,11 @@ pub fn main() !void { ); const table = try linux_dir.readFile("arch/arm/tools/syscall.tbl", buf); - var lines = mem.tokenize(u8, table, "\n"); + var lines = mem.tokenizeScalar(u8, table, '\n'); while (lines.next()) |line| { if (line[0] == '#') continue; - var fields = mem.tokenize(u8, line, " \t"); + var fields = mem.tokenizeAny(u8, line, " \t"); const number = fields.next() orelse return error.Incomplete; const abi = fields.next() orelse return error.Incomplete; if (mem.eql(u8, abi, "oabi")) continue; @@ -127,11 +127,11 @@ pub fn main() !void { { try writer.writeAll("pub const Sparc64 = enum(usize) {\n"); const table = try linux_dir.readFile("arch/sparc/kernel/syscalls/syscall.tbl", buf); - var lines = mem.tokenize(u8, table, "\n"); + var lines = mem.tokenizeScalar(u8, table, '\n'); while (lines.next()) |line| { if (line[0] == '#') continue; - var fields = mem.tokenize(u8, line, " \t"); + var fields = mem.tokenizeAny(u8, line, " \t"); const number = fields.next() orelse return error.Incomplete; const abi = fields.next() orelse return error.Incomplete; if (mem.eql(u8, abi, "32")) continue; @@ -151,11 +151,11 @@ pub fn main() !void { ); const table = try linux_dir.readFile("arch/mips/kernel/syscalls/syscall_o32.tbl", buf); - var lines = mem.tokenize(u8, table, 
"\n"); + var lines = mem.tokenizeScalar(u8, table, '\n'); while (lines.next()) |line| { if (line[0] == '#') continue; - var fields = mem.tokenize(u8, line, " \t"); + var fields = mem.tokenizeAny(u8, line, " \t"); const number = fields.next() orelse return error.Incomplete; // abi is always o32 _ = fields.next() orelse return error.Incomplete; @@ -176,11 +176,11 @@ pub fn main() !void { ); const table = try linux_dir.readFile("arch/mips/kernel/syscalls/syscall_n64.tbl", buf); - var lines = mem.tokenize(u8, table, "\n"); + var lines = mem.tokenizeScalar(u8, table, '\n'); while (lines.next()) |line| { if (line[0] == '#') continue; - var fields = mem.tokenize(u8, line, " \t"); + var fields = mem.tokenizeAny(u8, line, " \t"); const number = fields.next() orelse return error.Incomplete; // abi is always n64 _ = fields.next() orelse return error.Incomplete; @@ -197,11 +197,11 @@ pub fn main() !void { const table = try linux_dir.readFile("arch/powerpc/kernel/syscalls/syscall.tbl", buf); var list_64 = std.ArrayList(u8).init(allocator); - var lines = mem.tokenize(u8, table, "\n"); + var lines = mem.tokenizeScalar(u8, table, '\n'); while (lines.next()) |line| { if (line[0] == '#') continue; - var fields = mem.tokenize(u8, line, " \t"); + var fields = mem.tokenizeAny(u8, line, " \t"); const number = fields.next() orelse return error.Incomplete; const abi = fields.next() orelse return error.Incomplete; const name = fields.next() orelse return error.Incomplete; @@ -277,9 +277,9 @@ pub fn main() !void { }, }; - var lines = mem.tokenize(u8, defines, "\n"); + var lines = mem.tokenizeScalar(u8, defines, '\n'); loop: while (lines.next()) |line| { - var fields = mem.tokenize(u8, line, " \t"); + var fields = mem.tokenizeAny(u8, line, " \t"); const cmd = fields.next() orelse return error.Incomplete; if (!mem.eql(u8, cmd, "#define")) continue; const define = fields.next() orelse return error.Incomplete; @@ -339,9 +339,9 @@ pub fn main() !void { }, }; - var lines = mem.tokenize(u8, 
defines, "\n"); + var lines = mem.tokenizeScalar(u8, defines, '\n'); loop: while (lines.next()) |line| { - var fields = mem.tokenize(u8, line, " \t"); + var fields = mem.tokenizeAny(u8, line, " \t"); const cmd = fields.next() orelse return error.Incomplete; if (!mem.eql(u8, cmd, "#define")) continue; const define = fields.next() orelse return error.Incomplete; -- cgit v1.2.3 From 2129f28953b72da2f1bb58ff063a044d737c59c4 Mon Sep 17 00:00:00 2001 From: Ryan Liptak Date: Thu, 4 May 2023 18:15:50 -0700 Subject: Update all std.mem.split calls to their appropriate function Everywhere that can now use `splitScalar` should get a nice little performance boost. --- build.zig | 6 +++--- doc/docgen.zig | 2 +- lib/std/Build/Step/Compile.zig | 2 +- lib/std/Build/Step/ConfigHeader.zig | 4 ++-- lib/std/SemanticVersion.zig | 10 +++++----- lib/std/builtin.zig | 2 +- lib/std/crypto/Certificate.zig | 4 ++-- lib/std/crypto/phc_encoding.zig | 9 ++++++--- lib/std/crypto/scrypt.zig | 2 +- lib/std/http/Client.zig | 2 +- lib/std/http/Server.zig | 2 +- lib/std/net.zig | 6 +++--- lib/std/process.zig | 2 +- lib/std/zig/CrossTarget.zig | 12 ++++++------ lib/std/zig/ErrorBundle.zig | 2 +- lib/std/zig/render.zig | 2 +- lib/std/zig/system/NativeTargetInfo.zig | 2 +- src/Autodoc.zig | 2 +- src/Compilation.zig | 6 +++--- src/libc_installation.zig | 2 +- src/link/MachO/Dylib.zig | 2 +- src/main.zig | 16 ++++++++-------- test/src/Cases.zig | 6 +++--- test/src/check-stack-trace.zig | 2 +- tools/gen_outline_atomics.zig | 2 +- tools/update_crc_catalog.zig | 2 +- tools/update_spirv_features.zig | 2 +- 27 files changed, 58 insertions(+), 55 deletions(-) (limited to 'test/src') diff --git a/build.zig b/build.zig index 21b323df56..e7e3f17a82 100644 --- a/build.zig +++ b/build.zig @@ -239,7 +239,7 @@ pub fn build(b: *std.Build) !void { }, 2 => { // Untagged development build (e.g. 0.10.0-dev.2025+ecf0050a9). 
- var it = mem.split(u8, git_describe, "-"); + var it = mem.splitScalar(u8, git_describe, '-'); const tagged_ancestor = it.first(); const commit_height = it.next().?; const commit_id = it.next().?; @@ -859,14 +859,14 @@ fn parseConfigH(b: *std.Build, config_h_text: []const u8) ?CMakeConfig { while (lines_it.next()) |line| { inline for (mappings) |mapping| { if (mem.startsWith(u8, line, mapping.prefix)) { - var it = mem.split(u8, line, "\""); + var it = mem.splitScalar(u8, line, '"'); _ = it.first(); // skip the stuff before the quote const quoted = it.next().?; // the stuff inside the quote @field(ctx, mapping.field) = toNativePathSep(b, quoted); } } if (mem.startsWith(u8, line, "#define ZIG_LLVM_LINK_MODE ")) { - var it = mem.split(u8, line, "\""); + var it = mem.splitScalar(u8, line, '"'); _ = it.next().?; // skip the stuff before the quote const quoted = it.next().?; // the stuff inside the quote ctx.llvm_linkage = if (mem.eql(u8, quoted, "shared")) .dynamic else .static; diff --git a/doc/docgen.zig b/doc/docgen.zig index 07636fd152..82f2d9b199 100644 --- a/doc/docgen.zig +++ b/doc/docgen.zig @@ -1223,7 +1223,7 @@ fn printShell(out: anytype, shell_content: []const u8, escape: bool) !void { const trimmed_shell_content = mem.trim(u8, shell_content, " \n"); try out.writeAll("
Shell
");
     var cmd_cont: bool = false;
-    var iter = std.mem.split(u8, trimmed_shell_content, "\n");
+    var iter = std.mem.splitScalar(u8, trimmed_shell_content, '\n');
     while (iter.next()) |orig_line| {
         const line = mem.trimRight(u8, orig_line, " ");
         if (!cmd_cont and line.len > 1 and mem.eql(u8, line[0..2], "$ ") and line[line.len - 1] != '\\') {
diff --git a/lib/std/Build/Step/Compile.zig b/lib/std/Build/Step/Compile.zig
index 6a05adc1a6..3355d7ccc5 100644
--- a/lib/std/Build/Step/Compile.zig
+++ b/lib/std/Build/Step/Compile.zig
@@ -2140,7 +2140,7 @@ fn checkCompileErrors(self: *Compile) !void {
     // Render the expected lines into a string that we can compare verbatim.
     var expected_generated = std.ArrayList(u8).init(arena);
 
-    var actual_line_it = mem.split(u8, actual_stderr, "\n");
+    var actual_line_it = mem.splitScalar(u8, actual_stderr, '\n');
     for (self.expect_errors) |expect_line| {
         const actual_line = actual_line_it.next() orelse {
             try expected_generated.appendSlice(expect_line);
diff --git a/lib/std/Build/Step/ConfigHeader.zig b/lib/std/Build/Step/ConfigHeader.zig
index cd97367218..4b76e24b26 100644
--- a/lib/std/Build/Step/ConfigHeader.zig
+++ b/lib/std/Build/Step/ConfigHeader.zig
@@ -250,7 +250,7 @@ fn render_autoconf(
 
     var any_errors = false;
     var line_index: u32 = 0;
-    var line_it = std.mem.split(u8, contents, "\n");
+    var line_it = std.mem.splitScalar(u8, contents, '\n');
     while (line_it.next()) |line| : (line_index += 1) {
         if (!std.mem.startsWith(u8, line, "#")) {
             try output.appendSlice(line);
@@ -297,7 +297,7 @@ fn render_cmake(
 
     var any_errors = false;
     var line_index: u32 = 0;
-    var line_it = std.mem.split(u8, contents, "\n");
+    var line_it = std.mem.splitScalar(u8, contents, '\n');
     while (line_it.next()) |line| : (line_index += 1) {
         if (!std.mem.startsWith(u8, line, "#")) {
             try output.appendSlice(line);
diff --git a/lib/std/SemanticVersion.zig b/lib/std/SemanticVersion.zig
index 26f6f581c8..4d505b4e30 100644
--- a/lib/std/SemanticVersion.zig
+++ b/lib/std/SemanticVersion.zig
@@ -42,8 +42,8 @@ pub fn order(lhs: Version, rhs: Version) std.math.Order {
     if (lhs.pre == null and rhs.pre != null) return .gt;
 
     // Iterate over pre-release identifiers until a difference is found.
-    var lhs_pre_it = std.mem.split(u8, lhs.pre.?, ".");
-    var rhs_pre_it = std.mem.split(u8, rhs.pre.?, ".");
+    var lhs_pre_it = std.mem.splitScalar(u8, lhs.pre.?, '.');
+    var rhs_pre_it = std.mem.splitScalar(u8, rhs.pre.?, '.');
     while (true) {
         const next_lid = lhs_pre_it.next();
         const next_rid = rhs_pre_it.next();
@@ -86,7 +86,7 @@ pub fn parse(text: []const u8) !Version {
     // Parse the required major, minor, and patch numbers.
     const extra_index = std.mem.indexOfAny(u8, text, "-+");
     const required = text[0..(extra_index orelse text.len)];
-    var it = std.mem.split(u8, required, ".");
+    var it = std.mem.splitScalar(u8, required, '.');
     var ver = Version{
         .major = try parseNum(it.first()),
         .minor = try parseNum(it.next() orelse return error.InvalidVersion),
@@ -108,7 +108,7 @@ pub fn parse(text: []const u8) !Version {
     // Check validity of optional pre-release identifiers.
     // See: https://semver.org/#spec-item-9
     if (ver.pre) |pre| {
-        it = std.mem.split(u8, pre, ".");
+        it = std.mem.splitScalar(u8, pre, '.');
         while (it.next()) |id| {
             // Identifiers MUST NOT be empty.
             if (id.len == 0) return error.InvalidVersion;
@@ -127,7 +127,7 @@ pub fn parse(text: []const u8) !Version {
     // Check validity of optional build metadata identifiers.
     // See: https://semver.org/#spec-item-10
     if (ver.build) |build| {
-        it = std.mem.split(u8, build, ".");
+        it = std.mem.splitScalar(u8, build, '.');
         while (it.next()) |id| {
             // Identifiers MUST NOT be empty.
             if (id.len == 0) return error.InvalidVersion;
diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index 684432bd40..7fe295123e 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -531,7 +531,7 @@ pub const Version = struct {
         // found no digits or '.' before unexpected character
         if (end == 0) return error.InvalidVersion;
 
-        var it = std.mem.split(u8, text[0..end], ".");
+        var it = std.mem.splitScalar(u8, text[0..end], '.');
         // substring is not empty, first call will succeed
         const major = it.first();
         if (major.len == 0) return error.InvalidVersion;
diff --git a/lib/std/crypto/Certificate.zig b/lib/std/crypto/Certificate.zig
index 113d169cbc..3da4269ba2 100644
--- a/lib/std/crypto/Certificate.zig
+++ b/lib/std/crypto/Certificate.zig
@@ -337,8 +337,8 @@ pub const Parsed = struct {
             return true; // exact match
         }
 
-        var it_host = std.mem.split(u8, host_name, ".");
-        var it_dns = std.mem.split(u8, dns_name, ".");
+        var it_host = std.mem.splitScalar(u8, host_name, '.');
+        var it_dns = std.mem.splitScalar(u8, dns_name, '.');
 
         const len_match = while (true) {
             const host = it_host.next();
diff --git a/lib/std/crypto/phc_encoding.zig b/lib/std/crypto/phc_encoding.zig
index cc0f10e395..1eeee39a5a 100644
--- a/lib/std/crypto/phc_encoding.zig
+++ b/lib/std/crypto/phc_encoding.zig
@@ -7,9 +7,12 @@ const mem = std.mem;
 const meta = std.meta;
 
 const fields_delimiter = "$";
+const fields_delimiter_scalar = '$';
 const version_param_name = "v";
 const params_delimiter = ",";
+const params_delimiter_scalar = ',';
 const kv_delimiter = "=";
+const kv_delimiter_scalar = '=';
 
 pub const Error = std.crypto.errors.EncodingError || error{NoSpaceLeft};
 
@@ -73,7 +76,7 @@ pub fn BinValue(comptime max_len: usize) type {
 /// Other fields will also be deserialized from the function parameters section.
 pub fn deserialize(comptime HashResult: type, str: []const u8) Error!HashResult {
     var out = mem.zeroes(HashResult);
-    var it = mem.split(u8, str, fields_delimiter);
+    var it = mem.splitScalar(u8, str, fields_delimiter_scalar);
     var set_fields: usize = 0;
 
     while (true) {
@@ -104,7 +107,7 @@ pub fn deserialize(comptime HashResult: type, str: []const u8) Error!HashResult
 
         // Read optional parameters
         var has_params = false;
-        var it_params = mem.split(u8, field, params_delimiter);
+        var it_params = mem.splitScalar(u8, field, params_delimiter_scalar);
         while (it_params.next()) |params| {
             const param = kvSplit(params) catch break;
             var found = false;
@@ -252,7 +255,7 @@ fn serializeTo(params: anytype, out: anytype) !void {
 
 // Split a `key=value` string into `key` and `value`
 fn kvSplit(str: []const u8) !struct { key: []const u8, value: []const u8 } {
-    var it = mem.split(u8, str, kv_delimiter);
+    var it = mem.splitScalar(u8, str, kv_delimiter_scalar);
     const key = it.first();
     const value = it.next() orelse return Error.InvalidEncoding;
     const ret = .{ .key = key, .value = value };
diff --git a/lib/std/crypto/scrypt.zig b/lib/std/crypto/scrypt.zig
index 077de3b510..b8e8ef55e2 100644
--- a/lib/std/crypto/scrypt.zig
+++ b/lib/std/crypto/scrypt.zig
@@ -287,7 +287,7 @@ const crypt_format = struct {
         out.r = try Codec.intDecode(u30, str[4..9]);
         out.p = try Codec.intDecode(u30, str[9..14]);
 
-        var it = mem.split(u8, str[14..], "$");
+        var it = mem.splitScalar(u8, str[14..], '$');
 
         const salt = it.first();
         if (@hasField(T, "salt")) out.salt = salt;
diff --git a/lib/std/http/Client.zig b/lib/std/http/Client.zig
index 5626864ceb..9aaf13d333 100644
--- a/lib/std/http/Client.zig
+++ b/lib/std/http/Client.zig
@@ -426,7 +426,7 @@ pub const Response = struct {
             } else if (std.ascii.eqlIgnoreCase(header_name, "transfer-encoding")) {
                 // Transfer-Encoding: second, first
                 // Transfer-Encoding: deflate, chunked
-                var iter = mem.splitBackwards(u8, header_value, ",");
+                var iter = mem.splitBackwardsScalar(u8, header_value, ',');
 
                 if (iter.next()) |first| {
                     const trimmed = mem.trim(u8, first, " ");
diff --git a/lib/std/http/Server.zig b/lib/std/http/Server.zig
index 51ab6c086b..1af64805b3 100644
--- a/lib/std/http/Server.zig
+++ b/lib/std/http/Server.zig
@@ -277,7 +277,7 @@ pub const Request = struct {
             } else if (std.ascii.eqlIgnoreCase(header_name, "transfer-encoding")) {
                 // Transfer-Encoding: second, first
                 // Transfer-Encoding: deflate, chunked
-                var iter = mem.splitBackwards(u8, header_value, ",");
+                var iter = mem.splitBackwardsScalar(u8, header_value, ',');
 
                 if (iter.next()) |first| {
                     const trimmed = mem.trim(u8, first, " ");
diff --git a/lib/std/net.zig b/lib/std/net.zig
index 4360cc29f4..4e72ed3256 100644
--- a/lib/std/net.zig
+++ b/lib/std/net.zig
@@ -1263,7 +1263,7 @@ fn linuxLookupNameFromHosts(
         },
         else => |e| return e,
     }) |line| {
-        var split_it = mem.split(u8, line, "#");
+        var split_it = mem.splitScalar(u8, line, '#');
         const no_comment_line = split_it.first();
 
         var line_it = mem.tokenizeAny(u8, no_comment_line, " \t");
@@ -1465,7 +1465,7 @@ fn getResolvConf(allocator: mem.Allocator, rc: *ResolvConf) !void {
         else => |e| return e,
     }) |line| {
         const no_comment_line = no_comment_line: {
-            var split = mem.split(u8, line, "#");
+            var split = mem.splitScalar(u8, line, '#');
             break :no_comment_line split.first();
         };
         var line_it = mem.tokenizeAny(u8, no_comment_line, " \t");
@@ -1473,7 +1473,7 @@ fn getResolvConf(allocator: mem.Allocator, rc: *ResolvConf) !void {
         const token = line_it.next() orelse continue;
         if (mem.eql(u8, token, "options")) {
             while (line_it.next()) |sub_tok| {
-                var colon_it = mem.split(u8, sub_tok, ":");
+                var colon_it = mem.splitScalar(u8, sub_tok, ':');
                 const name = colon_it.first();
                 const value_txt = colon_it.next() orelse continue;
                 const value = std.fmt.parseInt(u8, value_txt, 10) catch |err| switch (err) {
diff --git a/lib/std/process.zig b/lib/std/process.zig
index c33fd92db6..80be705187 100644
--- a/lib/std/process.zig
+++ b/lib/std/process.zig
@@ -310,7 +310,7 @@ pub fn getEnvMap(allocator: Allocator) !EnvMap {
 
         for (environ) |env| {
             const pair = mem.sliceTo(env, 0);
-            var parts = mem.split(u8, pair, "=");
+            var parts = mem.splitScalar(u8, pair, '=');
             const key = parts.first();
             const value = parts.rest();
             try result.put(key, value);
diff --git a/lib/std/zig/CrossTarget.zig b/lib/std/zig/CrossTarget.zig
index 93b6d97d75..d072a4dbad 100644
--- a/lib/std/zig/CrossTarget.zig
+++ b/lib/std/zig/CrossTarget.zig
@@ -239,7 +239,7 @@ pub fn parse(args: ParseOptions) !CrossTarget {
         .dynamic_linker = DynamicLinker.init(args.dynamic_linker),
     };
 
-    var it = mem.split(u8, args.arch_os_abi, "-");
+    var it = mem.splitScalar(u8, args.arch_os_abi, '-');
     const arch_name = it.first();
     const arch_is_native = mem.eql(u8, arch_name, "native");
     if (!arch_is_native) {
@@ -257,7 +257,7 @@ pub fn parse(args: ParseOptions) !CrossTarget {
 
     const opt_abi_text = it.next();
     if (opt_abi_text) |abi_text| {
-        var abi_it = mem.split(u8, abi_text, ".");
+        var abi_it = mem.splitScalar(u8, abi_text, '.');
         const abi = std.meta.stringToEnum(Target.Abi, abi_it.first()) orelse
             return error.UnknownApplicationBinaryInterface;
         result.abi = abi;
@@ -343,7 +343,7 @@ pub fn parse(args: ParseOptions) !CrossTarget {
 /// This is intended to be used if the API user of CrossTarget needs to learn the
 /// target CPU architecture in order to fully populate `ParseOptions`.
 pub fn parseCpuArch(args: ParseOptions) ?Target.Cpu.Arch {
-    var it = mem.split(u8, args.arch_os_abi, "-");
+    var it = mem.splitScalar(u8, args.arch_os_abi, '-');
     const arch_name = it.first();
     const arch_is_native = mem.eql(u8, arch_name, "native");
     if (arch_is_native) {
@@ -645,7 +645,7 @@ pub fn updateCpuFeatures(self: CrossTarget, set: *Target.Cpu.Feature.Set) void {
 }
 
 fn parseOs(result: *CrossTarget, diags: *ParseOptions.Diagnostics, text: []const u8) !void {
-    var it = mem.split(u8, text, ".");
+    var it = mem.splitScalar(u8, text, '.');
     const os_name = it.first();
     diags.os_name = os_name;
     const os_is_native = mem.eql(u8, os_name, "native");
@@ -706,7 +706,7 @@ fn parseOs(result: *CrossTarget, diags: *ParseOptions.Diagnostics, text: []const
         .linux,
         .dragonfly,
         => {
-            var range_it = mem.split(u8, version_text, "...");
+            var range_it = mem.splitFull(u8, version_text, "...");
 
             const min_text = range_it.next().?;
             const min_ver = SemVer.parse(min_text) catch |err| switch (err) {
@@ -726,7 +726,7 @@ fn parseOs(result: *CrossTarget, diags: *ParseOptions.Diagnostics, text: []const
         },
 
         .windows => {
-            var range_it = mem.split(u8, version_text, "...");
+            var range_it = mem.splitFull(u8, version_text, "...");
 
             const min_text = range_it.first();
             const min_ver = std.meta.stringToEnum(Target.Os.WindowsVersion, min_text) orelse
diff --git a/lib/std/zig/ErrorBundle.zig b/lib/std/zig/ErrorBundle.zig
index ffe748203e..fe0ae3c0f6 100644
--- a/lib/std/zig/ErrorBundle.zig
+++ b/lib/std/zig/ErrorBundle.zig
@@ -294,7 +294,7 @@ fn renderErrorMessageToWriter(
 ///
 /// This is used to split the message in `@compileError("hello\nworld")` for example.
 fn writeMsg(eb: ErrorBundle, err_msg: ErrorMessage, stderr: anytype, indent: usize) !void {
-    var lines = std.mem.split(u8, eb.nullTerminatedString(err_msg.msg), "\n");
+    var lines = std.mem.splitScalar(u8, eb.nullTerminatedString(err_msg.msg), '\n');
     while (lines.next()) |line| {
         try stderr.writeAll(line);
         if (lines.index == null) break;
diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig
index e1ccc8e0e8..83fa68567f 100644
--- a/lib/std/zig/render.zig
+++ b/lib/std/zig/render.zig
@@ -1995,7 +1995,7 @@ fn renderArrayInit(
             if (!expr_newlines[i]) {
                 try ais.writer().writeAll(expr_text);
             } else {
-                var by_line = std.mem.split(u8, expr_text, "\n");
+                var by_line = std.mem.splitScalar(u8, expr_text, '\n');
                 var last_line_was_empty = false;
                 try ais.writer().writeAll(by_line.first());
                 while (by_line.next()) |line| {
diff --git a/lib/std/zig/system/NativeTargetInfo.zig b/lib/std/zig/system/NativeTargetInfo.zig
index 808a1bda8d..5cbc274ffd 100644
--- a/lib/std/zig/system/NativeTargetInfo.zig
+++ b/lib/std/zig/system/NativeTargetInfo.zig
@@ -556,7 +556,7 @@ fn glibcVerFromSoFile(file: fs.File) !std.builtin.Version {
     const dynstr_size = @intCast(usize, dynstr.size);
     const dynstr_bytes = buf[0..dynstr_size];
     _ = try preadMin(file, dynstr_bytes, dynstr.offset, dynstr_bytes.len);
-    var it = mem.split(u8, dynstr_bytes, &.{0});
+    var it = mem.splitScalar(u8, dynstr_bytes, 0);
     var max_ver: std.builtin.Version = .{ .major = 2, .minor = 2, .patch = 5 };
     while (it.next()) |s| {
         if (mem.startsWith(u8, s, "GLIBC_2.")) {
diff --git a/src/Autodoc.zig b/src/Autodoc.zig
index 5e1c4c7822..ce6bc2e7d9 100644
--- a/src/Autodoc.zig
+++ b/src/Autodoc.zig
@@ -4951,7 +4951,7 @@ fn findGuidePaths(self: *Autodoc, file: *File, str: []const u8) ![]const u8 {
 
     // TODO: this algo is kinda inefficient
 
-    var it = std.mem.split(u8, str, "\n");
+    var it = std.mem.splitScalar(u8, str, '\n');
     while (it.next()) |line| {
         const trimmed_line = std.mem.trim(u8, line, " ");
         if (std.mem.startsWith(u8, trimmed_line, guide_prefix)) {
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 1b6d805bb3..7599d8a80f 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -4636,7 +4636,7 @@ pub fn hasSharedLibraryExt(filename: []const u8) bool {
         return true;
     }
     // Look for .so.X, .so.X.Y, .so.X.Y.Z
-    var it = mem.split(u8, filename, ".");
+    var it = mem.splitScalar(u8, filename, '.');
     _ = it.first();
     var so_txt = it.next() orelse return false;
     while (!mem.eql(u8, so_txt, "so")) {
@@ -5016,14 +5016,14 @@ fn parseLldStderr(comp: *Compilation, comptime prefix: []const u8, stderr: []con
     defer context_lines.deinit();
 
     var current_err: ?*LldError = null;
-    var lines = mem.split(u8, stderr, std.cstr.line_sep);
+    var lines = mem.splitFull(u8, stderr, std.cstr.line_sep);
     while (lines.next()) |line| {
         if (mem.startsWith(u8, line, prefix ++ ":")) {
             if (current_err) |err| {
                 err.context_lines = try context_lines.toOwnedSlice();
             }
 
-            var split = std.mem.split(u8, line, "error: ");
+            var split = std.mem.splitFull(u8, line, "error: ");
             _ = split.first();
 
             const duped_msg = try std.fmt.allocPrint(comp.gpa, "{s}: {s}", .{ prefix, split.rest() });
diff --git a/src/libc_installation.zig b/src/libc_installation.zig
index a62da6b9c7..355c3bad8d 100644
--- a/src/libc_installation.zig
+++ b/src/libc_installation.zig
@@ -63,7 +63,7 @@ pub const LibCInstallation = struct {
         var it = std.mem.tokenizeScalar(u8, contents, '\n');
         while (it.next()) |line| {
             if (line.len == 0 or line[0] == '#') continue;
-            var line_it = std.mem.split(u8, line, "=");
+            var line_it = std.mem.splitScalar(u8, line, '=');
             const name = line_it.first();
             const value = line_it.rest();
             inline for (fields, 0..) |field, i| {
diff --git a/src/link/MachO/Dylib.zig b/src/link/MachO/Dylib.zig
index 863f1e805a..971706dae6 100644
--- a/src/link/MachO/Dylib.zig
+++ b/src/link/MachO/Dylib.zig
@@ -91,7 +91,7 @@ pub const Id = struct {
         var out: u32 = 0;
         var values: [3][]const u8 = undefined;
 
-        var split = mem.split(u8, string, ".");
+        var split = mem.splitScalar(u8, string, '.');
         var count: u4 = 0;
         while (split.next()) |value| {
             if (count > 2) {
diff --git a/src/main.zig b/src/main.zig
index c61e3f3719..82269ebc93 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -976,7 +976,7 @@ fn buildOutputType(
                         }
                     } else if (mem.eql(u8, arg, "--mod")) {
                         const info = args_iter.nextOrFatal();
-                        var info_it = mem.split(u8, info, ":");
+                        var info_it = mem.splitScalar(u8, info, ':');
                         const mod_name = info_it.next() orelse fatal("expected non-empty argument after {s}", .{arg});
                         const deps_str = info_it.next() orelse fatal("expected 'name:deps:path' after {s}", .{arg});
                         const root_src_orig = info_it.rest();
@@ -1176,7 +1176,7 @@ fn buildOutputType(
                         } else {
                             if (build_options.omit_pkg_fetching_code) unreachable;
                             // example: --listen 127.0.0.1:9000
-                            var it = std.mem.split(u8, next_arg, ":");
+                            var it = std.mem.splitScalar(u8, next_arg, ':');
                             const host = it.next().?;
                             const port_text = it.next() orelse "14735";
                             const port = std.fmt.parseInt(u16, port_text, 10) catch |err|
@@ -1673,7 +1673,7 @@ fn buildOutputType(
                     },
                     .rdynamic => rdynamic = true,
                     .wl => {
-                        var split_it = mem.split(u8, it.only_arg, ",");
+                        var split_it = mem.splitScalar(u8, it.only_arg, ',');
                         while (split_it.next()) |linker_arg| {
                             // Handle nested-joined args like `-Wl,-rpath=foo`.
                             // Must be prefixed with 1 or 2 dashes.
@@ -2183,17 +2183,17 @@ fn buildOutputType(
                     const next_arg = linker_args_it.nextOrFatal();
                     try symbol_wrap_set.put(arena, next_arg, {});
                 } else if (mem.startsWith(u8, arg, "/subsystem:")) {
-                    var split_it = mem.splitBackwards(u8, arg, ":");
+                    var split_it = mem.splitBackwardsScalar(u8, arg, ':');
                     subsystem = try parseSubSystem(split_it.first());
                 } else if (mem.startsWith(u8, arg, "/implib:")) {
-                    var split_it = mem.splitBackwards(u8, arg, ":");
+                    var split_it = mem.splitBackwardsScalar(u8, arg, ':');
                     emit_implib = .{ .yes = split_it.first() };
                     emit_implib_arg_provided = true;
                 } else if (mem.startsWith(u8, arg, "/pdb:")) {
-                    var split_it = mem.splitBackwards(u8, arg, ":");
+                    var split_it = mem.splitBackwardsScalar(u8, arg, ':');
                     pdb_out_path = split_it.first();
                 } else if (mem.startsWith(u8, arg, "/version:")) {
-                    var split_it = mem.splitBackwards(u8, arg, ":");
+                    var split_it = mem.splitBackwardsScalar(u8, arg, ':');
                     const version_arg = split_it.first();
                     version = std.builtin.Version.parse(version_arg) catch |err| {
                         fatal("unable to parse /version '{s}': {s}", .{ arg, @errorName(err) });
@@ -3534,7 +3534,7 @@ const ModuleDepIterator = struct {
     split: mem.SplitIterator(u8, .scalar),
 
     fn init(deps_str: []const u8) ModuleDepIterator {
-        return .{ .split = mem.split(u8, deps_str, ",") };
+        return .{ .split = mem.splitScalar(u8, deps_str, ',') };
     }
 
     const Dependency = struct {
diff --git a/test/src/Cases.zig b/test/src/Cases.zig
index aa5369af93..7188e20859 100644
--- a/test/src/Cases.zig
+++ b/test/src/Cases.zig
@@ -877,7 +877,7 @@ const TestManifest = struct {
             if (trimmed.len == 0) break;
 
             // Parse key=value(s)
-            var kv_it = std.mem.split(u8, trimmed, "=");
+            var kv_it = std.mem.splitScalar(u8, trimmed, '=');
             const key = kv_it.first();
             try manifest.config_map.putNoClobber(key, kv_it.next() orelse return error.MissingValuesForConfig);
         }
@@ -895,7 +895,7 @@ const TestManifest = struct {
     ) ConfigValueIterator(T) {
         const bytes = self.config_map.get(key) orelse TestManifestConfigDefaults.get(self.type, key);
         return ConfigValueIterator(T){
-            .inner = std.mem.split(u8, bytes, ","),
+            .inner = std.mem.splitScalar(u8, bytes, ','),
         };
     }
 
@@ -1399,7 +1399,7 @@ fn runOneCase(
                 // Render the expected lines into a string that we can compare verbatim.
                 var expected_generated = std.ArrayList(u8).init(arena);
 
-                var actual_line_it = std.mem.split(u8, actual_stderr.items, "\n");
+                var actual_line_it = std.mem.splitScalar(u8, actual_stderr.items, '\n');
                 for (expected_errors) |expect_line| {
                     const actual_line = actual_line_it.next() orelse {
                         try expected_generated.appendSlice(expect_line);
diff --git a/test/src/check-stack-trace.zig b/test/src/check-stack-trace.zig
index bb1db55076..9856b5738e 100644
--- a/test/src/check-stack-trace.zig
+++ b/test/src/check-stack-trace.zig
@@ -27,7 +27,7 @@ pub fn main() !void {
         var buf = std.ArrayList(u8).init(arena);
         defer buf.deinit();
         if (stderr.len != 0 and stderr[stderr.len - 1] == '\n') stderr = stderr[0 .. stderr.len - 1];
-        var it = mem.split(u8, stderr, "\n");
+        var it = mem.splitScalar(u8, stderr, '\n');
         process_lines: while (it.next()) |line| {
             if (line.len == 0) continue;
 
diff --git a/tools/gen_outline_atomics.zig b/tools/gen_outline_atomics.zig
index c04591d032..0cfdacffd1 100644
--- a/tools/gen_outline_atomics.zig
+++ b/tools/gen_outline_atomics.zig
@@ -88,7 +88,7 @@ fn writeFunction(
         \\    asm volatile (
         \\
     );
-    var iter = std.mem.split(u8, body, "\n");
+    var iter = std.mem.splitScalar(u8, body, '\n');
     while (iter.next()) |line| {
         try w.writeAll("        \\\\");
         try w.writeAll(line);
diff --git a/tools/update_crc_catalog.zig b/tools/update_crc_catalog.zig
index 034b7afc9d..2fdb9b030f 100644
--- a/tools/update_crc_catalog.zig
+++ b/tools/update_crc_catalog.zig
@@ -78,7 +78,7 @@ pub fn main() anyerror!void {
         var residue: []const u8 = undefined;
         var name: []const u8 = undefined;
 
-        var it = mem.split(u8, line, "  ");
+        var it = mem.splitFull(u8, line, "  ");
         while (it.next()) |property| {
             const i = mem.indexOf(u8, property, "=").?;
             const key = property[0..i];
diff --git a/tools/update_spirv_features.zig b/tools/update_spirv_features.zig
index 8d398f58de..766a9b7a24 100644
--- a/tools/update_spirv_features.zig
+++ b/tools/update_spirv_features.zig
@@ -19,7 +19,7 @@ const Version = struct {
     minor: u32,
 
     fn parse(str: []const u8) !Version {
-        var it = std.mem.split(u8, str, ".");
+        var it = std.mem.splitScalar(u8, str, '.');
 
         const major = it.first();
         const minor = it.next() orelse return error.InvalidVersion;
-- 
cgit v1.2.3


From 3db3cf77904e664d589287602c14168a7a63f125 Mon Sep 17 00:00:00 2001
From: Ali Chraghi 
Date: Tue, 23 May 2023 15:33:12 +0330
Subject: std.sort: add pdqsort and heapsort

---
 lib/std/compress/deflate/huffman_code.zig     |    4 +-
 lib/std/compress/zstandard/decode/fse.zig     |    2 +-
 lib/std/compress/zstandard/decode/huffman.zig |    2 +-
 lib/std/comptime_string_map.zig               |    2 +-
 lib/std/debug.zig                             |    2 +-
 lib/std/enums.zig                             |    2 +-
 lib/std/http/Headers.zig                      |    2 +-
 lib/std/mem.zig                               |   28 +
 lib/std/meta.zig                              |    2 +-
 lib/std/multi_array_list.zig                  |    7 +-
 lib/std/net.zig                               |    2 +-
 lib/std/sort.zig                              | 1471 ++++---------------------
 lib/std/sort/block.zig                        | 1066 ++++++++++++++++++
 lib/std/sort/pdq.zig                          |  331 ++++++
 src/Compilation.zig                           |    2 +-
 src/Package.zig                               |    2 +-
 src/RangeSet.zig                              |    2 +-
 src/Sema.zig                                  |    2 +-
 src/arch/x86_64/CodeGen.zig                   |    2 +-
 src/arch/x86_64/Encoding.zig                  |    2 +-
 src/codegen/c/type.zig                        |    2 +-
 src/link/Coff.zig                             |    2 +-
 src/link/MachO/Object.zig                     |    8 +-
 src/link/MachO/UnwindInfo.zig                 |    2 +-
 src/link/MachO/dyld_info/Rebase.zig           |    2 +-
 src/link/MachO/dyld_info/bind.zig             |    2 +-
 src/link/MachO/zld.zig                        |    4 +-
 src/link/Wasm.zig                             |   10 +-
 src/objcopy.zig                               |    4 +-
 test/src/Cases.zig                            |    2 +-
 tools/gen_stubs.zig                           |    2 +-
 tools/generate_JSONTestSuite.zig              |    2 +-
 tools/process_headers.zig                     |    2 +-
 tools/update-linux-headers.zig                |    2 +-
 tools/update_clang_options.zig                |    2 +-
 tools/update_cpu_features.zig                 |    8 +-
 tools/update_spirv_features.zig               |    2 +-
 37 files changed, 1702 insertions(+), 1291 deletions(-)
 create mode 100644 lib/std/sort/block.zig
 create mode 100644 lib/std/sort/pdq.zig

(limited to 'test/src')

diff --git a/lib/std/compress/deflate/huffman_code.zig b/lib/std/compress/deflate/huffman_code.zig
index 4827feb245..689ac1441a 100644
--- a/lib/std/compress/deflate/huffman_code.zig
+++ b/lib/std/compress/deflate/huffman_code.zig
@@ -93,7 +93,7 @@ pub const HuffmanEncoder = struct {
             return;
         }
         self.lfs = list;
-        sort.sort(LiteralNode, self.lfs, {}, byFreq);
+        mem.sort(LiteralNode, self.lfs, {}, byFreq);
 
         // Get the number of literals for each bit count
         var bit_count = self.bitCounts(list, max_bits);
@@ -270,7 +270,7 @@ pub const HuffmanEncoder = struct {
             var chunk = list[list.len - @intCast(u32, bits) ..];
 
             self.lns = chunk;
-            sort.sort(LiteralNode, self.lns, {}, byLiteral);
+            mem.sort(LiteralNode, self.lns, {}, byLiteral);
 
             for (chunk) |node| {
                 self.codes[node.literal] = HuffCode{
diff --git a/lib/std/compress/zstandard/decode/fse.zig b/lib/std/compress/zstandard/decode/fse.zig
index 741fd81ccc..232af39ccf 100644
--- a/lib/std/compress/zstandard/decode/fse.zig
+++ b/lib/std/compress/zstandard/decode/fse.zig
@@ -107,7 +107,7 @@ fn buildFseTable(values: []const u16, entries: []Table.Fse) !void {
                 position &= entries.len - 1;
             }
         }
-        std.sort.sort(u16, temp_states[0..probability], {}, std.sort.asc(u16));
+        std.mem.sort(u16, temp_states[0..probability], {}, std.sort.asc(u16));
         for (0..probability) |i| {
             entries[temp_states[i]] = if (i < double_state_count) Table.Fse{
                 .symbol = @intCast(u8, symbol),
diff --git a/lib/std/compress/zstandard/decode/huffman.zig b/lib/std/compress/zstandard/decode/huffman.zig
index 2914198268..f5e977d0da 100644
--- a/lib/std/compress/zstandard/decode/huffman.zig
+++ b/lib/std/compress/zstandard/decode/huffman.zig
@@ -124,7 +124,7 @@ fn assignSymbols(weight_sorted_prefixed_symbols: []LiteralsSection.HuffmanTree.P
         };
     }
 
-    std.sort.sort(
+    std.mem.sort(
         LiteralsSection.HuffmanTree.PrefixedSymbol,
         weight_sorted_prefixed_symbols,
         weights,
diff --git a/lib/std/comptime_string_map.zig b/lib/std/comptime_string_map.zig
index 7620ec7af8..e6859c32c1 100644
--- a/lib/std/comptime_string_map.zig
+++ b/lib/std/comptime_string_map.zig
@@ -28,7 +28,7 @@ pub fn ComptimeStringMap(comptime V: type, comptime kvs_list: anytype) type {
                 sorted_kvs[i] = .{ .key = kv.@"0", .value = {} };
             }
         }
-        std.sort.sort(KV, &sorted_kvs, {}, lenAsc);
+        mem.sort(KV, &sorted_kvs, {}, lenAsc);
         const min_len = sorted_kvs[0].key.len;
         const max_len = sorted_kvs[sorted_kvs.len - 1].key.len;
         var len_indexes: [max_len + 1]usize = undefined;
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index ecc1a9f0cf..005c2b5404 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -1211,7 +1211,7 @@ fn readMachODebugInfo(allocator: mem.Allocator, macho_file: File) !ModuleDebugIn
     // Even though lld emits symbols in ascending order, this debug code
     // should work for programs linked in any valid way.
     // This sort is so that we can binary search later.
-    std.sort.sort(MachoSymbol, symbols, {}, MachoSymbol.addressLessThan);
+    mem.sort(MachoSymbol, symbols, {}, MachoSymbol.addressLessThan);
 
     return ModuleDebugInfo{
         .base_address = undefined,
diff --git a/lib/std/enums.zig b/lib/std/enums.zig
index aa6edd60b1..757c616b9b 100644
--- a/lib/std/enums.zig
+++ b/lib/std/enums.zig
@@ -1314,7 +1314,7 @@ pub fn EnumIndexer(comptime E: type) type {
             }
         };
     }
-    std.sort.sort(EnumField, &fields, {}, ascByValue);
+    std.mem.sort(EnumField, &fields, {}, ascByValue);
     const min = fields[0].value;
     const max = fields[fields.len - 1].value;
     const fields_len = fields.len;
diff --git a/lib/std/http/Headers.zig b/lib/std/http/Headers.zig
index 429df9368a..fb7a9360d8 100644
--- a/lib/std/http/Headers.zig
+++ b/lib/std/http/Headers.zig
@@ -191,7 +191,7 @@ pub const Headers = struct {
 
     /// Sorts the headers in lexicographical order.
     pub fn sort(headers: *Headers) void {
-        std.sort.sort(Field, headers.list.items, {}, Field.lessThan);
+        std.mem.sort(Field, headers.list.items, {}, Field.lessThan);
         headers.rebuildIndex();
     }
 
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index 8cb2c00a3a..311c97c254 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -566,6 +566,34 @@ test "zeroInit" {
     }, nested_baz);
 }
 
+pub fn sort(
+    comptime T: type,
+    items: []T,
+    context: anytype,
+    comptime lessThanFn: fn (@TypeOf(context), lhs: T, rhs: T) bool,
+) void {
+    std.sort.block(T, items, context, lessThanFn);
+}
+
+pub fn sortUnstable(
+    comptime T: type,
+    items: []T,
+    context: anytype,
+    comptime lessThanFn: fn (@TypeOf(context), lhs: T, rhs: T) bool,
+) void {
+    std.sort.pdq(T, items, context, lessThanFn);
+}
+
+/// TODO: currently this just calls `insertionSortContext`. The block sort implementation
+/// in this file needs to be adapted to use the sort context.
+pub fn sortContext(a: usize, b: usize, context: anytype) void {
+    std.sort.insertionContext(a, b, context);
+}
+
+pub fn sortUnstableContext(a: usize, b: usize, context: anytype) void {
+    std.sort.pdqContext(a, b, context);
+}
+
 /// Compares two slices of numbers lexicographically. O(n).
 pub fn order(comptime T: type, lhs: []const T, rhs: []const T) math.Order {
     const n = math.min(lhs.len, rhs.len);
diff --git a/lib/std/meta.zig b/lib/std/meta.zig
index 8adba2439a..d0b07b934f 100644
--- a/lib/std/meta.zig
+++ b/lib/std/meta.zig
@@ -985,7 +985,7 @@ pub fn declList(comptime Namespace: type, comptime Decl: type) []const *const De
         for (decls, 0..) |decl, i| {
             array[i] = &@field(Namespace, decl.name);
         }
-        std.sort.sort(*const Decl, &array, {}, S.declNameLessThan);
+        mem.sort(*const Decl, &array, {}, S.declNameLessThan);
         return &array;
     }
 }
diff --git a/lib/std/multi_array_list.zig b/lib/std/multi_array_list.zig
index 322471bedf..44e226be33 100644
--- a/lib/std/multi_array_list.zig
+++ b/lib/std/multi_array_list.zig
@@ -160,7 +160,7 @@ pub fn MultiArrayList(comptime T: type) type {
                     return lhs.alignment > rhs.alignment;
                 }
             };
-            std.sort.sort(Data, &data, {}, Sort.lessThan);
+            mem.sort(Data, &data, {}, Sort.lessThan);
             var sizes_bytes: [fields.len]usize = undefined;
             var field_indexes: [fields.len]usize = undefined;
             for (data, 0..) |elem, i| {
@@ -488,10 +488,7 @@ pub fn MultiArrayList(comptime T: type) type {
                 }
             };
 
-            std.sort.sortContext(self.len, SortContext{
-                .sub_ctx = ctx,
-                .slice = self.slice(),
-            });
+            mem.sortContext(0, self.len, SortContext{ .sub_ctx = ctx, .slice = self.slice() });
         }
 
         fn capacityInBytes(capacity: usize) usize {
diff --git a/lib/std/net.zig b/lib/std/net.zig
index 57e50a7349..7629ecc8f7 100644
--- a/lib/std/net.zig
+++ b/lib/std/net.zig
@@ -1082,7 +1082,7 @@ fn linuxLookupName(
         key |= (MAXADDRS - @intCast(i32, i)) << DAS_ORDER_SHIFT;
         addr.sortkey = key;
     }
-    std.sort.sort(LookupAddr, addrs.items, {}, addrCmpLessThan);
+    mem.sort(LookupAddr, addrs.items, {}, addrCmpLessThan);
 }
 
 const Policy = struct {
diff --git a/lib/std/sort.zig b/lib/std/sort.zig
index 3e219b8566..bf2bf40f89 100644
--- a/lib/std/sort.zig
+++ b/lib/std/sort.zig
@@ -4,1241 +4,152 @@ const testing = std.testing;
 const mem = std.mem;
 const math = std.math;
 
-pub fn binarySearch(
-    comptime T: type,
-    key: anytype,
-    items: []const T,
-    context: anytype,
-    comptime compareFn: fn (context: @TypeOf(context), key: @TypeOf(key), mid_item: T) math.Order,
-) ?usize {
-    var left: usize = 0;
-    var right: usize = items.len;
-
-    while (left < right) {
-        // Avoid overflowing in the midpoint calculation
-        const mid = left + (right - left) / 2;
-        // Compare the key with the midpoint element
-        switch (compareFn(context, key, items[mid])) {
-            .eq => return mid,
-            .gt => left = mid + 1,
-            .lt => right = mid,
-        }
-    }
-
-    return null;
-}
-
-test "binarySearch" {
-    const S = struct {
-        fn order_u32(context: void, lhs: u32, rhs: u32) math.Order {
-            _ = context;
-            return math.order(lhs, rhs);
-        }
-        fn order_i32(context: void, lhs: i32, rhs: i32) math.Order {
-            _ = context;
-            return math.order(lhs, rhs);
-        }
-    };
-    try testing.expectEqual(
-        @as(?usize, null),
-        binarySearch(u32, @as(u32, 1), &[_]u32{}, {}, S.order_u32),
-    );
-    try testing.expectEqual(
-        @as(?usize, 0),
-        binarySearch(u32, @as(u32, 1), &[_]u32{1}, {}, S.order_u32),
-    );
-    try testing.expectEqual(
-        @as(?usize, null),
-        binarySearch(u32, @as(u32, 1), &[_]u32{0}, {}, S.order_u32),
-    );
-    try testing.expectEqual(
-        @as(?usize, null),
-        binarySearch(u32, @as(u32, 0), &[_]u32{1}, {}, S.order_u32),
-    );
-    try testing.expectEqual(
-        @as(?usize, 4),
-        binarySearch(u32, @as(u32, 5), &[_]u32{ 1, 2, 3, 4, 5 }, {}, S.order_u32),
-    );
-    try testing.expectEqual(
-        @as(?usize, 0),
-        binarySearch(u32, @as(u32, 2), &[_]u32{ 2, 4, 8, 16, 32, 64 }, {}, S.order_u32),
-    );
-    try testing.expectEqual(
-        @as(?usize, 1),
-        binarySearch(i32, @as(i32, -4), &[_]i32{ -7, -4, 0, 9, 10 }, {}, S.order_i32),
-    );
-    try testing.expectEqual(
-        @as(?usize, 3),
-        binarySearch(i32, @as(i32, 98), &[_]i32{ -100, -25, 2, 98, 99, 100 }, {}, S.order_i32),
-    );
-    const R = struct {
-        b: i32,
-        e: i32,
-
-        fn r(b: i32, e: i32) @This() {
-            return @This(){ .b = b, .e = e };
-        }
-
-        fn order(context: void, key: i32, mid_item: @This()) math.Order {
-            _ = context;
-
-            if (key < mid_item.b) {
-                return .lt;
-            }
-
-            if (key > mid_item.e) {
-                return .gt;
-            }
-
-            return .eq;
-        }
-    };
-    try testing.expectEqual(
-        @as(?usize, null),
-        binarySearch(R, @as(i32, -45), &[_]R{ R.r(-100, -50), R.r(-40, -20), R.r(-10, 20), R.r(30, 40) }, {}, R.order),
-    );
-    try testing.expectEqual(
-        @as(?usize, 2),
-        binarySearch(R, @as(i32, 10), &[_]R{ R.r(-100, -50), R.r(-40, -20), R.r(-10, 20), R.r(30, 40) }, {}, R.order),
-    );
-    try testing.expectEqual(
-        @as(?usize, 1),
-        binarySearch(R, @as(i32, -20), &[_]R{ R.r(-100, -50), R.r(-40, -20), R.r(-10, 20), R.r(30, 40) }, {}, R.order),
-    );
-}
+pub const block = @import("sort/block.zig").block;
+pub const pdq = @import("sort/pdq.zig").pdq;
+pub const pdqContext = @import("sort/pdq.zig").pdqContext;
 
 /// Stable in-place sort. O(n) best case, O(pow(n, 2)) worst case.
 /// O(1) memory (no allocator required).
 /// Sorts in ascending order with respect to the given `lessThan` function.
-/// This can be expressed in terms of `insertionSortContext` but the glue
-/// code is slightly longer than the direct implementation.
-pub fn insertionSort(
+pub fn insertion(
     comptime T: type,
     items: []T,
     context: anytype,
-    comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool,
+    comptime lessThanFn: fn (@TypeOf(context), lhs: T, rhs: T) bool,
 ) void {
-    var i: usize = 1;
-    while (i < items.len) : (i += 1) {
-        const x = items[i];
-        var j: usize = i;
-        while (j > 0 and lessThan(context, x, items[j - 1])) : (j -= 1) {
-            items[j] = items[j - 1];
+    const Context = struct {
+        items: []T,
+        sub_ctx: @TypeOf(context),
+
+        pub fn lessThan(ctx: @This(), a: usize, b: usize) bool {
+            return lessThanFn(ctx.sub_ctx, ctx.items[a], ctx.items[b]);
         }
-        items[j] = x;
-    }
+
+        pub fn swap(ctx: @This(), a: usize, b: usize) void {
+            return mem.swap(T, &ctx.items[a], &ctx.items[b]);
+        }
+    };
+    insertionContext(0, items.len, Context{ .items = items, .sub_ctx = context });
 }
 
 /// Stable in-place sort. O(n) best case, O(pow(n, 2)) worst case.
 /// O(1) memory (no allocator required).
-/// Sorts in ascending order with respect to the given `context.lessThan` function.
-pub fn insertionSortContext(len: usize, context: anytype) void {
-    var i: usize = 1;
-    while (i < len) : (i += 1) {
-        var j: usize = i;
-        while (j > 0 and context.lessThan(j, j - 1)) : (j -= 1) {
+/// Sorts in ascending order with respect to the given `lessThan` function.
+pub fn insertionContext(a: usize, b: usize, context: anytype) void {
+    var i = a + 1;
+    while (i < b) : (i += 1) {
+        var j = i;
+        while (j > a and context.lessThan(j, j - 1)) : (j -= 1) {
             context.swap(j, j - 1);
         }
     }
 }
 
-const Range = struct {
-    start: usize,
-    end: usize,
-
-    fn init(start: usize, end: usize) Range {
-        return Range{
-            .start = start,
-            .end = end,
-        };
-    }
-
-    fn length(self: Range) usize {
-        return self.end - self.start;
-    }
-};
-
-const Iterator = struct {
-    size: usize,
-    power_of_two: usize,
-    numerator: usize,
-    decimal: usize,
-    denominator: usize,
-    decimal_step: usize,
-    numerator_step: usize,
-
-    fn init(size2: usize, min_level: usize) Iterator {
-        const power_of_two = math.floorPowerOfTwo(usize, size2);
-        const denominator = power_of_two / min_level;
-        return Iterator{
-            .numerator = 0,
-            .decimal = 0,
-            .size = size2,
-            .power_of_two = power_of_two,
-            .denominator = denominator,
-            .decimal_step = size2 / denominator,
-            .numerator_step = size2 % denominator,
-        };
-    }
-
-    fn begin(self: *Iterator) void {
-        self.numerator = 0;
-        self.decimal = 0;
-    }
-
-    fn nextRange(self: *Iterator) Range {
-        const start = self.decimal;
-
-        self.decimal += self.decimal_step;
-        self.numerator += self.numerator_step;
-        if (self.numerator >= self.denominator) {
-            self.numerator -= self.denominator;
-            self.decimal += 1;
-        }
-
-        return Range{
-            .start = start,
-            .end = self.decimal,
-        };
-    }
-
-    fn finished(self: *Iterator) bool {
-        return self.decimal >= self.size;
-    }
-
-    fn nextLevel(self: *Iterator) bool {
-        self.decimal_step += self.decimal_step;
-        self.numerator_step += self.numerator_step;
-        if (self.numerator_step >= self.denominator) {
-            self.numerator_step -= self.denominator;
-            self.decimal_step += 1;
-        }
-
-        return (self.decimal_step < self.size);
-    }
-
-    fn length(self: *Iterator) usize {
-        return self.decimal_step;
-    }
-};
-
-const Pull = struct {
-    from: usize,
-    to: usize,
-    count: usize,
-    range: Range,
-};
-
-/// Stable in-place sort. O(n) best case, O(n*log(n)) worst case and average case.
+/// Unstable in-place sort. O(n*log(n)) best case, worst case and average case.
 /// O(1) memory (no allocator required).
 /// Sorts in ascending order with respect to the given `lessThan` function.
-/// Currently implemented as block sort.
-pub fn sort(
+pub fn heap(
     comptime T: type,
     items: []T,
     context: anytype,
-    comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool,
+    comptime lessThanFn: fn (@TypeOf(context), lhs: T, rhs: T) bool,
 ) void {
+    const Context = struct {
+        items: []T,
+        sub_ctx: @TypeOf(context),
 
-    // Implementation ported from https://github.com/BonzaiThePenguin/WikiSort/blob/master/WikiSort.c
-    var cache: [512]T = undefined;
-
-    if (items.len < 4) {
-        if (items.len == 3) {
-            // hard coded insertion sort
-            if (lessThan(context, items[1], items[0])) mem.swap(T, &items[0], &items[1]);
-            if (lessThan(context, items[2], items[1])) {
-                mem.swap(T, &items[1], &items[2]);
-                if (lessThan(context, items[1], items[0])) mem.swap(T, &items[0], &items[1]);
-            }
-        } else if (items.len == 2) {
-            if (lessThan(context, items[1], items[0])) mem.swap(T, &items[0], &items[1]);
+        pub fn lessThan(ctx: @This(), a: usize, b: usize) bool {
+            return lessThanFn(ctx.sub_ctx, ctx.items[a], ctx.items[b]);
         }
-        return;
-    }
-
-    // sort groups of 4-8 items at a time using an unstable sorting network,
-    // but keep track of the original item orders to force it to be stable
-    // http://pages.ripco.net/~jgamble/nw.html
-    var iterator = Iterator.init(items.len, 4);
-    while (!iterator.finished()) {
-        var order = [_]u8{ 0, 1, 2, 3, 4, 5, 6, 7 };
-        const range = iterator.nextRange();
-
-        const sliced_items = items[range.start..];
-        switch (range.length()) {
-            8 => {
-                swap(T, sliced_items, context, lessThan, &order, 0, 1);
-                swap(T, sliced_items, context, lessThan, &order, 2, 3);
-                swap(T, sliced_items, context, lessThan, &order, 4, 5);
-                swap(T, sliced_items, context, lessThan, &order, 6, 7);
-                swap(T, sliced_items, context, lessThan, &order, 0, 2);
-                swap(T, sliced_items, context, lessThan, &order, 1, 3);
-                swap(T, sliced_items, context, lessThan, &order, 4, 6);
-                swap(T, sliced_items, context, lessThan, &order, 5, 7);
-                swap(T, sliced_items, context, lessThan, &order, 1, 2);
-                swap(T, sliced_items, context, lessThan, &order, 5, 6);
-                swap(T, sliced_items, context, lessThan, &order, 0, 4);
-                swap(T, sliced_items, context, lessThan, &order, 3, 7);
-                swap(T, sliced_items, context, lessThan, &order, 1, 5);
-                swap(T, sliced_items, context, lessThan, &order, 2, 6);
-                swap(T, sliced_items, context, lessThan, &order, 1, 4);
-                swap(T, sliced_items, context, lessThan, &order, 3, 6);
-                swap(T, sliced_items, context, lessThan, &order, 2, 4);
-                swap(T, sliced_items, context, lessThan, &order, 3, 5);
-                swap(T, sliced_items, context, lessThan, &order, 3, 4);
-            },
-            7 => {
-                swap(T, sliced_items, context, lessThan, &order, 1, 2);
-                swap(T, sliced_items, context, lessThan, &order, 3, 4);
-                swap(T, sliced_items, context, lessThan, &order, 5, 6);
-                swap(T, sliced_items, context, lessThan, &order, 0, 2);
-                swap(T, sliced_items, context, lessThan, &order, 3, 5);
-                swap(T, sliced_items, context, lessThan, &order, 4, 6);
-                swap(T, sliced_items, context, lessThan, &order, 0, 1);
-                swap(T, sliced_items, context, lessThan, &order, 4, 5);
-                swap(T, sliced_items, context, lessThan, &order, 2, 6);
-                swap(T, sliced_items, context, lessThan, &order, 0, 4);
-                swap(T, sliced_items, context, lessThan, &order, 1, 5);
-                swap(T, sliced_items, context, lessThan, &order, 0, 3);
-                swap(T, sliced_items, context, lessThan, &order, 2, 5);
-                swap(T, sliced_items, context, lessThan, &order, 1, 3);
-                swap(T, sliced_items, context, lessThan, &order, 2, 4);
-                swap(T, sliced_items, context, lessThan, &order, 2, 3);
-            },
-            6 => {
-                swap(T, sliced_items, context, lessThan, &order, 1, 2);
-                swap(T, sliced_items, context, lessThan, &order, 4, 5);
-                swap(T, sliced_items, context, lessThan, &order, 0, 2);
-                swap(T, sliced_items, context, lessThan, &order, 3, 5);
-                swap(T, sliced_items, context, lessThan, &order, 0, 1);
-                swap(T, sliced_items, context, lessThan, &order, 3, 4);
-                swap(T, sliced_items, context, lessThan, &order, 2, 5);
-                swap(T, sliced_items, context, lessThan, &order, 0, 3);
-                swap(T, sliced_items, context, lessThan, &order, 1, 4);
-                swap(T, sliced_items, context, lessThan, &order, 2, 4);
-                swap(T, sliced_items, context, lessThan, &order, 1, 3);
-                swap(T, sliced_items, context, lessThan, &order, 2, 3);
-            },
-            5 => {
-                swap(T, sliced_items, context, lessThan, &order, 0, 1);
-                swap(T, sliced_items, context, lessThan, &order, 3, 4);
-                swap(T, sliced_items, context, lessThan, &order, 2, 4);
-                swap(T, sliced_items, context, lessThan, &order, 2, 3);
-                swap(T, sliced_items, context, lessThan, &order, 1, 4);
-                swap(T, sliced_items, context, lessThan, &order, 0, 3);
-                swap(T, sliced_items, context, lessThan, &order, 0, 2);
-                swap(T, sliced_items, context, lessThan, &order, 1, 3);
-                swap(T, sliced_items, context, lessThan, &order, 1, 2);
-            },
-            4 => {
-                swap(T, sliced_items, context, lessThan, &order, 0, 1);
-                swap(T, sliced_items, context, lessThan, &order, 2, 3);
-                swap(T, sliced_items, context, lessThan, &order, 0, 2);
-                swap(T, sliced_items, context, lessThan, &order, 1, 3);
-                swap(T, sliced_items, context, lessThan, &order, 1, 2);
-            },
-            else => {},
-        }
-    }
-    if (items.len < 8) return;
-
-    // then merge sort the higher levels, which can be 8-15, 16-31, 32-63, 64-127, etc.
-    while (true) {
-        // if every A and B block will fit into the cache, use a special branch
-        // specifically for merging with the cache
-        // (we use < rather than <= since the block size might be one more than
-        // iterator.length())
-        if (iterator.length() < cache.len) {
-            // if four subarrays fit into the cache, it's faster to merge both
-            // pairs of subarrays into the cache,
-            // then merge the two merged subarrays from the cache back into the original array
-            if ((iterator.length() + 1) * 4 <= cache.len and iterator.length() * 4 <= items.len) {
-                iterator.begin();
-                while (!iterator.finished()) {
-                    // merge A1 and B1 into the cache
-                    var A1 = iterator.nextRange();
-                    var B1 = iterator.nextRange();
-                    var A2 = iterator.nextRange();
-                    var B2 = iterator.nextRange();
-
-                    if (lessThan(context, items[B1.end - 1], items[A1.start])) {
-                        // the two ranges are in reverse order, so copy them in reverse order into the cache
-                        const a1_items = items[A1.start..A1.end];
-                        @memcpy(cache[B1.length()..][0..a1_items.len], a1_items);
-                        const b1_items = items[B1.start..B1.end];
-                        @memcpy(cache[0..b1_items.len], b1_items);
-                    } else if (lessThan(context, items[B1.start], items[A1.end - 1])) {
-                        // these two ranges weren't already in order, so merge them into the cache
-                        mergeInto(T, items, A1, B1, context, lessThan, cache[0..]);
-                    } else {
-                        // if A1, B1, A2, and B2 are all in order, skip doing anything else
-                        if (!lessThan(context, items[B2.start], items[A2.end - 1]) and !lessThan(context, items[A2.start], items[B1.end - 1])) continue;
-
-                        // copy A1 and B1 into the cache in the same order
-                        const a1_items = items[A1.start..A1.end];
-                        @memcpy(cache[0..a1_items.len], a1_items);
-                        const b1_items = items[B1.start..B1.end];
-                        @memcpy(cache[A1.length()..][0..b1_items.len], b1_items);
-                    }
-                    A1 = Range.init(A1.start, B1.end);
-
-                    // merge A2 and B2 into the cache
-                    if (lessThan(context, items[B2.end - 1], items[A2.start])) {
-                        // the two ranges are in reverse order, so copy them in reverse order into the cache
-                        const a2_items = items[A2.start..A2.end];
-                        @memcpy(cache[A1.length() + B2.length() ..][0..a2_items.len], a2_items);
-                        const b2_items = items[B2.start..B2.end];
-                        @memcpy(cache[A1.length()..][0..b2_items.len], b2_items);
-                    } else if (lessThan(context, items[B2.start], items[A2.end - 1])) {
-                        // these two ranges weren't already in order, so merge them into the cache
-                        mergeInto(T, items, A2, B2, context, lessThan, cache[A1.length()..]);
-                    } else {
-                        // copy A2 and B2 into the cache in the same order
-                        const a2_items = items[A2.start..A2.end];
-                        @memcpy(cache[A1.length()..][0..a2_items.len], a2_items);
-                        const b2_items = items[B2.start..B2.end];
-                        @memcpy(cache[A1.length() + A2.length() ..][0..b2_items.len], b2_items);
-                    }
-                    A2 = Range.init(A2.start, B2.end);
-
-                    // merge A1 and A2 from the cache into the items
-                    const A3 = Range.init(0, A1.length());
-                    const B3 = Range.init(A1.length(), A1.length() + A2.length());
-
-                    if (lessThan(context, cache[B3.end - 1], cache[A3.start])) {
-                        // the two ranges are in reverse order, so copy them in reverse order into the items
-                        const a3_items = cache[A3.start..A3.end];
-                        @memcpy(items[A1.start + A2.length() ..][0..a3_items.len], a3_items);
-                        const b3_items = cache[B3.start..B3.end];
-                        @memcpy(items[A1.start..][0..b3_items.len], b3_items);
-                    } else if (lessThan(context, cache[B3.start], cache[A3.end - 1])) {
-                        // these two ranges weren't already in order, so merge them back into the items
-                        mergeInto(T, cache[0..], A3, B3, context, lessThan, items[A1.start..]);
-                    } else {
-                        // copy A3 and B3 into the items in the same order
-                        const a3_items = cache[A3.start..A3.end];
-                        @memcpy(items[A1.start..][0..a3_items.len], a3_items);
-                        const b3_items = cache[B3.start..B3.end];
-                        @memcpy(items[A1.start + A1.length() ..][0..b3_items.len], b3_items);
-                    }
-                }
-
-                // we merged two levels at the same time, so we're done with this level already
-                // (iterator.nextLevel() is called again at the bottom of this outer merge loop)
-                _ = iterator.nextLevel();
-            } else {
-                iterator.begin();
-                while (!iterator.finished()) {
-                    var A = iterator.nextRange();
-                    var B = iterator.nextRange();
-
-                    if (lessThan(context, items[B.end - 1], items[A.start])) {
-                        // the two ranges are in reverse order, so a simple rotation should fix it
-                        mem.rotate(T, items[A.start..B.end], A.length());
-                    } else if (lessThan(context, items[B.start], items[A.end - 1])) {
-                        // these two ranges weren't already in order, so we'll need to merge them!
-                        const a_items = items[A.start..A.end];
-                        @memcpy(cache[0..a_items.len], a_items);
-                        mergeExternal(T, items, A, B, context, lessThan, cache[0..]);
-                    }
-                }
-            }
-        } else {
-            // this is where the in-place merge logic starts!
-            // 1. pull out two internal buffers each containing √A unique values
-            //    1a. adjust block_size and buffer_size if we couldn't find enough unique values
-            // 2. loop over the A and B subarrays within this level of the merge sort
-            // 3. break A and B into blocks of size 'block_size'
-            // 4. "tag" each of the A blocks with values from the first internal buffer
-            // 5. roll the A blocks through the B blocks and drop/rotate them where they belong
-            // 6. merge each A block with any B values that follow, using the cache or the second internal buffer
-            // 7. sort the second internal buffer if it exists
-            // 8. redistribute the two internal buffers back into the items
-            var block_size: usize = math.sqrt(iterator.length());
-            var buffer_size = iterator.length() / block_size + 1;
-
-            // as an optimization, we really only need to pull out the internal buffers once for each level of merges
-            // after that we can reuse the same buffers over and over, then redistribute it when we're finished with this level
-            var A: Range = undefined;
-            var B: Range = undefined;
-            var index: usize = 0;
-            var last: usize = 0;
-            var count: usize = 0;
-            var find: usize = 0;
-            var start: usize = 0;
-            var pull_index: usize = 0;
-            var pull = [_]Pull{
-                Pull{
-                    .from = 0,
-                    .to = 0,
-                    .count = 0,
-                    .range = Range.init(0, 0),
-                },
-                Pull{
-                    .from = 0,
-                    .to = 0,
-                    .count = 0,
-                    .range = Range.init(0, 0),
-                },
-            };
-
-            var buffer1 = Range.init(0, 0);
-            var buffer2 = Range.init(0, 0);
-
-            // find two internal buffers of size 'buffer_size' each
-            find = buffer_size + buffer_size;
-            var find_separately = false;
-
-            if (block_size <= cache.len) {
-                // if every A block fits into the cache then we won't need the second internal buffer,
-                // so we really only need to find 'buffer_size' unique values
-                find = buffer_size;
-            } else if (find > iterator.length()) {
-                // we can't fit both buffers into the same A or B subarray, so find two buffers separately
-                find = buffer_size;
-                find_separately = true;
-            }
-
-            // we need to find either a single contiguous space containing 2√A unique values (which will be split up into two buffers of size √A each),
-            // or we need to find one buffer of < 2√A unique values, and a second buffer of √A unique values,
-            // OR if we couldn't find that many unique values, we need the largest possible buffer we can get
-
-            // in the case where it couldn't find a single buffer of at least √A unique values,
-            // all of the Merge steps must be replaced by a different merge algorithm (MergeInPlace)
-            iterator.begin();
-            while (!iterator.finished()) {
-                A = iterator.nextRange();
-                B = iterator.nextRange();
-
-                // just store information about where the values will be pulled from and to,
-                // as well as how many values there are, to create the two internal buffers
-
-                // check A for the number of unique values we need to fill an internal buffer
-                // these values will be pulled out to the start of A
-                last = A.start;
-                count = 1;
-                while (count < find) : ({
-                    last = index;
-                    count += 1;
-                }) {
-                    index = findLastForward(T, items, items[last], Range.init(last + 1, A.end), context, lessThan, find - count);
-                    if (index == A.end) break;
-                }
-                index = last;
-
-                if (count >= buffer_size) {
-                    // keep track of the range within the items where we'll need to "pull out" these values to create the internal buffer
-                    pull[pull_index] = Pull{
-                        .range = Range.init(A.start, B.end),
-                        .count = count,
-                        .from = index,
-                        .to = A.start,
-                    };
-                    pull_index = 1;
-
-                    if (count == buffer_size + buffer_size) {
-                        // we were able to find a single contiguous section containing 2√A unique values,
-                        // so this section can be used to contain both of the internal buffers we'll need
-                        buffer1 = Range.init(A.start, A.start + buffer_size);
-                        buffer2 = Range.init(A.start + buffer_size, A.start + count);
-                        break;
-                    } else if (find == buffer_size + buffer_size) {
-                        // we found a buffer that contains at least √A unique values, but did not contain the full 2√A unique values,
-                        // so we still need to find a second separate buffer of at least √A unique values
-                        buffer1 = Range.init(A.start, A.start + count);
-                        find = buffer_size;
-                    } else if (block_size <= cache.len) {
-                        // we found the first and only internal buffer that we need, so we're done!
-                        buffer1 = Range.init(A.start, A.start + count);
-                        break;
-                    } else if (find_separately) {
-                        // found one buffer, but now find the other one
-                        buffer1 = Range.init(A.start, A.start + count);
-                        find_separately = false;
-                    } else {
-                        // we found a second buffer in an 'A' subarray containing √A unique values, so we're done!
-                        buffer2 = Range.init(A.start, A.start + count);
-                        break;
-                    }
-                } else if (pull_index == 0 and count > buffer1.length()) {
-                    // keep track of the largest buffer we were able to find
-                    buffer1 = Range.init(A.start, A.start + count);
-                    pull[pull_index] = Pull{
-                        .range = Range.init(A.start, B.end),
-                        .count = count,
-                        .from = index,
-                        .to = A.start,
-                    };
-                }
-
-                // check B for the number of unique values we need to fill an internal buffer
-                // these values will be pulled out to the end of B
-                last = B.end - 1;
-                count = 1;
-                while (count < find) : ({
-                    last = index - 1;
-                    count += 1;
-                }) {
-                    index = findFirstBackward(T, items, items[last], Range.init(B.start, last), context, lessThan, find - count);
-                    if (index == B.start) break;
-                }
-                index = last;
 
-                if (count >= buffer_size) {
-                    // keep track of the range within the items where we'll need to "pull out" these values to create the internal buffe
-                    pull[pull_index] = Pull{
-                        .range = Range.init(A.start, B.end),
-                        .count = count,
-                        .from = index,
-                        .to = B.end,
-                    };
-                    pull_index = 1;
-
-                    if (count == buffer_size + buffer_size) {
-                        // we were able to find a single contiguous section containing 2√A unique values,
-                        // so this section can be used to contain both of the internal buffers we'll need
-                        buffer1 = Range.init(B.end - count, B.end - buffer_size);
-                        buffer2 = Range.init(B.end - buffer_size, B.end);
-                        break;
-                    } else if (find == buffer_size + buffer_size) {
-                        // we found a buffer that contains at least √A unique values, but did not contain the full 2√A unique values,
-                        // so we still need to find a second separate buffer of at least √A unique values
-                        buffer1 = Range.init(B.end - count, B.end);
-                        find = buffer_size;
-                    } else if (block_size <= cache.len) {
-                        // we found the first and only internal buffer that we need, so we're done!
-                        buffer1 = Range.init(B.end - count, B.end);
-                        break;
-                    } else if (find_separately) {
-                        // found one buffer, but now find the other one
-                        buffer1 = Range.init(B.end - count, B.end);
-                        find_separately = false;
-                    } else {
-                        // buffer2 will be pulled out from a 'B' subarray, so if the first buffer was pulled out from the corresponding 'A' subarray,
-                        // we need to adjust the end point for that A subarray so it knows to stop redistributing its values before reaching buffer2
-                        if (pull[0].range.start == A.start) pull[0].range.end -= pull[1].count;
-
-                        // we found a second buffer in an 'B' subarray containing √A unique values, so we're done!
-                        buffer2 = Range.init(B.end - count, B.end);
-                        break;
-                    }
-                } else if (pull_index == 0 and count > buffer1.length()) {
-                    // keep track of the largest buffer we were able to find
-                    buffer1 = Range.init(B.end - count, B.end);
-                    pull[pull_index] = Pull{
-                        .range = Range.init(A.start, B.end),
-                        .count = count,
-                        .from = index,
-                        .to = B.end,
-                    };
-                }
-            }
-
-            // pull out the two ranges so we can use them as internal buffers
-            pull_index = 0;
-            while (pull_index < 2) : (pull_index += 1) {
-                const length = pull[pull_index].count;
-
-                if (pull[pull_index].to < pull[pull_index].from) {
-                    // we're pulling the values out to the left, which means the start of an A subarray
-                    index = pull[pull_index].from;
-                    count = 1;
-                    while (count < length) : (count += 1) {
-                        index = findFirstBackward(T, items, items[index - 1], Range.init(pull[pull_index].to, pull[pull_index].from - (count - 1)), context, lessThan, length - count);
-                        const range = Range.init(index + 1, pull[pull_index].from + 1);
-                        mem.rotate(T, items[range.start..range.end], range.length() - count);
-                        pull[pull_index].from = index + count;
-                    }
-                } else if (pull[pull_index].to > pull[pull_index].from) {
-                    // we're pulling values out to the right, which means the end of a B subarray
-                    index = pull[pull_index].from + 1;
-                    count = 1;
-                    while (count < length) : (count += 1) {
-                        index = findLastForward(T, items, items[index], Range.init(index, pull[pull_index].to), context, lessThan, length - count);
-                        const range = Range.init(pull[pull_index].from, index - 1);
-                        mem.rotate(T, items[range.start..range.end], count);
-                        pull[pull_index].from = index - 1 - count;
-                    }
-                }
-            }
-
-            // adjust block_size and buffer_size based on the values we were able to pull out
-            buffer_size = buffer1.length();
-            block_size = iterator.length() / buffer_size + 1;
-
-            // the first buffer NEEDS to be large enough to tag each of the evenly sized A blocks,
-            // so this was originally here to test the math for adjusting block_size above
-            // assert((iterator.length() + 1)/block_size <= buffer_size);
-
-            // now that the two internal buffers have been created, it's time to merge each A+B combination at this level of the merge sort!
-            iterator.begin();
-            while (!iterator.finished()) {
-                A = iterator.nextRange();
-                B = iterator.nextRange();
-
-                // remove any parts of A or B that are being used by the internal buffers
-                start = A.start;
-                if (start == pull[0].range.start) {
-                    if (pull[0].from > pull[0].to) {
-                        A.start += pull[0].count;
-
-                        // if the internal buffer takes up the entire A or B subarray, then there's nothing to merge
-                        // this only happens for very small subarrays, like √4 = 2, 2 * (2 internal buffers) = 4,
-                        // which also only happens when cache.len is small or 0 since it'd otherwise use MergeExternal
-                        if (A.length() == 0) continue;
-                    } else if (pull[0].from < pull[0].to) {
-                        B.end -= pull[0].count;
-                        if (B.length() == 0) continue;
-                    }
-                }
-                if (start == pull[1].range.start) {
-                    if (pull[1].from > pull[1].to) {
-                        A.start += pull[1].count;
-                        if (A.length() == 0) continue;
-                    } else if (pull[1].from < pull[1].to) {
-                        B.end -= pull[1].count;
-                        if (B.length() == 0) continue;
-                    }
-                }
-
-                if (lessThan(context, items[B.end - 1], items[A.start])) {
-                    // the two ranges are in reverse order, so a simple rotation should fix it
-                    mem.rotate(T, items[A.start..B.end], A.length());
-                } else if (lessThan(context, items[A.end], items[A.end - 1])) {
-                    // these two ranges weren't already in order, so we'll need to merge them!
-                    var findA: usize = undefined;
-
-                    // break the remainder of A into blocks. firstA is the uneven-sized first A block
-                    var blockA = Range.init(A.start, A.end);
-                    var firstA = Range.init(A.start, A.start + blockA.length() % block_size);
-
-                    // swap the first value of each A block with the value in buffer1
-                    var indexA = buffer1.start;
-                    index = firstA.end;
-                    while (index < blockA.end) : ({
-                        indexA += 1;
-                        index += block_size;
-                    }) {
-                        mem.swap(T, &items[indexA], &items[index]);
-                    }
-
-                    // start rolling the A blocks through the B blocks!
-                    // whenever we leave an A block behind, we'll need to merge the previous A block with any B blocks that follow it, so track that information as well
-                    var lastA = firstA;
-                    var lastB = Range.init(0, 0);
-                    var blockB = Range.init(B.start, B.start + math.min(block_size, B.length()));
-                    blockA.start += firstA.length();
-                    indexA = buffer1.start;
-
-                    // if the first unevenly sized A block fits into the cache, copy it there for when we go to Merge it
-                    // otherwise, if the second buffer is available, block swap the contents into that
-                    if (lastA.length() <= cache.len) {
-                        const last_a_items = items[lastA.start..lastA.end];
-                        @memcpy(cache[0..last_a_items.len], last_a_items);
-                    } else if (buffer2.length() > 0) {
-                        blockSwap(T, items, lastA.start, buffer2.start, lastA.length());
-                    }
-
-                    if (blockA.length() > 0) {
-                        while (true) {
-                            // if there's a previous B block and the first value of the minimum A block is <= the last value of the previous B block,
-                            // then drop that minimum A block behind. or if there are no B blocks left then keep dropping the remaining A blocks.
-                            if ((lastB.length() > 0 and !lessThan(context, items[lastB.end - 1], items[indexA])) or blockB.length() == 0) {
-                                // figure out where to split the previous B block, and rotate it at the split
-                                const B_split = binaryFirst(T, items, items[indexA], lastB, context, lessThan);
-                                const B_remaining = lastB.end - B_split;
-
-                                // swap the minimum A block to the beginning of the rolling A blocks
-                                var minA = blockA.start;
-                                findA = minA + block_size;
-                                while (findA < blockA.end) : (findA += block_size) {
-                                    if (lessThan(context, items[findA], items[minA])) {
-                                        minA = findA;
-                                    }
-                                }
-                                blockSwap(T, items, blockA.start, minA, block_size);
-
-                                // swap the first item of the previous A block back with its original value, which is stored in buffer1
-                                mem.swap(T, &items[blockA.start], &items[indexA]);
-                                indexA += 1;
-
-                                // locally merge the previous A block with the B values that follow it
-                                // if lastA fits into the external cache we'll use that (with MergeExternal),
-                                // or if the second internal buffer exists we'll use that (with MergeInternal),
-                                // or failing that we'll use a strictly in-place merge algorithm (MergeInPlace)
-
-                                if (lastA.length() <= cache.len) {
-                                    mergeExternal(T, items, lastA, Range.init(lastA.end, B_split), context, lessThan, cache[0..]);
-                                } else if (buffer2.length() > 0) {
-                                    mergeInternal(T, items, lastA, Range.init(lastA.end, B_split), context, lessThan, buffer2);
-                                } else {
-                                    mergeInPlace(T, items, lastA, Range.init(lastA.end, B_split), context, lessThan);
-                                }
-
-                                if (buffer2.length() > 0 or block_size <= cache.len) {
-                                    // copy the previous A block into the cache or buffer2, since that's where we need it to be when we go to merge it anyway
-                                    if (block_size <= cache.len) {
-                                        @memcpy(cache[0..block_size], items[blockA.start..][0..block_size]);
-                                    } else {
-                                        blockSwap(T, items, blockA.start, buffer2.start, block_size);
-                                    }
-
-                                    // this is equivalent to rotating, but faster
-                                    // the area normally taken up by the A block is either the contents of buffer2, or data we don't need anymore since we memcopied it
-                                    // either way, we don't need to retain the order of those items, so instead of rotating we can just block swap B to where it belongs
-                                    blockSwap(T, items, B_split, blockA.start + block_size - B_remaining, B_remaining);
-                                } else {
-                                    // we are unable to use the 'buffer2' trick to speed up the rotation operation since buffer2 doesn't exist, so perform a normal rotation
-                                    mem.rotate(T, items[B_split .. blockA.start + block_size], blockA.start - B_split);
-                                }
-
-                                // update the range for the remaining A blocks, and the range remaining from the B block after it was split
-                                lastA = Range.init(blockA.start - B_remaining, blockA.start - B_remaining + block_size);
-                                lastB = Range.init(lastA.end, lastA.end + B_remaining);
-
-                                // if there are no more A blocks remaining, this step is finished!
-                                blockA.start += block_size;
-                                if (blockA.length() == 0) break;
-                            } else if (blockB.length() < block_size) {
-                                // move the last B block, which is unevenly sized, to before the remaining A blocks, by using a rotation
-                                // the cache is disabled here since it might contain the contents of the previous A block
-                                mem.rotate(T, items[blockA.start..blockB.end], blockB.start - blockA.start);
-
-                                lastB = Range.init(blockA.start, blockA.start + blockB.length());
-                                blockA.start += blockB.length();
-                                blockA.end += blockB.length();
-                                blockB.end = blockB.start;
-                            } else {
-                                // roll the leftmost A block to the end by swapping it with the next B block
-                                blockSwap(T, items, blockA.start, blockB.start, block_size);
-                                lastB = Range.init(blockA.start, blockA.start + block_size);
-
-                                blockA.start += block_size;
-                                blockA.end += block_size;
-                                blockB.start += block_size;
-
-                                if (blockB.end > B.end - block_size) {
-                                    blockB.end = B.end;
-                                } else {
-                                    blockB.end += block_size;
-                                }
-                            }
-                        }
-                    }
-
-                    // merge the last A block with the remaining B values
-                    if (lastA.length() <= cache.len) {
-                        mergeExternal(T, items, lastA, Range.init(lastA.end, B.end), context, lessThan, cache[0..]);
-                    } else if (buffer2.length() > 0) {
-                        mergeInternal(T, items, lastA, Range.init(lastA.end, B.end), context, lessThan, buffer2);
-                    } else {
-                        mergeInPlace(T, items, lastA, Range.init(lastA.end, B.end), context, lessThan);
-                    }
-                }
-            }
-
-            // when we're finished with this merge step we should have the one
-            // or two internal buffers left over, where the second buffer is all jumbled up
-            // insertion sort the second buffer, then redistribute the buffers
-            // back into the items using the opposite process used for creating the buffer
-
-            // while an unstable sort like quicksort could be applied here, in benchmarks
-            // it was consistently slightly slower than a simple insertion sort,
-            // even for tens of millions of items. this may be because insertion
-            // sort is quite fast when the data is already somewhat sorted, like it is here
-            insertionSort(T, items[buffer2.start..buffer2.end], context, lessThan);
-
-            pull_index = 0;
-            while (pull_index < 2) : (pull_index += 1) {
-                var unique = pull[pull_index].count * 2;
-                if (pull[pull_index].from > pull[pull_index].to) {
-                    // the values were pulled out to the left, so redistribute them back to the right
-                    var buffer = Range.init(pull[pull_index].range.start, pull[pull_index].range.start + pull[pull_index].count);
-                    while (buffer.length() > 0) {
-                        index = findFirstForward(T, items, items[buffer.start], Range.init(buffer.end, pull[pull_index].range.end), context, lessThan, unique);
-                        const amount = index - buffer.end;
-                        mem.rotate(T, items[buffer.start..index], buffer.length());
-                        buffer.start += (amount + 1);
-                        buffer.end += amount;
-                        unique -= 2;
-                    }
-                } else if (pull[pull_index].from < pull[pull_index].to) {
-                    // the values were pulled out to the right, so redistribute them back to the left
-                    var buffer = Range.init(pull[pull_index].range.end - pull[pull_index].count, pull[pull_index].range.end);
-                    while (buffer.length() > 0) {
-                        index = findLastBackward(T, items, items[buffer.end - 1], Range.init(pull[pull_index].range.start, buffer.start), context, lessThan, unique);
-                        const amount = buffer.start - index;
-                        mem.rotate(T, items[index..buffer.end], amount);
-                        buffer.start -= amount;
-                        buffer.end -= (amount + 1);
-                        unique -= 2;
-                    }
-                }
-            }
+        pub fn swap(ctx: @This(), a: usize, b: usize) void {
+            return mem.swap(T, &ctx.items[a], &ctx.items[b]);
         }
-
-        // double the size of each A and B subarray that will be merged in the next level
-        if (!iterator.nextLevel()) break;
-    }
-}
-
-/// TODO currently this just calls `insertionSortContext`. The block sort implementation
-/// in this file needs to be adapted to use the sort context.
-pub fn sortContext(len: usize, context: anytype) void {
-    return insertionSortContext(len, context);
-}
-
-// merge operation without a buffer
-fn mergeInPlace(
-    comptime T: type,
-    items: []T,
-    A_arg: Range,
-    B_arg: Range,
-    context: anytype,
-    comptime lessThan: fn (@TypeOf(context), T, T) bool,
-) void {
-    if (A_arg.length() == 0 or B_arg.length() == 0) return;
-
-    // this just repeatedly binary searches into B and rotates A into position.
-    // the paper suggests using the 'rotation-based Hwang and Lin algorithm' here,
-    // but I decided to stick with this because it had better situational performance
-    //
-    // (Hwang and Lin is designed for merging subarrays of very different sizes,
-    // but WikiSort almost always uses subarrays that are roughly the same size)
-    //
-    // normally this is incredibly suboptimal, but this function is only called
-    // when none of the A or B blocks in any subarray contained 2√A unique values,
-    // which places a hard limit on the number of times this will ACTUALLY need
-    // to binary search and rotate.
-    //
-    // according to my analysis the worst case is √A rotations performed on √A items
-    // once the constant factors are removed, which ends up being O(n)
-    //
-    // again, this is NOT a general-purpose solution – it only works well in this case!
-    // kind of like how the O(n^2) insertion sort is used in some places
-
-    var A = A_arg;
-    var B = B_arg;
-
-    while (true) {
-        // find the first place in B where the first item in A needs to be inserted
-        const mid = binaryFirst(T, items, items[A.start], B, context, lessThan);
-
-        // rotate A into place
-        const amount = mid - A.end;
-        mem.rotate(T, items[A.start..mid], A.length());
-        if (B.end == mid) break;
-
-        // calculate the new A and B ranges
-        B.start = mid;
-        A = Range.init(A.start + amount, B.start);
-        A.start = binaryLast(T, items, items[A.start], A, context, lessThan);
-        if (A.length() == 0) break;
-    }
-}
-
-// merge operation using an internal buffer
-fn mergeInternal(
-    comptime T: type,
-    items: []T,
-    A: Range,
-    B: Range,
-    context: anytype,
-    comptime lessThan: fn (@TypeOf(context), T, T) bool,
-    buffer: Range,
-) void {
-    // whenever we find a value to add to the final array, swap it with the value that's already in that spot
-    // when this algorithm is finished, 'buffer' will contain its original contents, but in a different order
-    var A_count: usize = 0;
-    var B_count: usize = 0;
-    var insert: usize = 0;
-
-    if (B.length() > 0 and A.length() > 0) {
-        while (true) {
-            if (!lessThan(context, items[B.start + B_count], items[buffer.start + A_count])) {
-                mem.swap(T, &items[A.start + insert], &items[buffer.start + A_count]);
-                A_count += 1;
-                insert += 1;
-                if (A_count >= A.length()) break;
-            } else {
-                mem.swap(T, &items[A.start + insert], &items[B.start + B_count]);
-                B_count += 1;
-                insert += 1;
-                if (B_count >= B.length()) break;
-            }
-        }
-    }
-
-    // swap the remainder of A into the final array
-    blockSwap(T, items, buffer.start + A_count, A.start + insert, A.length() - A_count);
-}
-
-fn blockSwap(comptime T: type, items: []T, start1: usize, start2: usize, block_size: usize) void {
-    var index: usize = 0;
-    while (index < block_size) : (index += 1) {
-        mem.swap(T, &items[start1 + index], &items[start2 + index]);
-    }
-}
-
-// combine a linear search with a binary search to reduce the number of comparisons in situations
-// where have some idea as to how many unique values there are and where the next value might be
-fn findFirstForward(
-    comptime T: type,
-    items: []T,
-    value: T,
-    range: Range,
-    context: anytype,
-    comptime lessThan: fn (@TypeOf(context), T, T) bool,
-    unique: usize,
-) usize {
-    if (range.length() == 0) return range.start;
-    const skip = math.max(range.length() / unique, @as(usize, 1));
-
-    var index = range.start + skip;
-    while (lessThan(context, items[index - 1], value)) : (index += skip) {
-        if (index >= range.end - skip) {
-            return binaryFirst(T, items, value, Range.init(index, range.end), context, lessThan);
-        }
-    }
-
-    return binaryFirst(T, items, value, Range.init(index - skip, index), context, lessThan);
-}
-
-fn findFirstBackward(
-    comptime T: type,
-    items: []T,
-    value: T,
-    range: Range,
-    context: anytype,
-    comptime lessThan: fn (@TypeOf(context), T, T) bool,
-    unique: usize,
-) usize {
-    if (range.length() == 0) return range.start;
-    const skip = math.max(range.length() / unique, @as(usize, 1));
-
-    var index = range.end - skip;
-    while (index > range.start and !lessThan(context, items[index - 1], value)) : (index -= skip) {
-        if (index < range.start + skip) {
-            return binaryFirst(T, items, value, Range.init(range.start, index), context, lessThan);
-        }
-    }
-
-    return binaryFirst(T, items, value, Range.init(index, index + skip), context, lessThan);
-}
-
-fn findLastForward(
-    comptime T: type,
-    items: []T,
-    value: T,
-    range: Range,
-    context: anytype,
-    comptime lessThan: fn (@TypeOf(context), T, T) bool,
-    unique: usize,
-) usize {
-    if (range.length() == 0) return range.start;
-    const skip = math.max(range.length() / unique, @as(usize, 1));
-
-    var index = range.start + skip;
-    while (!lessThan(context, value, items[index - 1])) : (index += skip) {
-        if (index >= range.end - skip) {
-            return binaryLast(T, items, value, Range.init(index, range.end), context, lessThan);
-        }
-    }
-
-    return binaryLast(T, items, value, Range.init(index - skip, index), context, lessThan);
-}
-
-fn findLastBackward(
-    comptime T: type,
-    items: []T,
-    value: T,
-    range: Range,
-    context: anytype,
-    comptime lessThan: fn (@TypeOf(context), T, T) bool,
-    unique: usize,
-) usize {
-    if (range.length() == 0) return range.start;
-    const skip = math.max(range.length() / unique, @as(usize, 1));
-
-    var index = range.end - skip;
-    while (index > range.start and lessThan(context, value, items[index - 1])) : (index -= skip) {
-        if (index < range.start + skip) {
-            return binaryLast(T, items, value, Range.init(range.start, index), context, lessThan);
-        }
-    }
-
-    return binaryLast(T, items, value, Range.init(index, index + skip), context, lessThan);
+    };
+    heapContext(0, items.len, Context{ .items = items, .sub_ctx = context });
 }
 
-fn binaryFirst(
-    comptime T: type,
-    items: []T,
-    value: T,
-    range: Range,
-    context: anytype,
-    comptime lessThan: fn (@TypeOf(context), T, T) bool,
-) usize {
-    var curr = range.start;
-    var size = range.length();
-    if (range.start >= range.end) return range.end;
-    while (size > 0) {
-        const offset = size % 2;
-
-        size /= 2;
-        const mid_item = items[curr + size];
-        if (lessThan(context, mid_item, value)) {
-            curr += size + offset;
-        }
+/// Unstable in-place sort. O(n*log(n)) best case, worst case and average case.
+/// O(1) memory (no allocator required).
+/// Sorts in ascending order with respect to the given `lessThan` function.
+pub fn heapContext(a: usize, b: usize, context: anytype) void {
+    // build the heap in linear time.
+    var i = b / 2;
+    while (i > a) : (i -= 1) {
+        siftDown(i - 1, b, context);
     }
-    return curr;
-}
-
-fn binaryLast(
-    comptime T: type,
-    items: []T,
-    value: T,
-    range: Range,
-    context: anytype,
-    comptime lessThan: fn (@TypeOf(context), T, T) bool,
-) usize {
-    var curr = range.start;
-    var size = range.length();
-    if (range.start >= range.end) return range.end;
-    while (size > 0) {
-        const offset = size % 2;
 
-        size /= 2;
-        const mid_item = items[curr + size];
-        if (!lessThan(context, value, mid_item)) {
-            curr += size + offset;
-        }
+    // pop maximal elements from the heap.
+    i = b;
+    while (i > a) : (i -= 1) {
+        context.swap(a, i - 1);
+        siftDown(a, i - 1, context);
     }
-    return curr;
 }
 
-fn mergeInto(
-    comptime T: type,
-    from: []T,
-    A: Range,
-    B: Range,
-    context: anytype,
-    comptime lessThan: fn (@TypeOf(context), T, T) bool,
-    into: []T,
-) void {
-    var A_index: usize = A.start;
-    var B_index: usize = B.start;
-    const A_last = A.end;
-    const B_last = B.end;
-    var insert_index: usize = 0;
-
+fn siftDown(root: usize, n: usize, context: anytype) void {
+    var node = root;
     while (true) {
-        if (!lessThan(context, from[B_index], from[A_index])) {
-            into[insert_index] = from[A_index];
-            A_index += 1;
-            insert_index += 1;
-            if (A_index == A_last) {
-                // copy the remainder of B into the final array
-                const from_b = from[B_index..B_last];
-                @memcpy(into[insert_index..][0..from_b.len], from_b);
-                break;
-            }
-        } else {
-            into[insert_index] = from[B_index];
-            B_index += 1;
-            insert_index += 1;
-            if (B_index == B_last) {
-                // copy the remainder of A into the final array
-                const from_a = from[A_index..A_last];
-                @memcpy(into[insert_index..][0..from_a.len], from_a);
-                break;
-            }
-        }
-    }
-}
-
-fn mergeExternal(
-    comptime T: type,
-    items: []T,
-    A: Range,
-    B: Range,
-    context: anytype,
-    comptime lessThan: fn (@TypeOf(context), T, T) bool,
-    cache: []T,
-) void {
-    // A fits into the cache, so use that instead of the internal buffer
-    var A_index: usize = 0;
-    var B_index: usize = B.start;
-    var insert_index: usize = A.start;
-    const A_last = A.length();
-    const B_last = B.end;
+        var child = 2 * node + 1;
+        if (child >= n) break;
 
-    if (B.length() > 0 and A.length() > 0) {
-        while (true) {
-            if (!lessThan(context, items[B_index], cache[A_index])) {
-                items[insert_index] = cache[A_index];
-                A_index += 1;
-                insert_index += 1;
-                if (A_index == A_last) break;
-            } else {
-                items[insert_index] = items[B_index];
-                B_index += 1;
-                insert_index += 1;
-                if (B_index == B_last) break;
-            }
+        // choose the greater child.
+        if (child + 1 < n and context.lessThan(child, child + 1)) {
+            child += 1;
         }
-    }
 
-    // copy the remainder of A into the final array
-    const cache_a = cache[A_index..A_last];
-    @memcpy(items[insert_index..][0..cache_a.len], cache_a);
-}
+        // stop if the invariant holds at `node`.
+        if (!context.lessThan(node, child)) break;
 
-fn swap(
-    comptime T: type,
-    items: []T,
-    context: anytype,
-    comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
-    order: *[8]u8,
-    x: usize,
-    y: usize,
-) void {
-    if (lessThan(context, items[y], items[x]) or ((order.*)[x] > (order.*)[y] and !lessThan(context, items[x], items[y]))) {
-        mem.swap(T, &items[x], &items[y]);
-        mem.swap(u8, &(order.*)[x], &(order.*)[y]);
+        // swap `node` with the greater child,
+        // move one step down, and continue sifting.
+        context.swap(node, child);
+        node = child;
     }
 }
 
-/// Use to generate a comparator function for a given type. e.g. `sort(u8, slice, {}, comptime asc(u8))`.
+/// Use to generate a comparator function for a given type. e.g. `sort(u8, slice, {}, asc(u8))`.
 pub fn asc(comptime T: type) fn (void, T, T) bool {
-    const impl = struct {
-        fn inner(context: void, a: T, b: T) bool {
-            _ = context;
+    return struct {
+        pub fn inner(_: void, a: T, b: T) bool {
             return a < b;
         }
-    };
-
-    return impl.inner;
+    }.inner;
 }
 
-/// Use to generate a comparator function for a given type. e.g. `sort(u8, slice, {}, comptime desc(u8))`.
+/// Use to generate a comparator function for a given type. e.g. `sort(u8, slice, {}, desc(u8))`.
 pub fn desc(comptime T: type) fn (void, T, T) bool {
-    const impl = struct {
-        fn inner(context: void, a: T, b: T) bool {
-            _ = context;
+    return struct {
+        pub fn inner(_: void, a: T, b: T) bool {
             return a > b;
         }
-    };
-
-    return impl.inner;
+    }.inner;
 }
 
+const asc_u8 = asc(u8);
+const asc_i32 = asc(i32);
+const desc_u8 = desc(u8);
+const desc_i32 = desc(i32);
+
+const sort_funcs = &[_]fn (comptime type, anytype, anytype, comptime anytype) void{
+    block,
+    pdq,
+    insertion,
+    heap,
+};
+
+const IdAndValue = struct {
+    id: usize,
+    value: i32,
+
+    fn lessThan(context: void, a: IdAndValue, b: IdAndValue) bool {
+        _ = context;
+        return a.value < b.value;
+    }
+};
+
 test "stable sort" {
-    try testStableSort();
-    comptime try testStableSort();
-}
-fn testStableSort() !void {
-    var expected = [_]IdAndValue{
+    const expected = [_]IdAndValue{
         IdAndValue{ .id = 0, .value = 0 },
         IdAndValue{ .id = 1, .value = 0 },
         IdAndValue{ .id = 2, .value = 0 },
@@ -1249,6 +160,7 @@ fn testStableSort() !void {
         IdAndValue{ .id = 1, .value = 2 },
         IdAndValue{ .id = 2, .value = 2 },
     };
+
     var cases = [_][9]IdAndValue{
         [_]IdAndValue{
             IdAndValue{ .id = 0, .value = 0 },
@@ -1273,26 +185,15 @@ fn testStableSort() !void {
             IdAndValue{ .id = 2, .value = 0 },
         },
     };
+
     for (&cases) |*case| {
-        insertionSort(IdAndValue, (case.*)[0..], {}, cmpByValue);
+        block(IdAndValue, (case.*)[0..], {}, IdAndValue.lessThan);
         for (case.*, 0..) |item, i| {
             try testing.expect(item.id == expected[i].id);
             try testing.expect(item.value == expected[i].value);
         }
     }
 }
-const IdAndValue = struct {
-    id: usize,
-    value: i32,
-};
-fn cmpByValue(context: void, a: IdAndValue, b: IdAndValue) bool {
-    return asc_i32(context, a.value, b.value);
-}
-
-const asc_u8 = asc(u8);
-const asc_i32 = asc(i32);
-const desc_u8 = desc(u8);
-const desc_i32 = desc(i32);
 
 test "sort" {
     const u8cases = [_][]const []const u8{
@@ -1322,14 +223,6 @@ test "sort" {
         },
     };
 
-    for (u8cases) |case| {
-        var buf: [8]u8 = undefined;
-        const slice = buf[0..case[0].len];
-        @memcpy(slice, case[0]);
-        sort(u8, slice, {}, asc_u8);
-        try testing.expect(mem.eql(u8, slice, case[1]));
-    }
-
     const i32cases = [_][]const []const i32{
         &[_][]const i32{
             &[_]i32{},
@@ -1357,12 +250,22 @@ test "sort" {
         },
     };
 
-    for (i32cases) |case| {
-        var buf: [8]i32 = undefined;
-        const slice = buf[0..case[0].len];
-        @memcpy(slice, case[0]);
-        sort(i32, slice, {}, asc_i32);
-        try testing.expect(mem.eql(i32, slice, case[1]));
+    inline for (sort_funcs) |sortFn| {
+        for (u8cases) |case| {
+            var buf: [8]u8 = undefined;
+            const slice = buf[0..case[0].len];
+            @memcpy(slice, case[0]);
+            sortFn(u8, slice, {}, asc_u8);
+            try testing.expect(mem.eql(u8, slice, case[1]));
+        }
+
+        for (i32cases) |case| {
+            var buf: [8]i32 = undefined;
+            const slice = buf[0..case[0].len];
+            @memcpy(slice, case[0]);
+            sortFn(i32, slice, {}, asc_i32);
+            try testing.expect(mem.eql(i32, slice, case[1]));
+        }
     }
 }
 
@@ -1394,53 +297,139 @@ test "sort descending" {
         },
     };
 
-    for (rev_cases) |case| {
-        var buf: [8]i32 = undefined;
-        const slice = buf[0..case[0].len];
-        @memcpy(slice, case[0]);
-        sort(i32, slice, {}, desc_i32);
-        try testing.expect(mem.eql(i32, slice, case[1]));
+    inline for (sort_funcs) |sortFn| {
+        for (rev_cases) |case| {
+            var buf: [8]i32 = undefined;
+            const slice = buf[0..case[0].len];
+            @memcpy(slice, case[0]);
+            sortFn(i32, slice, {}, desc_i32);
+            try testing.expect(mem.eql(i32, slice, case[1]));
+        }
     }
 }
 
-test "another sort case" {
-    var arr = [_]i32{ 5, 3, 1, 2, 4 };
-    sort(i32, arr[0..], {}, asc_i32);
-
-    try testing.expect(mem.eql(i32, &arr, &[_]i32{ 1, 2, 3, 4, 5 }));
-}
-
 test "sort fuzz testing" {
     var prng = std.rand.DefaultPrng.init(0x12345678);
     const random = prng.random();
     const test_case_count = 10;
-    var i: usize = 0;
-    while (i < test_case_count) : (i += 1) {
-        try fuzzTest(random);
+
+    inline for (sort_funcs) |sortFn| {
+        var i: usize = 0;
+        while (i < test_case_count) : (i += 1) {
+            const array_size = random.intRangeLessThan(usize, 0, 1000);
+            var array = try testing.allocator.alloc(i32, array_size);
+            defer testing.allocator.free(array);
+            // populate with random data
+            for (array) |*item| {
+                item.* = random.intRangeLessThan(i32, 0, 100);
+            }
+            sortFn(i32, array, {}, asc_i32);
+            try testing.expect(isSorted(i32, array, {}, asc_i32));
+        }
     }
 }
 
-var fixed_buffer_mem: [100 * 1024]u8 = undefined;
+pub fn binarySearch(
+    comptime T: type,
+    key: anytype,
+    items: []const T,
+    context: anytype,
+    comptime compareFn: fn (context: @TypeOf(context), key: @TypeOf(key), mid_item: T) math.Order,
+) ?usize {
+    var left: usize = 0;
+    var right: usize = items.len;
 
-fn fuzzTest(rng: std.rand.Random) !void {
-    const array_size = rng.intRangeLessThan(usize, 0, 1000);
-    var array = try testing.allocator.alloc(IdAndValue, array_size);
-    defer testing.allocator.free(array);
-    // populate with random data
-    for (array, 0..) |*item, index| {
-        item.id = index;
-        item.value = rng.intRangeLessThan(i32, 0, 100);
+    while (left < right) {
+        // Avoid overflowing in the midpoint calculation
+        const mid = left + (right - left) / 2;
+        // Compare the key with the midpoint element
+        switch (compareFn(context, key, items[mid])) {
+            .eq => return mid,
+            .gt => left = mid + 1,
+            .lt => right = mid,
+        }
     }
-    sort(IdAndValue, array, {}, cmpByValue);
 
-    var index: usize = 1;
-    while (index < array.len) : (index += 1) {
-        if (array[index].value == array[index - 1].value) {
-            try testing.expect(array[index].id > array[index - 1].id);
-        } else {
-            try testing.expect(array[index].value > array[index - 1].value);
+    return null;
+}
+
+test "binarySearch" {
+    const S = struct {
+        fn order_u32(context: void, lhs: u32, rhs: u32) math.Order {
+            _ = context;
+            return math.order(lhs, rhs);
         }
-    }
+        fn order_i32(context: void, lhs: i32, rhs: i32) math.Order {
+            _ = context;
+            return math.order(lhs, rhs);
+        }
+    };
+    try testing.expectEqual(
+        @as(?usize, null),
+        binarySearch(u32, @as(u32, 1), &[_]u32{}, {}, S.order_u32),
+    );
+    try testing.expectEqual(
+        @as(?usize, 0),
+        binarySearch(u32, @as(u32, 1), &[_]u32{1}, {}, S.order_u32),
+    );
+    try testing.expectEqual(
+        @as(?usize, null),
+        binarySearch(u32, @as(u32, 1), &[_]u32{0}, {}, S.order_u32),
+    );
+    try testing.expectEqual(
+        @as(?usize, null),
+        binarySearch(u32, @as(u32, 0), &[_]u32{1}, {}, S.order_u32),
+    );
+    try testing.expectEqual(
+        @as(?usize, 4),
+        binarySearch(u32, @as(u32, 5), &[_]u32{ 1, 2, 3, 4, 5 }, {}, S.order_u32),
+    );
+    try testing.expectEqual(
+        @as(?usize, 0),
+        binarySearch(u32, @as(u32, 2), &[_]u32{ 2, 4, 8, 16, 32, 64 }, {}, S.order_u32),
+    );
+    try testing.expectEqual(
+        @as(?usize, 1),
+        binarySearch(i32, @as(i32, -4), &[_]i32{ -7, -4, 0, 9, 10 }, {}, S.order_i32),
+    );
+    try testing.expectEqual(
+        @as(?usize, 3),
+        binarySearch(i32, @as(i32, 98), &[_]i32{ -100, -25, 2, 98, 99, 100 }, {}, S.order_i32),
+    );
+    const R = struct {
+        b: i32,
+        e: i32,
+
+        fn r(b: i32, e: i32) @This() {
+            return @This(){ .b = b, .e = e };
+        }
+
+        fn order(context: void, key: i32, mid_item: @This()) math.Order {
+            _ = context;
+
+            if (key < mid_item.b) {
+                return .lt;
+            }
+
+            if (key > mid_item.e) {
+                return .gt;
+            }
+
+            return .eq;
+        }
+    };
+    try testing.expectEqual(
+        @as(?usize, null),
+        binarySearch(R, @as(i32, -45), &[_]R{ R.r(-100, -50), R.r(-40, -20), R.r(-10, 20), R.r(30, 40) }, {}, R.order),
+    );
+    try testing.expectEqual(
+        @as(?usize, 2),
+        binarySearch(R, @as(i32, 10), &[_]R{ R.r(-100, -50), R.r(-40, -20), R.r(-10, 20), R.r(30, 40) }, {}, R.order),
+    );
+    try testing.expectEqual(
+        @as(?usize, 1),
+        binarySearch(R, @as(i32, -20), &[_]R{ R.r(-100, -50), R.r(-40, -20), R.r(-10, 20), R.r(30, 40) }, {}, R.order),
+    );
 }
 
 pub fn argMin(
diff --git a/lib/std/sort/block.zig b/lib/std/sort/block.zig
new file mode 100644
index 0000000000..6c1be9c6c2
--- /dev/null
+++ b/lib/std/sort/block.zig
@@ -0,0 +1,1066 @@
+const std = @import("../std.zig");
+const sort = std.sort;
+const math = std.math;
+const mem = std.mem;
+
+const Range = struct {
+    start: usize,
+    end: usize,
+
+    fn init(start: usize, end: usize) Range {
+        return Range{
+            .start = start,
+            .end = end,
+        };
+    }
+
+    fn length(self: Range) usize {
+        return self.end - self.start;
+    }
+};
+
+const Iterator = struct {
+    size: usize,
+    power_of_two: usize,
+    numerator: usize,
+    decimal: usize,
+    denominator: usize,
+    decimal_step: usize,
+    numerator_step: usize,
+
+    fn init(size2: usize, min_level: usize) Iterator {
+        const power_of_two = math.floorPowerOfTwo(usize, size2);
+        const denominator = power_of_two / min_level;
+        return Iterator{
+            .numerator = 0,
+            .decimal = 0,
+            .size = size2,
+            .power_of_two = power_of_two,
+            .denominator = denominator,
+            .decimal_step = size2 / denominator,
+            .numerator_step = size2 % denominator,
+        };
+    }
+
+    fn begin(self: *Iterator) void {
+        self.numerator = 0;
+        self.decimal = 0;
+    }
+
+    fn nextRange(self: *Iterator) Range {
+        const start = self.decimal;
+
+        self.decimal += self.decimal_step;
+        self.numerator += self.numerator_step;
+        if (self.numerator >= self.denominator) {
+            self.numerator -= self.denominator;
+            self.decimal += 1;
+        }
+
+        return Range{
+            .start = start,
+            .end = self.decimal,
+        };
+    }
+
+    fn finished(self: *Iterator) bool {
+        return self.decimal >= self.size;
+    }
+
+    fn nextLevel(self: *Iterator) bool {
+        self.decimal_step += self.decimal_step;
+        self.numerator_step += self.numerator_step;
+        if (self.numerator_step >= self.denominator) {
+            self.numerator_step -= self.denominator;
+            self.decimal_step += 1;
+        }
+
+        return (self.decimal_step < self.size);
+    }
+
+    fn length(self: *Iterator) usize {
+        return self.decimal_step;
+    }
+};
+
+const Pull = struct {
+    from: usize,
+    to: usize,
+    count: usize,
+    range: Range,
+};
+
+/// Stable in-place sort. O(n) best case, O(n*log(n)) worst case and average case.
+/// O(1) memory (no allocator required).
+/// Sorts in ascending order with respect to the given `lessThan` function.
+///
+/// NOTE: the algorithm only work when the comparison is less-than or greater-than
+///       (See https://github.com/ziglang/zig/issues/8289)
+pub fn block(
+    comptime T: type,
+    items: []T,
+    context: anytype,
+    comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
+) void {
+
+    // Implementation ported from https://github.com/BonzaiThePenguin/WikiSort/blob/master/WikiSort.c
+    var cache: [512]T = undefined;
+
+    if (items.len < 4) {
+        if (items.len == 3) {
+            // hard coded insertion sort
+            if (lessThan(context, items[1], items[0])) mem.swap(T, &items[0], &items[1]);
+            if (lessThan(context, items[2], items[1])) {
+                mem.swap(T, &items[1], &items[2]);
+                if (lessThan(context, items[1], items[0])) mem.swap(T, &items[0], &items[1]);
+            }
+        } else if (items.len == 2) {
+            if (lessThan(context, items[1], items[0])) mem.swap(T, &items[0], &items[1]);
+        }
+        return;
+    }
+
+    // sort groups of 4-8 items at a time using an unstable sorting network,
+    // but keep track of the original item orders to force it to be stable
+    // http://pages.ripco.net/~jgamble/nw.html
+    var iterator = Iterator.init(items.len, 4);
+    while (!iterator.finished()) {
+        var order = [_]u8{ 0, 1, 2, 3, 4, 5, 6, 7 };
+        const range = iterator.nextRange();
+
+        const sliced_items = items[range.start..];
+        switch (range.length()) {
+            8 => {
+                swap(T, sliced_items, &order, 0, 1, context, lessThan);
+                swap(T, sliced_items, &order, 2, 3, context, lessThan);
+                swap(T, sliced_items, &order, 4, 5, context, lessThan);
+                swap(T, sliced_items, &order, 6, 7, context, lessThan);
+                swap(T, sliced_items, &order, 0, 2, context, lessThan);
+                swap(T, sliced_items, &order, 1, 3, context, lessThan);
+                swap(T, sliced_items, &order, 4, 6, context, lessThan);
+                swap(T, sliced_items, &order, 5, 7, context, lessThan);
+                swap(T, sliced_items, &order, 1, 2, context, lessThan);
+                swap(T, sliced_items, &order, 5, 6, context, lessThan);
+                swap(T, sliced_items, &order, 0, 4, context, lessThan);
+                swap(T, sliced_items, &order, 3, 7, context, lessThan);
+                swap(T, sliced_items, &order, 1, 5, context, lessThan);
+                swap(T, sliced_items, &order, 2, 6, context, lessThan);
+                swap(T, sliced_items, &order, 1, 4, context, lessThan);
+                swap(T, sliced_items, &order, 3, 6, context, lessThan);
+                swap(T, sliced_items, &order, 2, 4, context, lessThan);
+                swap(T, sliced_items, &order, 3, 5, context, lessThan);
+                swap(T, sliced_items, &order, 3, 4, context, lessThan);
+            },
+            7 => {
+                swap(T, sliced_items, &order, 1, 2, context, lessThan);
+                swap(T, sliced_items, &order, 3, 4, context, lessThan);
+                swap(T, sliced_items, &order, 5, 6, context, lessThan);
+                swap(T, sliced_items, &order, 0, 2, context, lessThan);
+                swap(T, sliced_items, &order, 3, 5, context, lessThan);
+                swap(T, sliced_items, &order, 4, 6, context, lessThan);
+                swap(T, sliced_items, &order, 0, 1, context, lessThan);
+                swap(T, sliced_items, &order, 4, 5, context, lessThan);
+                swap(T, sliced_items, &order, 2, 6, context, lessThan);
+                swap(T, sliced_items, &order, 0, 4, context, lessThan);
+                swap(T, sliced_items, &order, 1, 5, context, lessThan);
+                swap(T, sliced_items, &order, 0, 3, context, lessThan);
+                swap(T, sliced_items, &order, 2, 5, context, lessThan);
+                swap(T, sliced_items, &order, 1, 3, context, lessThan);
+                swap(T, sliced_items, &order, 2, 4, context, lessThan);
+                swap(T, sliced_items, &order, 2, 3, context, lessThan);
+            },
+            6 => {
+                swap(T, sliced_items, &order, 1, 2, context, lessThan);
+                swap(T, sliced_items, &order, 4, 5, context, lessThan);
+                swap(T, sliced_items, &order, 0, 2, context, lessThan);
+                swap(T, sliced_items, &order, 3, 5, context, lessThan);
+                swap(T, sliced_items, &order, 0, 1, context, lessThan);
+                swap(T, sliced_items, &order, 3, 4, context, lessThan);
+                swap(T, sliced_items, &order, 2, 5, context, lessThan);
+                swap(T, sliced_items, &order, 0, 3, context, lessThan);
+                swap(T, sliced_items, &order, 1, 4, context, lessThan);
+                swap(T, sliced_items, &order, 2, 4, context, lessThan);
+                swap(T, sliced_items, &order, 1, 3, context, lessThan);
+                swap(T, sliced_items, &order, 2, 3, context, lessThan);
+            },
+            5 => {
+                swap(T, sliced_items, &order, 0, 1, context, lessThan);
+                swap(T, sliced_items, &order, 3, 4, context, lessThan);
+                swap(T, sliced_items, &order, 2, 4, context, lessThan);
+                swap(T, sliced_items, &order, 2, 3, context, lessThan);
+                swap(T, sliced_items, &order, 1, 4, context, lessThan);
+                swap(T, sliced_items, &order, 0, 3, context, lessThan);
+                swap(T, sliced_items, &order, 0, 2, context, lessThan);
+                swap(T, sliced_items, &order, 1, 3, context, lessThan);
+                swap(T, sliced_items, &order, 1, 2, context, lessThan);
+            },
+            4 => {
+                swap(T, sliced_items, &order, 0, 1, context, lessThan);
+                swap(T, sliced_items, &order, 2, 3, context, lessThan);
+                swap(T, sliced_items, &order, 0, 2, context, lessThan);
+                swap(T, sliced_items, &order, 1, 3, context, lessThan);
+                swap(T, sliced_items, &order, 1, 2, context, lessThan);
+            },
+            else => {},
+        }
+    }
+    if (items.len < 8) return;
+
+    // then merge sort the higher levels, which can be 8-15, 16-31, 32-63, 64-127, etc.
+    while (true) {
+        // if every A and B block will fit into the cache, use a special branch
+        // specifically for merging with the cache
+        // (we use < rather than <= since the block size might be one more than
+        // iterator.length())
+        if (iterator.length() < cache.len) {
+            // if four subarrays fit into the cache, it's faster to merge both
+            // pairs of subarrays into the cache,
+            // then merge the two merged subarrays from the cache back into the original array
+            if ((iterator.length() + 1) * 4 <= cache.len and iterator.length() * 4 <= items.len) {
+                iterator.begin();
+                while (!iterator.finished()) {
+                    // merge A1 and B1 into the cache
+                    var A1 = iterator.nextRange();
+                    var B1 = iterator.nextRange();
+                    var A2 = iterator.nextRange();
+                    var B2 = iterator.nextRange();
+
+                    if (lessThan(context, items[B1.end - 1], items[A1.start])) {
+                        // the two ranges are in reverse order, so copy them in reverse order into the cache
+                        const a1_items = items[A1.start..A1.end];
+                        @memcpy(cache[B1.length()..][0..a1_items.len], a1_items);
+                        const b1_items = items[B1.start..B1.end];
+                        @memcpy(cache[0..b1_items.len], b1_items);
+                    } else if (lessThan(context, items[B1.start], items[A1.end - 1])) {
+                        // these two ranges weren't already in order, so merge them into the cache
+                        mergeInto(T, items, A1, B1, cache[0..], context, lessThan);
+                    } else {
+                        // if A1, B1, A2, and B2 are all in order, skip doing anything else
+                        if (!lessThan(context, items[B2.start], items[A2.end - 1]) and !lessThan(context, items[A2.start], items[B1.end - 1])) continue;
+
+                        // copy A1 and B1 into the cache in the same order
+                        const a1_items = items[A1.start..A1.end];
+                        @memcpy(cache[0..a1_items.len], a1_items);
+                        const b1_items = items[B1.start..B1.end];
+                        @memcpy(cache[A1.length()..][0..b1_items.len], b1_items);
+                    }
+                    A1 = Range.init(A1.start, B1.end);
+
+                    // merge A2 and B2 into the cache
+                    if (lessThan(context, items[B2.end - 1], items[A2.start])) {
+                        // the two ranges are in reverse order, so copy them in reverse order into the cache
+                        const a2_items = items[A2.start..A2.end];
+                        @memcpy(cache[A1.length() + B2.length() ..][0..a2_items.len], a2_items);
+                        const b2_items = items[B2.start..B2.end];
+                        @memcpy(cache[A1.length()..][0..b2_items.len], b2_items);
+                    } else if (lessThan(context, items[B2.start], items[A2.end - 1])) {
+                        // these two ranges weren't already in order, so merge them into the cache
+                        mergeInto(T, items, A2, B2, cache[A1.length()..], context, lessThan);
+                    } else {
+                        // copy A2 and B2 into the cache in the same order
+                        const a2_items = items[A2.start..A2.end];
+                        @memcpy(cache[A1.length()..][0..a2_items.len], a2_items);
+                        const b2_items = items[B2.start..B2.end];
+                        @memcpy(cache[A1.length() + A2.length() ..][0..b2_items.len], b2_items);
+                    }
+                    A2 = Range.init(A2.start, B2.end);
+
+                    // merge A1 and A2 from the cache into the items
+                    const A3 = Range.init(0, A1.length());
+                    const B3 = Range.init(A1.length(), A1.length() + A2.length());
+
+                    if (lessThan(context, cache[B3.end - 1], cache[A3.start])) {
+                        // the two ranges are in reverse order, so copy them in reverse order into the items
+                        const a3_items = cache[A3.start..A3.end];
+                        @memcpy(items[A1.start + A2.length() ..][0..a3_items.len], a3_items);
+                        const b3_items = cache[B3.start..B3.end];
+                        @memcpy(items[A1.start..][0..b3_items.len], b3_items);
+                    } else if (lessThan(context, cache[B3.start], cache[A3.end - 1])) {
+                        // these two ranges weren't already in order, so merge them back into the items
+                        mergeInto(T, cache[0..], A3, B3, items[A1.start..], context, lessThan);
+                    } else {
+                        // copy A3 and B3 into the items in the same order
+                        const a3_items = cache[A3.start..A3.end];
+                        @memcpy(items[A1.start..][0..a3_items.len], a3_items);
+                        const b3_items = cache[B3.start..B3.end];
+                        @memcpy(items[A1.start + A1.length() ..][0..b3_items.len], b3_items);
+                    }
+                }
+
+                // we merged two levels at the same time, so we're done with this level already
+                // (iterator.nextLevel() is called again at the bottom of this outer merge loop)
+                _ = iterator.nextLevel();
+            } else {
+                iterator.begin();
+                while (!iterator.finished()) {
+                    var A = iterator.nextRange();
+                    var B = iterator.nextRange();
+
+                    if (lessThan(context, items[B.end - 1], items[A.start])) {
+                        // the two ranges are in reverse order, so a simple rotation should fix it
+                        mem.rotate(T, items[A.start..B.end], A.length());
+                    } else if (lessThan(context, items[B.start], items[A.end - 1])) {
+                        // these two ranges weren't already in order, so we'll need to merge them!
+                        const a_items = items[A.start..A.end];
+                        @memcpy(cache[0..a_items.len], a_items);
+                        mergeExternal(T, items, A, B, cache[0..], context, lessThan);
+                    }
+                }
+            }
+        } else {
+            // this is where the in-place merge logic starts!
+            // 1. pull out two internal buffers each containing √A unique values
+            //    1a. adjust block_size and buffer_size if we couldn't find enough unique values
+            // 2. loop over the A and B subarrays within this level of the merge sort
+            // 3. break A and B into blocks of size 'block_size'
+            // 4. "tag" each of the A blocks with values from the first internal buffer
+            // 5. roll the A blocks through the B blocks and drop/rotate them where they belong
+            // 6. merge each A block with any B values that follow, using the cache or the second internal buffer
+            // 7. sort the second internal buffer if it exists
+            // 8. redistribute the two internal buffers back into the items
+            var block_size: usize = math.sqrt(iterator.length());
+            var buffer_size = iterator.length() / block_size + 1;
+
+            // as an optimization, we really only need to pull out the internal buffers once for each level of merges
+            // after that we can reuse the same buffers over and over, then redistribute it when we're finished with this level
+            var A: Range = undefined;
+            var B: Range = undefined;
+            var index: usize = 0;
+            var last: usize = 0;
+            var count: usize = 0;
+            var find: usize = 0;
+            var start: usize = 0;
+            var pull_index: usize = 0;
+            var pull = [_]Pull{
+                Pull{
+                    .from = 0,
+                    .to = 0,
+                    .count = 0,
+                    .range = Range.init(0, 0),
+                },
+                Pull{
+                    .from = 0,
+                    .to = 0,
+                    .count = 0,
+                    .range = Range.init(0, 0),
+                },
+            };
+
+            var buffer1 = Range.init(0, 0);
+            var buffer2 = Range.init(0, 0);
+
+            // find two internal buffers of size 'buffer_size' each
+            find = buffer_size + buffer_size;
+            var find_separately = false;
+
+            if (block_size <= cache.len) {
+                // if every A block fits into the cache then we won't need the second internal buffer,
+                // so we really only need to find 'buffer_size' unique values
+                find = buffer_size;
+            } else if (find > iterator.length()) {
+                // we can't fit both buffers into the same A or B subarray, so find two buffers separately
+                find = buffer_size;
+                find_separately = true;
+            }
+
+            // we need to find either a single contiguous space containing 2√A unique values (which will be split up into two buffers of size √A each),
+            // or we need to find one buffer of < 2√A unique values, and a second buffer of √A unique values,
+            // OR if we couldn't find that many unique values, we need the largest possible buffer we can get
+
+            // in the case where it couldn't find a single buffer of at least √A unique values,
+            // all of the Merge steps must be replaced by a different merge algorithm (MergeInPlace)
+            iterator.begin();
+            while (!iterator.finished()) {
+                A = iterator.nextRange();
+                B = iterator.nextRange();
+
+                // just store information about where the values will be pulled from and to,
+                // as well as how many values there are, to create the two internal buffers
+
+                // check A for the number of unique values we need to fill an internal buffer
+                // these values will be pulled out to the start of A
+                last = A.start;
+                count = 1;
+                while (count < find) : ({
+                    last = index;
+                    count += 1;
+                }) {
+                    index = findLastForward(T, items, items[last], Range.init(last + 1, A.end), find - count, context, lessThan);
+                    if (index == A.end) break;
+                }
+                index = last;
+
+                if (count >= buffer_size) {
+                    // keep track of the range within the items where we'll need to "pull out" these values to create the internal buffer
+                    pull[pull_index] = Pull{
+                        .range = Range.init(A.start, B.end),
+                        .count = count,
+                        .from = index,
+                        .to = A.start,
+                    };
+                    pull_index = 1;
+
+                    if (count == buffer_size + buffer_size) {
+                        // we were able to find a single contiguous section containing 2√A unique values,
+                        // so this section can be used to contain both of the internal buffers we'll need
+                        buffer1 = Range.init(A.start, A.start + buffer_size);
+                        buffer2 = Range.init(A.start + buffer_size, A.start + count);
+                        break;
+                    } else if (find == buffer_size + buffer_size) {
+                        // we found a buffer that contains at least √A unique values, but did not contain the full 2√A unique values,
+                        // so we still need to find a second separate buffer of at least √A unique values
+                        buffer1 = Range.init(A.start, A.start + count);
+                        find = buffer_size;
+                    } else if (block_size <= cache.len) {
+                        // we found the first and only internal buffer that we need, so we're done!
+                        buffer1 = Range.init(A.start, A.start + count);
+                        break;
+                    } else if (find_separately) {
+                        // found one buffer, but now find the other one
+                        buffer1 = Range.init(A.start, A.start + count);
+                        find_separately = false;
+                    } else {
+                        // we found a second buffer in an 'A' subarray containing √A unique values, so we're done!
+                        buffer2 = Range.init(A.start, A.start + count);
+                        break;
+                    }
+                } else if (pull_index == 0 and count > buffer1.length()) {
+                    // keep track of the largest buffer we were able to find
+                    buffer1 = Range.init(A.start, A.start + count);
+                    pull[pull_index] = Pull{
+                        .range = Range.init(A.start, B.end),
+                        .count = count,
+                        .from = index,
+                        .to = A.start,
+                    };
+                }
+
+                // check B for the number of unique values we need to fill an internal buffer
+                // these values will be pulled out to the end of B
+                last = B.end - 1;
+                count = 1;
+                while (count < find) : ({
+                    last = index - 1;
+                    count += 1;
+                }) {
+                    index = findFirstBackward(T, items, items[last], Range.init(B.start, last), find - count, context, lessThan);
+                    if (index == B.start) break;
+                }
+                index = last;
+
+                if (count >= buffer_size) {
+                    // keep track of the range within the items where we'll need to "pull out" these values to create the internal buffer
+                    pull[pull_index] = Pull{
+                        .range = Range.init(A.start, B.end),
+                        .count = count,
+                        .from = index,
+                        .to = B.end,
+                    };
+                    pull_index = 1;
+
+                    if (count == buffer_size + buffer_size) {
+                        // we were able to find a single contiguous section containing 2√A unique values,
+                        // so this section can be used to contain both of the internal buffers we'll need
+                        buffer1 = Range.init(B.end - count, B.end - buffer_size);
+                        buffer2 = Range.init(B.end - buffer_size, B.end);
+                        break;
+                    } else if (find == buffer_size + buffer_size) {
+                        // we found a buffer that contains at least √A unique values, but did not contain the full 2√A unique values,
+                        // so we still need to find a second separate buffer of at least √A unique values
+                        buffer1 = Range.init(B.end - count, B.end);
+                        find = buffer_size;
+                    } else if (block_size <= cache.len) {
+                        // we found the first and only internal buffer that we need, so we're done!
+                        buffer1 = Range.init(B.end - count, B.end);
+                        break;
+                    } else if (find_separately) {
+                        // found one buffer, but now find the other one
+                        buffer1 = Range.init(B.end - count, B.end);
+                        find_separately = false;
+                    } else {
+                        // buffer2 will be pulled out from a 'B' subarray, so if the first buffer was pulled out from the corresponding 'A' subarray,
+                        // we need to adjust the end point for that A subarray so it knows to stop redistributing its values before reaching buffer2
+                        if (pull[0].range.start == A.start) pull[0].range.end -= pull[1].count;
+
+                        // we found a second buffer in a 'B' subarray containing √A unique values, so we're done!
+                        buffer2 = Range.init(B.end - count, B.end);
+                        break;
+                    }
+                } else if (pull_index == 0 and count > buffer1.length()) {
+                    // keep track of the largest buffer we were able to find
+                    buffer1 = Range.init(B.end - count, B.end);
+                    pull[pull_index] = Pull{
+                        .range = Range.init(A.start, B.end),
+                        .count = count,
+                        .from = index,
+                        .to = B.end,
+                    };
+                }
+            }
+
+            // pull out the two ranges so we can use them as internal buffers
+            pull_index = 0;
+            while (pull_index < 2) : (pull_index += 1) {
+                const length = pull[pull_index].count;
+
+                if (pull[pull_index].to < pull[pull_index].from) {
+                    // we're pulling the values out to the left, which means the start of an A subarray
+                    index = pull[pull_index].from;
+                    count = 1;
+                    while (count < length) : (count += 1) {
+                        index = findFirstBackward(T, items, items[index - 1], Range.init(pull[pull_index].to, pull[pull_index].from - (count - 1)), length - count, context, lessThan);
+                        const range = Range.init(index + 1, pull[pull_index].from + 1);
+                        mem.rotate(T, items[range.start..range.end], range.length() - count);
+                        pull[pull_index].from = index + count;
+                    }
+                } else if (pull[pull_index].to > pull[pull_index].from) {
+                    // we're pulling values out to the right, which means the end of a B subarray
+                    index = pull[pull_index].from + 1;
+                    count = 1;
+                    while (count < length) : (count += 1) {
+                        index = findLastForward(T, items, items[index], Range.init(index, pull[pull_index].to), length - count, context, lessThan);
+                        const range = Range.init(pull[pull_index].from, index - 1);
+                        mem.rotate(T, items[range.start..range.end], count);
+                        pull[pull_index].from = index - 1 - count;
+                    }
+                }
+            }
+
+            // adjust block_size and buffer_size based on the values we were able to pull out
+            buffer_size = buffer1.length();
+            block_size = iterator.length() / buffer_size + 1;
+
+            // the first buffer NEEDS to be large enough to tag each of the evenly sized A blocks,
+            // so this was originally here to test the math for adjusting block_size above
+            // assert((iterator.length() + 1)/block_size <= buffer_size);
+
+            // now that the two internal buffers have been created, it's time to merge each A+B combination at this level of the merge sort!
+            iterator.begin();
+            while (!iterator.finished()) {
+                A = iterator.nextRange();
+                B = iterator.nextRange();
+
+                // remove any parts of A or B that are being used by the internal buffers
+                start = A.start;
+                if (start == pull[0].range.start) {
+                    if (pull[0].from > pull[0].to) {
+                        A.start += pull[0].count;
+
+                        // if the internal buffer takes up the entire A or B subarray, then there's nothing to merge
+                        // this only happens for very small subarrays, like √4 = 2, 2 * (2 internal buffers) = 4,
+                        // which also only happens when cache.len is small or 0 since it'd otherwise use MergeExternal
+                        if (A.length() == 0) continue;
+                    } else if (pull[0].from < pull[0].to) {
+                        B.end -= pull[0].count;
+                        if (B.length() == 0) continue;
+                    }
+                }
+                if (start == pull[1].range.start) {
+                    if (pull[1].from > pull[1].to) {
+                        A.start += pull[1].count;
+                        if (A.length() == 0) continue;
+                    } else if (pull[1].from < pull[1].to) {
+                        B.end -= pull[1].count;
+                        if (B.length() == 0) continue;
+                    }
+                }
+
+                if (lessThan(context, items[B.end - 1], items[A.start])) {
+                    // the two ranges are in reverse order, so a simple rotation should fix it
+                    mem.rotate(T, items[A.start..B.end], A.length());
+                } else if (lessThan(context, items[A.end], items[A.end - 1])) {
+                    // these two ranges weren't already in order, so we'll need to merge them!
+                    var findA: usize = undefined;
+
+                    // break the remainder of A into blocks. firstA is the uneven-sized first A block
+                    var blockA = Range.init(A.start, A.end);
+                    var firstA = Range.init(A.start, A.start + blockA.length() % block_size);
+
+                    // swap the first value of each A block with the value in buffer1
+                    var indexA = buffer1.start;
+                    index = firstA.end;
+                    while (index < blockA.end) : ({
+                        indexA += 1;
+                        index += block_size;
+                    }) {
+                        mem.swap(T, &items[indexA], &items[index]);
+                    }
+
+                    // start rolling the A blocks through the B blocks!
+                    // whenever we leave an A block behind, we'll need to merge the previous A block with any B blocks that follow it, so track that information as well
+                    var lastA = firstA;
+                    var lastB = Range.init(0, 0);
+                    var blockB = Range.init(B.start, B.start + math.min(block_size, B.length()));
+                    blockA.start += firstA.length();
+                    indexA = buffer1.start;
+
+                    // if the first unevenly sized A block fits into the cache, copy it there for when we go to Merge it
+                    // otherwise, if the second buffer is available, block swap the contents into that
+                    if (lastA.length() <= cache.len) {
+                        const last_a_items = items[lastA.start..lastA.end];
+                        @memcpy(cache[0..last_a_items.len], last_a_items);
+                    } else if (buffer2.length() > 0) {
+                        blockSwap(T, items, lastA.start, buffer2.start, lastA.length());
+                    }
+
+                    if (blockA.length() > 0) {
+                        while (true) {
+                            // if there's a previous B block and the first value of the minimum A block is <= the last value of the previous B block,
+                            // then drop that minimum A block behind. or if there are no B blocks left then keep dropping the remaining A blocks.
+                            if ((lastB.length() > 0 and !lessThan(context, items[lastB.end - 1], items[indexA])) or blockB.length() == 0) {
+                                // figure out where to split the previous B block, and rotate it at the split
+                                const B_split = binaryFirst(T, items, items[indexA], lastB, context, lessThan);
+                                const B_remaining = lastB.end - B_split;
+
+                                // swap the minimum A block to the beginning of the rolling A blocks
+                                var minA = blockA.start;
+                                findA = minA + block_size;
+                                while (findA < blockA.end) : (findA += block_size) {
+                                    if (lessThan(context, items[findA], items[minA])) {
+                                        minA = findA;
+                                    }
+                                }
+                                blockSwap(T, items, blockA.start, minA, block_size);
+
+                                // swap the first item of the previous A block back with its original value, which is stored in buffer1
+                                mem.swap(T, &items[blockA.start], &items[indexA]);
+                                indexA += 1;
+
+                                // locally merge the previous A block with the B values that follow it
+                                // if lastA fits into the external cache we'll use that (with MergeExternal),
+                                // or if the second internal buffer exists we'll use that (with MergeInternal),
+                                // or failing that we'll use a strictly in-place merge algorithm (MergeInPlace)
+
+                                if (lastA.length() <= cache.len) {
+                                    mergeExternal(T, items, lastA, Range.init(lastA.end, B_split), cache[0..], context, lessThan);
+                                } else if (buffer2.length() > 0) {
+                                    mergeInternal(T, items, lastA, Range.init(lastA.end, B_split), buffer2, context, lessThan);
+                                } else {
+                                    mergeInPlace(T, items, lastA, Range.init(lastA.end, B_split), context, lessThan);
+                                }
+
+                                if (buffer2.length() > 0 or block_size <= cache.len) {
+                                    // copy the previous A block into the cache or buffer2, since that's where we need it to be when we go to merge it anyway
+                                    if (block_size <= cache.len) {
+                                        @memcpy(cache[0..block_size], items[blockA.start..][0..block_size]);
+                                    } else {
+                                        blockSwap(T, items, blockA.start, buffer2.start, block_size);
+                                    }
+
+                                    // this is equivalent to rotating, but faster
+                                    // the area normally taken up by the A block is either the contents of buffer2, or data we don't need anymore since we memcopied it
+                                    // either way, we don't need to retain the order of those items, so instead of rotating we can just block swap B to where it belongs
+                                    blockSwap(T, items, B_split, blockA.start + block_size - B_remaining, B_remaining);
+                                } else {
+                                    // we are unable to use the 'buffer2' trick to speed up the rotation operation since buffer2 doesn't exist, so perform a normal rotation
+                                    mem.rotate(T, items[B_split .. blockA.start + block_size], blockA.start - B_split);
+                                }
+
+                                // update the range for the remaining A blocks, and the range remaining from the B block after it was split
+                                lastA = Range.init(blockA.start - B_remaining, blockA.start - B_remaining + block_size);
+                                lastB = Range.init(lastA.end, lastA.end + B_remaining);
+
+                                // if there are no more A blocks remaining, this step is finished!
+                                blockA.start += block_size;
+                                if (blockA.length() == 0) break;
+                            } else if (blockB.length() < block_size) {
+                                // move the last B block, which is unevenly sized, to before the remaining A blocks, by using a rotation
+                                // the cache is disabled here since it might contain the contents of the previous A block
+                                mem.rotate(T, items[blockA.start..blockB.end], blockB.start - blockA.start);
+
+                                lastB = Range.init(blockA.start, blockA.start + blockB.length());
+                                blockA.start += blockB.length();
+                                blockA.end += blockB.length();
+                                blockB.end = blockB.start;
+                            } else {
+                                // roll the leftmost A block to the end by swapping it with the next B block
+                                blockSwap(T, items, blockA.start, blockB.start, block_size);
+                                lastB = Range.init(blockA.start, blockA.start + block_size);
+
+                                blockA.start += block_size;
+                                blockA.end += block_size;
+                                blockB.start += block_size;
+
+                                if (blockB.end > B.end - block_size) {
+                                    blockB.end = B.end;
+                                } else {
+                                    blockB.end += block_size;
+                                }
+                            }
+                        }
+                    }
+
+                    // merge the last A block with the remaining B values
+                    if (lastA.length() <= cache.len) {
+                        mergeExternal(T, items, lastA, Range.init(lastA.end, B.end), cache[0..], context, lessThan);
+                    } else if (buffer2.length() > 0) {
+                        mergeInternal(T, items, lastA, Range.init(lastA.end, B.end), buffer2, context, lessThan);
+                    } else {
+                        mergeInPlace(T, items, lastA, Range.init(lastA.end, B.end), context, lessThan);
+                    }
+                }
+            }
+
+            // when we're finished with this merge step we should have the one
+            // or two internal buffers left over, where the second buffer is all jumbled up
+            // insertion sort the second buffer, then redistribute the buffers
+            // back into the items using the opposite process used for creating the buffer
+
+            // while an unstable sort like quicksort could be applied here, in benchmarks
+            // it was consistently slightly slower than a simple insertion sort,
+            // even for tens of millions of items. this may be because insertion
+            // sort is quite fast when the data is already somewhat sorted, like it is here
+            sort.insertion(T, items[buffer2.start..buffer2.end], context, lessThan);
+
+            pull_index = 0;
+            while (pull_index < 2) : (pull_index += 1) {
+                var unique = pull[pull_index].count * 2;
+                if (pull[pull_index].from > pull[pull_index].to) {
+                    // the values were pulled out to the left, so redistribute them back to the right
+                    var buffer = Range.init(pull[pull_index].range.start, pull[pull_index].range.start + pull[pull_index].count);
+                    while (buffer.length() > 0) {
+                        index = findFirstForward(T, items, items[buffer.start], Range.init(buffer.end, pull[pull_index].range.end), unique, context, lessThan);
+                        const amount = index - buffer.end;
+                        mem.rotate(T, items[buffer.start..index], buffer.length());
+                        buffer.start += (amount + 1);
+                        buffer.end += amount;
+                        unique -= 2;
+                    }
+                } else if (pull[pull_index].from < pull[pull_index].to) {
+                    // the values were pulled out to the right, so redistribute them back to the left
+                    var buffer = Range.init(pull[pull_index].range.end - pull[pull_index].count, pull[pull_index].range.end);
+                    while (buffer.length() > 0) {
+                        index = findLastBackward(T, items, items[buffer.end - 1], Range.init(pull[pull_index].range.start, buffer.start), unique, context, lessThan);
+                        const amount = buffer.start - index;
+                        mem.rotate(T, items[index..buffer.end], amount);
+                        buffer.start -= amount;
+                        buffer.end -= (amount + 1);
+                        unique -= 2;
+                    }
+                }
+            }
+        }
+
+        // double the size of each A and B subarray that will be merged in the next level
+        if (!iterator.nextLevel()) break;
+    }
+}
+// merge operation without a buffer
+fn mergeInPlace(
+    comptime T: type,
+    items: []T,
+    A_arg: Range,
+    B_arg: Range,
+    context: anytype,
+    comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
+) void {
+    if (A_arg.length() == 0 or B_arg.length() == 0) return;
+
+    // this just repeatedly binary searches into B and rotates A into position.
+    // the paper suggests using the 'rotation-based Hwang and Lin algorithm' here,
+    // but I decided to stick with this because it had better situational performance
+    //
+    // (Hwang and Lin is designed for merging subarrays of very different sizes,
+    // but WikiSort almost always uses subarrays that are roughly the same size)
+    //
+    // normally this is incredibly suboptimal, but this function is only called
+    // when none of the A or B blocks in any subarray contained 2√A unique values,
+    // which places a hard limit on the number of times this will ACTUALLY need
+    // to binary search and rotate.
+    //
+    // according to my analysis the worst case is √A rotations performed on √A items
+    // once the constant factors are removed, which ends up being O(n)
+    //
+    // again, this is NOT a general-purpose solution – it only works well in this case!
+    // kind of like how the O(n^2) insertion sort is used in some places
+
+    var A = A_arg;
+    var B = B_arg;
+
+    while (true) {
+        // find the first place in B where the first item in A needs to be inserted
+        const mid = binaryFirst(T, items, items[A.start], B, context, lessThan);
+
+        // rotate A into place
+        const amount = mid - A.end;
+        mem.rotate(T, items[A.start..mid], A.length());
+        if (B.end == mid) break;
+
+        // calculate the new A and B ranges
+        B.start = mid;
+        A = Range.init(A.start + amount, B.start);
+        A.start = binaryLast(T, items, items[A.start], A, context, lessThan);
+        if (A.length() == 0) break;
+    }
+}
+
+// merge operation using an internal buffer
+fn mergeInternal(
+    comptime T: type,
+    items: []T,
+    A: Range,
+    B: Range,
+    buffer: Range,
+    context: anytype,
+    comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
+) void {
+    // whenever we find a value to add to the final array, swap it with the value that's already in that spot
+    // when this algorithm is finished, 'buffer' will contain its original contents, but in a different order
+    var A_count: usize = 0;
+    var B_count: usize = 0;
+    var insert: usize = 0;
+
+    if (B.length() > 0 and A.length() > 0) {
+        while (true) {
+            if (!lessThan(context, items[B.start + B_count], items[buffer.start + A_count])) {
+                mem.swap(T, &items[A.start + insert], &items[buffer.start + A_count]);
+                A_count += 1;
+                insert += 1;
+                if (A_count >= A.length()) break;
+            } else {
+                mem.swap(T, &items[A.start + insert], &items[B.start + B_count]);
+                B_count += 1;
+                insert += 1;
+                if (B_count >= B.length()) break;
+            }
+        }
+    }
+
+    // swap the remainder of A into the final array
+    blockSwap(T, items, buffer.start + A_count, A.start + insert, A.length() - A_count);
+}
+
+fn blockSwap(comptime T: type, items: []T, start1: usize, start2: usize, block_size: usize) void {
+    var index: usize = 0;
+    while (index < block_size) : (index += 1) {
+        mem.swap(T, &items[start1 + index], &items[start2 + index]);
+    }
+}
+
+// combine a linear search with a binary search to reduce the number of comparisons in situations
+// where we have some idea as to how many unique values there are and where the next value might be
+fn findFirstForward(
+    comptime T: type,
+    items: []T,
+    value: T,
+    range: Range,
+    unique: usize,
+    context: anytype,
+    comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
+) usize {
+    if (range.length() == 0) return range.start;
+    const skip = math.max(range.length() / unique, @as(usize, 1));
+
+    var index = range.start + skip;
+    while (lessThan(context, items[index - 1], value)) : (index += skip) {
+        if (index >= range.end - skip) {
+            return binaryFirst(T, items, value, Range.init(index, range.end), context, lessThan);
+        }
+    }
+
+    return binaryFirst(T, items, value, Range.init(index - skip, index), context, lessThan);
+}
+
+fn findFirstBackward(
+    comptime T: type,
+    items: []T,
+    value: T,
+    range: Range,
+    unique: usize,
+    context: anytype,
+    comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
+) usize {
+    if (range.length() == 0) return range.start;
+    const skip = math.max(range.length() / unique, @as(usize, 1));
+
+    var index = range.end - skip;
+    while (index > range.start and !lessThan(context, items[index - 1], value)) : (index -= skip) {
+        if (index < range.start + skip) {
+            return binaryFirst(T, items, value, Range.init(range.start, index), context, lessThan);
+        }
+    }
+
+    return binaryFirst(T, items, value, Range.init(index, index + skip), context, lessThan);
+}
+
+fn findLastForward(
+    comptime T: type,
+    items: []T,
+    value: T,
+    range: Range,
+    unique: usize,
+    context: anytype,
+    comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
+) usize {
+    if (range.length() == 0) return range.start;
+    const skip = math.max(range.length() / unique, @as(usize, 1));
+
+    var index = range.start + skip;
+    while (!lessThan(context, value, items[index - 1])) : (index += skip) {
+        if (index >= range.end - skip) {
+            return binaryLast(T, items, value, Range.init(index, range.end), context, lessThan);
+        }
+    }
+
+    return binaryLast(T, items, value, Range.init(index - skip, index), context, lessThan);
+}
+
+fn findLastBackward(
+    comptime T: type,
+    items: []T,
+    value: T,
+    range: Range,
+    unique: usize,
+    context: anytype,
+    comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
+) usize {
+    if (range.length() == 0) return range.start;
+    const skip = math.max(range.length() / unique, @as(usize, 1));
+
+    var index = range.end - skip;
+    while (index > range.start and lessThan(context, value, items[index - 1])) : (index -= skip) {
+        if (index < range.start + skip) {
+            return binaryLast(T, items, value, Range.init(range.start, index), context, lessThan);
+        }
+    }
+
+    return binaryLast(T, items, value, Range.init(index, index + skip), context, lessThan);
+}
+
+fn binaryFirst(
+    comptime T: type,
+    items: []T,
+    value: T,
+    range: Range,
+    context: anytype,
+    comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
+) usize {
+    var curr = range.start;
+    var size = range.length();
+    if (range.start >= range.end) return range.end;
+    while (size > 0) {
+        const offset = size % 2;
+
+        size /= 2;
+        const mid_item = items[curr + size];
+        if (lessThan(context, mid_item, value)) {
+            curr += size + offset;
+        }
+    }
+    return curr;
+}
+
+fn binaryLast(
+    comptime T: type,
+    items: []T,
+    value: T,
+    range: Range,
+    context: anytype,
+    comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
+) usize {
+    var curr = range.start;
+    var size = range.length();
+    if (range.start >= range.end) return range.end;
+    while (size > 0) {
+        const offset = size % 2;
+
+        size /= 2;
+        const mid_item = items[curr + size];
+        if (!lessThan(context, value, mid_item)) {
+            curr += size + offset;
+        }
+    }
+    return curr;
+}
+
+fn mergeInto(
+    comptime T: type,
+    from: []T,
+    A: Range,
+    B: Range,
+    into: []T,
+    context: anytype,
+    comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
+) void {
+    var A_index: usize = A.start;
+    var B_index: usize = B.start;
+    const A_last = A.end;
+    const B_last = B.end;
+    var insert_index: usize = 0;
+
+    while (true) {
+        if (!lessThan(context, from[B_index], from[A_index])) {
+            into[insert_index] = from[A_index];
+            A_index += 1;
+            insert_index += 1;
+            if (A_index == A_last) {
+                // copy the remainder of B into the final array
+                const from_b = from[B_index..B_last];
+                @memcpy(into[insert_index..][0..from_b.len], from_b);
+                break;
+            }
+        } else {
+            into[insert_index] = from[B_index];
+            B_index += 1;
+            insert_index += 1;
+            if (B_index == B_last) {
+                // copy the remainder of A into the final array
+                const from_a = from[A_index..A_last];
+                @memcpy(into[insert_index..][0..from_a.len], from_a);
+                break;
+            }
+        }
+    }
+}
+
+fn mergeExternal(
+    comptime T: type,
+    items: []T,
+    A: Range,
+    B: Range,
+    cache: []T,
+    context: anytype,
+    comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
+) void {
+    // A fits into the cache, so use that instead of the internal buffer
+    var A_index: usize = 0;
+    var B_index: usize = B.start;
+    var insert_index: usize = A.start;
+    const A_last = A.length();
+    const B_last = B.end;
+
+    if (B.length() > 0 and A.length() > 0) {
+        while (true) {
+            if (!lessThan(context, items[B_index], cache[A_index])) {
+                items[insert_index] = cache[A_index];
+                A_index += 1;
+                insert_index += 1;
+                if (A_index == A_last) break;
+            } else {
+                items[insert_index] = items[B_index];
+                B_index += 1;
+                insert_index += 1;
+                if (B_index == B_last) break;
+            }
+        }
+    }
+
+    // copy the remainder of A into the final array
+    const cache_a = cache[A_index..A_last];
+    @memcpy(items[insert_index..][0..cache_a.len], cache_a);
+}
+
+fn swap(
+    comptime T: type,
+    items: []T,
+    order: *[8]u8,
+    x: usize,
+    y: usize,
+    context: anytype,
+    comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
+) void {
+    if (lessThan(context, items[y], items[x]) or ((order.*)[x] > (order.*)[y] and !lessThan(context, items[x], items[y]))) {
+        mem.swap(T, &items[x], &items[y]);
+        mem.swap(u8, &(order.*)[x], &(order.*)[y]);
+    }
+}
diff --git a/lib/std/sort/pdq.zig b/lib/std/sort/pdq.zig
new file mode 100644
index 0000000000..e7042b0c76
--- /dev/null
+++ b/lib/std/sort/pdq.zig
@@ -0,0 +1,331 @@
+const std = @import("../std.zig");
+const sort = std.sort;
+const mem = std.mem;
+const math = std.math;
+const testing = std.testing;
+
+/// Unstable in-place sort. n best case, n*log(n) worst case and average case.
+/// log(n) memory (no allocator required).
+///
+/// Sorts in ascending order with respect to the given `lessThan` function.
+pub fn pdq(
+    comptime T: type,
+    items: []T,
+    context: anytype,
+    comptime lessThanFn: fn (context: @TypeOf(context), lhs: T, rhs: T) bool,
+) void {
+    const Context = struct {
+        items: []T,
+        sub_ctx: @TypeOf(context),
+
+        pub fn lessThan(ctx: @This(), a: usize, b: usize) bool {
+            return lessThanFn(ctx.sub_ctx, ctx.items[a], ctx.items[b]);
+        }
+
+        pub fn swap(ctx: @This(), a: usize, b: usize) void {
+            return mem.swap(T, &ctx.items[a], &ctx.items[b]);
+        }
+    };
+    pdqContext(0, items.len, Context{ .items = items, .sub_ctx = context });
+}
+
+const Hint = enum {
+    increasing,
+    decreasing,
+    unknown,
+};
+
+/// Unstable in-place sort. O(n) best case, O(n*log(n)) worst case and average case.
+/// O(log(n)) memory (no allocator required).
+///
+/// Sorts in ascending order with respect to the given `lessThan` function.
+pub fn pdqContext(a: usize, b: usize, context: anytype) void {
+    // slices of up to this length get sorted using insertion sort.
+    const max_insertion = 24;
+    // number of allowed imbalanced partitions before switching to heap sort.
+    const max_limit = std.math.floorPowerOfTwo(usize, b) + 1;
+
+    // set upper bound on stack memory usage.
+    const Range = struct { a: usize, b: usize, limit: usize };
+    const stack_size = math.log2(math.maxInt(usize) + 1);
+    var stack: [stack_size]Range = undefined;
+    var range = Range{ .a = a, .b = b, .limit = max_limit };
+    var top: usize = 0;
+
+    while (true) {
+        var was_balanced = true;
+        var was_partitioned = true;
+
+        while (true) {
+            const len = range.b - range.a;
+
+            // very short slices get sorted using insertion sort.
+            if (len <= max_insertion) {
+                break sort.insertionContext(range.a, range.b, context);
+            }
+
+            // if too many bad pivot choices were made, simply fall back to heapsort in order to
+            // guarantee O(n*log(n)) worst-case.
+            if (range.limit == 0) {
+                break sort.heapContext(range.a, range.b, context);
+            }
+
+            // if the last partitioning was imbalanced, try breaking patterns in the slice by shuffling
+            // some elements around. Hopefully we'll choose a better pivot this time.
+            if (!was_balanced) {
+                breakPatterns(range.a, range.b, context);
+                range.limit -= 1;
+            }
+
+            // choose a pivot and try guessing whether the slice is already sorted.
+            var pivot: usize = 0;
+            var hint = chosePivot(range.a, range.b, &pivot, context);
+
+            if (hint == .decreasing) {
+                // The maximum number of swaps was performed, so items are likely
+                // in reverse order. Reverse it to make sorting faster.
+                reverseRange(range.a, range.b, context);
+                pivot = (range.b - 1) - (pivot - range.a);
+                hint = .increasing;
+            }
+
+            // if the last partitioning was decently balanced and didn't shuffle elements, and if pivot
+            // selection predicts the slice is likely already sorted...
+            if (was_balanced and was_partitioned and hint == .increasing) {
+                // try identifying several out-of-order elements and shifting them to correct
+                // positions. If the slice ends up being completely sorted, we're done.
+                if (partialInsertionSort(range.a, range.b, context)) break;
+            }
+
+            // if the chosen pivot is equal to the predecessor, then it's the smallest element in the
+            // slice. Partition the slice into elements equal to and elements greater than the pivot.
+            // This case is usually hit when the slice contains many duplicate elements.
+            if (range.a > 0 and !context.lessThan(range.a - 1, pivot)) {
+                range.a = partitionEqual(range.a, range.b, pivot, context);
+                continue;
+            }
+
+            // partition the slice.
+            var mid = pivot;
+            was_partitioned = partition(range.a, range.b, &mid, context);
+
+            const left_len = mid - range.a;
+            const right_len = range.b - mid;
+            const balanced_threshold = len / 8;
+            if (left_len < right_len) {
+                was_balanced = left_len >= balanced_threshold;
+                stack[top] = .{ .a = range.a, .b = mid, .limit = range.limit };
+                top += 1;
+                range.a = mid + 1;
+            } else {
+                was_balanced = right_len >= balanced_threshold;
+                stack[top] = .{ .a = mid + 1, .b = range.b, .limit = range.limit };
+                top += 1;
+                range.b = mid;
+            }
+        }
+
+        top = math.sub(usize, top, 1) catch break;
+        range = stack[top];
+    }
+}
+
+/// partitions `items[a..b]` into elements smaller than `items[pivot]`,
+/// followed by elements greater than or equal to `items[pivot]`.
+///
+/// sets the new pivot.
+/// returns `true` if already partitioned.
+fn partition(a: usize, b: usize, pivot: *usize, context: anytype) bool {
+    // move pivot to the first place
+    context.swap(a, pivot.*);
+
+    var i = a + 1;
+    var j = b - 1;
+
+    while (i <= j and context.lessThan(i, a)) i += 1;
+    while (i <= j and !context.lessThan(j, a)) j -= 1;
+
+    // check if items are already partitioned (no item to swap)
+    if (i > j) {
+        // put pivot back to the middle
+        context.swap(j, a);
+        pivot.* = j;
+        return true;
+    }
+
+    context.swap(i, j);
+    i += 1;
+    j -= 1;
+
+    while (true) {
+        while (i <= j and context.lessThan(i, a)) i += 1;
+        while (i <= j and !context.lessThan(j, a)) j -= 1;
+        if (i > j) break;
+
+        context.swap(i, j);
+        i += 1;
+        j -= 1;
+    }
+
+    // TODO: Enable the BlockQuicksort optimization
+
+    context.swap(j, a);
+    pivot.* = j;
+    return false;
+}
+
+/// partitions items into elements equal to `items[pivot]`
+/// followed by elements greater than `items[pivot]`.
+///
+/// it assumed that `items[a..b]` does not contain elements smaller than the `items[pivot]`.
+fn partitionEqual(a: usize, b: usize, pivot: usize, context: anytype) usize {
+    // move pivot to the first place
+    context.swap(a, pivot);
+
+    var i = a + 1;
+    var j = b - 1;
+
+    while (true) {
+        while (i <= j and !context.lessThan(a, i)) i += 1;
+        while (i <= j and context.lessThan(a, j)) j -= 1;
+        if (i > j) break;
+
+        context.swap(i, j);
+        i += 1;
+        j -= 1;
+    }
+
+    return i;
+}
+
+/// partially sorts a slice by shifting several out-of-order elements around.
+///
+/// returns `true` if the slice is sorted at the end. This function is `O(n)` worst-case.
+fn partialInsertionSort(a: usize, b: usize, context: anytype) bool {
+    @setCold(true);
+
+    // maximum number of adjacent out-of-order pairs that will get shifted
+    const max_steps = 5;
+    // if the slice is shorter than this, don't shift any elements
+    const shortest_shifting = 50;
+
+    var i = a + 1;
+    for (0..max_steps) |_| {
+        // find the next pair of adjacent out-of-order elements.
+        while (i < b and !context.lessThan(i, i - 1)) i += 1;
+
+        // are we done?
+        if (i == b) return true;
+
+        // don't shift elements on short arrays, that has a performance cost.
+        if (b - a < shortest_shifting) return false;
+
+        // swap the found pair of elements. This puts them in correct order.
+        context.swap(i, i - 1);
+
+        // shift the smaller element to the left.
+        if (i - a >= 2) {
+            var j = i - 1;
+            while (j >= 1) : (j -= 1) {
+                if (!context.lessThan(j, j - 1)) break;
+                context.swap(j, j - 1);
+            }
+        }
+
+        // shift the greater element to the right.
+        if (b - i >= 2) {
+            var j = i + 1;
+            while (j < b) : (j += 1) {
+                if (!context.lessThan(j, j - 1)) break;
+                context.swap(j, j - 1);
+            }
+        }
+    }
+
+    return false;
+}
+
+fn breakPatterns(a: usize, b: usize, context: anytype) void {
+    @setCold(true);
+
+    const len = b - a;
+    if (len < 8) return;
+
+    var rand = @intCast(u64, len);
+    const modulus = math.ceilPowerOfTwoAssert(u64, len);
+
+    var i = a + (len / 4) * 2 - 1;
+    while (i <= a + (len / 4) * 2 + 1) : (i += 1) {
+        // xorshift64
+        rand ^= rand << 13;
+        rand ^= rand >> 7;
+        rand ^= rand << 17;
+
+        var other = @intCast(usize, rand & (modulus - 1));
+        if (other >= len) other -= len;
+        context.swap(i, a + other);
+    }
+}
+
+/// chooses a pivot in `items[a..b]`.
+/// returns a hint of `.increasing` when `items[a..b]` seems to be already sorted (no swaps were needed).
+fn chosePivot(a: usize, b: usize, pivot: *usize, context: anytype) Hint {
+    // minimum length for using the Tukey's ninther method
+    const shortest_ninther = 50;
+    // max_swaps is the maximum number of swaps allowed in this function
+    const max_swaps = 4 * 3;
+
+    var len = b - a;
+    var i = a + len / 4 * 1;
+    var j = a + len / 4 * 2;
+    var k = a + len / 4 * 3;
+    var swaps: usize = 0;
+
+    if (len >= 8) {
+        if (len >= shortest_ninther) {
+            // find medians in the neighborhoods of `i`, `j` and `k`
+            i = sort3(i - 1, i, i + 1, &swaps, context);
+            j = sort3(j - 1, j, j + 1, &swaps, context);
+            k = sort3(k - 1, k, k + 1, &swaps, context);
+        }
+
+        // find the median among `i`, `j` and `k`
+        j = sort3(i, j, k, &swaps, context);
+    }
+
+    pivot.* = j;
+    return switch (swaps) {
+        0 => .increasing,
+        max_swaps => .decreasing,
+        else => .unknown,
+    };
+}
+
+fn sort3(a: usize, b: usize, c: usize, swaps: *usize, context: anytype) usize {
+    if (context.lessThan(b, a)) {
+        swaps.* += 1;
+        context.swap(b, a);
+    }
+
+    if (context.lessThan(c, b)) {
+        swaps.* += 1;
+        context.swap(c, b);
+    }
+
+    if (context.lessThan(b, a)) {
+        swaps.* += 1;
+        context.swap(b, a);
+    }
+
+    return b;
+}
+
+fn reverseRange(a: usize, b: usize, context: anytype) void {
+    var i = a;
+    var j = b - 1;
+    while (i < j) {
+        context.swap(i, j);
+        i += 1;
+        j -= 1;
+    }
+}
diff --git a/src/Compilation.zig b/src/Compilation.zig
index b485800329..cc2e2a916b 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -672,7 +672,7 @@ fn addPackageTableToCacheHash(
         }
     }
     // Sort the slice by package name
-    std.sort.sort(Package.Table.KV, packages, {}, struct {
+    mem.sort(Package.Table.KV, packages, {}, struct {
         fn lessThan(_: void, lhs: Package.Table.KV, rhs: Package.Table.KV) bool {
             return std.mem.lessThan(u8, lhs.key, rhs.key);
         }
diff --git a/src/Package.zig b/src/Package.zig
index f28aac885d..cde3f38e28 100644
--- a/src/Package.zig
+++ b/src/Package.zig
@@ -672,7 +672,7 @@ fn computePackageHash(
         }
     }
 
-    std.sort.sort(*HashedFile, all_files.items, {}, HashedFile.lessThan);
+    mem.sort(*HashedFile, all_files.items, {}, HashedFile.lessThan);
 
     var hasher = Manifest.Hash.init(.{});
     var any_failures = false;
diff --git a/src/RangeSet.zig b/src/RangeSet.zig
index 7e501f984b..aa051ff424 100644
--- a/src/RangeSet.zig
+++ b/src/RangeSet.zig
@@ -60,7 +60,7 @@ pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool {
     if (self.ranges.items.len == 0)
         return false;
 
-    std.sort.sort(Range, self.ranges.items, LessThanContext{
+    std.mem.sort(Range, self.ranges.items, LessThanContext{
         .ty = ty,
         .module = self.module,
     }, lessThan);
diff --git a/src/Sema.zig b/src/Sema.zig
index 9178392d27..76c9891467 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -30979,7 +30979,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
                         ctx.struct_obj.fields.values()[b].ty.abiAlignment(target);
                 }
             };
-            std.sort.sort(u32, optimized_order, AlignSortContext{
+            mem.sort(u32, optimized_order, AlignSortContext{
                 .struct_obj = struct_obj,
                 .sema = sema,
             }, AlignSortContext.lessThan);
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index e835242379..55a9694fd3 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -2176,7 +2176,7 @@ fn computeFrameLayout(self: *Self) !FrameLayout {
             }
         };
         const sort_context = SortContext{ .frame_align = frame_align };
-        std.sort.sort(FrameIndex, stack_frame_order, sort_context, SortContext.lessThan);
+        mem.sort(FrameIndex, stack_frame_order, sort_context, SortContext.lessThan);
     }
 
     const call_frame_align = frame_align[@enumToInt(FrameIndex.call_frame)];
diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig
index 6ed0aeeff4..625a5283b9 100644
--- a/src/arch/x86_64/Encoding.zig
+++ b/src/arch/x86_64/Encoding.zig
@@ -770,7 +770,7 @@ const mnemonic_to_encodings_map = init: {
     @setEvalBranchQuota(30_000);
     const encodings = @import("encodings.zig");
     var entries = encodings.table;
-    std.sort.sort(encodings.Entry, &entries, {}, struct {
+    std.mem.sort(encodings.Entry, &entries, {}, struct {
         fn lessThan(_: void, lhs: encodings.Entry, rhs: encodings.Entry) bool {
             return @enumToInt(lhs[0]) < @enumToInt(rhs[0]);
         }
diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig
index 892914ea3d..8494ae7353 100644
--- a/src/codegen/c/type.zig
+++ b/src/codegen/c/type.zig
@@ -1292,7 +1292,7 @@ pub const CType = extern union {
         fn sortFields(self: *@This(), fields_len: usize) []Payload.Fields.Field {
             const Field = Payload.Fields.Field;
             const slice = self.storage.anon.fields[0..fields_len];
-            std.sort.sort(Field, slice, {}, struct {
+            mem.sort(Field, slice, {}, struct {
                 fn before(_: void, lhs: Field, rhs: Field) bool {
                     return lhs.alignas.@"align" > rhs.alignas.@"align";
                 }
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index 81e8c57bdd..01f18a73b3 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -1837,7 +1837,7 @@ fn writeBaseRelocations(self: *Coff) !void {
             pages.appendAssumeCapacity(page.*);
         }
     }
-    std.sort.sort(u32, pages.items, {}, std.sort.asc(u32));
+    mem.sort(u32, pages.items, {}, std.sort.asc(u32));
 
     var buffer = std.ArrayList(u8).init(gpa);
     defer buffer.deinit();
diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig
index 7cc6f78c7d..b218fdbd2d 100644
--- a/src/link/MachO/Object.zig
+++ b/src/link/MachO/Object.zig
@@ -209,7 +209,7 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch)
     // afterwards by address in each group. Normally, dysymtab should
     // be enough to guarantee the sort, but turns out not every compiler
     // is kind enough to specify the symbols in the correct order.
-    sort.sort(SymbolAtIndex, sorted_all_syms.items, self, SymbolAtIndex.lessThan);
+    mem.sort(SymbolAtIndex, sorted_all_syms.items, self, SymbolAtIndex.lessThan);
 
     var prev_sect_id: u8 = 0;
     var section_index_lookup: ?Entry = null;
@@ -462,7 +462,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void {
         sorted_sections[id] = .{ .header = sect, .id = @intCast(u8, id) };
     }
 
-    std.sort.sort(SortedSection, sorted_sections, {}, sectionLessThanByAddress);
+    mem.sort(SortedSection, sorted_sections, {}, sectionLessThanByAddress);
 
     var sect_sym_index: u32 = 0;
     for (sorted_sections) |section| {
@@ -663,7 +663,7 @@ fn parseRelocs(self: *Object, gpa: Allocator, sect_id: u8) !void {
     if (self.getSourceRelocs(section)) |relocs| {
         try self.relocations.ensureUnusedCapacity(gpa, relocs.len);
         self.relocations.appendUnalignedSliceAssumeCapacity(relocs);
-        std.sort.sort(macho.relocation_info, self.relocations.items[start..], {}, relocGreaterThan);
+        mem.sort(macho.relocation_info, self.relocations.items[start..], {}, relocGreaterThan);
     }
     self.section_relocs_lookup.items[sect_id] = start;
 }
@@ -901,7 +901,7 @@ pub fn parseDataInCode(self: *Object, gpa: Allocator) !void {
     const dice = @ptrCast([*]align(1) const macho.data_in_code_entry, self.contents.ptr + cmd.dataoff)[0..ndice];
     try self.data_in_code.ensureTotalCapacityPrecise(gpa, dice.len);
     self.data_in_code.appendUnalignedSliceAssumeCapacity(dice);
-    std.sort.sort(macho.data_in_code_entry, self.data_in_code.items, {}, diceLessThan);
+    mem.sort(macho.data_in_code_entry, self.data_in_code.items, {}, diceLessThan);
 }
 
 fn diceLessThan(ctx: void, lhs: macho.data_in_code_entry, rhs: macho.data_in_code_entry) bool {
diff --git a/src/link/MachO/UnwindInfo.zig b/src/link/MachO/UnwindInfo.zig
index 0071657f8b..8d2a36be9d 100644
--- a/src/link/MachO/UnwindInfo.zig
+++ b/src/link/MachO/UnwindInfo.zig
@@ -411,7 +411,7 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
         }
 
         var slice = common_encodings_counts.values();
-        std.sort.sort(CommonEncWithCount, slice, {}, CommonEncWithCount.greaterThan);
+        mem.sort(CommonEncWithCount, slice, {}, CommonEncWithCount.greaterThan);
 
         var i: u7 = 0;
         while (i < slice.len) : (i += 1) {
diff --git a/src/link/MachO/dyld_info/Rebase.zig b/src/link/MachO/dyld_info/Rebase.zig
index 1d7a0c94c0..5b386a8136 100644
--- a/src/link/MachO/dyld_info/Rebase.zig
+++ b/src/link/MachO/dyld_info/Rebase.zig
@@ -39,7 +39,7 @@ pub fn finalize(rebase: *Rebase, gpa: Allocator) !void {
 
     const writer = rebase.buffer.writer(gpa);
 
-    std.sort.sort(Entry, rebase.entries.items, {}, Entry.lessThan);
+    std.mem.sort(Entry, rebase.entries.items, {}, Entry.lessThan);
 
     try setTypePointer(writer);
 
diff --git a/src/link/MachO/dyld_info/bind.zig b/src/link/MachO/dyld_info/bind.zig
index 98a693920a..14ce1587aa 100644
--- a/src/link/MachO/dyld_info/bind.zig
+++ b/src/link/MachO/dyld_info/bind.zig
@@ -47,7 +47,7 @@ pub fn Bind(comptime Ctx: type, comptime Target: type) type {
 
             const writer = self.buffer.writer(gpa);
 
-            std.sort.sort(Entry, self.entries.items, ctx, Entry.lessThan);
+            std.mem.sort(Entry, self.entries.items, ctx, Entry.lessThan);
 
             var start: usize = 0;
             var seg_id: ?u8 = null;
diff --git a/src/link/MachO/zld.zig b/src/link/MachO/zld.zig
index 7e6870ecbc..b151aee19b 100644
--- a/src/link/MachO/zld.zig
+++ b/src/link/MachO/zld.zig
@@ -1441,7 +1441,7 @@ pub const Zld = struct {
             }
         }
 
-        std.sort.sort(Section, sections.items, {}, SortSection.lessThan);
+        mem.sort(Section, sections.items, {}, SortSection.lessThan);
 
         self.sections.shrinkRetainingCapacity(0);
         for (sections.items) |out| {
@@ -2237,7 +2237,7 @@ pub const Zld = struct {
             }
         }
 
-        std.sort.sort(u64, addresses.items, {}, asc_u64);
+        mem.sort(u64, addresses.items, {}, asc_u64);
 
         var offsets = std.ArrayList(u32).init(gpa);
         defer offsets.deinit();
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index cd9c44d656..5dfc91d4ce 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -2143,7 +2143,7 @@ fn sortDataSegments(wasm: *Wasm) !void {
         }
     };
 
-    std.sort.sort([]const u8, keys, {}, SortContext.sort);
+    mem.sort([]const u8, keys, {}, SortContext.sort);
     for (keys) |key| {
         const segment_index = wasm.data_segments.get(key).?;
         new_mapping.putAssumeCapacity(key, segment_index);
@@ -2187,7 +2187,7 @@ fn setupInitFunctions(wasm: *Wasm) !void {
     }
 
     // sort the initfunctions based on their priority
-    std.sort.sort(InitFuncLoc, wasm.init_funcs.items, {}, InitFuncLoc.lessThan);
+    mem.sort(InitFuncLoc, wasm.init_funcs.items, {}, InitFuncLoc.lessThan);
 }
 
 /// Generates an atom containing the global error set' size.
@@ -3687,7 +3687,7 @@ fn writeToFile(
             }
         }.sort;
 
-        std.sort.sort(*Atom, sorted_atoms.items, wasm, atom_sort_fn);
+        mem.sort(*Atom, sorted_atoms.items, wasm, atom_sort_fn);
 
         for (sorted_atoms.items) |sorted_atom| {
             try leb.writeULEB128(binary_writer, sorted_atom.size);
@@ -4050,8 +4050,8 @@ fn emitNameSection(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), arena: std.mem
         data_segment_index += 1;
     }
 
-    std.sort.sort(Name, funcs.values(), {}, Name.lessThan);
-    std.sort.sort(Name, globals.items, {}, Name.lessThan);
+    mem.sort(Name, funcs.values(), {}, Name.lessThan);
+    mem.sort(Name, globals.items, {}, Name.lessThan);
 
     const header_offset = try reserveCustomSectionHeader(binary_bytes);
     const writer = binary_bytes.writer();
diff --git a/src/objcopy.zig b/src/objcopy.zig
index 12129aba9c..c5d0e8dcb3 100644
--- a/src/objcopy.zig
+++ b/src/objcopy.zig
@@ -402,7 +402,7 @@ const BinaryElfOutput = struct {
             }
         }
 
-        std.sort.sort(*BinaryElfSegment, self.segments.items, {}, segmentSortCompare);
+        mem.sort(*BinaryElfSegment, self.segments.items, {}, segmentSortCompare);
 
         for (self.segments.items, 0..) |firstSegment, i| {
             if (firstSegment.firstSection) |firstSection| {
@@ -427,7 +427,7 @@ const BinaryElfOutput = struct {
             }
         }
 
-        std.sort.sort(*BinaryElfSection, self.sections.items, {}, sectionSortCompare);
+        mem.sort(*BinaryElfSection, self.sections.items, {}, sectionSortCompare);
 
         return self;
     }
diff --git a/test/src/Cases.zig b/test/src/Cases.zig
index 0451079a0e..63dd2fd3da 100644
--- a/test/src/Cases.zig
+++ b/test/src/Cases.zig
@@ -607,7 +607,7 @@ fn sortTestFilenames(filenames: [][]const u8) void {
             };
         }
     };
-    std.sort.sort([]const u8, filenames, Context{}, Context.lessThan);
+    std.mem.sort([]const u8, filenames, Context{}, Context.lessThan);
 }
 
 /// Iterates a set of filenames extracting batches that are either incremental
diff --git a/tools/gen_stubs.zig b/tools/gen_stubs.zig
index bc2637e197..95787b719a 100644
--- a/tools/gen_stubs.zig
+++ b/tools/gen_stubs.zig
@@ -437,7 +437,7 @@ fn parseElf(parse: Parse, comptime is_64: bool, comptime endian: builtin.Endian)
     const dynstr = elf_bytes[dynstr_offset..];
 
     // Sort the list by address, ascending.
-    std.sort.sort(Sym, @alignCast(8, dyn_syms), {}, S.symbolAddrLessThan);
+    mem.sort(Sym, @alignCast(8, dyn_syms), {}, S.symbolAddrLessThan);
 
     for (dyn_syms) |sym| {
         const this_section = s(sym.st_shndx);
diff --git a/tools/generate_JSONTestSuite.zig b/tools/generate_JSONTestSuite.zig
index b8550959c7..2229cf4012 100644
--- a/tools/generate_JSONTestSuite.zig
+++ b/tools/generate_JSONTestSuite.zig
@@ -23,7 +23,7 @@ pub fn main() !void {
     while (try it.next()) |entry| {
         try names.append(try allocator.dupe(u8, entry.name));
     }
-    std.sort.sort([]const u8, names.items, {}, (struct {
+    std.mem.sort([]const u8, names.items, {}, (struct {
         fn lessThan(_: void, a: []const u8, b: []const u8) bool {
             return std.mem.lessThan(u8, a, b);
         }
diff --git a/tools/process_headers.zig b/tools/process_headers.zig
index a6550a2573..0321c0e0eb 100644
--- a/tools/process_headers.zig
+++ b/tools/process_headers.zig
@@ -460,7 +460,7 @@ pub fn main() !void {
                 try contents_list.append(contents);
             }
         }
-        std.sort.sort(*Contents, contents_list.items, {}, Contents.hitCountLessThan);
+        std.mem.sort(*Contents, contents_list.items, {}, Contents.hitCountLessThan);
         const best_contents = contents_list.popOrNull().?;
         if (best_contents.hit_count > 1) {
             // worth it to make it generic
diff --git a/tools/update-linux-headers.zig b/tools/update-linux-headers.zig
index 38fbab6645..0f31e5e893 100644
--- a/tools/update-linux-headers.zig
+++ b/tools/update-linux-headers.zig
@@ -260,7 +260,7 @@ pub fn main() !void {
                 try contents_list.append(contents);
             }
         }
-        std.sort.sort(*Contents, contents_list.items, {}, Contents.hitCountLessThan);
+        std.mem.sort(*Contents, contents_list.items, {}, Contents.hitCountLessThan);
         const best_contents = contents_list.popOrNull().?;
         if (best_contents.hit_count > 1) {
             // worth it to make it generic
diff --git a/tools/update_clang_options.zig b/tools/update_clang_options.zig
index 682ec7e152..feefeb0a83 100644
--- a/tools/update_clang_options.zig
+++ b/tools/update_clang_options.zig
@@ -646,7 +646,7 @@ pub fn main() anyerror!void {
     }
     // Some options have multiple matches. As an example, "-Wl,foo" matches both
     // "W" and "Wl,". So we sort this list in order of descending priority.
-    std.sort.sort(*json.ObjectMap, all_objects.items, {}, objectLessThan);
+    std.mem.sort(*json.ObjectMap, all_objects.items, {}, objectLessThan);
 
     var buffered_stdout = std.io.bufferedWriter(std.io.getStdOut().writer());
     const stdout = buffered_stdout.writer();
diff --git a/tools/update_cpu_features.zig b/tools/update_cpu_features.zig
index 53bb365f41..d5c3d48852 100644
--- a/tools/update_cpu_features.zig
+++ b/tools/update_cpu_features.zig
@@ -1187,8 +1187,8 @@ fn processOneTarget(job: Job) anyerror!void {
     for (llvm_target.extra_cpus) |extra_cpu| {
         try all_cpus.append(extra_cpu);
     }
-    std.sort.sort(Feature, all_features.items, {}, featureLessThan);
-    std.sort.sort(Cpu, all_cpus.items, {}, cpuLessThan);
+    mem.sort(Feature, all_features.items, {}, featureLessThan);
+    mem.sort(Cpu, all_cpus.items, {}, cpuLessThan);
 
     const target_sub_path = try fs.path.join(arena, &.{ "lib", "std", "target" });
     var target_dir = try job.zig_src_dir.makeOpenPath(target_sub_path, .{});
@@ -1283,7 +1283,7 @@ fn processOneTarget(job: Job) anyerror!void {
                 try dependencies.append(key.*);
             }
         }
-        std.sort.sort([]const u8, dependencies.items, {}, asciiLessThan);
+        mem.sort([]const u8, dependencies.items, {}, asciiLessThan);
 
         if (dependencies.items.len == 0) {
             try w.writeAll(
@@ -1328,7 +1328,7 @@ fn processOneTarget(job: Job) anyerror!void {
                 try cpu_features.append(key.*);
             }
         }
-        std.sort.sort([]const u8, cpu_features.items, {}, asciiLessThan);
+        mem.sort([]const u8, cpu_features.items, {}, asciiLessThan);
         if (cpu.llvm_name) |llvm_name| {
             try w.print(
                 \\    pub const {} = CpuModel{{
diff --git a/tools/update_spirv_features.zig b/tools/update_spirv_features.zig
index 8d398f58de..44d8b6a445 100644
--- a/tools/update_spirv_features.zig
+++ b/tools/update_spirv_features.zig
@@ -303,7 +303,7 @@ fn gatherVersions(allocator: Allocator, registry: g.CoreRegistry) ![]const Versi
         }
     }
 
-    std.sort.sort(Version, versions.items, {}, Version.lessThan);
+    std.mem.sort(Version, versions.items, {}, Version.lessThan);
 
     return versions.items;
 }
-- 
cgit v1.2.3


From 0f6fa3f20b3b28958921bd63a9a9d96468455e9c Mon Sep 17 00:00:00 2001
From: Linus Groh 
Date: Sun, 21 May 2023 14:27:28 +0100
Subject: std: Move std.debug.{TTY.Config,detectTTYConfig} to std.io.tty

Also get rid of the TTY wrapper struct, which was exclusively used as a
namespace - this is done by the tty.zig root struct now.

detectTTYConfig has been renamed to just detectConfig, which is enough
given the new namespace. Additionally, a doc comment has been added.
---
 lib/build_runner.zig        |  12 ++--
 lib/std/Build.zig           |   2 +-
 lib/std/Build/Step.zig      |   2 +-
 lib/std/builtin.zig         |   2 +-
 lib/std/debug.zig           | 137 ++++----------------------------------------
 lib/std/io.zig              |   2 +
 lib/std/io/tty.zig          | 121 ++++++++++++++++++++++++++++++++++++++
 lib/std/testing.zig         |   8 +--
 lib/std/zig/ErrorBundle.zig |   4 +-
 src/main.zig                |   4 +-
 test/src/Cases.zig          |   2 +-
 11 files changed, 152 insertions(+), 144 deletions(-)
 create mode 100644 lib/std/io/tty.zig

(limited to 'test/src')

diff --git a/lib/build_runner.zig b/lib/build_runner.zig
index 7eec164871..a09ec2cf1f 100644
--- a/lib/build_runner.zig
+++ b/lib/build_runner.zig
@@ -333,7 +333,7 @@ const Run = struct {
 
     claimed_rss: usize,
     enable_summary: ?bool,
-    ttyconf: std.debug.TTY.Config,
+    ttyconf: std.io.tty.Config,
     stderr: std.fs.File,
 };
 
@@ -535,7 +535,7 @@ const PrintNode = struct {
     last: bool = false,
 };
 
-fn printPrefix(node: *PrintNode, stderr: std.fs.File, ttyconf: std.debug.TTY.Config) !void {
+fn printPrefix(node: *PrintNode, stderr: std.fs.File, ttyconf: std.io.tty.Config) !void {
     const parent = node.parent orelse return;
     if (parent.parent == null) return;
     try printPrefix(parent, stderr, ttyconf);
@@ -553,7 +553,7 @@ fn printTreeStep(
     b: *std.Build,
     s: *Step,
     stderr: std.fs.File,
-    ttyconf: std.debug.TTY.Config,
+    ttyconf: std.io.tty.Config,
     parent_node: *PrintNode,
     step_stack: *std.AutoArrayHashMapUnmanaged(*Step, void),
 ) !void {
@@ -1026,15 +1026,15 @@ fn cleanExit() void {
 
 const Color = enum { auto, off, on };
 
-fn get_tty_conf(color: Color, stderr: std.fs.File) std.debug.TTY.Config {
+fn get_tty_conf(color: Color, stderr: std.fs.File) std.io.tty.Config {
     return switch (color) {
-        .auto => std.debug.detectTTYConfig(stderr),
+        .auto => std.io.tty.detectConfig(stderr),
         .on => .escape_codes,
         .off => .no_color,
     };
 }
 
-fn renderOptions(ttyconf: std.debug.TTY.Config) std.zig.ErrorBundle.RenderOptions {
+fn renderOptions(ttyconf: std.io.tty.Config) std.zig.ErrorBundle.RenderOptions {
     return .{
         .ttyconf = ttyconf,
         .include_source_line = ttyconf != .no_color,
diff --git a/lib/std/Build.zig b/lib/std/Build.zig
index b36e815f72..bb642b5e66 100644
--- a/lib/std/Build.zig
+++ b/lib/std/Build.zig
@@ -1712,7 +1712,7 @@ fn dumpBadGetPathHelp(
         s.name,
     });
 
-    const tty_config = std.debug.detectTTYConfig(stderr);
+    const tty_config = std.io.tty.detectConfig(stderr);
     tty_config.setColor(w, .red) catch {};
     try stderr.writeAll("    The step was created by this stack trace:\n");
     tty_config.setColor(w, .reset) catch {};
diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig
index 40c88df2b9..a0d7a6a296 100644
--- a/lib/std/Build/Step.zig
+++ b/lib/std/Build/Step.zig
@@ -237,7 +237,7 @@ pub fn dump(step: *Step) void {
 
     const stderr = std.io.getStdErr();
     const w = stderr.writer();
-    const tty_config = std.debug.detectTTYConfig(stderr);
+    const tty_config = std.io.tty.detectConfig(stderr);
     const debug_info = std.debug.getSelfDebugInfo() catch |err| {
         w.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{
             @errorName(err),
diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index 56fab05d88..710aaefd5a 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -51,7 +51,7 @@ pub const StackTrace = struct {
         const debug_info = std.debug.getSelfDebugInfo() catch |err| {
             return writer.print("\nUnable to print stack trace: Unable to open debug info: {s}\n", .{@errorName(err)});
         };
-        const tty_config = std.debug.detectTTYConfig(std.io.getStdErr());
+        const tty_config = std.io.tty.detectConfig(std.io.getStdErr());
         try writer.writeAll("\n");
         std.debug.writeStackTrace(self, writer, arena.allocator(), debug_info, tty_config) catch |err| {
             try writer.print("Unable to print stack trace: {s}\n", .{@errorName(err)});
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index d98cf8f27d..08407023d6 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -5,7 +5,6 @@ const mem = std.mem;
 const io = std.io;
 const os = std.os;
 const fs = std.fs;
-const process = std.process;
 const testing = std.testing;
 const elf = std.elf;
 const DW = std.dwarf;
@@ -109,31 +108,6 @@ pub fn getSelfDebugInfo() !*DebugInfo {
     }
 }
 
-pub fn detectTTYConfig(file: std.fs.File) TTY.Config {
-    if (builtin.os.tag == .wasi) {
-        // Per https://github.com/WebAssembly/WASI/issues/162 ANSI codes
-        // aren't currently supported.
-        return .no_color;
-    } else if (process.hasEnvVarConstant("ZIG_DEBUG_COLOR")) {
-        return .escape_codes;
-    } else if (process.hasEnvVarConstant("NO_COLOR")) {
-        return .no_color;
-    } else if (file.supportsAnsiEscapeCodes()) {
-        return .escape_codes;
-    } else if (native_os == .windows and file.isTty()) {
-        var info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined;
-        if (windows.kernel32.GetConsoleScreenBufferInfo(file.handle, &info) != windows.TRUE) {
-            // TODO: Should this return an error instead?
-            return .no_color;
-        }
-        return .{ .windows_api = .{
-            .handle = file.handle,
-            .reset_attributes = info.wAttributes,
-        } };
-    }
-    return .no_color;
-}
-
 /// Tries to print the current stack trace to stderr, unbuffered, and ignores any error returned.
 /// TODO multithreaded awareness
 pub fn dumpCurrentStackTrace(start_addr: ?usize) void {
@@ -154,7 +128,7 @@ pub fn dumpCurrentStackTrace(start_addr: ?usize) void {
             stderr.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{@errorName(err)}) catch return;
             return;
         };
-        writeCurrentStackTrace(stderr, debug_info, detectTTYConfig(io.getStdErr()), start_addr) catch |err| {
+        writeCurrentStackTrace(stderr, debug_info, io.tty.detectConfig(io.getStdErr()), start_addr) catch |err| {
             stderr.print("Unable to dump stack trace: {s}\n", .{@errorName(err)}) catch return;
             return;
         };
@@ -182,7 +156,7 @@ pub fn dumpStackTraceFromBase(bp: usize, ip: usize) void {
             stderr.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{@errorName(err)}) catch return;
             return;
         };
-        const tty_config = detectTTYConfig(io.getStdErr());
+        const tty_config = io.tty.detectConfig(io.getStdErr());
         if (native_os == .windows) {
             writeCurrentStackTraceWindows(stderr, debug_info, tty_config, ip) catch return;
             return;
@@ -265,7 +239,7 @@ pub fn dumpStackTrace(stack_trace: std.builtin.StackTrace) void {
             stderr.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{@errorName(err)}) catch return;
             return;
         };
-        writeStackTrace(stack_trace, stderr, getDebugInfoAllocator(), debug_info, detectTTYConfig(io.getStdErr())) catch |err| {
+        writeStackTrace(stack_trace, stderr, getDebugInfoAllocator(), debug_info, io.tty.detectConfig(io.getStdErr())) catch |err| {
             stderr.print("Unable to dump stack trace: {s}\n", .{@errorName(err)}) catch return;
             return;
         };
@@ -403,7 +377,7 @@ pub fn writeStackTrace(
     out_stream: anytype,
     allocator: mem.Allocator,
     debug_info: *DebugInfo,
-    tty_config: TTY.Config,
+    tty_config: io.tty.Config,
 ) !void {
     _ = allocator;
     if (builtin.strip_debug_info) return error.MissingDebugInfo;
@@ -562,7 +536,7 @@ pub const StackIterator = struct {
 pub fn writeCurrentStackTrace(
     out_stream: anytype,
     debug_info: *DebugInfo,
-    tty_config: TTY.Config,
+    tty_config: io.tty.Config,
     start_addr: ?usize,
 ) !void {
     if (native_os == .windows) {
@@ -634,7 +608,7 @@ pub noinline fn walkStackWindows(addresses: []usize) usize {
 pub fn writeCurrentStackTraceWindows(
     out_stream: anytype,
     debug_info: *DebugInfo,
-    tty_config: TTY.Config,
+    tty_config: io.tty.Config,
     start_addr: ?usize,
 ) !void {
     var addr_buf: [1024]usize = undefined;
@@ -651,95 +625,6 @@ pub fn writeCurrentStackTraceWindows(
     }
 }
 
-/// Provides simple functionality for manipulating the terminal in some way,
-/// for debugging purposes, such as coloring text, etc.
-pub const TTY = struct {
-    pub const Color = enum {
-        red,
-        green,
-        yellow,
-        cyan,
-        white,
-        dim,
-        bold,
-        reset,
-    };
-
-    pub const Config = union(enum) {
-        no_color,
-        escape_codes,
-        windows_api: if (native_os == .windows) WindowsContext else void,
-
-        pub const WindowsContext = struct {
-            handle: File.Handle,
-            reset_attributes: u16,
-        };
-
-        pub fn setColor(conf: Config, out_stream: anytype, color: Color) !void {
-            nosuspend switch (conf) {
-                .no_color => return,
-                .escape_codes => {
-                    const color_string = switch (color) {
-                        .red => "\x1b[31;1m",
-                        .green => "\x1b[32;1m",
-                        .yellow => "\x1b[33;1m",
-                        .cyan => "\x1b[36;1m",
-                        .white => "\x1b[37;1m",
-                        .bold => "\x1b[1m",
-                        .dim => "\x1b[2m",
-                        .reset => "\x1b[0m",
-                    };
-                    try out_stream.writeAll(color_string);
-                },
-                .windows_api => |ctx| if (native_os == .windows) {
-                    const attributes = switch (color) {
-                        .red => windows.FOREGROUND_RED | windows.FOREGROUND_INTENSITY,
-                        .green => windows.FOREGROUND_GREEN | windows.FOREGROUND_INTENSITY,
-                        .yellow => windows.FOREGROUND_RED | windows.FOREGROUND_GREEN | windows.FOREGROUND_INTENSITY,
-                        .cyan => windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY,
-                        .white, .bold => windows.FOREGROUND_RED | windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY,
-                        .dim => windows.FOREGROUND_INTENSITY,
-                        .reset => ctx.reset_attributes,
-                    };
-                    try windows.SetConsoleTextAttribute(ctx.handle, attributes);
-                } else {
-                    unreachable;
-                },
-            };
-        }
-
-        pub fn writeDEC(conf: Config, writer: anytype, codepoint: u8) !void {
-            const bytes = switch (conf) {
-                .no_color, .windows_api => switch (codepoint) {
-                    0x50...0x5e => @as(*const [1]u8, &codepoint),
-                    0x6a => "+", // ┘
-                    0x6b => "+", // ┐
-                    0x6c => "+", // ┌
-                    0x6d => "+", // └
-                    0x6e => "+", // ┼
-                    0x71 => "-", // ─
-                    0x74 => "+", // ├
-                    0x75 => "+", // ┤
-                    0x76 => "+", // ┴
-                    0x77 => "+", // ┬
-                    0x78 => "|", // │
-                    else => " ", // TODO
-                },
-                .escape_codes => switch (codepoint) {
-                    // Here we avoid writing the DEC beginning sequence and
-                    // ending sequence in separate syscalls by putting the
-                    // beginning and ending sequence into the same string
-                    // literals, to prevent terminals ending up in bad states
-                    // in case a crash happens between syscalls.
-                    inline 0x50...0x7f => |x| "\x1B\x28\x30" ++ [1]u8{x} ++ "\x1B\x28\x42",
-                    else => unreachable,
-                },
-            };
-            return writer.writeAll(bytes);
-        }
-    };
-};
-
 fn machoSearchSymbols(symbols: []const MachoSymbol, address: usize) ?*const MachoSymbol {
     var min: usize = 0;
     var max: usize = symbols.len - 1;
@@ -785,7 +670,7 @@ test "machoSearchSymbols" {
     try testing.expectEqual(&symbols[2], machoSearchSymbols(&symbols, 5000).?);
 }
 
-fn printUnknownSource(debug_info: *DebugInfo, out_stream: anytype, address: usize, tty_config: TTY.Config) !void {
+fn printUnknownSource(debug_info: *DebugInfo, out_stream: anytype, address: usize, tty_config: io.tty.Config) !void {
     const module_name = debug_info.getModuleNameForAddress(address);
     return printLineInfo(
         out_stream,
@@ -798,7 +683,7 @@ fn printUnknownSource(debug_info: *DebugInfo, out_stream: anytype, address: usiz
     );
 }
 
-pub fn printSourceAtAddress(debug_info: *DebugInfo, out_stream: anytype, address: usize, tty_config: TTY.Config) !void {
+pub fn printSourceAtAddress(debug_info: *DebugInfo, out_stream: anytype, address: usize, tty_config: io.tty.Config) !void {
     const module = debug_info.getModuleForAddress(address) catch |err| switch (err) {
         error.MissingDebugInfo, error.InvalidDebugInfo => return printUnknownSource(debug_info, out_stream, address, tty_config),
         else => return err,
@@ -827,7 +712,7 @@ fn printLineInfo(
     address: usize,
     symbol_name: []const u8,
     compile_unit_name: []const u8,
-    tty_config: TTY.Config,
+    tty_config: io.tty.Config,
     comptime printLineFromFile: anytype,
 ) !void {
     nosuspend {
@@ -2193,7 +2078,7 @@ test "manage resources correctly" {
     const writer = std.io.null_writer;
     var di = try openSelfDebugInfo(testing.allocator);
     defer di.deinit();
-    try printSourceAtAddress(&di, writer, showMyTrace(), detectTTYConfig(std.io.getStdErr()));
+    try printSourceAtAddress(&di, writer, showMyTrace(), io.tty.detectConfig(std.io.getStdErr()));
 }
 
 noinline fn showMyTrace() usize {
@@ -2253,7 +2138,7 @@ pub fn ConfigurableTrace(comptime size: usize, comptime stack_frame_count: usize
         pub fn dump(t: @This()) void {
             if (!enabled) return;
 
-            const tty_config = detectTTYConfig(std.io.getStdErr());
+            const tty_config = io.tty.detectConfig(std.io.getStdErr());
             const stderr = io.getStdErr().writer();
             const end = @min(t.index, size);
             const debug_info = getSelfDebugInfo() catch |err| {
diff --git a/lib/std/io.zig b/lib/std/io.zig
index d95997f853..f6d893c7dd 100644
--- a/lib/std/io.zig
+++ b/lib/std/io.zig
@@ -155,6 +155,8 @@ pub const BufferedAtomicFile = @import("io/buffered_atomic_file.zig").BufferedAt
 
 pub const StreamSource = @import("io/stream_source.zig").StreamSource;
 
+pub const tty = @import("io/tty.zig");
+
 /// A Writer that doesn't write to anything.
 pub const null_writer = @as(NullWriter, .{ .context = {} });
 
diff --git a/lib/std/io/tty.zig b/lib/std/io/tty.zig
new file mode 100644
index 0000000000..ea1c52db00
--- /dev/null
+++ b/lib/std/io/tty.zig
@@ -0,0 +1,121 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const File = std.fs.File;
+const process = std.process;
+const windows = std.os.windows;
+const native_os = builtin.os.tag;
+
+/// Detect suitable TTY configuration options for the given file (commonly stdout/stderr).
+/// This includes feature checks for ANSI escape codes and the Windows console API, as well as
+/// respecting the `NO_COLOR` environment variable.
+pub fn detectConfig(file: File) Config {
+    if (builtin.os.tag == .wasi) {
+        // Per https://github.com/WebAssembly/WASI/issues/162 ANSI codes
+        // aren't currently supported.
+        return .no_color;
+    } else if (process.hasEnvVarConstant("ZIG_DEBUG_COLOR")) {
+        return .escape_codes;
+    } else if (process.hasEnvVarConstant("NO_COLOR")) {
+        return .no_color;
+    } else if (file.supportsAnsiEscapeCodes()) {
+        return .escape_codes;
+    } else if (native_os == .windows and file.isTty()) {
+        var info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined;
+        if (windows.kernel32.GetConsoleScreenBufferInfo(file.handle, &info) != windows.TRUE) {
+            // TODO: Should this return an error instead?
+            return .no_color;
+        }
+        return .{ .windows_api = .{
+            .handle = file.handle,
+            .reset_attributes = info.wAttributes,
+        } };
+    }
+    return .no_color;
+}
+
+pub const Color = enum {
+    red,
+    green,
+    yellow,
+    cyan,
+    white,
+    dim,
+    bold,
+    reset,
+};
+
+/// Provides simple functionality for manipulating the terminal in some way,
+/// such as coloring text, etc.
+pub const Config = union(enum) {
+    no_color,
+    escape_codes,
+    windows_api: if (native_os == .windows) WindowsContext else void,
+
+    pub const WindowsContext = struct {
+        handle: File.Handle,
+        reset_attributes: u16,
+    };
+
+    pub fn setColor(conf: Config, out_stream: anytype, color: Color) !void {
+        nosuspend switch (conf) {
+            .no_color => return,
+            .escape_codes => {
+                const color_string = switch (color) {
+                    .red => "\x1b[31;1m",
+                    .green => "\x1b[32;1m",
+                    .yellow => "\x1b[33;1m",
+                    .cyan => "\x1b[36;1m",
+                    .white => "\x1b[37;1m",
+                    .bold => "\x1b[1m",
+                    .dim => "\x1b[2m",
+                    .reset => "\x1b[0m",
+                };
+                try out_stream.writeAll(color_string);
+            },
+            .windows_api => |ctx| if (native_os == .windows) {
+                const attributes = switch (color) {
+                    .red => windows.FOREGROUND_RED | windows.FOREGROUND_INTENSITY,
+                    .green => windows.FOREGROUND_GREEN | windows.FOREGROUND_INTENSITY,
+                    .yellow => windows.FOREGROUND_RED | windows.FOREGROUND_GREEN | windows.FOREGROUND_INTENSITY,
+                    .cyan => windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY,
+                    .white, .bold => windows.FOREGROUND_RED | windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY,
+                    .dim => windows.FOREGROUND_INTENSITY,
+                    .reset => ctx.reset_attributes,
+                };
+                try windows.SetConsoleTextAttribute(ctx.handle, attributes);
+            } else {
+                unreachable;
+            },
+        };
+    }
+
+    pub fn writeDEC(conf: Config, writer: anytype, codepoint: u8) !void {
+        const bytes = switch (conf) {
+            .no_color, .windows_api => switch (codepoint) {
+                0x50...0x5e => @as(*const [1]u8, &codepoint),
+                0x6a => "+", // ┘
+                0x6b => "+", // ┐
+                0x6c => "+", // ┌
+                0x6d => "+", // └
+                0x6e => "+", // ┼
+                0x71 => "-", // ─
+                0x74 => "+", // ├
+                0x75 => "+", // ┤
+                0x76 => "+", // ┴
+                0x77 => "+", // ┬
+                0x78 => "|", // │
+                else => " ", // TODO
+            },
+            .escape_codes => switch (codepoint) {
+                // Here we avoid writing the DEC beginning sequence and
+                // ending sequence in separate syscalls by putting the
+                // beginning and ending sequence into the same string
+                // literals, to prevent terminals ending up in bad states
+                // in case a crash happens between syscalls.
+                inline 0x50...0x7f => |x| "\x1B\x28\x30" ++ [1]u8{x} ++ "\x1B\x28\x42",
+                else => unreachable,
+            },
+        };
+        return writer.writeAll(bytes);
+    }
+};
diff --git a/lib/std/testing.zig b/lib/std/testing.zig
index 8576ec0c83..7986c50eaf 100644
--- a/lib/std/testing.zig
+++ b/lib/std/testing.zig
@@ -279,7 +279,7 @@ test "expectApproxEqRel" {
 /// This function is intended to be used only in tests. When the two slices are not
 /// equal, prints diagnostics to stderr to show exactly how they are not equal (with
 /// the differences highlighted in red), then returns a test failure error.
-/// The colorized output is optional and controlled by the return of `std.debug.detectTTYConfig()`.
+/// The colorized output is optional and controlled by the return of `std.io.tty.detectConfig()`.
 /// If your inputs are UTF-8 encoded strings, consider calling `expectEqualStrings` instead.
 pub fn expectEqualSlices(comptime T: type, expected: []const T, actual: []const T) !void {
     if (expected.ptr == actual.ptr and expected.len == actual.len) {
@@ -312,7 +312,7 @@ pub fn expectEqualSlices(comptime T: type, expected: []const T, actual: []const
     const actual_window = actual[window_start..@min(actual.len, window_start + max_window_size)];
     const actual_truncated = window_start + actual_window.len < actual.len;
 
-    const ttyconf = std.debug.detectTTYConfig(std.io.getStdErr());
+    const ttyconf = std.io.tty.detectConfig(std.io.getStdErr());
     var differ = if (T == u8) BytesDiffer{
         .expected = expected_window,
         .actual = actual_window,
@@ -379,7 +379,7 @@ fn SliceDiffer(comptime T: type) type {
         start_index: usize,
         expected: []const T,
         actual: []const T,
-        ttyconf: std.debug.TTY.Config,
+        ttyconf: std.io.tty.Config,
 
         const Self = @This();
 
@@ -398,7 +398,7 @@ fn SliceDiffer(comptime T: type) type {
 const BytesDiffer = struct {
     expected: []const u8,
     actual: []const u8,
-    ttyconf: std.debug.TTY.Config,
+    ttyconf: std.io.tty.Config,
 
     pub fn write(self: BytesDiffer, writer: anytype) !void {
         var expected_iterator = ChunkIterator{ .bytes = self.expected };
diff --git a/lib/std/zig/ErrorBundle.zig b/lib/std/zig/ErrorBundle.zig
index f74d82273a..46b5799807 100644
--- a/lib/std/zig/ErrorBundle.zig
+++ b/lib/std/zig/ErrorBundle.zig
@@ -148,7 +148,7 @@ pub fn nullTerminatedString(eb: ErrorBundle, index: usize) [:0]const u8 {
 }
 
 pub const RenderOptions = struct {
-    ttyconf: std.debug.TTY.Config,
+    ttyconf: std.io.tty.Config,
     include_reference_trace: bool = true,
     include_source_line: bool = true,
     include_log_text: bool = true,
@@ -181,7 +181,7 @@ fn renderErrorMessageToWriter(
     err_msg_index: MessageIndex,
     stderr: anytype,
     kind: []const u8,
-    color: std.debug.TTY.Color,
+    color: std.io.tty.Color,
     indent: usize,
 ) anyerror!void {
     const ttyconf = options.ttyconf;
diff --git a/src/main.zig b/src/main.zig
index 650741e5e4..afda88cebd 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -6044,9 +6044,9 @@ const ClangSearchSanitizer = struct {
     };
 };
 
-fn get_tty_conf(color: Color) std.debug.TTY.Config {
+fn get_tty_conf(color: Color) std.io.tty.Config {
     return switch (color) {
-        .auto => std.debug.detectTTYConfig(std.io.getStdErr()),
+        .auto => std.io.tty.detectConfig(std.io.getStdErr()),
         .on => .escape_codes,
         .off => .no_color,
     };
diff --git a/test/src/Cases.zig b/test/src/Cases.zig
index 63dd2fd3da..08568d0dd6 100644
--- a/test/src/Cases.zig
+++ b/test/src/Cases.zig
@@ -1354,7 +1354,7 @@ fn runOneCase(
             defer all_errors.deinit(allocator);
             if (all_errors.errorMessageCount() > 0) {
                 all_errors.renderToStdErr(.{
-                    .ttyconf = std.debug.detectTTYConfig(std.io.getStdErr()),
+                    .ttyconf = std.io.tty.detectConfig(std.io.getStdErr()),
                 });
                 // TODO print generated C code
                 return error.UnexpectedCompileErrors;
-- 
cgit v1.2.3


From 5744ceedb8ea4b3e5906175033f634b17287f3ca Mon Sep 17 00:00:00 2001
From: Mason Remaley 
Date: Wed, 24 May 2023 14:26:07 -0700
Subject: Fixes `WriteFile.getFileSource` failure on Windows (#15730)

---
 lib/std/Build.zig                |  2 +-
 lib/std/Build/Step/WriteFile.zig | 23 +++++++++--------------
 test/src/Cases.zig               |  8 +++++---
 test/src/CompareOutput.zig       | 10 ++++------
 test/src/StackTrace.zig          |  5 ++---
 test/src/run_translated_c.zig    |  4 ++--
 test/src/translate_c.zig         |  4 ++--
 test/tests.zig                   |  2 +-
 8 files changed, 26 insertions(+), 32 deletions(-)

(limited to 'test/src')

diff --git a/lib/std/Build.zig b/lib/std/Build.zig
index bb642b5e66..bf0c74bd64 100644
--- a/lib/std/Build.zig
+++ b/lib/std/Build.zig
@@ -759,7 +759,7 @@ pub fn dupePath(self: *Build, bytes: []const u8) []u8 {
 
 pub fn addWriteFile(self: *Build, file_path: []const u8, data: []const u8) *Step.WriteFile {
     const write_file_step = self.addWriteFiles();
-    write_file_step.add(file_path, data);
+    _ = write_file_step.add(file_path, data);
     return write_file_step;
 }
 
diff --git a/lib/std/Build/Step/WriteFile.zig b/lib/std/Build/Step/WriteFile.zig
index 0d817e7430..0448aa8d2a 100644
--- a/lib/std/Build/Step/WriteFile.zig
+++ b/lib/std/Build/Step/WriteFile.zig
@@ -27,6 +27,10 @@ pub const File = struct {
     generated_file: std.Build.GeneratedFile,
     sub_path: []const u8,
     contents: Contents,
+
+    pub fn getFileSource(self: *File) std.Build.FileSource {
+        return .{ .generated = &self.generated_file };
+    }
 };
 
 pub const OutputSourceFile = struct {
@@ -55,7 +59,7 @@ pub fn create(owner: *std.Build) *WriteFile {
     return wf;
 }
 
-pub fn add(wf: *WriteFile, sub_path: []const u8, bytes: []const u8) void {
+pub fn add(wf: *WriteFile, sub_path: []const u8, bytes: []const u8) std.Build.FileSource {
     const b = wf.step.owner;
     const gpa = b.allocator;
     const file = gpa.create(File) catch @panic("OOM");
@@ -65,8 +69,8 @@ pub fn add(wf: *WriteFile, sub_path: []const u8, bytes: []const u8) void {
         .contents = .{ .bytes = b.dupe(bytes) },
     };
     wf.files.append(gpa, file) catch @panic("OOM");
-
     wf.maybeUpdateName();
+    return file.getFileSource();
 }
 
 /// Place the file into the generated directory within the local cache,
@@ -76,7 +80,7 @@ pub fn add(wf: *WriteFile, sub_path: []const u8, bytes: []const u8) void {
 /// include sub-directories, in which case this step will ensure the
 /// required sub-path exists.
 /// This is the option expected to be used most commonly with `addCopyFile`.
-pub fn addCopyFile(wf: *WriteFile, source: std.Build.FileSource, sub_path: []const u8) void {
+pub fn addCopyFile(wf: *WriteFile, source: std.Build.FileSource, sub_path: []const u8) std.Build.FileSource {
     const b = wf.step.owner;
     const gpa = b.allocator;
     const file = gpa.create(File) catch @panic("OOM");
@@ -89,6 +93,7 @@ pub fn addCopyFile(wf: *WriteFile, source: std.Build.FileSource, sub_path: []con
 
     wf.maybeUpdateName();
     source.addStepDependencies(&wf.step);
+    return file.getFileSource();
 }
 
 /// A path relative to the package root.
@@ -96,7 +101,6 @@ pub fn addCopyFile(wf: *WriteFile, source: std.Build.FileSource, sub_path: []con
 /// used as part of the normal build process, but as a utility occasionally
 /// run by a developer with intent to modify source files and then commit
 /// those changes to version control.
-/// A file added this way is not available with `getFileSource`.
 pub fn addCopyFileToSource(wf: *WriteFile, source: std.Build.FileSource, sub_path: []const u8) void {
     const b = wf.step.owner;
     wf.output_source_files.append(b.allocator, .{
@@ -111,7 +115,6 @@ pub fn addCopyFileToSource(wf: *WriteFile, source: std.Build.FileSource, sub_pat
 /// used as part of the normal build process, but as a utility occasionally
 /// run by a developer with intent to modify source files and then commit
 /// those changes to version control.
-/// A file added this way is not available with `getFileSource`.
 pub fn addBytesToSource(wf: *WriteFile, bytes: []const u8, sub_path: []const u8) void {
     const b = wf.step.owner;
     wf.output_source_files.append(b.allocator, .{
@@ -120,15 +123,7 @@ pub fn addBytesToSource(wf: *WriteFile, bytes: []const u8, sub_path: []const u8)
     }) catch @panic("OOM");
 }
 
-/// Gets a file source for the given sub_path. If the file does not exist, returns `null`.
-pub fn getFileSource(wf: *WriteFile, sub_path: []const u8) ?std.Build.FileSource {
-    for (wf.files.items) |file| {
-        if (std.mem.eql(u8, file.sub_path, sub_path)) {
-            return .{ .generated = &file.generated_file };
-        }
-    }
-    return null;
-}
+pub const getFileSource = @compileError("Deprecated; use the return value from add()/addCopyFile(), or use files[i].getFileSource()");
 
 /// Returns a `FileSource` representing the base directory that contains all the
 /// files from this `WriteFile`.
diff --git a/test/src/Cases.zig b/test/src/Cases.zig
index 08568d0dd6..e4f3d532ce 100644
--- a/test/src/Cases.zig
+++ b/test/src/Cases.zig
@@ -494,10 +494,12 @@ pub fn lowerToBuildSteps(
         }
 
         const writefiles = b.addWriteFiles();
+        var file_sources = std.StringHashMap(std.Build.FileSource).init(b.allocator);
+        defer file_sources.deinit();
         for (update.files.items) |file| {
-            writefiles.add(file.path, file.src);
+            file_sources.put(file.path, writefiles.add(file.path, file.src)) catch @panic("OOM");
         }
-        const root_source_file = writefiles.getFileSource(update.files.items[0].path).?;
+        const root_source_file = writefiles.files.items[0].getFileSource();
 
         const artifact = if (case.is_test) b.addTest(.{
             .root_source_file = root_source_file,
@@ -540,7 +542,7 @@ pub fn lowerToBuildSteps(
 
         for (case.deps.items) |dep| {
             artifact.addAnonymousModule(dep.name, .{
-                .source_file = writefiles.getFileSource(dep.path).?,
+                .source_file = file_sources.get(dep.path).?,
             });
         }
 
diff --git a/test/src/CompareOutput.zig b/test/src/CompareOutput.zig
index fb89082def..d2f7a23089 100644
--- a/test/src/CompareOutput.zig
+++ b/test/src/CompareOutput.zig
@@ -82,7 +82,7 @@ pub fn addCase(self: *CompareOutput, case: TestCase) void {
 
     const write_src = b.addWriteFiles();
     for (case.sources.items) |src_file| {
-        write_src.add(src_file.filename, src_file.source);
+        _ = write_src.add(src_file.filename, src_file.source);
     }
 
     switch (case.special) {
@@ -99,7 +99,7 @@ pub fn addCase(self: *CompareOutput, case: TestCase) void {
                 .target = .{},
                 .optimize = .Debug,
             });
-            exe.addAssemblyFileSource(write_src.getFileSource(case.sources.items[0].filename).?);
+            exe.addAssemblyFileSource(write_src.files.items[0].getFileSource());
 
             const run = b.addRunArtifact(exe);
             run.setName(annotated_case_name);
@@ -117,10 +117,9 @@ pub fn addCase(self: *CompareOutput, case: TestCase) void {
                     if (mem.indexOf(u8, annotated_case_name, filter) == null) continue;
                 }
 
-                const basename = case.sources.items[0].filename;
                 const exe = b.addExecutable(.{
                     .name = "test",
-                    .root_source_file = write_src.getFileSource(basename).?,
+                    .root_source_file = write_src.files.items[0].getFileSource(),
                     .optimize = optimize,
                     .target = .{},
                 });
@@ -144,10 +143,9 @@ pub fn addCase(self: *CompareOutput, case: TestCase) void {
                 if (mem.indexOf(u8, annotated_case_name, filter) == null) return;
             }
 
-            const basename = case.sources.items[0].filename;
             const exe = b.addExecutable(.{
                 .name = "test",
-                .root_source_file = write_src.getFileSource(basename).?,
+                .root_source_file = write_src.files.items[0].getFileSource(),
                 .target = .{},
                 .optimize = .Debug,
             });
diff --git a/test/src/StackTrace.zig b/test/src/StackTrace.zig
index 0d0b7155e6..0552b419c0 100644
--- a/test/src/StackTrace.zig
+++ b/test/src/StackTrace.zig
@@ -72,11 +72,10 @@ fn addExpect(
         if (mem.indexOf(u8, annotated_case_name, filter) == null) return;
     }
 
-    const src_basename = "source.zig";
-    const write_src = b.addWriteFile(src_basename, source);
+    const write_src = b.addWriteFile("source.zig", source);
     const exe = b.addExecutable(.{
         .name = "test",
-        .root_source_file = write_src.getFileSource(src_basename).?,
+        .root_source_file = write_src.files.items[0].getFileSource(),
         .optimize = optimize_mode,
         .target = .{},
     });
diff --git a/test/src/run_translated_c.zig b/test/src/run_translated_c.zig
index 946e56f512..42db8b19c5 100644
--- a/test/src/run_translated_c.zig
+++ b/test/src/run_translated_c.zig
@@ -82,10 +82,10 @@ pub const RunTranslatedCContext = struct {
 
         const write_src = b.addWriteFiles();
         for (case.sources.items) |src_file| {
-            write_src.add(src_file.filename, src_file.source);
+            _ = write_src.add(src_file.filename, src_file.source);
         }
         const translate_c = b.addTranslateC(.{
-            .source_file = write_src.getFileSource(case.sources.items[0].filename).?,
+            .source_file = write_src.files.items[0].getFileSource(),
             .target = .{},
             .optimize = .Debug,
         });
diff --git a/test/src/translate_c.zig b/test/src/translate_c.zig
index e275ee57ee..a1f312d623 100644
--- a/test/src/translate_c.zig
+++ b/test/src/translate_c.zig
@@ -104,11 +104,11 @@ pub const TranslateCContext = struct {
 
         const write_src = b.addWriteFiles();
         for (case.sources.items) |src_file| {
-            write_src.add(src_file.filename, src_file.source);
+            _ = write_src.add(src_file.filename, src_file.source);
         }
 
         const translate_c = b.addTranslateC(.{
-            .source_file = write_src.getFileSource(case.sources.items[0].filename).?,
+            .source_file = write_src.files.items[0].getFileSource(),
             .target = case.target,
             .optimize = .Debug,
         });
diff --git a/test/tests.zig b/test/tests.zig
index 641914aabe..5912ceb907 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -759,7 +759,7 @@ pub fn addCliTests(b: *std.Build) *Step {
             "-fno-emit-bin", "-fno-emit-h",
             "-fstrip",       "-OReleaseFast",
         });
-        run.addFileSourceArg(writefile.getFileSource("example.zig").?);
+        run.addFileSourceArg(writefile.files.items[0].getFileSource());
         const example_s = run.addPrefixedOutputFileArg("-femit-asm=", "example.s");
 
         const checkfile = b.addCheckFile(example_s, .{
-- 
cgit v1.2.3


From 4159add4abe92e903d8797a005344d8325def2fc Mon Sep 17 00:00:00 2001
From: Linus Groh 
Date: Sat, 20 May 2023 23:06:42 +0100
Subject: std.fs.file: Rename File.Kind enum values to snake case

---
 lib/std/Build/Step/InstallDir.zig     |   4 +-
 lib/std/crypto/Certificate/Bundle.zig |   2 +-
 lib/std/fs.zig                        | 132 +++++++++++++++++-----------------
 lib/std/fs/file.zig                   | 120 +++++++++++++++----------------
 lib/std/fs/test.zig                   |  20 +++---
 src/Package.zig                       |   4 +-
 src/main.zig                          |   6 +-
 test/src/Cases.zig                    |   4 +-
 tools/process_headers.zig             |   4 +-
 tools/update-linux-headers.zig        |   4 +-
 tools/update_glibc.zig                |   4 +-
 tools/update_spirv_features.zig       |   2 +-
 12 files changed, 153 insertions(+), 153 deletions(-)

(limited to 'test/src')

diff --git a/lib/std/Build/Step/InstallDir.zig b/lib/std/Build/Step/InstallDir.zig
index 28280dcb7f..597601ce4e 100644
--- a/lib/std/Build/Step/InstallDir.zig
+++ b/lib/std/Build/Step/InstallDir.zig
@@ -80,8 +80,8 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
         const cwd = fs.cwd();
 
         switch (entry.kind) {
-            .Directory => try cwd.makePath(dest_path),
-            .File => {
+            .directory => try cwd.makePath(dest_path),
+            .file => {
                 for (self.options.blank_extensions) |ext| {
                     if (mem.endsWith(u8, entry.path, ext)) {
                         try dest_builder.truncateFile(dest_path);
diff --git a/lib/std/crypto/Certificate/Bundle.zig b/lib/std/crypto/Certificate/Bundle.zig
index fe1ef4c0c3..434de6e0a8 100644
--- a/lib/std/crypto/Certificate/Bundle.zig
+++ b/lib/std/crypto/Certificate/Bundle.zig
@@ -169,7 +169,7 @@ pub fn addCertsFromDir(cb: *Bundle, gpa: Allocator, iterable_dir: fs.IterableDir
     var it = iterable_dir.iterate();
     while (try it.next()) |entry| {
         switch (entry.kind) {
-            .File, .SymLink => {},
+            .file, .sym_link => {},
             else => continue,
         }
 
diff --git a/lib/std/fs.zig b/lib/std/fs.zig
index 041e49d549..496dbf5f0a 100644
--- a/lib/std/fs.zig
+++ b/lib/std/fs.zig
@@ -385,16 +385,16 @@ pub const IterableDir = struct {
                         continue :start_over;
                     }
 
-                    const entry_kind = switch (darwin_entry.d_type) {
-                        os.DT.BLK => Entry.Kind.BlockDevice,
-                        os.DT.CHR => Entry.Kind.CharacterDevice,
-                        os.DT.DIR => Entry.Kind.Directory,
-                        os.DT.FIFO => Entry.Kind.NamedPipe,
-                        os.DT.LNK => Entry.Kind.SymLink,
-                        os.DT.REG => Entry.Kind.File,
-                        os.DT.SOCK => Entry.Kind.UnixDomainSocket,
-                        os.DT.WHT => Entry.Kind.Whiteout,
-                        else => Entry.Kind.Unknown,
+                    const entry_kind: Entry.Kind = switch (darwin_entry.d_type) {
+                        os.DT.BLK => .block_device,
+                        os.DT.CHR => .character_device,
+                        os.DT.DIR => .directory,
+                        os.DT.FIFO => .named_pipe,
+                        os.DT.LNK => .sym_link,
+                        os.DT.REG => .file,
+                        os.DT.SOCK => .unix_domain_socket,
+                        os.DT.WHT => .whiteout,
+                        else => .unknown,
                     };
                     return Entry{
                         .name = name,
@@ -442,17 +442,17 @@ pub const IterableDir = struct {
                         error.FileNotFound => unreachable, // lost the race
                         else => |e| return e,
                     };
-                    const entry_kind = switch (stat_info.mode & os.S.IFMT) {
-                        os.S.IFIFO => Entry.Kind.NamedPipe,
-                        os.S.IFCHR => Entry.Kind.CharacterDevice,
-                        os.S.IFDIR => Entry.Kind.Directory,
-                        os.S.IFBLK => Entry.Kind.BlockDevice,
-                        os.S.IFREG => Entry.Kind.File,
-                        os.S.IFLNK => Entry.Kind.SymLink,
-                        os.S.IFSOCK => Entry.Kind.UnixDomainSocket,
-                        os.S.IFDOOR => Entry.Kind.Door,
-                        os.S.IFPORT => Entry.Kind.EventPort,
-                        else => Entry.Kind.Unknown,
+                    const entry_kind: Entry.Kind = switch (stat_info.mode & os.S.IFMT) {
+                        os.S.IFIFO => .named_pipe,
+                        os.S.IFCHR => .character_device,
+                        os.S.IFDIR => .directory,
+                        os.S.IFBLK => .block_device,
+                        os.S.IFREG => .file,
+                        os.S.IFLNK => .sym_link,
+                        os.S.IFSOCK => .unix_domain_socket,
+                        os.S.IFDOOR => .door,
+                        os.S.IFPORT => .event_port,
+                        else => .unknown,
                     };
                     return Entry{
                         .name = name,
@@ -501,16 +501,16 @@ pub const IterableDir = struct {
                         continue :start_over;
                     }
 
-                    const entry_kind = switch (bsd_entry.d_type) {
-                        os.DT.BLK => Entry.Kind.BlockDevice,
-                        os.DT.CHR => Entry.Kind.CharacterDevice,
-                        os.DT.DIR => Entry.Kind.Directory,
-                        os.DT.FIFO => Entry.Kind.NamedPipe,
-                        os.DT.LNK => Entry.Kind.SymLink,
-                        os.DT.REG => Entry.Kind.File,
-                        os.DT.SOCK => Entry.Kind.UnixDomainSocket,
-                        os.DT.WHT => Entry.Kind.Whiteout,
-                        else => Entry.Kind.Unknown,
+                    const entry_kind: Entry.Kind = switch (bsd_entry.d_type) {
+                        os.DT.BLK => .block_device,
+                        os.DT.CHR => .character_device,
+                        os.DT.DIR => .directory,
+                        os.DT.FIFO => .named_pipe,
+                        os.DT.LNK => .sym_link,
+                        os.DT.REG => .file,
+                        os.DT.SOCK => .unix_domain_socket,
+                        os.DT.WHT => .whiteout,
+                        else => .unknown,
                     };
                     return Entry{
                         .name = name,
@@ -595,14 +595,14 @@ pub const IterableDir = struct {
                     }
                     const statmode = stat_info.mode & os.S.IFMT;
 
-                    const entry_kind = switch (statmode) {
-                        os.S.IFDIR => Entry.Kind.Directory,
-                        os.S.IFBLK => Entry.Kind.BlockDevice,
-                        os.S.IFCHR => Entry.Kind.CharacterDevice,
-                        os.S.IFLNK => Entry.Kind.SymLink,
-                        os.S.IFREG => Entry.Kind.File,
-                        os.S.IFIFO => Entry.Kind.NamedPipe,
-                        else => Entry.Kind.Unknown,
+                    const entry_kind: Entry.Kind = switch (statmode) {
+                        os.S.IFDIR => .directory,
+                        os.S.IFBLK => .block_device,
+                        os.S.IFCHR => .character_device,
+                        os.S.IFLNK => .sym_link,
+                        os.S.IFREG => .file,
+                        os.S.IFIFO => .named_pipe,
+                        else => .unknown,
                     };
 
                     return Entry{
@@ -679,15 +679,15 @@ pub const IterableDir = struct {
                         continue :start_over;
                     }
 
-                    const entry_kind = switch (linux_entry.d_type) {
-                        linux.DT.BLK => Entry.Kind.BlockDevice,
-                        linux.DT.CHR => Entry.Kind.CharacterDevice,
-                        linux.DT.DIR => Entry.Kind.Directory,
-                        linux.DT.FIFO => Entry.Kind.NamedPipe,
-                        linux.DT.LNK => Entry.Kind.SymLink,
-                        linux.DT.REG => Entry.Kind.File,
-                        linux.DT.SOCK => Entry.Kind.UnixDomainSocket,
-                        else => Entry.Kind.Unknown,
+                    const entry_kind: Entry.Kind = switch (linux_entry.d_type) {
+                        linux.DT.BLK => .block_device,
+                        linux.DT.CHR => .character_device,
+                        linux.DT.DIR => .directory,
+                        linux.DT.FIFO => .named_pipe,
+                        linux.DT.LNK => .sym_link,
+                        linux.DT.REG => .file,
+                        linux.DT.SOCK => .unix_domain_socket,
+                        else => .unknown,
                     };
                     return Entry{
                         .name = name,
@@ -761,11 +761,11 @@ pub const IterableDir = struct {
                     // Trust that Windows gives us valid UTF-16LE
                     const name_utf8_len = std.unicode.utf16leToUtf8(self.name_data[0..], name_utf16le) catch unreachable;
                     const name_utf8 = self.name_data[0..name_utf8_len];
-                    const kind = blk: {
+                    const kind: Entry.Kind = blk: {
                         const attrs = dir_info.FileAttributes;
-                        if (attrs & w.FILE_ATTRIBUTE_DIRECTORY != 0) break :blk Entry.Kind.Directory;
-                        if (attrs & w.FILE_ATTRIBUTE_REPARSE_POINT != 0) break :blk Entry.Kind.SymLink;
-                        break :blk Entry.Kind.File;
+                        if (attrs & w.FILE_ATTRIBUTE_DIRECTORY != 0) break :blk .directory;
+                        if (attrs & w.FILE_ATTRIBUTE_REPARSE_POINT != 0) break :blk .sym_link;
+                        break :blk .file;
                     };
                     return Entry{
                         .name = name_utf8,
@@ -850,14 +850,14 @@ pub const IterableDir = struct {
                         continue :start_over;
                     }
 
-                    const entry_kind = switch (entry.d_type) {
-                        .BLOCK_DEVICE => Entry.Kind.BlockDevice,
-                        .CHARACTER_DEVICE => Entry.Kind.CharacterDevice,
-                        .DIRECTORY => Entry.Kind.Directory,
-                        .SYMBOLIC_LINK => Entry.Kind.SymLink,
-                        .REGULAR_FILE => Entry.Kind.File,
-                        .SOCKET_STREAM, .SOCKET_DGRAM => Entry.Kind.UnixDomainSocket,
-                        else => Entry.Kind.Unknown,
+                    const entry_kind: Entry.Kind = switch (entry.d_type) {
+                        .BLOCK_DEVICE => .block_device,
+                        .CHARACTER_DEVICE => .character_device,
+                        .DIRECTORY => .directory,
+                        .SYMBOLIC_LINK => .sym_link,
+                        .REGULAR_FILE => .file,
+                        .SOCKET_STREAM, .SOCKET_DGRAM => .unix_domain_socket,
+                        else => .unknown,
                     };
                     return Entry{
                         .name = name,
@@ -964,7 +964,7 @@ pub const IterableDir = struct {
                         dirname_len += 1;
                     }
                     try self.name_buffer.appendSlice(base.name);
-                    if (base.kind == .Directory) {
+                    if (base.kind == .directory) {
                         var new_dir = top.iter.dir.openIterableDir(base.name, .{}) catch |err| switch (err) {
                             error.NameTooLong => unreachable, // no path sep in base.name
                             else => |e| return e,
@@ -2106,7 +2106,7 @@ pub const Dir = struct {
     /// this function recursively removes its entries and then tries again.
     /// This operation is not atomic on most file systems.
     pub fn deleteTree(self: Dir, sub_path: []const u8) DeleteTreeError!void {
-        var initial_iterable_dir = (try self.deleteTreeOpenInitialSubpath(sub_path, .File)) orelse return;
+        var initial_iterable_dir = (try self.deleteTreeOpenInitialSubpath(sub_path, .file)) orelse return;
 
         const StackItem = struct {
             name: []const u8,
@@ -2130,7 +2130,7 @@ pub const Dir = struct {
         process_stack: while (stack.len != 0) {
             var top = &(stack.slice()[stack.len - 1]);
             while (try top.iter.next()) |entry| {
-                var treat_as_dir = entry.kind == .Directory;
+                var treat_as_dir = entry.kind == .directory;
                 handle_entry: while (true) {
                     if (treat_as_dir) {
                         if (stack.ensureUnusedCapacity(1)) {
@@ -2291,7 +2291,7 @@ pub const Dir = struct {
     /// Like `deleteTree`, but only keeps one `Iterator` active at a time to minimize the function's stack size.
     /// This is slower than `deleteTree` but uses less stack space.
     pub fn deleteTreeMinStackSize(self: Dir, sub_path: []const u8) DeleteTreeError!void {
-        return self.deleteTreeMinStackSizeWithKindHint(sub_path, .File);
+        return self.deleteTreeMinStackSizeWithKindHint(sub_path, .file);
     }
 
     fn deleteTreeMinStackSizeWithKindHint(self: Dir, sub_path: []const u8, kind_hint: File.Kind) DeleteTreeError!void {
@@ -2316,7 +2316,7 @@ pub const Dir = struct {
             scan_dir: while (true) {
                 var dir_it = iterable_dir.iterateAssumeFirstIteration();
                 dir_it: while (try dir_it.next()) |entry| {
-                    var treat_as_dir = entry.kind == .Directory;
+                    var treat_as_dir = entry.kind == .directory;
                     handle_entry: while (true) {
                         if (treat_as_dir) {
                             const new_dir = iterable_dir.dir.openIterableDir(entry.name, .{ .no_follow = true }) catch |err| switch (err) {
@@ -2407,7 +2407,7 @@ pub const Dir = struct {
     fn deleteTreeOpenInitialSubpath(self: Dir, sub_path: []const u8, kind_hint: File.Kind) !?IterableDir {
         return iterable_dir: {
             // Treat as a file by default
-            var treat_as_dir = kind_hint == .Directory;
+            var treat_as_dir = kind_hint == .directory;
 
             handle_entry: while (true) {
                 if (treat_as_dir) {
diff --git a/lib/std/fs/file.zig b/lib/std/fs/file.zig
index 23021a26f5..83db10ef32 100644
--- a/lib/std/fs/file.zig
+++ b/lib/std/fs/file.zig
@@ -35,17 +35,17 @@ pub const File = struct {
     pub const Gid = os.gid_t;
 
     pub const Kind = enum {
-        BlockDevice,
-        CharacterDevice,
-        Directory,
-        NamedPipe,
-        SymLink,
-        File,
-        UnixDomainSocket,
-        Whiteout,
-        Door,
-        EventPort,
-        Unknown,
+        block_device,
+        character_device,
+        directory,
+        named_pipe,
+        sym_link,
+        file,
+        unix_domain_socket,
+        whiteout,
+        door,
+        event_port,
+        unknown,
     };
 
     /// This is the default mode given to POSIX operating systems for creating
@@ -329,32 +329,32 @@ pub const File = struct {
             const mtime = st.mtime();
             const ctime = st.ctime();
             const kind: Kind = if (builtin.os.tag == .wasi and !builtin.link_libc) switch (st.filetype) {
-                .BLOCK_DEVICE => Kind.BlockDevice,
-                .CHARACTER_DEVICE => Kind.CharacterDevice,
-                .DIRECTORY => Kind.Directory,
-                .SYMBOLIC_LINK => Kind.SymLink,
-                .REGULAR_FILE => Kind.File,
-                .SOCKET_STREAM, .SOCKET_DGRAM => Kind.UnixDomainSocket,
-                else => Kind.Unknown,
+                .BLOCK_DEVICE => .block_device,
+                .CHARACTER_DEVICE => .character_device,
+                .DIRECTORY => .directory,
+                .SYMBOLIC_LINK => .sym_link,
+                .REGULAR_FILE => .file,
+                .SOCKET_STREAM, .SOCKET_DGRAM => .unix_domain_socket,
+                else => .unknown,
             } else blk: {
                 const m = st.mode & os.S.IFMT;
                 switch (m) {
-                    os.S.IFBLK => break :blk Kind.BlockDevice,
-                    os.S.IFCHR => break :blk Kind.CharacterDevice,
-                    os.S.IFDIR => break :blk Kind.Directory,
-                    os.S.IFIFO => break :blk Kind.NamedPipe,
-                    os.S.IFLNK => break :blk Kind.SymLink,
-                    os.S.IFREG => break :blk Kind.File,
-                    os.S.IFSOCK => break :blk Kind.UnixDomainSocket,
+                    os.S.IFBLK => break :blk .block_device,
+                    os.S.IFCHR => break :blk .character_device,
+                    os.S.IFDIR => break :blk .directory,
+                    os.S.IFIFO => break :blk .named_pipe,
+                    os.S.IFLNK => break :blk .sym_link,
+                    os.S.IFREG => break :blk .file,
+                    os.S.IFSOCK => break :blk .unix_domain_socket,
                     else => {},
                 }
                 if (builtin.os.tag == .solaris) switch (m) {
-                    os.S.IFDOOR => break :blk Kind.Door,
-                    os.S.IFPORT => break :blk Kind.EventPort,
+                    os.S.IFDOOR => break :blk .door,
+                    os.S.IFPORT => break :blk .event_port,
                     else => {},
                 };
 
-                break :blk .Unknown;
+                break :blk .unknown;
             };
 
             return Stat{
@@ -391,7 +391,7 @@ pub const File = struct {
                 .inode = info.InternalInformation.IndexNumber,
                 .size = @bitCast(u64, info.StandardInformation.EndOfFile),
                 .mode = 0,
-                .kind = if (info.StandardInformation.Directory == 0) .File else .Directory,
+                .kind = if (info.StandardInformation.Directory == 0) .file else .directory,
                 .atime = windows.fromSysTime(info.BasicInformation.LastAccessTime),
                 .mtime = windows.fromSysTime(info.BasicInformation.LastWriteTime),
                 .ctime = windows.fromSysTime(info.BasicInformation.CreationTime),
@@ -609,7 +609,7 @@ pub const File = struct {
         }
 
         /// Returns the `Kind` of file.
-        /// On Windows, can only return: `.File`, `.Directory`, `.SymLink` or `.Unknown`
+        /// On Windows, can only return: `.file`, `.directory`, `.sym_link` or `.unknown`
         pub fn kind(self: Self) Kind {
             return self.inner.kind();
         }
@@ -652,35 +652,35 @@ pub const File = struct {
         /// Returns the `Kind` of the file
         pub fn kind(self: Self) Kind {
             if (builtin.os.tag == .wasi and !builtin.link_libc) return switch (self.stat.filetype) {
-                .BLOCK_DEVICE => Kind.BlockDevice,
-                .CHARACTER_DEVICE => Kind.CharacterDevice,
-                .DIRECTORY => Kind.Directory,
-                .SYMBOLIC_LINK => Kind.SymLink,
-                .REGULAR_FILE => Kind.File,
-                .SOCKET_STREAM, .SOCKET_DGRAM => Kind.UnixDomainSocket,
-                else => Kind.Unknown,
+                .BLOCK_DEVICE => .block_device,
+                .CHARACTER_DEVICE => .character_device,
+                .DIRECTORY => .directory,
+                .SYMBOLIC_LINK => .sym_link,
+                .REGULAR_FILE => .file,
+                .SOCKET_STREAM, .SOCKET_DGRAM => .unix_domain_socket,
+                else => .unknown,
             };
 
             const m = self.stat.mode & os.S.IFMT;
 
             switch (m) {
-                os.S.IFBLK => return Kind.BlockDevice,
-                os.S.IFCHR => return Kind.CharacterDevice,
-                os.S.IFDIR => return Kind.Directory,
-                os.S.IFIFO => return Kind.NamedPipe,
-                os.S.IFLNK => return Kind.SymLink,
-                os.S.IFREG => return Kind.File,
-                os.S.IFSOCK => return Kind.UnixDomainSocket,
+                os.S.IFBLK => return .block_device,
+                os.S.IFCHR => return .character_device,
+                os.S.IFDIR => return .directory,
+                os.S.IFIFO => return .named_pipe,
+                os.S.IFLNK => return .sym_link,
+                os.S.IFREG => return .file,
+                os.S.IFSOCK => return .unix_domain_socket,
                 else => {},
             }
 
             if (builtin.os.tag == .solaris) switch (m) {
-                os.S.IFDOOR => return Kind.Door,
-                os.S.IFPORT => return Kind.EventPort,
+                os.S.IFDOOR => return .door,
+                os.S.IFPORT => return .event_port,
                 else => {},
             };
 
-            return .Unknown;
+            return .unknown;
         }
 
         /// Returns the last time the file was accessed in nanoseconds since UTC 1970-01-01
@@ -738,17 +738,17 @@ pub const File = struct {
             const m = self.statx.mode & os.S.IFMT;
 
             switch (m) {
-                os.S.IFBLK => return Kind.BlockDevice,
-                os.S.IFCHR => return Kind.CharacterDevice,
-                os.S.IFDIR => return Kind.Directory,
-                os.S.IFIFO => return Kind.NamedPipe,
-                os.S.IFLNK => return Kind.SymLink,
-                os.S.IFREG => return Kind.File,
-                os.S.IFSOCK => return Kind.UnixDomainSocket,
+                os.S.IFBLK => return .block_device,
+                os.S.IFCHR => return .character_device,
+                os.S.IFDIR => return .directory,
+                os.S.IFIFO => return .named_pipe,
+                os.S.IFLNK => return .sym_link,
+                os.S.IFREG => return .file,
+                os.S.IFSOCK => return .unix_domain_socket,
                 else => {},
             }
 
-            return .Unknown;
+            return .unknown;
         }
 
         /// Returns the last time the file was accessed in nanoseconds since UTC 1970-01-01
@@ -790,18 +790,18 @@ pub const File = struct {
         }
 
         /// Returns the `Kind` of the file.
-        /// Can only return: `.File`, `.Directory`, `.SymLink` or `.Unknown`
+        /// Can only return: `.file`, `.directory`, `.sym_link` or `.unknown`
         pub fn kind(self: Self) Kind {
             if (self.attributes & windows.FILE_ATTRIBUTE_REPARSE_POINT != 0) {
                 if (self.reparse_tag & 0x20000000 != 0) {
-                    return .SymLink;
+                    return .sym_link;
                 }
             } else if (self.attributes & windows.FILE_ATTRIBUTE_DIRECTORY != 0) {
-                return .Directory;
+                return .directory;
             } else {
-                return .File;
+                return .file;
             }
-            return .Unknown;
+            return .unknown;
         }
 
         /// Returns the last time the file was accessed in nanoseconds since UTC 1970-01-01
diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig
index 15c8307f58..16677007f9 100644
--- a/lib/std/fs/test.zig
+++ b/lib/std/fs/test.zig
@@ -179,8 +179,8 @@ test "Dir.Iterator" {
     }
 
     try testing.expect(entries.items.len == 2); // note that the Iterator skips '.' and '..'
-    try testing.expect(contains(&entries, .{ .name = "some_file", .kind = .File }));
-    try testing.expect(contains(&entries, .{ .name = "some_dir", .kind = .Directory }));
+    try testing.expect(contains(&entries, .{ .name = "some_file", .kind = .file }));
+    try testing.expect(contains(&entries, .{ .name = "some_dir", .kind = .directory }));
 }
 
 test "Dir.Iterator many entries" {
@@ -214,7 +214,7 @@ test "Dir.Iterator many entries" {
     i = 0;
     while (i < num) : (i += 1) {
         const name = try std.fmt.bufPrint(&buf, "{}", .{i});
-        try testing.expect(contains(&entries, .{ .name = name, .kind = .File }));
+        try testing.expect(contains(&entries, .{ .name = name, .kind = .file }));
     }
 }
 
@@ -246,8 +246,8 @@ test "Dir.Iterator twice" {
         }
 
         try testing.expect(entries.items.len == 2); // note that the Iterator skips '.' and '..'
-        try testing.expect(contains(&entries, .{ .name = "some_file", .kind = .File }));
-        try testing.expect(contains(&entries, .{ .name = "some_dir", .kind = .Directory }));
+        try testing.expect(contains(&entries, .{ .name = "some_file", .kind = .file }));
+        try testing.expect(contains(&entries, .{ .name = "some_dir", .kind = .directory }));
     }
 }
 
@@ -280,8 +280,8 @@ test "Dir.Iterator reset" {
         }
 
         try testing.expect(entries.items.len == 2); // note that the Iterator skips '.' and '..'
-        try testing.expect(contains(&entries, .{ .name = "some_file", .kind = .File }));
-        try testing.expect(contains(&entries, .{ .name = "some_dir", .kind = .Directory }));
+        try testing.expect(contains(&entries, .{ .name = "some_file", .kind = .file }));
+        try testing.expect(contains(&entries, .{ .name = "some_dir", .kind = .directory }));
 
         iter.reset();
     }
@@ -428,7 +428,7 @@ test "directory operations on files" {
     // ensure the file still exists and is a file as a sanity check
     file = try tmp_dir.dir.openFile(test_file_name, .{});
     const stat = try file.stat();
-    try testing.expect(stat.kind == .File);
+    try testing.expect(stat.kind == .file);
     file.close();
 }
 
@@ -664,7 +664,7 @@ test "renameAbsolute" {
     try testing.expectError(error.FileNotFound, tmp_dir.dir.openFile(test_file_name, .{}));
     file = try tmp_dir.dir.openFile(renamed_test_file_name, .{});
     const stat = try file.stat();
-    try testing.expect(stat.kind == .File);
+    try testing.expect(stat.kind == .file);
     file.close();
 
     // Renaming directories
@@ -1348,7 +1348,7 @@ test "File.Metadata" {
     defer file.close();
 
     const metadata = try file.metadata();
-    try testing.expect(metadata.kind() == .File);
+    try testing.expect(metadata.kind() == .file);
     try testing.expect(metadata.size() == 0);
     _ = metadata.accessed();
     _ = metadata.modified();
diff --git a/src/Package.zig b/src/Package.zig
index d3ac71af1a..532439c60c 100644
--- a/src/Package.zig
+++ b/src/Package.zig
@@ -651,8 +651,8 @@ fn computePackageHash(
 
         while (try walker.next()) |entry| {
             switch (entry.kind) {
-                .Directory => continue,
-                .File => {},
+                .directory => continue,
+                .file => {},
                 else => return error.IllegalFileTypeInPackage,
             }
             const hashed_file = try arena.create(HashedFile);
diff --git a/src/main.zig b/src/main.zig
index afda88cebd..f7afbe767b 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -4830,11 +4830,11 @@ fn fmtPathDir(
 
     var dir_it = iterable_dir.iterate();
     while (try dir_it.next()) |entry| {
-        const is_dir = entry.kind == .Directory;
+        const is_dir = entry.kind == .directory;
 
         if (is_dir and (mem.eql(u8, entry.name, "zig-cache") or mem.eql(u8, entry.name, "zig-out"))) continue;
 
-        if (is_dir or entry.kind == .File and (mem.endsWith(u8, entry.name, ".zig") or mem.endsWith(u8, entry.name, ".zon"))) {
+        if (is_dir or entry.kind == .file and (mem.endsWith(u8, entry.name, ".zig") or mem.endsWith(u8, entry.name, ".zon"))) {
             const full_path = try fs.path.join(fmt.gpa, &[_][]const u8{ file_path, entry.name });
             defer fmt.gpa.free(full_path);
 
@@ -4864,7 +4864,7 @@ fn fmtPathFile(
 
     const stat = try source_file.stat();
 
-    if (stat.kind == .Directory)
+    if (stat.kind == .directory)
         return error.IsDir;
 
     const gpa = fmt.gpa;
diff --git a/test/src/Cases.zig b/test/src/Cases.zig
index e4f3d532ce..589438297d 100644
--- a/test/src/Cases.zig
+++ b/test/src/Cases.zig
@@ -349,7 +349,7 @@ fn addFromDirInner(
     var filenames = std.ArrayList([]const u8).init(ctx.arena);
 
     while (try it.next()) |entry| {
-        if (entry.kind != .File) continue;
+        if (entry.kind != .file) continue;
 
         // Ignore stuff such as .swp files
         switch (Compilation.classifyFileExt(entry.basename)) {
@@ -1039,7 +1039,7 @@ pub fn main() !void {
         const stem = case_file_path[case_dirname.len + 1 .. case_file_path.len - "0.zig".len];
         var it = iterable_dir.iterate();
         while (try it.next()) |entry| {
-            if (entry.kind != .File) continue;
+            if (entry.kind != .file) continue;
             if (!std.mem.startsWith(u8, entry.name, stem)) continue;
             try filenames.append(try std.fs.path.join(arena, &.{ case_dirname, entry.name }));
         }
diff --git a/tools/process_headers.zig b/tools/process_headers.zig
index 0321c0e0eb..b93cd07a2c 100644
--- a/tools/process_headers.zig
+++ b/tools/process_headers.zig
@@ -393,8 +393,8 @@ pub fn main() !void {
                 while (try dir_it.next()) |entry| {
                     const full_path = try std.fs.path.join(allocator, &[_][]const u8{ full_dir_name, entry.name });
                     switch (entry.kind) {
-                        .Directory => try dir_stack.append(full_path),
-                        .File => {
+                        .directory => try dir_stack.append(full_path),
+                        .file => {
                             const rel_path = try std.fs.path.relative(allocator, target_include_dir, full_path);
                             const max_size = 2 * 1024 * 1024 * 1024;
                             const raw_bytes = try std.fs.cwd().readFileAlloc(allocator, full_path, max_size);
diff --git a/tools/update-linux-headers.zig b/tools/update-linux-headers.zig
index 0f31e5e893..d20bc8dd11 100644
--- a/tools/update-linux-headers.zig
+++ b/tools/update-linux-headers.zig
@@ -193,8 +193,8 @@ pub fn main() !void {
                 while (try dir_it.next()) |entry| {
                     const full_path = try std.fs.path.join(arena, &[_][]const u8{ full_dir_name, entry.name });
                     switch (entry.kind) {
-                        .Directory => try dir_stack.append(full_path),
-                        .File => {
+                        .directory => try dir_stack.append(full_path),
+                        .file => {
                             const rel_path = try std.fs.path.relative(arena, target_include_dir, full_path);
                             const max_size = 2 * 1024 * 1024 * 1024;
                             const raw_bytes = try std.fs.cwd().readFileAlloc(arena, full_path, max_size);
diff --git a/tools/update_glibc.zig b/tools/update_glibc.zig
index 24b0f45de7..dcc797f59f 100644
--- a/tools/update_glibc.zig
+++ b/tools/update_glibc.zig
@@ -57,7 +57,7 @@ pub fn main() !void {
         defer walker.deinit();
 
         walk: while (try walker.next()) |entry| {
-            if (entry.kind != .File) continue;
+            if (entry.kind != .file) continue;
             if (mem.startsWith(u8, entry.basename, ".")) continue;
             for (exempt_files) |p| {
                 if (mem.eql(u8, entry.path, p)) continue :walk;
@@ -98,7 +98,7 @@ pub fn main() !void {
     defer walker.deinit();
 
     walk: while (try walker.next()) |entry| {
-        if (entry.kind != .File) continue;
+        if (entry.kind != .file) continue;
         if (mem.startsWith(u8, entry.basename, ".")) continue;
         for (exempt_files) |p| {
             if (mem.eql(u8, entry.path, p)) continue :walk;
diff --git a/tools/update_spirv_features.zig b/tools/update_spirv_features.zig
index 44d8b6a445..31bdf40ef7 100644
--- a/tools/update_spirv_features.zig
+++ b/tools/update_spirv_features.zig
@@ -227,7 +227,7 @@ fn gather_extensions(allocator: Allocator, spirv_registry_root: []const u8) ![]c
 
     var vendor_it = extensions_dir.iterate();
     while (try vendor_it.next()) |vendor_entry| {
-        std.debug.assert(vendor_entry.kind == .Directory); // If this fails, the structure of SPIRV-Registry has changed.
+        std.debug.assert(vendor_entry.kind == .directory); // If this fails, the structure of SPIRV-Registry has changed.
 
         const vendor_dir = try extensions_dir.dir.openIterableDir(vendor_entry.name, .{});
         var ext_it = vendor_dir.iterate();
-- 
cgit v1.2.3


From 0f58d34ef7935dd9877f4969ed6ec7d582cd104c Mon Sep 17 00:00:00 2001
From: mlugg 
Date: Mon, 29 May 2023 20:31:09 +0100
Subject: tests: disable incremental cases for now

These tests were failing as of the prior commit. Unfortunately, the fix
would have been relatively complicated, and ties into underlying issues
with the current incremental compilation logic. After discussing this
with Andrew, we agreed that the best course of action is to completely
disable incremental compilation tests for now until incremental
compilation is more mature, at which point we can re-enable them.
---
 test/src/Cases.zig | 7 +++++++
 1 file changed, 7 insertions(+)

(limited to 'test/src')

diff --git a/test/src/Cases.zig b/test/src/Cases.zig
index 589438297d..e56d9ad201 100644
--- a/test/src/Cases.zig
+++ b/test/src/Cases.zig
@@ -468,6 +468,13 @@ pub fn lowerToBuildSteps(
     incremental_exe: *std.Build.Step.Compile,
 ) void {
     for (self.incremental_cases.items) |incr_case| {
+        if (true) {
+            // TODO: incremental tests are disabled for now, as incremental compilation bugs were
+            // getting in the way of practical improvements to the compiler, and incremental
+            // compilation is not currently used. They should be re-enabled once incremental
+            // compilation is in a happier state.
+            continue;
+        }
         if (opt_test_filter) |test_filter| {
             if (std.mem.indexOf(u8, incr_case.base_path, test_filter) == null) continue;
         }
-- 
cgit v1.2.3