Diffstat (limited to 'lib/std')
 -rw-r--r--  lib/std/Build.zig                  |   3
 -rw-r--r--  lib/std/Build/Fuzz.zig             | 451
 -rw-r--r--  lib/std/Build/Fuzz/WebServer.zig   | 709
 -rw-r--r--  lib/std/Build/Fuzz/abi.zig         | 112
 -rw-r--r--  lib/std/Build/Step.zig             |  72
 -rw-r--r--  lib/std/Build/Step/Compile.zig     |   9
 -rw-r--r--  lib/std/Build/Step/ObjCopy.zig     |   2
 -rw-r--r--  lib/std/Build/Step/Options.zig     |   1
 -rw-r--r--  lib/std/Build/Step/Run.zig         |  26
 -rw-r--r--  lib/std/Build/Step/TranslateC.zig  |   2
 -rw-r--r--  lib/std/Build/WebServer.zig        | 823
 -rw-r--r--  lib/std/Build/abi.zig              | 313
 -rw-r--r--  lib/std/http/WebSocket.zig         |  14
 -rw-r--r--  lib/std/net.zig                    |  41
 -rw-r--r--  lib/std/zig/Server.zig             |  15
 15 files changed, 1647 insertions, 946 deletions
diff --git a/lib/std/Build.zig b/lib/std/Build.zig index d6b0e68f5d..db539e7b09 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -22,6 +22,8 @@ pub const Step = @import("Build/Step.zig"); pub const Module = @import("Build/Module.zig"); pub const Watch = @import("Build/Watch.zig"); pub const Fuzz = @import("Build/Fuzz.zig"); +pub const WebServer = @import("Build/WebServer.zig"); +pub const abi = @import("Build/abi.zig"); /// Shared state among all Build instances. graph: *Graph, @@ -125,6 +127,7 @@ pub const Graph = struct { random_seed: u32 = 0, dependency_cache: InitializedDepMap = .empty, allow_so_scripts: ?bool = null, + time_report: bool, }; const AvailableDeps = []const struct { []const u8, []const u8 }; diff --git a/lib/std/Build/Fuzz.zig b/lib/std/Build/Fuzz.zig index 28f8781dd1..a25b501755 100644 --- a/lib/std/Build/Fuzz.zig +++ b/lib/std/Build/Fuzz.zig @@ -1,108 +1,134 @@ -const builtin = @import("builtin"); const std = @import("../std.zig"); const Build = std.Build; +const Cache = Build.Cache; const Step = std.Build.Step; const assert = std.debug.assert; const fatal = std.process.fatal; const Allocator = std.mem.Allocator; const log = std.log; +const Coverage = std.debug.Coverage; +const abi = Build.abi.fuzz; const Fuzz = @This(); const build_runner = @import("root"); -pub const WebServer = @import("Fuzz/WebServer.zig"); -pub const abi = @import("Fuzz/abi.zig"); - -pub fn start( - gpa: Allocator, - arena: Allocator, - global_cache_directory: Build.Cache.Directory, - zig_lib_directory: Build.Cache.Directory, - zig_exe_path: []const u8, - thread_pool: *std.Thread.Pool, - all_steps: []const *Step, - ttyconf: std.io.tty.Config, - listen_address: std.net.Address, - prog_node: std.Progress.Node, -) Allocator.Error!void { - const fuzz_run_steps = block: { - const rebuild_node = prog_node.start("Rebuilding Unit Tests", 0); +ws: *Build.WebServer, + +/// Allocated into `ws.gpa`. +run_steps: []const *Step.Run, + +wait_group: std.Thread.WaitGroup, +prog_node: std.Progress.Node, + +/// Protects `coverage_files`. +coverage_mutex: std.Thread.Mutex, +coverage_files: std.AutoArrayHashMapUnmanaged(u64, CoverageMap), + +queue_mutex: std.Thread.Mutex, +queue_cond: std.Thread.Condition, +msg_queue: std.ArrayListUnmanaged(Msg), + +const Msg = union(enum) { + coverage: struct { + id: u64, + run: *Step.Run, + }, + entry_point: struct { + coverage_id: u64, + addr: u64, + }, +}; + +const CoverageMap = struct { + mapped_memory: []align(std.heap.page_size_min) const u8, + coverage: Coverage, + source_locations: []Coverage.SourceLocation, + /// Elements are indexes into `source_locations` pointing to the unit tests that are being fuzz tested. 
+ entry_points: std.ArrayListUnmanaged(u32), + start_timestamp: i64, + + fn deinit(cm: *CoverageMap, gpa: Allocator) void { + std.posix.munmap(cm.mapped_memory); + cm.coverage.deinit(gpa); + cm.* = undefined; + } +}; + +pub fn init(ws: *Build.WebServer) Allocator.Error!Fuzz { + const gpa = ws.gpa; + + const run_steps: []const *Step.Run = steps: { + var steps: std.ArrayListUnmanaged(*Step.Run) = .empty; + defer steps.deinit(gpa); + const rebuild_node = ws.root_prog_node.start("Rebuilding Unit Tests", 0); defer rebuild_node.end(); - var wait_group: std.Thread.WaitGroup = .{}; - defer wait_group.wait(); - var fuzz_run_steps: std.ArrayListUnmanaged(*Step.Run) = .empty; - defer fuzz_run_steps.deinit(gpa); - for (all_steps) |step| { + var rebuild_wg: std.Thread.WaitGroup = .{}; + defer rebuild_wg.wait(); + + for (ws.all_steps) |step| { const run = step.cast(Step.Run) orelse continue; - if (run.fuzz_tests.items.len > 0 and run.producer != null) { - thread_pool.spawnWg(&wait_group, rebuildTestsWorkerRun, .{ run, ttyconf, rebuild_node }); - try fuzz_run_steps.append(gpa, run); - } + if (run.producer == null) continue; + if (run.fuzz_tests.items.len == 0) continue; + try steps.append(gpa, run); + ws.thread_pool.spawnWg(&rebuild_wg, rebuildTestsWorkerRun, .{ run, gpa, ws.ttyconf, rebuild_node }); } - if (fuzz_run_steps.items.len == 0) fatal("no fuzz tests found", .{}); - rebuild_node.setEstimatedTotalItems(fuzz_run_steps.items.len); - break :block try arena.dupe(*Step.Run, fuzz_run_steps.items); + + if (steps.items.len == 0) fatal("no fuzz tests found", .{}); + rebuild_node.setEstimatedTotalItems(steps.items.len); + break :steps try gpa.dupe(*Step.Run, steps.items); }; + errdefer gpa.free(run_steps); - // Detect failure. - for (fuzz_run_steps) |run| { + for (run_steps) |run| { assert(run.fuzz_tests.items.len > 0); if (run.rebuilt_executable == null) fatal("one or more unit tests failed to be rebuilt in fuzz mode", .{}); } - var web_server: WebServer = .{ - .gpa = gpa, - .global_cache_directory = global_cache_directory, - .zig_lib_directory = zig_lib_directory, - .zig_exe_path = zig_exe_path, - .listen_address = listen_address, - .fuzz_run_steps = fuzz_run_steps, - - .msg_queue = .{}, - .mutex = .{}, - .condition = .{}, - - .coverage_files = .{}, + return .{ + .ws = ws, + .run_steps = run_steps, + .wait_group = .{}, + .prog_node = .none, + .coverage_files = .empty, .coverage_mutex = .{}, - .coverage_condition = .{}, - - .base_timestamp = std.time.nanoTimestamp(), + .queue_mutex = .{}, + .queue_cond = .{}, + .msg_queue = .empty, }; +} - // For accepting HTTP connections. - const web_server_thread = std.Thread.spawn(.{}, WebServer.run, .{&web_server}) catch |err| { - fatal("unable to spawn web server thread: {s}", .{@errorName(err)}); - }; - defer web_server_thread.join(); +pub fn start(fuzz: *Fuzz) void { + const ws = fuzz.ws; + fuzz.prog_node = ws.root_prog_node.start("Fuzzing", fuzz.run_steps.len); // For polling messages and sending updates to subscribers. 
- const coverage_thread = std.Thread.spawn(.{}, WebServer.coverageRun, .{&web_server}) catch |err| { + fuzz.wait_group.start(); + _ = std.Thread.spawn(.{}, coverageRun, .{fuzz}) catch |err| { + fuzz.wait_group.finish(); fatal("unable to spawn coverage thread: {s}", .{@errorName(err)}); }; - defer coverage_thread.join(); - - { - const fuzz_node = prog_node.start("Fuzzing", fuzz_run_steps.len); - defer fuzz_node.end(); - var wait_group: std.Thread.WaitGroup = .{}; - defer wait_group.wait(); - for (fuzz_run_steps) |run| { - for (run.fuzz_tests.items) |unit_test_index| { - assert(run.rebuilt_executable != null); - thread_pool.spawnWg(&wait_group, fuzzWorkerRun, .{ - run, &web_server, unit_test_index, ttyconf, fuzz_node, - }); - } + for (fuzz.run_steps) |run| { + for (run.fuzz_tests.items) |unit_test_index| { + assert(run.rebuilt_executable != null); + ws.thread_pool.spawnWg(&fuzz.wait_group, fuzzWorkerRun, .{ + fuzz, run, unit_test_index, + }); } } +} +pub fn deinit(fuzz: *Fuzz) void { + if (true) @panic("TODO: terminate the fuzzer processes"); + fuzz.wait_group.wait(); + fuzz.prog_node.end(); - log.err("all fuzz workers crashed", .{}); + const gpa = fuzz.ws.gpa; + gpa.free(fuzz.run_steps); } -fn rebuildTestsWorkerRun(run: *Step.Run, ttyconf: std.io.tty.Config, parent_prog_node: std.Progress.Node) void { - rebuildTestsWorkerRunFallible(run, ttyconf, parent_prog_node) catch |err| { +fn rebuildTestsWorkerRun(run: *Step.Run, gpa: Allocator, ttyconf: std.io.tty.Config, parent_prog_node: std.Progress.Node) void { + rebuildTestsWorkerRunFallible(run, gpa, ttyconf, parent_prog_node) catch |err| { const compile = run.producer.?; log.err("step '{s}': failed to rebuild in fuzz mode: {s}", .{ compile.step.name, @errorName(err), @@ -110,14 +136,12 @@ fn rebuildTestsWorkerRun(run: *Step.Run, ttyconf: std.io.tty.Config, parent_prog }; } -fn rebuildTestsWorkerRunFallible(run: *Step.Run, ttyconf: std.io.tty.Config, parent_prog_node: std.Progress.Node) !void { - const gpa = run.step.owner.allocator; - +fn rebuildTestsWorkerRunFallible(run: *Step.Run, gpa: Allocator, ttyconf: std.io.tty.Config, parent_prog_node: std.Progress.Node) !void { const compile = run.producer.?; const prog_node = parent_prog_node.start(compile.step.name, 0); defer prog_node.end(); - const result = compile.rebuildInFuzzMode(prog_node); + const result = compile.rebuildInFuzzMode(gpa, prog_node); const show_compile_errors = compile.step.result_error_bundle.errorMessageCount() > 0; const show_error_msgs = compile.step.result_error_msgs.items.len > 0; @@ -138,24 +162,22 @@ fn rebuildTestsWorkerRunFallible(run: *Step.Run, ttyconf: std.io.tty.Config, par } fn fuzzWorkerRun( + fuzz: *Fuzz, run: *Step.Run, - web_server: *WebServer, unit_test_index: u32, - ttyconf: std.io.tty.Config, - parent_prog_node: std.Progress.Node, ) void { const gpa = run.step.owner.allocator; const test_name = run.cached_test_metadata.?.testName(unit_test_index); - const prog_node = parent_prog_node.start(test_name, 0); + const prog_node = fuzz.prog_node.start(test_name, 0); defer prog_node.end(); - run.rerunInFuzzMode(web_server, unit_test_index, prog_node) catch |err| switch (err) { + run.rerunInFuzzMode(fuzz, unit_test_index, prog_node) catch |err| switch (err) { error.MakeFailed => { var buf: [256]u8 = undefined; const w = std.debug.lockStderrWriter(&buf); defer std.debug.unlockStderrWriter(); - build_runner.printErrorMessages(gpa, &run.step, .{ .ttyconf = ttyconf }, w, false) catch {}; + build_runner.printErrorMessages(gpa, &run.step, .{ .ttyconf = fuzz.ws.ttyconf 
}, w, false) catch {}; return; }, else => { @@ -166,3 +188,270 @@ fn fuzzWorkerRun( }, }; } + +pub fn serveSourcesTar(fuzz: *Fuzz, req: *std.http.Server.Request) !void { + const gpa = fuzz.ws.gpa; + + var arena_state: std.heap.ArenaAllocator = .init(gpa); + defer arena_state.deinit(); + const arena = arena_state.allocator(); + + const DedupTable = std.ArrayHashMapUnmanaged(Build.Cache.Path, void, Build.Cache.Path.TableAdapter, false); + var dedup_table: DedupTable = .empty; + defer dedup_table.deinit(gpa); + + for (fuzz.run_steps) |run_step| { + const compile_inputs = run_step.producer.?.step.inputs.table; + for (compile_inputs.keys(), compile_inputs.values()) |dir_path, *file_list| { + try dedup_table.ensureUnusedCapacity(gpa, file_list.items.len); + for (file_list.items) |sub_path| { + if (!std.mem.endsWith(u8, sub_path, ".zig")) continue; + const joined_path = try dir_path.join(arena, sub_path); + dedup_table.putAssumeCapacity(joined_path, {}); + } + } + } + + const deduped_paths = dedup_table.keys(); + const SortContext = struct { + pub fn lessThan(this: @This(), lhs: Build.Cache.Path, rhs: Build.Cache.Path) bool { + _ = this; + return switch (std.mem.order(u8, lhs.root_dir.path orelse ".", rhs.root_dir.path orelse ".")) { + .lt => true, + .gt => false, + .eq => std.mem.lessThan(u8, lhs.sub_path, rhs.sub_path), + }; + } + }; + std.mem.sortUnstable(Build.Cache.Path, deduped_paths, SortContext{}, SortContext.lessThan); + return fuzz.ws.serveTarFile(req, deduped_paths); +} + +pub const Previous = struct { + unique_runs: usize, + entry_points: usize, + pub const init: Previous = .{ .unique_runs = 0, .entry_points = 0 }; +}; +pub fn sendUpdate( + fuzz: *Fuzz, + socket: *std.http.WebSocket, + prev: *Previous, +) !void { + fuzz.coverage_mutex.lock(); + defer fuzz.coverage_mutex.unlock(); + + const coverage_maps = fuzz.coverage_files.values(); + if (coverage_maps.len == 0) return; + // TODO: handle multiple fuzz steps in the WebSocket packets + const coverage_map = &coverage_maps[0]; + const cov_header: *const abi.SeenPcsHeader = @ptrCast(coverage_map.mapped_memory[0..@sizeOf(abi.SeenPcsHeader)]); + // TODO: this isn't sound! We need to do volatile reads of these bits rather than handing the + // buffer off to the kernel, because we might race with the fuzzer process[es]. This brings the + // whole mmap strategy into question. Incidentally, I wonder if post-writergate we could pass + // this data straight to the socket with sendfile... + const seen_pcs = cov_header.seenBits(); + const n_runs = @atomicLoad(usize, &cov_header.n_runs, .monotonic); + const unique_runs = @atomicLoad(usize, &cov_header.unique_runs, .monotonic); + if (prev.unique_runs != unique_runs) { + // There has been an update. + if (prev.unique_runs == 0) { + // We need to send initial context. 
+ const header: abi.SourceIndexHeader = .{ + .directories_len = @intCast(coverage_map.coverage.directories.entries.len), + .files_len = @intCast(coverage_map.coverage.files.entries.len), + .source_locations_len = @intCast(coverage_map.source_locations.len), + .string_bytes_len = @intCast(coverage_map.coverage.string_bytes.items.len), + .start_timestamp = coverage_map.start_timestamp, + }; + const iovecs: [5]std.posix.iovec_const = .{ + makeIov(@ptrCast(&header)), + makeIov(@ptrCast(coverage_map.coverage.directories.keys())), + makeIov(@ptrCast(coverage_map.coverage.files.keys())), + makeIov(@ptrCast(coverage_map.source_locations)), + makeIov(coverage_map.coverage.string_bytes.items), + }; + try socket.writeMessagev(&iovecs, .binary); + } + + const header: abi.CoverageUpdateHeader = .{ + .n_runs = n_runs, + .unique_runs = unique_runs, + }; + const iovecs: [2]std.posix.iovec_const = .{ + makeIov(@ptrCast(&header)), + makeIov(@ptrCast(seen_pcs)), + }; + try socket.writeMessagev(&iovecs, .binary); + + prev.unique_runs = unique_runs; + } + + if (prev.entry_points != coverage_map.entry_points.items.len) { + const header: abi.EntryPointHeader = .init(@intCast(coverage_map.entry_points.items.len)); + const iovecs: [2]std.posix.iovec_const = .{ + makeIov(@ptrCast(&header)), + makeIov(@ptrCast(coverage_map.entry_points.items)), + }; + try socket.writeMessagev(&iovecs, .binary); + + prev.entry_points = coverage_map.entry_points.items.len; + } +} + +fn coverageRun(fuzz: *Fuzz) void { + defer fuzz.wait_group.finish(); + + fuzz.queue_mutex.lock(); + defer fuzz.queue_mutex.unlock(); + + while (true) { + fuzz.queue_cond.wait(&fuzz.queue_mutex); + for (fuzz.msg_queue.items) |msg| switch (msg) { + .coverage => |coverage| prepareTables(fuzz, coverage.run, coverage.id) catch |err| switch (err) { + error.AlreadyReported => continue, + else => |e| log.err("failed to prepare code coverage tables: {s}", .{@errorName(e)}), + }, + .entry_point => |entry_point| addEntryPoint(fuzz, entry_point.coverage_id, entry_point.addr) catch |err| switch (err) { + error.AlreadyReported => continue, + else => |e| log.err("failed to prepare code coverage tables: {s}", .{@errorName(e)}), + }, + }; + fuzz.msg_queue.clearRetainingCapacity(); + } +} +fn prepareTables(fuzz: *Fuzz, run_step: *Step.Run, coverage_id: u64) error{ OutOfMemory, AlreadyReported }!void { + const ws = fuzz.ws; + const gpa = ws.gpa; + + fuzz.coverage_mutex.lock(); + defer fuzz.coverage_mutex.unlock(); + + const gop = try fuzz.coverage_files.getOrPut(gpa, coverage_id); + if (gop.found_existing) { + // We are fuzzing the same executable with multiple threads. + // Perhaps the same unit test; perhaps a different one. In any + // case, since the coverage file is the same, we only have to + // notice changes to that one file in order to learn coverage for + // this particular executable. 
+ return; + } + errdefer _ = fuzz.coverage_files.pop(); + + gop.value_ptr.* = .{ + .coverage = std.debug.Coverage.init, + .mapped_memory = undefined, // populated below + .source_locations = undefined, // populated below + .entry_points = .{}, + .start_timestamp = ws.now(), + }; + errdefer gop.value_ptr.coverage.deinit(gpa); + + const rebuilt_exe_path = run_step.rebuilt_executable.?; + var debug_info = std.debug.Info.load(gpa, rebuilt_exe_path, &gop.value_ptr.coverage) catch |err| { + log.err("step '{s}': failed to load debug information for '{f}': {s}", .{ + run_step.step.name, rebuilt_exe_path, @errorName(err), + }); + return error.AlreadyReported; + }; + defer debug_info.deinit(gpa); + + const coverage_file_path: Build.Cache.Path = .{ + .root_dir = run_step.step.owner.cache_root, + .sub_path = "v/" ++ std.fmt.hex(coverage_id), + }; + var coverage_file = coverage_file_path.root_dir.handle.openFile(coverage_file_path.sub_path, .{}) catch |err| { + log.err("step '{s}': failed to load coverage file '{f}': {s}", .{ + run_step.step.name, coverage_file_path, @errorName(err), + }); + return error.AlreadyReported; + }; + defer coverage_file.close(); + + const file_size = coverage_file.getEndPos() catch |err| { + log.err("unable to check len of coverage file '{f}': {s}", .{ coverage_file_path, @errorName(err) }); + return error.AlreadyReported; + }; + + const mapped_memory = std.posix.mmap( + null, + file_size, + std.posix.PROT.READ, + .{ .TYPE = .SHARED }, + coverage_file.handle, + 0, + ) catch |err| { + log.err("failed to map coverage file '{f}': {s}", .{ coverage_file_path, @errorName(err) }); + return error.AlreadyReported; + }; + gop.value_ptr.mapped_memory = mapped_memory; + + const header: *const abi.SeenPcsHeader = @ptrCast(mapped_memory[0..@sizeOf(abi.SeenPcsHeader)]); + const pcs = header.pcAddrs(); + const source_locations = try gpa.alloc(Coverage.SourceLocation, pcs.len); + errdefer gpa.free(source_locations); + + // Unfortunately the PCs array that LLVM gives us from the 8-bit PC + // counters feature is not sorted. + var sorted_pcs: std.MultiArrayList(struct { pc: u64, index: u32, sl: Coverage.SourceLocation }) = .{}; + defer sorted_pcs.deinit(gpa); + try sorted_pcs.resize(gpa, pcs.len); + @memcpy(sorted_pcs.items(.pc), pcs); + for (sorted_pcs.items(.index), 0..) |*v, i| v.* = @intCast(i); + sorted_pcs.sortUnstable(struct { + addrs: []const u64, + + pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool { + return ctx.addrs[a_index] < ctx.addrs[b_index]; + } + }{ .addrs = sorted_pcs.items(.pc) }); + + debug_info.resolveAddresses(gpa, sorted_pcs.items(.pc), sorted_pcs.items(.sl)) catch |err| { + log.err("failed to resolve addresses to source locations: {s}", .{@errorName(err)}); + return error.AlreadyReported; + }; + + for (sorted_pcs.items(.index), sorted_pcs.items(.sl)) |i, sl| source_locations[i] = sl; + gop.value_ptr.source_locations = source_locations; + + ws.notifyUpdate(); +} +fn addEntryPoint(fuzz: *Fuzz, coverage_id: u64, addr: u64) error{ AlreadyReported, OutOfMemory }!void { + fuzz.coverage_mutex.lock(); + defer fuzz.coverage_mutex.unlock(); + + const coverage_map = fuzz.coverage_files.getPtr(coverage_id).?; + const header: *const abi.SeenPcsHeader = @ptrCast(coverage_map.mapped_memory[0..@sizeOf(abi.SeenPcsHeader)]); + const pcs = header.pcAddrs(); + + // Since this pcs list is unsorted, we must linear scan for the best index. + const index = i: { + var best: usize = 0; + for (pcs[1..], 1..) 
|elem_addr, i| { + if (elem_addr == addr) break :i i; + if (elem_addr > addr) continue; + if (elem_addr > pcs[best]) best = i; + } + break :i best; + }; + if (index >= pcs.len) { + log.err("unable to find unit test entry address 0x{x} in source locations (range: 0x{x} to 0x{x})", .{ + addr, pcs[0], pcs[pcs.len - 1], + }); + return error.AlreadyReported; + } + if (false) { + const sl = coverage_map.source_locations[index]; + const file_name = coverage_map.coverage.stringAt(coverage_map.coverage.fileAt(sl.file).basename); + log.debug("server found entry point for 0x{x} at {s}:{d}:{d} - index {d} between {x} and {x}", .{ + addr, file_name, sl.line, sl.column, index, pcs[index - 1], pcs[index + 1], + }); + } + try coverage_map.entry_points.append(fuzz.ws.gpa, @intCast(index)); +} + +fn makeIov(s: []const u8) std.posix.iovec_const { + return .{ + .base = s.ptr, + .len = s.len, + }; +} diff --git a/lib/std/Build/Fuzz/WebServer.zig b/lib/std/Build/Fuzz/WebServer.zig deleted file mode 100644 index 18582a60ef..0000000000 --- a/lib/std/Build/Fuzz/WebServer.zig +++ /dev/null @@ -1,709 +0,0 @@ -const builtin = @import("builtin"); - -const std = @import("../../std.zig"); -const Allocator = std.mem.Allocator; -const Build = std.Build; -const Step = std.Build.Step; -const Coverage = std.debug.Coverage; -const abi = std.Build.Fuzz.abi; -const log = std.log; -const assert = std.debug.assert; -const Cache = std.Build.Cache; -const Path = Cache.Path; - -const WebServer = @This(); - -gpa: Allocator, -global_cache_directory: Build.Cache.Directory, -zig_lib_directory: Build.Cache.Directory, -zig_exe_path: []const u8, -listen_address: std.net.Address, -fuzz_run_steps: []const *Step.Run, - -/// Messages from fuzz workers. Protected by mutex. -msg_queue: std.ArrayListUnmanaged(Msg), -/// Protects `msg_queue` only. -mutex: std.Thread.Mutex, -/// Signaled when there is a message in `msg_queue`. -condition: std.Thread.Condition, - -coverage_files: std.AutoArrayHashMapUnmanaged(u64, CoverageMap), -/// Protects `coverage_files` only. -coverage_mutex: std.Thread.Mutex, -/// Signaled when `coverage_files` changes. -coverage_condition: std.Thread.Condition, - -/// Time at initialization of WebServer. -base_timestamp: i128, - -const fuzzer_bin_name = "fuzzer"; -const fuzzer_arch_os_abi = "wasm32-freestanding"; -const fuzzer_cpu_features = "baseline+atomics+bulk_memory+multivalue+mutable_globals+nontrapping_fptoint+reference_types+sign_ext"; - -const CoverageMap = struct { - mapped_memory: []align(std.heap.page_size_min) const u8, - coverage: Coverage, - source_locations: []Coverage.SourceLocation, - /// Elements are indexes into `source_locations` pointing to the unit tests that are being fuzz tested. 
- entry_points: std.ArrayListUnmanaged(u32), - start_timestamp: i64, - - fn deinit(cm: *CoverageMap, gpa: Allocator) void { - std.posix.munmap(cm.mapped_memory); - cm.coverage.deinit(gpa); - cm.* = undefined; - } -}; - -const Msg = union(enum) { - coverage: struct { - id: u64, - run: *Step.Run, - }, - entry_point: struct { - coverage_id: u64, - addr: u64, - }, -}; - -pub fn run(ws: *WebServer) void { - var http_server = ws.listen_address.listen(.{ - .reuse_address = true, - }) catch |err| { - log.err("failed to listen to port {d}: {s}", .{ ws.listen_address.in.getPort(), @errorName(err) }); - return; - }; - const port = http_server.listen_address.in.getPort(); - log.info("web interface listening at http://127.0.0.1:{d}/", .{port}); - if (ws.listen_address.in.getPort() == 0) - log.info("hint: pass --port {d} to use this same port next time", .{port}); - - while (true) { - const connection = http_server.accept() catch |err| { - log.err("failed to accept connection: {s}", .{@errorName(err)}); - return; - }; - _ = std.Thread.spawn(.{}, accept, .{ ws, connection }) catch |err| { - log.err("unable to spawn connection thread: {s}", .{@errorName(err)}); - connection.stream.close(); - continue; - }; - } -} - -fn now(s: *const WebServer) i64 { - return @intCast(std.time.nanoTimestamp() - s.base_timestamp); -} - -fn accept(ws: *WebServer, connection: std.net.Server.Connection) void { - defer connection.stream.close(); - - var read_buffer: [0x4000]u8 = undefined; - var server = std.http.Server.init(connection, &read_buffer); - var web_socket: std.http.WebSocket = undefined; - var send_buffer: [0x4000]u8 = undefined; - var ws_recv_buffer: [0x4000]u8 align(4) = undefined; - while (server.state == .ready) { - var request = server.receiveHead() catch |err| switch (err) { - error.HttpConnectionClosing => return, - else => { - log.err("closing http connection: {s}", .{@errorName(err)}); - return; - }, - }; - if (web_socket.init(&request, &send_buffer, &ws_recv_buffer) catch |err| { - log.err("initializing web socket: {s}", .{@errorName(err)}); - return; - }) { - serveWebSocket(ws, &web_socket) catch |err| { - log.err("unable to serve web socket connection: {s}", .{@errorName(err)}); - return; - }; - } else { - serveRequest(ws, &request) catch |err| switch (err) { - error.AlreadyReported => return, - else => |e| { - log.err("unable to serve {s}: {s}", .{ request.head.target, @errorName(e) }); - return; - }, - }; - } - } -} - -fn serveRequest(ws: *WebServer, request: *std.http.Server.Request) !void { - if (std.mem.eql(u8, request.head.target, "/") or - std.mem.eql(u8, request.head.target, "/debug") or - std.mem.eql(u8, request.head.target, "/debug/")) - { - try serveFile(ws, request, "fuzzer/web/index.html", "text/html"); - } else if (std.mem.eql(u8, request.head.target, "/main.js") or - std.mem.eql(u8, request.head.target, "/debug/main.js")) - { - try serveFile(ws, request, "fuzzer/web/main.js", "application/javascript"); - } else if (std.mem.eql(u8, request.head.target, "/main.wasm")) { - try serveWasm(ws, request, .ReleaseFast); - } else if (std.mem.eql(u8, request.head.target, "/debug/main.wasm")) { - try serveWasm(ws, request, .Debug); - } else if (std.mem.eql(u8, request.head.target, "/sources.tar") or - std.mem.eql(u8, request.head.target, "/debug/sources.tar")) - { - try serveSourcesTar(ws, request); - } else { - try request.respond("not found", .{ - .status = .not_found, - .extra_headers = &.{ - .{ .name = "content-type", .value = "text/plain" }, - }, - }); - } -} - -fn serveFile( - ws: *WebServer, - 
request: *std.http.Server.Request, - name: []const u8, - content_type: []const u8, -) !void { - const gpa = ws.gpa; - // The desired API is actually sendfile, which will require enhancing std.http.Server. - // We load the file with every request so that the user can make changes to the file - // and refresh the HTML page without restarting this server. - const file_contents = ws.zig_lib_directory.handle.readFileAlloc(gpa, name, 10 * 1024 * 1024) catch |err| { - log.err("failed to read '{f}{s}': {s}", .{ ws.zig_lib_directory, name, @errorName(err) }); - return error.AlreadyReported; - }; - defer gpa.free(file_contents); - try request.respond(file_contents, .{ - .extra_headers = &.{ - .{ .name = "content-type", .value = content_type }, - cache_control_header, - }, - }); -} - -fn serveWasm( - ws: *WebServer, - request: *std.http.Server.Request, - optimize_mode: std.builtin.OptimizeMode, -) !void { - const gpa = ws.gpa; - - var arena_instance = std.heap.ArenaAllocator.init(gpa); - defer arena_instance.deinit(); - const arena = arena_instance.allocator(); - - // Do the compilation every request, so that the user can edit the files - // and see the changes without restarting the server. - const wasm_base_path = try buildWasmBinary(ws, arena, optimize_mode); - const bin_name = try std.zig.binNameAlloc(arena, .{ - .root_name = fuzzer_bin_name, - .target = &(std.zig.system.resolveTargetQuery(std.Build.parseTargetQuery(.{ - .arch_os_abi = fuzzer_arch_os_abi, - .cpu_features = fuzzer_cpu_features, - }) catch unreachable) catch unreachable), - .output_mode = .Exe, - }); - // std.http.Server does not have a sendfile API yet. - const bin_path = try wasm_base_path.join(arena, bin_name); - const file_contents = try bin_path.root_dir.handle.readFileAlloc(gpa, bin_path.sub_path, 10 * 1024 * 1024); - defer gpa.free(file_contents); - try request.respond(file_contents, .{ - .extra_headers = &.{ - .{ .name = "content-type", .value = "application/wasm" }, - cache_control_header, - }, - }); -} - -fn buildWasmBinary( - ws: *WebServer, - arena: Allocator, - optimize_mode: std.builtin.OptimizeMode, -) !Path { - const gpa = ws.gpa; - - const main_src_path: Build.Cache.Path = .{ - .root_dir = ws.zig_lib_directory, - .sub_path = "fuzzer/web/main.zig", - }; - const walk_src_path: Build.Cache.Path = .{ - .root_dir = ws.zig_lib_directory, - .sub_path = "docs/wasm/Walk.zig", - }; - const html_render_src_path: Build.Cache.Path = .{ - .root_dir = ws.zig_lib_directory, - .sub_path = "docs/wasm/html_render.zig", - }; - - var argv: std.ArrayListUnmanaged([]const u8) = .empty; - - try argv.appendSlice(arena, &.{ - ws.zig_exe_path, "build-exe", // - "-fno-entry", // - "-O", @tagName(optimize_mode), // - "-target", fuzzer_arch_os_abi, // - "-mcpu", fuzzer_cpu_features, // - "--cache-dir", ws.global_cache_directory.path orelse ".", // - "--global-cache-dir", ws.global_cache_directory.path orelse ".", // - "--name", fuzzer_bin_name, // - "-rdynamic", // - "-fsingle-threaded", // - "--dep", "Walk", // - "--dep", "html_render", // - try std.fmt.allocPrint(arena, "-Mroot={f}", .{main_src_path}), // - try std.fmt.allocPrint(arena, "-MWalk={f}", .{walk_src_path}), // - "--dep", "Walk", // - try std.fmt.allocPrint(arena, "-Mhtml_render={f}", .{html_render_src_path}), // - "--listen=-", - }); - - var child = std.process.Child.init(argv.items, gpa); - child.stdin_behavior = .Pipe; - child.stdout_behavior = .Pipe; - child.stderr_behavior = .Pipe; - try child.spawn(); - - var poller = std.io.poll(gpa, enum { stdout, stderr }, .{ - .stdout = 
child.stdout.?, - .stderr = child.stderr.?, - }); - defer poller.deinit(); - - try sendMessage(child.stdin.?, .update); - try sendMessage(child.stdin.?, .exit); - - var result: ?Path = null; - var result_error_bundle = std.zig.ErrorBundle.empty; - - const stdout = poller.reader(.stdout); - - poll: while (true) { - const Header = std.zig.Server.Message.Header; - while (stdout.buffered().len < @sizeOf(Header)) if (!try poller.poll()) break :poll; - const header = stdout.takeStruct(Header, .little) catch unreachable; - while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll; - const body = stdout.take(header.bytes_len) catch unreachable; - - switch (header.tag) { - .zig_version => { - if (!std.mem.eql(u8, builtin.zig_version_string, body)) { - return error.ZigProtocolVersionMismatch; - } - }, - .error_bundle => { - const EbHdr = std.zig.Server.Message.ErrorBundle; - const eb_hdr = @as(*align(1) const EbHdr, @ptrCast(body)); - const extra_bytes = - body[@sizeOf(EbHdr)..][0 .. @sizeOf(u32) * eb_hdr.extra_len]; - const string_bytes = - body[@sizeOf(EbHdr) + extra_bytes.len ..][0..eb_hdr.string_bytes_len]; - // TODO: use @ptrCast when the compiler supports it - const unaligned_extra = std.mem.bytesAsSlice(u32, extra_bytes); - const extra_array = try arena.alloc(u32, unaligned_extra.len); - @memcpy(extra_array, unaligned_extra); - result_error_bundle = .{ - .string_bytes = try arena.dupe(u8, string_bytes), - .extra = extra_array, - }; - }, - .emit_digest => { - const EmitDigest = std.zig.Server.Message.EmitDigest; - const ebp_hdr = @as(*align(1) const EmitDigest, @ptrCast(body)); - if (!ebp_hdr.flags.cache_hit) { - log.info("source changes detected; rebuilt wasm component", .{}); - } - const digest = body[@sizeOf(EmitDigest)..][0..Cache.bin_digest_len]; - result = .{ - .root_dir = ws.global_cache_directory, - .sub_path = try arena.dupe(u8, "o" ++ std.fs.path.sep_str ++ Cache.binToHex(digest.*)), - }; - }, - else => {}, // ignore other messages - } - } - - const stderr_contents = try poller.toOwnedSlice(.stderr); - if (stderr_contents.len > 0) { - std.debug.print("{s}", .{stderr_contents}); - } - - // Send EOF to stdin. 
- child.stdin.?.close(); - child.stdin = null; - - switch (try child.wait()) { - .Exited => |code| { - if (code != 0) { - log.err( - "the following command exited with error code {d}:\n{s}", - .{ code, try Build.Step.allocPrintCmd(arena, null, argv.items) }, - ); - return error.WasmCompilationFailed; - } - }, - .Signal, .Stopped, .Unknown => { - log.err( - "the following command terminated unexpectedly:\n{s}", - .{try Build.Step.allocPrintCmd(arena, null, argv.items)}, - ); - return error.WasmCompilationFailed; - }, - } - - if (result_error_bundle.errorMessageCount() > 0) { - const color = std.zig.Color.auto; - result_error_bundle.renderToStdErr(color.renderOptions()); - log.err("the following command failed with {d} compilation errors:\n{s}", .{ - result_error_bundle.errorMessageCount(), - try Build.Step.allocPrintCmd(arena, null, argv.items), - }); - return error.WasmCompilationFailed; - } - - return result orelse { - log.err("child process failed to report result\n{s}", .{ - try Build.Step.allocPrintCmd(arena, null, argv.items), - }); - return error.WasmCompilationFailed; - }; -} - -fn sendMessage(file: std.fs.File, tag: std.zig.Client.Message.Tag) !void { - const header: std.zig.Client.Message.Header = .{ - .tag = tag, - .bytes_len = 0, - }; - try file.writeAll(std.mem.asBytes(&header)); -} - -fn serveWebSocket(ws: *WebServer, web_socket: *std.http.WebSocket) !void { - ws.coverage_mutex.lock(); - defer ws.coverage_mutex.unlock(); - - // On first connection, the client needs to know what time the server - // thinks it is to rebase timestamps. - { - const timestamp_message: abi.CurrentTime = .{ .base = ws.now() }; - try web_socket.writeMessage(std.mem.asBytes(×tamp_message), .binary); - } - - // On first connection, the client needs all the coverage information - // so that subsequent updates can contain only the updated bits. - var prev_unique_runs: usize = 0; - var prev_entry_points: usize = 0; - try sendCoverageContext(ws, web_socket, &prev_unique_runs, &prev_entry_points); - while (true) { - ws.coverage_condition.timedWait(&ws.coverage_mutex, std.time.ns_per_ms * 500) catch {}; - try sendCoverageContext(ws, web_socket, &prev_unique_runs, &prev_entry_points); - } -} - -fn sendCoverageContext( - ws: *WebServer, - web_socket: *std.http.WebSocket, - prev_unique_runs: *usize, - prev_entry_points: *usize, -) !void { - const coverage_maps = ws.coverage_files.values(); - if (coverage_maps.len == 0) return; - // TODO: make each events URL correspond to one coverage map - const coverage_map = &coverage_maps[0]; - const cov_header: *const abi.SeenPcsHeader = @ptrCast(coverage_map.mapped_memory[0..@sizeOf(abi.SeenPcsHeader)]); - const seen_pcs = cov_header.seenBits(); - const n_runs = @atomicLoad(usize, &cov_header.n_runs, .monotonic); - const unique_runs = @atomicLoad(usize, &cov_header.unique_runs, .monotonic); - if (prev_unique_runs.* != unique_runs) { - // There has been an update. - if (prev_unique_runs.* == 0) { - // We need to send initial context. 
- const header: abi.SourceIndexHeader = .{ - .flags = .{}, - .directories_len = @intCast(coverage_map.coverage.directories.entries.len), - .files_len = @intCast(coverage_map.coverage.files.entries.len), - .source_locations_len = @intCast(coverage_map.source_locations.len), - .string_bytes_len = @intCast(coverage_map.coverage.string_bytes.items.len), - .start_timestamp = coverage_map.start_timestamp, - }; - const iovecs: [5]std.posix.iovec_const = .{ - makeIov(std.mem.asBytes(&header)), - makeIov(std.mem.sliceAsBytes(coverage_map.coverage.directories.keys())), - makeIov(std.mem.sliceAsBytes(coverage_map.coverage.files.keys())), - makeIov(std.mem.sliceAsBytes(coverage_map.source_locations)), - makeIov(coverage_map.coverage.string_bytes.items), - }; - try web_socket.writeMessagev(&iovecs, .binary); - } - - const header: abi.CoverageUpdateHeader = .{ - .n_runs = n_runs, - .unique_runs = unique_runs, - }; - const iovecs: [2]std.posix.iovec_const = .{ - makeIov(std.mem.asBytes(&header)), - makeIov(std.mem.sliceAsBytes(seen_pcs)), - }; - try web_socket.writeMessagev(&iovecs, .binary); - - prev_unique_runs.* = unique_runs; - } - - if (prev_entry_points.* != coverage_map.entry_points.items.len) { - const header: abi.EntryPointHeader = .{ - .flags = .{ - .locs_len = @intCast(coverage_map.entry_points.items.len), - }, - }; - const iovecs: [2]std.posix.iovec_const = .{ - makeIov(std.mem.asBytes(&header)), - makeIov(std.mem.sliceAsBytes(coverage_map.entry_points.items)), - }; - try web_socket.writeMessagev(&iovecs, .binary); - - prev_entry_points.* = coverage_map.entry_points.items.len; - } -} - -fn serveSourcesTar(ws: *WebServer, request: *std.http.Server.Request) !void { - const gpa = ws.gpa; - - var arena_instance = std.heap.ArenaAllocator.init(gpa); - defer arena_instance.deinit(); - const arena = arena_instance.allocator(); - - var send_buffer: [0x4000]u8 = undefined; - var response = request.respondStreaming(.{ - .send_buffer = &send_buffer, - .respond_options = .{ - .extra_headers = &.{ - .{ .name = "content-type", .value = "application/x-tar" }, - cache_control_header, - }, - }, - }); - - const DedupeTable = std.ArrayHashMapUnmanaged(Build.Cache.Path, void, Build.Cache.Path.TableAdapter, false); - var dedupe_table: DedupeTable = .{}; - defer dedupe_table.deinit(gpa); - - for (ws.fuzz_run_steps) |run_step| { - const compile_step_inputs = run_step.producer.?.step.inputs.table; - for (compile_step_inputs.keys(), compile_step_inputs.values()) |dir_path, *file_list| { - try dedupe_table.ensureUnusedCapacity(gpa, file_list.items.len); - for (file_list.items) |sub_path| { - // Special file "." means the entire directory. 
- if (std.mem.eql(u8, sub_path, ".")) continue; - const joined_path = try dir_path.join(arena, sub_path); - _ = dedupe_table.getOrPutAssumeCapacity(joined_path); - } - } - } - - const deduped_paths = dedupe_table.keys(); - const SortContext = struct { - pub fn lessThan(this: @This(), lhs: Build.Cache.Path, rhs: Build.Cache.Path) bool { - _ = this; - return switch (std.mem.order(u8, lhs.root_dir.path orelse ".", rhs.root_dir.path orelse ".")) { - .lt => true, - .gt => false, - .eq => std.mem.lessThan(u8, lhs.sub_path, rhs.sub_path), - }; - } - }; - std.mem.sortUnstable(Build.Cache.Path, deduped_paths, SortContext{}, SortContext.lessThan); - - var cwd_cache: ?[]const u8 = null; - - var adapter = response.writer().adaptToNewApi(); - var archiver: std.tar.Writer = .{ .underlying_writer = &adapter.new_interface }; - var read_buffer: [1024]u8 = undefined; - - for (deduped_paths) |joined_path| { - var file = joined_path.root_dir.handle.openFile(joined_path.sub_path, .{}) catch |err| { - log.err("failed to open {f}: {s}", .{ joined_path, @errorName(err) }); - continue; - }; - defer file.close(); - const stat = try file.stat(); - var file_reader: std.fs.File.Reader = .initSize(file, &read_buffer, stat.size); - archiver.prefix = joined_path.root_dir.path orelse try memoizedCwd(arena, &cwd_cache); - try archiver.writeFile(joined_path.sub_path, &file_reader, stat.mtime); - } - - // intentionally not calling `archiver.finishPedantically` - try adapter.new_interface.flush(); - try response.end(); -} - -fn memoizedCwd(arena: Allocator, opt_ptr: *?[]const u8) ![]const u8 { - if (opt_ptr.*) |cached| return cached; - const result = try std.process.getCwdAlloc(arena); - opt_ptr.* = result; - return result; -} - -const cache_control_header: std.http.Header = .{ - .name = "cache-control", - .value = "max-age=0, must-revalidate", -}; - -pub fn coverageRun(ws: *WebServer) void { - ws.mutex.lock(); - defer ws.mutex.unlock(); - - while (true) { - ws.condition.wait(&ws.mutex); - for (ws.msg_queue.items) |msg| switch (msg) { - .coverage => |coverage| prepareTables(ws, coverage.run, coverage.id) catch |err| switch (err) { - error.AlreadyReported => continue, - else => |e| log.err("failed to prepare code coverage tables: {s}", .{@errorName(e)}), - }, - .entry_point => |entry_point| addEntryPoint(ws, entry_point.coverage_id, entry_point.addr) catch |err| switch (err) { - error.AlreadyReported => continue, - else => |e| log.err("failed to prepare code coverage tables: {s}", .{@errorName(e)}), - }, - }; - ws.msg_queue.clearRetainingCapacity(); - } -} - -fn prepareTables( - ws: *WebServer, - run_step: *Step.Run, - coverage_id: u64, -) error{ OutOfMemory, AlreadyReported }!void { - const gpa = ws.gpa; - - ws.coverage_mutex.lock(); - defer ws.coverage_mutex.unlock(); - - const gop = try ws.coverage_files.getOrPut(gpa, coverage_id); - if (gop.found_existing) { - // We are fuzzing the same executable with multiple threads. - // Perhaps the same unit test; perhaps a different one. In any - // case, since the coverage file is the same, we only have to - // notice changes to that one file in order to learn coverage for - // this particular executable. 
- return; - } - errdefer _ = ws.coverage_files.pop(); - - gop.value_ptr.* = .{ - .coverage = std.debug.Coverage.init, - .mapped_memory = undefined, // populated below - .source_locations = undefined, // populated below - .entry_points = .{}, - .start_timestamp = ws.now(), - }; - errdefer gop.value_ptr.coverage.deinit(gpa); - - const rebuilt_exe_path = run_step.rebuilt_executable.?; - var debug_info = std.debug.Info.load(gpa, rebuilt_exe_path, &gop.value_ptr.coverage) catch |err| { - log.err("step '{s}': failed to load debug information for '{f}': {s}", .{ - run_step.step.name, rebuilt_exe_path, @errorName(err), - }); - return error.AlreadyReported; - }; - defer debug_info.deinit(gpa); - - const coverage_file_path: Build.Cache.Path = .{ - .root_dir = run_step.step.owner.cache_root, - .sub_path = "v/" ++ std.fmt.hex(coverage_id), - }; - var coverage_file = coverage_file_path.root_dir.handle.openFile(coverage_file_path.sub_path, .{}) catch |err| { - log.err("step '{s}': failed to load coverage file '{f}': {s}", .{ - run_step.step.name, coverage_file_path, @errorName(err), - }); - return error.AlreadyReported; - }; - defer coverage_file.close(); - - const file_size = coverage_file.getEndPos() catch |err| { - log.err("unable to check len of coverage file '{f}': {s}", .{ coverage_file_path, @errorName(err) }); - return error.AlreadyReported; - }; - - const mapped_memory = std.posix.mmap( - null, - file_size, - std.posix.PROT.READ, - .{ .TYPE = .SHARED }, - coverage_file.handle, - 0, - ) catch |err| { - log.err("failed to map coverage file '{f}': {s}", .{ coverage_file_path, @errorName(err) }); - return error.AlreadyReported; - }; - gop.value_ptr.mapped_memory = mapped_memory; - - const header: *const abi.SeenPcsHeader = @ptrCast(mapped_memory[0..@sizeOf(abi.SeenPcsHeader)]); - const pcs = header.pcAddrs(); - const source_locations = try gpa.alloc(Coverage.SourceLocation, pcs.len); - errdefer gpa.free(source_locations); - - // Unfortunately the PCs array that LLVM gives us from the 8-bit PC - // counters feature is not sorted. - var sorted_pcs: std.MultiArrayList(struct { pc: u64, index: u32, sl: Coverage.SourceLocation }) = .{}; - defer sorted_pcs.deinit(gpa); - try sorted_pcs.resize(gpa, pcs.len); - @memcpy(sorted_pcs.items(.pc), pcs); - for (sorted_pcs.items(.index), 0..) |*v, i| v.* = @intCast(i); - sorted_pcs.sortUnstable(struct { - addrs: []const u64, - - pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool { - return ctx.addrs[a_index] < ctx.addrs[b_index]; - } - }{ .addrs = sorted_pcs.items(.pc) }); - - debug_info.resolveAddresses(gpa, sorted_pcs.items(.pc), sorted_pcs.items(.sl)) catch |err| { - log.err("failed to resolve addresses to source locations: {s}", .{@errorName(err)}); - return error.AlreadyReported; - }; - - for (sorted_pcs.items(.index), sorted_pcs.items(.sl)) |i, sl| source_locations[i] = sl; - gop.value_ptr.source_locations = source_locations; - - ws.coverage_condition.broadcast(); -} - -fn addEntryPoint(ws: *WebServer, coverage_id: u64, addr: u64) error{ AlreadyReported, OutOfMemory }!void { - ws.coverage_mutex.lock(); - defer ws.coverage_mutex.unlock(); - - const coverage_map = ws.coverage_files.getPtr(coverage_id).?; - const header: *const abi.SeenPcsHeader = @ptrCast(coverage_map.mapped_memory[0..@sizeOf(abi.SeenPcsHeader)]); - const pcs = header.pcAddrs(); - // Since this pcs list is unsorted, we must linear scan for the best index. - const index = i: { - var best: usize = 0; - for (pcs[1..], 1..) 
|elem_addr, i| { - if (elem_addr == addr) break :i i; - if (elem_addr > addr) continue; - if (elem_addr > pcs[best]) best = i; - } - break :i best; - }; - if (index >= pcs.len) { - log.err("unable to find unit test entry address 0x{x} in source locations (range: 0x{x} to 0x{x})", .{ - addr, pcs[0], pcs[pcs.len - 1], - }); - return error.AlreadyReported; - } - if (false) { - const sl = coverage_map.source_locations[index]; - const file_name = coverage_map.coverage.stringAt(coverage_map.coverage.fileAt(sl.file).basename); - log.debug("server found entry point for 0x{x} at {s}:{d}:{d} - index {d} between {x} and {x}", .{ - addr, file_name, sl.line, sl.column, index, pcs[index - 1], pcs[index + 1], - }); - } - const gpa = ws.gpa; - try coverage_map.entry_points.append(gpa, @intCast(index)); -} - -fn makeIov(s: []const u8) std.posix.iovec_const { - return .{ - .base = s.ptr, - .len = s.len, - }; -} diff --git a/lib/std/Build/Fuzz/abi.zig b/lib/std/Build/Fuzz/abi.zig deleted file mode 100644 index a6abc13fee..0000000000 --- a/lib/std/Build/Fuzz/abi.zig +++ /dev/null @@ -1,112 +0,0 @@ -//! This file is shared among Zig code running in wildly different contexts: -//! libfuzzer, compiled alongside unit tests, the build runner, running on the -//! host computer, and the fuzzing web interface webassembly code running in -//! the browser. All of these components interface to some degree via an ABI. - -/// libfuzzer uses this and its usize is the one that counts. To match the ABI, -/// make the ints be the size of the target used with libfuzzer. -/// -/// Trailing: -/// * 1 bit per pc_addr, usize elements -/// * pc_addr: usize for each pcs_len -pub const SeenPcsHeader = extern struct { - n_runs: usize, - unique_runs: usize, - pcs_len: usize, - - /// Used for comptime assertions. Provides a mechanism for strategically - /// causing compile errors. - pub const trailing = .{ - .pc_bits_usize, - .pc_addr, - }; - - pub fn headerEnd(header: *const SeenPcsHeader) []const usize { - const ptr: [*]align(@alignOf(usize)) const u8 = @ptrCast(header); - const header_end_ptr: [*]const usize = @ptrCast(ptr + @sizeOf(SeenPcsHeader)); - const pcs_len = header.pcs_len; - return header_end_ptr[0 .. pcs_len + seenElemsLen(pcs_len)]; - } - - pub fn seenBits(header: *const SeenPcsHeader) []const usize { - return header.headerEnd()[0..seenElemsLen(header.pcs_len)]; - } - - pub fn seenElemsLen(pcs_len: usize) usize { - return (pcs_len + @bitSizeOf(usize) - 1) / @bitSizeOf(usize); - } - - pub fn pcAddrs(header: *const SeenPcsHeader) []const usize { - const pcs_len = header.pcs_len; - return header.headerEnd()[seenElemsLen(pcs_len)..][0..pcs_len]; - } -}; - -pub const ToClientTag = enum(u8) { - current_time, - source_index, - coverage_update, - entry_points, - _, -}; - -pub const CurrentTime = extern struct { - tag: ToClientTag = .current_time, - /// Number of nanoseconds that all other timestamps are in reference to. - base: i64 align(1), -}; - -/// Sent to the fuzzer web client on first connection to the websocket URL. -/// -/// Trailing: -/// * std.debug.Coverage.String for each directories_len -/// * std.debug.Coverage.File for each files_len -/// * std.debug.Coverage.SourceLocation for each source_locations_len -/// * u8 for each string_bytes_len -pub const SourceIndexHeader = extern struct { - flags: Flags, - directories_len: u32, - files_len: u32, - source_locations_len: u32, - string_bytes_len: u32, - /// When, according to the server, fuzzing started. 
- start_timestamp: i64 align(4), - - pub const Flags = packed struct(u32) { - tag: ToClientTag = .source_index, - _: u24 = 0, - }; -}; - -/// Sent to the fuzzer web client whenever the set of covered source locations -/// changes. -/// -/// Trailing: -/// * one bit per source_locations_len, contained in u64 elements -pub const CoverageUpdateHeader = extern struct { - flags: Flags = .{}, - n_runs: u64, - unique_runs: u64, - - pub const Flags = packed struct(u64) { - tag: ToClientTag = .coverage_update, - _: u56 = 0, - }; - - pub const trailing = .{ - .pc_bits_usize, - }; -}; - -/// Sent to the fuzzer web client when the set of entry points is updated. -/// -/// Trailing: -/// * one u32 index of source_locations per locs_len -pub const EntryPointHeader = extern struct { - flags: Flags, - - pub const Flags = packed struct(u32) { - tag: ToClientTag = .entry_points, - locs_len: u24, - }; -}; diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index 8583427aad..ee883ee152 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -72,6 +72,14 @@ pub const MakeOptions = struct { progress_node: std.Progress.Node, thread_pool: *std.Thread.Pool, watch: bool, + web_server: switch (builtin.target.cpu.arch) { + else => ?*Build.WebServer, + // WASM code references `Build.abi` which happens to incidentally reference this type, but + // it currently breaks because `std.net.Address` doesn't work there. Work around for now. + .wasm32 => void, + }, + /// Not to be confused with `Build.allocator`, which is an alias of `Build.graph.arena`. + gpa: Allocator, }; pub const MakeFn = *const fn (step: *Step, options: MakeOptions) anyerror!void; @@ -229,7 +237,17 @@ pub fn init(options: StepOptions) Step { pub fn make(s: *Step, options: MakeOptions) error{ MakeFailed, MakeSkipped }!void { const arena = s.owner.allocator; - s.makeFn(s, options) catch |err| switch (err) { + var timer: ?std.time.Timer = t: { + if (!s.owner.graph.time_report) break :t null; + if (s.id == .compile) break :t null; + break :t std.time.Timer.start() catch @panic("--time-report not supported on this host"); + }; + const make_result = s.makeFn(s, options); + if (timer) |*t| { + options.web_server.?.updateTimeReportGeneric(s, t.read()); + } + + make_result catch |err| switch (err) { error.MakeFailed => return error.MakeFailed, error.MakeSkipped => return error.MakeSkipped, else => { @@ -372,18 +390,20 @@ pub fn evalZigProcess( argv: []const []const u8, prog_node: std.Progress.Node, watch: bool, + web_server: ?*Build.WebServer, + gpa: Allocator, ) !?Path { if (s.getZigProcess()) |zp| update: { assert(watch); if (std.Progress.have_ipc) if (zp.progress_ipc_fd) |fd| prog_node.setIpcFd(fd); - const result = zigProcessUpdate(s, zp, watch) catch |err| switch (err) { + const result = zigProcessUpdate(s, zp, watch, web_server, gpa) catch |err| switch (err) { error.BrokenPipe => { // Process restart required. 
const term = zp.child.wait() catch |e| { return s.fail("unable to wait for {s}: {s}", .{ argv[0], @errorName(e) }); }; _ = term; - s.clearZigProcess(); + s.clearZigProcess(gpa); break :update; }, else => |e| return e, @@ -398,7 +418,7 @@ pub fn evalZigProcess( return s.fail("unable to wait for {s}: {s}", .{ argv[0], @errorName(e) }); }; s.result_peak_rss = zp.child.resource_usage_statistics.getMaxRss() orelse 0; - s.clearZigProcess(); + s.clearZigProcess(gpa); try handleChildProcessTerm(s, term, null, argv); return error.MakeFailed; } @@ -408,7 +428,6 @@ pub fn evalZigProcess( assert(argv.len != 0); const b = s.owner; const arena = b.allocator; - const gpa = arena; try handleChildProcUnsupported(s, null, argv); try handleVerbose(s.owner, null, argv); @@ -435,9 +454,12 @@ pub fn evalZigProcess( .progress_ipc_fd = if (std.Progress.have_ipc) child.progress_node.getIpcFd() else {}, }; if (watch) s.setZigProcess(zp); - defer if (!watch) zp.poller.deinit(); + defer if (!watch) { + zp.poller.deinit(); + gpa.destroy(zp); + }; - const result = try zigProcessUpdate(s, zp, watch); + const result = try zigProcessUpdate(s, zp, watch, web_server, gpa); if (!watch) { // Send EOF to stdin. @@ -499,7 +521,7 @@ pub fn installDir(s: *Step, dest_path: []const u8) !std.fs.Dir.MakePathStatus { }; } -fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool) !?Path { +fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool, web_server: ?*Build.WebServer, gpa: Allocator) !?Path { const b = s.owner; const arena = b.allocator; @@ -537,12 +559,14 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool) !?Path { body[@sizeOf(EbHdr) + extra_bytes.len ..][0..eb_hdr.string_bytes_len]; // TODO: use @ptrCast when the compiler supports it const unaligned_extra = std.mem.bytesAsSlice(u32, extra_bytes); - const extra_array = try arena.alloc(u32, unaligned_extra.len); - @memcpy(extra_array, unaligned_extra); - s.result_error_bundle = .{ - .string_bytes = try arena.dupe(u8, string_bytes), - .extra = extra_array, - }; + { + s.result_error_bundle = .{ .string_bytes = &.{}, .extra = &.{} }; + errdefer s.result_error_bundle.deinit(gpa); + s.result_error_bundle.string_bytes = try gpa.dupe(u8, string_bytes); + const extra = try gpa.alloc(u32, unaligned_extra.len); + @memcpy(extra, unaligned_extra); + s.result_error_bundle.extra = extra; + } // This message indicates the end of the update. 
if (watch) break :poll; }, @@ -602,6 +626,20 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool) !?Path { } } }, + .time_report => if (web_server) |ws| { + const TimeReport = std.zig.Server.Message.TimeReport; + const tr: *align(1) const TimeReport = @ptrCast(body[0..@sizeOf(TimeReport)]); + ws.updateTimeReportCompile(.{ + .compile = s.cast(Step.Compile).?, + .use_llvm = tr.flags.use_llvm, + .stats = tr.stats, + .ns_total = timer.read(), + .llvm_pass_timings_len = tr.llvm_pass_timings_len, + .files_len = tr.files_len, + .decls_len = tr.decls_len, + .trailing = body[@sizeOf(TimeReport)..], + }); + }, else => {}, // ignore other messages } } @@ -630,8 +668,7 @@ fn setZigProcess(s: *Step, zp: *ZigProcess) void { } } -fn clearZigProcess(s: *Step) void { - const gpa = s.owner.allocator; +fn clearZigProcess(s: *Step, gpa: Allocator) void { switch (s.id) { .compile => { const compile = s.cast(Compile).?; @@ -947,7 +984,8 @@ fn addWatchInputFromPath(step: *Step, path: Build.Cache.Path, basename: []const try gop.value_ptr.append(gpa, basename); } -fn reset(step: *Step, gpa: Allocator) void { +/// Implementation detail of file watching and forced rebuilds. Prepares the step for being re-evaluated. +pub fn reset(step: *Step, gpa: Allocator) void { assert(step.state == .precheck_done); step.result_error_msgs.clearRetainingCapacity(); diff --git a/lib/std/Build/Step/Compile.zig b/lib/std/Build/Step/Compile.zig index 141d18a7bf..5e14796792 100644 --- a/lib/std/Build/Step/Compile.zig +++ b/lib/std/Build/Step/Compile.zig @@ -1491,6 +1491,7 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 { if (b.verbose_link or compile.verbose_link) try zig_args.append("--verbose-link"); if (b.verbose_cc or compile.verbose_cc) try zig_args.append("--verbose-cc"); if (b.verbose_llvm_cpu_features) try zig_args.append("--verbose-llvm-cpu-features"); + if (b.graph.time_report) try zig_args.append("--time-report"); if (compile.generated_asm != null) try zig_args.append("-femit-asm"); if (compile.generated_bin == null) try zig_args.append("-fno-emit-bin"); @@ -1851,6 +1852,8 @@ fn make(step: *Step, options: Step.MakeOptions) !void { zig_args, options.progress_node, (b.graph.incremental == true) and options.watch, + options.web_server, + options.gpa, ) catch |err| switch (err) { error.NeedCompileErrorCheck => { assert(compile.expect_errors != null); @@ -1905,9 +1908,7 @@ fn outputPath(c: *Compile, out_dir: std.Build.Cache.Path, ea: std.zig.EmitArtifa return out_dir.joinString(arena, name) catch @panic("OOM"); } -pub fn rebuildInFuzzMode(c: *Compile, progress_node: std.Progress.Node) !Path { - const gpa = c.step.owner.allocator; - +pub fn rebuildInFuzzMode(c: *Compile, gpa: Allocator, progress_node: std.Progress.Node) !Path { c.step.result_error_msgs.clearRetainingCapacity(); c.step.result_stderr = ""; @@ -1915,7 +1916,7 @@ pub fn rebuildInFuzzMode(c: *Compile, progress_node: std.Progress.Node) !Path { c.step.result_error_bundle = std.zig.ErrorBundle.empty; const zig_args = try getZigArgs(c, true); - const maybe_output_bin_path = try c.step.evalZigProcess(zig_args, progress_node, false); + const maybe_output_bin_path = try c.step.evalZigProcess(zig_args, progress_node, false, null, gpa); return maybe_output_bin_path.?; } diff --git a/lib/std/Build/Step/ObjCopy.zig b/lib/std/Build/Step/ObjCopy.zig index 74f871d2fc..3c9eb2eaa7 100644 --- a/lib/std/Build/Step/ObjCopy.zig +++ b/lib/std/Build/Step/ObjCopy.zig @@ -236,7 +236,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { try argv.appendSlice(&.{ 
full_src_path, full_dest_path }); try argv.append("--listen=-"); - _ = try step.evalZigProcess(argv.items, prog_node, false); + _ = try step.evalZigProcess(argv.items, prog_node, false, options.web_server, options.gpa); objcopy.output_file.path = full_dest_path; if (objcopy.output_file_debug) |*file| file.path = full_dest_path_debug; diff --git a/lib/std/Build/Step/Options.zig b/lib/std/Build/Step/Options.zig index 6f8c40b1d5..fd6194f7ff 100644 --- a/lib/std/Build/Step/Options.zig +++ b/lib/std/Build/Step/Options.zig @@ -549,6 +549,7 @@ test Options { .result = try std.zig.system.resolveTargetQuery(.{}), }, .zig_lib_directory = std.Build.Cache.Directory.cwd(), + .time_report = false, }; var builder = try std.Build.create( diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index 819fc6745d..d317422bd9 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -944,7 +944,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { pub fn rerunInFuzzMode( run: *Run, - web_server: *std.Build.Fuzz.WebServer, + fuzz: *std.Build.Fuzz, unit_test_index: u32, prog_node: std.Progress.Node, ) !void { @@ -984,7 +984,7 @@ pub fn rerunInFuzzMode( const tmp_dir_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(rand_int); try runCommand(run, argv_list.items, has_side_effects, tmp_dir_path, prog_node, .{ .unit_test_index = unit_test_index, - .web_server = web_server, + .fuzz = fuzz, }); } @@ -1054,7 +1054,7 @@ fn termMatches(expected: ?std.process.Child.Term, actual: std.process.Child.Term } const FuzzContext = struct { - web_server: *std.Build.Fuzz.WebServer, + fuzz: *std.Build.Fuzz, unit_test_index: u32, }; @@ -1638,31 +1638,31 @@ fn evalZigTest( }; }, .coverage_id => { - const web_server = fuzz_context.?.web_server; + const fuzz = fuzz_context.?.fuzz; const msg_ptr: *align(1) const u64 = @ptrCast(body); coverage_id = msg_ptr.*; { - web_server.mutex.lock(); - defer web_server.mutex.unlock(); - try web_server.msg_queue.append(web_server.gpa, .{ .coverage = .{ + fuzz.queue_mutex.lock(); + defer fuzz.queue_mutex.unlock(); + try fuzz.msg_queue.append(fuzz.ws.gpa, .{ .coverage = .{ .id = coverage_id.?, .run = run, } }); - web_server.condition.signal(); + fuzz.queue_cond.signal(); } }, .fuzz_start_addr => { - const web_server = fuzz_context.?.web_server; + const fuzz = fuzz_context.?.fuzz; const msg_ptr: *align(1) const u64 = @ptrCast(body); const addr = msg_ptr.*; { - web_server.mutex.lock(); - defer web_server.mutex.unlock(); - try web_server.msg_queue.append(web_server.gpa, .{ .entry_point = .{ + fuzz.queue_mutex.lock(); + defer fuzz.queue_mutex.unlock(); + try fuzz.msg_queue.append(fuzz.ws.gpa, .{ .entry_point = .{ .addr = addr, .coverage_id = coverage_id.?, } }); - web_server.condition.signal(); + fuzz.queue_cond.signal(); } }, else => {}, // ignore other messages diff --git a/lib/std/Build/Step/TranslateC.zig b/lib/std/Build/Step/TranslateC.zig index 7187aaf8c6..53c4007e26 100644 --- a/lib/std/Build/Step/TranslateC.zig +++ b/lib/std/Build/Step/TranslateC.zig @@ -187,7 +187,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { const c_source_path = translate_c.source.getPath2(b, step); try argv_list.append(c_source_path); - const output_dir = try step.evalZigProcess(argv_list.items, prog_node, false); + const output_dir = try step.evalZigProcess(argv_list.items, prog_node, false, options.web_server, options.gpa); const basename = std.fs.path.stem(std.fs.path.basename(c_source_path)); translate_c.out_basename = b.fmt("{s}.zig", .{basename}); diff --git 
a/lib/std/Build/WebServer.zig b/lib/std/Build/WebServer.zig new file mode 100644 index 0000000000..9264d7473c --- /dev/null +++ b/lib/std/Build/WebServer.zig @@ -0,0 +1,823 @@ +gpa: Allocator, +thread_pool: *std.Thread.Pool, +graph: *const Build.Graph, +all_steps: []const *Build.Step, +listen_address: std.net.Address, +ttyconf: std.io.tty.Config, +root_prog_node: std.Progress.Node, +watch: bool, + +tcp_server: ?std.net.Server, +serve_thread: ?std.Thread, + +base_timestamp: i128, +/// The "step name" data which trails `abi.Hello`, for the steps in `all_steps`. +step_names_trailing: []u8, + +/// The bit-packed "step status" data. Values are `abi.StepUpdate.Status`. LSBs are earlier steps. +/// Accessed atomically. +step_status_bits: []u8, + +fuzz: ?Fuzz, +time_report_mutex: std.Thread.Mutex, +time_report_msgs: [][]u8, +time_report_update_times: []i64, + +build_status: std.atomic.Value(abi.BuildStatus), +/// When an event occurs which means WebSocket clients should be sent updates, call `notifyUpdate` +/// to increment this value. Each client thread waits for this increment with `std.Thread.Futex`, so +/// `notifyUpdate` will wake those threads. Updates are sent on a short interval regardless, so it +/// is recommended to only use `notifyUpdate` for changes which the user should see immediately. For +/// instance, we do not call `notifyUpdate` when the number of "unique runs" in the fuzzer changes, +/// because this value changes quickly so this would result in constantly spamming all clients with +/// an unreasonable number of packets. +update_id: std.atomic.Value(u32), + +runner_request_mutex: std.Thread.Mutex, +runner_request_ready_cond: std.Thread.Condition, +runner_request_empty_cond: std.Thread.Condition, +runner_request: ?RunnerRequest, + +/// If a client is not explicitly notified of changes with `notifyUpdate`, it will be sent updates +/// on a fixed interval of this many milliseconds. +const default_update_interval_ms = 500; + +/// Thread-safe. Triggers updates to be sent to connected WebSocket clients; see `update_id`. +pub fn notifyUpdate(ws: *WebServer) void { + _ = ws.update_id.rmw(.Add, 1, .release); + std.Thread.Futex.wake(&ws.update_id, 16); +} + +pub const Options = struct { + gpa: Allocator, + thread_pool: *std.Thread.Pool, + graph: *const std.Build.Graph, + all_steps: []const *Build.Step, + ttyconf: std.io.tty.Config, + root_prog_node: std.Progress.Node, + watch: bool, + listen_address: std.net.Address, +}; +pub fn init(opts: Options) WebServer { + if (builtin.single_threaded) { + // The upcoming `std.Io` interface should allow us to use `Io.async` and `Io.concurrent` + // instead of threads, so that the web server can function in single-threaded builds. + std.process.fatal("--webui not yet implemented for single-threaded builds", .{}); + } + + if (builtin.os.tag == .windows) { + // At the time of writing, there are two bugs in the standard library which break this feature on Windows: + // * Reading from a socket on one thread while writing to it on another seems to deadlock. + // * Vectored writes to sockets currently trigger an infinite loop when a buffer has length 0. + // + // Both of these bugs are expected to be solved by changes which are currently in the unmerged + // 'wrangle-writer-buffering' branch. Until that makes it in, this must remain disabled. 
+ std.process.fatal("--webui is currently disabled on Windows due to bugs", .{}); + } + + const all_steps = opts.all_steps; + + const step_names_trailing = opts.gpa.alloc(u8, len: { + var name_bytes: usize = 0; + for (all_steps) |step| name_bytes += step.name.len; + break :len name_bytes + all_steps.len * 4; + }) catch @panic("out of memory"); + { + const step_name_lens: []align(1) u32 = @ptrCast(step_names_trailing[0 .. all_steps.len * 4]); + var idx: usize = all_steps.len * 4; + for (all_steps, step_name_lens) |step, *name_len| { + name_len.* = @intCast(step.name.len); + @memcpy(step_names_trailing[idx..][0..step.name.len], step.name); + idx += step.name.len; + } + assert(idx == step_names_trailing.len); + } + + const step_status_bits = opts.gpa.alloc( + u8, + std.math.divCeil(usize, all_steps.len, 4) catch unreachable, + ) catch @panic("out of memory"); + @memset(step_status_bits, 0); + + const time_reports_len: usize = if (opts.graph.time_report) all_steps.len else 0; + const time_report_msgs = opts.gpa.alloc([]u8, time_reports_len) catch @panic("out of memory"); + const time_report_update_times = opts.gpa.alloc(i64, time_reports_len) catch @panic("out of memory"); + @memset(time_report_msgs, &.{}); + @memset(time_report_update_times, std.math.minInt(i64)); + + return .{ + .gpa = opts.gpa, + .thread_pool = opts.thread_pool, + .graph = opts.graph, + .all_steps = all_steps, + .listen_address = opts.listen_address, + .ttyconf = opts.ttyconf, + .root_prog_node = opts.root_prog_node, + .watch = opts.watch, + + .tcp_server = null, + .serve_thread = null, + + .base_timestamp = std.time.nanoTimestamp(), + .step_names_trailing = step_names_trailing, + + .step_status_bits = step_status_bits, + + .fuzz = null, + .time_report_mutex = .{}, + .time_report_msgs = time_report_msgs, + .time_report_update_times = time_report_update_times, + + .build_status = .init(.idle), + .update_id = .init(0), + + .runner_request_mutex = .{}, + .runner_request_ready_cond = .{}, + .runner_request_empty_cond = .{}, + .runner_request = null, + }; +} +pub fn deinit(ws: *WebServer) void { + const gpa = ws.gpa; + + gpa.free(ws.step_names_trailing); + gpa.free(ws.step_status_bits); + + if (ws.fuzz) |*f| f.deinit(); + for (ws.time_report_msgs) |msg| gpa.free(msg); + gpa.free(ws.time_report_msgs); + gpa.free(ws.time_report_update_times); + + if (ws.serve_thread) |t| { + if (ws.tcp_server) |*s| s.stream.close(); + t.join(); + } + if (ws.tcp_server) |*s| s.deinit(); + + gpa.free(ws.step_names_trailing); +} +pub fn start(ws: *WebServer) error{AlreadyReported}!void { + assert(ws.tcp_server == null); + assert(ws.serve_thread == null); + + ws.tcp_server = ws.listen_address.listen(.{ .reuse_address = true }) catch |err| { + log.err("failed to listen to port {d}: {s}", .{ ws.listen_address.getPort(), @errorName(err) }); + return error.AlreadyReported; + }; + ws.serve_thread = std.Thread.spawn(.{}, serve, .{ws}) catch |err| { + log.err("unable to spawn web server thread: {s}", .{@errorName(err)}); + ws.tcp_server.?.deinit(); + ws.tcp_server = null; + return error.AlreadyReported; + }; + + log.info("web interface listening at http://{f}/", .{ws.tcp_server.?.listen_address}); + if (ws.listen_address.getPort() == 0) { + log.info("hint: pass '--webui={f}' to use the same port next time", .{ws.tcp_server.?.listen_address}); + } +} +fn serve(ws: *WebServer) void { + while (true) { + const connection = ws.tcp_server.?.accept() catch |err| { + log.err("failed to accept connection: {s}", .{@errorName(err)}); + return; + }; + _ = 
std.Thread.spawn(.{}, accept, .{ ws, connection }) catch |err| { + log.err("unable to spawn connection thread: {s}", .{@errorName(err)}); + connection.stream.close(); + continue; + }; + } +} + +pub fn startBuild(ws: *WebServer) void { + if (ws.fuzz) |*fuzz| { + fuzz.deinit(); + ws.fuzz = null; + } + for (ws.step_status_bits) |*bits| @atomicStore(u8, bits, 0, .monotonic); + ws.build_status.store(.running, .monotonic); + ws.notifyUpdate(); +} + +pub fn updateStepStatus(ws: *WebServer, step: *Build.Step, new_status: abi.StepUpdate.Status) void { + const step_idx: u32 = for (ws.all_steps, 0..) |s, i| { + if (s == step) break @intCast(i); + } else unreachable; + const ptr = &ws.step_status_bits[step_idx / 4]; + const bit_offset: u3 = @intCast((step_idx % 4) * 2); + const old_bits: u2 = @truncate(@atomicLoad(u8, ptr, .monotonic) >> bit_offset); + const mask = @as(u8, @intFromEnum(new_status) ^ old_bits) << bit_offset; + _ = @atomicRmw(u8, ptr, .Xor, mask, .monotonic); + ws.notifyUpdate(); +} + +pub fn finishBuild(ws: *WebServer, opts: struct { + fuzz: bool, +}) void { + if (opts.fuzz) { + switch (builtin.os.tag) { + // Current implementation depends on two things that need to be ported to Windows: + // * Memory-mapping to share data between the fuzzer and build runner. + // * COFF/PE support added to `std.debug.Info` (it needs a batching API for resolving + // many addresses to source locations). + .windows => std.process.fatal("--fuzz not yet implemented for {s}", .{@tagName(builtin.os.tag)}), + else => {}, + } + if (@bitSizeOf(usize) != 64) { + // Current implementation depends on posix.mmap()'s second parameter, `length: usize`, + // being compatible with `std.fs.getEndPos() u64`'s return value. This is not the case + // on 32-bit platforms. + // Affects or affected by issues #5185, #22523, and #22464. 
+ std.process.fatal("--fuzz not yet implemented on {d}-bit platforms", .{@bitSizeOf(usize)}); + } + assert(ws.fuzz == null); + + ws.build_status.store(.fuzz_init, .monotonic); + ws.notifyUpdate(); + + ws.fuzz = Fuzz.init(ws) catch |err| std.process.fatal("failed to start fuzzer: {s}", .{@errorName(err)}); + ws.fuzz.?.start(); + } + + ws.build_status.store(if (ws.watch) .watching else .idle, .monotonic); + ws.notifyUpdate(); +} + +pub fn now(s: *const WebServer) i64 { + return @intCast(std.time.nanoTimestamp() - s.base_timestamp); +} + +fn accept(ws: *WebServer, connection: std.net.Server.Connection) void { + defer connection.stream.close(); + + var read_buf: [0x4000]u8 = undefined; + var server: std.http.Server = .init(connection, &read_buf); + + while (true) { + var request = server.receiveHead() catch |err| switch (err) { + error.HttpConnectionClosing => return, + else => { + log.err("failed to receive http request: {s}", .{@errorName(err)}); + return; + }, + }; + var ws_send_buf: [0x4000]u8 = undefined; + var ws_recv_buf: [0x4000]u8 align(4) = undefined; + if (std.http.WebSocket.init(&request, &ws_send_buf, &ws_recv_buf) catch |err| { + log.err("failed to initialize websocket connection: {s}", .{@errorName(err)}); + return; + }) |ws_init| { + var web_socket = ws_init; + ws.serveWebSocket(&web_socket) catch |err| { + log.err("failed to serve websocket: {s}", .{@errorName(err)}); + return; + }; + comptime unreachable; + } else { + ws.serveRequest(&request) catch |err| switch (err) { + error.AlreadyReported => return, + else => { + log.err("failed to serve '{s}': {s}", .{ request.head.target, @errorName(err) }); + return; + }, + }; + } + } +} + +fn makeIov(s: []const u8) std.posix.iovec_const { + return .{ + .base = s.ptr, + .len = s.len, + }; +} +fn serveWebSocket(ws: *WebServer, sock: *std.http.WebSocket) !noreturn { + var prev_build_status = ws.build_status.load(.monotonic); + + const prev_step_status_bits = try ws.gpa.alloc(u8, ws.step_status_bits.len); + defer ws.gpa.free(prev_step_status_bits); + for (prev_step_status_bits, ws.step_status_bits) |*copy, *shared| { + copy.* = @atomicLoad(u8, shared, .monotonic); + } + + _ = try std.Thread.spawn(.{}, recvWebSocketMessages, .{ ws, sock }); + + { + const hello_header: abi.Hello = .{ + .status = prev_build_status, + .flags = .{ + .time_report = ws.graph.time_report, + }, + .timestamp = ws.now(), + .steps_len = @intCast(ws.all_steps.len), + }; + try sock.writeMessagev(&.{ + makeIov(@ptrCast(&hello_header)), + makeIov(ws.step_names_trailing), + makeIov(prev_step_status_bits), + }, .binary); + } + + var prev_fuzz: Fuzz.Previous = .init; + var prev_time: i64 = std.math.minInt(i64); + while (true) { + const start_time = ws.now(); + const start_update_id = ws.update_id.load(.acquire); + + if (ws.fuzz) |*fuzz| { + try fuzz.sendUpdate(sock, &prev_fuzz); + } + + { + ws.time_report_mutex.lock(); + defer ws.time_report_mutex.unlock(); + for (ws.time_report_msgs, ws.time_report_update_times) |msg, update_time| { + if (update_time <= prev_time) continue; + // We want to send `msg`, but shouldn't block `ws.time_report_mutex` while we do, so + // that we don't hold up the build system on the client accepting this packet. + const owned_msg = try ws.gpa.dupe(u8, msg); + defer ws.gpa.free(owned_msg); + // Temporarily unlock, then re-lock after the message is sent. 
+ ws.time_report_mutex.unlock();
+ defer ws.time_report_mutex.lock();
+ try sock.writeMessage(owned_msg, .binary);
+ }
+ }
+
+ {
+ const build_status = ws.build_status.load(.monotonic);
+ if (build_status != prev_build_status) {
+ prev_build_status = build_status;
+ const msg: abi.StatusUpdate = .{ .new = build_status };
+ try sock.writeMessage(@ptrCast(&msg), .binary);
+ }
+ }
+
+ for (prev_step_status_bits, ws.step_status_bits, 0..) |*prev_byte, *shared, byte_idx| {
+ const cur_byte = @atomicLoad(u8, shared, .monotonic);
+ if (prev_byte.* == cur_byte) continue;
+ const cur: [4]abi.StepUpdate.Status = .{
+ @enumFromInt(@as(u2, @truncate(cur_byte >> 0))),
+ @enumFromInt(@as(u2, @truncate(cur_byte >> 2))),
+ @enumFromInt(@as(u2, @truncate(cur_byte >> 4))),
+ @enumFromInt(@as(u2, @truncate(cur_byte >> 6))),
+ };
+ const prev: [4]abi.StepUpdate.Status = .{
+ @enumFromInt(@as(u2, @truncate(prev_byte.* >> 0))),
+ @enumFromInt(@as(u2, @truncate(prev_byte.* >> 2))),
+ @enumFromInt(@as(u2, @truncate(prev_byte.* >> 4))),
+ @enumFromInt(@as(u2, @truncate(prev_byte.* >> 6))),
+ };
+ for (cur, prev, byte_idx * 4..) |cur_status, prev_status, step_idx| {
+ const msg: abi.StepUpdate = .{ .step_idx = @intCast(step_idx), .bits = .{ .status = cur_status } };
+ if (cur_status != prev_status) try sock.writeMessage(@ptrCast(&msg), .binary);
+ }
+ prev_byte.* = cur_byte;
+ }
+
+ prev_time = start_time;
+ std.Thread.Futex.timedWait(&ws.update_id, start_update_id, std.time.ns_per_ms * default_update_interval_ms) catch {};
+ }
+}
+fn recvWebSocketMessages(ws: *WebServer, sock: *std.http.WebSocket) void {
+ while (true) {
+ const msg = sock.readSmallMessage() catch return;
+ if (msg.opcode != .binary) continue;
+ if (msg.data.len == 0) continue;
+ const tag: abi.ToServerTag = @enumFromInt(msg.data[0]);
+ switch (tag) {
+ _ => continue,
+ .rebuild => while (true) {
+ ws.runner_request_mutex.lock();
+ defer ws.runner_request_mutex.unlock();
+ if (ws.runner_request == null) {
+ ws.runner_request = .rebuild;
+ ws.runner_request_ready_cond.signal();
+ break;
+ }
+ ws.runner_request_empty_cond.wait(&ws.runner_request_mutex);
+ },
+ }
+ }
+}
+
+fn serveRequest(ws: *WebServer, req: *std.http.Server.Request) !void {
+ // Strip an optional leading '/debug' component from the request.
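+ // For example (illustrative):
+ //   "/debug"          -> target "/",         debug = true
+ //   "/debug/main.js"  -> target "/main.js",  debug = true
+ //   "/main.js"        -> target "/main.js",  debug = false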
+ const target: []const u8, const debug: bool = target: { + if (mem.eql(u8, req.head.target, "/debug")) break :target .{ "/", true }; + if (mem.eql(u8, req.head.target, "/debug/")) break :target .{ "/", true }; + if (mem.startsWith(u8, req.head.target, "/debug/")) break :target .{ req.head.target["/debug".len..], true }; + break :target .{ req.head.target, false }; + }; + + if (mem.eql(u8, target, "/")) return serveLibFile(ws, req, "build-web/index.html", "text/html"); + if (mem.eql(u8, target, "/main.js")) return serveLibFile(ws, req, "build-web/main.js", "application/javascript"); + if (mem.eql(u8, target, "/style.css")) return serveLibFile(ws, req, "build-web/style.css", "text/css"); + if (mem.eql(u8, target, "/time_report.css")) return serveLibFile(ws, req, "build-web/time_report.css", "text/css"); + if (mem.eql(u8, target, "/main.wasm")) return serveClientWasm(ws, req, if (debug) .Debug else .ReleaseFast); + + if (ws.fuzz) |*fuzz| { + if (mem.eql(u8, target, "/sources.tar")) return fuzz.serveSourcesTar(req); + } + + try req.respond("not found", .{ + .status = .not_found, + .extra_headers = &.{ + .{ .name = "Content-Type", .value = "text/plain" }, + }, + }); +} + +fn serveLibFile( + ws: *WebServer, + request: *std.http.Server.Request, + sub_path: []const u8, + content_type: []const u8, +) !void { + return serveFile(ws, request, .{ + .root_dir = ws.graph.zig_lib_directory, + .sub_path = sub_path, + }, content_type); +} +fn serveClientWasm( + ws: *WebServer, + req: *std.http.Server.Request, + optimize_mode: std.builtin.OptimizeMode, +) !void { + var arena_state: std.heap.ArenaAllocator = .init(ws.gpa); + defer arena_state.deinit(); + const arena = arena_state.allocator(); + + // We always rebuild the wasm on-the-fly, so that if it is edited the user can just refresh the page. + const bin_path = try buildClientWasm(ws, arena, optimize_mode); + return serveFile(ws, req, bin_path, "application/wasm"); +} + +pub fn serveFile( + ws: *WebServer, + request: *std.http.Server.Request, + path: Cache.Path, + content_type: []const u8, +) !void { + const gpa = ws.gpa; + // The desired API is actually sendfile, which will require enhancing std.http.Server. + // We load the file with every request so that the user can make changes to the file + // and refresh the HTML page without restarting this server. 
+ const file_contents = path.root_dir.handle.readFileAlloc(gpa, path.sub_path, 10 * 1024 * 1024) catch |err| { + log.err("failed to read '{f}': {s}", .{ path, @errorName(err) }); + return error.AlreadyReported; + }; + defer gpa.free(file_contents); + try request.respond(file_contents, .{ + .extra_headers = &.{ + .{ .name = "Content-Type", .value = content_type }, + cache_control_header, + }, + }); +} +pub fn serveTarFile( + ws: *WebServer, + request: *std.http.Server.Request, + paths: []const Cache.Path, +) !void { + const gpa = ws.gpa; + + var send_buf: [0x4000]u8 = undefined; + var response = request.respondStreaming(.{ + .send_buffer = &send_buf, + .respond_options = .{ + .extra_headers = &.{ + .{ .name = "Content-Type", .value = "application/x-tar" }, + cache_control_header, + }, + }, + }); + + var cached_cwd_path: ?[]const u8 = null; + defer if (cached_cwd_path) |p| gpa.free(p); + + var response_buf: [1024]u8 = undefined; + var adapter = response.writer().adaptToNewApi(); + adapter.new_interface.buffer = &response_buf; + var archiver: std.tar.Writer = .{ .underlying_writer = &adapter.new_interface }; + + for (paths) |path| { + var file = path.root_dir.handle.openFile(path.sub_path, .{}) catch |err| { + log.err("failed to open '{f}': {s}", .{ path, @errorName(err) }); + continue; + }; + defer file.close(); + const stat = try file.stat(); + var read_buffer: [1024]u8 = undefined; + var file_reader: std.fs.File.Reader = .initSize(file, &read_buffer, stat.size); + + // TODO: this logic is completely bogus -- obviously so, because `path.root_dir.path` can + // be cwd-relative. This is also related to why linkification doesn't work in the fuzzer UI: + // it turns out the WASM treats the first path component as the module name, typically + // resulting in modules named "" and "src". The compiler needs to tell the build system + // about the module graph so that the build system can correctly encode this information in + // the tar file. 
+ archiver.prefix = path.root_dir.path orelse cwd: { + if (cached_cwd_path == null) cached_cwd_path = try std.process.getCwdAlloc(gpa); + break :cwd cached_cwd_path.?; + }; + try archiver.writeFile(path.sub_path, &file_reader, stat.mtime); + } + + // intentionally not calling `archiver.finishPedantically` + try adapter.new_interface.flush(); + try response.end(); +} + +fn buildClientWasm(ws: *WebServer, arena: Allocator, optimize: std.builtin.OptimizeMode) !Cache.Path { + const root_name = "build-web"; + const arch_os_abi = "wasm32-freestanding"; + const cpu_features = "baseline+atomics+bulk_memory+multivalue+mutable_globals+nontrapping_fptoint+reference_types+sign_ext"; + + const gpa = ws.gpa; + const graph = ws.graph; + + const main_src_path: Cache.Path = .{ + .root_dir = graph.zig_lib_directory, + .sub_path = "build-web/main.zig", + }; + const walk_src_path: Cache.Path = .{ + .root_dir = graph.zig_lib_directory, + .sub_path = "docs/wasm/Walk.zig", + }; + const html_render_src_path: Cache.Path = .{ + .root_dir = graph.zig_lib_directory, + .sub_path = "docs/wasm/html_render.zig", + }; + + var argv: std.ArrayListUnmanaged([]const u8) = .empty; + + try argv.appendSlice(arena, &.{ + graph.zig_exe, "build-exe", // + "-fno-entry", // + "-O", @tagName(optimize), // + "-target", arch_os_abi, // + "-mcpu", cpu_features, // + "--cache-dir", graph.global_cache_root.path orelse ".", // + "--global-cache-dir", graph.global_cache_root.path orelse ".", // + "--zig-lib-dir", graph.zig_lib_directory.path orelse ".", // + "--name", root_name, // + "-rdynamic", // + "-fsingle-threaded", // + "--dep", "Walk", // + "--dep", "html_render", // + try std.fmt.allocPrint(arena, "-Mroot={f}", .{main_src_path}), // + try std.fmt.allocPrint(arena, "-MWalk={f}", .{walk_src_path}), // + "--dep", "Walk", // + try std.fmt.allocPrint(arena, "-Mhtml_render={f}", .{html_render_src_path}), // + "--listen=-", + }); + + var child: std.process.Child = .init(argv.items, gpa); + child.stdin_behavior = .Pipe; + child.stdout_behavior = .Pipe; + child.stderr_behavior = .Pipe; + try child.spawn(); + + var poller = std.io.poll(gpa, enum { stdout, stderr }, .{ + .stdout = child.stdout.?, + .stderr = child.stderr.?, + }); + defer poller.deinit(); + + try child.stdin.?.writeAll(@ptrCast(@as([]const std.zig.Client.Message.Header, &.{ + .{ .tag = .update, .bytes_len = 0 }, + .{ .tag = .exit, .bytes_len = 0 }, + }))); + + const Header = std.zig.Server.Message.Header; + var result: ?Cache.Path = null; + var result_error_bundle = std.zig.ErrorBundle.empty; + + const stdout = poller.reader(.stdout); + + poll: while (true) { + while (stdout.buffered().len < @sizeOf(Header)) if (!(try poller.poll())) break :poll; + const header = stdout.takeStruct(Header, .little) catch unreachable; + while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll; + const body = stdout.take(header.bytes_len) catch unreachable; + + switch (header.tag) { + .zig_version => { + if (!std.mem.eql(u8, builtin.zig_version_string, body)) { + return error.ZigProtocolVersionMismatch; + } + }, + .error_bundle => { + const EbHdr = std.zig.Server.Message.ErrorBundle; + const eb_hdr = @as(*align(1) const EbHdr, @ptrCast(body)); + const extra_bytes = + body[@sizeOf(EbHdr)..][0 .. 
@sizeOf(u32) * eb_hdr.extra_len]; + const string_bytes = + body[@sizeOf(EbHdr) + extra_bytes.len ..][0..eb_hdr.string_bytes_len]; + const unaligned_extra: []align(1) const u32 = @ptrCast(extra_bytes); + const extra_array = try arena.alloc(u32, unaligned_extra.len); + @memcpy(extra_array, unaligned_extra); + result_error_bundle = .{ + .string_bytes = try arena.dupe(u8, string_bytes), + .extra = extra_array, + }; + }, + .emit_digest => { + const EmitDigest = std.zig.Server.Message.EmitDigest; + const ebp_hdr: *align(1) const EmitDigest = @ptrCast(body); + if (!ebp_hdr.flags.cache_hit) { + log.info("source changes detected; rebuilt wasm component", .{}); + } + const digest = body[@sizeOf(EmitDigest)..][0..Cache.bin_digest_len]; + result = .{ + .root_dir = graph.global_cache_root, + .sub_path = try arena.dupe(u8, "o" ++ std.fs.path.sep_str ++ Cache.binToHex(digest.*)), + }; + }, + else => {}, // ignore other messages + } + } + + const stderr_contents = try poller.toOwnedSlice(.stderr); + if (stderr_contents.len > 0) { + std.debug.print("{s}", .{stderr_contents}); + } + + // Send EOF to stdin. + child.stdin.?.close(); + child.stdin = null; + + switch (try child.wait()) { + .Exited => |code| { + if (code != 0) { + log.err( + "the following command exited with error code {d}:\n{s}", + .{ code, try Build.Step.allocPrintCmd(arena, null, argv.items) }, + ); + return error.WasmCompilationFailed; + } + }, + .Signal, .Stopped, .Unknown => { + log.err( + "the following command terminated unexpectedly:\n{s}", + .{try Build.Step.allocPrintCmd(arena, null, argv.items)}, + ); + return error.WasmCompilationFailed; + }, + } + + if (result_error_bundle.errorMessageCount() > 0) { + const color = std.zig.Color.auto; + result_error_bundle.renderToStdErr(color.renderOptions()); + log.err("the following command failed with {d} compilation errors:\n{s}", .{ + result_error_bundle.errorMessageCount(), + try Build.Step.allocPrintCmd(arena, null, argv.items), + }); + return error.WasmCompilationFailed; + } + + const base_path = result orelse { + log.err("child process failed to report result\n{s}", .{ + try Build.Step.allocPrintCmd(arena, null, argv.items), + }); + return error.WasmCompilationFailed; + }; + const bin_name = try std.zig.binNameAlloc(arena, .{ + .root_name = root_name, + .target = &(std.zig.system.resolveTargetQuery(std.Build.parseTargetQuery(.{ + .arch_os_abi = arch_os_abi, + .cpu_features = cpu_features, + }) catch unreachable) catch unreachable), + .output_mode = .Exe, + }); + return base_path.join(arena, bin_name); +} + +pub fn updateTimeReportCompile(ws: *WebServer, opts: struct { + compile: *Build.Step.Compile, + + use_llvm: bool, + stats: abi.time_report.CompileResult.Stats, + ns_total: u64, + + llvm_pass_timings_len: u32, + files_len: u32, + decls_len: u32, + + /// The trailing data of `abi.time_report.CompileResult`, except the step name. + trailing: []const u8, +}) void { + const gpa = ws.gpa; + + const step_idx: u32 = for (ws.all_steps, 0..) 
|s, i| { + if (s == &opts.compile.step) break @intCast(i); + } else unreachable; + + const old_buf = old: { + ws.time_report_mutex.lock(); + defer ws.time_report_mutex.unlock(); + const old = ws.time_report_msgs[step_idx]; + ws.time_report_msgs[step_idx] = &.{}; + break :old old; + }; + const buf = gpa.realloc(old_buf, @sizeOf(abi.time_report.CompileResult) + opts.trailing.len) catch @panic("out of memory"); + + const out_header: *align(1) abi.time_report.CompileResult = @ptrCast(buf[0..@sizeOf(abi.time_report.CompileResult)]); + out_header.* = .{ + .step_idx = step_idx, + .flags = .{ + .use_llvm = opts.use_llvm, + }, + .stats = opts.stats, + .ns_total = opts.ns_total, + .llvm_pass_timings_len = opts.llvm_pass_timings_len, + .files_len = opts.files_len, + .decls_len = opts.decls_len, + }; + @memcpy(buf[@sizeOf(abi.time_report.CompileResult)..], opts.trailing); + + { + ws.time_report_mutex.lock(); + defer ws.time_report_mutex.unlock(); + assert(ws.time_report_msgs[step_idx].len == 0); + ws.time_report_msgs[step_idx] = buf; + ws.time_report_update_times[step_idx] = ws.now(); + } + ws.notifyUpdate(); +} + +pub fn updateTimeReportGeneric(ws: *WebServer, step: *Build.Step, ns_total: u64) void { + const gpa = ws.gpa; + + const step_idx: u32 = for (ws.all_steps, 0..) |s, i| { + if (s == step) break @intCast(i); + } else unreachable; + + const old_buf = old: { + ws.time_report_mutex.lock(); + defer ws.time_report_mutex.unlock(); + const old = ws.time_report_msgs[step_idx]; + ws.time_report_msgs[step_idx] = &.{}; + break :old old; + }; + const buf = gpa.realloc(old_buf, @sizeOf(abi.time_report.GenericResult)) catch @panic("out of memory"); + const out: *align(1) abi.time_report.GenericResult = @ptrCast(buf); + out.* = .{ + .step_idx = step_idx, + .ns_total = ns_total, + }; + { + ws.time_report_mutex.lock(); + defer ws.time_report_mutex.unlock(); + assert(ws.time_report_msgs[step_idx].len == 0); + ws.time_report_msgs[step_idx] = buf; + ws.time_report_update_times[step_idx] = ws.now(); + } + ws.notifyUpdate(); +} + +const RunnerRequest = union(enum) { + rebuild, +}; +pub fn getRunnerRequest(ws: *WebServer) ?RunnerRequest { + ws.runner_request_mutex.lock(); + defer ws.runner_request_mutex.unlock(); + if (ws.runner_request) |req| { + ws.runner_request = null; + ws.runner_request_empty_cond.signal(); + return req; + } + return null; +} +pub fn wait(ws: *WebServer) RunnerRequest { + ws.runner_request_mutex.lock(); + defer ws.runner_request_mutex.unlock(); + while (true) { + if (ws.runner_request) |req| { + ws.runner_request = null; + ws.runner_request_empty_cond.signal(); + return req; + } + ws.runner_request_ready_cond.wait(&ws.runner_request_mutex); + } +} + +const cache_control_header: std.http.Header = .{ + .name = "Cache-Control", + .value = "max-age=0, must-revalidate", +}; + +const builtin = @import("builtin"); +const std = @import("std"); +const assert = std.debug.assert; +const mem = std.mem; +const log = std.log.scoped(.web_server); +const Allocator = std.mem.Allocator; +const Build = std.Build; +const Cache = Build.Cache; +const Fuzz = Build.Fuzz; +const abi = Build.abi; + +const WebServer = @This(); diff --git a/lib/std/Build/abi.zig b/lib/std/Build/abi.zig new file mode 100644 index 0000000000..d5b02d951a --- /dev/null +++ b/lib/std/Build/abi.zig @@ -0,0 +1,313 @@ +//! This file is shared among Zig code running in wildly different contexts: +//! * The build runner, running on the host computer +//! * The build system web interface Wasm code, running in the browser +//! 
* `libfuzzer`, compiled alongside unit tests +//! +//! All of these components interface to some degree via an ABI: +//! * The build runner communicates with the web interface over a WebSocket connection +//! * The build runner communicates with `libfuzzer` over a shared memory-mapped file + +// Check that no WebSocket message type has implicit padding bits. This ensures we never send any +// undefined bits over the wire, and also helps validate that the layout doesn't differ between, for +// instance, the web server in `std.Build` and the Wasm client. +comptime { + const check = struct { + fn check(comptime T: type) void { + const std = @import("std"); + std.debug.assert(@typeInfo(T) == .@"struct"); + std.debug.assert(@typeInfo(T).@"struct".layout == .@"extern"); + std.debug.assert(std.meta.hasUniqueRepresentation(T)); + } + }.check; + + // server->client + check(Hello); + check(StatusUpdate); + check(StepUpdate); + check(fuzz.SourceIndexHeader); + check(fuzz.CoverageUpdateHeader); + check(fuzz.EntryPointHeader); + check(time_report.GenericResult); + check(time_report.CompileResult); + + // client->server + check(Rebuild); +} + +/// All WebSocket messages sent by the server to the client begin with a `ToClientTag` byte. This +/// enum is non-exhaustive only to avoid Illegal Behavior when malformed messages are sent over the +/// socket; unnamed tags are an error condition and should terminate the connection. +/// +/// Every tag has a curresponding `extern struct` representing the full message (or a header of the +/// message if it is variable-length). For instance, `.hello` corresponds to `Hello`. +/// +/// When introducing a tag, make sure to add a corresponding `extern struct` whose first field is +/// this enum, and `check` its layout in the `comptime` block above. +pub const ToClientTag = enum(u8) { + hello, + status_update, + step_update, + + // `--fuzz` + fuzz_source_index, + fuzz_coverage_update, + fuzz_entry_points, + + // `--time-report` + time_report_generic_result, + time_report_compile_result, + + _, +}; + +/// Like `ToClientTag`, but for messages sent by the client to the server. +pub const ToServerTag = enum(u8) { + rebuild, + + _, +}; + +/// The current overall status of the build runner. +/// Keep in sync with indices in web UI `main.js:updateBuildStatus`. +pub const BuildStatus = enum(u8) { + idle, + watching, + running, + fuzz_init, +}; + +/// WebSocket server->client. +/// +/// Sent by the server as the first message after a WebSocket connection opens to provide basic +/// information about the server, the build graph, etc. +/// +/// Trailing: +/// * `step_name_len: u32` for each `steps_len` +/// * `step_name: [step_name_len]u8` for each `step_name_len` +/// * `step_status: u8` for every 4 `steps_len`; every 2 bits is a `StepUpdate.Status`, LSBs first +pub const Hello = extern struct { + tag: ToClientTag = .hello, + + status: BuildStatus, + flags: Flags, + + /// Any message containing a timestamp represents it as a number of nanoseconds relative to when + /// the build began. This field is the current timestamp, represented in that form. + timestamp: i64 align(4), + + /// The number of steps in the build graph which are reachable from the top-level step[s] being + /// run; in other words, the number of steps which will be executed by this build. The name of + /// each step trails this message. + steps_len: u32 align(1), + + pub const Flags = packed struct(u16) { + /// Whether time reporting is enabled. 
+ time_report: bool, + _: u15 = 0, + }; +}; +/// WebSocket server->client. +/// +/// Indicates that the build status has changed. +pub const StatusUpdate = extern struct { + tag: ToClientTag = .status_update, + new: BuildStatus, +}; +/// WebSocket server->client. +/// +/// Indicates a change in a step's status. +pub const StepUpdate = extern struct { + tag: ToClientTag = .step_update, + step_idx: u32 align(1), + bits: packed struct(u8) { + status: Status, + _: u6 = 0, + }, + /// Keep in sync with indices in web UI `main.js:updateStepStatus`. + pub const Status = enum(u2) { + pending, + wip, + success, + failure, + }; +}; + +pub const Rebuild = extern struct { + tag: ToServerTag = .rebuild, +}; + +/// ABI bits specifically relating to the fuzzer interface. +pub const fuzz = struct { + /// libfuzzer uses this and its usize is the one that counts. To match the ABI, + /// make the ints be the size of the target used with libfuzzer. + /// + /// Trailing: + /// * 1 bit per pc_addr, usize elements + /// * pc_addr: usize for each pcs_len + pub const SeenPcsHeader = extern struct { + n_runs: usize, + unique_runs: usize, + pcs_len: usize, + + /// Used for comptime assertions. Provides a mechanism for strategically + /// causing compile errors. + pub const trailing = .{ + .pc_bits_usize, + .pc_addr, + }; + + pub fn headerEnd(header: *const SeenPcsHeader) []const usize { + const ptr: [*]align(@alignOf(usize)) const u8 = @ptrCast(header); + const header_end_ptr: [*]const usize = @ptrCast(ptr + @sizeOf(SeenPcsHeader)); + const pcs_len = header.pcs_len; + return header_end_ptr[0 .. pcs_len + seenElemsLen(pcs_len)]; + } + + pub fn seenBits(header: *const SeenPcsHeader) []const usize { + return header.headerEnd()[0..seenElemsLen(header.pcs_len)]; + } + + pub fn seenElemsLen(pcs_len: usize) usize { + return (pcs_len + @bitSizeOf(usize) - 1) / @bitSizeOf(usize); + } + + pub fn pcAddrs(header: *const SeenPcsHeader) []const usize { + const pcs_len = header.pcs_len; + return header.headerEnd()[seenElemsLen(pcs_len)..][0..pcs_len]; + } + }; + + /// WebSocket server->client. + /// + /// Sent once, when fuzzing starts, to indicate the available coverage data. + /// + /// Trailing: + /// * std.debug.Coverage.String for each directories_len + /// * std.debug.Coverage.File for each files_len + /// * std.debug.Coverage.SourceLocation for each source_locations_len + /// * u8 for each string_bytes_len + pub const SourceIndexHeader = extern struct { + tag: ToClientTag = .fuzz_source_index, + _: [3]u8 = @splat(0), + directories_len: u32, + files_len: u32, + source_locations_len: u32, + string_bytes_len: u32, + /// When, according to the server, fuzzing started. + start_timestamp: i64 align(4), + }; + + /// WebSocket server->client. + /// + /// Sent whenever the set of covered source locations is updated. + /// + /// Trailing: + /// * one bit per source_locations_len, contained in u64 elements + pub const CoverageUpdateHeader = extern struct { + tag: ToClientTag = .fuzz_coverage_update, + _: [7]u8 = @splat(0), + n_runs: u64, + unique_runs: u64, + + pub const trailing = .{ + .pc_bits_usize, + }; + }; + + /// WebSocket server->client. + /// + /// Sent whenever the set of entry points is updated. 
+ /// + /// Trailing: + /// * one u32 index of source_locations per locsLen() + pub const EntryPointHeader = extern struct { + tag: ToClientTag = .fuzz_entry_points, + locs_len_raw: [3]u8, + + pub fn locsLen(hdr: EntryPointHeader) u24 { + return @bitCast(hdr.locs_len_raw); + } + pub fn init(locs_len: u24) EntryPointHeader { + return .{ .locs_len_raw = @bitCast(locs_len) }; + } + }; +}; + +/// ABI bits specifically relating to the time report interface. +pub const time_report = struct { + /// WebSocket server->client. + /// + /// Sent after a `Step` finishes, providing the time taken to execute the step. + pub const GenericResult = extern struct { + tag: ToClientTag = .time_report_generic_result, + step_idx: u32 align(1), + ns_total: u64 align(1), + }; + + /// WebSocket server->client. + /// + /// Sent after a `Step.Compile` finishes, providing the step's time report. + /// + /// Trailing: + /// * `llvm_pass_timings: [llvm_pass_timings_len]u8` (ASCII-encoded) + /// * for each `files_len`: + /// * `name` (null-terminated UTF-8 string) + /// * for each `decls_len`: + /// * `name` (null-terminated UTF-8 string) + /// * `file: u32` (index of file this decl is in) + /// * `sema_ns: u64` (nanoseconds spent semantically analyzing this decl) + /// * `codegen_ns: u64` (nanoseconds spent semantically analyzing this decl) + /// * `link_ns: u64` (nanoseconds spent semantically analyzing this decl) + pub const CompileResult = extern struct { + tag: ToClientTag = .time_report_compile_result, + + step_idx: u32 align(1), + + flags: Flags, + stats: Stats align(1), + ns_total: u64 align(1), + + llvm_pass_timings_len: u32 align(1), + files_len: u32 align(1), + decls_len: u32 align(1), + + pub const Flags = packed struct(u8) { + use_llvm: bool, + _: u7 = 0, + }; + + pub const Stats = extern struct { + n_reachable_files: u32, + n_imported_files: u32, + n_generic_instances: u32, + n_inline_calls: u32, + + cpu_ns_parse: u64, + cpu_ns_astgen: u64, + cpu_ns_sema: u64, + cpu_ns_codegen: u64, + cpu_ns_link: u64, + + real_ns_files: u64, + real_ns_decls: u64, + real_ns_llvm_emit: u64, + real_ns_link_flush: u64, + + pub const init: Stats = .{ + .n_reachable_files = 0, + .n_imported_files = 0, + .n_generic_instances = 0, + .n_inline_calls = 0, + .cpu_ns_parse = 0, + .cpu_ns_astgen = 0, + .cpu_ns_sema = 0, + .cpu_ns_codegen = 0, + .cpu_ns_link = 0, + .real_ns_files = 0, + .real_ns_decls = 0, + .real_ns_llvm_emit = 0, + .real_ns_link_flush = 0, + }; + }; + }; +}; diff --git a/lib/std/http/WebSocket.zig b/lib/std/http/WebSocket.zig index 8ab434ceae..b9a66cdbd6 100644 --- a/lib/std/http/WebSocket.zig +++ b/lib/std/http/WebSocket.zig @@ -18,14 +18,13 @@ pub const InitError = error{WebSocketUpgradeMissingKey} || std.http.Server.Request.ReaderError; pub fn init( - ws: *WebSocket, request: *std.http.Server.Request, send_buffer: []u8, recv_buffer: []align(4) u8, -) InitError!bool { +) InitError!?WebSocket { switch (request.head.version) { - .@"HTTP/1.0" => return false, - .@"HTTP/1.1" => if (request.head.method != .GET) return false, + .@"HTTP/1.0" => return null, + .@"HTTP/1.1" => if (request.head.method != .GET) return null, } var sec_websocket_key: ?[]const u8 = null; @@ -36,12 +35,12 @@ pub fn init( sec_websocket_key = header.value; } else if (std.ascii.eqlIgnoreCase(header.name, "upgrade")) { if (!std.ascii.eqlIgnoreCase(header.value, "websocket")) - return false; + return null; upgrade_websocket = true; } } if (!upgrade_websocket) - return false; + return null; const key = sec_websocket_key orelse return 
error.WebSocketUpgradeMissingKey; @@ -55,7 +54,7 @@ pub fn init( request.head.content_length = std.math.maxInt(u64); - ws.* = .{ + return .{ .key = key, .recv_fifo = std.fifo.LinearFifo(u8, .Slice).init(recv_buffer), .reader = try request.reader(), @@ -74,7 +73,6 @@ pub fn init( .request = request, .outstanding_len = 0, }; - return true; } pub const Header0 = packed struct(u8) { diff --git a/lib/std/net.zig b/lib/std/net.zig index d7387662c0..ac851059d2 100644 --- a/lib/std/net.zig +++ b/lib/std/net.zig @@ -42,6 +42,47 @@ pub const Address = extern union { in6: Ip6Address, un: if (has_unix_sockets) posix.sockaddr.un else void, + /// Parse an IP address which may include a port. For IPv4, this is just written `address:port`. + /// For IPv6, RFC 3986 defines this as an "IP literal", and the port is differentiated from the + /// address by surrounding the address part in brackets '[addr]:port'. Even if the port is not + /// given, the brackets are mandatory. + pub fn parseIpAndPort(str: []const u8) error{ InvalidAddress, InvalidPort }!Address { + if (str.len == 0) return error.InvalidAddress; + if (str[0] == '[') { + const addr_end = std.mem.indexOfScalar(u8, str, ']') orelse + return error.InvalidAddress; + const addr_str = str[1..addr_end]; + const port: u16 = p: { + if (addr_end == str.len - 1) break :p 0; + if (str[addr_end + 1] != ':') return error.InvalidAddress; + break :p parsePort(str[addr_end + 2 ..]) orelse return error.InvalidPort; + }; + return parseIp6(addr_str, port) catch error.InvalidAddress; + } else { + if (std.mem.indexOfScalar(u8, str, ':')) |idx| { + // hold off on `error.InvalidPort` since `error.InvalidAddress` might make more sense + const port: ?u16 = parsePort(str[idx + 1 ..]); + const addr = parseIp4(str[0..idx], port orelse 0) catch return error.InvalidAddress; + if (port == null) return error.InvalidPort; + return addr; + } else { + return parseIp4(str, 0) catch error.InvalidAddress; + } + } + } + fn parsePort(str: []const u8) ?u16 { + var p: u16 = 0; + for (str) |c| switch (c) { + '0'...'9' => { + const shifted = std.math.mul(u16, p, 10) catch return null; + p = std.math.add(u16, shifted, c - '0') catch return null; + }, + else => return null, + }; + if (p == 0) return null; + return p; + } + /// Parse the given IP address string into an Address value. /// It is recommended to use `resolveIp` instead, to handle /// IPv6 link-local unix addresses. diff --git a/lib/std/zig/Server.zig b/lib/std/zig/Server.zig index 12bd259b16..ea60354741 100644 --- a/lib/std/zig/Server.zig +++ b/lib/std/zig/Server.zig @@ -50,6 +50,8 @@ pub const Message = struct { /// address of the fuzz unit test. This is used to provide a starting /// point to view coverage. fuzz_start_addr, + /// Body is a TimeReport. + time_report, _, }; @@ -95,6 +97,19 @@ pub const Message = struct { }; }; + /// Trailing is the same as in `std.Build.abi.time_report.CompileResult`, excluding `step_name`. + pub const TimeReport = extern struct { + stats: std.Build.abi.time_report.CompileResult.Stats align(4), + llvm_pass_timings_len: u32, + files_len: u32, + decls_len: u32, + flags: Flags, + pub const Flags = packed struct(u32) { + use_llvm: bool, + _: u31 = 0, + }; + }; + /// Trailing: /// * the hex digest of the cache directory within the /o/ subdirectory. pub const EmitDigest = extern struct { |
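As an illustration of the new `std.net.Address.parseIpAndPort` helper above (not part of this change set), a small test covering the accepted forms; it follows directly from the `parseIpAndPort`/`parsePort` logic:

```zig
const std = @import("std");

test "Address.parseIpAndPort accepted forms" {
    const Address = std.net.Address;

    // IPv4, with and without an explicit port (no port means 0, i.e. "pick any").
    const a = try Address.parseIpAndPort("127.0.0.1:8080");
    try std.testing.expectEqual(@as(u16, 8080), a.getPort());
    const b = try Address.parseIpAndPort("127.0.0.1");
    try std.testing.expectEqual(@as(u16, 0), b.getPort());

    // IPv6 literals must be bracketed even when no port is given.
    const c = try Address.parseIpAndPort("[::1]:8080");
    try std.testing.expectEqual(@as(u16, 8080), c.getPort());
    _ = try Address.parseIpAndPort("[::1]");

    // Unbracketed IPv6 is rejected as an address error...
    try std.testing.expectError(error.InvalidAddress, Address.parseIpAndPort("::1"));
    // ...and a zero or malformed port is rejected as a port error.
    try std.testing.expectError(error.InvalidPort, Address.parseIpAndPort("127.0.0.1:0"));
}
```

Note that a port of `0` is reported as `error.InvalidPort` rather than treated as "unspecified"; callers that want the default behavior should omit the `:port` suffix entirely.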

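For reference, here is a minimal client-side sketch (not taken from the actual `build-web` Wasm client) of decoding the `Hello` message and its trailing data, following the wire layout documented in `std.Build.abi.Hello`; `decodeHello` is a hypothetical helper:

```zig
const std = @import("std");
const abi = std.Build.abi;

/// Illustrative decoder for the first binary WebSocket message (`abi.Hello` plus its
/// trailing data). `msg` is assumed to be exactly one whole WebSocket message.
/// Returns the step names; the slices point into `msg`.
fn decodeHello(gpa: std.mem.Allocator, msg: []const u8) ![]const []const u8 {
    const hdr: *align(1) const abi.Hello = @ptrCast(msg[0..@sizeOf(abi.Hello)]);
    std.debug.assert(hdr.tag == .hello);
    const steps_len = hdr.steps_len;

    // Trailing layout: `steps_len` u32 name lengths, then the concatenated names,
    // then ceil(steps_len / 4) bytes of 2-bit step statuses (LSBs = earlier steps).
    var rest = msg[@sizeOf(abi.Hello)..];
    const name_lens: []align(1) const u32 = @ptrCast(rest[0 .. steps_len * 4]);
    rest = rest[steps_len * 4 ..];

    const names = try gpa.alloc([]const u8, steps_len);
    errdefer gpa.free(names);
    var offset: usize = 0;
    for (names, name_lens) |*name, len| {
        name.* = rest[offset..][0..len];
        offset += len;
    }

    const status_bytes = rest[offset..][0 .. (steps_len + 3) / 4];
    for (status_bytes, 0..) |byte, byte_idx| {
        for (0..4) |i| {
            const step_idx = byte_idx * 4 + i;
            if (step_idx >= steps_len) break;
            const status: abi.StepUpdate.Status = @enumFromInt(@as(u2, @truncate(byte >> @intCast(i * 2))));
            std.log.debug("step '{s}': {s}", .{ names[step_idx], @tagName(status) });
        }
    }
    return names;
}
```

The same 2-bits-per-step, LSB-first packing is what `serveWebSocket` diffs against its copy of `step_status_bits` to decide which `StepUpdate` messages to send after the handshake.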
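Finally, the `RunnerRequest` handshake (`wait`, `getRunnerRequest`, and the client's `rebuild` message) is consumed by the build runner, which is outside this diff. A rough sketch of one possible consumer, where `rebuildAllSteps` is a stand-in for the runner's own step-graph execution:

```zig
const std = @import("std");

/// Rough sketch only: how a `--webui` build runner might consume `RunnerRequest`s
/// after the initial build (a watch-enabled runner would instead poll
/// `getRunnerRequest()` alongside its file-watch events).
fn webUiLoop(web_server: *std.Build.WebServer, rebuildAllSteps: *const fn () void) void {
    while (true) {
        switch (web_server.wait()) {
            .rebuild => {
                // Reset per-step status bits and report `.running` to connected clients.
                web_server.startBuild();
                rebuildAllSteps();
                // Report `.watching`/`.idle` again; `fuzz = false` leaves the fuzzer off.
                web_server.finishBuild(.{ .fuzz = false });
            },
        }
    }
}
```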