38 files changed, 1152 insertions, 354 deletions
diff --git a/lib/build_runner.zig b/lib/build_runner.zig index bfc6ab77f6..42903b82f3 100644 --- a/lib/build_runner.zig +++ b/lib/build_runner.zig @@ -151,8 +151,7 @@ pub fn main() !void { std.debug.print("Expected argument after {s}\n\n", .{arg}); usageAndErr(builder, false, stderr_stream); }; - // TODO: support shorthand such as "2GiB", "2GB", or "2G" - max_rss = std.fmt.parseInt(usize, max_rss_text, 10) catch |err| { + max_rss = std.fmt.parseIntSizeSuffix(max_rss_text, 10) catch |err| { std.debug.print("invalid byte size: '{s}': {s}\n", .{ max_rss_text, @errorName(err), }); diff --git a/lib/compiler_rt/arm.zig b/lib/compiler_rt/arm.zig index 94cd4feb8c..b358fbfa80 100644 --- a/lib/compiler_rt/arm.zig +++ b/lib/compiler_rt/arm.zig @@ -51,7 +51,7 @@ const __udivmodsi4 = @import("int.zig").__udivmodsi4; const __divmoddi4 = @import("int.zig").__divmoddi4; const __udivmoddi4 = @import("int.zig").__udivmoddi4; -extern fn memset(dest: ?[*]u8, c: u8, n: usize) ?[*]u8; +extern fn memset(dest: ?[*]u8, c: i32, n: usize) ?[*]u8; extern fn memcpy(noalias dest: ?[*]u8, noalias src: ?[*]const u8, n: usize) ?[*]u8; extern fn memmove(dest: ?[*]u8, src: ?[*]const u8, n: usize) ?[*]u8; @@ -81,17 +81,17 @@ pub fn __aeabi_memmove8(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void _ = memmove(dest, src, n); } -pub fn __aeabi_memset(dest: [*]u8, n: usize, c: u8) callconv(.AAPCS) void { +pub fn __aeabi_memset(dest: [*]u8, n: usize, c: i32) callconv(.AAPCS) void { @setRuntimeSafety(false); // This is dentical to the standard `memset` definition but with the last // two arguments swapped _ = memset(dest, c, n); } -pub fn __aeabi_memset4(dest: [*]u8, n: usize, c: u8) callconv(.AAPCS) void { +pub fn __aeabi_memset4(dest: [*]u8, n: usize, c: i32) callconv(.AAPCS) void { @setRuntimeSafety(false); _ = memset(dest, c, n); } -pub fn __aeabi_memset8(dest: [*]u8, n: usize, c: u8) callconv(.AAPCS) void { +pub fn __aeabi_memset8(dest: [*]u8, n: usize, c: i32) callconv(.AAPCS) void { @setRuntimeSafety(false); _ = memset(dest, c, n); } diff --git a/lib/docs/index.html b/lib/docs/index.html index 6b9f95a7d0..36cfcc55cc 100644 --- a/lib/docs/index.html +++ b/lib/docs/index.html @@ -875,7 +875,7 @@ <div class="help-modal"> <div class="modal"> <h1>Keyboard Shortcuts</h1> - <dl><dt><kbd>?</kbd></dt><dd>Show this help modal</dd></dl> + <dl><dt><kbd>?</kbd></dt><dd>Toggle this help modal</dd></dl> <dl><dt><kbd>s</kbd></dt><dd>Focus the search field</dd></dl> <div style="margin-left: 1em"> <dl><dt><kbd>↑</kbd></dt><dd>Move up in search results</dd></dl> diff --git a/lib/docs/main.js b/lib/docs/main.js index d18c08f096..9686b09aaf 100644 --- a/lib/docs/main.js +++ b/lib/docs/main.js @@ -127,16 +127,20 @@ const NAV_MODES = { window.guideSearch = guidesSearchIndex; parseGuides(); - + // identifiers can contain '?' 
so we want to allow typing + // the question mark when the search is focused instead of toggling the help modal + let canToggleHelpModal = true; domSearch.disabled = false; domSearch.addEventListener("keydown", onSearchKeyDown, false); domSearch.addEventListener("focus", ev => { domSearchPlaceholder.classList.add("hidden"); + canToggleHelpModal = false; }); domSearch.addEventListener("blur", ev => { if (domSearch.value.length == 0) domSearchPlaceholder.classList.remove("hidden"); + canToggleHelpModal = true; }); domSectSearchAllResultsLink.addEventListener('click', onClickSearchShowAllResults, false); function onClickSearchShowAllResults(ev) { @@ -4066,9 +4070,16 @@ function addDeclToSearchResults(decl, declIndex, modNames, item, list, stack) { } break; case "?": - ev.preventDefault(); - ev.stopPropagation(); - showHelpModal(); + if (!canToggleHelpModal) break; + + // toggle the help modal + if (!domHelpModal.classList.contains("hidden")) { + onEscape(ev); + } else { + ev.preventDefault(); + ev.stopPropagation(); + showHelpModal(); + } break; } } @@ -4875,4 +4886,4 @@ function RadixTree() { // BUT! -// We want to be able to search "Hash", for example!
\ No newline at end of file +// We want to be able to search "Hash", for example! diff --git a/lib/std/Build/Step/ConfigHeader.zig b/lib/std/Build/Step/ConfigHeader.zig index a17784c96a..f6939e0e38 100644 --- a/lib/std/Build/Step/ConfigHeader.zig +++ b/lib/std/Build/Step/ConfigHeader.zig @@ -306,7 +306,9 @@ fn render_cmake( } var it = std.mem.tokenize(u8, line[1..], " \t\r"); const cmakedefine = it.next().?; - if (!std.mem.eql(u8, cmakedefine, "cmakedefine")) { + if (!std.mem.eql(u8, cmakedefine, "cmakedefine") and + !std.mem.eql(u8, cmakedefine, "cmakedefine01")) + { try output.appendSlice(line); try output.appendSlice("\n"); continue; diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index c506e23f90..ba2c084e24 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -822,7 +822,7 @@ fn runCommand( .zig_test => { const prefix: []const u8 = p: { if (result.stdio.test_metadata) |tm| { - if (tm.next_index <= tm.names.len) { + if (tm.next_index > 0 and tm.next_index <= tm.names.len) { const name = tm.testName(tm.next_index - 1); break :p b.fmt("while executing test '{s}', ", .{name}); } diff --git a/lib/std/Uri.zig b/lib/std/Uri.zig index b0bb3047cb..2cf0f7a46b 100644 --- a/lib/std/Uri.zig +++ b/lib/std/Uri.zig @@ -216,6 +216,7 @@ pub fn format( const needs_absolute = comptime std.mem.indexOf(u8, fmt, "+") != null; const needs_path = comptime std.mem.indexOf(u8, fmt, "/") != null or fmt.len == 0; + const needs_fragment = comptime std.mem.indexOf(u8, fmt, "#") != null; if (needs_absolute) { try writer.writeAll(uri.scheme); @@ -253,9 +254,11 @@ pub fn format( try Uri.writeEscapedQuery(writer, q); } - if (uri.fragment) |f| { - try writer.writeAll("#"); - try Uri.writeEscapedQuery(writer, f); + if (needs_fragment) { + if (uri.fragment) |f| { + try writer.writeAll("#"); + try Uri.writeEscapedQuery(writer, f); + } } } } diff --git a/lib/std/c/darwin.zig b/lib/std/c/darwin.zig index e5ee8a432a..a29f6303f7 100644 --- a/lib/std/c/darwin.zig +++ b/lib/std/c/darwin.zig @@ -3969,6 +3969,22 @@ pub const host_cpu_load_info = extern struct { pub const host_cpu_load_info_data_t = host_cpu_load_info; pub const host_cpu_load_info_t = *host_cpu_load_info; +pub const host_load_info = extern struct { + avenrun: [3]integer_t, + mach_factor: [3]integer_t, +}; + +pub const host_load_info_data_t = host_load_info; +pub const host_load_info_t = *host_load_info; + +pub const host_preferred_user_arch = extern struct { + cpu_type: cpu_type_t, + cpu_subtype: cpu_subtype_t, +}; + +pub const host_preferred_user_arch_data_t = host_preferred_user_arch; +pub const host_preferred_user_arch_t = *host_preferred_user_arch; + pub const HOST = struct { pub const BASIC_INFO = 1; pub const SCHED_INFO = 3; @@ -3985,6 +4001,8 @@ pub const HOST = struct { pub const RESOURCES_SIZES_COUNT = @intCast(mach_msg_type_number_t, @sizeOf(kernel_resource_sizes_data_t) / @sizeOf(integer_t)); pub const PRIORITY_INFO_COUNT = @intCast(mach_msg_type_number_t, @sizeOf(host_priority_info_data_t) / @sizeOf(integer_t)); pub const CPU_LOAD_INFO_COUNT = @intCast(mach_msg_type_number_t, @sizeOf(host_cpu_load_info_data_t) / @sizeOf(integer_t)); + pub const LOAD_INFO_COUNT = @intCast(mach_msg_type_number_t, @sizeOf(host_load_info_data_t) / @sizeOf(integer_t)); + pub const PREFERRED_USER_ARCH_COUNT = @intCast(mach_msg_type_number_t, @sizeOf(host_preferred_user_arch_data_t) / @sizeOf(integer_t)); }; pub const host_basic_info = packed struct(u32) { diff --git a/lib/std/c/freebsd.zig b/lib/std/c/freebsd.zig index 
3b93ccc0c0..5fcb247af3 100644 --- a/lib/std/c/freebsd.zig +++ b/lib/std/c/freebsd.zig @@ -2607,3 +2607,31 @@ pub const domainset = extern struct { pub extern "c" fn cpuset_getdomain(level: cpulevel_t, which: cpuwhich_t, id: id_t, len: usize, domain: *domainset_t, r: *c_int) c_int; pub extern "c" fn cpuset_setdomain(level: cpulevel_t, which: cpuwhich_t, id: id_t, len: usize, domain: *const domainset_t, r: c_int) c_int; + +const ioctl_cmd = enum(u32) { + VOID = 0x20000000, + OUT = 0x40000000, + IN = 0x80000000, + INOUT = ioctl_cmd.IN | ioctl_cmd.OUT, + DIRMASK = ioctl_cmd.VOID | ioctl_cmd.IN | ioctl_cmd.OUT, +}; + +fn ioImpl(cmd: ioctl_cmd, op: u8, nr: u8, comptime IT: type) u32 { + return @bitCast(u32, @enumToInt(cmd) | @intCast(u32, @truncate(u8, @sizeOf(IT))) << 16 | @intCast(u32, op) << 8 | nr); +} + +pub fn IO(op: u8, nr: u8) u32 { + return ioImpl(ioctl_cmd.VOID, op, nr, 0); +} + +pub fn IOR(op: u8, nr: u8, comptime IT: type) u32 { + return ioImpl(ioctl_cmd.OUT, op, nr, @sizeOf(IT)); +} + +pub fn IOW(op: u8, nr: u8, comptime IT: type) u32 { + return ioImpl(ioctl_cmd.IN, op, nr, @sizeOf(IT)); +} + +pub fn IOWR(op: u8, nr: u8, comptime IT: type) u32 { + return ioImpl(ioctl_cmd.INOUT, op, nr, @sizeOf(IT)); +} diff --git a/lib/std/enums.zig b/lib/std/enums.zig index 8e67c358b7..aa6edd60b1 100644 --- a/lib/std/enums.zig +++ b/lib/std/enums.zig @@ -48,6 +48,22 @@ pub fn values(comptime E: type) []const E { return comptime valuesFromFields(E, @typeInfo(E).Enum.fields); } +/// A safe alternative to @tagName() for non-exhaustive enums that doesn't +/// panic when `e` has no tagged value. +/// Returns the tag name for `e` or null if no tag exists. +pub fn tagName(comptime E: type, e: E) ?[]const u8 { + return inline for (@typeInfo(E).Enum.fields) |f| { + if (@enumToInt(e) == f.value) break f.name; + } else null; +} + +test tagName { + const E = enum(u8) { a, b, _ }; + try testing.expect(tagName(E, .a) != null); + try testing.expectEqualStrings("a", tagName(E, .a).?); + try testing.expect(tagName(E, @intToEnum(E, 42)) == null); +} + /// Determines the length of a direct-mapped enum array, indexed by /// @intCast(usize, @enumToInt(enum_value)). /// If the enum is non-exhaustive, the resulting length will only be enough @@ -731,13 +747,7 @@ pub fn EnumArray(comptime E: type, comptime V: type) type { return IndexedArray(EnumIndexer(E), V, mixin.EnumArrayExt); } -/// Pass this function as the Ext parameter to Indexed* if you -/// do not want to attach any extensions. This parameter was -/// originally an optional, but optional generic functions -/// seem to be broken at the moment. -/// TODO: Once #8169 is fixed, consider switching this param -/// back to an optional. -pub fn NoExtension(comptime Self: type) type { +fn NoExtension(comptime Self: type) type { _ = Self; return NoExt; } @@ -746,12 +756,12 @@ const NoExt = struct {}; /// A set type with an Indexer mapping from keys to indices. /// Presence or absence is stored as a dense bitfield. This /// type does no allocation and can be copied by value. -pub fn IndexedSet(comptime I: type, comptime Ext: fn (type) type) type { +pub fn IndexedSet(comptime I: type, comptime Ext: ?fn (type) type) type { comptime ensureIndexer(I); return struct { const Self = @This(); - pub usingnamespace Ext(Self); + pub usingnamespace (Ext orelse NoExtension)(Self); /// The indexing rules for converting between keys and indices. pub const Indexer = I; @@ -991,12 +1001,12 @@ test "std.enums.EnumSet const iterator" { /// A map from keys to values, using an index lookup. 
Uses a /// bitfield to track presence and a dense array of values. /// This type does no allocation and can be copied by value. -pub fn IndexedMap(comptime I: type, comptime V: type, comptime Ext: fn (type) type) type { +pub fn IndexedMap(comptime I: type, comptime V: type, comptime Ext: ?fn (type) type) type { comptime ensureIndexer(I); return struct { const Self = @This(); - pub usingnamespace Ext(Self); + pub usingnamespace (Ext orelse NoExtension)(Self); /// The index mapping for this map pub const Indexer = I; @@ -1151,12 +1161,12 @@ pub fn IndexedMap(comptime I: type, comptime V: type, comptime Ext: fn (type) ty /// A dense array of values, using an indexed lookup. /// This type does no allocation and can be copied by value. -pub fn IndexedArray(comptime I: type, comptime V: type, comptime Ext: fn (type) type) type { +pub fn IndexedArray(comptime I: type, comptime V: type, comptime Ext: ?fn (type) type) type { comptime ensureIndexer(I); return struct { const Self = @This(); - pub usingnamespace Ext(Self); + pub usingnamespace (Ext orelse NoExtension)(Self); /// The index mapping for this map pub const Indexer = I; diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig index c3ccd75d27..41add83c3b 100644 --- a/lib/std/fmt.zig +++ b/lib/std/fmt.zig @@ -1699,7 +1699,7 @@ pub const ParseIntError = error{ /// The result cannot fit in the type specified Overflow, - /// The input was empty or had a byte that was not a digit + /// The input was empty or contained an invalid character InvalidCharacter, }; @@ -1905,6 +1905,56 @@ test "parseUnsigned" { try std.testing.expectError(error.InvalidCharacter, parseUnsigned(u8, "", 10)); } +/// Parses a number like '2G', '2Gi', or '2GiB'. +pub fn parseIntSizeSuffix(buf: []const u8, radix: u8) ParseIntError!usize { + var without_B = buf; + if (mem.endsWith(u8, buf, "B")) without_B.len -= 1; + var without_i = without_B; + var base: usize = 1000; + if (mem.endsWith(u8, without_B, "i")) { + without_i.len -= 1; + base = 1024; + } + if (without_i.len == 0) return error.InvalidCharacter; + const orders_of_magnitude: usize = switch (without_i[without_i.len - 1]) { + 'k', 'K' => 1, + 'M' => 2, + 'G' => 3, + 'T' => 4, + 'P' => 5, + 'E' => 6, + 'Z' => 7, + 'Y' => 8, + 'R' => 9, + 'Q' => 10, + else => 0, + }; + var without_suffix = without_i; + if (orders_of_magnitude > 0) { + without_suffix.len -= 1; + } else if (without_i.len != without_B.len) { + return error.InvalidCharacter; + } + const multiplier = math.powi(usize, base, orders_of_magnitude) catch |err| switch (err) { + error.Underflow => unreachable, + error.Overflow => return error.Overflow, + }; + const number = try std.fmt.parseInt(usize, without_suffix, radix); + return math.mul(usize, number, multiplier); +} + +test "parseIntSizeSuffix" { + try std.testing.expect(try parseIntSizeSuffix("2", 10) == 2); + try std.testing.expect(try parseIntSizeSuffix("2B", 10) == 2); + try std.testing.expect(try parseIntSizeSuffix("2kB", 10) == 2000); + try std.testing.expect(try parseIntSizeSuffix("2k", 10) == 2000); + try std.testing.expect(try parseIntSizeSuffix("2KiB", 10) == 2048); + try std.testing.expect(try parseIntSizeSuffix("2Ki", 10) == 2048); + try std.testing.expect(try parseIntSizeSuffix("aKiB", 16) == 10240); + try std.testing.expect(parseIntSizeSuffix("", 10) == error.InvalidCharacter); + try std.testing.expect(parseIntSizeSuffix("2iB", 10) == error.InvalidCharacter); +} + pub const parseFloat = @import("fmt/parse_float.zig").parseFloat; pub const ParseFloatError = @import("fmt/parse_float.zig").ParseFloatError; 
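The std.fmt.parseIntSizeSuffix helper added above is what the build runner change at the top of this diff now calls to parse its max RSS argument. A minimal usage sketch (not part of the diff; it only assumes a compiler with this patch applied) would be:

const std = @import("std");

test "parseIntSizeSuffix usage sketch" {
    // A plain number or a trailing "B" is taken as a byte count.
    try std.testing.expectEqual(@as(usize, 2), try std.fmt.parseIntSizeSuffix("2B", 10));
    // Decimal suffixes (k, M, G, ...) multiply by powers of 1000.
    try std.testing.expectEqual(@as(usize, 2_000_000_000), try std.fmt.parseIntSizeSuffix("2GB", 10));
    // "i" variants (Ki, Mi, Gi, ...) multiply by powers of 1024 instead.
    try std.testing.expectEqual(@as(usize, 2 * 1024 * 1024 * 1024), try std.fmt.parseIntSizeSuffix("2GiB", 10));
    // An "i" without a magnitude letter is rejected.
    try std.testing.expectError(error.InvalidCharacter, std.fmt.parseIntSizeSuffix("2iB", 10));
}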
diff --git a/lib/std/http.zig b/lib/std/http.zig index 744615d7d7..e9c62705b5 100644 --- a/lib/std/http.zig +++ b/lib/std/http.zig @@ -275,5 +275,4 @@ test { _ = Client; _ = Method; _ = Status; - _ = @import("http/test.zig"); } diff --git a/lib/std/http/Client.zig b/lib/std/http/Client.zig index 42ef766bd3..023bdd28bc 100644 --- a/lib/std/http/Client.zig +++ b/lib/std/http/Client.zig @@ -71,7 +71,7 @@ pub const ConnectionPool = struct { while (next) |node| : (next = node.prev) { if ((node.data.buffered.conn.protocol == .tls) != criteria.is_tls) continue; if (node.data.port != criteria.port) continue; - if (mem.eql(u8, node.data.host, criteria.host)) continue; + if (!mem.eql(u8, node.data.host, criteria.host)) continue; pool.acquireUnsafe(node); return node; @@ -251,47 +251,50 @@ pub const Connection = struct { /// A buffered (and peekable) Connection. pub const BufferedConnection = struct { - pub const buffer_size = 0x2000; + pub const buffer_size = std.crypto.tls.max_ciphertext_record_len; conn: Connection, - buf: [buffer_size]u8 = undefined, - start: u16 = 0, - end: u16 = 0, + read_buf: [buffer_size]u8 = undefined, + read_start: u16 = 0, + read_end: u16 = 0, + + write_buf: [buffer_size]u8 = undefined, + write_end: u16 = 0, pub fn fill(bconn: *BufferedConnection) ReadError!void { - if (bconn.end != bconn.start) return; + if (bconn.read_end != bconn.read_start) return; - const nread = try bconn.conn.read(bconn.buf[0..]); + const nread = try bconn.conn.read(bconn.read_buf[0..]); if (nread == 0) return error.EndOfStream; - bconn.start = 0; - bconn.end = @truncate(u16, nread); + bconn.read_start = 0; + bconn.read_end = @intCast(u16, nread); } pub fn peek(bconn: *BufferedConnection) []const u8 { - return bconn.buf[bconn.start..bconn.end]; + return bconn.read_buf[bconn.read_start..bconn.read_end]; } pub fn clear(bconn: *BufferedConnection, num: u16) void { - bconn.start += num; + bconn.read_start += num; } pub fn readAtLeast(bconn: *BufferedConnection, buffer: []u8, len: usize) ReadError!usize { var out_index: u16 = 0; while (out_index < len) { - const available = bconn.end - bconn.start; + const available = bconn.read_end - bconn.read_start; const left = buffer.len - out_index; if (available > 0) { - const can_read = @truncate(u16, @min(available, left)); + const can_read = @intCast(u16, @min(available, left)); - @memcpy(buffer[out_index..][0..can_read], bconn.buf[bconn.start..][0..can_read]); + @memcpy(buffer[out_index..][0..can_read], bconn.read_buf[bconn.read_start..][0..can_read]); out_index += can_read; - bconn.start += can_read; + bconn.read_start += can_read; continue; } - if (left > bconn.buf.len) { + if (left > bconn.read_buf.len) { // skip the buffer if the output is large enough return bconn.conn.read(buffer[out_index..]); } @@ -314,11 +317,30 @@ pub const BufferedConnection = struct { } pub fn writeAll(bconn: *BufferedConnection, buffer: []const u8) WriteError!void { - return bconn.conn.writeAll(buffer); + if (bconn.write_buf.len - bconn.write_end >= buffer.len) { + @memcpy(bconn.write_buf[bconn.write_end..][0..buffer.len], buffer); + bconn.write_end += @intCast(u16, buffer.len); + } else { + try bconn.flush(); + try bconn.conn.writeAll(buffer); + } } pub fn write(bconn: *BufferedConnection, buffer: []const u8) WriteError!usize { - return bconn.conn.write(buffer); + if (bconn.write_buf.len - bconn.write_end >= buffer.len) { + @memcpy(bconn.write_buf[bconn.write_end..][0..buffer.len], buffer); + bconn.write_end += @intCast(u16, buffer.len); + + return buffer.len; + } else { + try 
bconn.flush(); + return try bconn.conn.write(buffer); + } + } + + pub fn flush(bconn: *BufferedConnection) WriteError!void { + defer bconn.write_end = 0; + return bconn.conn.writeAll(bconn.write_buf[0..bconn.write_end]); } pub const WriteError = Connection.WriteError; @@ -355,8 +377,6 @@ pub const Compression = union(enum) { /// A HTTP response originating from a server. pub const Response = struct { pub const ParseError = Allocator.Error || error{ - ShortHttpStatusLine, - BadHttpVersion, HttpHeadersInvalid, HttpHeaderContinuationsUnsupported, HttpTransferEncodingUnsupported, @@ -370,12 +390,12 @@ pub const Response = struct { const first_line = it.next() orelse return error.HttpHeadersInvalid; if (first_line.len < 12) - return error.ShortHttpStatusLine; + return error.HttpHeadersInvalid; const version: http.Version = switch (int64(first_line[0..8])) { int64("HTTP/1.0") => .@"HTTP/1.0", int64("HTTP/1.1") => .@"HTTP/1.1", - else => return error.BadHttpVersion, + else => return error.HttpHeadersInvalid, }; if (first_line[8] != ' ') return error.HttpHeadersInvalid; const status = @intToEnum(http.Status, parseInt3(first_line[9..12].*)); @@ -569,8 +589,7 @@ pub const Request = struct { /// Send the request to the server. pub fn start(req: *Request) StartError!void { - var buffered = std.io.bufferedWriter(req.connection.data.buffered.writer()); - const w = buffered.writer(); + const w = req.connection.data.buffered.writer(); try w.writeAll(@tagName(req.method)); try w.writeByte(' '); @@ -644,7 +663,7 @@ pub const Request = struct { try w.writeAll("\r\n"); - try buffered.flush(); + try req.connection.data.buffered.flush(); } pub const TransferReadError = BufferedConnection.ReadError || proto.HeadersParser.ReadError; @@ -695,16 +714,16 @@ pub const Request = struct { if (req.method == .CONNECT and req.response.status == .ok) { req.connection.data.closing = false; - req.connection.data.proxied = true; req.response.parser.done = true; } + // we default to using keep-alive if not provided const req_connection = req.headers.getFirstValue("connection"); const req_keepalive = req_connection != null and !std.ascii.eqlIgnoreCase("close", req_connection.?); const res_connection = req.response.headers.getFirstValue("connection"); const res_keepalive = res_connection != null and !std.ascii.eqlIgnoreCase("close", res_connection.?); - if (req_keepalive and res_keepalive) { + if (res_keepalive and (req_keepalive or req_connection == null)) { req.connection.data.closing = false; } else { req.connection.data.closing = true; @@ -725,6 +744,11 @@ pub const Request = struct { req.response.parser.done = true; } + // HEAD requests have no body + if (req.method == .HEAD) { + req.response.parser.done = true; + } + if (req.transfer_encoding == .none and req.response.status.class() == .redirect and req.handle_redirects) { req.response.skip = true; @@ -866,6 +890,8 @@ pub const Request = struct { .content_length => |len| if (len != 0) return error.MessageNotCompleted, .none => {}, } + + try req.connection.data.buffered.flush(); } }; diff --git a/lib/std/http/Headers.zig b/lib/std/http/Headers.zig index 376fd60b61..429df9368a 100644 --- a/lib/std/http/Headers.zig +++ b/lib/std/http/Headers.zig @@ -36,17 +36,6 @@ pub const Field = struct { name: []const u8, value: []const u8, - pub fn modify(entry: *Field, allocator: Allocator, new_value: []const u8) !void { - if (entry.value.len <= new_value.len) { - // TODO: eliminate this use of `@constCast`. 
- @memcpy(@constCast(entry.value)[0..new_value.len], new_value); - } else { - allocator.free(entry.value); - - entry.value = try allocator.dupe(u8, new_value); - } - } - fn lessThan(ctx: void, a: Field, b: Field) bool { _ = ctx; if (a.name.ptr == b.name.ptr) return false; diff --git a/lib/std/http/Server.zig b/lib/std/http/Server.zig index c7f2a86c27..6b5db6725f 100644 --- a/lib/std/http/Server.zig +++ b/lib/std/http/Server.zig @@ -95,47 +95,50 @@ pub const Connection = struct { /// A buffered (and peekable) Connection. pub const BufferedConnection = struct { - pub const buffer_size = 0x2000; + pub const buffer_size = std.crypto.tls.max_ciphertext_record_len; conn: Connection, - buf: [buffer_size]u8 = undefined, - start: u16 = 0, - end: u16 = 0, + read_buf: [buffer_size]u8 = undefined, + read_start: u16 = 0, + read_end: u16 = 0, + + write_buf: [buffer_size]u8 = undefined, + write_end: u16 = 0, pub fn fill(bconn: *BufferedConnection) ReadError!void { - if (bconn.end != bconn.start) return; + if (bconn.read_end != bconn.read_start) return; - const nread = try bconn.conn.read(bconn.buf[0..]); + const nread = try bconn.conn.read(bconn.read_buf[0..]); if (nread == 0) return error.EndOfStream; - bconn.start = 0; - bconn.end = @truncate(u16, nread); + bconn.read_start = 0; + bconn.read_end = @intCast(u16, nread); } pub fn peek(bconn: *BufferedConnection) []const u8 { - return bconn.buf[bconn.start..bconn.end]; + return bconn.read_buf[bconn.read_start..bconn.read_end]; } pub fn clear(bconn: *BufferedConnection, num: u16) void { - bconn.start += num; + bconn.read_start += num; } pub fn readAtLeast(bconn: *BufferedConnection, buffer: []u8, len: usize) ReadError!usize { var out_index: u16 = 0; while (out_index < len) { - const available = bconn.end - bconn.start; + const available = bconn.read_end - bconn.read_start; const left = buffer.len - out_index; if (available > 0) { - const can_read = @truncate(u16, @min(available, left)); + const can_read = @intCast(u16, @min(available, left)); - @memcpy(buffer[out_index..][0..can_read], bconn.buf[bconn.start..][0..can_read]); + @memcpy(buffer[out_index..][0..can_read], bconn.read_buf[bconn.read_start..][0..can_read]); out_index += can_read; - bconn.start += can_read; + bconn.read_start += can_read; continue; } - if (left > bconn.buf.len) { + if (left > bconn.read_buf.len) { // skip the buffer if the output is large enough return bconn.conn.read(buffer[out_index..]); } @@ -158,11 +161,30 @@ pub const BufferedConnection = struct { } pub fn writeAll(bconn: *BufferedConnection, buffer: []const u8) WriteError!void { - return bconn.conn.writeAll(buffer); + if (bconn.write_buf.len - bconn.write_end >= buffer.len) { + @memcpy(bconn.write_buf[bconn.write_end..][0..buffer.len], buffer); + bconn.write_end += @intCast(u16, buffer.len); + } else { + try bconn.flush(); + try bconn.conn.writeAll(buffer); + } } pub fn write(bconn: *BufferedConnection, buffer: []const u8) WriteError!usize { - return bconn.conn.write(buffer); + if (bconn.write_buf.len - bconn.write_end >= buffer.len) { + @memcpy(bconn.write_buf[bconn.write_end..][0..buffer.len], buffer); + bconn.write_end += @intCast(u16, buffer.len); + + return buffer.len; + } else { + try bconn.flush(); + return try bconn.conn.write(buffer); + } + } + + pub fn flush(bconn: *BufferedConnection) WriteError!void { + defer bconn.write_end = 0; + return bconn.conn.writeAll(bconn.write_buf[0..bconn.write_end]); } pub const WriteError = Connection.WriteError; @@ -199,8 +221,6 @@ pub const Compression = union(enum) { /// A HTTP 
request originating from a client. pub const Request = struct { pub const ParseError = Allocator.Error || error{ - ShortHttpStatusLine, - BadHttpVersion, UnknownHttpMethod, HttpHeadersInvalid, HttpHeaderContinuationsUnsupported, @@ -215,7 +235,7 @@ pub const Request = struct { const first_line = it.next() orelse return error.HttpHeadersInvalid; if (first_line.len < 10) - return error.ShortHttpStatusLine; + return error.HttpHeadersInvalid; const method_end = mem.indexOfScalar(u8, first_line, ' ') orelse return error.HttpHeadersInvalid; const method_str = first_line[0..method_end]; @@ -229,7 +249,7 @@ pub const Request = struct { const version: http.Version = switch (int64(version_str[0..8])) { int64("HTTP/1.0") => .@"HTTP/1.0", int64("HTTP/1.1") => .@"HTTP/1.1", - else => return error.BadHttpVersion, + else => return error.HttpHeadersInvalid, }; const target = first_line[method_end + 1 .. version_start]; @@ -312,7 +332,7 @@ pub const Request = struct { transfer_encoding: ?http.TransferEncoding = null, transfer_compression: ?http.ContentEncoding = null, - headers: http.Headers = undefined, + headers: http.Headers, parser: proto.HeadersParser, compression: Compression = .none, }; @@ -329,21 +349,63 @@ pub const Response = struct { transfer_encoding: ResponseTransfer = .none, - server: *Server, + allocator: Allocator, address: net.Address, connection: BufferedConnection, headers: http.Headers, request: Request, + state: State = .first, + + const State = enum { + first, + start, + waited, + responded, + finished, + }; + pub fn deinit(res: *Response) void { - res.server.allocator.destroy(res); + res.connection.close(); + + res.headers.deinit(); + res.request.headers.deinit(); + + if (res.request.parser.header_bytes_owned) { + res.request.parser.header_bytes.deinit(res.allocator); + } } + pub const ResetState = enum { reset, closing }; + /// Reset this response to its initial state. This must be called before handling a second request on the same connection. - pub fn reset(res: *Response) void { - res.request.headers.deinit(); - res.headers.deinit(); + pub fn reset(res: *Response) ResetState { + if (res.state == .first) { + res.state = .start; + return .reset; + } + + if (!res.request.parser.done) { + // If the response wasn't fully read, then we need to close the connection. + res.connection.conn.closing = true; + return .closing; + } + + // A connection is only keep-alive if the Connection header is present and it's value is not "close". + // The server and client must both agree + // + // do() defaults to using keep-alive if the client requests it. + const res_connection = res.headers.getFirstValue("connection"); + const res_keepalive = res_connection != null and !std.ascii.eqlIgnoreCase("close", res_connection.?); + + const req_connection = res.request.headers.getFirstValue("connection"); + const req_keepalive = req_connection != null and !std.ascii.eqlIgnoreCase("close", req_connection.?); + if (req_keepalive and (res_keepalive or res_connection == null)) { + res.connection.conn.closing = false; + } else { + res.connection.conn.closing = true; + } switch (res.request.compression) { .none => {}, @@ -352,19 +414,30 @@ pub const Response = struct { .zstd => |*zstd| zstd.deinit(), } - if (!res.request.parser.done) { - // If the response wasn't fully read, then we need to close the connection. 
- res.connection.conn.closing = true; - } + res.state = .start; + res.version = .@"HTTP/1.1"; + res.status = .ok; + res.reason = null; - if (res.connection.conn.closing) { - res.connection.close(); + res.transfer_encoding = .none; - if (res.request.parser.header_bytes_owned) { - res.request.parser.header_bytes.deinit(res.server.allocator); - } + res.headers.clearRetainingCapacity(); + + res.request.headers.clearAndFree(); // FIXME: figure out why `clearRetainingCapacity` causes a leak in hash_map here + res.request.parser.reset(); + + res.request = Request{ + .version = undefined, + .method = undefined, + .target = undefined, + .headers = res.request.headers, + .parser = res.request.parser, + }; + + if (res.connection.conn.closing) { + return .closing; } else { - res.request.parser.reset(); + return .reset; } } @@ -372,8 +445,12 @@ pub const Response = struct { /// Send the response headers. pub fn do(res: *Response) !void { - var buffered = std.io.bufferedWriter(res.connection.writer()); - const w = buffered.writer(); + switch (res.state) { + .waited => res.state = .responded, + .first, .start, .responded, .finished => unreachable, + } + + const w = res.connection.writer(); try w.writeAll(@tagName(res.version)); try w.writeByte(' '); @@ -391,7 +468,14 @@ pub const Response = struct { } if (!res.headers.contains("connection")) { - try w.writeAll("Connection: keep-alive\r\n"); + const req_connection = res.request.headers.getFirstValue("connection"); + const req_keepalive = req_connection != null and !std.ascii.eqlIgnoreCase("close", req_connection.?); + + if (req_keepalive) { + try w.writeAll("Connection: keep-alive\r\n"); + } else { + try w.writeAll("Connection: close\r\n"); + } } const has_transfer_encoding = res.headers.contains("transfer-encoding"); @@ -424,7 +508,7 @@ pub const Response = struct { try w.writeAll("\r\n"); - try buffered.flush(); + try res.connection.flush(); } pub const TransferReadError = BufferedConnection.ReadError || proto.HeadersParser.ReadError; @@ -452,29 +536,23 @@ pub const Response = struct { /// Wait for the client to send a complete request head. 
pub fn wait(res: *Response) WaitError!void { + switch (res.state) { + .first, .start => res.state = .waited, + .waited, .responded, .finished => unreachable, + } + while (true) { try res.connection.fill(); - const nchecked = try res.request.parser.checkCompleteHead(res.server.allocator, res.connection.peek()); + const nchecked = try res.request.parser.checkCompleteHead(res.allocator, res.connection.peek()); res.connection.clear(@intCast(u16, nchecked)); if (res.request.parser.state.isContent()) break; } - res.request.headers = .{ .allocator = res.server.allocator, .owned = true }; + res.request.headers = .{ .allocator = res.allocator, .owned = true }; try res.request.parse(res.request.parser.header_bytes.items); - const res_connection = res.headers.getFirstValue("connection"); - const res_keepalive = res_connection != null and !std.ascii.eqlIgnoreCase("close", res_connection.?); - - const req_connection = res.request.headers.getFirstValue("connection"); - const req_keepalive = req_connection != null and !std.ascii.eqlIgnoreCase("close", req_connection.?); - if (res_keepalive and req_keepalive) { - res.connection.conn.closing = false; - } else { - res.connection.conn.closing = true; - } - if (res.request.transfer_encoding) |te| { switch (te) { .chunked => { @@ -494,13 +572,13 @@ pub const Response = struct { if (res.request.transfer_compression) |tc| switch (tc) { .compress => return error.CompressionNotSupported, .deflate => res.request.compression = .{ - .deflate = std.compress.zlib.zlibStream(res.server.allocator, res.transferReader()) catch return error.CompressionInitializationFailed, + .deflate = std.compress.zlib.zlibStream(res.allocator, res.transferReader()) catch return error.CompressionInitializationFailed, }, .gzip => res.request.compression = .{ - .gzip = std.compress.gzip.decompress(res.server.allocator, res.transferReader()) catch return error.CompressionInitializationFailed, + .gzip = std.compress.gzip.decompress(res.allocator, res.transferReader()) catch return error.CompressionInitializationFailed, }, .zstd => res.request.compression = .{ - .zstd = std.compress.zstd.decompressStream(res.server.allocator, res.transferReader()), + .zstd = std.compress.zstd.decompressStream(res.allocator, res.transferReader()), }, }; } @@ -515,6 +593,11 @@ pub const Response = struct { } pub fn read(res: *Response, buffer: []u8) ReadError!usize { + switch (res.state) { + .waited, .responded, .finished => {}, + .first, .start => unreachable, + } + const out_index = switch (res.request.compression) { .deflate => |*deflate| deflate.read(buffer) catch return error.DecompressionFailure, .gzip => |*gzip| gzip.read(buffer) catch return error.DecompressionFailure, @@ -528,12 +611,12 @@ pub const Response = struct { while (!res.request.parser.state.isContent()) { // read trailing headers try res.connection.fill(); - const nchecked = try res.request.parser.checkCompleteHead(res.server.allocator, res.connection.peek()); + const nchecked = try res.request.parser.checkCompleteHead(res.allocator, res.connection.peek()); res.connection.clear(@intCast(u16, nchecked)); } if (has_trail) { - res.request.headers = http.Headers{ .allocator = res.server.allocator, .owned = false }; + res.request.headers = http.Headers{ .allocator = res.allocator, .owned = false }; // The response headers before the trailers are already guaranteed to be valid, so they will always be parsed again and cannot return an error. // This will *only* fail for a malformed trailer. 
@@ -564,6 +647,11 @@ pub const Response = struct { /// Write `bytes` to the server. The `transfer_encoding` request header determines how data will be sent. pub fn write(res: *Response, bytes: []const u8) WriteError!usize { + switch (res.state) { + .responded => {}, + .first, .waited, .start, .finished => unreachable, + } + switch (res.transfer_encoding) { .chunked => { try res.connection.writer().print("{x}\r\n", .{bytes.len}); @@ -583,7 +671,7 @@ pub const Response = struct { } } - pub fn writeAll(req: *Request, bytes: []const u8) WriteError!void { + pub fn writeAll(req: *Response, bytes: []const u8) WriteError!void { var index: usize = 0; while (index < bytes.len) { index += try write(req, bytes[index..]); @@ -594,11 +682,18 @@ pub const Response = struct { /// Finish the body of a request. This notifies the server that you have no more data to send. pub fn finish(res: *Response) FinishError!void { + switch (res.state) { + .responded => res.state = .finished, + .first, .waited, .start, .finished => unreachable, + } + switch (res.transfer_encoding) { .chunked => try res.connection.writeAll("0\r\n\r\n"), .content_length => |len| if (len != 0) return error.MessageNotCompleted, .none => {}, } + + try res.connection.flush(); } }; @@ -635,31 +730,34 @@ pub const HeaderStrategy = union(enum) { static: []u8, }; -/// Accept a new connection and allocate a Response for it. -pub fn accept(server: *Server, options: HeaderStrategy) AcceptError!*Response { +pub const AcceptOptions = struct { + allocator: Allocator, + header_strategy: HeaderStrategy = .{ .dynamic = 8192 }, +}; + +/// Accept a new connection. +pub fn accept(server: *Server, options: AcceptOptions) AcceptError!Response { const in = try server.socket.accept(); - const res = try server.allocator.create(Response); - res.* = .{ - .server = server, + return Response{ + .allocator = options.allocator, .address = in.address, .connection = .{ .conn = .{ .stream = in.stream, .protocol = .plain, } }, - .headers = .{ .allocator = server.allocator }, + .headers = .{ .allocator = options.allocator }, .request = .{ .version = undefined, .method = undefined, .target = undefined, - .parser = switch (options) { + .headers = .{ .allocator = options.allocator, .owned = false }, + .parser = switch (options.header_strategy) { .dynamic => |max| proto.HeadersParser.initDynamic(max), .static => |buf| proto.HeadersParser.initStatic(buf), }, }, }; - - return res; } test "HTTP server handles a chunked transfer coding request" { diff --git a/lib/std/http/test.zig b/lib/std/http/test.zig deleted file mode 100644 index ee164c297a..0000000000 --- a/lib/std/http/test.zig +++ /dev/null @@ -1,72 +0,0 @@ -const std = @import("std"); -const expect = std.testing.expect; - -test "client requests server" { - const builtin = @import("builtin"); - - // This test requires spawning threads. 
- if (builtin.single_threaded) { - return error.SkipZigTest; - } - - const native_endian = comptime builtin.cpu.arch.endian(); - if (builtin.zig_backend == .stage2_llvm and native_endian == .Big) { - // https://github.com/ziglang/zig/issues/13782 - return error.SkipZigTest; - } - - if (builtin.os.tag == .wasi) return error.SkipZigTest; - - const allocator = std.testing.allocator; - - const max_header_size = 8192; - var server = std.http.Server.init(allocator, .{ .reuse_address = true }); - defer server.deinit(); - - const address = try std.net.Address.parseIp("127.0.0.1", 0); - try server.listen(address); - const server_port = server.socket.listen_address.in.getPort(); - - const server_thread = try std.Thread.spawn(.{}, (struct { - fn apply(s: *std.http.Server) !void { - const res = try s.accept(.{ .dynamic = max_header_size }); - defer res.deinit(); - defer res.reset(); - try res.wait(); - - const server_body: []const u8 = "message from server!\n"; - res.transfer_encoding = .{ .content_length = server_body.len }; - try res.headers.append("content-type", "text/plain"); - try res.headers.append("connection", "close"); - try res.do(); - - var buf: [128]u8 = undefined; - const n = try res.readAll(&buf); - try expect(std.mem.eql(u8, buf[0..n], "Hello, World!\n")); - _ = try res.writer().writeAll(server_body); - try res.finish(); - } - }).apply, .{&server}); - - var uri_buf: [22]u8 = undefined; - const uri = try std.Uri.parse(try std.fmt.bufPrint(&uri_buf, "http://127.0.0.1:{d}", .{server_port})); - var client = std.http.Client{ .allocator = allocator }; - defer client.deinit(); - var client_headers = std.http.Headers{ .allocator = allocator }; - defer client_headers.deinit(); - var client_req = try client.request(.POST, uri, client_headers, .{}); - defer client_req.deinit(); - - client_req.transfer_encoding = .{ .content_length = 14 }; // this will be checked to ensure you sent exactly 14 bytes - try client_req.start(); // this sends the request - try client_req.writeAll("Hello, "); - try client_req.writeAll("World!\n"); - try client_req.finish(); - try client_req.wait(); // this waits for a response - - const body = try client_req.reader().readAllAlloc(allocator, 8192 * 1024); - defer allocator.free(body); - try expect(std.mem.eql(u8, body, "message from server!\n")); - - server_thread.join(); -} diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig index b01d9b04ff..686a3fdbda 100644 --- a/lib/std/math/big/int.zig +++ b/lib/std/math/big/int.zig @@ -1519,7 +1519,7 @@ pub const Mutable = struct { r.positive = r_positive; } - if (xy_trailing != 0) { + if (xy_trailing != 0 and r.limbs[r.len - 1] != 0) { // Manually shift here since we know its limb aligned. 
mem.copyBackwards(Limb, r.limbs[xy_trailing..], r.limbs[0..r.len]); @memset(r.limbs[0..xy_trailing], 0); diff --git a/lib/std/math/big/int_test.zig b/lib/std/math/big/int_test.zig index 25f9815f9d..0514453cf4 100644 --- a/lib/std/math/big/int_test.zig +++ b/lib/std/math/big/int_test.zig @@ -1373,6 +1373,19 @@ test "big.int div trunc single-single -/-" { try testing.expect((try r.to(i32)) == er); } +test "big.int divTrunc #15535" { + var one = try Managed.initSet(testing.allocator, 1); + defer one.deinit(); + var x = try Managed.initSet(testing.allocator, std.math.pow(u128, 2, 64)); + defer x.deinit(); + var r = try Managed.init(testing.allocator); + defer r.deinit(); + var q = try Managed.init(testing.allocator); + defer q.deinit(); + try q.divTrunc(&r, &x, &x); + try testing.expect(r.order(one) == std.math.Order.lt); +} + test "big.int divFloor #10932" { var a = try Managed.init(testing.allocator); defer a.deinit(); diff --git a/lib/std/mem.zig b/lib/std/mem.zig index 6cc60ddad2..aae5c1b617 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -1013,6 +1013,54 @@ pub fn indexOfAnyPos(comptime T: type, slice: []const T, start_index: usize, val return null; } +/// Find the first item in `slice` which is not contained in `values`. +/// +/// Comparable to `strspn` in the C standard library. +pub fn indexOfNone(comptime T: type, slice: []const T, values: []const T) ?usize { + return indexOfNonePos(T, slice, 0, values); +} + +/// Find the last item in `slice` which is not contained in `values`. +/// +/// Like `strspn` in the C standard library, but searches from the end. +pub fn lastIndexOfNone(comptime T: type, slice: []const T, values: []const T) ?usize { + var i: usize = slice.len; + outer: while (i != 0) { + i -= 1; + for (values) |value| { + if (slice[i] == value) continue :outer; + } + return i; + } + return null; +} + +/// Find the first item in `slice[start_index..]` which is not contained in `values`. +/// The returned index will be relative to the start of `slice`, and never less than `start_index`. +/// +/// Comparable to `strspn` in the C standard library. +pub fn indexOfNonePos(comptime T: type, slice: []const T, start_index: usize, values: []const T) ?usize { + var i: usize = start_index; + outer: while (i < slice.len) : (i += 1) { + for (values) |value| { + if (slice[i] == value) continue :outer; + } + return i; + } + return null; +} + +test "indexOfNone" { + try testing.expect(indexOfNone(u8, "abc123", "123").? == 0); + try testing.expect(lastIndexOfNone(u8, "abc123", "123").? == 2); + try testing.expect(indexOfNone(u8, "123abc", "123").? == 3); + try testing.expect(lastIndexOfNone(u8, "123abc", "123").? 
== 5); + try testing.expect(indexOfNone(u8, "123123", "123") == null); + try testing.expect(indexOfNone(u8, "333333", "123") == null); + + try testing.expect(indexOfNonePos(u8, "abc123", 3, "321") == null); +} + pub fn indexOf(comptime T: type, haystack: []const T, needle: []const T) ?usize { return indexOfPos(T, haystack, 0, needle); } diff --git a/lib/std/meta.zig b/lib/std/meta.zig index 7be3b71347..cd83061d53 100644 --- a/lib/std/meta.zig +++ b/lib/std/meta.zig @@ -14,48 +14,7 @@ test { _ = TrailerFlags; } -pub fn tagName(v: anytype) []const u8 { - const T = @TypeOf(v); - switch (@typeInfo(T)) { - .ErrorSet => return @errorName(v), - else => return @tagName(v), - } -} - -test "std.meta.tagName" { - const E1 = enum { - A, - B, - }; - const E2 = enum(u8) { - C = 33, - D, - }; - const U1 = union(enum) { - G: u8, - H: u16, - }; - const U2 = union(E2) { - C: u8, - D: u16, - }; - - var u1g = U1{ .G = 0 }; - var u1h = U1{ .H = 0 }; - var u2a = U2{ .C = 0 }; - var u2b = U2{ .D = 0 }; - - try testing.expect(mem.eql(u8, tagName(E1.A), "A")); - try testing.expect(mem.eql(u8, tagName(E1.B), "B")); - try testing.expect(mem.eql(u8, tagName(E2.C), "C")); - try testing.expect(mem.eql(u8, tagName(E2.D), "D")); - try testing.expect(mem.eql(u8, tagName(error.E), "E")); - try testing.expect(mem.eql(u8, tagName(error.F), "F")); - try testing.expect(mem.eql(u8, tagName(u1g), "G")); - try testing.expect(mem.eql(u8, tagName(u1h), "H")); - try testing.expect(mem.eql(u8, tagName(u2a), "C")); - try testing.expect(mem.eql(u8, tagName(u2b), "D")); -} +pub const tagName = @compileError("deprecated; use @tagName or @errorName directly"); /// Given an enum or tagged union, returns true if the comptime-supplied /// string matches the name of the tag value. This match process should @@ -487,14 +487,14 @@ typedef ptrdiff_t intptr_t; zig_basic_operator(uint##w##_t, div_floor_u##w, /) \ \ static inline int##w##_t zig_div_floor_i##w(int##w##_t lhs, int##w##_t rhs) { \ - return lhs / rhs - (((lhs ^ rhs) & (lhs % rhs)) < INT##w##_C(0)); \ + return lhs / rhs + (lhs % rhs != INT##w##_C(0) ? zig_shr_i##w(lhs ^ rhs, UINT8_C(w) - UINT8_C(1)) : INT##w##_C(0)); \ } \ \ zig_basic_operator(uint##w##_t, mod_u##w, %) \ \ static inline int##w##_t zig_mod_i##w(int##w##_t lhs, int##w##_t rhs) { \ int##w##_t rem = lhs % rhs; \ - return rem + (((lhs ^ rhs) & rem) < INT##w##_C(0) ? rhs : INT##w##_C(0)); \ + return rem + (rem != INT##w##_C(0) ? rhs & zig_shr_i##w(lhs ^ rhs, UINT8_C(w) - UINT8_C(1)) : INT##w##_C(0)); \ } \ \ static inline uint##w##_t zig_shlw_u##w(uint##w##_t lhs, uint8_t rhs, uint8_t bits) { \ @@ -1078,7 +1078,7 @@ static inline int64_t zig_bit_reverse_i64(int64_t val, uint8_t bits) { uint##w##_t temp = val - ((val >> 1) & (UINT##w##_MAX / 3)); \ temp = (temp & (UINT##w##_MAX / 5)) + ((temp >> 2) & (UINT##w##_MAX / 5)); \ temp = (temp + (temp >> 4)) & (UINT##w##_MAX / 17); \ - return temp * (UINT##w##_MAX / 255) >> (w - 8); \ + return temp * (UINT##w##_MAX / 255) >> (UINT8_C(w) - UINT8_C(8)); \ } \ \ zig_builtin_popcount_common(w) @@ -1298,15 +1298,6 @@ static inline zig_i128 zig_rem_i128(zig_i128 lhs, zig_i128 rhs) { return lhs % rhs; } -static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) { - return zig_div_trunc_i128(lhs, rhs) - (((lhs ^ rhs) & zig_rem_i128(lhs, rhs)) < zig_make_i128(0, 0)); -} - -static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) { - zig_i128 rem = zig_rem_i128(lhs, rhs); - return rem + (((lhs ^ rhs) & rem) < zig_make_i128(0, 0) ? 
rhs : zig_make_i128(0, 0)); -} - #else /* zig_has_int128 */ static inline zig_u128 zig_not_u128(zig_u128 val, uint8_t bits) { @@ -1394,20 +1385,26 @@ static zig_i128 zig_rem_i128(zig_i128 lhs, zig_i128 rhs) { return __modti3(lhs, rhs); } -static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) { - zig_i128 rem = zig_rem_i128(lhs, rhs); - return zig_add_i128(rem, ((lhs.hi ^ rhs.hi) & rem.hi) < INT64_C(0) ? rhs : zig_make_i128(0, 0)); -} +#endif /* zig_has_int128 */ + +#define zig_div_floor_u128 zig_div_trunc_u128 static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) { - return zig_sub_i128(zig_div_trunc_i128(lhs, rhs), zig_make_i128(0, zig_cmp_i128(zig_and_i128(zig_xor_i128(lhs, rhs), zig_rem_i128(lhs, rhs)), zig_make_i128(0, 0)) < INT32_C(0))); + zig_i128 rem = zig_rem_i128(lhs, rhs); + int64_t mask = zig_or_u64((uint64_t)zig_hi_i128(rem), zig_lo_i128(rem)) != UINT64_C(0) + ? zig_shr_i64(zig_xor_i64(zig_hi_i128(lhs), zig_hi_i128(rhs)), UINT8_C(63)) : INT64_C(0); + return zig_add_i128(zig_div_trunc_i128(lhs, rhs), zig_make_i128(mask, (uint64_t)mask)); } -#endif /* zig_has_int128 */ - -#define zig_div_floor_u128 zig_div_trunc_u128 #define zig_mod_u128 zig_rem_u128 +static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) { + zig_i128 rem = zig_rem_i128(lhs, rhs); + int64_t mask = zig_or_u64((uint64_t)zig_hi_i128(rem), zig_lo_i128(rem)) != UINT64_C(0) + ? zig_shr_i64(zig_xor_i64(zig_hi_i128(lhs), zig_hi_i128(rhs)), UINT8_C(63)) : INT64_C(0); + return zig_add_i128(rem, zig_and_i128(rhs, zig_make_i128(mask, (uint64_t)mask))); +} + static inline zig_u128 zig_min_u128(zig_u128 lhs, zig_u128 rhs) { return zig_cmp_u128(lhs, rhs) < INT32_C(0) ? lhs : rhs; } diff --git a/src/Autodoc.zig b/src/Autodoc.zig index f12544bde5..e5b8139e0d 100644 --- a/src/Autodoc.zig +++ b/src/Autodoc.zig @@ -2398,7 +2398,19 @@ fn walkInstruction( return DocData.WalkResult{ .typeRef = if (callee.typeRef) |tr| switch (tr) { - .type => |func_type_idx| self.types.items[func_type_idx].Fn.ret, + .type => |func_type_idx| switch (self.types.items[func_type_idx]) { + .Fn => |func| func.ret, + else => blk: { + printWithContext( + file, + inst_index, + "unexpected callee type in walkInstruction.call: `{s}`\n", + .{@tagName(self.types.items[func_type_idx])}, + ); + + break :blk null; + }, + }, else => null, } else null, .expr = .{ .call = call_slot_index }, diff --git a/src/Module.zig b/src/Module.zig index 6a33990463..b0c18def78 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6115,6 +6115,8 @@ pub const PeerTypeCandidateSrc = union(enum) { return null; }, .override => |candidate_srcs| { + if (candidate_i >= candidate_srcs.len) + return null; return candidate_srcs[candidate_i]; }, .typeof_builtin_call_node_offset => |node_offset| { diff --git a/src/Sema.zig b/src/Sema.zig index b2387dfe12..225a6c5bff 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -3401,8 +3401,8 @@ fn indexablePtrLen( ) CompileError!Air.Inst.Ref { const object_ty = sema.typeOf(object); const is_pointer_to = object_ty.isSinglePointer(); - const array_ty = if (is_pointer_to) object_ty.childType() else object_ty; - try checkIndexable(sema, block, src, array_ty); + const indexable_ty = if (is_pointer_to) object_ty.childType() else object_ty; + try checkIndexable(sema, block, src, indexable_ty); return sema.fieldVal(block, src, object, "len", src); } @@ -3413,7 +3413,7 @@ fn indexablePtrLenOrNone( object: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { const object_ty = sema.typeOf(object); - const array_ty = t: { + const indexable_ty = t: 
{ const ptr_size = object_ty.ptrSizeOrNull() orelse break :t object_ty; break :t switch (ptr_size) { .Many => return .none, @@ -3421,7 +3421,7 @@ fn indexablePtrLenOrNone( else => object_ty, }; }; - try checkIndexable(sema, block, src, array_ty); + try checkIndexable(sema, block, src, indexable_ty); return sema.fieldVal(block, src, object, "len", src); } @@ -3991,7 +3991,16 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. .input_index = i, } }; const arg_len_uncoerced = if (is_int) object else l: { - try checkIndexable(sema, block, arg_src, object_ty); + if (!object_ty.isIndexable()) { + // Instead of using checkIndexable we customize this error. + const msg = msg: { + const msg = try sema.errMsg(block, arg_src, "type '{}' is not indexable and not a range", .{object_ty.fmt(sema.mod)}); + errdefer msg.destroy(sema.gpa); + try sema.errNote(block, arg_src, msg, "for loop operand must be a range, array, slice, tuple, or vector", .{}); + break :msg msg; + }; + return sema.failWithOwnedErrorMsg(msg); + } if (!object_ty.indexableHasLen()) continue; break :l try sema.fieldVal(block, arg_src, object, "len", arg_src); @@ -19910,7 +19919,7 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return sema.failWithUseOfUndef(block, operand_src); } if (!dest_ty.ptrAllowsZero() and operand_val.isNull()) { - return sema.fail(block, operand_src, "null pointer casted to type {}", .{dest_ty.fmt(sema.mod)}); + return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)}); } if (dest_ty.zigTypeTag() == .Optional and sema.typeOf(ptr).zigTypeTag() != .Optional) { return sema.addConstant(dest_ty, try Value.Tag.opt_payload.create(sema.arena, operand_val)); @@ -22013,10 +22022,10 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const msg = msg: { const msg = try sema.errMsg(block, src, "unknown @memcpy length", .{}); errdefer msg.destroy(sema.gpa); - try sema.errNote(block, dest_src, msg, "destination type {} provides no length", .{ + try sema.errNote(block, dest_src, msg, "destination type '{}' provides no length", .{ sema.typeOf(dest_ptr).fmt(sema.mod), }); - try sema.errNote(block, src_src, msg, "source type {} provides no length", .{ + try sema.errNote(block, src_src, msg, "source type '{}' provides no length", .{ sema.typeOf(src_ptr).fmt(sema.mod), }); break :msg msg; @@ -22746,7 +22755,7 @@ fn resolveExternOptions( const payload = library_name_val.castTag(.opt_payload).?.data; const library_name = try payload.toAllocatedBytes(Type.initTag(.const_slice_u8), sema.arena, mod); if (library_name.len == 0) { - return sema.fail(block, library_src, "library name name cannot be empty", .{}); + return sema.fail(block, library_src, "library name cannot be empty", .{}); } break :blk try sema.handleExternLibName(block, library_src, library_name); } else null; @@ -24767,9 +24776,7 @@ fn elemPtr( .Pointer => indexable_ptr_ty.elemType(), else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(sema.mod)}), }; - if (!indexable_ty.isIndexable()) { - return sema.fail(block, src, "element access of non-indexable type '{}'", .{indexable_ty.fmt(sema.mod)}); - } + try checkIndexable(sema, block, src, indexable_ty); switch (indexable_ty.zigTypeTag()) { .Array, .Vector => return sema.elemPtrArray(block, src, indexable_ptr_src, indexable_ptr, elem_index_src, elem_index, init, oob_safety), @@ -24801,9 +24808,7 @@ fn elemPtrOneLayerOnly( const 
indexable_ty = sema.typeOf(indexable); const target = sema.mod.getTarget(); - if (!indexable_ty.isIndexable()) { - return sema.fail(block, src, "element access of non-indexable type '{}'", .{indexable_ty.fmt(sema.mod)}); - } + try checkIndexable(sema, block, src, indexable_ty); switch (indexable_ty.ptrSize()) { .Slice => return sema.elemPtrSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety), @@ -24824,7 +24829,7 @@ fn elemPtrOneLayerOnly( return block.addPtrElemPtr(indexable, elem_index, result_ty); }, .One => { - assert(indexable_ty.childType().zigTypeTag() == .Array); // Guaranteed by isIndexable + assert(indexable_ty.childType().zigTypeTag() == .Array); // Guaranteed by checkIndexable return sema.elemPtrArray(block, src, indexable_src, indexable, elem_index_src, elem_index, init, oob_safety); }, } @@ -24843,9 +24848,7 @@ fn elemVal( const indexable_ty = sema.typeOf(indexable); const target = sema.mod.getTarget(); - if (!indexable_ty.isIndexable()) { - return sema.fail(block, src, "element access of non-indexable type '{}'", .{indexable_ty.fmt(sema.mod)}); - } + try checkIndexable(sema, block, src, indexable_ty); // TODO in case of a vector of pointers, we need to detect whether the element // index is a scalar or vector instead of unconditionally casting to usize. @@ -24873,7 +24876,7 @@ fn elemVal( return block.addBinOp(.ptr_elem_val, indexable, elem_index); }, .One => { - assert(indexable_ty.childType().zigTypeTag() == .Array); // Guaranteed by isIndexable + assert(indexable_ty.childType().zigTypeTag() == .Array); // Guaranteed by checkIndexable const elem_ptr = try sema.elemPtr(block, indexable_src, indexable, elem_index, elem_index_src, false, oob_safety); return sema.analyzeLoad(block, indexable_src, elem_ptr, elem_index_src); }, @@ -30997,23 +31000,12 @@ fn checkBackingIntType(sema: *Sema, block: *Block, src: LazySrcLoc, backing_int_ } } -fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, array_ty: Type) !void { - if (!array_ty.isIndexable()) { +fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { + if (!ty.isIndexable()) { const msg = msg: { - const msg = try sema.errMsg( - block, - src, - "type '{}' does not support indexing", - .{array_ty.fmt(sema.mod)}, - ); + const msg = try sema.errMsg(block, src, "type '{}' does not support indexing", .{ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); - try sema.errNote( - block, - src, - msg, - "for loop operand must be an array, slice, tuple, or vector", - .{}, - ); + try sema.errNote(block, src, msg, "operand must be an array, slice, tuple, or vector", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 7da99de5c1..86b74b1429 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -6508,23 +6508,16 @@ fn airSplat(f: *Function, inst: Air.Inst.Index) !CValue { const inst_ty = f.air.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(); - const inst_scalar_cty = try f.typeToIndex(inst_scalar_ty, .complete); - const need_memcpy = f.indexToCType(inst_scalar_cty).tag() == .array; const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); const v = try Vectorize.start(f, inst, writer, inst_ty); - if (need_memcpy) try writer.writeAll("memcpy(&"); + const a = try Assignment.init(f, inst_scalar_ty); try f.writeCValue(writer, local, .Other); try v.elem(f, writer); - try writer.writeAll(if (need_memcpy) ", &" else " = "); + try a.assign(f, writer); try f.writeCValue(writer, 
operand, .Other); - if (need_memcpy) { - try writer.writeAll(", sizeof("); - try f.renderCType(writer, inst_scalar_cty); - try writer.writeAll("))"); - } - try writer.writeAll(";\n"); + try a.end(f, writer); try v.end(f, inst, writer); return local; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index d697a41988..991ac04573 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -7034,7 +7034,7 @@ pub const FuncGen = struct { const rhs = try self.resolveInst(bin_op.rhs); const scalar_ty = self.air.typeOfIndex(inst).scalarType(); - if (scalar_ty.isAnyFloat()) return self.builder.buildMinNum(lhs, rhs, ""); + if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmin, scalar_ty, 2, .{ lhs, rhs }); if (scalar_ty.isSignedInt()) return self.builder.buildSMin(lhs, rhs, ""); return self.builder.buildUMin(lhs, rhs, ""); } @@ -7045,7 +7045,7 @@ pub const FuncGen = struct { const rhs = try self.resolveInst(bin_op.rhs); const scalar_ty = self.air.typeOfIndex(inst).scalarType(); - if (scalar_ty.isAnyFloat()) return self.builder.buildMaxNum(lhs, rhs, ""); + if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmax, scalar_ty, 2, .{ lhs, rhs }); if (scalar_ty.isSignedInt()) return self.builder.buildSMax(lhs, rhs, ""); return self.builder.buildUMax(lhs, rhs, ""); } @@ -7215,20 +7215,28 @@ pub const FuncGen = struct { return self.buildFloatOp(.floor, inst_ty, 1, .{result}); } if (scalar_ty.isSignedInt()) { - // const d = @divTrunc(a, b); - // const r = @rem(a, b); - // return if (r == 0) d else d - ((a < 0) ^ (b < 0)); - const result_llvm_ty = try self.dg.lowerType(inst_ty); - const zero = result_llvm_ty.constNull(); - const div_trunc = self.builder.buildSDiv(lhs, rhs, ""); + const target = self.dg.module.getTarget(); + const inst_llvm_ty = try self.dg.lowerType(inst_ty); + const scalar_bit_size_minus_one = scalar_ty.bitSize(target) - 1; + const bit_size_minus_one = if (inst_ty.zigTypeTag() == .Vector) const_vector: { + const vec_len = inst_ty.vectorLen(); + const scalar_llvm_ty = try self.dg.lowerType(scalar_ty); + + const shifts = try self.gpa.alloc(*llvm.Value, vec_len); + defer self.gpa.free(shifts); + + @memset(shifts, scalar_llvm_ty.constInt(scalar_bit_size_minus_one, .False)); + break :const_vector llvm.constVector(shifts.ptr, vec_len); + } else inst_llvm_ty.constInt(scalar_bit_size_minus_one, .False); + + const div = self.builder.buildSDiv(lhs, rhs, ""); const rem = self.builder.buildSRem(lhs, rhs, ""); - const rem_eq_0 = self.builder.buildICmp(.EQ, rem, zero, ""); - const a_lt_0 = self.builder.buildICmp(.SLT, lhs, zero, ""); - const b_lt_0 = self.builder.buildICmp(.SLT, rhs, zero, ""); - const a_b_xor = self.builder.buildXor(a_lt_0, b_lt_0, ""); - const a_b_xor_ext = self.builder.buildZExt(a_b_xor, div_trunc.typeOf(), ""); - const d_sub_xor = self.builder.buildSub(div_trunc, a_b_xor_ext, ""); - return self.builder.buildSelect(rem_eq_0, div_trunc, d_sub_xor, ""); + const div_sign = self.builder.buildXor(lhs, rhs, ""); + const div_sign_mask = self.builder.buildAShr(div_sign, bit_size_minus_one, ""); + const zero = inst_llvm_ty.constNull(); + const rem_nonzero = self.builder.buildICmp(.NE, rem, zero, ""); + const correction = self.builder.buildSelect(rem_nonzero, div_sign_mask, zero, ""); + return self.builder.buildNSWAdd(div, correction, ""); } return self.builder.buildUDiv(lhs, rhs, ""); } @@ -7280,12 +7288,27 @@ pub const FuncGen = struct { return self.builder.buildSelect(ltz, c, a, ""); } if (scalar_ty.isSignedInt()) { - const a = self.builder.buildSRem(lhs, rhs, ""); - const b 
= self.builder.buildNSWAdd(a, rhs, ""); - const c = self.builder.buildSRem(b, rhs, ""); + const target = self.dg.module.getTarget(); + const scalar_bit_size_minus_one = scalar_ty.bitSize(target) - 1; + const bit_size_minus_one = if (inst_ty.zigTypeTag() == .Vector) const_vector: { + const vec_len = inst_ty.vectorLen(); + const scalar_llvm_ty = try self.dg.lowerType(scalar_ty); + + const shifts = try self.gpa.alloc(*llvm.Value, vec_len); + defer self.gpa.free(shifts); + + @memset(shifts, scalar_llvm_ty.constInt(scalar_bit_size_minus_one, .False)); + break :const_vector llvm.constVector(shifts.ptr, vec_len); + } else inst_llvm_ty.constInt(scalar_bit_size_minus_one, .False); + + const rem = self.builder.buildSRem(lhs, rhs, ""); + const div_sign = self.builder.buildXor(lhs, rhs, ""); + const div_sign_mask = self.builder.buildAShr(div_sign, bit_size_minus_one, ""); + const rhs_masked = self.builder.buildAnd(rhs, div_sign_mask, ""); const zero = inst_llvm_ty.constNull(); - const ltz = self.builder.buildICmp(.SLT, lhs, zero, ""); - return self.builder.buildSelect(ltz, c, a, ""); + const rem_nonzero = self.builder.buildICmp(.NE, rem, zero, ""); + const correction = self.builder.buildSelect(rem_nonzero, rhs_masked, zero, ""); + return self.builder.buildNSWAdd(rem, correction, ""); } return self.builder.buildURem(lhs, rhs, ""); } diff --git a/test/behavior/math.zig b/test/behavior/math.zig index 7e16111059..0e1e14a228 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -449,6 +449,9 @@ fn testDivision() !void { try expect(mod(i32, 10, 12) == 10); try expect(mod(i32, -14, 12) == 10); try expect(mod(i32, -2, 12) == 10); + try expect(mod(i32, 10, -12) == -2); + try expect(mod(i32, -14, -12) == -2); + try expect(mod(i32, -2, -12) == -2); comptime { try expect( diff --git a/test/behavior/maximum_minimum.zig b/test/behavior/maximum_minimum.zig index e6a8553e8c..1bd3723fca 100644 --- a/test/behavior/maximum_minimum.zig +++ b/test/behavior/maximum_minimum.zig @@ -96,6 +96,31 @@ test "@min for vectors" { comptime try S.doTheTest(); } +test "@min/max for floats" { + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + + const S = struct { + fn doTheTest(comptime T: type) !void { + var x: T = -3.14; + var y: T = 5.27; + try expectEqual(x, @min(x, y)); + try expectEqual(x, @min(y, x)); + try expectEqual(y, @max(x, y)); + try expectEqual(y, @max(y, x)); + } + }; + + inline for (.{ f16, f32, f64, f80, f128, c_longdouble }) |T| { + try S.doTheTest(T); + comptime try S.doTheTest(T); + } + comptime try S.doTheTest(comptime_float); +} + test "@min/@max on lazy values" { const A = extern struct { u8_4: [4]u8 }; const B = extern struct { u8_16: [16]u8 }; diff --git a/test/cases/compile_errors/array_access_of_non_array.zig b/test/cases/compile_errors/array_access_of_non_array.zig index 06fa1569e6..0f06bae1dd 100644 --- a/test/cases/compile_errors/array_access_of_non_array.zig +++ b/test/cases/compile_errors/array_access_of_non_array.zig @@ -1,9 +1,9 @@ export fn f() void { - var bad : bool = undefined; + var bad: bool = undefined; bad[0] = bad[0]; } export fn g() void { - var bad : bool = undefined; + var bad: bool = undefined; _ = bad[0]; } @@ -11,5 +11,7 @@ export fn 
g() void { // backend=stage2 // target=native // -// :3:8: error: element access of non-indexable type 'bool' -// :7:12: error: element access of non-indexable type 'bool' +// :3:8: error: type 'bool' does not support indexing +// :3:8: note: operand must be an array, slice, tuple, or vector +// :7:12: error: type 'bool' does not support indexing +// :7:12: note: operand must be an array, slice, tuple, or vector diff --git a/test/cases/compile_errors/array_access_of_type.zig b/test/cases/compile_errors/array_access_of_type.zig index d38a13b2fa..1e66ca3776 100644 --- a/test/cases/compile_errors/array_access_of_type.zig +++ b/test/cases/compile_errors/array_access_of_type.zig @@ -7,4 +7,5 @@ export fn foo() void { // backend=stage2 // target=native // -// :2:14: error: element access of non-indexable type 'type' +// :2:14: error: type 'type' does not support indexing +// :2:14: note: operand must be an array, slice, tuple, or vector diff --git a/test/cases/compile_errors/compile_time_null_ptr_cast.zig b/test/cases/compile_errors/compile_time_null_ptr_cast.zig index d3750c8654..25805e9f35 100644 --- a/test/cases/compile_errors/compile_time_null_ptr_cast.zig +++ b/test/cases/compile_errors/compile_time_null_ptr_cast.zig @@ -8,4 +8,4 @@ comptime { // backend=llvm // target=native // -// :3:32: error: null pointer casted to type *i32 +// :3:32: error: null pointer casted to type '*i32' diff --git a/test/cases/compile_errors/for.zig b/test/cases/compile_errors/for.zig index 5bd3aa0c64..435bb68607 100644 --- a/test/cases/compile_errors/for.zig +++ b/test/cases/compile_errors/for.zig @@ -31,8 +31,8 @@ export fn d() void { // :2:5: error: non-matching for loop lengths // :2:11: note: length 10 here // :2:19: note: length 11 here -// :9:14: error: type 'bool' does not support indexing -// :9:14: note: for loop operand must be an array, slice, tuple, or vector +// :9:14: error: type 'bool' is not indexable and not a range +// :9:14: note: for loop operand must be a range, array, slice, tuple, or vector // :15:16: error: pointer capture of non pointer type '[10]u8' // :15:10: note: consider using '&' here // :22:5: error: unbounded for loop diff --git a/test/cases/compile_errors/incorrect_type_to_memset_memcpy.zig b/test/cases/compile_errors/incorrect_type_to_memset_memcpy.zig index 910ec807a8..2a4990b1ec 100644 --- a/test/cases/compile_errors/incorrect_type_to_memset_memcpy.zig +++ b/test/cases/compile_errors/incorrect_type_to_memset_memcpy.zig @@ -25,12 +25,12 @@ pub export fn non_matching_lengths() void { // target=native // // :5:5: error: unknown @memcpy length -// :5:18: note: destination type [*]u8 provides no length -// :5:24: note: source type [*]align(4) const u8 provides no length +// :5:18: note: destination type '[*]u8' provides no length +// :5:24: note: source type '[*]align(4) const u8' provides no length // :10:13: error: type 'u8' does not support indexing -// :10:13: note: for loop operand must be an array, slice, tuple, or vector +// :10:13: note: operand must be an array, slice, tuple, or vector // :15:13: error: type '*u8' does not support indexing -// :15:13: note: for loop operand must be an array, slice, tuple, or vector +// :15:13: note: operand must be an array, slice, tuple, or vector // :20:5: error: non-matching @memcpy lengths // :20:13: note: length 6 here // :20:20: note: length 5 here diff --git a/test/cases/compile_errors/indexing_non-tuple_struct.zig b/test/cases/compile_errors/indexing_non-tuple_struct.zig index 912ee689d1..1691faaae0 100644 --- 
a/test/cases/compile_errors/indexing_non-tuple_struct.zig +++ b/test/cases/compile_errors/indexing_non-tuple_struct.zig @@ -8,4 +8,5 @@ export fn a() void { // backend=stage2 // target=native // -// :4:6: error: element access of non-indexable type 'tmp.a.S' +// :4:6: error: type 'tmp.a.S' does not support indexing +// :4:6: note: operand must be an array, slice, tuple, or vector diff --git a/test/cases/compile_errors/indexing_single-item_pointer.zig b/test/cases/compile_errors/indexing_single-item_pointer.zig index efd063817c..41b94c8f26 100644 --- a/test/cases/compile_errors/indexing_single-item_pointer.zig +++ b/test/cases/compile_errors/indexing_single-item_pointer.zig @@ -6,4 +6,5 @@ export fn entry(ptr: *i32) i32 { // backend=stage2 // target=native // -// :2:15: error: element access of non-indexable type '*i32' +// :2:15: error: type '*i32' does not support indexing +// :2:15: note: operand must be an array, slice, tuple, or vector diff --git a/test/cases/compile_errors/issue_15572_break_on_inline_while.zig b/test/cases/compile_errors/issue_15572_break_on_inline_while.zig new file mode 100644 index 0000000000..a09a72fde5 --- /dev/null +++ b/test/cases/compile_errors/issue_15572_break_on_inline_while.zig @@ -0,0 +1,20 @@ +const std = @import("std"); + +pub const DwarfSection = enum { + eh_frame, + eh_frame_hdr, +}; + +pub fn main() void { + const section = inline for (@typeInfo(DwarfSection).Enum.fields) |section| { + if (std.mem.eql(u8, section.name, "eh_frame")) break section; + }; + + _ = section; +} + +// error +// backend=stage2 +// target=native +// +// :9:28: error: incompatible types: 'builtin.Type.EnumField' and 'void' diff --git a/test/standalone.zig b/test/standalone.zig index 1f4d7cfded..a055da9761 100644 --- a/test/standalone.zig +++ b/test/standalone.zig @@ -55,6 +55,10 @@ pub const simple_cases = [_]SimpleCase{ .os_filter = .windows, .link_libc = true, }, + .{ + .src_path = "test/standalone/http.zig", + .all_modes = true, + }, // Ensure the development tools are buildable. Alphabetically sorted. // No need to build `tools/spirv/grammar.zig`. 
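The rewritten signed @divFloor and @mod lowering in src/codegen/llvm.zig above derives an all-ones mask from ashr(lhs ^ rhs, bits - 1) when the operand signs differ, then adds a correction only when the truncated remainder is nonzero (splatting the shift amount per lane for vectors). A minimal scalar sketch of the same identity, using hypothetical names divFloorSketch/modSketch that are not part of this patch:

const std = @import("std");

// Branchless floored division: truncate, then apply a -1 correction
// iff the signs differ and the division was inexact.
fn divFloorSketch(a: i32, b: i32) i32 {
    const div = @divTrunc(a, b);
    const rem = @rem(a, b);
    // -1 when the operand signs differ, 0 otherwise; the lowering gets
    // the same mask from an arithmetic shift of (a ^ b).
    const sign_mask: i32 = if ((a < 0) != (b < 0)) -1 else 0;
    const correction: i32 = if (rem != 0) sign_mask else 0;
    return div + correction;
}

// Floored modulo: add the divisor back iff the truncated remainder is
// nonzero and the signs differ, so the result takes the divisor's sign.
fn modSketch(a: i32, b: i32) i32 {
    const rem = @rem(a, b);
    const sign_mask: i32 = if ((a < 0) != (b < 0)) -1 else 0;
    const correction: i32 = if (rem != 0) (b & sign_mask) else 0;
    return rem + correction;
}

test "sketch agrees with the builtins" {
    try std.testing.expectEqual(@divFloor(@as(i32, 10), -12), divFloorSketch(10, -12));
    try std.testing.expectEqual(@mod(@as(i32, -14), -12), modSketch(-14, -12));
    try std.testing.expectEqual(@as(i32, -2), modSketch(10, -12));
}

These are the same negative-divisor cases the new assertions in test/behavior/math.zig exercise.
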
diff --git a/test/standalone/http.zig b/test/standalone/http.zig new file mode 100644 index 0000000000..13dc278b6d --- /dev/null +++ b/test/standalone/http.zig @@ -0,0 +1,541 @@ +const std = @import("std"); + +const http = std.http; +const Server = http.Server; +const Client = http.Client; + +const mem = std.mem; +const testing = std.testing; + +const max_header_size = 8192; + +var gpa_server = std.heap.GeneralPurposeAllocator(.{ .stack_trace_frames = 12 }){}; +var gpa_client = std.heap.GeneralPurposeAllocator(.{ .stack_trace_frames = 12 }){}; + +const salloc = gpa_server.allocator(); +const calloc = gpa_client.allocator(); + +var server: Server = undefined; + +fn handleRequest(res: *Server.Response) !void { + const log = std.log.scoped(.server); + + log.info("{s} {s} {s}", .{ @tagName(res.request.method), @tagName(res.request.version), res.request.target }); + + const body = try res.reader().readAllAlloc(salloc, 8192); + defer salloc.free(body); + + if (res.request.headers.contains("connection")) { + try res.headers.append("connection", "keep-alive"); + } + + if (mem.startsWith(u8, res.request.target, "/get")) { + if (std.mem.indexOf(u8, res.request.target, "?chunked") != null) { + res.transfer_encoding = .chunked; + } else { + res.transfer_encoding = .{ .content_length = 14 }; + } + + try res.headers.append("content-type", "text/plain"); + + try res.do(); + if (res.request.method != .HEAD) { + try res.writeAll("Hello, "); + try res.writeAll("World!\n"); + try res.finish(); + } + } else if (mem.startsWith(u8, res.request.target, "/large")) { + res.transfer_encoding = .{ .content_length = 14 * 1024 + 14 * 10 }; + + try res.do(); + + var i: u32 = 0; + while (i < 5) : (i += 1) { + try res.writeAll("Hello, World!\n"); + } + + try res.writeAll("Hello, World!\n" ** 1024); + + i = 0; + while (i < 5) : (i += 1) { + try res.writeAll("Hello, World!\n"); + } + + try res.finish(); + } else if (mem.eql(u8, res.request.target, "/echo-content")) { + try testing.expectEqualStrings("Hello, World!\n", body); + try testing.expectEqualStrings("text/plain", res.request.headers.getFirstValue("content-type").?); + + if (res.request.headers.contains("transfer-encoding")) { + try testing.expectEqualStrings("chunked", res.request.headers.getFirstValue("transfer-encoding").?); + res.transfer_encoding = .chunked; + } else { + res.transfer_encoding = .{ .content_length = 14 }; + try testing.expectEqualStrings("14", res.request.headers.getFirstValue("content-length").?); + } + + try res.do(); + try res.writeAll("Hello, "); + try res.writeAll("World!\n"); + try res.finish(); + } else if (mem.eql(u8, res.request.target, "/trailer")) { + res.transfer_encoding = .chunked; + + try res.do(); + try res.writeAll("Hello, "); + try res.writeAll("World!\n"); + // try res.finish(); + try res.connection.writeAll("0\r\nX-Checksum: aaaa\r\n\r\n"); + try res.connection.flush(); + } else if (mem.eql(u8, res.request.target, "/redirect/1")) { + res.transfer_encoding = .chunked; + + res.status = .found; + try res.headers.append("location", "../../get"); + + try res.do(); + try res.writeAll("Hello, "); + try res.writeAll("Redirected!\n"); + try res.finish(); + } else if (mem.eql(u8, res.request.target, "/redirect/2")) { + res.transfer_encoding = .chunked; + + res.status = .found; + try res.headers.append("location", "/redirect/1"); + + try res.do(); + try res.writeAll("Hello, "); + try res.writeAll("Redirected!\n"); + try res.finish(); + } else if (mem.eql(u8, res.request.target, "/redirect/3")) { + res.transfer_encoding = .chunked; + + 
const location = try std.fmt.allocPrint(salloc, "http://127.0.0.1:{d}/redirect/2", .{server.socket.listen_address.getPort()}); + defer salloc.free(location); + + res.status = .found; + try res.headers.append("location", location); + + try res.do(); + try res.writeAll("Hello, "); + try res.writeAll("Redirected!\n"); + try res.finish(); + } else if (mem.eql(u8, res.request.target, "/redirect/4")) { + res.transfer_encoding = .chunked; + + res.status = .found; + try res.headers.append("location", "/redirect/3"); + + try res.do(); + try res.writeAll("Hello, "); + try res.writeAll("Redirected!\n"); + try res.finish(); + } else { + res.status = .not_found; + try res.do(); + } +} + +var handle_new_requests = true; + +fn runServer(srv: *Server) !void { + outer: while (handle_new_requests) { + var res = try srv.accept(.{ + .allocator = salloc, + .header_strategy = .{ .dynamic = max_header_size }, + }); + defer res.deinit(); + + while (res.reset() != .closing) { + res.wait() catch |err| switch (err) { + error.HttpHeadersInvalid => continue :outer, + error.EndOfStream => continue, + else => return err, + }; + + try handleRequest(&res); + } + } +} + +fn serverThread(srv: *Server) void { + defer srv.deinit(); + defer _ = gpa_server.deinit(); + + runServer(srv) catch |err| { + std.debug.print("server error: {}\n", .{err}); + + if (@errorReturnTrace()) |trace| { + std.debug.dumpStackTrace(trace.*); + } + + _ = gpa_server.deinit(); + std.os.exit(1); + }; +} + +fn killServer(addr: std.net.Address) void { + handle_new_requests = false; + + const conn = std.net.tcpConnectToAddress(addr) catch return; + conn.close(); +} + +pub fn main() !void { + const log = std.log.scoped(.client); + + defer _ = gpa_client.deinit(); + + server = Server.init(salloc, .{ .reuse_address = true }); + + const addr = std.net.Address.parseIp("127.0.0.1", 0) catch unreachable; + try server.listen(addr); + + const port = server.socket.listen_address.getPort(); + + const server_thread = try std.Thread.spawn(.{}, serverThread, .{&server}); + + var client = Client{ .allocator = calloc }; + // defer client.deinit(); handled below + + { // read content-length response + var h = http.Headers{ .allocator = calloc }; + defer h.deinit(); + + const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/get", .{port}); + defer calloc.free(location); + const uri = try std.Uri.parse(location); + + log.info("{s}", .{location}); + var req = try client.request(.GET, uri, h, .{}); + defer req.deinit(); + + try req.start(); + try req.wait(); + + const body = try req.reader().readAllAlloc(calloc, 8192); + defer calloc.free(body); + + try testing.expectEqualStrings("Hello, World!\n", body); + try testing.expectEqualStrings("text/plain", req.response.headers.getFirstValue("content-type").?); + } + + // connection has been kept alive + try testing.expect(client.connection_pool.free_len == 1); + + { // read large content-length response + var h = http.Headers{ .allocator = calloc }; + defer h.deinit(); + + const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/large", .{port}); + defer calloc.free(location); + const uri = try std.Uri.parse(location); + + log.info("{s}", .{location}); + var req = try client.request(.GET, uri, h, .{}); + defer req.deinit(); + + try req.start(); + try req.wait(); + + const body = try req.reader().readAllAlloc(calloc, 8192 * 1024); + defer calloc.free(body); + + try testing.expectEqual(@as(usize, 14 * 1024 + 14 * 10), body.len); + } + + // connection has been kept alive + try 
testing.expect(client.connection_pool.free_len == 1); + + { // send head request and not read chunked + var h = http.Headers{ .allocator = calloc }; + defer h.deinit(); + + const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/get", .{port}); + defer calloc.free(location); + const uri = try std.Uri.parse(location); + + log.info("{s}", .{location}); + var req = try client.request(.HEAD, uri, h, .{}); + defer req.deinit(); + + try req.start(); + try req.wait(); + + const body = try req.reader().readAllAlloc(calloc, 8192); + defer calloc.free(body); + + try testing.expectEqualStrings("", body); + try testing.expectEqualStrings("text/plain", req.response.headers.getFirstValue("content-type").?); + try testing.expectEqualStrings("14", req.response.headers.getFirstValue("content-length").?); + } + + // connection has been kept alive + try testing.expect(client.connection_pool.free_len == 1); + + { // read chunked response + var h = http.Headers{ .allocator = calloc }; + defer h.deinit(); + + const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/get?chunked", .{port}); + defer calloc.free(location); + const uri = try std.Uri.parse(location); + + log.info("{s}", .{location}); + var req = try client.request(.GET, uri, h, .{}); + defer req.deinit(); + + try req.start(); + try req.wait(); + + const body = try req.reader().readAllAlloc(calloc, 8192); + defer calloc.free(body); + + try testing.expectEqualStrings("Hello, World!\n", body); + try testing.expectEqualStrings("text/plain", req.response.headers.getFirstValue("content-type").?); + } + + // connection has been kept alive + try testing.expect(client.connection_pool.free_len == 1); + + { // send head request and not read chunked + var h = http.Headers{ .allocator = calloc }; + defer h.deinit(); + + const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/get?chunked", .{port}); + defer calloc.free(location); + const uri = try std.Uri.parse(location); + + log.info("{s}", .{location}); + var req = try client.request(.HEAD, uri, h, .{}); + defer req.deinit(); + + try req.start(); + try req.wait(); + + const body = try req.reader().readAllAlloc(calloc, 8192); + defer calloc.free(body); + + try testing.expectEqualStrings("", body); + try testing.expectEqualStrings("text/plain", req.response.headers.getFirstValue("content-type").?); + try testing.expectEqualStrings("chunked", req.response.headers.getFirstValue("transfer-encoding").?); + } + + // connection has been kept alive + try testing.expect(client.connection_pool.free_len == 1); + + { // check trailing headers + var h = http.Headers{ .allocator = calloc }; + defer h.deinit(); + + const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/trailer", .{port}); + defer calloc.free(location); + const uri = try std.Uri.parse(location); + + log.info("{s}", .{location}); + var req = try client.request(.GET, uri, h, .{}); + defer req.deinit(); + + try req.start(); + try req.wait(); + + const body = try req.reader().readAllAlloc(calloc, 8192); + defer calloc.free(body); + + try testing.expectEqualStrings("Hello, World!\n", body); + try testing.expectEqualStrings("aaaa", req.response.headers.getFirstValue("x-checksum").?); + } + + // connection has been kept alive + try testing.expect(client.connection_pool.free_len == 1); + + { // send content-length request + var h = http.Headers{ .allocator = calloc }; + defer h.deinit(); + + try h.append("content-type", "text/plain"); + + const location = try std.fmt.allocPrint(calloc, 
"http://127.0.0.1:{d}/echo-content", .{port}); + defer calloc.free(location); + const uri = try std.Uri.parse(location); + + log.info("{s}", .{location}); + var req = try client.request(.POST, uri, h, .{}); + defer req.deinit(); + + req.transfer_encoding = .{ .content_length = 14 }; + + try req.start(); + try req.writeAll("Hello, "); + try req.writeAll("World!\n"); + try req.finish(); + + try req.wait(); + + const body = try req.reader().readAllAlloc(calloc, 8192); + defer calloc.free(body); + + try testing.expectEqualStrings("Hello, World!\n", body); + } + + // connection has been kept alive + try testing.expect(client.connection_pool.free_len == 1); + + { // read content-length response with connection close + var h = http.Headers{ .allocator = calloc }; + defer h.deinit(); + + try h.append("connection", "close"); + + const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/get", .{port}); + defer calloc.free(location); + const uri = try std.Uri.parse(location); + + log.info("{s}", .{location}); + var req = try client.request(.GET, uri, h, .{}); + defer req.deinit(); + + try req.start(); + try req.wait(); + + const body = try req.reader().readAllAlloc(calloc, 8192); + defer calloc.free(body); + + try testing.expectEqualStrings("Hello, World!\n", body); + try testing.expectEqualStrings("text/plain", req.response.headers.getFirstValue("content-type").?); + } + + // connection has been closed + try testing.expect(client.connection_pool.free_len == 0); + + { // send chunked request + var h = http.Headers{ .allocator = calloc }; + defer h.deinit(); + + try h.append("content-type", "text/plain"); + + const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/echo-content", .{port}); + defer calloc.free(location); + const uri = try std.Uri.parse(location); + + log.info("{s}", .{location}); + var req = try client.request(.POST, uri, h, .{}); + defer req.deinit(); + + req.transfer_encoding = .chunked; + + try req.start(); + try req.writeAll("Hello, "); + try req.writeAll("World!\n"); + try req.finish(); + + try req.wait(); + + const body = try req.reader().readAllAlloc(calloc, 8192); + defer calloc.free(body); + + try testing.expectEqualStrings("Hello, World!\n", body); + } + + // connection has been kept alive + try testing.expect(client.connection_pool.free_len == 1); + + { // relative redirect + var h = http.Headers{ .allocator = calloc }; + defer h.deinit(); + + const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/redirect/1", .{port}); + defer calloc.free(location); + const uri = try std.Uri.parse(location); + + log.info("{s}", .{location}); + var req = try client.request(.GET, uri, h, .{}); + defer req.deinit(); + + try req.start(); + try req.wait(); + + const body = try req.reader().readAllAlloc(calloc, 8192); + defer calloc.free(body); + + try testing.expectEqualStrings("Hello, World!\n", body); + } + + // connection has been kept alive + try testing.expect(client.connection_pool.free_len == 1); + + { // redirect from root + var h = http.Headers{ .allocator = calloc }; + defer h.deinit(); + + const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/redirect/2", .{port}); + defer calloc.free(location); + const uri = try std.Uri.parse(location); + + log.info("{s}", .{location}); + var req = try client.request(.GET, uri, h, .{}); + defer req.deinit(); + + try req.start(); + try req.wait(); + + const body = try req.reader().readAllAlloc(calloc, 8192); + defer calloc.free(body); + + try testing.expectEqualStrings("Hello, World!\n", body); + 
} + + // connection has been kept alive + try testing.expect(client.connection_pool.free_len == 1); + + { // absolute redirect + var h = http.Headers{ .allocator = calloc }; + defer h.deinit(); + + const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/redirect/3", .{port}); + defer calloc.free(location); + const uri = try std.Uri.parse(location); + + log.info("{s}", .{location}); + var req = try client.request(.GET, uri, h, .{}); + defer req.deinit(); + + try req.start(); + try req.wait(); + + const body = try req.reader().readAllAlloc(calloc, 8192); + defer calloc.free(body); + + try testing.expectEqualStrings("Hello, World!\n", body); + } + + // connection has been kept alive + try testing.expect(client.connection_pool.free_len == 1); + + { // too many redirects + var h = http.Headers{ .allocator = calloc }; + defer h.deinit(); + + const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/redirect/4", .{port}); + defer calloc.free(location); + const uri = try std.Uri.parse(location); + + log.info("{s}", .{location}); + var req = try client.request(.GET, uri, h, .{}); + defer req.deinit(); + + try req.start(); + req.wait() catch |err| switch (err) { + error.TooManyHttpRedirects => {}, + else => return err, + }; + } + + // connection has been kept alive + try testing.expect(client.connection_pool.free_len == 1); + + client.deinit(); + + killServer(server.socket.listen_address); + server_thread.join(); +} |
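The /trailer handler in the test above ends its chunked body by hand instead of calling res.finish(), writing the terminating zero-length chunk followed by a trailer field that the client later reads back through getFirstValue("x-checksum"). Assuming each writeAll is flushed as its own chunk (the actual chunk boundaries depend on the server's buffering), the response body on the wire would look roughly like:

7\r\n
Hello, \r\n
7\r\n
World!\n\r\n
0\r\n
X-Checksum: aaaa\r\n
\r\n

The 7s are hexadecimal chunk sizes, and the blank line after the trailer field terminates the message, which is why the handler only needs the single connection.writeAll("0\r\nX-Checksum: aaaa\r\n\r\n") plus a flush.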
